aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSergei Trofimov <sergei.trofimov@arm.com>2015-03-10 13:09:31 +0000
committerSergei Trofimov <sergei.trofimov@arm.com>2015-03-10 13:09:31 +0000
commita747ec7e4c2ea8a25bfc675f80042eb6600c7050 (patch)
tree077c0439a89a5c33b9fa1dbf9e81146ca9960d3c
Initial commit of open source Workload Automation.
-rwxr-xr-x.gitignore30
-rw-r--r--LICENSE202
-rw-r--r--MANIFEST.in2
-rw-r--r--README.rst73
-rw-r--r--dev_scripts/README23
-rwxr-xr-xdev_scripts/clean_install34
-rwxr-xr-xdev_scripts/clear_env3
-rwxr-xr-xdev_scripts/get_apk_versions25
-rwxr-xr-xdev_scripts/pep822
-rwxr-xr-xdev_scripts/pylint47
-rw-r--r--doc/Makefile184
-rwxr-xr-xdoc/build_extension_docs.py46
-rwxr-xr-xdoc/build_instrumentation_method_map.py48
-rw-r--r--doc/source/_static/.gitignore0
-rw-r--r--doc/source/_templates/.gitignore0
-rw-r--r--doc/source/additional_topics.rst101
-rw-r--r--doc/source/agenda.rst608
-rw-r--r--doc/source/changes.rst7
-rw-r--r--doc/source/conf.py270
-rw-r--r--doc/source/configuration.rst188
-rw-r--r--doc/source/contributing.rst45
-rw-r--r--doc/source/conventions.rst74
-rw-r--r--doc/source/daq_device_setup.rst246
-rw-r--r--doc/source/device_setup.rst407
-rw-r--r--doc/source/execution_model.rst115
-rw-r--r--doc/source/index.rst138
-rw-r--r--doc/source/installation.rst144
-rw-r--r--doc/source/instrumentation_method_map.rst73
-rw-r--r--doc/source/instrumentation_method_map.template17
-rw-r--r--doc/source/invocation.rst135
-rw-r--r--doc/source/quickstart.rst162
-rw-r--r--doc/source/resources.rst45
-rw-r--r--doc/source/revent.rst97
-rw-r--r--doc/source/wa-execution.pngbin0 -> 104977 bytes
-rw-r--r--doc/source/writing_extensions.rst956
-rw-r--r--extras/README12
-rw-r--r--extras/pylintrc70
-rw-r--r--extras/walog.vim21
-rw-r--r--scripts/create_workload17
-rw-r--r--scripts/list_extensions16
-rw-r--r--scripts/run_workloads17
-rw-r--r--scripts/wa17
-rw-r--r--setup.py96
-rw-r--r--wlauto/__init__.py36
-rw-r--r--wlauto/agenda-example-biglittle.yaml79
-rw-r--r--wlauto/agenda-example-tutorial.yaml43
-rw-r--r--wlauto/commands/__init__.py16
-rw-r--r--wlauto/commands/create.py300
-rw-r--r--wlauto/commands/list.py59
-rw-r--r--wlauto/commands/run.py87
-rw-r--r--wlauto/commands/show.py101
-rw-r--r--wlauto/commands/templates/UiAutomation.java25
-rw-r--r--wlauto/commands/templates/android_benchmark27
-rw-r--r--wlauto/commands/templates/android_uiauto_benchmark24
-rw-r--r--wlauto/commands/templates/basic_workload28
-rw-r--r--wlauto/commands/templates/setup.template102
-rw-r--r--wlauto/commands/templates/uiauto_workload35
-rw-r--r--wlauto/common/__init__.py16
-rw-r--r--wlauto/common/android/BaseUiAutomation.classbin0 -> 4182 bytes
-rw-r--r--wlauto/common/android/__init__.py16
-rw-r--r--wlauto/common/android/device.py678
-rw-r--r--wlauto/common/android/resources.py36
-rw-r--r--wlauto/common/android/workload.py425
-rwxr-xr-xwlauto/common/bin/arm64/busyboxbin0 -> 1914688 bytes
-rwxr-xr-xwlauto/common/bin/arm64/reventbin0 -> 708977 bytes
-rwxr-xr-xwlauto/common/bin/armeabi/busyboxbin0 -> 1397424 bytes
-rwxr-xr-xwlauto/common/bin/armeabi/reventbin0 -> 526128 bytes
-rw-r--r--wlauto/common/linux/__init__.py16
-rw-r--r--wlauto/common/linux/device.py966
-rw-r--r--wlauto/common/resources.py64
-rw-r--r--wlauto/config_example.py284
-rw-r--r--wlauto/core/__init__.py16
-rw-r--r--wlauto/core/agenda.py244
-rw-r--r--wlauto/core/bootstrap.py195
-rw-r--r--wlauto/core/command.py67
-rw-r--r--wlauto/core/configuration.py756
-rw-r--r--wlauto/core/device.py418
-rw-r--r--wlauto/core/entry_point.py75
-rw-r--r--wlauto/core/execution.py798
-rw-r--r--wlauto/core/extension.py652
-rw-r--r--wlauto/core/extension_loader.py400
-rw-r--r--wlauto/core/exttype.py35
-rw-r--r--wlauto/core/instrumentation.py374
-rw-r--r--wlauto/core/resolver.py109
-rw-r--r--wlauto/core/resource.py182
-rw-r--r--wlauto/core/result.py321
-rw-r--r--wlauto/core/signal.py189
-rw-r--r--wlauto/core/version.py26
-rw-r--r--wlauto/core/workload.py94
-rw-r--r--wlauto/devices/__init__.py16
-rw-r--r--wlauto/devices/android/__init__.py16
-rw-r--r--wlauto/devices/android/generic/__init__.py37
-rw-r--r--wlauto/devices/android/juno/__init__.py173
-rw-r--r--wlauto/devices/android/nexus10/__init__.py48
-rw-r--r--wlauto/devices/android/nexus5/__init__.py40
-rw-r--r--wlauto/devices/android/note3/__init__.py76
-rw-r--r--wlauto/devices/android/odroidxu3/__init__.py38
-rw-r--r--wlauto/devices/android/tc2/__init__.py847
-rw-r--r--wlauto/devices/android/tc2/resources/board_template.txt96
-rw-r--r--wlauto/devices/android/tc2/resources/images_iks.txt25
-rw-r--r--wlauto/devices/android/tc2/resources/images_mp.txt55
-rw-r--r--wlauto/devices/linux/__init__.py16
-rw-r--r--wlauto/devices/linux/generic/__init__.py37
-rw-r--r--wlauto/devices/linux/odroidxu3_linux/__init__.py35
-rw-r--r--wlauto/exceptions.py143
-rw-r--r--wlauto/external/README74
-rwxr-xr-xwlauto/external/bbench_server/build.sh31
-rw-r--r--wlauto/external/bbench_server/jni/Android.mk9
-rwxr-xr-xwlauto/external/bbench_server/jni/bbench_server.cpp151
-rw-r--r--wlauto/external/daq_server/daqpower-1.0.1.tar.gzbin0 -> 12964 bytes
-rw-r--r--wlauto/external/daq_server/src/MANIFEST.in0
-rw-r--r--wlauto/external/daq_server/src/README0
-rwxr-xr-xwlauto/external/daq_server/src/build.sh25
-rw-r--r--wlauto/external/daq_server/src/daqpower/__init__.py17
-rw-r--r--wlauto/external/daq_server/src/daqpower/client.py380
-rw-r--r--wlauto/external/daq_server/src/daqpower/common.py99
-rw-r--r--wlauto/external/daq_server/src/daqpower/config.py154
-rw-r--r--wlauto/external/daq_server/src/daqpower/daq.py265
-rw-r--r--wlauto/external/daq_server/src/daqpower/log.py53
-rw-r--r--wlauto/external/daq_server/src/daqpower/server.py480
-rw-r--r--wlauto/external/daq_server/src/scripts/run-daq-server3
-rw-r--r--wlauto/external/daq_server/src/scripts/send-daq-command3
-rw-r--r--wlauto/external/daq_server/src/setup.py52
-rw-r--r--wlauto/external/louie/LICENSE12
-rw-r--r--wlauto/external/louie/__init__.py46
-rw-r--r--wlauto/external/louie/dispatcher.py591
-rw-r--r--wlauto/external/louie/error.py22
-rw-r--r--wlauto/external/louie/plugin.py108
-rw-r--r--wlauto/external/louie/prioritylist.py128
-rw-r--r--wlauto/external/louie/robustapply.py58
-rw-r--r--wlauto/external/louie/saferef.py179
-rw-r--r--wlauto/external/louie/sender.py39
-rw-r--r--wlauto/external/louie/signal.py30
-rw-r--r--wlauto/external/louie/test/__init__.py0
-rw-r--r--wlauto/external/louie/test/conftest.py5
-rw-r--r--wlauto/external/louie/test/fixture.py0
-rw-r--r--wlauto/external/louie/test/test_dispatcher.py154
-rw-r--r--wlauto/external/louie/test/test_plugin.py145
-rw-r--r--wlauto/external/louie/test/test_prioritydispatcher.py41
-rw-r--r--wlauto/external/louie/test/test_prioritylist.py62
-rw-r--r--wlauto/external/louie/test/test_robustapply.py34
-rw-r--r--wlauto/external/louie/test/test_saferef.py83
-rw-r--r--wlauto/external/louie/version.py8
-rwxr-xr-xwlauto/external/pmu_logger/Makefile7
-rwxr-xr-xwlauto/external/pmu_logger/README35
-rwxr-xr-xwlauto/external/pmu_logger/pmu_logger.c294
-rw-r--r--wlauto/external/pmu_logger/pmu_logger.kobin0 -> 7821 bytes
-rw-r--r--wlauto/external/readenergy/Makefile11
-rwxr-xr-xwlauto/external/readenergy/readenergybin0 -> 695696 bytes
-rw-r--r--wlauto/external/readenergy/readenergy.c345
-rw-r--r--wlauto/external/revent/Makefile12
-rw-r--r--wlauto/external/revent/revent.c598
-rw-r--r--wlauto/external/terminalsize.py92
-rwxr-xr-xwlauto/external/uiauto/build.sh21
-rw-r--r--wlauto/external/uiauto/build.xml92
-rw-r--r--wlauto/external/uiauto/project.properties14
-rw-r--r--wlauto/external/uiauto/src/com/arm/wlauto/uiauto/BaseUiAutomation.java113
-rw-r--r--wlauto/instrumentation/__init__.py27
-rw-r--r--wlauto/instrumentation/coreutil/__init__.py278
-rw-r--r--wlauto/instrumentation/daq/__init__.py221
-rw-r--r--wlauto/instrumentation/delay/__init__.py181
-rw-r--r--wlauto/instrumentation/dmesg/__init__.py62
-rw-r--r--wlauto/instrumentation/energy_probe/__init__.py145
-rw-r--r--wlauto/instrumentation/fps/__init__.py298
-rw-r--r--wlauto/instrumentation/hwmon/__init__.py120
-rw-r--r--wlauto/instrumentation/juno_energy/__init__.py77
-rwxr-xr-xwlauto/instrumentation/juno_energy/readenergybin0 -> 695696 bytes
-rw-r--r--wlauto/instrumentation/misc/__init__.py365
-rw-r--r--wlauto/instrumentation/perf/LICENSE9
-rw-r--r--wlauto/instrumentation/perf/__init__.py176
-rwxr-xr-xwlauto/instrumentation/perf/bin/arm64/perfbin0 -> 6149310 bytes
-rwxr-xr-xwlauto/instrumentation/perf/bin/armeabi/perfbin0 -> 4964116 bytes
-rw-r--r--wlauto/instrumentation/pmu_logger/__init__.py148
-rw-r--r--wlauto/instrumentation/streamline/__init__.py298
-rw-r--r--wlauto/instrumentation/trace_cmd/LICENSE39
-rw-r--r--wlauto/instrumentation/trace_cmd/__init__.py322
-rwxr-xr-xwlauto/instrumentation/trace_cmd/bin/arm64/trace-cmdbin0 -> 1475074 bytes
-rwxr-xr-xwlauto/instrumentation/trace_cmd/bin/armeabi/trace-cmdbin0 -> 1170276 bytes
-rw-r--r--wlauto/modules/__init__.py16
-rw-r--r--wlauto/modules/active_cooling.py64
-rw-r--r--wlauto/modules/flashing.py253
-rw-r--r--wlauto/modules/reset.py52
-rw-r--r--wlauto/resource_getters/__init__.py16
-rw-r--r--wlauto/resource_getters/standard.py350
-rw-r--r--wlauto/result_processors/__init__.py16
-rw-r--r--wlauto/result_processors/dvfs.py375
-rw-r--r--wlauto/result_processors/mongodb.py235
-rw-r--r--wlauto/result_processors/sqlite.py183
-rw-r--r--wlauto/result_processors/standard.py124
-rw-r--r--wlauto/result_processors/status.py51
-rw-r--r--wlauto/result_processors/syeg.py150
-rw-r--r--wlauto/tests/README12
-rw-r--r--wlauto/tests/__init__.py16
-rw-r--r--wlauto/tests/data/extensions/devices/test_device.py49
-rwxr-xr-xwlauto/tests/data/interrupts/after98
-rwxr-xr-xwlauto/tests/data/interrupts/before97
-rwxr-xr-xwlauto/tests/data/interrupts/result98
-rw-r--r--wlauto/tests/data/logcat.2.log14
-rw-r--r--wlauto/tests/data/logcat.log10
-rw-r--r--wlauto/tests/data/test-agenda.yaml25
-rw-r--r--wlauto/tests/data/test-config.py17
-rw-r--r--wlauto/tests/test_agenda.py195
-rw-r--r--wlauto/tests/test_config.py151
-rw-r--r--wlauto/tests/test_device.py99
-rw-r--r--wlauto/tests/test_diff.py44
-rw-r--r--wlauto/tests/test_execution.py1035
-rw-r--r--wlauto/tests/test_extension.py286
-rw-r--r--wlauto/tests/test_extension_loader.py51
-rw-r--r--wlauto/tests/test_instrumentation.py226
-rw-r--r--wlauto/tests/test_results_manager.py130
-rw-r--r--wlauto/tests/test_utils.py63
-rw-r--r--wlauto/tools/__init__.py16
-rw-r--r--wlauto/tools/extdoc.py134
-rw-r--r--wlauto/utils/__init__.py16
-rw-r--r--wlauto/utils/android.py368
-rw-r--r--wlauto/utils/cli.py27
-rw-r--r--wlauto/utils/cpuinfo.py44
-rw-r--r--wlauto/utils/doc.py305
-rw-r--r--wlauto/utils/formatter.py148
-rw-r--r--wlauto/utils/hwmon.py77
-rw-r--r--wlauto/utils/log.py223
-rw-r--r--wlauto/utils/misc.py703
-rw-r--r--wlauto/utils/netio.py98
-rw-r--r--wlauto/utils/serial_port.py111
-rw-r--r--wlauto/utils/ssh.py198
-rw-r--r--wlauto/utils/types.py176
-rw-r--r--wlauto/utils/uefi.py214
-rw-r--r--wlauto/workloads/__init__.py16
-rw-r--r--wlauto/workloads/andebench/__init__.py88
-rw-r--r--wlauto/workloads/andebench/com.arm.wlauto.uiauto.andebench.jarbin0 -> 3666 bytes
-rwxr-xr-xwlauto/workloads/andebench/uiauto/build.sh29
-rw-r--r--wlauto/workloads/andebench/uiauto/build.xml92
-rw-r--r--wlauto/workloads/andebench/uiauto/project.properties14
-rw-r--r--wlauto/workloads/andebench/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java108
-rw-r--r--wlauto/workloads/angrybirds/__init__.py30
-rw-r--r--wlauto/workloads/angrybirds/angrybirds_classic.reventbin0 -> 37647 bytes
-rw-r--r--wlauto/workloads/angrybirds/revent_files/.empty0
-rw-r--r--wlauto/workloads/angrybirds_rio/__init__.py30
-rw-r--r--wlauto/workloads/angrybirds_rio/revent_files/.empty0
-rw-r--r--wlauto/workloads/angrybirds_rio/revent_files/Nexus10.run.reventbin0 -> 25208 bytes
-rw-r--r--wlauto/workloads/angrybirds_rio/revent_files/Nexus10.setup.reventbin0 -> 2088 bytes
-rw-r--r--wlauto/workloads/anomaly2/__init__.py63
-rw-r--r--wlauto/workloads/antutu/__init__.py136
-rw-r--r--wlauto/workloads/antutu/com.arm.wlauto.uiauto.antutu.jarbin0 -> 5442 bytes
-rwxr-xr-xwlauto/workloads/antutu/uiauto/build.sh28
-rw-r--r--wlauto/workloads/antutu/uiauto/build.xml92
-rw-r--r--wlauto/workloads/antutu/uiauto/project.properties14
-rw-r--r--wlauto/workloads/antutu/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java295
-rw-r--r--wlauto/workloads/applaunch/__init__.py169
-rw-r--r--wlauto/workloads/applaunch/device_script.template69
-rw-r--r--wlauto/workloads/audio/__init__.py102
-rw-r--r--wlauto/workloads/bbench/__init__.py231
-rwxr-xr-xwlauto/workloads/bbench/bin/arm64/bbench_serverbin0 -> 570819 bytes
-rwxr-xr-xwlauto/workloads/bbench/bin/armeabi/bbench_serverbin0 -> 570819 bytes
-rw-r--r--wlauto/workloads/bbench/patches/bbench.js177
-rw-r--r--wlauto/workloads/bbench/patches/index_noinput.html56
-rw-r--r--wlauto/workloads/bbench/patches/results.html158
-rw-r--r--wlauto/workloads/benchmarkpi/__init__.py63
-rw-r--r--wlauto/workloads/benchmarkpi/com.arm.wlauto.uiauto.benchmarkpi.jarbin0 -> 3079 bytes
-rwxr-xr-xwlauto/workloads/benchmarkpi/uiauto/build.sh28
-rw-r--r--wlauto/workloads/benchmarkpi/uiauto/build.xml92
-rw-r--r--wlauto/workloads/benchmarkpi/uiauto/project.properties14
-rw-r--r--wlauto/workloads/benchmarkpi/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java62
-rw-r--r--wlauto/workloads/caffeinemark/__init__.py68
-rw-r--r--wlauto/workloads/caffeinemark/com.arm.wlauto.uiauto.caffeinemark.jarbin0 -> 3569 bytes
-rwxr-xr-xwlauto/workloads/caffeinemark/uiauto/build.sh28
-rw-r--r--wlauto/workloads/caffeinemark/uiauto/build.xml92
-rw-r--r--wlauto/workloads/caffeinemark/uiauto/project.properties14
-rw-r--r--wlauto/workloads/caffeinemark/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java85
-rw-r--r--wlauto/workloads/cameracapture/__init__.py51
-rw-r--r--wlauto/workloads/cameracapture/com.arm.wlauto.uiauto.cameracapture.jarbin0 -> 3125 bytes
-rwxr-xr-xwlauto/workloads/cameracapture/uiauto/build.sh28
-rw-r--r--wlauto/workloads/cameracapture/uiauto/build.xml92
-rw-r--r--wlauto/workloads/cameracapture/uiauto/project.properties14
-rw-r--r--wlauto/workloads/cameracapture/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java68
-rw-r--r--wlauto/workloads/camerarecord/__init__.py47
-rw-r--r--wlauto/workloads/camerarecord/com.arm.wlauto.uiauto.camerarecord.jarbin0 -> 3053 bytes
-rwxr-xr-xwlauto/workloads/camerarecord/uiauto/build.sh28
-rw-r--r--wlauto/workloads/camerarecord/uiauto/build.xml92
-rw-r--r--wlauto/workloads/camerarecord/uiauto/project.properties14
-rw-r--r--wlauto/workloads/camerarecord/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java65
-rw-r--r--wlauto/workloads/castlebuilder/__init__.py28
-rw-r--r--wlauto/workloads/castlebuilder/revent_files/.empty0
-rw-r--r--wlauto/workloads/castlebuilder/revent_files/Nexus10.run.reventbin0 -> 32768 bytes
-rw-r--r--wlauto/workloads/castlebuilder/revent_files/Nexus10.setup.reventbin0 -> 1088 bytes
-rw-r--r--wlauto/workloads/castlemaster/__init__.py30
-rw-r--r--wlauto/workloads/castlemaster/revent_files/.empty0
-rw-r--r--wlauto/workloads/castlemaster/revent_files/Nexus10.run.reventbin0 -> 28348 bytes
-rw-r--r--wlauto/workloads/castlemaster/revent_files/Nexus10.setup.reventbin0 -> 3448 bytes
-rw-r--r--wlauto/workloads/cfbench/__init__.py72
-rw-r--r--wlauto/workloads/cfbench/com.arm.wlauto.uiauto.cfbench.jarbin0 -> 2445 bytes
-rwxr-xr-xwlauto/workloads/cfbench/uiauto/build.sh28
-rw-r--r--wlauto/workloads/cfbench/uiauto/build.xml92
-rw-r--r--wlauto/workloads/cfbench/uiauto/project.properties14
-rw-r--r--wlauto/workloads/cfbench/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java63
-rw-r--r--wlauto/workloads/citadel/__init__.py44
-rw-r--r--wlauto/workloads/citadel/revent_files/.empty0
-rw-r--r--wlauto/workloads/citadel/revent_files/Nexus10.run.reventbin0 -> 608 bytes
-rw-r--r--wlauto/workloads/citadel/revent_files/Nexus10.setup.reventbin0 -> 6068 bytes
-rw-r--r--wlauto/workloads/cyclictest/LICENSE8
-rw-r--r--wlauto/workloads/cyclictest/__init__.py141
-rwxr-xr-xwlauto/workloads/cyclictest/bin/arm64/cyclictestbin0 -> 810676 bytes
-rwxr-xr-xwlauto/workloads/cyclictest/bin/armeabi/cyclictestbin0 -> 610188 bytes
-rw-r--r--wlauto/workloads/dex2oat/__init__.py121
-rw-r--r--wlauto/workloads/dhrystone/__init__.py109
-rwxr-xr-xwlauto/workloads/dhrystone/dhrystonebin0 -> 523482 bytes
-rwxr-xr-xwlauto/workloads/dhrystone/src/build.sh23
-rw-r--r--wlauto/workloads/dhrystone/src/jni/Android.mk11
-rw-r--r--wlauto/workloads/dhrystone/src/jni/dhrystone.c959
-rw-r--r--wlauto/workloads/dungeondefenders/__init__.py34
-rw-r--r--wlauto/workloads/dungeondefenders/revent_files/Nexus10.run.reventbin0 -> 301908 bytes
-rw-r--r--wlauto/workloads/dungeondefenders/revent_files/Nexus10.setup.reventbin0 -> 52148 bytes
-rw-r--r--wlauto/workloads/facebook/__init__.py82
-rw-r--r--wlauto/workloads/facebook/com.arm.wlauto.uiauto.facebook.jarbin0 -> 3755 bytes
-rwxr-xr-xwlauto/workloads/facebook/uiauto/build.sh27
-rw-r--r--wlauto/workloads/facebook/uiauto/build.xml92
-rw-r--r--wlauto/workloads/facebook/uiauto/project.properties14
-rw-r--r--wlauto/workloads/facebook/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java257
-rw-r--r--wlauto/workloads/geekbench/__init__.py351
-rw-r--r--wlauto/workloads/geekbench/com.arm.wlauto.uiauto.geekbench.jarbin0 -> 3523 bytes
-rwxr-xr-xwlauto/workloads/geekbench/uiauto/build.sh28
-rw-r--r--wlauto/workloads/geekbench/uiauto/build.xml92
-rw-r--r--wlauto/workloads/geekbench/uiauto/project.properties14
-rw-r--r--wlauto/workloads/geekbench/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java121
-rw-r--r--wlauto/workloads/glbcorp/__init__.py209
-rw-r--r--wlauto/workloads/glbenchmark/__init__.py158
-rw-r--r--wlauto/workloads/glbenchmark/com.arm.wlauto.uiauto.glb.jarbin0 -> 4629 bytes
-rwxr-xr-xwlauto/workloads/glbenchmark/uiauto/build.sh28
-rw-r--r--wlauto/workloads/glbenchmark/uiauto/build.xml92
-rw-r--r--wlauto/workloads/glbenchmark/uiauto/project.properties14
-rw-r--r--wlauto/workloads/glbenchmark/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java164
-rw-r--r--wlauto/workloads/gunbros2/__init__.py42
-rw-r--r--wlauto/workloads/homescreen/__init__.py42
-rw-r--r--wlauto/workloads/idle/__init__.py56
-rw-r--r--wlauto/workloads/ironman/__init__.py35
-rw-r--r--wlauto/workloads/ironman/revent_files/Nexus10.run.reventbin0 -> 1387568 bytes
-rw-r--r--wlauto/workloads/ironman/revent_files/Nexus10.setup.reventbin0 -> 3528 bytes
-rw-r--r--wlauto/workloads/krazykart/__init__.py28
-rw-r--r--wlauto/workloads/krazykart/revent_files/.empty0
-rw-r--r--wlauto/workloads/linpack/__init__.py64
-rw-r--r--wlauto/workloads/linpack/com.arm.wlauto.uiauto.linpack.jarbin0 -> 3138 bytes
-rwxr-xr-xwlauto/workloads/linpack/uiauto/build.sh28
-rw-r--r--wlauto/workloads/linpack/uiauto/build.xml92
-rw-r--r--wlauto/workloads/linpack/uiauto/project.properties14
-rw-r--r--wlauto/workloads/linpack/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java59
-rw-r--r--wlauto/workloads/manual/__init__.py105
-rw-r--r--wlauto/workloads/memcpy/__init__.py76
-rwxr-xr-xwlauto/workloads/memcpy/memcpybin0 -> 456813 bytes
-rwxr-xr-xwlauto/workloads/memcpy/src/build.sh21
-rw-r--r--wlauto/workloads/memcpy/src/jni/Android.mk11
-rw-r--r--wlauto/workloads/memcpy/src/jni/memcopy.c114
-rw-r--r--wlauto/workloads/nenamark/__init__.py58
-rw-r--r--wlauto/workloads/peacekeeper/__init__.py129
-rw-r--r--wlauto/workloads/peacekeeper/com.arm.wlauto.uiauto.peacekeeper.jarbin0 -> 3479 bytes
-rwxr-xr-xwlauto/workloads/peacekeeper/uiauto/build.sh27
-rw-r--r--wlauto/workloads/peacekeeper/uiauto/build.xml92
-rw-r--r--wlauto/workloads/peacekeeper/uiauto/project.properties14
-rw-r--r--wlauto/workloads/peacekeeper/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java115
-rw-r--r--wlauto/workloads/quadrant/__init__.py112
-rw-r--r--wlauto/workloads/quadrant/com.arm.wlauto.uiauto.quadrant.jarbin0 -> 3661 bytes
-rwxr-xr-xwlauto/workloads/quadrant/uiauto/build.sh28
-rw-r--r--wlauto/workloads/quadrant/uiauto/build.xml92
-rw-r--r--wlauto/workloads/quadrant/uiauto/project.properties14
-rw-r--r--wlauto/workloads/quadrant/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java120
-rw-r--r--wlauto/workloads/real_linpack/__init__.py66
-rw-r--r--wlauto/workloads/real_linpack/com.arm.wlauto.uiauto.reallinpack.jarbin0 -> 2951 bytes
-rwxr-xr-xwlauto/workloads/real_linpack/uiauto/build.sh28
-rw-r--r--wlauto/workloads/real_linpack/uiauto/build.xml92
-rw-r--r--wlauto/workloads/real_linpack/uiauto/project.properties14
-rw-r--r--wlauto/workloads/real_linpack/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java51
-rw-r--r--wlauto/workloads/realracing3/__init__.py35
-rw-r--r--wlauto/workloads/shellscript/__init__.py65
-rw-r--r--wlauto/workloads/skypevideo/__init__.py130
-rw-r--r--wlauto/workloads/skypevideo/com.arm.wlauto.uiauto.skypevideo.jarbin0 -> 3210 bytes
-rwxr-xr-xwlauto/workloads/skypevideo/uiauto/build.sh28
-rw-r--r--wlauto/workloads/skypevideo/uiauto/build.xml92
-rw-r--r--wlauto/workloads/skypevideo/uiauto/project.properties14
-rw-r--r--wlauto/workloads/skypevideo/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java72
-rw-r--r--wlauto/workloads/smartbench/__init__.py59
-rw-r--r--wlauto/workloads/smartbench/com.arm.wlauto.uiauto.smartbench.jarbin0 -> 2365 bytes
-rwxr-xr-xwlauto/workloads/smartbench/uiauto/build.sh28
-rw-r--r--wlauto/workloads/smartbench/uiauto/build.xml92
-rw-r--r--wlauto/workloads/smartbench/uiauto/project.properties14
-rw-r--r--wlauto/workloads/smartbench/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java62
-rw-r--r--wlauto/workloads/spec2000/__init__.py356
-rw-r--r--wlauto/workloads/sqlite/__init__.py48
-rw-r--r--wlauto/workloads/sqlite/com.arm.wlauto.uiauto.sqlite.jarbin0 -> 3488 bytes
-rwxr-xr-xwlauto/workloads/sqlite/uiauto/build.sh28
-rw-r--r--wlauto/workloads/sqlite/uiauto/build.xml92
-rw-r--r--wlauto/workloads/sqlite/uiauto/project.properties14
-rw-r--r--wlauto/workloads/sqlite/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java103
-rw-r--r--wlauto/workloads/sysbench/__init__.py111
-rw-r--r--wlauto/workloads/sysbench/sysbenchbin0 -> 76648 bytes
-rw-r--r--wlauto/workloads/templerun/__init__.py29
-rw-r--r--wlauto/workloads/templerun/revent_files/.empty0
-rw-r--r--wlauto/workloads/templerun/revent_files/Nexus10.run.reventbin0 -> 36864 bytes
-rw-r--r--wlauto/workloads/templerun/revent_files/Nexus10.setup.reventbin0 -> 88 bytes
-rwxr-xr-xwlauto/workloads/thechase/__init__.py46
-rw-r--r--wlauto/workloads/truckerparking3d/__init__.py29
-rw-r--r--wlauto/workloads/vellamo/__init__.py215
-rw-r--r--wlauto/workloads/vellamo/com.arm.wlauto.uiauto.vellamo.jarbin0 -> 5779 bytes
-rwxr-xr-xwlauto/workloads/vellamo/uiauto/build.sh28
-rw-r--r--wlauto/workloads/vellamo/uiauto/build.xml92
-rw-r--r--wlauto/workloads/vellamo/uiauto/project.properties14
-rw-r--r--wlauto/workloads/vellamo/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java260
-rw-r--r--wlauto/workloads/video/__init__.py137
-rw-r--r--wlauto/workloads/videostreaming/__init__.py73
-rw-r--r--wlauto/workloads/videostreaming/com.arm.wlauto.uiauto.videostreaming.jarbin0 -> 4334 bytes
-rwxr-xr-xwlauto/workloads/videostreaming/uiauto/build.sh28
-rw-r--r--wlauto/workloads/videostreaming/uiauto/build.xml92
-rw-r--r--wlauto/workloads/videostreaming/uiauto/project.properties14
-rw-r--r--wlauto/workloads/videostreaming/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java155
412 files changed, 41401 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100755
index 00000000..0f568f55
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,30 @@
+*.egg-info
+*.pyc
+*.bak
+*.o
+*.cmd
+Module.symvers
+modules.order
+*~
+tags
+build/
+dist/
+.ropeproject/
+wa_output/
+doc/source/api/
+doc/source/extensions/
+MANIFEST
+wlauto/external/uiautomator/bin/
+wlauto/external/uiautomator/*.properties
+wlauto/external/uiautomator/build.xml
+*.orig
+local.properties
+wlauto/external/revent/libs/
+wlauto/external/revent/obj/
+wlauto/external/bbench_server/libs/
+wlauto/external/bbench_server/obj/
+pmu_logger.mod.c
+.tmp_versions
+obj/
+libs/armeabi
+wlauto/workloads/*/uiauto/bin/
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 00000000..9790e788
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,2 @@
+recursive-include scripts *
+recursive-include doc *
diff --git a/README.rst b/README.rst
new file mode 100644
index 00000000..a48285fc
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,73 @@
+Workload Automation
++++++++++++++++++++
+
+Workload Automation (WA) is a framework for executing workloads and collecting
+measurements on Android and Linux devices. WA includes automation for nearly 50
+workloads (mostly Android), some common instrumentation (ftrace, ARM
+Streamline, hwmon). A number of output formats are supported.
+
+Workload Automation is designed primarily as a developer tool/framework to
+facilitate data driven development by providing a method of collecting
+measurements from a device in a repeatable way.
+
+Workload Automation is highly extensible. Most of the concrete functionality is
+implemented via plug-ins, and it is easy to write new plug-ins to support new
+device types, workloads, instrumentation or output processing.
+
+
+Requirements
+============
+
+- Python 2.7
+- Linux (should work on other Unixes, but untested)
+- Latest Android SDK (ANDROID_HOME must be set) for Android devices, or
+- SSH for Linux devices
+
+
+Installation
+============
+
+To install::
+
+ python setup.py sdist
+ sudo pip install dist/wlauto-*.tar.gz
+
+Please refer to the `installation section <./doc/source/installation.rst>`_
+in the documentation for more details.
+
+
+Basic Usage
+===========
+
+Please see the `Quickstart <./doc/source/quickstart.rst>`_ section of the
+documentation.
+
+
+Documentation
+=============
+
+Documentation in reStructuredText format may be found under ``doc/source``. To
+compile it into cross-linked HTML, make sure you have `Sphinx
+<http://sphinx-doc.org/install.html>`_ installed, and then ::
+
+ cd doc
+ make html
+
+
+License
+=======
+
+Workload Automation is distributed under `Apache v2.0 License
+<http://www.apache.org/licenses/LICENSE-2.0>`_. Workload automation includes
+binaries distributed under different licenses (see LICENSE files in specific
+directories).
+
+
+Feedback, Contributions and Support
+===================================
+
+- Please use the GitHub Issue Tracker associated with this repository for
+ feedback.
+- ARM licensees may contact ARM directly via their partner managers.
+- We welcome code contributions via GitHub Pull requests. Please see
+ "Contributing Code" section of the documentation for details.
diff --git a/dev_scripts/README b/dev_scripts/README
new file mode 100644
index 00000000..6ea0e95c
--- /dev/null
+++ b/dev_scripts/README
@@ -0,0 +1,23 @@
+This directory contains scripts that aid the development of Workload Automation.
+They were written to work as part of WA development environment and are not
+guaranteed to work if moved outside their current location. They should not be
+distributed as part of WA releases.
+
+Scripts
+-------
+
+:clean_install: Performs a clean install of WA from source. This will remove any
+ existing WA install (regardless of whether it was made from
+ source or through a tarball with pip).
+
+:clear_env: Clears ~/.workload_automation.
+
+:get_apk_versions: Prints out a table of APKs and their versions found under the
+ path specified as the argument.
+
+:pep8: Runs pep8 code checker (must be installed) over wlauto with the correct
+ settings for WA.
+
+:pylint: Runs pylint (must be installed) over wlauto with the correct settings
+ for WA.
+
diff --git a/dev_scripts/clean_install b/dev_scripts/clean_install
new file mode 100755
index 00000000..7d647e5a
--- /dev/null
+++ b/dev_scripts/clean_install
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+import os
+import sys
+import shutil
+import logging
+
+
+logging.basicConfig(level=logging.INFO)
+
+
+def get_installed_path():
+ paths = [p for p in sys.path if len(p) > 2]
+ for path in paths:
+ candidate = os.path.join(path, 'wlauto')
+ if os.path.isdir(candidate):
+ return candidate
+
+
+if __name__ == '__main__':
+ installed_path = get_installed_path()
+ if installed_path:
+ logging.info('Removing installed package from {}.'.format(installed_path))
+ shutil.rmtree(installed_path)
+ if os.path.isdir('build'):
+ logging.info('Removing local build directory.')
+ shutil.rmtree('build')
+ logging.info('Removing *.pyc files.')
+ for root, dirs, files in os.walk('wlauto'):
+ for file in files:
+ if file.lower().endswith('.pyc'):
+ os.remove(os.path.join(root, file))
+
+ os.system('python setup.py install')
+
diff --git a/dev_scripts/clear_env b/dev_scripts/clear_env
new file mode 100755
index 00000000..ff720d75
--- /dev/null
+++ b/dev_scripts/clear_env
@@ -0,0 +1,3 @@
+#!/bin/bash
+# Clear workload automation user environment.
+rm -rf ~/.workload_automation/
diff --git a/dev_scripts/get_apk_versions b/dev_scripts/get_apk_versions
new file mode 100755
index 00000000..39b08772
--- /dev/null
+++ b/dev_scripts/get_apk_versions
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+import os
+import sys
+import argparse
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
+
+from wlauto.exceptions import WAError
+from wlauto.utils.misc import write_table
+from distmanagement.apk import get_aapt_path, get_apk_versions
+
+
+if __name__ == '__main__':
+ try:
+ aapt = get_aapt_path()
+ parser = argparse.ArgumentParser()
+ parser.add_argument('path', metavar='PATH', help='Location to look for APKs.')
+ args = parser.parse_args()
+
+ versions = get_apk_versions(args.path, aapt)
+ write_table([v.to_tuple() for v in versions], sys.stdout,
+ align='<<<>>', headers=['path', 'package', 'name', 'version code', 'version name'])
+ except WAError, e:
+ logging.error(e)
+ sys.exit(1)
diff --git a/dev_scripts/pep8 b/dev_scripts/pep8
new file mode 100755
index 00000000..25507f38
--- /dev/null
+++ b/dev_scripts/pep8
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+EXCLUDE=wlauto/external/,wlauto/tests
+EXCLUDE_COMMA=wlauto/core/bootstrap.py,wlauto/workloads/geekbench/__init__.py
+IGNORE=E501,E265,E266,W391
+
+if ! hash pep8 2>/dev/null; then
+ echo "pep8 not found in PATH"
+ echo "you can install it with \"sudo pip install pep8\""
+ exit 1
+fi
+
+if [[ "$1" == "" ]]; then
+ THIS_DIR="`dirname \"$0\"`"
+ pushd $THIS_DIR/.. > /dev/null
+ pep8 --exclude=$EXCLUDE,$EXCLUDE_COMMA --ignore=$IGNORE wlauto
+ pep8 --exclude=$EXCLUDE --ignore=$IGNORE,E241 $(echo "$EXCLUDE_COMMA" | sed 's/,/ /g')
+ popd > /dev/null
+else
+ pep8 --exclude=$EXCLUDE,$EXCLUDE_COMMA --ignore=$IGNORE $1
+fi
+
diff --git a/dev_scripts/pylint b/dev_scripts/pylint
new file mode 100755
index 00000000..487d3c95
--- /dev/null
+++ b/dev_scripts/pylint
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+target=$1
+
+compare_versions() {
+ if [[ $1 == $2 ]]; then
+ return 0
+ fi
+
+ local IFS=.
+ local i ver1=($1) ver2=($2)
+
+ for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
+ ver1[i]=0
+ done
+
+ for ((i=0; i<${#ver1[@]}; i++)); do
+ if [[ -z ${ver2[i]} ]]; then
+ ver2[i]=0
+ fi
+ if ((10#${ver1[i]} > 10#${ver2[i]})); then
+ return 1
+ fi
+ if ((10#${ver1[i]} < 10#${ver2[i]})); then
+ return 2
+ fi
+ done
+
+ return 0
+}
+
+pylint_version=$(python -c 'from pylint.__pkginfo__ import version; print version')
+compare_versions $pylint_version "1.3.0"
+result=$?
+if [ "$result" == "2" ]; then
+ echo "ERROR: pylint version must be at least 1.3.0; found $pylint_version"
+ exit 1
+fi
+
+THIS_DIR="`dirname \"$0\"`"
+if [[ "$target" == "" ]]; then
+ pushd $THIS_DIR/.. > /dev/null
+ pylint --rcfile extras/pylintrc wlauto
+ popd > /dev/null
+else
+ pylint --rcfile $THIS_DIR/../extras/pylintrc $target
+fi
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 00000000..64237c70
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,184 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = build
+
+SPHINXAPI = sphinx-apidoc
+SPHINXAPIOPTS =
+
+WAEXT = ./build_extension_docs.py
+WAEXTOPTS = source/extensions ../wlauto ../wlauto/external ../wlauto/tests
+
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+ALLSPHINXAPIOPTS = -f $(SPHINXAPIOPTS) -o source/api ../wlauto
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+ @echo " coverage to run documentation coverage checks"
+
+clean:
+ rm -rf $(BUILDDIR)/*
+ rm -rf source/api/*
+ rm -rf source/extensions/*
+ rm -rf source/instrumentation_method_map.rst
+
+coverage:
+ $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
+ @echo
+ @echo "Build finished. The coverage reports are in $(BUILDDIR)/coverage."
+
+api: ../wlauto
+ rm -rf source/api/*
+ $(SPHINXAPI) $(ALLSPHINXAPIOPTS)
+
+waext: ../wlauto
+ rm -rf source/extensions
+ mkdir -p source/extensions
+ $(WAEXT) $(WAEXTOPTS)
+
+
+sigtab: ../wlauto/core/instrumentation.py source/instrumentation_method_map.template
+ rm -rf source/instrumentation_method_map.rst
+ ./build_instrumentation_method_map.py source/instrumentation_method_map.rst
+
+html: api waext sigtab
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml: api waext sigtab
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml: api waext sigtab
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle: api waext sigtab
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json: api waext sigtab
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp: api waext sigtab
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp: api waext sigtab
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/WorkloadAutomation2.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/WorkloadAutomation2.qhc"
+
+devhelp: api
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/WorkloadAutomation2"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/WorkloadAutomation2"
+ @echo "# devhelp"
+
+epub: api waext sigtab
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex: api waext sigtab
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf: api waext sigtab
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text: api waext sigtab
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man: api waext sigtab
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo: api waext sigtab
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+info: api waext sigtab
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext: api waext sigtab
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes: api waext sigtab
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck: api waext sigtab
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest: api waext sigtab
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/doc/build_extension_docs.py b/doc/build_extension_docs.py
new file mode 100755
index 00000000..bee61cdd
--- /dev/null
+++ b/doc/build_extension_docs.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+import sys
+
+from wlauto import ExtensionLoader
+from wlauto.utils.doc import get_rst_from_extension, underline
+from wlauto.utils.misc import capitalize
+
+
+GENERATE_FOR = ['workload', 'instrument', 'result_processor', 'device']
+
+
+def generate_extension_documentation(source_dir, outdir, ignore_paths):
+ loader = ExtensionLoader(keep_going=True)
+ loader.clear()
+ loader.update(paths=[source_dir], ignore_paths=ignore_paths)
+ for ext_type in loader.extension_kinds:
+ if not ext_type in GENERATE_FOR:
+ continue
+ outfile = os.path.join(outdir, '{}s.rst'.format(ext_type))
+ with open(outfile, 'w') as wfh:
+ wfh.write('.. _{}s:\n\n'.format(ext_type))
+ wfh.write(underline(capitalize('{}s'.format(ext_type))))
+ exts = loader.list_extensions(ext_type)
+ for ext in sorted(exts, key=lambda x: x.name):
+ wfh.write(get_rst_from_extension(ext))
+
+
+if __name__ == '__main__':
+ generate_extension_documentation(sys.argv[2], sys.argv[1], sys.argv[3:])
diff --git a/doc/build_instrumentation_method_map.py b/doc/build_instrumentation_method_map.py
new file mode 100755
index 00000000..a9438c92
--- /dev/null
+++ b/doc/build_instrumentation_method_map.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+# Copyright 2015-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import sys
+import string
+from copy import copy
+
+from wlauto.core.instrumentation import SIGNAL_MAP, PRIORITY_MAP
+from wlauto.utils.doc import format_simple_table
+
+
+CONVINIENCE_ALIASES = ['initialize', 'setup', 'start', 'stop', 'process_workload_result',
+ 'update_result', 'teardown', 'finalize']
+
+OUTPUT_TEMPLATE_FILE = os.path.join(os.path.dirname(__file__), 'source', 'instrumentation_method_map.template')
+
+
+def escape_trailing_underscore(value):
+ if value.endswith('_'):
+ return value[:-1] + '\_'
+
+
+def generate_instrumentation_method_map(outfile):
+ signal_table = format_simple_table([(k, v) for k, v in SIGNAL_MAP.iteritems()],
+ headers=['method name', 'signal'], align='<<')
+ priority_table = format_simple_table([(escape_trailing_underscore(k), v) for k, v in PRIORITY_MAP.iteritems()],
+ headers=['prefix', 'priority'], align='<>')
+ with open(OUTPUT_TEMPLATE_FILE) as fh:
+ template = string.Template(fh.read())
+ with open(outfile, 'w') as wfh:
+ wfh.write(template.substitute(signal_names=signal_table, priority_prefixes=priority_table))
+
+
+if __name__ == '__main__':
+ generate_instrumentation_method_map(sys.argv[1])
diff --git a/doc/source/_static/.gitignore b/doc/source/_static/.gitignore
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/doc/source/_static/.gitignore
diff --git a/doc/source/_templates/.gitignore b/doc/source/_templates/.gitignore
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/doc/source/_templates/.gitignore
diff --git a/doc/source/additional_topics.rst b/doc/source/additional_topics.rst
new file mode 100644
index 00000000..520b3170
--- /dev/null
+++ b/doc/source/additional_topics.rst
@@ -0,0 +1,101 @@
+Additional Topics
++++++++++++++++++
+
+Modules
+=======
+
+Modules are essentially plug-ins for Extensions. They provide a way of defining
+common and reusable functionality. An Extension can load zero or more modules
+during its creation. Loaded modules will then add their capabilities (see
+Capabilities_) to those of the Extension. When calling code tries to access an
+attribute of an Extension the Extension doesn't have, it will try to find the
+attribute among its loaded modules and will return that instead.
+
+.. note:: Modules are themselves extensions, and can therefore load their own
+ modules. *Do not* abuse this.
+
+For example, calling code may wish to reboot an unresponsive device by calling
+``device.hard_reset()``, but the ``Device`` in question does not have a
+``hard_reset`` method; however the ``Device`` has loaded ``netio_switch``
+module which allows to disable power supply over a network (say this device
+is in a rack and is powered through such a switch). The module has
+``reset_power`` capability (see Capabilities_ below) and so implements
+``hard_reset``. This will get invoked when ``device.hard_reset()`` is called.
+
+.. note:: Modules can only extend Extensions with new attributes; they cannot
+ override existing functionality. In the example above, if the
+ ``Device`` has implemented ``hard_reset()`` itself, then *that* will
+ get invoked irrespective of which modules it has loaded.
+
+If two loaded modules have the same capability or implement the same method,
+then the last module to be loaded "wins" and its method will be invoked,
+effectively overriding the module that was loaded previously.
+
+Specifying Modules
+------------------
+
+Modules get loaded when an Extension is instantiated by the extension loader.
+There are two ways to specify which modules should be loaded for a device.
+
+
+Capabilities
+============
+
+Capabilities define the functionality that is implemented by an Extension,
+either within the Extension itself or through loadable modules. A capability is
+just a label, but there is an implied contract. When an Extension claims to have
+a particular capability, it promises to expose a particular set of
+functionality through a predefined interface.
+
+Currently used capabilities are described below.
+
+.. note:: Since capabilities are basically random strings, the user can always
+ define their own; and it is then up to the user to define, enforce and
+ document the contract associated with their capability. Below, are the
+ "standard" capabilities used in WA.
+
+
+.. note:: The method signatures in the descriptions below show the calling
+ signature (i.e. they're omitting the initial self parameter).
+
+active_cooling
+--------------
+
+Intended to be used by devices and device modules, this capability implies
+that the device implements a controllable active cooling solution (e.g.
+a programmable fan). The device/module must implement the following methods:
+
+start_active_cooling()
+ Active cooling is started (e.g. the fan is turned on)
+
+stop_active_cooling()
+ Active cooling is stopped (e.g. the fan is turned off)
+
+
+reset_power
+-----------
+
+Intended to be used by devices and device modules, this capability implies
+that the device is capable of performing a hard reset by toggling power. The
+device/module must implement the following method:
+
+hard_reset()
+ The device is restarted. This method cannot rely on the device being
+ responsive and must work even if the software on the device has crashed.
+
+
+flash
+-----
+
+Intended to be used by devices and device modules, this capability implies
+that the device can be flashed with new images. The device/module must
+implement the following method:
+
+flash(image_bundle=None, images=None)
+ ``image_bundle`` is a path to a "bundle" (e.g. a tarball) that contains
+ all the images to be flashed. Which images go where must also be defined
+ within the bundle. ``images`` is a dict mapping image destination (e.g.
+ partition name) to the path to that specific image. Both
+ ``image_bundle`` and ``images`` may be specified at the same time. If
+ there is overlap between the two, ``images`` wins and its contents will
+ be flashed in preference to the ``image_bundle``.
diff --git a/doc/source/agenda.rst b/doc/source/agenda.rst
new file mode 100644
index 00000000..5b5ac690
--- /dev/null
+++ b/doc/source/agenda.rst
@@ -0,0 +1,608 @@
+.. _agenda:
+
+======
+Agenda
+======
+
+An agenda specifies what is to be done during a Workload Automation run,
+including which workloads will be run, with what configuration, which
+instruments and result processors will be enabled, etc. Agenda syntax is
+designed to be both succinct and expressive.
+
+Agendas are specified using YAML_ notation. It is recommended that you
+familiarize yourself with the linked page.
+
+.. _YAML: http://en.wikipedia.org/wiki/YAML
+
+.. note:: Earlier versions of WA have supported CSV-style agendas. These were
+ there to facilitate transition from WA1 scripts. The format was more
+ awkward and supported only a limited subset of the features. Support
+ for it has now been removed.
+
+
+Specifying which workloads to run
+=================================
+
+The central purpose of an agenda is to specify what workloads to run. A
+minimalist agenda contains a single entry at the top level called "workloads"
+that maps onto a list of workload names to run:
+
+.. code-block:: yaml
+
+ workloads:
+ - dhrystone
+ - memcpy
+ - cyclictest
+
+This specifies a WA run consisting of ``dhrystone`` followed by ``memcpy``, followed by
+``cyclictest`` workloads, and using instruments and result processors specified in
+config.py (see :ref:`configuration-specification` section).
+
+.. note:: If you're familiar with YAML, you will recognize the above as a single-key
+ associative array mapping onto a list. YAML has two notations for both
+ associative arrays and lists: block notation (seen above) and also
+ in-line notation. This means that the above agenda can also be
+ written in a single line as ::
+
+ workloads: [dhrystone, memcpy, cyclictest]
+
+ (with the list in-lined), or ::
+
+ {workloads: [dhrystone, memcpy, cyclictest]}
+
+ (with both the list and the associative array in-line). WA doesn't
+ care which of the notations is used as they all get parsed into the
+ same structure by the YAML parser. You can use whatever format you
+ find easier/clearer.
+
+Multiple iterations
+-------------------
+
+There will normally be some variability in workload execution when running on a
+real device. In order to quantify it, multiple iterations of the same workload
+are usually performed. You can specify the number of iterations for each
+workload by adding ``iterations`` field to the workload specifications (or
+"specs"):
+
+.. code-block:: yaml
+
+ workloads:
+ - name: dhrystone
+ iterations: 5
+ - name: memcpy
+ iterations: 5
+ - name: cyclictest
+ iterations: 5
+
+Now that we're specifying both the workload name and the number of iterations in
+each spec, we have to explicitly name each field of the spec.
+
+It is often the case that, as in the example above, you will want to run all
+workloads for the same number of iterations. Rather than having to specify it
+for each and every spec, you can do with a single entry by adding a ``global``
+section to your agenda:
+
+.. code-block:: yaml
+
+ global:
+ iterations: 5
+ workloads:
+ - dhrystone
+ - memcpy
+ - cyclictest
+
+The global section can contain the same fields as a workload spec. The
+fields in the global section will get added to each spec. If the same field is
+defined both in global section and in a spec, then the value in the spec will
+overwrite the global value. For example, suppose we wanted to run all our workloads
+for five iterations, except cyclictest which we want to run for ten (e.g.
+because we know it to be particularly unstable). This can be specified like
+this:
+
+.. code-block:: yaml
+
+ global:
+ iterations: 5
+ workloads:
+ - dhrystone
+ - memcpy
+ - name: cyclictest
+ iterations: 10
+
+Again, because we are now specifying two fields for cyclictest spec, we have to
+explicitly name them.
+
+Configuring workloads
+---------------------
+
+Some workloads accept configuration parameters that modify their behavior. These
+parameters are specific to a particular workload and can alter the workload in
+any number of ways, e.g. set the duration for which to run, or specify a media
+file to be used, etc. The vast majority of workload parameters will have some
+default value, so it is only necessary to specify the name of the workload in
+order for WA to run it. However, sometimes you want more control over how a
+workload runs.
+
+For example, by default, dhrystone will execute 10 million loops across four
+threads. Suppose your device has six cores available and you want the workload to
+load them all. You also want to increase the total number of loops accordingly
+to 15 million. You can specify this using dhrystone's parameters:
+
+.. code-block:: yaml
+
+ global:
+ iterations: 5
+ workloads:
+ - name: dhrystone
+ params:
+ threads: 6
+ mloops: 15
+ - memcpy
+ - name: cyclictest
+ iterations: 10
+
+.. note:: You can find out what parameters a workload accepts by looking it up
+ in the :ref:`Workloads` section. You can also look it up using WA itself
+ with "show" command::
+
+ wa show dhrystone
+
+ see the :ref:`Invocation` section for details.
+
+In addition to configuring the workload itself, we can also specify
+configuration for the underlying device. This can be done by setting runtime
+parameters in the workload spec. For example, suppose we want to ensure the
+maximum score for our benchmarks, at the expense of power consumption, by
+setting the cpufreq governor to "performance" on cpu0 (assuming all our cores
+are in the same DVFS domain and so setting the governor for cpu0 will affect all
+cores). This can be done like this:
+
+.. code-block:: yaml
+
+ global:
+ iterations: 5
+ workloads:
+ - name: dhrystone
+ runtime_params:
+ sysfile_values:
+ /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
+ workload_params:
+ threads: 6
+ mloops: 15
+ - memcpy
+ - name: cyclictest
+ iterations: 10
+
+
+Here, we're specifying ``sysfile_values`` runtime parameter for the device. The
+value for this parameter is a mapping (an associative array, in YAML) of file
+paths onto values that should be written into those files. ``sysfile_values`` is
+the only runtime parameter that is available for any (Linux) device. Other
+runtime parameters will depend on the specifics of the device used (e.g. its
+CPU cores configuration). I've renamed ``params`` to ``workload_params`` for
+clarity, but that wasn't strictly necessary as ``params`` is interpreted as
+``workload_params`` inside a workload spec.
+
+.. note:: ``params`` field is interpreted differently depending on whether it's in a
+ workload spec or the global section. In a workload spec, it translates to
+ ``workload_params``, in the global section it translates to ``runtime_params``.
+
+Runtime parameters do not automatically reset at the end of workload spec
+execution, so all subsequent iterations will also be affected unless they
+explicitly change the parameter (in the example above, performance governor will
+also be used for ``memcpy`` and ``cyclictest``). There are two ways around this:
+either set ``reboot_policy`` WA setting (see :ref:`configuration-specification` section) such that
+the device gets rebooted between spec executions, thus being returned to its
+initial state, or set the default runtime parameter values in the ``global``
+section of the agenda so that they get set for every spec that doesn't
+explicitly override them.
+
+.. note:: In addition to ``runtime_params`` there are also ``boot_params`` that
+ work in a similar way, but they get passed to the device when it
+ reboots. At the moment ``TC2`` is the only device that defines a boot
+ parameter, which is explained in ``TC2`` documentation, so boot
+ parameters will not be mentioned further.
+
+IDs and Labels
+--------------
+
+It is possible to list multiple specs with the same workload in an agenda. You
+may wish to do this if you want to run a workload with different parameter values
+or under different runtime configurations of the device. The workload name
+therefore does not uniquely identify a spec. To be able to distinguish between
+different specs (e.g. in reported results), each spec has an ID which is unique
+to all specs within an agenda (and therefore with a single WA run). If an ID
+isn't explicitly specified using ``id`` field (note that the field name is in
+lower case), one will be automatically assigned to the spec at the beginning of
+the WA run based on the position of the spec within the list. The first spec
+*without an explicit ID* will be assigned ID ``1``, the second spec *without an
+explicit ID* will be assigned ID ``2``, and so forth.
+
+Numerical IDs aren't particularly easy to deal with, which is why it is
+recommended that, for non-trivial agendas, you manually set the ids to something
+more meaningful (or use labels -- see below). An ID can be pretty much anything
+that will pass through the YAML parser. The only requirement is that it is
+unique to the agenda. However, it is usually better to keep them reasonably short
+(they don't need to be *globally* unique), and to stick with alpha-numeric
+characters and underscores/dashes. While WA can handle other characters as well,
+getting too adventurous with your IDs may cause issues further down the line
+when processing WA results (e.g. when uploading them to a database that may have
+its own restrictions).
+
+In addition to IDs, you can also specify labels for your workload specs. These
+are similar to IDs but do not have the uniqueness restriction. If specified,
+labels will be used by some result processors instead of (or in addition to) the
+workload name. For example, the ``csv`` result processor will put the label in the
+"workload" column of the CSV file.
+
+It is up to you how you choose to use IDs and labels. WA itself doesn't expect
+any particular format (apart from uniqueness for IDs). Below is the earlier
+example updated to specify explicit IDs and label dhrystone spec to reflect
+parameters used.
+
+.. code-block:: yaml
+
+ global:
+ iterations: 5
+ workloads:
+ - id: 01_dhry
+ name: dhrystone
+ label: dhrystone_15over6
+ runtime_params:
+ sysfile_values:
+ /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
+ workload_params:
+ threads: 6
+ mloops: 15
+ - id: 02_memc
+ name: memcpy
+ - id: 03_cycl
+ name: cyclictest
+ iterations: 10
+
+
+Result Processors and Instrumentation
+=====================================
+
+Result Processors
+-----------------
+
+Result processors, as the name suggests, handle the processing of results
+generated from running workload specs. By default, WA enables a couple of basic
+result processors (e.g. one generates a csv file with all scores reported by
+workloads), which you can see in ``~/.workload_automation/config.py``. However,
+WA has a number of other, more specialized, result processors (e.g. for
+uploading to databases). You can list available result processors with
+``wa list result_processors`` command. If you want to permanently enable a
+result processor, you can add it to your ``config.py``. You can also enable a
+result processor for a particular run by specifying it in the ``config`` section
+in the agenda. As the name suggests, ``config`` section mirrors the structure of
+``config.py``\ (although using YAML rather than Python), and anything that can
+be specified in the latter, can also be specified in the former.
+
+As with workloads, result processors may have parameters that define their
+behavior. Parameters of result processors are specified a little differently,
+however. Result processor parameter values are listed in the config section,
+namespaced under the name of the result processor.
+
+For example, suppose we want to be able to easily query the results generated by
+the workload specs we've defined so far. We can use ``sqlite`` result processor
+to have WA create an sqlite_ database file with the results. By default, this
+file will be generated in WA's output directory (at the same level as
+results.csv); but suppose we want to store the results in the same file for
+every run of the agenda we do. This can be done by specifying an alternative
+database file with ``database`` parameter of the result processor:
+
+.. code-block:: yaml
+
+ config:
+ result_processors: [sqlite]
+ sqlite:
+ database: ~/my_wa_results.sqlite
+ global:
+ iterations: 5
+ workloads:
+ - id: 01_dhry
+ name: dhrystone
+ label: dhrystone_15over6
+ runtime_params:
+ sysfile_values:
+ /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
+ workload_params:
+ threads: 6
+ mloops: 15
+ - id: 02_memc
+ name: memcpy
+ - id: 03_cycl
+ name: cyclictest
+ iterations: 10
+
+A couple of things to observe here:
+
+- There is no need to repeat the result processors listed in ``config.py``. The
+ processors listed in ``result_processors`` entry in the agenda will be used
+ *in addition to* those defined in the ``config.py``.
+- The database file is specified under "sqlite" entry in the config section.
+ Note, however, that this entry alone is not enough to enable the result
+  processor, it must be listed in ``result_processors``, otherwise the "sqlite"
+ config entry will be ignored.
+- The database file must be specified as an absolute path, however it may use
+ the user home specifier '~' and/or environment variables.
+
+.. _sqlite: http://www.sqlite.org/
+
+
+Instrumentation
+---------------
+
+WA can enable various "instruments" to be used during workload execution.
+Instruments can be quite diverse in their functionality, but the majority of
+instruments available in WA today are there to collect additional data (such as
+trace) from the device during workload execution. You can view the list of
+available instruments by using ``wa list instruments`` command. As with result
+processors, a few are enabled by default in the ``config.py`` and additional
+ones may be added in the same place, or specified in the agenda using
+``instrumentation`` entry.
+
+For example, we can collect core utilisation statistics (for what proportion of
+workload execution N cores were utilized above a specified threshold) using
+``coreutil`` instrument.
+
+.. code-block:: yaml
+
+ config:
+ instrumentation: [coreutil]
+ coreutil:
+ threshold: 80
+ result_processors: [sqlite]
+ sqlite:
+ database: ~/my_wa_results.sqlite
+ global:
+ iterations: 5
+ workloads:
+ - id: 01_dhry
+ name: dhrystone
+ label: dhrystone_15over6
+ runtime_params:
+ sysfile_values:
+ /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
+ workload_params:
+ threads: 6
+ mloops: 15
+ - id: 02_memc
+ name: memcpy
+ - id: 03_cycl
+ name: cyclictest
+ iterations: 10
+
+Instrumentation isn't "free" and it is advisable not to have too many
+instruments enabled at once as that might skew results. For example, you don't
+want to have power measurement enabled at the same time as event tracing, as the
+latter may prevent cores from going into idle states and thus affect the
+readings collected by the former.
+
+Unlike result processors, instrumentation may be enabled (and disabled -- see below)
+on per-spec basis. For example, suppose we want to collect /proc/meminfo from the
+device when we run ``memcpy`` workload, but not for the other two. We can do that using
+``sysfs_extractor`` instrument, and we will only enable it for ``memcpy``:
+
+.. code-block:: yaml
+
+ config:
+ instrumentation: [coreutil]
+ coreutil:
+ threshold: 80
+ sysfs_extractor:
+ paths: [/proc/meminfo]
+ result_processors: [sqlite]
+ sqlite:
+ database: ~/my_wa_results.sqlite
+ global:
+ iterations: 5
+ workloads:
+ - id: 01_dhry
+ name: dhrystone
+ label: dhrystone_15over6
+ runtime_params:
+ sysfile_values:
+ /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
+ workload_params:
+ threads: 6
+ mloops: 15
+ - id: 02_memc
+ name: memcpy
+ instrumentation: [sysfs_extractor]
+ - id: 03_cycl
+ name: cyclictest
+ iterations: 10
+
+As with ``config`` sections, ``instrumentation`` entry in the spec needs only to
+list additional instruments and does not need to repeat instruments specified
+elsewhere.
+
+.. note:: At present, it is only possible to enable/disable instrumentation on
+          per-spec basis. It is *not* possible to provide configuration on
+ per-spec basis in the current version of WA (e.g. in our example, it
+ is not possible to specify different ``sysfs_extractor`` paths for
+ different workloads). This restriction may be lifted in future
+ versions of WA.
+
+Disabling result processors and instrumentation
+-----------------------------------------------
+
+As seen above, extensions specified with ``instrumentation`` and
+``result_processor`` clauses get added to those already specified previously.
+Just because an instrument specified in ``config.py`` is not listed in the
+``config`` section of the agenda, does not mean it will be disabled. If you do
+want to disable an instrument, you can always remove/comment it out from
+``config.py``. However that will be introducing a permanent configuration change
+to your environment (one that can be easily reverted, but may be just as
+easily forgotten). If you want to temporarily disable a result processor or an
+instrument for a particular run, you can do that in your agenda by prepending a
+tilde (``~``) to its name.
+
+For example, let's say we want to disable ``cpufreq`` instrument enabled in our
+``config.py`` (suppose we're going to send results via email and so want to
+reduce the total size of the output directory):
+
+.. code-block:: yaml
+
+ config:
+ instrumentation: [coreutil, ~cpufreq]
+ coreutil:
+ threshold: 80
+ sysfs_extractor:
+ paths: [/proc/meminfo]
+ result_processors: [sqlite]
+ sqlite:
+ database: ~/my_wa_results.sqlite
+ global:
+ iterations: 5
+ workloads:
+ - id: 01_dhry
+ name: dhrystone
+ label: dhrystone_15over6
+ runtime_params:
+ sysfile_values:
+ /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
+ workload_params:
+ threads: 6
+ mloops: 15
+ - id: 02_memc
+ name: memcpy
+ instrumentation: [sysfs_extractor]
+ - id: 03_cycl
+ name: cyclictest
+ iterations: 10
+
+
+Sections
+========
+
+It is a common requirement to be able to run the same set of workloads under
+different device configurations. E.g. you may want to investigate impact of
+changing a particular setting to different values on the benchmark scores, or to
+quantify the impact of enabling a particular feature in the kernel. WA allows
+this by defining "sections" of configuration with an agenda.
+
+For example, suppose what we really want, is to measure the impact of using
+interactive cpufreq governor vs the performance governor on the three
+benchmarks. We could create another three workload spec entries similar to the
+ones we already have and change the sysfile value being set to "interactive".
+However, this introduces a lot of duplication; and what if we want to change
+spec configuration? We would have to change it in multiple places, running the
+risk of forgetting one.
+
+A better way is to keep the three workload specs and define a section for each
+governor:
+
+.. code-block:: yaml
+
+ config:
+ instrumentation: [coreutil, ~cpufreq]
+ coreutil:
+ threshold: 80
+ sysfs_extractor:
+ paths: [/proc/meminfo]
+ result_processors: [sqlite]
+ sqlite:
+ database: ~/my_wa_results.sqlite
+ global:
+ iterations: 5
+ sections:
+ - id: perf
+ runtime_params:
+ sysfile_values:
+ /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
+ - id: inter
+ runtime_params:
+ sysfile_values:
+ /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: interactive
+ workloads:
+ - id: 01_dhry
+ name: dhrystone
+ label: dhrystone_15over6
+ workload_params:
+ threads: 6
+ mloops: 15
+ - id: 02_memc
+ name: memcpy
+ instrumentation: [sysfs_extractor]
+ - id: 03_cycl
+ name: cyclictest
+ iterations: 10
+
+A section, just like a workload spec, needs to have a unique ID. Apart from
+that, a "section" is similar to the ``global`` section we've already seen --
+everything that goes into a section will be applied to each workload spec.
+Workload specs defined under top-level ``workloads`` entry will be executed for
+each of the sections listed under ``sections``.
+
+.. note:: It is also possible to have a ``workloads`` entry within a section,
+ in which case, those workloads will only be executed for that specific
+ section.
+
+In order to maintain the uniqueness requirement of workload spec IDs, they will
+be namespaced under each section by prepending the section ID to the spec ID
+with an underscore. So in the agenda above, we no longer have a workload spec
+with ID ``01_dhry``, instead there are two specs with IDs ``perf_01_dhry`` and
+``inter_01_dhry``.
+
+Note that the ``global`` section still applies to every spec in the agenda. So
+the precedence order is -- spec settings override section settings, which in
+turn override global settings.
+
+
+Other Configuration
+===================
+
+.. _configuration_in_agenda:
+
+As mentioned previously, ``config`` section in an agenda can contain anything
+that can be defined in ``config.py`` (with Python syntax translated to the
+equivalent YAML). Certain configuration (e.g. ``run_name``) makes more sense
+to define in an agenda than a config file. Refer to the
+:ref:`configuration-specification` section for details.
+
+.. code-block:: yaml
+
+ config:
+ project: governor_comparison
+ run_name: performance_vs_interactive
+
+ device: generic_android
+ reboot_policy: never
+
+ instrumentation: [coreutil, ~cpufreq]
+ coreutil:
+ threshold: 80
+ sysfs_extractor:
+ paths: [/proc/meminfo]
+ result_processors: [sqlite]
+ sqlite:
+ database: ~/my_wa_results.sqlite
+ global:
+ iterations: 5
+ sections:
+ - id: perf
+ runtime_params:
+ sysfile_values:
+ /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
+ - id: inter
+ runtime_params:
+ sysfile_values:
+ /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: interactive
+ workloads:
+ - id: 01_dhry
+ name: dhrystone
+ label: dhrystone_15over6
+ workload_params:
+ threads: 6
+ mloops: 15
+ - id: 02_memc
+ name: memcpy
+ instrumentation: [sysfs_extractor]
+ - id: 03_cycl
+ name: cyclictest
+ iterations: 10
+
diff --git a/doc/source/changes.rst b/doc/source/changes.rst
new file mode 100644
index 00000000..9d1dd58d
--- /dev/null
+++ b/doc/source/changes.rst
@@ -0,0 +1,7 @@
+What's New in Workload Automation
+=================================
+
+Version 2.3.0
+-------------
+
+- First publicly-released version.
diff --git a/doc/source/conf.py b/doc/source/conf.py
new file mode 100644
index 00000000..56c30053
--- /dev/null
+++ b/doc/source/conf.py
@@ -0,0 +1,270 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Workload Automation 2 documentation build configuration file, created by
+# sphinx-quickstart on Mon Jul 15 09:00:46 2013.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+import warnings
+
+warnings.filterwarnings('ignore', "Module louie was already imported")
+
+this_dir = os.path.dirname(__file__)
+sys.path.insert(0, os.path.join(this_dir, '../..'))
+import wlauto
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'Workload Automation'
+copyright = u'2013, ARM Ltd'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = wlauto.__version__
+# The full version, including alpha/beta/rc tags.
+release = wlauto.__version__
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['**/*-example']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'WorkloadAutomationdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('index', 'WorkloadAutomation.tex', u'Workload Automation Documentation',
+ u'WA Mailing List \\textless{}workload-automation@arm.com\\textgreater{},Sergei Trofimov \\textless{}sergei.trofimov@arm.com\\textgreater{}, Vasilis Flouris \\textless{}vasilis.flouris@arm.com\\textgreater{}, Mohammed Binsabbar \\textless{}mohammed.binsabbar@arm.com\\textgreater{}', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ('index', 'workloadautomation', u'Workload Automation Documentation',
+ [u'WA Mailing List <workload-automation@arm.com>, Sergei Trofimov <sergei.trofimov@arm.com>, Vasilis Flouris <vasilis.flouris@arm.com>'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output ------------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ ('index', 'WorkloadAutomation', u'Workload Automation Documentation',
+     u'WA Mailing List <workload-automation@arm.com>, Sergei Trofimov <sergei.trofimov@arm.com>, Vasilis Flouris <vasilis.flouris@arm.com>', 'WorkloadAutomation', 'A framework for automating workload execution on mobile devices.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+
+def setup(app):
+ app.add_object_type('confval', 'confval',
+ objname='configuration value',
+ indextemplate='pair: %s; configuration value')
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
new file mode 100644
index 00000000..8551c672
--- /dev/null
+++ b/doc/source/configuration.rst
@@ -0,0 +1,188 @@
+.. _configuration-specification:
+
+=============
+Configuration
+=============
+
+In addition to specifying run execution parameters through an agenda, the
+behavior of WA can be modified through configuration file(s). The default
+configuration file is ``~/.workload_automation/config.py`` (the location can be
+changed by setting ``WA_USER_DIRECTORY`` environment variable, see :ref:`envvars`
+section below). This file will be
+created when you first run WA if it does not already exist. This file must
+always exist and will always be loaded. You can add to or override the contents
+of that file on invocation of Workload Automation by specifying an additional
+configuration file using ``--config`` option.
+
+The config file is just a Python source file, so it can contain any valid Python
+code (though execution of arbitrary code through the config file is
+discouraged). Variables with specific names will be picked up by the framework
+and used to modify the behavior of Workload automation.
+
+.. note:: As of version 2.1.3 it is also possible to specify the following
+ configuration in the agenda. See :ref:`configuration in an agenda <configuration_in_agenda>`\ .
+
+
+.. _available_settings:
+
+Available Settings
+==================
+
+.. note:: Extensions such as workloads, instrumentation or result processors
+ may also pick up certain settings from this file, so the list below is
+ not exhaustive. Please refer to the documentation for the specific
+ extensions to see what settings they accept.
+
+.. confval:: device
+
+ This setting defines what specific Device subclass will be used to interact
+   with the connected device. Obviously, this must match your setup.
+
+.. confval:: device_config
+
+ This must be a Python dict containing setting-value mapping for the
+ configured :rst:dir:`device`. What settings and values are valid is specific
+ to each device. Please refer to the documentation for your device.
+
+.. confval:: reboot_policy
+
+ This defines when during execution of a run the Device will be rebooted. The
+ possible values are:
+
+ ``"never"``
+ The device will never be rebooted.
+ ``"initial"``
+ The device will be rebooted when the execution first starts, just before
+ executing the first workload spec.
+ ``"each_spec"``
+ The device will be rebooted before running a new workload spec.
+ Note: this acts the same as each_iteration when execution order is set to by_iteration
+ ``"each_iteration"``
+ The device will be rebooted before each new iteration.
+
+ .. seealso::
+
+ :doc:`execution_model`
+
+.. confval:: execution_order
+
+ Defines the order in which the agenda spec will be executed. At the moment,
+ the following execution orders are supported:
+
+ ``"by_iteration"``
+ The first iteration of each workload spec is executed one after the other,
+ so all workloads are executed before proceeding on to the second iteration.
+ E.g. A1 B1 C1 A2 C2 A3. This is the default if no order is explicitly specified.
+
+ In case of multiple sections, this will spread them out, such that specs
+     from the same section are further apart. E.g. given sections X and Y, global
+ specs A and B, and two iterations, this will run ::
+
+ X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2
+
+ ``"by_section"``
+ Same as ``"by_iteration"``, however this will group specs from the same
+ section together, so given sections X and Y, global specs A and B, and two iterations,
+ this will run ::
+
+ X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2
+
+ ``"by_spec"``
+ All iterations of the first spec are executed before moving on to the next
+     spec. E.g. A1 A2 A3 B1 C1 C2. This may also be specified as ``"classic"``,
+ as this was the way workloads were executed in earlier versions of WA.
+
+ ``"random"``
+ Execution order is entirely random.
+
+ Added in version 2.1.5.
+
+.. confval:: instrumentation
+
+ This should be a list of instruments to be enabled during run execution.
+ Values must be names of available instruments. Instruments are used to
+ collect additional data, such as energy measurements or execution time,
+ during runs.
+
+ .. seealso::
+
+ :doc:`api/wlauto.instrumentation`
+
+.. confval:: result_processors
+
+ This should be a list of result processors to be enabled during run execution.
+   Values must be names of available result processors. Result processors define
+ how data is output from WA.
+
+ .. seealso::
+
+ :doc:`api/wlauto.result_processors`
+
+.. confval:: logging
+
+   A dict that contains logging settings. At the moment only three settings are
+ supported:
+
+ ``"file format"``
+ Controls how logging output appears in the run.log file in the output
+ directory.
+ ``"verbose format"``
+ Controls how logging output appear on the console when ``--verbose`` flag
+ was used.
+ ``"regular format"``
+ Controls how logging output appear on the console when ``--verbose`` flag
+ was not used.
+
+ All three values should be Python `old-style format strings`_ specifying which
+ `log record attributes`_ should be displayed.
+
+There are also a couple of settings that are used to provide additional metadata
+for a run. These may get picked up by instruments or result processors to
+attach context to results.
+
+.. confval:: project
+
+ A string naming the project for which data is being collected. This may be
+ useful, e.g. when uploading data to a shared database that is populated from
+ multiple projects.
+
+.. confval:: project_stage
+
+   A dict or a string that allows adding an additional identifier. This may be
+ useful for long-running projects.
+
+.. confval:: run_name
+
+   A string that labels the WA run that is being performed. This would typically
+ be set in the ``config`` section of an agenda (see
+ :ref:`configuration in an agenda <configuration_in_agenda>`) rather than in the config file.
+
+.. _old-style format strings: http://docs.python.org/2/library/stdtypes.html#string-formatting-operations
+.. _log record attributes: http://docs.python.org/2/library/logging.html#logrecord-attributes
+
+
+.. _envvars:
+
+Environment Variables
+=====================
+
+In addition to standard configuration described above, WA behaviour can be
+altered through environment variables. These can determine where WA looks for
+various assets when it starts.
+
+.. confval:: WA_USER_DIRECTORY
+
+   This is the location WA will look for config.py, instrumentation, and it
+ will also be used for local caches, etc. If this variable is not set, the
+ default location is ``~/.workload_automation`` (this is created when WA
+ is installed).
+
+ .. note:: This location **must** be writable by the user who runs WA.
+
+
+.. confval:: WA_EXTENSION_PATHS
+
+ By default, WA will look for extensions in its own package and in
+ subdirectories under ``WA_USER_DIRECTORY``. This environment variable can
+   be used to specify a colon-separated list of additional locations WA should
+ use to look for extensions.
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
new file mode 100644
index 00000000..d0696ce7
--- /dev/null
+++ b/doc/source/contributing.rst
@@ -0,0 +1,45 @@
+
+Contributing Code
+=================
+
+We welcome code contributions via GitHub pull requests to the official WA
+repository. To help with maintainability of the code line we ask that the code
+uses a coding style consistent with the rest of WA code, which is basically
+`PEP8 <https://www.python.org/dev/peps/pep-0008/>`_ with line length and block
+comment rules relaxed (the wrapper for PEP8 checker inside ``dev_scripts`` will
+run it with appropriate configuration).
+
+We ask that the following checks are performed on the modified code prior to
+submitting a pull request:
+
+.. note:: You will need pylint and pep8 static checkers installed::
+
+ pip install pep8
+ pip install pylint
+
+   It is recommended that you install via pip rather than through your
+   distribution's package manager because the latter is likely to
+   contain out-of-date versions of these tools.
+
+- ``./dev_scripts/pylint`` should be run without arguments and should produce no
+ output (any output should be addressed by making appropriate changes in the
+ code or adding a pylint ignore directive, if there is a good reason for
+ keeping the code as is).
+- ``./dev_scripts/pep8`` should be run without arguments and should produce no
+ output (any output should be addressed by making appropriate changes in the
+ code).
+- If the modifications touch core framework (anything under ``wlauto/core``), unit
+ tests should be run using ``nosetests``, and they should all pass.
+
+ - If significant additions have been made to the framework, unit
+ tests should be added to cover the new functionality.
+
+- If modifications have been made to documentation (this includes description
+ attributes for Parameters and Extensions), documentation should be built to
+  make sure there are no errors or warnings during the build process, and a visual inspection
+ of new/updated sections in resulting HTML should be performed to ensure
+ everything renders as expected.
+
+Once your contribution is ready, please follow instructions in `GitHub
+documentation <https://help.github.com/articles/creating-a-pull-request/>`_ to
+create a pull request.
diff --git a/doc/source/conventions.rst b/doc/source/conventions.rst
new file mode 100644
index 00000000..c811f522
--- /dev/null
+++ b/doc/source/conventions.rst
@@ -0,0 +1,74 @@
+===========
+Conventions
+===========
+
+Interface Definitions
+=====================
+
+Throughout this documentation a number of stubbed-out class definitions will be
+presented showing an interface defined by a base class that needs to be
+implemented by the deriving classes. The following conventions will be used when
+presenting such an interface:
+
+ - Methods shown raising :class:`NotImplementedError` are abstract and *must*
+ be overridden by subclasses.
+ - Methods with ``pass`` in their body *may* be (but do not need to be) overridden
+ by subclasses. If not overridden, these methods will default to the base
+ class implementation, which may or may not be a no-op (the ``pass`` in the
+ interface specification does not necessarily mean that the method does not have an
+ actual implementation in the base class).
+
+ .. note:: If you *do* override these methods you must remember to call the
+ base class' version inside your implementation as well.
+
+ - Attributes whose value is shown as ``None`` *must* be redefined by the
+ subclasses with an appropriate value.
+ - Attributes whose value is shown as something other than ``None`` (including
+ empty strings/lists/dicts) *may* be (but do not need to be) overridden by
+ subclasses. If not overridden, they will default to the value shown.
+
+Keep in mind that the above convention applies only when showing interface
+definitions and may not apply elsewhere in the documentation. Also, in the
+interest of clarity, only the relevant parts of the base class definitions will
+be shown; some members (such as internal methods) may be omitted.
+
+
+Code Snippets
+=============
+
+Code snippets provided are intended to be valid Python code, and to be complete.
+However, for the sake of clarity, in some cases only the relevant parts will be
+shown with some details omitted (details that may be necessary to the validity of the code
+but not to understanding of the concept being illustrated). In such cases, a
+commented ellipsis will be used to indicate that parts of the code have been
+dropped. E.g. ::
+
+ # ...
+
+ def update_result(self, context):
+ # ...
+ context.result.add_metric('energy', 23.6, 'Joules', lower_is_better=True)
+
+ # ...
+
+
+Core Class Names
+================
+
+When core classes are referenced throughout the documentation, usually their
+fully-qualified names are given e.g. :class:`wlauto.core.workload.Workload`.
+This is done so that Sphinx_ can resolve them and provide a link. While
+implementing extensions, however, you should *not* be importing anything
+directly from under :mod:`wlauto.core`. Instead, classes you are meant to
+instantiate or subclass have been aliased in the root :mod:`wlauto` package,
+and should be imported from there, e.g. ::
+
+ from wlauto import Workload
+
+All examples given in the documentation follow this convention. Please note that
+this only applies to the :mod:`wlauto.core` subpackage; all other classes
+should be imported from their corresponding subpackages.
+
+.. _Sphinx: http://sphinx-doc.org/
+
+
diff --git a/doc/source/daq_device_setup.rst b/doc/source/daq_device_setup.rst
new file mode 100644
index 00000000..8853fc2f
--- /dev/null
+++ b/doc/source/daq_device_setup.rst
@@ -0,0 +1,246 @@
+.. _daq_setup:
+
+DAQ Server Guide
+================
+
+NI-DAQ, or just "DAQ", is the Data Acquisition device developed by National
+Instruments:
+
+ http://www.ni.com/data-acquisition/
+
+WA uses the DAQ to collect power measurements during workload execution. A
+client/server solution for this is distributed as part of WA, though it is
+distinct from WA and may be used separately (by invoking the client APIs from a
+Python script, or used directly from the command line).
+
+This solution is dependent on the NI-DAQmx driver for the DAQ device. At the
+time of writing, only Windows versions of the driver are supported (there is an
+old Linux version that works on some versions of RHEL and Centos, but it is
+unsupported and won't work with recent Linux kernels). Because of this, the
+server part of the solution will need to be run on a Windows machine (though it
+should also work on Linux, if the driver becomes available).
+
+
+.. _daq_wiring:
+
+DAQ Device Wiring
+-----------------
+
+The server expects the device to be wired in a specific way in order to be able
+to collect power measurements. Two consecutive Analogue Input (AI) channels on
+the DAQ are used to form a logical "port" (starting with AI/0 and AI/1 for port
+0). Of these, the lower/even channel (e.g. AI/0) is used to measure the voltage
+on the rail we're interested in; the higher/odd channel (e.g. AI/1) is used to
+measure the voltage drop across a known very small resistor on the same rail,
+which is then used to calculate current. The logical wiring diagram looks like
+this::
+
+ Port N
+ ======
+ |
+ | AI/(N*2)+ <--- Vr -------------------------|
+ | |
+ | AI/(N*2)- <--- GND -------------------// |
+ | |
+ | AI/(N*2+1)+ <--- V ------------|-------V |
+ | r | |
+ | AI/(N*2+1)- <--- Vr --/\/\/\----| |
+ | | |
+ | | |
+ | |------------------------------|
+ ======
+
+ Where:
+ V: Voltage going into the resistor
+ Vr: Voltage between resistor and the SOC
+ GND: Ground
+ r: The resistor across the rail with a known
+ small value.
+
+
+The physical wiring will depend on the specific DAQ device, as channel layout
+varies between models.
+
+.. note:: The current solution supports a variable number of ports; however, it
+ assumes that the ports are sequential and start at zero. E.g. if you
+ want to measure power on three rails, you will need to wire ports 0-2
+ (AI/0 to AI/5 channels on the DAQ) to do it. It is not currently
+ possible to use any other configuration (e.g. ports 1, 2 and 5).
+
+
+Setting up NI-DAQmx driver on a Windows Machine
+-----------------------------------------------
+
+ - The NI-DAQmx driver is pretty big in size, 1.5 GB. The driver name is
+ 'NI-DAQmx' and its version '9.7.0f0', which you can obtain from National
+ Instruments website by downloading NI Measurement & Automation Explorer (Ni
+ MAX) from: http://joule.ni.com/nidu/cds/view/p/id/3811/lang/en
+
+ .. note:: During the installation process, you might be prompted to install
+ .NET framework 4.
+
+ - The installation process is quite long, 7-15 minutes.
+ - Once installed, open NI MAX, which should be in your desktop, if not type its
+ name in the start->search.
+ - Connect the NI-DAQ device to your machine. You should see it appear under
+ 'Devices and Interfaces'. If not, press 'F5' to refresh the list.
+ - Complete the device wiring as described in the :ref:`daq_wiring` section.
+ - Quit NI MAX.
+
+
+Setting up DAQ server
+---------------------
+
+The DAQ power measurement solution is implemented in daqpower Python library,
+the package for which can be found in WA's install location under
+``wlauto/external/daq_server/daqpower-1.0.0.tar.gz`` (the version number in your
+installation may be different).
+
+ - Install NI-DAQmx driver, as described in the previous section.
+ - Install Python 2.7.
+ - Download and install ``pip``, ``numpy`` and ``twisted`` Python packages.
+ These packages have C extensions, and so you will need a native compiler set
+ up if you want to install them from PyPI. As an easier alternative, you can
+ find pre-built Windows installers for these packages here_ (the versions are
+ likely to be older than what's on PyPI though).
+ - Install the daqpower package using pip::
+
+ pip install C:\Python27\Lib\site-packages\wlauto\external\daq_server\daqpower-1.0.0.tar.gz
+
+ This should automatically download and install ``PyDAQmx`` package as well
+ (the Python bindings for the NI-DAQmx driver).
+
+.. _here: http://www.lfd.uci.edu/~gohlke/pythonlibs/
+
+
+Running DAQ server
+------------------
+
+Once you have installed the ``daqpower`` package and the required dependencies as
+described above, you can start the server by executing ``run-daq-server`` from the
+command line. The server will start listening on the default port, 45677.
+
+.. note:: There is a chance that pip will not add ``run-daq-server`` into your
+ path. In that case, you can run daq server as such:
+ ``python C:\path to python\Scripts\run-daq-server``
+
+You can optionally specify flags to control the behaviour of the server::
+
+ usage: run-daq-server [-h] [-d DIR] [-p PORT] [--debug] [--verbose]
+
+ optional arguments:
+ -h, --help show this help message and exit
+ -d DIR, --directory DIR
+ Working directory
+ -p PORT, --port PORT port the server will listen on.
+ --debug Run in debug mode (no DAQ connected).
+ --verbose Produce verobose output.
+
+.. note:: The server will use a working directory (by default, the directory
+ the run-daq-server command was executed in, or the location specified
+ with -d flag) to store power traces before they are collected by the
+ client. This directory must be read/write-able by the user running
+ the server.
+
+
+Collecting Power with WA
+------------------------
+
+.. note:: You do *not* need to install the ``daqpower`` package on the machine
+ running WA, as it is already included in the WA install structure.
+ However, you do need to make sure that ``twisted`` package is
+ installed.
+
+You can enable the ``daq`` instrument in your agenda/config.py in order to get WA to
+collect power measurements. At minimum, you will also need to specify the
+resistor values for each port in your configuration, e.g.::
+
+ resistor_values = [0.005, 0.005] # in Ohms
+
+This also specifies the number of logical ports (measurement sites) you want to
+use, and, implicitly, the port numbers (ports 0 to N-1 will be used).
+
+.. note:: "ports" here refers to the logical ports wired on the DAQ (see :ref:`daq_wiring`),
+ not to be confused with the TCP port the server is listening on.
+
+Unless you're running the DAQ server and WA on the same machine (unlikely
+considering that WA is officially supported only on Linux and recent NI-DAQmx
+drivers are only available on Windows), you will also need to specify the IP
+address of the server::
+
+ daq_server = 127.0.0.1
+
+There are a number of other settings that can optionally be specified in the
+configuration (e.g. the labels to be used for DAQ ports). Please refer to the
+:class:`wlauto.instrumentation.daq.Daq` documentation for details.
+
+
+Collecting Power from the Command Line
+--------------------------------------
+
+``daqpower`` package also comes with a client that may be used from the command
+line. Unlike when collecting power with WA, you *will* need to install the
+``daqpower`` package. Once installed, you will be able to interact with a
+running DAQ server by invoking ``send-daq-command``. The invocation syntax is ::
+
+ send-daq-command --host HOST [--port PORT] COMMAND [OPTIONS]
+
+Options are command-specific. COMMAND may be one of the following (and they
+should generally be invoked in that order):
+
+ :configure: Set up a new session, specifying the configuration values to
+ be used. If there is already a configured session, it will
+ be terminated. OPTIONS for this command are the DAQ
+ configuration parameters listed in the DAQ instrument
+ documentation with all ``_`` replaced by ``-`` and prefixed
+ with ``--``, e.g. ``--resistor-values``.
+ :start: Start collecting power measurements.
+ :stop: Stop collecting power measurements.
+ :get_data: Pull files containing power measurements from the server.
+ There is one option for this command:
+ ``--output-directory`` which specifies where the files will
+ be pulled to; if this is not specified, they will be in the
+ current directory.
+ :close: Close the currently configured server session. This will get rid
+ of the data files and configuration on the server, so it would
+ no longer be possible to use "start" or "get_data" commands
+ before a new session is configured.
+
+A typical command line session would go like this:
+
+.. code-block:: bash
+
+ send-daq-command --host 127.0.0.1 configure --resistor-values 0.005 0.005
+ # set up and kick off the use case you want to measure
+ send-daq-command --host 127.0.0.1 start
+ # wait for the use case to complete
+ send-daq-command --host 127.0.0.1 stop
+ send-daq-command --host 127.0.0.1 get_data
+ # files called PORT_0.csv and PORT_1.csv will appear in the current directory
+ # containing measurements collected during use case execution
+ send-daq-command --host 127.0.0.1 close
+ # the session is terminated and the csv files on the server have been
+ # deleted. A new session may now be configured.
+
+In addition to these "standard workflow" commands, the following commands are
+also available:
+
+ :list_devices: Returns a list of DAQ devices detected by the NI-DAQmx
+ driver. In case multiple devices are connected to the
+ server host, you can specify the device you want to use
+ with ``--device-id`` option when configuring a session.
+ :list_ports: Returns a list of ports that have been configured for the
+ current session, e.g. ``['PORT_0', 'PORT_1']``.
+ :list_port_files: Returns a list of data files that have been generated
+ (unless something went wrong, there should be one for
+ each port).
+
+
+Collecting Power from another Python Script
+-------------------------------------------
+
+You can invoke the above commands from a Python script using
+:py:func:`daqpower.client.execute_command` function, passing in
+:class:`daqpower.config.ServerConfiguration` and, in case of the configure command,
+:class:`daqpower.config.DeviceConfiguration`. Please see the implementation of
+the ``daq`` WA instrument for examples of how these APIs can be used.
diff --git a/doc/source/device_setup.rst b/doc/source/device_setup.rst
new file mode 100644
index 00000000..3f6e16ad
--- /dev/null
+++ b/doc/source/device_setup.rst
@@ -0,0 +1,407 @@
+Setting Up A Device
+===================
+
+WA should work with most Android devices out-of-the box, as long as the device
+is discoverable by ``adb`` (i.e. gets listed when you run ``adb devices``). For
+USB-attached devices, that should be the case; for network devices, ``adb connect``
+would need to be invoked with the IP address of the device. If there is only one
+device connected to the host running WA, then no further configuration should be
+necessary (though you may want to :ref:`tweak some Android settings <configuring-android>`\ ).
+
+If you have multiple devices connected, have a non-standard Android build (e.g.
+on a development board), or want to use some of the more advanced WA functionality,
+further configuration will be required.
+
+Android
++++++++
+
+General Device Setup
+--------------------
+
+You can specify the device interface by setting ``device`` setting in
+``~/.workload_automation/config.py``. Available interfaces can be viewed by
+running ``wa list devices`` command. If you don't see your specific device
+listed (which is likely unless you're using one of the ARM-supplied platforms), then
+you should use ``generic_android`` interface (this is set in the config by
+default).
+
+.. code-block:: python
+
+ device = 'generic_android'
+
+The device interface may be configured through the ``device_config`` setting, whose
+value is a ``dict`` mapping setting names to their values. You can find the full
+list of available parameters by looking up your device interface in the
+:ref:`devices` section of the documentation. Some of the most common parameters
+you might want to change are outlined below.
+
+.. confval:: adb_name
+
+ If you have multiple Android devices connected to the host machine, you will
+ need to set this to indicate to WA which device you want it to use.
+
+.. confval:: working_directory
+
+ WA needs a "working" directory on the device which it will use for collecting
+ traces, caching assets it pushes to the device, etc. By default, it will
+ create one under ``/sdcard`` which should be mapped and writable on standard
+ Android builds. If this is not the case for your device, you will need to
+ specify an alternative working directory (e.g. under ``/data/local``).
+
+.. confval:: scheduler
+
+ This specifies the scheduling mechanism (from the perspective of core layout)
+ utilized by the device). For recent big.LITTLE devices, this should generally
+ be "hmp" (ARM Heterogeneous Multi-Processing); some legacy development
+ platforms might have Linaro IKS kernels, in which case it should be "iks".
+ For homogeneous (single-cluster) devices, it should be "smp". Please see
+ ``scheduler`` parameter in the ``generic_android`` device documentation for
+ more details.
+
+.. confval:: core_names
+
+ This and ``core_clusters`` need to be set if you want to utilize some more
+ advanced WA functionality (like setting of core-related runtime parameters
+ such as governors, frequencies, etc). ``core_names`` should be a list of
+ core names matching the order in which they are exposed in sysfs. For
+ example, ARM TC2 SoC is a 2x3 big.LITTLE system; its ``core_names`` would be
+ ``['a7', 'a7', 'a7', 'a15', 'a15']``, indicating that cpu0-cpu2 in cpufreq
+ sysfs structure are A7's and cpu3 and cpu4 are A15's.
+
+.. confval:: core_clusters
+
+ If ``core_names`` is defined, this must also be defined. This is a list of
+ integer values indicating the cluster the corresponding core in
+ ``cores_names`` belongs to. For example, for TC2, this would be
+ ``[0, 0, 0, 1, 1]``, indicating that A7's are on cluster 0 and A15's are on
+ cluster 1.
+
+A typical ``device_config`` inside ``config.py`` may look something like
+
+
+.. code-block:: python
+
+ device_config = dict(
+ adb_name='0123456789ABCDEF',
+ working_directory='/sdcard/wa-working',
+ core_names=['a7', 'a7', 'a7', 'a15', 'a15'],
+ core_clusters=[0, 0, 0, 1, 1],
+ # ...
+ )
+
+.. _configuring-android:
+
+Configuring Android
+-------------------
+
+There are a few additional tasks you may need to perform once you have a device
+booted into Android (especially if this is an initial boot of a fresh OS
+deployment):
+
+ - You have gone through FTU (first time usage) on the home screen and
+ in the apps menu.
+ - You have disabled the screen lock.
+ - You have set sleep timeout to the highest possible value (30 mins on
+ most devices).
+ - You have disabled brightness auto-adjust and have set the brightness
+ to a fixed level.
+ - You have set the locale language to "English" (this is important for
+ some workloads in which UI automation looks for specific text in UI
+ elements).
+
+TC2 Setup
+---------
+
+This section outlines how to setup ARM TC2 development platform to work with WA.
+
+Pre-requisites
+~~~~~~~~~~~~~~
+
+You can obtain the full set of images for TC2 from Linaro:
+
+https://releases.linaro.org/latest/android/vexpress-lsk.
+
+For the easiest setup, follow the instructions on the "Firmware" and "Binary
+Image Installation" tabs on that page.
+
+.. note:: The default ``reboot_policy`` in ``config.py`` is to not reboot. With
+ this WA will assume that the device is already booted into Android
+ prior to WA being invoked. If you want to WA to do the initial boot of
+ the TC2, you will have to change reboot policy to at least
+ ``initial``.
+
+
+Setting Up Images
+~~~~~~~~~~~~~~~~~
+
+.. note:: Make sure that both DIP switches near the black reset button on TC2
+ are up (this is counter to the Linaro guide that instructs to lower
+ one of the switches).
+
+.. note:: The TC2 must have an Ethernet connection.
+
+
+If you have followed the setup instructions on the Linaro page, you should have
+a USB stick or an SD card with the file system, and internal microSD on the
+board (VEMSD) with the firmware images. The default Linaro configuration is to
+boot from the image on the boot partition in the file system you have just
+created. This is not supported by WA, which expects the image to be in NOR flash
+on the board. This requires you to copy the images from the boot partition onto
+the internal microSD card.
+
+Assuming the boot partition of the Linaro file system is mounted on
+``/media/boot`` and the internal microSD is mounted on ``/media/VEMSD``, copy
+the following images::
+
+ cp /media/boot/zImage /media/VEMSD/SOFTWARE/kern_mp.bin
+ cp /media/boot/initrd /media/VEMSD/SOFTWARE/init_mp.bin
+ cp /media/boot/v2p-ca15-tc2.dtb /media/VEMSD/SOFTWARE/mp_a7bc.dtb
+
+Optionally
+##########
+
+The default device tree configuration the TC2 is to boot on the A7 cluster. It
+is also possible to configure the device tree to boot on the A15 cluster, or to
+boot with one of the clusters disabled (turning TC2 into an A7-only or A15-only
+device). Please refer to the "Firmware" tab on the Linaro paged linked above for
+instructions on how to compile the appropriate device tree configurations.
+
+WA allows selecting between these configurations using ``os_mode`` boot
+parameter of the TC2 device interface. In order for this to work correctly,
+device tree files for the A15-bootcluster, A7-only and A15-only configurations
+should be copied into ``/media/VEMSD/SOFTWARE/`` as ``mp_a15bc.dtb``,
+``mp_a7.dtb`` and ``mp_a15.dtb`` respectively.
+
+This is entirely optional. If you're not planning on switching boot cluster
+configuration, those files do not need to be present in VEMSD.
+
+config.txt
+##########
+
+Also, make sure that ``USB_REMOTE`` setting in ``/media/VEMSD/config.txt`` is set
+to ``TRUE`` (this will allow rebooting the device by writing reboot.txt to
+VEMSD). ::
+
+ USB_REMOTE: TRUE ;Selects remote command via USB
+
+
+TC2-specific device_config settings
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are a few settings that may need to be set in ``device_config`` inside
+your ``config.py`` which are specific to TC2:
+
+.. note:: TC2 *does not* accept most "standard" android ``device_config``
+ settings.
+
+adb_name
+ If you're running WA with reboots disabled (which is the default reboot
+ policy), you will need to manually run ``adb connect`` with TC2's IP
+ address and set this.
+
+root_mount
+ WA expects TC2's internal microSD to be mounted on the host under
+ ``/media/VEMSD``. If this location is different, it needs to be specified
+ using this setting.
+
+boot_firmware
+ WA defaults to try booting using UEFI, which will require some additional
+ firmware from ARM that may not be provided with Linaro releases (see the
+ UEFI and PSCI section below). If you do not have those images, you will
+ need to set ``boot_firmware`` to ``bootmon``.
+
+fs_medium
+ TC2's file system can reside either on an SD card or on a USB stick. Boot
+ configuration is different depending on this. By default, WA expects it
+ to be on ``usb``; if you are using an SD card, you should set this to
+ ``sd``.
+
+bm_image
+ Bootmon image that comes as part of TC2 firmware periodically gets
+ updated. At the time of the release, ``bm_v519r.axf`` was used by
+ ARM. If you are using a more recent image, you will need to set this
+ indicating the image name (just the name of the actual file, *not* the
+ path). Note: this setting only applies if using ``bootmon`` boot
+ firmware.
+
+serial_device
+ WA will assume TC2 is connected on ``/dev/ttyS0`` by default. If the
+ serial port is different, you will need to set this.
+
+
+UEFI and PSCI
+~~~~~~~~~~~~~
+
+UEFI is a boot firmware alternative to bootmon. Currently UEFI is coupled with PSCI (Power State Coordination Interface). That means
+that in order to use PSCI, UEFI has to be the boot firmware. Currently the reverse dependency is true as well (for TC2). Therefore
+using UEFI requires enabling PSCI.
+
+In case you intend to use uefi/psci mode instead of bootmon, you will need two additional files: tc2_sec.bin and tc2_uefi.bin.
+after obtaining those files, place them inside /media/VEMSD/SOFTWARE/ directory as such::
+
+ cp tc2_sec.bin /media/VEMSD/SOFTWARE/
+ cp tc2_uefi.bin /media/VEMSD/SOFTWARE/
+
+
+Juno Setup
+----------
+
+.. note:: At the time of writing, the Android software stack on Juno was still
+ very immature. Some workloads may not run, and there maybe stability
+ issues with the device.
+
+
+The full software stack can be obtained from Linaro:
+
+https://releases.linaro.org/14.08/members/arm/android/images/armv8-android-juno-lsk
+
+Please follow the instructions on the "Binary Image Installation" tab on that
+page. More up-to-date firmware and kernel may also be obtained by registered
+members from ARM Connected Community: http://www.arm.com/community/ (though this
+is not guaranteed to work with the Linaro file system).
+
+UEFI
+~~~~
+
+Juno uses UEFI_ to boot the kernel image. UEFI supports multiple boot
+configurations, and presents a menu on boot to select (in default configuration
+it will automatically boot the first entry in the menu if not interrupted before
+a timeout). WA will look for a specific entry in the UEFI menu
+(``'WA'`` by default, but that may be changed by setting ``uefi_entry`` in the
+``device_config``). When following the UEFI instructions on the above Linaro
+page, please make sure to name the entry appropriately (or to correctly set the
+``uefi_entry``).
+
+.. _UEFI: http://en.wikipedia.org/wiki/UEFI
+
+There are two supported ways for Juno to discover kernel images through UEFI. It
+can either load them from NOR flash on the board, or from the boot partition on the
+file system. The setup described on the Linaro page uses the boot partition
+method.
+
+If WA does not find the UEFI entry it expects, it will create one. However, it
+will assume that the kernel image resides in NOR flash, which means it will not
+work with Linaro file system. So if you're replicating the Linaro setup exactly,
+you will need to create the entry manually, as outlined on the above-linked page.
+
+Rebooting
+~~~~~~~~~
+
+At the time of writing, normal Android reboot did not work properly on Juno
+Android, causing the device to crash into an irrecoverable state. Therefore, WA
+will perform a hard reset to reboot the device. It will attempt to do this by
+toggling the DTR line on the serial connection to the device. In order for this
+to work, you need to make sure that SW1 configuration switch on the back panel of
+the board (the right-most DIP switch) is toggled *down*.
+
+
+Linux
++++++
+
+General Device Setup
+--------------------
+
+You can specify the device interface by setting ``device`` setting in
+``~/.workload_automation/config.py``. Available interfaces can be viewed by
+running ``wa list devices`` command. If you don't see your specific device
+listed (which is likely unless you're using one of the ARM-supplied platforms), then
+you should use ``generic_linux`` interface (this is set in the config by
+default).
+
+.. code-block:: python
+
+ device = 'generic_linux'
+
+The device interface may be configured through the ``device_config`` setting, whose
+value is a ``dict`` mapping setting names to their values. You can find the full
+list of available parameters by looking up your device interface in the
+:ref:`devices` section of the documentation. Some of the most common parameters
+you might want to change are outlined below.
+
+Currently, the only supported method for talking to a Linux device is over
+SSH. Device configuration must specify the parameters needed to establish the
+connection.
+
+.. confval:: host
+
+ This should be either the DNS name or the IP address of the device.
+
+.. confval:: username
+
+ The login name of the user on the device that WA will use. This user should
+ have a home directory (unless an alternative working directory is specified
+ using ``working_directory`` config -- see below), and, for full
+ functionality, the user should have sudo rights (WA will be able to use
+ sudo-less accounts but some instruments or workloads may not work).
+
+.. confval:: password
+
+ Password for the account on the device. Either this or a ``keyfile`` (see
+ below) must be specified.
+
+.. confval:: keyfile
+
+ If key-based authentication is used, this may be used to specify the SSH identity
+ file instead of the password.
+
+.. confval:: property_files
+
+ This is a list of paths that will be pulled for each WA run into the __meta
+ subdirectory in the results. The intention is to collect meta-data about the
+ device that may aid in reproducing the results later. The paths specified do
+ not have to exist on the device (they will be ignored if they do not). The
+ default list is ``['/proc/version', '/etc/debian_version', '/etc/lsb-release', '/etc/arch-release']``
+
+
+In addition, ``working_directory``, ``scheduler``, ``core_names``, and
+``core_clusters`` can also be specified and have the same meaning as for Android
+devices (see above).
+
+A typical ``device_config`` inside ``config.py`` may look something like
+
+
+.. code-block:: python
+
+ device_config = dict(
+ host='192.168.0.7',
+ username='guest',
+ password='guest',
+ core_names=['a7', 'a7', 'a7', 'a15', 'a15'],
+ core_clusters=[0, 0, 0, 1, 1],
+ # ...
+ )
+
+
+Related Settings
+++++++++++++++++
+
+Reboot Policy
+-------------
+
+This indicates when during WA execution the device will be rebooted. By default
+this is set to ``never``, indicating that WA will not reboot the device. Please
+see ``reboot_policy`` documentation in :ref:`configuration-specification` for
+more details.
+
+Execution Order
+---------------
+
+``execution_order`` defines the order in which WA will execute workloads.
+``by_iteration`` (set by default) will execute the first iteration of each spec
+first, followed by the second iteration of each spec (that defines more than one
+iteration) and so forth. The alternative will loop through all iterations for
+the first spec first, then move on to the second spec, etc. Again, please see
+:ref:`configuration-specification` for more details.
+
+
+Adding a new device interface
++++++++++++++++++++++++++++++
+
+If you are working with a particularly unusual device (e.g. an early-stage
+development board) or need to be able to handle some quirk of your Android build,
+configuration available in ``generic_android`` interface may not be enough for
+you. In that case, you may need to write a custom interface for your device. A
+device interface is an ``Extension`` (a plug-in) type in WA and is implemented
+similar to other extensions (such as workloads or instruments). Please refer to
+:ref:`adding_a_device` section for information on how this may be done.
diff --git a/doc/source/execution_model.rst b/doc/source/execution_model.rst
new file mode 100644
index 00000000..3140583b
--- /dev/null
+++ b/doc/source/execution_model.rst
@@ -0,0 +1,115 @@
+++++++++++++++++++
+Framework Overview
+++++++++++++++++++
+
+Execution Model
+===============
+
+At the high level, the execution model looks as follows:
+
+.. image:: wa-execution.png
+ :scale: 50 %
+
+After some initial setup, the framework initializes the device, loads and initializes
+instrumentation and begins executing jobs defined by the workload specs in the agenda. Each job
+executes in four basic stages:
+
+setup
+ Initial setup for the workload is performed. E.g. required assets are deployed to the
+ devices, required services or applications are launched, etc. Run time configuration of the
+ device for the workload is also performed at this time.
+
+run
+ This is when the workload actually runs. This is defined as the part of the workload that is
+ to be measured. Exactly what happens at this stage depends entirely on the workload.
+
+result processing
+ Results generated during the execution of the workload, if there are any, are collected,
+ parsed and extracted metrics are passed up to the core framework.
+
+teardown
+ Final clean up is performed, e.g. applications may be closed, files generated during execution
+ deleted, etc.
+
+Signals are dispatched (see signal_dispatch_ below) at each stage of workload execution,
+which installed instrumentation can hook into in order to collect measurements, alter workload
+execution, etc. Instrumentation implementation usually mirrors that of workloads, defining
+setup, teardown and result processing stages for a particular instrument. Instead of a ``run``,
+instruments usually implement a ``start`` and a ``stop`` which get triggered just before and just
+after a workload run. However, the signal dispatch mechanism gives a high degree of flexibility
+to instruments allowing them to hook into almost any stage of a WA run (apart from the very
+early initialization).
+
+Metrics and artifacts generated by workloads and instrumentation are accumulated by the framework
+and are then passed to active result processors. This happens after each individual workload
+execution and at the end of the run. A result processor may choose to act at either or both of these
+points.
+
+
+Control Flow
+============
+
+This section goes into more detail explaining the relationship between the major components of the
+framework and how control passes between them during a run. It will only go through the major
+transitions and interactions and will not attempt to describe every single thing that happens.
+
+.. note:: This is the control flow for the ``wa run`` command which is the main functionality
+ of WA. Other commands are much simpler and most of what is described below does not
+ apply to them.
+
+#. ``wlauto.core.entry_point`` parses the command from the arguments and executes the run command
+ (``wlauto.commands.run.RunCommand``).
+#. Run command initializes the output directory and creates a ``wlauto.core.agenda.Agenda`` based on
+ the command line arguments. Finally, it instantiates a ``wlauto.core.execution.Executor`` and
+ passes it the Agenda.
+#. The Executor uses the Agenda to create a ``wlauto.core.configuration.RunConfiguration`` which fully
+ defines the configuration for the run (it will be serialised into the ``__meta`` subdirectory under
+ the output directory).
+#. The Executor proceeds to instantiate and install instrumentation, result processors and the
+ device interface, based on the RunConfiguration. The executor also initialises a
+ ``wlauto.core.execution.ExecutionContext`` which is used to track the current state of the run
+ execution and also serves as a means of communication between the core framework and the
+ extensions.
+#. Finally, the Executor instantiates a ``wlauto.core.execution.Runner``, initializes its job
+ queue with workload specs from the RunConfiguration, and kicks it off.
+#. The Runner performs the run time initialization of the device and goes through the workload specs
+ (in the order defined by ``execution_order`` setting), running each spec according to the
+ execution model described in the previous section. The Runner sends signals (see below) at
+ appropriate points during execution.
+#. At the end of the run, the control is briefly passed back to the Executor, which outputs a
+ summary for the run.
+
+
+.. _signal_dispatch:
+
+Signal Dispatch
+===============
+
+WA uses the `louie <https://pypi.python.org/pypi/Louie/1.1>`_ (formerly, pydispatcher) library
+for signal dispatch. Callbacks can be registered for signals emitted during the run. WA uses a
+version of louie that has been modified to introduce priority to registered callbacks (so that
+callbacks that are known to be slow can be registered with a lower priority so that they do not
+interfere with other callbacks).
+
+This mechanism is abstracted for instrumentation. Methods of an :class:`wlauto.core.Instrument`
+subclass automatically get hooked to appropriate signals based on their names when the instrument
+is "installed" for the run. Priority can be specified by adding ``very_fast_``, ``fast_`` ,
+``slow_`` or ``very_slow_`` prefixes to method names.
+
+The full list of method names and the signals they map to may be viewed
+:ref:`here <instrumentation_method_map>`.
+
+Signal dispatching mechanism may also be used directly, for example to dynamically register
+callbacks at runtime or allow extensions other than ``Instruments`` to access stages of the run
+they are normally not aware of.
+
+The sending of signals is the responsibility of the Runner. Signals get sent during transitions
+between execution stages and when special events, such as errors or device reboots, occur.
+
+See Also
+--------
+
+.. toctree::
+ :maxdepth: 1
+
+ instrumentation_method_map
diff --git a/doc/source/index.rst b/doc/source/index.rst
new file mode 100644
index 00000000..46095f5d
--- /dev/null
+++ b/doc/source/index.rst
@@ -0,0 +1,138 @@
+.. Workload Automation 2 documentation master file, created by
+ sphinx-quickstart on Mon Jul 15 09:00:46 2013.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Welcome to Documentation for Workload Automation
+================================================
+
+Workload Automation (WA) is a framework for running workloads on real hardware devices. WA
+supports a number of output formats as well as additional instrumentation (such as Streamline
+traces). A number of workloads are included with the framework.
+
+
+.. contents:: Contents
+
+
+What's New
+~~~~~~~~~~
+
+.. toctree::
+ :maxdepth: 1
+
+ changes
+
+
+Usage
+~~~~~
+
+This section lists general usage documentation. If you're new to WA2, it is
+recommended you start with the :doc:`quickstart` page. This section also contains
+installation and configuration guides.
+
+
+.. toctree::
+ :maxdepth: 2
+
+ quickstart
+ installation
+ device_setup
+ invocation
+ agenda
+ configuration
+
+
+Extensions
+~~~~~~~~~~
+
+This section lists extensions that currently come with WA2. Each package below
+represents a particular type of extension (e.g. a workload); each sub-package of
+that package is a particular instance of that extension (e.g. the Andebench
+workload). Clicking on a link will show what the individual extension does,
+what configuration parameters it takes, etc.
+
+For how to implement your own extensions, please refer to the guides in the
+:ref:`in-depth` section.
+
+.. raw:: html
+
+ <style>
+ td {
+ vertical-align: text-top;
+ }
+ </style>
+ <table><tr><td>
+
+.. toctree::
+ :maxdepth: 2
+
+ extensions/workloads
+
+.. raw:: html
+
+ </td><td>
+
+.. toctree::
+ :maxdepth: 2
+
+ extensions/instruments
+
+
+.. raw:: html
+
+ </td><td>
+
+.. toctree::
+ :maxdepth: 2
+
+ extensions/result_processors
+
+.. raw:: html
+
+ </td><td>
+
+.. toctree::
+ :maxdepth: 2
+
+ extensions/devices
+
+.. raw:: html
+
+ </td></tr></table>
+
+.. _in-depth:
+
+In-depth
+~~~~~~~~
+
+This section contains more advanced topics, such as how to write your own extensions
+and detailed descriptions of how WA functions under the hood.
+
+.. toctree::
+ :maxdepth: 2
+
+ conventions
+ writing_extensions
+ execution_model
+ resources
+ additional_topics
+ daq_device_setup
+ revent
+ contributing
+
+API Reference
+~~~~~~~~~~~~~
+
+.. toctree::
+ :maxdepth: 5
+
+ api/modules
+
+
+Indices and tables
+~~~~~~~~~~~~~~~~~~
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/doc/source/installation.rst b/doc/source/installation.rst
new file mode 100644
index 00000000..0485ddcd
--- /dev/null
+++ b/doc/source/installation.rst
@@ -0,0 +1,144 @@
+============
+Installation
+============
+
+.. module:: wlauto
+
+This page describes how to install Workload Automation 2.
+
+
+Prerequisites
+=============
+
+Operating System
+----------------
+
+WA runs on a native Linux install. It was tested with Ubuntu 12.04,
+but any recent Linux distribution should work. It should run on either
+32bit or 64bit OS, provided the correct version of Android (see below)
+was installed. Officially, **other environments are not supported**. WA
+has been known to run on Linux Virtual machines and in Cygwin environments,
+though additional configuration may be required in both cases (known issues
+include making sure USB/serial connections are passed to the VM, and wrong
+python/pip binaries being picked up in Cygwin). WA *should* work on other
+Unix-based systems such as BSD or Mac OS X, but it has not been tested
+in those environments. WA *does not* run on Windows (though it should be
+possible to get limited functionality with minimal porting effort).
+
+
+Android SDK
+-----------
+
+You need to have the Android SDK with at least one platform installed.
+To install it, download the ADT Bundle from here_. Extract it
+and add ``<path_to_android_sdk>/sdk/platform-tools`` and ``<path_to_android_sdk>/sdk/tools``
+to your ``PATH``. To test that you've installed it properly run ``adb
+version``, the output should be similar to this::
+
+ $$ adb version
+ Android Debug Bridge version 1.0.31
+ $$
+
+.. _here: https://developer.android.com/sdk/index.html
+
+Once that is working, run ::
+
+ android update sdk
+
+This will open up a dialog box listing available android platforms and
+corresponding API levels, e.g. ``Android 4.3 (API 18)``. For WA, you will need
+at least API level 18 (i.e. Android 4.3), though installing the latest is
+usually the best bet.
+
+Optionally (but recommended), you should also set ``ANDROID_HOME`` to point to
+the install location of the SDK (i.e. ``<path_to_android_sdk>/sdk``).
+
+
+Python
+------
+
+Workload Automation 2 requires Python 2.7 (Python 3 is not supported, at the moment).
+
+
+pip
+---
+
+pip is the recommended package manager for Python. It is not part of standard
+Python distribution and would need to be installed separately. On Ubuntu and
+similar distributions, this may be done with APT::
+
+ sudo apt-get install python-pip
+
+
+Python Packages
+---------------
+
+.. note:: pip should automatically download and install missing dependencies,
+ so if you're using pip, you can skip this section.
+
+Workload Automation 2 depends on the following additional libraries:
+
+ * pexpect
+ * docutils
+ * pySerial
+ * pyYAML
+ * python-dateutil
+
+You can install these with pip::
+
+ sudo pip install pexpect
+ sudo pip install pyserial
+ sudo pip install pyyaml
+ sudo pip install docutils
+ sudo pip install python-dateutil
+
+Some of these may also be available in your distro's repositories, e.g. ::
+
+ sudo apt-get install python-serial
+
+Distro package versions tend to be older, so pip installation is recommended.
+However, pip will always download and try to build the source, so in some
+situations distro binaries may provide an easier fall back. Please also note that
+distro package names may differ from pip packages.
+
+
+Optional Python Packages
+------------------------
+
+.. note:: unlike the mandatory dependencies in the previous section,
+ pip will *not* install these automatically, so you will have
+ to explicitly install them if/when you need them.
+
+In addition to the mandatory packages listed in the previous sections, some WA
+functionality (e.g. certain extensions) may have additional dependencies. Since
+they are not necessary to be able to use most of WA, they are not made mandatory
+to simplify initial WA installation. If you try to use an extension that has
+additional, unmet dependencies, WA will tell you before starting the run, and
+you can install it then. They are listed here for those that would rather
+install them upfront (e.g. if you're planning to use WA to an environment that
+may not always have Internet access).
+
+ * nose
+ * pandas
+ * PyDAQmx
+ * pymongo
+ * jinja2
+
+
+.. note:: Some packages have C extensions and will require Python development
+ headers to install. You can get those by installing ``python-dev``
+ package in apt on Ubuntu (or the equivalent for your distribution).
+
+Installing
+==========
+
+Download the tarball and run pip::
+
+ sudo pip install wlauto-$version.tar.gz
+
+If the above succeeds, try ::
+
+ wa --version
+
+Hopefully, this should output something along the lines of "Workload Automation
+version $version".
diff --git a/doc/source/instrumentation_method_map.rst b/doc/source/instrumentation_method_map.rst
new file mode 100644
index 00000000..f68ecb59
--- /dev/null
+++ b/doc/source/instrumentation_method_map.rst
@@ -0,0 +1,73 @@
+Instrumentation Signal-Method Mapping
+=====================================
+
+.. _instrumentation_method_map:
+
+Instrument methods get automatically hooked up to signals based on their names. Mostly, the method
+name corresponds to the name of the signal, however there are a few convenience aliases defined
+(listed first) to make it easier to relate instrumentation code to the workload execution model.
+
+======================================== =========================================
+method name signal
+======================================== =========================================
+initialize run-init-signal
+setup successful-workload-setup-signal
+start before-workload-execution-signal
+stop after-workload-execution-signal
+process_workload_result successful-iteration-result-update-signal
+update_result after-iteration-result-update-signal
+teardown after-workload-teardown-signal
+finalize run-fin-signal
+on_run_start start-signal
+on_run_end end-signal
+on_workload_spec_start workload-spec-start-signal
+on_workload_spec_end workload-spec-end-signal
+on_iteration_start iteration-start-signal
+on_iteration_end iteration-end-signal
+before_initial_boot before-initial-boot-signal
+on_successful_initial_boot successful-initial-boot-signal
+after_initial_boot after-initial-boot-signal
+before_first_iteration_boot before-first-iteration-boot-signal
+on_successful_first_iteration_boot successful-first-iteration-boot-signal
+after_first_iteration_boot after-first-iteration-boot-signal
+before_boot before-boot-signal
+on_successful_boot successful-boot-signal
+after_boot after-boot-signal
+on_spec_init spec-init-signal
+on_run_init run-init-signal
+on_iteration_init iteration-init-signal
+before_workload_setup before-workload-setup-signal
+on_successful_workload_setup successful-workload-setup-signal
+after_workload_setup after-workload-setup-signal
+before_workload_execution before-workload-execution-signal
+on_successful_workload_execution successful-workload-execution-signal
+after_workload_execution after-workload-execution-signal
+before_workload_result_update before-iteration-result-update-signal
+on_successful_workload_result_update successful-iteration-result-update-signal
+after_workload_result_update after-iteration-result-update-signal
+before_workload_teardown before-workload-teardown-signal
+on_successful_workload_teardown successful-workload-teardown-signal
+after_workload_teardown after-workload-teardown-signal
+before_overall_results_processing before-overall-results-process-signal
+on_successful_overall_results_processing successful-overall-results-process-signal
+after_overall_results_processing after-overall-results-process-signal
+on_error error_logged
+on_warning warning_logged
+======================================== =========================================
+
+
+The names above may be prefixed with one of the pre-defined prefixes to set the priority of the
+Instrument method relative to other callbacks registered for the signal (within the same priority
+level, callbacks are invoked in the order they were registered). The table below shows the mapping
+of the prefix to the corresponding priority:
+
+=========== ===
+prefix priority
+=========== ===
+very_fast\_ 20
+fast\_ 10
+normal\_ 0
+slow\_ -10
+very_slow\_ -20
+=========== ===
+
diff --git a/doc/source/instrumentation_method_map.template b/doc/source/instrumentation_method_map.template
new file mode 100644
index 00000000..48003245
--- /dev/null
+++ b/doc/source/instrumentation_method_map.template
@@ -0,0 +1,17 @@
+Instrumentation Signal-Method Mapping
+=====================================
+
+.. _instrumentation_method_map:
+
+Instrument methods get automatically hooked up to signals based on their names. Mostly, the method
+name corresponds to the name of the signal, however there are a few convenience aliases defined
+(listed first) to make it easier to relate instrumentation code to the workload execution model.
+
+$signal_names
+
+The names above may be prefixed with one of the pre-defined prefixes to set the priority of the
+Instrument method relative to other callbacks registered for the signal (within the same priority
+level, callbacks are invoked in the order they were registered). The table below shows the mapping
+of the prefix to the corresponding priority:
+
+$priority_prefixes
diff --git a/doc/source/invocation.rst b/doc/source/invocation.rst
new file mode 100644
index 00000000..5c8ead92
--- /dev/null
+++ b/doc/source/invocation.rst
@@ -0,0 +1,135 @@
+.. _invocation:
+
+========
+Commands
+========
+
+Installing the wlauto package will add ``wa`` command to your system,
+which you can run from anywhere. This has a number of sub-commands, which can
+be viewed by executing ::
+
+ wa -h
+
+Individual sub-commands are discussed in detail below.
+
+run
+---
+
+The most common sub-command you will use is ``run``. This will run specified
+workload(s) and process resulting output. This takes a single mandatory
+argument that specifies what you want WA to run. This could be either a
+workload name, or a path to an "agenda" file that allows to specify multiple
+workloads as well as a lot of additional configuration (see :ref:`agenda`
+section for details). Executing ::
+
+ wa run -h
+
+Will display help for this subcommand that will look something like this::
+
+ usage: run [-d DIR] [-f] AGENDA
+
+ Execute automated workloads on a remote device and process the resulting
+ output.
+
+ positional arguments:
+ AGENDA Agenda for this workload automation run. This defines
+ which workloads will be executed, how many times, with
+ which tunables, etc. See /usr/local/lib/python2.7
+ /dist-packages/wlauto/agenda-example.csv for an
+ example of how this file should be structured.
+
+ optional arguments:
+ -h, --help show this help message and exit
+ -c CONFIG, --config CONFIG
+ specify an additional config.py
+ -v, --verbose The scripts will produce verbose output.
+ --version Output the version of Workload Automation and exit.
+ --debug Enable debug mode. Note: this implies --verbose.
+ -d DIR, --output-directory DIR
+ Specify a directory where the output will be
+ generated. If the directoryalready exists, the script
+ will abort unless -f option (see below) is used,in
+ which case the contents of the directory will be
+ overwritten. If this optionis not specified, then
+ wa_output will be used instead.
+ -f, --force Overwrite output directory if it exists. By default,
+ the script will abort in thissituation to prevent
+ accidental data loss.
+ -i ID, --id ID Specify a workload spec ID from an agenda to run. If
+ this is specified, only that particular spec will be
+ run, and other workloads in the agenda will be
+ ignored. This option may be used to specify multiple
+ IDs.
+
+
+Output Directory
+~~~~~~~~~~~~~~~~
+
+The exact contents of the output directory will depend on configuration options
+used, instrumentation and output processors enabled, etc. Typically, the output
+directory will contain a results file at the top level that lists all
+measurements that were collected (currently, csv and json formats are
+supported), along with a subdirectory for each iteration executed with output
+for that specific iteration.
+
+At the top level, there will also be a run.log file containing the complete log
+output for the execution. The contents of this file are equivalent to what you
+would get in the console when using --verbose option.
+
+Finally, there will be a __meta subdirectory. This will contain a copy of the
+agenda file used to run the workloads along with any other device-specific
+configuration files used during execution.
+
+
+list
+----
+
+This lists all extensions of a particular type. For example ::
+
+ wa list workloads
+
+will list all workloads currently included in WA. The list will consist of
+extension names and short descriptions of the functionality they offer.
+
+
+show
+----
+
+This will show detailed information about an extension, including more in-depth
+description and any parameters/configuration that are available. For example
+executing ::
+
+ wa show andebench
+
+will produce something like ::
+
+
+ andebench
+
+ AndEBench is an industry standard Android benchmark provided by The Embedded Microprocessor Benchmark Consortium
+ (EEMBC).
+
+ parameters:
+
+ number_of_threads
+ Number of threads that will be spawned by AndEBench.
+ type: int
+
+ single_threaded
+ If ``true``, AndEBench will run with a single thread. Note: this must not be specified if ``number_of_threads``
+ has been specified.
+ type: bool
+
+ http://www.eembc.org/andebench/about.php
+
+ From the website:
+
+ - Initial focus on CPU and Dalvik interpreter performance
+ - Internal algorithms concentrate on integer operations
+ - Compares the difference between native and Java performance
+ - Implements flexible multicore performance analysis
+ - Results displayed in Iterations per second
+ - Detailed log file for comprehensive engineering analysis
+
+
+
diff --git a/doc/source/quickstart.rst b/doc/source/quickstart.rst
new file mode 100644
index 00000000..7b9ec9b7
--- /dev/null
+++ b/doc/source/quickstart.rst
@@ -0,0 +1,162 @@
+==========
+Quickstart
+==========
+
+This section will show you how to quickly start running workloads using
+Workload Automation 2.
+
+
+Install
+=======
+
+.. note:: This is a quick summary. For more detailed instructions, please see
+ the :doc:`installation` section.
+
+Make sure you have Python 2.7 and a recent Android SDK with API level 18 or above
+installed on your system. For the SDK, make sure that either ``ANDROID_HOME``
+environment variable is set, or that ``adb`` is in your ``PATH``.
+
+.. note:: A complete install of the Android SDK is required, as WA uses a
+ number of its utilities, not just adb.
+
+In addition to the base Python 2.7 install, you will also need to have ``pip``
+(Python's package manager) installed as well. This is usually a separate package.
+
+Once you have the pre-requisites and a tarball with the workload automation package,
+you can install it with pip::
+
+ sudo pip install wlauto-2.2.0dev.tar.gz
+
+This will install Workload Automation on your system, along with the Python
+packages it depends on.
+
+(Optional) Verify installation
+-------------------------------
+
+Once the tarball has been installed, try executing ::
+
+ wa -h
+
+You should see a help message outlining available subcommands.
+
+
+(Optional) APK files
+--------------------
+
+A large number of WA workloads are installed as APK files. These cannot be
+distributed with WA and so you will need to obtain those separately.
+
+For more details, please see the :doc:`installation` section.
+
+
+Configure Your Device
+=====================
+
+Out of the box, WA is configured to work with a generic Android device through
+``adb``. If you only have one device listed when you execute ``adb devices``,
+and your device has a standard Android configuration, then no extra configuration
+is required (if your device is connected via network, you will have to manually execute
+``adb connect <device ip>`` so that it appears in the device listing).
+
+If you have multiple devices connected, you will need to tell WA which one you
+want it to use. You can do that by setting ``adb_name`` in device configuration inside
+``~/.workload_automation/config.py``\ , e.g.
+
+.. code-block:: python
+
+ # ...
+
+ device_config = dict(
+ adb_name = 'abcdef0123456789',
+ # ...
+ )
+
+ # ...
+
+This should give you basic functionality. If your device has non-standard
+Android configuration (e.g. it's a development board) or your need some advanced
+functionality (e.g. big.LITTLE tuning parameters), additional configuration may
+be required. Please see the :doc:`device_setup` section for more details.
+
+
+Running Your First Workload
+===========================
+
+The simplest way to run a workload is to specify it as a parameter to WA ``run``
+sub-command::
+
+ wa run dhrystone
+
+You will see INFO output from WA as it executes each stage of the run. A
+completed run output should look something like this::
+
+ INFO Initializing
+ INFO Running workloads
+ INFO Connecting to device
+ INFO Initializing device
+ INFO Running workload 1 dhrystone (iteration 1)
+ INFO Setting up
+ INFO Executing
+ INFO Processing result
+ INFO Tearing down
+ INFO Processing overall results
+ INFO Status available in wa_output/status.txt
+ INFO Done.
+ INFO Ran a total of 1 iterations: 1 OK
+ INFO Results can be found in wa_output
+
+Once the run has completed, you will find a directory called ``wa_output``
+in the location where you have invoked ``wa run``. Within this directory,
+you will find a "results.csv" file which will contain results obtained for
+dhrystone, as well as a "run.log" file containing detailed log output for
+the run. You will also find a sub-directory called 'dhrystone_1_1' that
+contains the results for that iteration. Finally, you will find a copy of the
+agenda file in the ``wa_output/__meta`` subdirectory. The contents of
+iteration-specific subdirectories will vary from workload to workload, and,
+along with the contents of the main output directory, will depend on the
+instrumentation and result processors that were enabled for that run.
+
+The ``run`` sub-command takes a number of options that control its behavior,
+you can view those by executing ``wa run -h``. Please see the :doc:`invocation`
+section for details.
+
+
+Create an Agenda
+================
+
+Simply running a single workload is normally of little use. Typically, you would
+want to specify several workloads, setup the device state and, possibly, enable
+additional instrumentation. To do this, you would need to create an "agenda" for
+the run that outlines everything you want WA to do.
+
+Agendas are written using YAML_ markup language. A simple agenda might look
+like this:
+
+.. code-block:: yaml
+
+ config:
+ instrumentation: [~execution_time]
+ result_processors: [json]
+ global:
+ iterations: 2
+ workloads:
+ - memcpy
+ - name: dhrystone
+ params:
+ mloops: 5
+ threads: 1
+
+This agenda
+
+- Specifies two workloads: memcpy and dhrystone.
+- Specifies that dhrystone should run in one thread and execute five million loops.
+- Specifies that each of the two workloads should be run twice.
+- Enables json result processor, in addition to the result processors enabled in
+ the config.py.
+- Disables execution_time instrument, if it is enabled in the config.py
+
+There is a lot more that could be done with an agenda. Please see :doc:`agenda`
+section for details.
+
+.. _YAML: http://en.wikipedia.org/wiki/YAML
+
diff --git a/doc/source/resources.rst b/doc/source/resources.rst
new file mode 100644
index 00000000..af944e6f
--- /dev/null
+++ b/doc/source/resources.rst
@@ -0,0 +1,45 @@
+Dynamic Resource Resolution
+===========================
+
+Introduced in version 2.1.3.
+
+The idea is to decouple resource identification from resource discovery.
+Workloads/instruments/devices/etc state *what* resources they need, and not
+*where* to look for them -- this instead is left to the resource resolver that
+is now part of the execution context. The actual discovery of resources is
+performed by resource getters that are registered with the resolver.
+
+A resource type is defined by a subclass of
+:class:`wlauto.core.resource.Resource`. An instance of this class describes a
+resource that is to be obtained. At minimum, a ``Resource`` instance has an
+owner (which is typically the object that is looking for the resource), but
+specific resource types may define other parameters that describe an instance of
+that resource (such as file names, URLs, etc).
+
+An object looking for a resource invokes a resource resolver with an instance of
+``Resource`` describing the resource it is after. The resolver goes through the
+getters registered for that resource type in priority order attempting to obtain
+the resource; once the resource is obtained, it is returned to the calling
+object. If none of the registered getters could find the resource, ``None`` is
+returned instead.
+
+The most common kind of object looking for resources is a ``Workload``, and
+since v2.1.3, ``Workload`` class defines
+:py:meth:`wlauto.core.workload.Workload.init_resources` method that may be
+overridden by subclasses to perform resource resolution. For example, a workload
+looking for an APK file would do so like this::
+
+ from wlauto import Workload
+ from wlauto.common.resources import ApkFile
+
+ class AndroidBenchmark(Workload):
+
+ # ...
+
+ def init_resources(self, context):
+ self.apk_file = context.resource.get(ApkFile(self))
+
+ # ...
+
+
+Currently available resource types are defined in :py:mod:`wlauto.common.resources`.
diff --git a/doc/source/revent.rst b/doc/source/revent.rst
new file mode 100644
index 00000000..e3b756ce
--- /dev/null
+++ b/doc/source/revent.rst
@@ -0,0 +1,97 @@
+.. _revent_files_creation:
+
+revent
+======
+
+revent utility can be used to record and later play back a sequence of user
+input events, such as key presses and touch screen taps. This is an alternative
+to Android UI Automator for providing automation for workloads. ::
+
+
+ usage:
+ revent [record time file|replay file|info] [verbose]
+ record: stops after either return on stdin
+ or time (in seconds)
+ and stores in file
+ replay: replays eventlog from file
+ info:shows info about each event char device
+ any additional parameters make it verbose
+
+Recording
+---------
+
+To record, transfer the revent binary to the device, then invoke ``revent
+record``, giving it the time (in seconds) you want to record for, and the
+file you want to record to (WA expects these files to have .revent
+extension)::
+
+ host$ adb push revent /data/local/revent
+ host$ adb shell
+ device# cd /data/local
+ device# ./revent record 1000 my_recording.revent
+
+The recording has now started and button presses, taps, etc you perform on the
+device will go into the .revent file. The recording will stop after the
+specified time period, and you can also stop it by hitting return in the adb
+shell.
+
+Replaying
+---------
+
+To replay a recorded file, run ``revent replay`` on the device, giving it the
+file you want to replay::
+
+ device# ./revent replay my_recording.revent
+
+
+Using revent With Workloads
+---------------------------
+
+Some workloads (pretty much all games) rely on recorded revents for their
+execution. :class:`wlauto.common.GameWorkload`-derived workloads expect two
+revent files -- one for performing the initial setup (navigating menus,
+selecting game modes, etc), and one for the actual execution of the game.
+Because revents are very device-specific\ [*]_, these two files would need to
+be recorded for each device.
+
+The files must be called ``<device name>.(setup|run).revent``, where
+``<device name>`` is the name of your device (as defined by the ``name``
+attribute of your device's class). WA will look for these files in two
+places: ``<install dir>/wlauto/workloads/<workload name>/revent_files``
+and ``~/.workload_automation/dependencies/<workload name>``. The first
+location is primarily intended for revent files that come with WA (and if
+you did a system-wide install, you'll need sudo to add files there), so it's
+probably easier to use the second location for the files you record. Also,
+if revent files for a workload exist in both locations, the files under
+``~/.workload_automation/dependencies`` will be used in favor of those
+installed with WA.
+
+For example, if you wanted to run angrybirds workload on "Acme" device, you would
+record the setup and run revent files using the method outlined in the section
+above and then pull them for the devices into the following locations::
+
+    ~/.workload_automation/dependencies/angrybirds/Acme.setup.revent
+    ~/.workload_automation/dependencies/angrybirds/Acme.run.revent
+
+(you may need to create the intermediate directories if they don't already
+exist).
+
+.. [*] It's not just about screen resolution -- the event codes may be different
+ even if devices use the same screen.
+
+
+revent vs. UiAutomator
+----------------------
+
+In general, Android UI Automator is the preferred way of automating user input
+for workloads because, unlike revent, UI Automator does not depend on a
+particular screen resolution, and so is more portable across different devices.
+It also gives better control and can potentially be faster for long UI
+manipulations, as input events are scripted based on the available UI elements,
+rather than generated by human input.
+
+On the other hand, revent can be used to manipulate pretty much any workload,
+whereas UI Automator only works for Android UI elements (such as text boxes or
+radio buttons), which makes the latter useless for things like games. Recording
+a revent sequence is also faster than writing automation code (on the other hand,
+one would need to maintain a different revent log for each screen resolution).
diff --git a/doc/source/wa-execution.png b/doc/source/wa-execution.png
new file mode 100644
index 00000000..9bdea6fd
--- /dev/null
+++ b/doc/source/wa-execution.png
Binary files differ
diff --git a/doc/source/writing_extensions.rst b/doc/source/writing_extensions.rst
new file mode 100644
index 00000000..737a1166
--- /dev/null
+++ b/doc/source/writing_extensions.rst
@@ -0,0 +1,956 @@
+==================
+Writing Extensions
+==================
+
+Workload Automation offers several extension points (or plugin types). The most
+interesting of these are
+
+:workloads: These are the tasks that get executed and measured on the device. These
+ can be benchmarks, high-level use cases, or pretty much anything else.
+:devices: These are interfaces to the physical devices (development boards or end-user
+ devices, such as smartphones) that use cases run on. Typically each model of a
+          physical device would require its own interface class (though some functionality
+ may be reused by subclassing from an existing base).
+:instruments: Instruments allow collecting additional data from workload execution (e.g.
+ system traces). Instruments are not specific to a particular Workload. Instruments
+ can hook into any stage of workload execution.
+:result processors: These are used to format the results of workload execution once they have been
+ collected. Depending on the callback used, these will run either after each
+ iteration or at the end of the run, after all of the results have been
+ collected.
+
+You create an extension by subclassing the appropriate base class, defining
+appropriate methods and attributes, and putting the .py file with the class into
+an appropriate subdirectory under ``~/.workload_automation`` (there is one for
+each extension type).
+
+
+Extension Basics
+================
+
+This sub-section covers things common to implementing extensions of all types.
+It is recommended you familiarize yourself with the information here before
+proceeding onto guidance for specific extension types.
+
+To create an extension, you basically subclass an appropriate base class and then
+implement the appropriate methods.
+
+The Context
+-----------
+
+The majority of methods in extensions accept a context argument. This is an
+instance of :class:`wlauto.core.execution.ExecutionContext`. It contains
+information about the current state of execution of WA and keeps track of things
+like which workload is currently running and the current iteration.
+
+Notable attributes of the context are
+
+context.spec
+ the current workload specification being executed. This is an
+ instance of :class:`wlauto.core.configuration.WorkloadRunSpec`
+ and defines the workload and the parameters under which it is
+ being executed.
+
+context.workload
+ ``Workload`` object that is currently being executed.
+
+context.current_iteration
+ The current iteration of the spec that is being executed. Note that this
+ is the iteration for that spec, i.e. the number of times that spec has
+ been run, *not* the total number of all iterations have been executed so
+ far.
+
+context.result
+ This is the result object for the current iteration. This is an instance
+ of :class:`wlauto.core.result.IterationResult`. It contains the status
+ of the iteration as well as the metrics and artifacts generated by the
+    workload and enabled instrumentation.
+
+context.device
+ The device interface object that can be used to interact with the
+ device. Note that workloads and instruments have their own device
+ attribute and they should be using that instead.
+
+In addition to these, context also defines a few useful paths (see below).
+
+
+Paths
+-----
+
+You should avoid using hard-coded absolute paths in your extensions whenever
+possible, as they make your code too dependent on a particular environment and
+may mean having to make adjustments when moving to new (host and/or device)
+platforms. To help avoid hard-coded absolute paths, WA automation defines
+a number of standard locations. You should strive to define your paths relative
+to one of those.
+
+On the host
+~~~~~~~~~~~
+
+Host paths are available through the context object, which is passed to most
+extension methods.
+
+context.run_output_directory
+ This is the top-level output directory for all WA results (by default,
+    this will be "wa_output" in the directory in which WA was invoked).
+
+context.output_directory
+    This is the output directory for the current iteration. This will be an
+ iteration-specific subdirectory under the main results location. If
+ there is no current iteration (e.g. when processing overall run results)
+ this will point to the same location as ``root_output_directory``.
+
+context.host_working_directory
+    This is an additional location that may be used by extensions to store
+ non-iteration specific intermediate files (e.g. configuration).
+
+Additionally, the global ``wlauto.settings`` object exposes one other location:
+
+settings.dependency_directory
+ this is the root directory for all extension dependencies (e.g. media
+ files, assets etc) that are not included within the extension itself.
+
+As per Python best practice, it is recommended that methods and values in
+``os.path`` standard library module are used for host path manipulation.
+
+On the device
+~~~~~~~~~~~~~
+
+Workloads and instruments have a ``device`` attribute, which is an interface to
+the device used by WA. It defines the following location:
+
+device.working_directory
+ This is the directory for all WA-related files on the device. All files
+ deployed to the device should be pushed to somewhere under this location
+ (the only exception being executables installed with ``device.install``
+ method).
+
+Since there could be a mismatch between path notation used by the host and the
+device, the ``os.path`` modules should *not* be used for on-device path
+manipulation. Instead device has an equivalent module exposed through
+``device.path`` attribute. This has all the same attributes and behaves the
+same way as ``os.path``, but is guaranteed to produce valid paths for the device,
+irrespective of the host's path notation.
+
+.. note:: result processors, unlike workloads and instruments, do not have their
+ own device attribute; however they can access the device through the
+ context.
+
+
+Parameters
+----------
+
+All extensions can be parameterized. Parameters are specified using
+``parameters`` class attribute. This should be a list of
+:class:`wlauto.core.Parameter` instances. The following attributes can be
+specified on parameter creation:
+
+name
+ This is the only mandatory argument. The name will be used to create a
+ corresponding attribute in the extension instance, so it must be a valid
+ Python identifier.
+
+kind
+    This is the type of the value of the parameter. This could be any
+    callable. Normally this should be a standard Python type, e.g. ``int``
+    or ``float``, or one of the types defined in :mod:`wlauto.utils.types`.
+ If not explicitly specified, this will default to ``str``.
+
+ .. note:: Irrespective of the ``kind`` specified, ``None`` is always a
+ valid value for a parameter. If you don't want to allow
+ ``None``, then set ``mandatory`` (see below) to ``True``.
+
+allowed_values
+ A list of the only allowed values for this parameter.
+
+ .. note:: For composite types, such as ``list_of_strings`` or
+ ``list_of_ints`` in :mod:`wlauto.utils.types`, each element of
+ the value will be checked against ``allowed_values`` rather
+ than the composite value itself.
+
+default
+ The default value to be used for this parameter if one has not been
+ specified by the user. Defaults to ``None``.
+
+mandatory
+ A ``bool`` indicating whether this parameter is mandatory. Setting this
+ to ``True`` will make ``None`` an illegal value for the parameter.
+ Defaults to ``False``.
+
+ .. note:: Specifying a ``default`` will mean that this parameter will,
+ effectively, be ignored (unless the user sets the param to ``None``).
+
+ .. note:: Mandatory parameters are *bad*. If at all possible, you should
+ strive to provide a sensible ``default`` or to make do without
+ the parameter. Only when the param is absolutely necessary,
+ and there really is no sensible default that could be given
+ (e.g. something like login credentials), should you consider
+ making it mandatory.
+
+constraint
+ This is an additional constraint to be enforced on the parameter beyond
+ its type or fixed allowed values set. This should be a predicate (a function
+ that takes a single argument -- the user-supplied value -- and returns
+ a ``bool`` indicating whether the constraint has been satisfied).
+
+override
+    A parameter name must be unique not only within an extension but also
+    within that extension's class hierarchy. If you try to declare a parameter
+    with the same name as one that already exists, you will get an error. If you do
+ want to override a parameter from further up in the inheritance
+ hierarchy, you can indicate that by setting ``override`` attribute to
+ ``True``.
+
+ When overriding, you do not need to specify every other attribute of the
+    parameter, just the ones you want to override. Values for the rest will
+ be taken from the parameter in the base class.
+
+
+Validation and cross-parameter constraints
+------------------------------------------
+
+An extension will get validated at some point after construction. When exactly
+this occurs depends on the extension type, but it *will* be validated before it
+is used.
+
+You can implement ``validate`` method in your extension (that takes no arguments
+beyond the ``self``) to perform any additional *internal* validation in your
+extension. By "internal", I mean that you cannot make assumptions about the
+surrounding environment (e.g. that the device has been initialized).
+
+The contract for ``validate`` method is that it should raise an exception
+(either ``wlauto.exceptions.ConfigError`` or extension-specific exception type -- see
+further on this page) if some validation condition has not been, and cannot be, met.
+If the method returns without raising an exception, then the extension is in a
+valid internal state.
+
+Note that ``validate`` can be used not only to verify, but also to impose a
+valid internal state. In particular, this is where cross-parameter constraints can
+be resolved. If the ``default`` or ``allowed_values`` of one parameter depend on
+another parameter, there is no way to express that declaratively when specifying
+the parameters. In that case the dependent attribute should be left unspecified
+on creation and should instead be set inside ``validate``.
+
+Logging
+-------
+
+Every extension class has its own logger that you can access through
+``self.logger`` inside the extension's methods. Generally, a :class:`Device` will log
+everything it is doing, so you shouldn't need to add much additional logging in
+your extensions. But you might want to log additional information, e.g.
+what settings your extension is using, what it is doing on the host, etc.
+Operations on the host will not normally be logged, so your extension should
+definitely log what it is doing on the host. One situation in particular where
+you should add logging is before doing something that might take a significant amount
+of time, such as downloading a file.
+
+
+Documenting
+-----------
+
+All extensions and their parameters should be documented. For extensions
+themselves, this is done through ``description`` class attribute. The convention
+for an extension description is that the first paragraph should be a short
+summary description of what the extension does and why one would want to use it
+(among other things, this will get extracted and used by ``wa list`` command).
+Subsequent paragraphs (separated by blank lines) can then provide a more
+detailed description, including any limitations and setup instructions.
+
+For parameters, the description is passed as an argument on creation. Please
+note that if ``default``, ``allowed_values``, or ``constraint``, are set in the
+parameter, they do not need to be explicitly mentioned in the description (wa
+documentation utilities will automatically pull those). If the ``default`` is set
+in ``validate`` or additional cross-parameter constraints exist, this *should*
+be documented in the parameter description.
+
+Both extensions and their parameters should be documented using reStructureText
+markup (standard markup for Python documentation). See:
+
+http://docutils.sourceforge.net/rst.html
+
+Aside from that, it is up to you how you document your extension. You should try
+to provide enough information so that someone unfamiliar with your extension is
+able to use it, e.g. you should document all settings and parameters your
+extension expects (including what the valid values are).
+
+
+Error Notification
+------------------
+
+When you detect an error condition, you should raise an appropriate exception to
+notify the user. The exception would typically be :class:`ConfigError` or
+(depending the type of the extension)
+:class:`WorkloadError`/:class:`DeviceError`/:class:`InstrumentError`/:class:`ResultProcessorError`.
+All these errors are defined in :mod:`wlauto.exception` module.
+
+:class:`ConfigError` should be raised where there is a problem in configuration
+specified by the user (either through the agenda or config files). These errors
+are meant to be resolvable by simple adjustments to the configuration (and the
+error message should suggest what adjustments need to be made). For all other
+errors, such as missing dependencies, mis-configured environment, problems
+performing operations, etc., the extension type-specific exceptions should be
+used.
+
+If the extension itself is capable of recovering from the error and carrying
+on, it may make more sense to log an ERROR or WARNING level message using the
+extension's logger and to continue operation.
+
+
+Utils
+-----
+
+Workload Automation defines a number of utilities collected under
+:mod:`wlauto.utils` subpackage. These utilities were created to help with the
+implementation of the framework itself, but may also be useful when
+implementing extensions.
+
+
+Adding a Workload
+=================
+
+.. note:: You can use ``wa create workload [name]`` script to generate a new workload
+ structure for you. This script can also create the boilerplate for
+ UI automation, if your workload needs it. See ``wa create -h`` for more
+ details.
+
+New workloads can be added by subclassing :class:`wlauto.core.workload.Workload`
+
+
+The Workload class defines the following interface::
+
+ class Workload(Extension):
+
+ name = None
+
+ def init_resources(self, context):
+ pass
+
+ def setup(self, context):
+ raise NotImplementedError()
+
+ def run(self, context):
+ raise NotImplementedError()
+
+ def update_result(self, context):
+ raise NotImplementedError()
+
+ def teardown(self, context):
+ raise NotImplementedError()
+
+ def validate(self):
+ pass
+
+.. note:: Please see :doc:`conventions` section for notes on how to interpret
+ this.
+
+The interface should be implemented as follows
+
+    :name: This identifies the workload (e.g. it is used to specify it in the
+           agenda_).
+    :init_resources: This method may optionally be overridden to implement dynamic
+ resource discovery for the workload.
+ **Added in version 2.1.3**
+ :setup: Everything that needs to be in place for workload execution should
+ be done in this method. This includes copying files to the device,
+ starting up an application, configuring communications channels,
+ etc.
+ :run: This method should perform the actual task that is being measured.
+ When this method exits, the task is assumed to be complete.
+
+ .. note:: Instrumentation is kicked off just before calling this
+ method and is disabled right after, so everything in this
+ method is being measured. Therefore this method should
+ contain the least code possible to perform the operations
+ you are interested in measuring. Specifically, things like
+ installing or starting applications, processing results, or
+ copying files to/from the device should be done elsewhere if
+ possible.
+
+ :update_result: This method gets invoked after the task execution has
+ finished and should be used to extract metrics and add them
+ to the result (see below).
+ :teardown: This could be used to perform any cleanup you may wish to do,
+ e.g. Uninstalling applications, deleting file on the device, etc.
+
+ :validate: This method can be used to validate any assumptions your workload
+ makes about the environment (e.g. that required files are
+ present, environment variables are set, etc) and should raise
+ a :class:`wlauto.exceptions.WorkloadError` if that is not the
+               case. The base class implementation only makes sure that
+ the name attribute has been set.
+
+.. _agenda: agenda.html
+
+Workload methods (except for ``validate``) take a single argument that is a
+:class:`wlauto.core.execution.ExecutionContext` instance. This object keeps
+track of the current execution state (such as the current workload, iteration
+number, etc), and contains, among other things, a
+:class:`wlauto.core.workload.WorkloadResult` instance that should be populated
+from the ``update_result`` method with the results of the execution. ::
+
+ # ...
+
+ def update_result(self, context):
+ # ...
+ context.result.add_metric('energy', 23.6, 'Joules', lower_is_better=True)
+
+ # ...
+
+Example
+-------
+
+This example shows a simple workload that times how long it takes to compress a
+file of a particular size on the device.
+
+.. note:: This is intended as an example of how to implement the Workload
+ interface. The methodology used to perform the actual measurement is
+ not necessarily sound, and this Workload should not be used to collect
+ real measurements.
+
+.. code-block:: python
+
+ import os
+ from wlauto import Workload, Parameter
+
+ class ZiptestWorkload(Workload):
+
+ name = 'ziptest'
+ description = '''
+ Times how long it takes to gzip a file of a particular size on a device.
+
+ This workload was created for illustration purposes only. It should not be
+ used to collect actual measurements.
+
+ '''
+
+ parameters = [
+ Parameter('file_size', kind=int, default=2000000,
+ description='Size of the file (in bytes) to be gzipped.')
+ ]
+
+ def setup(self, context):
+ # Generate a file of the specified size containing random garbage.
+ host_infile = os.path.join(context.output_directory, 'infile')
+ command = 'openssl rand -base64 {} > {}'.format(self.file_size, host_infile)
+ os.system(command)
+ # Set up on-device paths
+ devpath = self.device.path # os.path equivalent for the device
+ self.device_infile = devpath.join(self.device.working_directory, 'infile')
+ self.device_outfile = devpath.join(self.device.working_directory, 'outfile')
+ # Push the file to the device
+ self.device.push_file(host_infile, self.device_infile)
+
+ def run(self, context):
+ self.device.execute('cd {} && (time gzip {}) &>> {}'.format(self.device.working_directory,
+ self.device_infile,
+ self.device_outfile))
+
+ def update_result(self, context):
+ # Pull the results file to the host
+ host_outfile = os.path.join(context.output_directory, 'outfile')
+ self.device.pull_file(self.device_outfile, host_outfile)
+            # Extract metrics from the file's contents and update the result
+ # with them.
+ content = iter(open(host_outfile).read().strip().split())
+ for value, metric in zip(content, content):
+ mins, secs = map(float, value[:-1].split('m'))
+ context.result.add_metric(metric, secs + 60 * mins)
+
+ def teardown(self, context):
+ # Clean up on-device file.
+ self.device.delete_file(self.device_infile)
+ self.device.delete_file(self.device_outfile)
+
+
+
+.. _GameWorkload:
+
+Adding revent-dependent Workload:
+---------------------------------
+
+:class:`wlauto.common.game.GameWorkload` is the base class for all the workloads
+that depend on :ref:`revent_files_creation` files. It implements all the methods
+needed to push the files to the device and run them. New GameWorkload can be
+added by subclassing :class:`wlauto.common.game.GameWorkload`:
+
+The GameWorkload class defines the following interface::
+
+ class GameWorkload(Workload):
+
+ name = None
+ package = None
+ activity = None
+
+The interface should be implemented as follows
+
+    :name: This identifies the workload (e.g. it is used to specify it in the
+           agenda_).
+ :package: This is the name of the '.apk' package without its file extension.
+ :activity: The name of the main activity that runs the package.
+
+Example:
+--------
+
+This example shows a simple GameWorkload that plays a game.
+
+.. code-block:: python
+
+ from wlauto.common.game import GameWorkload
+
+ class MyGame(GameWorkload):
+
+ name = 'mygame'
+ package = 'com.mylogo.mygame'
+ activity = 'myActivity.myGame'
+
+Convention for Naming revent Files for :class:`wlauto.common.game.GameWorkload`
+-------------------------------------------------------------------------------
+
+There is a convention for naming revent files which you should follow if you
+want to record your own revent files. Each revent file must start with the
+device name (case sensitive), followed by a dot '.', then the stage name,
+then '.revent'. All your custom revent files should reside at
+'~/.workload_automation/dependencies/WORKLOAD NAME/'. These are the current
+supported stages:
+
+ :setup: This stage is where the game is loaded. It is a good place to
+ record revent here to modify the game settings and get it ready
+ to start.
+ :run: This stage is where the game actually starts. This will allow for
+ more accurate results if the revent file for this stage only
+ records the game being played.
+
+For instance, to add custom revent files for a device named mydevice and
+a workload named mygame, you create a new directory called mygame in
+'~/.workload_automation/dependencies/'. Then you add the revent files for
+the stages you want in ~/.workload_automation/dependencies/mygame/::
+
+ mydevice.setup.revent
+ mydevice.run.revent
+
+Any revent file in the dependencies will always overwrite the revent file in the
+workload directory. So it is possible for example to just provide one revent for
+setup in the dependencies and use the run.revent that is in the workload directory.
+
+Adding an Instrument
+====================
+
+Instruments can be used to collect additional measurements during workload
+execution (e.g. collect power readings). An instrument can hook into almost any
+stage of workload execution. A typical instrument would implement a subset of
+the following interface::
+
+ class Instrument(Extension):
+
+ name = None
+ description = None
+
+ parameters = [
+ ]
+
+ def initialize(self, context):
+ pass
+
+ def setup(self, context):
+ pass
+
+ def start(self, context):
+ pass
+
+ def stop(self, context):
+ pass
+
+ def update_result(self, context):
+ pass
+
+ def teardown(self, context):
+ pass
+
+ def finalize(self, context):
+ pass
+
+This is similar to a Workload, except all methods are optional. In addition to
+the workload-like methods, instruments can define a number of other methods that
+will get invoked at various points during run execution. The most useful of
+which is perhaps ``initialize`` that gets invoked after the device has been
+initialised for the first time, and can be used to perform one-time setup (e.g.
+copying files to the device -- there is no point in doing that for each
+iteration). The full list of available methods can be found in
+:ref:`Signals Documentation <instrument_name_mapping>`.
+
+
+Prioritization
+--------------
+
+Callbacks (e.g. ``setup()`` methods) for all instrumentation get executed at the
+same point during workload execution, one after another. The order in which the
+callbacks get invoked should be considered arbitrary and should not be relied
+on (e.g. you cannot expect that just because instrument A is listed before
+instrument B in the config, instrument A's callbacks will run first).
+
+In some cases (e.g. in ``start()`` and ``stop()`` methods), it is important to
+ensure that a particular instrument's callbacks run a closely as possible to the
+workload's invocations in order to maintain accuracy of readings; or,
+conversely, that a callback is executed after the others, because it takes a
+long time and may throw off the accuracy of other instrumentation. You can do
+this by prepending ``fast_`` or ``slow_`` to your callbacks' names. For
+example::
+
+    class PreciseInstrument(Instrument):
+
+ # ...
+
+ def fast_start(self, context):
+ pass
+
+ def fast_stop(self, context):
+ pass
+
+ # ...
+
+``PreciseInstrument`` will be started after all other instrumentation (i.e.
+*just* before the workload runs), and it will be stopped before all other
+instrumentation (i.e. *just* after the workload runs). It is also possible to
+use ``very_fast_`` and ``very_slow_`` prefixes when you want to be really
+sure that your callback will be the last/first to run.
+
+If more than one active instrument has specified fast (or slow) callbacks, then
+their execution order with respect to each other is not guaranteed. In general,
+having a lot of instrumentation enabled is going to necessarily affect the
+readings. The best way to ensure accuracy of measurements is to minimize the
+number of active instruments (perhaps doing several identical runs with
+different instruments enabled).
+
+Example
+-------
+
+Below is a simple instrument that measures the execution time of a workload::
+
+ class ExecutionTimeInstrument(Instrument):
+ """
+ Measure how long it took to execute the run() methods of a Workload.
+
+ """
+
+ name = 'execution_time'
+
+ def initialize(self, context):
+ self.start_time = None
+ self.end_time = None
+
+ def fast_start(self, context):
+ self.start_time = time.time()
+
+ def fast_stop(self, context):
+ self.end_time = time.time()
+
+ def update_result(self, context):
+ execution_time = self.end_time - self.start_time
+ context.result.add_metric('execution_time', execution_time, 'seconds')
+
+
+Adding a Result Processor
+=========================
+
+A result processor is responsible for processing the results. This may
+involve formatting and writing them to a file, uploading them to a database,
+generating plots, etc. WA comes with a few result processors that output
+results in a few common formats (such as csv or JSON).
+
+You can add your own result processors by creating a Python file in
+``~/.workload_automation/result_processors`` with a class that derives from
+:class:`wlauto.core.result.ResultProcessor`, which has the following interface::
+
+ class ResultProcessor(Extension):
+
+ name = None
+ description = None
+
+ parameters = [
+ ]
+
+ def initialize(self, context):
+ pass
+
+ def process_iteration_result(self, result, context):
+ pass
+
+ def export_iteration_result(self, result, context):
+ pass
+
+ def process_run_result(self, result, context):
+ pass
+
+ def export_run_result(self, result, context):
+ pass
+
+ def finalize(self, context):
+ pass
+
+
+The method names should be fairly self-explanatory. The difference between
+"process" and "export" methods is that export methods will be invoked after
+process methods for all result processors have been invoked. Process methods
+may generate additional artifacts (metrics, files, etc), while export methods
+should not -- they should only handle existing results (upload them to a
+database, archive on a filer, etc).
+
+The result object passed to iteration methods is an instance of
+:class:`wlauto.core.result.IterationResult`, the result object passed to run
+methods is an instance of :class:`wlauto.core.result.RunResult`. Please refer to
+their API documentation for details.
+
+Example
+-------
+
+Here is an example result processor that formats the results as a column-aligned
+table::
+
+ import os
+ from wlauto import ResultProcessor
+ from wlauto.utils.misc import write_table
+
+
+ class Table(ResultProcessor):
+
+ name = 'table'
+        description = 'Generates a text file containing a column-aligned table with run results.'
+
+ def process_run_result(self, result, context):
+ rows = []
+ for iteration_result in result.iteration_results:
+ for metric in iteration_result.metrics:
+ rows.append([metric.name, str(metric.value), metric.units or '',
+ metric.lower_is_better and '-' or '+'])
+
+ outfile = os.path.join(context.output_directory, 'table.txt')
+ with open(outfile, 'w') as wfh:
+ write_table(rows, wfh)
+
+
+Adding a Resource Getter
+========================
+
+A resource getter is a new extension type added in version 2.1.3. A resource
+getter implements a method of acquiring resources of a particular type (such as
+APK files or additional workload assets). Resource getters are invoked in
+priority order until one returns the desired resource.
+
+If you want WA to look for resources somewhere it doesn't by default (e.g. you
+have a repository of APK files), you can implement a getter for the resource and
+register it with a higher priority than the standard WA getters, so that it gets
+invoked first.
+
+Instances of a resource getter should implement the following interface::
+
+ class ResourceGetter(Extension):
+
+ name = None
+ resource_type = None
+ priority = GetterPriority.environment
+
+ def get(self, resource, **kwargs):
+ raise NotImplementedError()
+
+The getter should define a name (as with all extensions), a resource
+type, which should be a string, e.g. ``'jar'``, and a priority (see `Getter
+Prioritization`_ below). In addition, the ``get`` method should be implemented. The
+first argument is an instance of :class:`wlauto.core.resource.Resource`
+representing the resource that should be obtained. Additional keyword
+arguments may be used by the invoker to provide additional information about
+the resource. This method should return an instance of the resource that
+has been discovered (what "instance" means depends on the resource, e.g. it
+could be a file path), or ``None`` if this getter was unable to discover
+that resource.
+
+Getter Prioritization
+---------------------
+
+A priority is an integer with higher numeric values indicating a higher
+priority. The following standard priority aliases are defined for getters:
+
+
+ :cached: The cached version of the resource. Look here first. This priority also implies
+ that the resource at this location is a "cache" and is not the only version of the
+ resource, so it may be cleared without losing access to the resource.
+ :preferred: Take this resource in favour of the environment resource.
+ :environment: Found somewhere under ~/.workload_automation/ or equivalent, or
+ from environment variables, external configuration files, etc.
+ These will override resource supplied with the package.
+ :package: Resource provided with the package.
+ :remote: Resource will be downloaded from a remote location (such as an HTTP server
+ or a samba share). Try this only if no other getter was successful.
+
+These priorities are defined as class members of
+:class:`wlauto.core.resource.GetterPriority`, e.g. ``GetterPriority.cached``.
+
+Most getters in WA will be registered with either ``environment`` or
+``package`` priorities. So if you want your getter to override the default, it
+should typically be registered as ``preferred``.
+
+You don't have to stick to standard priority levels (though you should, unless
+there is a good reason). Any integer is a valid priority. The standard priorities
+range from -20 to 20 in increments of 10.
+
+Example
+-------
+
+The following is an implementation of a getter for a workload APK file that
+looks for the file under
+``~/.workload_automation/dependencies/<workload_name>``::
+
+ import os
+ import glob
+
+ from wlauto import ResourceGetter, GetterPriority, settings
+ from wlauto.exceptions import ResourceError
+
+
+ class EnvironmentApkGetter(ResourceGetter):
+
+ name = 'environment_apk'
+ resource_type = 'apk'
+ priority = GetterPriority.environment
+
+        def get(self, resource, **kwargs):
+ resource_dir = _d(os.path.join(settings.dependency_directory, resource.owner.name))
+ version = kwargs.get('version')
+ found_files = glob.glob(os.path.join(resource_dir, '*.apk'))
+ if version:
+ found_files = [ff for ff in found_files if version.lower() in ff.lower()]
+ if len(found_files) == 1:
+ return found_files[0]
+ elif not found_files:
+ return None
+ else:
+ raise ResourceError('More than one .apk found in {} for {}.'.format(resource_dir,
+ resource.owner.name))
+
+.. _adding_a_device:
+
+Adding a Device
+===============
+
+At the moment, only Android devices are supported. Most of the functionality for
+interacting with a device is implemented in
+:class:`wlauto.common.AndroidDevice` and is exposed through ``generic_android``
+device interface, which should suffice for most purposes. The most common area
+where custom functionality may need to be implemented is during device
+initialization. Usually, once the device gets to the Android home screen, it's
+just like any other Android device (modulo things like differences between
+Android versions).
+
+If your device does not work with the ``generic_device`` interface and you need
+to write a custom interface to handle it, you would do that by subclassing
+``AndroidDevice`` and then just overriding the methods you need. Typically you
+will want to override one or more of the following:
+
+reset
+ Trigger a device reboot. The default implementation just sends ``adb
+ reboot`` to the device. If this command does not work, an alternative
+ implementation may need to be provided.
+
+hard_reset
+ This is a harsher reset that involves cutting the power to a device
+ (e.g. holding down power button or removing battery from a phone). The
+ default implementation is a no-op that just sets some internal flags. If
+ you're dealing with unreliable prototype hardware that can crash and
+ become unresponsive, you may want to implement this in order for WA to
+ be able to recover automatically.
+
+connect
+ When this method returns, adb connection to the device has been
+ established. This gets invoked after a reset. The default implementation
+ just waits for the device to appear in the adb list of connected
+ devices. If this is not enough (e.g. your device is connected via
+ Ethernet and requires an explicit ``adb connect`` call), you may wish to
+ override this to perform the necessary actions before invoking the
+ ``AndroidDevice``\ s version.
+
+init
+ This gets called once at the beginning of the run once the connection to
+ the device has been established. There is no default implementation.
+ It's there to allow whatever custom initialisation may need to be
+ performed for the device (setting properties, configuring services,
+ etc).
+
+Please refer to the API documentation for :class:`wlauto.common.AndroidDevice`
+for the full list of its methods and their functionality.
+
+
+Other Extension Types
+=====================
+
+In addition to extension types covered above, there are few other, more
+specialized ones. They will not be covered in as much detail. Most of them
+expose relatively simple interfaces with only a couple of methods and it is
+expected that if the need arises to extend them, the API-level documentation
+that accompanies them, in addition to what has been outlined here, should
+provide enough guidance.
+
+:commands: This allows extending WA with additional sub-commands (to supplement
+           existing ones outlined in the :ref:`invocation` section).
+:modules: Modules are "extensions for extensions". They can be loaded by other
+ extensions to expand their functionality (for example, a flashing
+           module may be loaded by a device in order to support flashing).
+
+
+Packaging Your Extensions
+=========================
+
+If you have written a bunch of extensions, and you want to make it easy to
+deploy them to new systems and/or to update them on existing systems, you can
+wrap them in a Python package. You can use ``wa create package`` command to
+generate appropriate boiler plate. This will create a ``setup.py`` and a
+directory for your package that you can place your extensions into.
+
+For example, if you have a workload inside ``my_workload.py`` and a result
+processor in ``my_result_processor.py``, and you want to package them as
+``my_wa_exts`` package, first run the create command ::
+
+ wa create package my_wa_exts
+
+This will create a ``my_wa_exts`` directory which contains a
+``my_wa_exts/setup.py`` and a subdirectory ``my_wa_exts/my_wa_exts`` which is
+the package directory for your extensions (you can rename the top-level
+``my_wa_exts`` directory to anything you like -- it's just a "container" for the
+setup.py and the package directory). Once you have that, you can then copy your
+extensions into the package directory, creating
+``my_wa_exts/my_wa_exts/my_workload.py`` and
+``my_wa_exts/my_wa_exts/my_result_processor.py``. If you have a lot of
+extensions, you might want to organize them into subpackages, but only the
+top-level package directory is created by default, and it is OK to have
+everything in there.
+
+.. note:: When discovering extensions through this mechanism, WA traverses the
+          Python module/submodule tree, not the directory structure, therefore,
+          if you are going to create subdirectories under the top level directory
+          created for you, it is important that you make sure they are valid
+ Python packages; i.e. each subdirectory must contain a __init__.py
+ (even if blank) in order for the code in that directory and its
+ subdirectories to be discoverable.
+
+At this stage, you may want to edit ``params`` structure near the bottom of
+the ``setup.py`` to add correct author, license and contact information (see
+"Writing the Setup Script" section in standard Python documentation for
+details). You may also want to add a README and/or a COPYING file at the same
+level as the setup.py. Once you have the contents of your package sorted,
+you can generate the package by running ::
+
+ cd my_wa_exts
+ python setup.py sdist
+
+This will generate ``my_wa_exts/dist/my_wa_exts-0.0.1.tar.gz`` package which
+can then be deployed on the target system with standard Python package
+management tools, e.g. ::
+
+ sudo pip install my_wa_exts-0.0.1.tar.gz
+
+As part of the installation process, the setup.py in the package will write the
+package's name into ``~/.workload_automation/packages``. This will tell WA that
+the package contains extensions and it will load them next time it runs.
+
+.. note:: There are no uninstall hooks in ``setuptools``, so if you ever
+          uninstall your WA extensions package, you will have to manually remove
+          it from ``~/.workload_automation/packages`` otherwise WA will complain
+          about a missing package next time you try to run it.
diff --git a/extras/README b/extras/README
new file mode 100644
index 00000000..9dbb3499
--- /dev/null
+++ b/extras/README
@@ -0,0 +1,12 @@
+This directory is intended for miscellaneous extra stuff that may be useful while developing
+Workload Automation. It should *NOT* contain anything necessary for *using* workload automation.
+Whenever you add something to this directory, please also add a short description of what it is in
+this file.
+
+pylintrc
+ pylint configuration file set up for WA development (see comment at the top of the file
+ for how to use).
+
+walog.vim
+ Vim syntax file for WA logs; adds highlighting similar to what comes out
+ in the console. See comment in the file for how to enable it.
diff --git a/extras/pylintrc b/extras/pylintrc
new file mode 100644
index 00000000..99e2b8f1
--- /dev/null
+++ b/extras/pylintrc
@@ -0,0 +1,70 @@
+#
+# pylint configuration for Workload Automation.
+#
+# To install pylint run
+#
+# sudo apt-get install pylint
+#
+# copy this file to ~/.pylintrc in order for pylint to pick it up.
+# (Or alternatively, specify it with --rcfile option on invocation.)
+#
+# Note: If you're adding something to disable setting, please also add the
+# explanation of the code in the comment above it. Messages should only
+# be added here we really don't *ever* care about them. For ignoring
+# messages on specific lines or in specific files, add the appropriate
+# pylint disable clause in the source.
+#
+[MASTER]
+
+profile=no
+
+ignore=external
+
+[MESSAGES CONTROL]
+# Disable the following messages:
+# C0301: Line too long (%s/%s)
+# C0103: Invalid name "%s" (should match %s)
+# C0111: Missing docstring
+# W0142 - Used * or ** magic
+# R0903: Too few public methods
+# R0904: Too many public methods
+# R0922: Abstract class is only referenced 1 times
+# W0511: TODO Note: this is disabled for a cleaner output, but should be reenabled
+# occasionally (through command line argument) to make sure all
+# TODO's are addressed, e.g. before a release.
+# W0141: Used builtin function (map|filter)
+# I0011: Locally disabling %s
+# R0921: %s: Abstract class not referenced
+# Note: this needs to be in the rc file due to a known bug in pylint:
+# http://www.logilab.org/ticket/111138
+# W1401: anomalous-backslash-in-string, due to:
+# https://bitbucket.org/logilab/pylint/issue/272/anomalous-backslash-in-string-for-raw
+# C0330: bad continuation, due to:
+# https://bitbucket.org/logilab/pylint/issue/232/wrong-hanging-indentation-false-positive
+disable=C0301,C0103,C0111,W0142,R0903,R0904,R0922,W0511,W0141,I0011,R0921,W1401,C0330
+
+[FORMAT]
+max-module-lines=4000
+
+[DESIGN]
+
+# We have DeviceConfig classes that are basically just repositories of configuration
+# settings.
+max-args=30
+max-attributes=30
+
+
+[SIMILARITIES]
+
+min-similarity-lines=10
+
+[REPORTS]
+
+output-format=colorized
+
+reports=no
+
+[IMPORTS]
+
+# Parts of string are not deprecated. Throws too many false positives.
+deprecated-modules=
diff --git a/extras/walog.vim b/extras/walog.vim
new file mode 100644
index 00000000..a9b79617
--- /dev/null
+++ b/extras/walog.vim
@@ -0,0 +1,21 @@
+" Copy this into ~/.vim/syntax/ and add the following to your ~/.vimrc:
+" au BufRead,BufNewFile run.log set filetype=walog
+"
+if exists("b:current_syntax")
+ finish
+endif
+
+syn region debugPreamble start='\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d DEBUG' end=':'
+syn region infoPreamble start='\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d INFO' end=':'
+syn region warningPreamble start='\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d WARNING' end=':'
+syn region errorPreamble start='\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d ERROR' end=':'
+syn region critPreamble start='\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d CRITICAL' end=':'
+
+hi debugPreamble guifg=Blue ctermfg=DarkBlue
+hi infoPreamble guifg=Green ctermfg=DarkGreen
+hi warningPreamble guifg=Yellow ctermfg=178
+hi errorPreamble guifg=Red ctermfg=DarkRed
+hi critPreamble guifg=Red ctermfg=DarkRed cterm=bold gui=bold
+
+let b:current_syntax='walog'
+
diff --git a/scripts/create_workload b/scripts/create_workload
new file mode 100644
index 00000000..c32f9ce2
--- /dev/null
+++ b/scripts/create_workload
@@ -0,0 +1,17 @@
+#!/bin/bash
+# $Copyright:
+# ----------------------------------------------------------------
+# This confidential and proprietary software may be used only as
+# authorised by a licensing agreement from ARM Limited
+# (C) COPYRIGHT 2013 ARM Limited
+# ALL RIGHTS RESERVED
+# The entire notice above must be reproduced on all authorised
+# copies and copies may only be made to the extent permitted
+# by a licensing agreement from ARM Limited.
+# ----------------------------------------------------------------
+# File: create_workload
+# ----------------------------------------------------------------
+# $
+#
+wa create workload $@
+
diff --git a/scripts/list_extensions b/scripts/list_extensions
new file mode 100644
index 00000000..08b65aad
--- /dev/null
+++ b/scripts/list_extensions
@@ -0,0 +1,16 @@
+#!/bin/bash
+# $Copyright:
+# ----------------------------------------------------------------
+# This confidential and proprietary software may be used only as
+# authorised by a licensing agreement from ARM Limited
+# (C) COPYRIGHT 2013 ARM Limited
+# ALL RIGHTS RESERVED
+# The entire notice above must be reproduced on all authorised
+# copies and copies may only be made to the extent permitted
+# by a licensing agreement from ARM Limited.
+# ----------------------------------------------------------------
+# File: list_extensions
+# ----------------------------------------------------------------
+# $
+#
+wa list $@
diff --git a/scripts/run_workloads b/scripts/run_workloads
new file mode 100644
index 00000000..616f076c
--- /dev/null
+++ b/scripts/run_workloads
@@ -0,0 +1,17 @@
+#!/bin/bash
+# $Copyright:
+# ----------------------------------------------------------------
+# This confidential and proprietary software may be used only as
+# authorised by a licensing agreement from ARM Limited
+# (C) COPYRIGHT 2013 ARM Limited
+# ALL RIGHTS RESERVED
+# The entire notice above must be reproduced on all authorised
+# copies and copies may only be made to the extent permitted
+# by a licensing agreement from ARM Limited.
+# ----------------------------------------------------------------
+# File: run_workloads
+# ----------------------------------------------------------------
+# $
+#
+wa run $@
+
diff --git a/scripts/wa b/scripts/wa
new file mode 100644
index 00000000..a7942e5a
--- /dev/null
+++ b/scripts/wa
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+# $Copyright:
+# ----------------------------------------------------------------
+# This confidential and proprietary software may be used only as
+# authorised by a licensing agreement from ARM Limited
+# (C) COPYRIGHT 2013 ARM Limited
+# ALL RIGHTS RESERVED
+# The entire notice above must be reproduced on all authorised
+# copies and copies may only be made to the extent permitted
+# by a licensing agreement from ARM Limited.
+# ----------------------------------------------------------------
+# File:         wa
+# ----------------------------------------------------------------
+# $
+#
+from wlauto.core.entry_point import main
+main()
diff --git a/setup.py b/setup.py
new file mode 100644
index 00000000..4eb13f98
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,96 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import sys
+import warnings
+from itertools import chain
+
+try:
+ from setuptools import setup
+except ImportError:
+ from distutils.core import setup
+
+sys.path.insert(0, './wlauto/core/')
+from version import get_wa_version
+
+# happens if falling back to distutils
+warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'")
+warnings.filterwarnings('ignore', "Unknown distribution option: 'extras_require'")
+
+try:
+ os.remove('MANIFEST')
+except OSError:
+ pass
+
+packages = []
+data_files = {}
+source_dir = os.path.dirname(__file__)
+for root, dirs, files in os.walk('wlauto'):
+ rel_dir = os.path.relpath(root, source_dir)
+ data = []
+ if '__init__.py' in files:
+ for f in files:
+ if os.path.splitext(f)[1] not in ['.py', '.pyc', '.pyo']:
+ data.append(f)
+ package_name = rel_dir.replace(os.sep, '.')
+ package_dir = root
+ packages.append(package_name)
+ data_files[package_name] = data
+ else:
+ # use previous package name
+ filepaths = [os.path.join(root, f) for f in files]
+ data_files[package_name].extend([os.path.relpath(f, package_dir) for f in filepaths])
+
+scripts = [os.path.join('scripts', s) for s in os.listdir('scripts')]
+
+params = dict(
+ name='wlauto',
+ description='A framework for automating workload execution and measurment collection on ARM devices.',
+ version=get_wa_version(),
+ packages=packages,
+ package_data=data_files,
+ scripts=scripts,
+ url='N/A',
+ license='Apache v2',
+ maintainer='ARM Architecture & Technology Device Lab',
+ maintainer_email='workload-automation@arm.com',
+ install_requires=[
+ 'python-dateutil', # converting between UTC and local time.
+        'pexpect>=3.3',  # Send/receive to/from device
+ 'pyserial', # Serial port interface
+ 'colorama', # Printing with colors
+ 'pyYAML', # YAML-formatted agenda parsing
+ ],
+ extras_require={
+ 'other': ['jinja2', 'pandas>=0.13.1'],
+ 'test': ['nose'],
+ 'mongodb': ['pymongo'],
+ 'doc': ['sphinx'],
+ },
+ # https://pypi.python.org/pypi?%3Aaction=list_classifiers
+ classifiers=[
+ 'Development Status :: 4 - Beta',
+ 'Environment :: Console',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Operating System :: POSIX :: Linux',
+ 'Programming Language :: Python :: 2.7',
+ ],
+)
+
+all_extras = list(chain(params['extras_require'].itervalues()))
+params['extras_require']['everything'] = all_extras
+
+setup(**params)
diff --git a/wlauto/__init__.py b/wlauto/__init__.py
new file mode 100644
index 00000000..0e31686c
--- /dev/null
+++ b/wlauto/__init__.py
@@ -0,0 +1,36 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from wlauto.core.bootstrap import settings # NOQA
+from wlauto.core.device import Device, RuntimeParameter, CoreParameter # NOQA
+from wlauto.core.command import Command # NOQA
+from wlauto.core.workload import Workload # NOQA
+from wlauto.core.extension import Module, Parameter, Artifact, Alias # NOQA
+from wlauto.core.extension_loader import ExtensionLoader # NOQA
+from wlauto.core.instrumentation import Instrument # NOQA
+from wlauto.core.result import ResultProcessor, IterationResult # NOQA
+from wlauto.core.resource import ResourceGetter, Resource, GetterPriority, NO_ONE # NOQA
+from wlauto.core.exttype import get_extension_type # NOQA Note: MUST be imported after other core imports.
+
+from wlauto.common.resources import File, ExtensionAsset, Executable
+from wlauto.common.linux.device import LinuxDevice # NOQA
+from wlauto.common.android.device import AndroidDevice, BigLittleDevice # NOQA
+from wlauto.common.android.resources import ApkFile, JarFile
+from wlauto.common.android.workload import (UiAutomatorWorkload, ApkWorkload, AndroidBenchmark, # NOQA
+ AndroidUiAutoBenchmark, GameWorkload) # NOQA
+
+from wlauto.core.version import get_wa_version
+
+__version__ = get_wa_version()
diff --git a/wlauto/agenda-example-biglittle.yaml b/wlauto/agenda-example-biglittle.yaml
new file mode 100644
index 00000000..eea89213
--- /dev/null
+++ b/wlauto/agenda-example-biglittle.yaml
@@ -0,0 +1,79 @@
+# This agenda specifies configuration that may be used for regression runs
+# on big.LITTLE systems. This agenda will work with a TC2 device configured as
+# described in the documentation.
+config:
+ device: tc2
+ run_name: big.LITTLE_regression
+global:
+ iterations: 5
+sections:
+ - id: mp_a15only
+ boot_parameters:
+ os_mode: mp_a15_only
+ runtime_parameters:
+ a15_governor: interactive
+ a15_governor_tunables:
+ above_hispeed_delay: 20000
+ - id: mp_a7bc
+ boot_parameters:
+ os_mode: mp_a7_bootcluster
+ runtime_parameters:
+ a7_governor: interactive
+ a7_min_frequency: 500000
+ a7_governor_tunables:
+ above_hispeed_delay: 20000
+ a15_governor: interactive
+ a15_governor_tunables:
+ above_hispeed_delay: 20000
+ - id: mp_a15bc
+ boot_parameters:
+ os_mode: mp_a15_bootcluster
+ runtime_parameters:
+ a7_governor: interactive
+ a7_min_frequency: 500000
+ a7_governor_tunables:
+ above_hispeed_delay: 20000
+ a15_governor: interactive
+ a15_governor_tunables:
+ above_hispeed_delay: 20000
+workloads:
+ - id: b01
+ name: andebench
+ workload_parameters:
+ number_of_threads: 5
+ - id: b02
+ name: andebench
+ label: andebenchst
+ workload_parameters:
+ number_of_threads: 1
+ - id: b03
+ name: antutu
+ label: antutu4.0.3
+ workload_parameters:
+ version: 4.0.3
+ - id: b04
+ name: benchmarkpi
+ - id: b05
+ name: caffeinemark
+ - id: b06
+ name: cfbench
+ - id: b07
+ name: geekbench
+ label: geekbench3
+ workload_parameters:
+ version: 3
+ - id: b08
+ name: linpack
+ - id: b09
+ name: quadrant
+ - id: b10
+ name: smartbench
+ - id: b11
+ name: sqlite
+ - id: b12
+ name: vellamo
+
+ - id: w01
+ name: bbench_with_audio
+ - id: w02
+ name: audio
diff --git a/wlauto/agenda-example-tutorial.yaml b/wlauto/agenda-example-tutorial.yaml
new file mode 100644
index 00000000..6eb2b9a1
--- /dev/null
+++ b/wlauto/agenda-example-tutorial.yaml
@@ -0,0 +1,43 @@
+# This is an agenda that is built up during the explanation of the agenda features
+# in the documentation. This should work out-of-the box on most rooted Android
+# devices.
+config:
+ project: governor_comparison
+ run_name: performance_vs_interactive
+
+ device: generic_android
+ reboot_policy: never
+
+ instrumentation: [coreutil, cpufreq]
+ coreutil:
+ threshold: 80
+ sysfs_extractor:
+ paths: [/proc/meminfo]
+ result_processors: [sqlite]
+ sqlite:
+ database: ~/my_wa_results.sqlite
+global:
+ iterations: 5
+sections:
+ - id: perf
+ runtime_params:
+ sysfile_values:
+ /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
+ - id: inter
+ runtime_params:
+ sysfile_values:
+ /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: interactive
+workloads:
+ - id: 01_dhry
+ name: dhrystone
+ label: dhrystone_15over6
+ workload_params:
+ threads: 6
+ mloops: 15
+ - id: 02_memc
+ name: memcpy
+ instrumentation: [sysfs_extractor]
+ - id: 03_cycl
+ name: cyclictest
+ iterations: 10
+
diff --git a/wlauto/commands/__init__.py b/wlauto/commands/__init__.py
new file mode 100644
index 00000000..16224d6f
--- /dev/null
+++ b/wlauto/commands/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
diff --git a/wlauto/commands/create.py b/wlauto/commands/create.py
new file mode 100644
index 00000000..6db925c6
--- /dev/null
+++ b/wlauto/commands/create.py
@@ -0,0 +1,300 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+import stat
+import string
+import textwrap
+import argparse
+import shutil
+import getpass
+
+from wlauto import ExtensionLoader, Command, settings
+from wlauto.exceptions import CommandError
+from wlauto.utils.cli import init_argument_parser
+from wlauto.utils.misc import (capitalize, check_output,
+ ensure_file_directory_exists as _f, ensure_directory_exists as _d)
+from wlauto.utils.types import identifier
+from wlauto.utils.doc import format_body
+
+
+__all__ = ['create_workload']
+
+
+TEMPLATES_DIR = os.path.join(os.path.dirname(__file__), 'templates')
+
+UIAUTO_BUILD_SCRIPT = """#!/bin/bash
+
+class_dir=bin/classes/com/arm/wlauto/uiauto
+base_class=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')"`
+mkdir -p $$class_dir
+cp $$base_class $$class_dir
+
+ant build
+
+if [[ -f bin/${package_name}.jar ]]; then
+ cp bin/${package_name}.jar ..
+fi
+"""
+
+
+class CreateSubcommand(object):
+
+ name = None
+ help = None
+ usage = None
+ description = None
+ epilog = None
+ formatter_class = None
+
+ def __init__(self, logger, subparsers):
+ self.logger = logger
+ self.group = subparsers
+ parser_params = dict(help=(self.help or self.description), usage=self.usage,
+ description=format_body(textwrap.dedent(self.description), 80),
+ epilog=self.epilog)
+ if self.formatter_class:
+ parser_params['formatter_class'] = self.formatter_class
+ self.parser = subparsers.add_parser(self.name, **parser_params)
+ init_argument_parser(self.parser) # propagate top-level options
+ self.initialize()
+
+ def initialize(self):
+ pass
+
+
+class CreateWorkloadSubcommand(CreateSubcommand):
+
+ name = 'workload'
+ description = '''Create a new workload. By default, a basic workload template will be
+ used but you can use options to specify a different template.'''
+
+ def initialize(self):
+ self.parser.add_argument('name', metavar='NAME',
+ help='Name of the workload to be created')
+ self.parser.add_argument('-p', '--path', metavar='PATH', default=None,
+ help='The location at which the workload will be created. If not specified, ' +
+ 'this defaults to "~/.workload_automation/workloads".')
+ self.parser.add_argument('-f', '--force', action='store_true',
+ help='Create the new workload even if a workload with the specified ' +
+ 'name already exists.')
+
+ template_group = self.parser.add_mutually_exclusive_group()
+ template_group.add_argument('-A', '--android-benchmark', action='store_true',
+ help='Use android benchmark template. This template allows you to specify ' +
+ ' an APK file that will be installed and run on the device. You should ' +
+ ' place the APK file into the workload\'s directory at the same level ' +
+ 'as the __init__.py.')
+ template_group.add_argument('-U', '--ui-automation', action='store_true',
+ help='Use UI automation template. This template generates a UI automation ' +
+ 'Android project as well as the Python class. This a more general ' +
+ 'version of the android benchmark template that makes no assumptions ' +
+ 'about the nature of your workload, apart from the fact that you need ' +
+ 'UI automation. If you need to install an APK, start an app on device, ' +
+ 'etc., you will need to do that explicitly in your code.')
+ template_group.add_argument('-B', '--android-uiauto-benchmark', action='store_true',
+ help='Use android uiauto benchmark template. This generates a UI automation ' +
+ 'project as well as a Python class. This template should be used ' +
+ 'if you have a APK file that needs to be run on the device. You ' +
+ 'should place the APK file into the workload\'s directory at the ' +
+ 'same level as the __init__.py.')
+
+ def execute(self, args): # pylint: disable=R0201
+ where = args.path or 'local'
+ check_name = not args.force
+
+ if args.android_benchmark:
+ kind = 'android'
+ elif args.ui_automation:
+ kind = 'uiauto'
+ elif args.android_uiauto_benchmark:
+ kind = 'android_uiauto'
+ else:
+ kind = 'basic'
+
+ try:
+ create_workload(args.name, kind, where, check_name)
+ except CommandError, e:
+ print "ERROR:", e
+
+
+class CreatePackageSubcommand(CreateSubcommand):
+
+ name = 'package'
+ description = '''Create a new empty Python package for WA extensions. On installation,
+ this package will "advertise" itself to WA so that Extensions with in it will
+ be loaded by WA when it runs.'''
+
+ def initialize(self):
+ self.parser.add_argument('name', metavar='NAME',
+ help='Name of the package to be created')
+ self.parser.add_argument('-p', '--path', metavar='PATH', default=None,
+ help='The location at which the new pacakge will be created. If not specified, ' +
+ 'current working directory will be used.')
+ self.parser.add_argument('-f', '--force', action='store_true',
+ help='Create the new package even if a file or directory with the same name '
+ 'already exists at the specified location.')
+
+ def execute(self, args): # pylint: disable=R0201
+ package_dir = args.path or os.path.abspath('.')
+ template_path = os.path.join(TEMPLATES_DIR, 'setup.template')
+ self.create_extensions_package(package_dir, args.name, template_path, args.force)
+
+ def create_extensions_package(self, location, name, setup_template_path, overwrite=False):
+ package_path = os.path.join(location, name)
+ if os.path.exists(package_path):
+ if overwrite:
+ self.logger.info('overwriting existing "{}"'.format(package_path))
+ shutil.rmtree(package_path)
+ else:
+ raise CommandError('Location "{}" already exists.'.format(package_path))
+ actual_package_path = os.path.join(package_path, name)
+ os.makedirs(actual_package_path)
+ setup_text = render_template(setup_template_path, {'package_name': name, 'user': getpass.getuser()})
+ with open(os.path.join(package_path, 'setup.py'), 'w') as wfh:
+ wfh.write(setup_text)
+ touch(os.path.join(actual_package_path, '__init__.py'))
+
+
+class CreateCommand(Command):
+
+ name = 'create'
+ description = '''Used to create various WA-related objects (see positional arguments list for what
+ objects may be created).\n\nUse "wa create <object> -h" for object-specific arguments.'''
+ formatter_class = argparse.RawDescriptionHelpFormatter
+ subcmd_classes = [CreateWorkloadSubcommand, CreatePackageSubcommand]
+
+ def initialize(self):
+ subparsers = self.parser.add_subparsers(dest='what')
+ self.subcommands = [] # pylint: disable=W0201
+ for subcmd_cls in self.subcmd_classes:
+ subcmd = subcmd_cls(self.logger, subparsers)
+ self.subcommands.append(subcmd)
+
+ def execute(self, args):
+ for subcmd in self.subcommands:
+ if subcmd.name == args.what:
+ subcmd.execute(args)
+ break
+ else:
+ raise CommandError('Not a valid create parameter: {}'.format(args.name))
+
+
+def create_workload(name, kind='basic', where='local', check_name=True, **kwargs):
+ if check_name:
+ extloader = ExtensionLoader(packages=settings.extension_packages, paths=settings.extension_paths)
+ if name in [wl.name for wl in extloader.list_workloads()]:
+ raise CommandError('Workload with name "{}" already exists.'.format(name))
+
+ class_name = get_class_name(name)
+ if where == 'local':
+ workload_dir = _d(os.path.join(settings.environment_root, 'workloads', name))
+ else:
+ workload_dir = _d(os.path.join(where, name))
+
+ if kind == 'basic':
+ create_basic_workload(workload_dir, name, class_name, **kwargs)
+ elif kind == 'uiauto':
+ create_uiautomator_workload(workload_dir, name, class_name, **kwargs)
+ elif kind == 'android':
+ create_android_benchmark(workload_dir, name, class_name, **kwargs)
+ elif kind == 'android_uiauto':
+ create_android_uiauto_benchmark(workload_dir, name, class_name, **kwargs)
+ else:
+ raise CommandError('Unknown workload type: {}'.format(kind))
+
+ print 'Workload created in {}'.format(workload_dir)
+
+
+def create_basic_workload(path, name, class_name):
+ source_file = os.path.join(path, '__init__.py')
+ with open(source_file, 'w') as wfh:
+ wfh.write(render_template('basic_workload', {'name': name, 'class_name': class_name}))
+
+
+def create_uiautomator_workload(path, name, class_name):
+ uiauto_path = _d(os.path.join(path, 'uiauto'))
+ create_uiauto_project(uiauto_path, name)
+ source_file = os.path.join(path, '__init__.py')
+ with open(source_file, 'w') as wfh:
+ wfh.write(render_template('uiauto_workload', {'name': name, 'class_name': class_name}))
+
+
+def create_android_benchmark(path, name, class_name):
+ source_file = os.path.join(path, '__init__.py')
+ with open(source_file, 'w') as wfh:
+ wfh.write(render_template('android_benchmark', {'name': name, 'class_name': class_name}))
+
+
+def create_android_uiauto_benchmark(path, name, class_name):
+ uiauto_path = _d(os.path.join(path, 'uiauto'))
+ create_uiauto_project(uiauto_path, name)
+ source_file = os.path.join(path, '__init__.py')
+ with open(source_file, 'w') as wfh:
+ wfh.write(render_template('android_uiauto_benchmark', {'name': name, 'class_name': class_name}))
+
+
+def create_uiauto_project(path, name, target='1'):
+ sdk_path = get_sdk_path()
+ android_path = os.path.join(sdk_path, 'tools', 'android')
+ package_name = 'com.arm.wlauto.uiauto.' + name.lower()
+
+ # ${ANDROID_HOME}/tools/android create uitest-project -n com.arm.wlauto.uiauto.linpack -t 1 -p ../test2
+ command = '{} create uitest-project --name {} --target {} --path {}'.format(android_path,
+ package_name,
+ target,
+ path)
+ check_output(command, shell=True)
+
+ build_script = os.path.join(path, 'build.sh')
+ with open(build_script, 'w') as wfh:
+ template = string.Template(UIAUTO_BUILD_SCRIPT)
+ wfh.write(template.substitute({'package_name': package_name}))
+ os.chmod(build_script, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
+
+ source_file = _f(os.path.join(path, 'src',
+ os.sep.join(package_name.split('.')[:-1]),
+ 'UiAutomation.java'))
+ with open(source_file, 'w') as wfh:
+ wfh.write(render_template('UiAutomation.java', {'name': name, 'package_name': package_name}))
+
+
+# Utility functions
+
+def get_sdk_path():
+ sdk_path = os.getenv('ANDROID_HOME')
+ if not sdk_path:
+ raise CommandError('Please set ANDROID_HOME environment variable to point to ' +
+ 'the locaton of Android SDK')
+ return sdk_path
+
+
+def get_class_name(name, postfix=''):
+ name = identifier(name)
+ return ''.join(map(capitalize, name.split('_'))) + postfix
+
+
+def render_template(name, params):
+ filepath = os.path.join(TEMPLATES_DIR, name)
+ with open(filepath) as fh:
+ text = fh.read()
+ template = string.Template(text)
+ return template.substitute(params)
+
+
+def touch(path):
+ with open(path, 'w') as wfh: # pylint: disable=unused-variable
+ pass
diff --git a/wlauto/commands/list.py b/wlauto/commands/list.py
new file mode 100644
index 00000000..0ffba3fa
--- /dev/null
+++ b/wlauto/commands/list.py
@@ -0,0 +1,59 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto import ExtensionLoader, Command, settings
+from wlauto.utils.formatter import DescriptionListFormatter
+from wlauto.utils.doc import get_summary
+
+
+class ListCommand(Command):
+
+ name = 'list'
+ description = 'List available WA extensions with a short description of each.'
+
+ def initialize(self):
+ extension_types = ['{}s'.format(ext.name) for ext in settings.extensions]
+ self.parser.add_argument('kind', metavar='KIND',
+ help=('Specify the kind of extension to list. Must be '
+ 'one of: {}'.format(', '.join(extension_types))),
+ choices=extension_types)
+ self.parser.add_argument('-n', '--name', help='Filter results by the name specified')
+
+ def execute(self, args):
+ filters = {}
+ if args.name:
+ filters['name'] = args.name
+
+ ext_loader = ExtensionLoader(packages=settings.extension_packages, paths=settings.extension_paths)
+ results = ext_loader.list_extensions(args.kind[:-1])
+ if filters:
+ filtered_results = []
+ for result in results:
+ passed = True
+ for k, v in filters.iteritems():
+ if getattr(result, k) != v:
+ passed = False
+ break
+ if passed:
+ filtered_results.append(result)
+ else: # no filters specified
+ filtered_results = results
+
+ if filtered_results:
+ output = DescriptionListFormatter()
+ for result in sorted(filtered_results, key=lambda x: x.name):
+ output.add_item(get_summary(result), result.name)
+ print output.format_data()
diff --git a/wlauto/commands/run.py b/wlauto/commands/run.py
new file mode 100644
index 00000000..192d013a
--- /dev/null
+++ b/wlauto/commands/run.py
@@ -0,0 +1,87 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+import sys
+import shutil
+
+import wlauto
+from wlauto import Command, settings
+from wlauto.core.agenda import Agenda
+from wlauto.core.execution import Executor
+from wlauto.utils.log import add_log_file
+
+
+class RunCommand(Command):
+
+ name = 'run'
+ description = 'Execute automated workloads on a remote device and process the resulting output.'
+
+ def initialize(self):
+ self.parser.add_argument('agenda', metavar='AGENDA',
+ help='Agenda for this workload automation run. This defines which workloads will ' +
+ 'be executed, how many times, with which tunables, etc. ' +
+ 'See example agendas in {} '.format(os.path.dirname(wlauto.__file__)) +
+ 'for an example of how this file should be structured.')
+ self.parser.add_argument('-d', '--output-directory', metavar='DIR', default=None,
+ help='Specify a directory where the output will be generated. If the directory' +
+ 'already exists, the script will abort unless -f option (see below) is used,' +
+ 'in which case the contents of the directory will be overwritten. If this option' +
+ 'is not specified, then {} will be used instead.'.format(settings.output_directory))
+ self.parser.add_argument('-f', '--force', action='store_true',
+ help='Overwrite output directory if it exists. By default, the script will abort in this' +
+ 'situation to prevent accidental data loss.')
+ self.parser.add_argument('-i', '--id', action='append', dest='only_run_ids', metavar='ID',
+ help='Specify a workload spec ID from an agenda to run. If this is specified, only that particular ' +
+ 'spec will be run, and other workloads in the agenda will be ignored. This option may be used to ' +
+ 'specify multiple IDs.')
+
+ def execute(self, args): # NOQA
+ self.set_up_output_directory(args)
+ add_log_file(settings.log_file)
+
+ if os.path.isfile(args.agenda):
+ agenda = Agenda(args.agenda)
+ settings.agenda = args.agenda
+ shutil.copy(args.agenda, settings.meta_directory)
+ else:
+ self.logger.debug('{} is not a file; assuming workload name.'.format(args.agenda))
+ agenda = Agenda()
+ agenda.add_workload_entry(args.agenda)
+
+ file_name = 'config_{}.py'
+ for file_number, path in enumerate(settings.get_config_paths(), 1):
+ shutil.copy(path, os.path.join(settings.meta_directory, file_name.format(file_number)))
+
+ executor = Executor()
+ executor.execute(agenda, selectors={'ids': args.only_run_ids})
+
+ def set_up_output_directory(self, args):
+ if args.output_directory:
+ settings.output_directory = args.output_directory
+ self.logger.debug('Using output directory: {}'.format(settings.output_directory))
+ if os.path.exists(settings.output_directory):
+ if args.force:
+ self.logger.info('Removing existing output directory.')
+ shutil.rmtree(settings.output_directory)
+ else:
+ self.logger.error('Output directory {} exists.'.format(settings.output_directory))
+ self.logger.error('Please specify another location, or use -f option to overwrite.\n')
+ sys.exit(1)
+
+ self.logger.info('Creating output directory.')
+ os.makedirs(settings.output_directory)
+ os.makedirs(settings.meta_directory)
diff --git a/wlauto/commands/show.py b/wlauto/commands/show.py
new file mode 100644
index 00000000..12515b73
--- /dev/null
+++ b/wlauto/commands/show.py
@@ -0,0 +1,101 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import sys
+import subprocess
+from cStringIO import StringIO
+
+from terminalsize import get_terminal_size # pylint: disable=import-error
+from wlauto import Command, ExtensionLoader, settings
+from wlauto.utils.doc import (get_summary, get_description, get_type_name, format_column, format_body,
+ format_paragraph, indent, strip_inlined_text)
+from wlauto.utils.misc import get_pager
+
+
+class ShowCommand(Command):
+
+ name = 'show'
+
+ description = """
+ Display documentation for the specified extension (workload, instrument, etc.).
+ """
+
+ def initialize(self):
+ self.parser.add_argument('name', metavar='EXTENSION',
+ help='''The name of the extension for which information will
+ be shown.''')
+
+ def execute(self, args):
+ ext_loader = ExtensionLoader(packages=settings.extension_packages, paths=settings.extension_paths)
+ extension = ext_loader.get_extension_class(args.name)
+ out = StringIO()
+ term_width, term_height = get_terminal_size()
+ format_extension(extension, out, term_width)
+ text = out.getvalue()
+ pager = get_pager()
+ if len(text.split('\n')) > term_height and pager:
+ sp = subprocess.Popen(pager, stdin=subprocess.PIPE)
+ sp.communicate(text)
+ else:
+ sys.stdout.write(text)
+
+
+def format_extension(extension, out, width):
+ format_extension_name(extension, out)
+ out.write('\n')
+ format_extension_summary(extension, out, width)
+ out.write('\n')
+ if extension.parameters:
+ format_extension_parameters(extension, out, width)
+ out.write('\n')
+ format_extension_description(extension, out, width)
+
+
+def format_extension_name(extension, out):
+ out.write('\n{}\n'.format(extension.name))
+
+
+def format_extension_summary(extension, out, width):
+ out.write('{}\n'.format(format_body(strip_inlined_text(get_summary(extension)), width)))
+
+
+def format_extension_description(extension, out, width):
+ # skip the initial paragraph of multi-paragraph description, as already
+ # listed above.
+ description = get_description(extension).split('\n\n', 1)[-1]
+ out.write('{}\n'.format(format_body(strip_inlined_text(description), width)))
+
+
+def format_extension_parameters(extension, out, width, shift=4):
+ out.write('parameters:\n\n')
+ param_texts = []
+ for param in extension.parameters:
+ description = format_paragraph(strip_inlined_text(param.description or ''), width - shift)
+ param_text = '{}'.format(param.name)
+ if param.mandatory:
+ param_text += " (MANDATORY)"
+ param_text += '\n{}\n'.format(description)
+ param_text += indent('type: {}\n'.format(get_type_name(param.kind)))
+ if param.allowed_values:
+ param_text += indent('allowed values: {}\n'.format(', '.join(map(str, param.allowed_values))))
+ elif param.constraint:
+ param_text += indent('constraint: {}\n'.format(get_type_name(param.constraint)))
+ if param.default:
+ param_text += indent('default: {}\n'.format(param.default))
+ param_texts.append(indent(param_text, shift))
+
+ out.write(format_column('\n'.join(param_texts), width))
+
diff --git a/wlauto/commands/templates/UiAutomation.java b/wlauto/commands/templates/UiAutomation.java
new file mode 100644
index 00000000..bd33d9a7
--- /dev/null
+++ b/wlauto/commands/templates/UiAutomation.java
@@ -0,0 +1,25 @@
+package ${package_name};
+
+import android.app.Activity;
+import android.os.Bundle;
+import android.util.Log;
+import android.view.KeyEvent;
+
+// Import the uiautomator libraries
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+
+import com.arm.wlauto.uiauto.BaseUiAutomation;
+
+public class UiAutomation extends BaseUiAutomation {
+
+ public static String TAG = "${name}";
+
+ public void runUiAutomation() throws Exception {
+ // UI Automation code goes here
+ }
+
+}
diff --git a/wlauto/commands/templates/android_benchmark b/wlauto/commands/templates/android_benchmark
new file mode 100644
index 00000000..82796bd5
--- /dev/null
+++ b/wlauto/commands/templates/android_benchmark
@@ -0,0 +1,27 @@
+from wlauto import AndroidBenchmark, Parameter
+
+
+class ${class_name}(AndroidBenchmark):
+
+ name = '${name}'
+ # NOTE: Please do not leave these comments in the code.
+ #
+ # Replace with the package for the app in the APK file.
+ package = 'com.foo.bar'
+ # Replace with the full path to the activity to run.
+ activity = '.RunBuzz'
+ description = "This is an placeholder description"
+
+ parameters = [
+ # Workload parameters go here e.g.
+ Parameter('Example parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False,
+ description='This is an example parameter')
+ ]
+
+ def run(self, context):
+ pass
+
+ def update_result(self, context):
+ super(${class_name}, self).update_result(context)
+ # process results and add them using
+ # context.result.add_metric
diff --git a/wlauto/commands/templates/android_uiauto_benchmark b/wlauto/commands/templates/android_uiauto_benchmark
new file mode 100644
index 00000000..5d6893a8
--- /dev/null
+++ b/wlauto/commands/templates/android_uiauto_benchmark
@@ -0,0 +1,24 @@
+from wlauto import AndroidUiAutoBenchmark, Parameter
+
+
+class ${class_name}(AndroidUiAutoBenchmark):
+
+ name = '${name}'
+ # NOTE: Please do not leave these comments in the code.
+ #
+ # Replace with the package for the app in the APK file.
+ package = 'com.foo.bar'
+ # Replace with the full path to the activity to run.
+ activity = '.RunBuzz'
+ description = "This is an placeholder description"
+
+ parameters = [
+ # Workload parameters go here e.g.
+ Parameter('Example parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False,
+ description='This is an example parameter')
+ ]
+
+ def update_result(self, context):
+ super(${class_name}, self).update_result(context)
+ # process results and add them using
+ # context.result.add_metric
diff --git a/wlauto/commands/templates/basic_workload b/wlauto/commands/templates/basic_workload
new file mode 100644
index 00000000..e75316f1
--- /dev/null
+++ b/wlauto/commands/templates/basic_workload
@@ -0,0 +1,28 @@
+from wlauto import Workload, Parameter
+
+
+class ${class_name}(Workload):
+
+ name = '${name}'
+ description = "This is an placeholder description"
+
+ parameters = [
+ # Workload parameters go here e.g.
+ Parameter('Example parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False,
+ description='This is an example parameter')
+ ]
+
+ def setup(self, context):
+ pass
+
+ def run(self, context):
+ pass
+
+ def update_result(self, context):
+ pass
+
+ def teardown(self, context):
+ pass
+
+ def validate(self):
+ pass
diff --git a/wlauto/commands/templates/setup.template b/wlauto/commands/templates/setup.template
new file mode 100644
index 00000000..f9097b59
--- /dev/null
+++ b/wlauto/commands/templates/setup.template
@@ -0,0 +1,102 @@
+import os
+import sys
+import warnings
+from multiprocessing import Process
+
+try:
+ from setuptools.command.install import install as orig_install
+ from setuptools import setup
+except ImportError:
+ from distutils.command.install import install as orig_install
+ from distutils.core import setup
+
+try:
+ import pwd
+except ImportError:
+ pwd = None
+
+warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'")
+
+try:
+ os.remove('MANIFEST')
+except OSError:
+ pass
+
+
+packages = []
+data_files = {}
+source_dir = os.path.dirname(__file__)
+for root, dirs, files in os.walk('$package_name'):
+ rel_dir = os.path.relpath(root, source_dir)
+ data = []
+ if '__init__.py' in files:
+ for f in files:
+ if os.path.splitext(f)[1] not in ['.py', '.pyc', '.pyo']:
+ data.append(f)
+ package_name = rel_dir.replace(os.sep, '.')
+ package_dir = root
+ packages.append(package_name)
+ data_files[package_name] = data
+ else:
+ # use previous package name
+ filepaths = [os.path.join(root, f) for f in files]
+ data_files[package_name].extend([os.path.relpath(f, package_dir) for f in filepaths])
+
+params = dict(
+ name='$package_name',
+ version='0.0.1',
+ packages=packages,
+ package_data=data_files,
+ url='N/A',
+ maintainer='$user',
+ maintainer_email='$user@example.com',
+ install_requires=[
+ 'wlauto',
+ ],
+ # https://pypi.python.org/pypi?%3Aaction=list_classifiers
+ classifiers=[
+ 'Development Status :: 3 - Alpha',
+ 'Environment :: Console',
+ 'License :: Other/Proprietary License',
+ 'Operating System :: Unix',
+ 'Programming Language :: Python :: 2.7',
+ ],
+)
+
+
+def update_wa_packages():
+ sudo_user = os.getenv('SUDO_USER')
+ if sudo_user:
+ user_entry = pwd.getpwnam(sudo_user)
+ os.setgid(user_entry.pw_gid)
+ os.setuid(user_entry.pw_uid)
+ env_root = os.getenv('WA_USER_DIRECTORY', os.path.join(os.path.expanduser('~'), '.workload_automation'))
+ if not os.path.isdir(env_root):
+ os.makedirs(env_root)
+ wa_packages_file = os.path.join(env_root, 'packages')
+ if os.path.isfile(wa_packages_file):
+ with open(wa_packages_file, 'r') as wfh:
+ package_list = wfh.read().split()
+ if params['name'] not in package_list:
+ package_list.append(params['name'])
+ else: # no existing package file
+ package_list = [params['name']]
+ with open(wa_packages_file, 'w') as wfh:
+ wfh.write('\n'.join(package_list))
+
+
+class install(orig_install):
+
+ def run(self):
+ orig_install.run(self)
+ # Must be done in a separate process because will drop privileges if
+ # sudo, and won't be able to reacquire them.
+ p = Process(target=update_wa_packages)
+ p.start()
+ p.join()
+
+
+params['cmdclass'] = {'install': install}
+
+
+setup(**params)
diff --git a/wlauto/commands/templates/uiauto_workload b/wlauto/commands/templates/uiauto_workload
new file mode 100644
index 00000000..66cc193a
--- /dev/null
+++ b/wlauto/commands/templates/uiauto_workload
@@ -0,0 +1,35 @@
+from wlauto import UiAutomatorWorkload, Parameter
+
+
+class ${class_name}(UiAutomatorWorkload):
+
+ name = '${name}'
+ description = "This is an placeholder description"
+
+ parameters = [
+ # Workload parameters go here e.g.
+ Parameter('Example parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False,
+ description='This is an example parameter')
+ ]
+
+ def setup(self, context):
+ super(${class_name}, self).setup(context)
+ # Perform any necessary setup before starting the UI automation
+ # e.g. copy files to the device, start apps, reset logs, etc.
+
+
+ def update_result(self, context):
+ pass
+ # Process workload execution artifacts to extract metrics
+ # and add them to the run result using
+ # context.result.add_metric()
+
+ def teardown(self, context):
+ super(${class_name}, self).teardown(context)
+        # Perform any necessary cleanup
+
+ def validate(self):
+ pass
+ # Validate inter-parameter assumptions etc
+
+
diff --git a/wlauto/common/__init__.py b/wlauto/common/__init__.py
new file mode 100644
index 00000000..cd5d64d6
--- /dev/null
+++ b/wlauto/common/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
diff --git a/wlauto/common/android/BaseUiAutomation.class b/wlauto/common/android/BaseUiAutomation.class
new file mode 100644
index 00000000..2683f453
--- /dev/null
+++ b/wlauto/common/android/BaseUiAutomation.class
Binary files differ
diff --git a/wlauto/common/android/__init__.py b/wlauto/common/android/__init__.py
new file mode 100644
index 00000000..16224d6f
--- /dev/null
+++ b/wlauto/common/android/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
diff --git a/wlauto/common/android/device.py b/wlauto/common/android/device.py
new file mode 100644
index 00000000..21824eae
--- /dev/null
+++ b/wlauto/common/android/device.py
@@ -0,0 +1,678 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=E1101
+import os
+import sys
+import re
+import time
+import tempfile
+import shutil
+import threading
+from subprocess import CalledProcessError
+
+from wlauto.core.extension import Parameter
+from wlauto.common.linux.device import BaseLinuxDevice
+from wlauto.exceptions import DeviceError, WorkerThreadError, TimeoutError, DeviceNotRespondingError
+from wlauto.utils.misc import convert_new_lines
+from wlauto.utils.types import boolean, regex
+from wlauto.utils.android import (adb_shell, adb_background_shell, adb_list_devices,
+ adb_command, AndroidProperties, ANDROID_VERSION_MAP)
+
+
+SCREEN_STATE_REGEX = re.compile('(?:mPowerState|mScreenOn)=([0-9]+|true|false)', re.I)
+
+
+class AndroidDevice(BaseLinuxDevice): # pylint: disable=W0223
+ """
+ Device running Android OS.
+
+ """
+
+ platform = 'android'
+
+ parameters = [
+ Parameter('adb_name',
+ description='The unique ID of the device as output by "adb devices".'),
+ Parameter('android_prompt', kind=regex, default=re.compile('^.*(shell|root)@.*:/ [#$] ', re.MULTILINE),
+ description='The format of matching the shell prompt in Android.'),
+ Parameter('working_directory', default='/sdcard/wa-working',
+                  description='Directory that will be used by WA on the device for output files etc.'),
+ Parameter('binaries_directory', default='/system/bin',
+ description='Location of binaries on the device.'),
+ Parameter('package_data_directory', default='/data/data',
+                  description='Location of data for an installed package (APK).'),
+ Parameter('external_storage_directory', default='/sdcard',
+ description='Mount point for external storage.'),
+ Parameter('connection', default='usb', allowed_values=['usb', 'ethernet'],
+                  description='Specifies the nature of the adb connection.'),
+ Parameter('logcat_poll_period', kind=int,
+ description="""
+ If specified and is not ``0``, logcat will be polled every
+ ``logcat_poll_period`` seconds, and buffered on the host. This
+ can be used if a lot of output is expected in logcat and the fixed
+ logcat buffer on the device is not big enough. The trade off is that
+ this introduces some minor runtime overhead. Not set by default.
+ """),
+ Parameter('enable_screen_check', kind=boolean, default=False,
+ description="""
+ Specified whether the device should make sure that the screen is on
+ during initialization.
+ """),
+ ]
+
+ default_timeout = 30
+ delay = 2
+ long_delay = 3 * delay
+ ready_timeout = 60
+
+ # Overwritten from Device. For documentation, see corresponding method in
+ # Device.
+
+ @property
+ def is_rooted(self):
+ if self._is_rooted is None:
+ try:
+ result = adb_shell(self.adb_name, 'su', timeout=1)
+ if 'not found' in result:
+ self._is_rooted = False
+ else:
+ self._is_rooted = True
+ except TimeoutError:
+ self._is_rooted = True
+ except DeviceError:
+ self._is_rooted = False
+ return self._is_rooted
+
+ @property
+ def abi(self):
+ return self.getprop()['ro.product.cpu.abi'].split('-')[0]
+
+ @property
+ def supported_eabi(self):
+ props = self.getprop()
+ result = [props['ro.product.cpu.abi']]
+ if 'ro.product.cpu.abi2' in props:
+ result.append(props['ro.product.cpu.abi2'])
+ if 'ro.product.cpu.abilist' in props:
+ for eabi in props['ro.product.cpu.abilist'].split(','):
+ if eabi not in result:
+ result.append(eabi)
+ return result
+
+ def __init__(self, **kwargs):
+ super(AndroidDevice, self).__init__(**kwargs)
+ self._logcat_poller = None
+
+ def reset(self):
+ self._is_ready = False
+ self._just_rebooted = True
+ adb_command(self.adb_name, 'reboot', timeout=self.default_timeout)
+
+ def hard_reset(self):
+ super(AndroidDevice, self).hard_reset()
+ self._is_ready = False
+ self._just_rebooted = True
+
+ def boot(self, **kwargs):
+ self.reset()
+
+ def connect(self): # NOQA pylint: disable=R0912
+ iteration_number = 0
+ max_iterations = self.ready_timeout / self.delay
+ available = False
+ self.logger.debug('Polling for device {}...'.format(self.adb_name))
+ while iteration_number < max_iterations:
+ devices = adb_list_devices()
+ if self.adb_name:
+ for device in devices:
+ if device.name == self.adb_name and device.status != 'offline':
+ available = True
+ else: # adb_name not set
+ if len(devices) == 1:
+ available = True
+ elif len(devices) > 1:
+ raise DeviceError('More than one device is connected and adb_name is not set.')
+
+ if available:
+ break
+ else:
+ time.sleep(self.delay)
+ iteration_number += 1
+ else:
+ raise DeviceError('Could not boot {} ({}).'.format(self.name, self.adb_name))
+
+ while iteration_number < max_iterations:
+ available = (1 == int('0' + adb_shell(self.adb_name, 'getprop sys.boot_completed', timeout=self.default_timeout)))
+ if available:
+ break
+ else:
+ time.sleep(self.delay)
+ iteration_number += 1
+ else:
+ raise DeviceError('Could not boot {} ({}).'.format(self.name, self.adb_name))
+
+ if self._just_rebooted:
+ self.logger.debug('Waiting for boot to complete...')
+ # On some devices, adb connection gets reset some time after booting.
+ # This causes errors during execution. To prevent this, open a shell
+            # session and wait for it to be killed. Once it's killed, give adb
+ # enough time to restart, and then the device should be ready.
+ # TODO: This is more of a work-around rather than an actual solution.
+ # Need to figure out what is going on the "proper" way of handling it.
+ try:
+ adb_shell(self.adb_name, '', timeout=20)
+ time.sleep(5) # give adb time to re-initialize
+ except TimeoutError:
+ pass # timed out waiting for the session to be killed -- assume not going to be.
+
+ self.logger.debug('Boot completed.')
+ self._just_rebooted = False
+ self._is_ready = True
+
+ def initialize(self, context, *args, **kwargs):
+ self.execute('mkdir -p {}'.format(self.working_directory))
+ if self.is_rooted:
+ if not self.executable_is_installed('busybox'):
+ self.busybox = self.deploy_busybox(context)
+ else:
+ self.busybox = 'busybox'
+ self.disable_screen_lock()
+ self.disable_selinux()
+ if self.enable_screen_check:
+ self.ensure_screen_is_on()
+ self.init(context, *args, **kwargs)
+
+ def disconnect(self):
+ if self._logcat_poller:
+ self._logcat_poller.close()
+
+ def ping(self):
+ try:
+ # May be triggered inside initialize()
+ adb_shell(self.adb_name, 'ls /', timeout=10)
+ except (TimeoutError, CalledProcessError):
+ raise DeviceNotRespondingError(self.adb_name or self.name)
+
+ def start(self):
+ if self.logcat_poll_period:
+ if self._logcat_poller:
+ self._logcat_poller.close()
+ self._logcat_poller = _LogcatPoller(self, self.logcat_poll_period, timeout=self.default_timeout)
+ self._logcat_poller.start()
+
+ def stop(self):
+ if self._logcat_poller:
+ self._logcat_poller.stop()
+
+ def get_android_version(self):
+ return ANDROID_VERSION_MAP.get(self.get_sdk_version(), None)
+
+ def get_android_id(self):
+ """
+ Get the device's ANDROID_ID. Which is
+
+ "A 64-bit number (as a hex string) that is randomly generated when the user
+ first sets up the device and should remain constant for the lifetime of the
+ user's device."
+
+ .. note:: This will get reset on userdata erasure.
+
+ """
+ return self.execute('settings get secure android_id').strip()
+
+ def get_sdk_version(self):
+ try:
+ return int(self.getprop('ro.build.version.sdk'))
+ except (ValueError, TypeError):
+ return None
+
+ def get_installed_package_version(self, package):
+ """
+ Returns the version (versionName) of the specified package if it is installed
+ on the device, or ``None`` otherwise.
+
+ Added in version 2.1.4
+
+ """
+ output = self.execute('dumpsys package {}'.format(package))
+ for line in convert_new_lines(output).split('\n'):
+ if 'versionName' in line:
+ return line.split('=', 1)[1]
+ return None
+
+ def list_packages(self):
+ """
+ List packages installed on the device.
+
+ Added in version 2.1.4
+
+ """
+ output = self.execute('pm list packages')
+ output = output.replace('package:', '')
+ return output.split()
+
+ def package_is_installed(self, package_name):
+ """
+        Returns ``True`` if a package with the specified name is installed on
+ the device, and ``False`` otherwise.
+
+ Added in version 2.1.4
+
+ """
+ return package_name in self.list_packages()
+
+ def executable_is_installed(self, executable_name):
+ return executable_name in self.listdir(self.binaries_directory)
+
+ def is_installed(self, name):
+ return self.executable_is_installed(name) or self.package_is_installed(name)
+
+ def listdir(self, path, as_root=False, **kwargs):
+ contents = self.execute('ls {}'.format(path), as_root=as_root)
+ return [x.strip() for x in contents.split()]
+
+ def push_file(self, source, dest, as_root=False, timeout=default_timeout): # pylint: disable=W0221
+ """
+ Modified in version 2.1.4: added ``as_root`` parameter.
+
+ """
+ self._check_ready()
+ if not as_root:
+ adb_command(self.adb_name, "push '{}' '{}'".format(source, dest), timeout=timeout)
+ else:
+ device_tempfile = self.path.join(self.file_transfer_cache, source.lstrip(self.path.sep))
+ self.execute('mkdir -p {}'.format(self.path.dirname(device_tempfile)))
+ adb_command(self.adb_name, "push '{}' '{}'".format(source, device_tempfile), timeout=timeout)
+ self.execute('cp {} {}'.format(device_tempfile, dest), as_root=True)
+
+ def pull_file(self, source, dest, as_root=False, timeout=default_timeout): # pylint: disable=W0221
+ """
+ Modified in version 2.1.4: added ``as_root`` parameter.
+
+ """
+ self._check_ready()
+ if not as_root:
+ adb_command(self.adb_name, "pull '{}' '{}'".format(source, dest), timeout=timeout)
+ else:
+ device_tempfile = self.path.join(self.file_transfer_cache, source.lstrip(self.path.sep))
+ self.execute('mkdir -p {}'.format(self.path.dirname(device_tempfile)))
+ self.execute('cp {} {}'.format(source, device_tempfile), as_root=True)
+ adb_command(self.adb_name, "pull '{}' '{}'".format(device_tempfile, dest), timeout=timeout)
+
+ def delete_file(self, filepath, as_root=False): # pylint: disable=W0221
+ self._check_ready()
+ adb_shell(self.adb_name, "rm '{}'".format(filepath), as_root=as_root, timeout=self.default_timeout)
+
+ def file_exists(self, filepath):
+ self._check_ready()
+ output = adb_shell(self.adb_name, 'if [ -e \'{}\' ]; then echo 1; else echo 0; fi'.format(filepath),
+ timeout=self.default_timeout)
+ if int(output):
+ return True
+ else:
+ return False
+
+ def install(self, filepath, timeout=default_timeout, with_name=None): # pylint: disable=W0221
+ ext = os.path.splitext(filepath)[1].lower()
+ if ext == '.apk':
+ return self.install_apk(filepath, timeout)
+ else:
+ return self.install_executable(filepath, with_name)
+
+ def install_apk(self, filepath, timeout=default_timeout): # pylint: disable=W0221
+ self._check_ready()
+ ext = os.path.splitext(filepath)[1].lower()
+ if ext == '.apk':
+ return adb_command(self.adb_name, "install {}".format(filepath), timeout=timeout)
+ else:
+ raise DeviceError('Can\'t install {}: unsupported format.'.format(filepath))
+
+ def install_executable(self, filepath, with_name=None):
+ """
+ Installs a binary executable on device. Requires root access. Returns
+ the path to the installed binary, or ``None`` if the installation has failed.
+ Optionally, ``with_name`` parameter may be used to specify a different name under
+ which the executable will be installed.
+
+ Added in version 2.1.3.
+ Updated in version 2.1.5 with ``with_name`` parameter.
+
+ """
+ executable_name = with_name or os.path.basename(filepath)
+ on_device_file = self.path.join(self.working_directory, executable_name)
+ on_device_executable = self.path.join(self.binaries_directory, executable_name)
+ self.push_file(filepath, on_device_file)
+ matched = []
+ for entry in self.list_file_systems():
+ if self.binaries_directory.rstrip('/').startswith(entry.mount_point):
+ matched.append(entry)
+
+ if matched:
+ entry = sorted(matched, key=lambda x: len(x.mount_point))[-1]
+ if 'rw' not in entry.options:
+ self.execute('mount -o rw,remount {} {}'.format(entry.device, entry.mount_point), as_root=True)
+ self.execute('cp {} {}'.format(on_device_file, on_device_executable), as_root=True)
+ self.execute('chmod 0777 {}'.format(on_device_executable), as_root=True)
+ return on_device_executable
+ else:
+ raise DeviceError('Could not find mount point for binaries directory {}'.format(self.binaries_directory))
+
+ def uninstall(self, package):
+ self._check_ready()
+ adb_command(self.adb_name, "uninstall {}".format(package), timeout=self.default_timeout)
+
+ def uninstall_executable(self, executable_name):
+ """
+ Requires root access.
+
+ Added in version 2.1.3.
+
+ """
+ on_device_executable = self.path.join(self.binaries_directory, executable_name)
+ for entry in self.list_file_systems():
+ if entry.mount_point == '/system':
+ if 'rw' not in entry.options:
+ self.execute('mount -o rw,remount {} /system'.format(entry.device), as_root=True)
+ self.delete_file(on_device_executable)
+
+ def execute(self, command, timeout=default_timeout, check_exit_code=True, background=False,
+ as_root=False, busybox=False, **kwargs):
+ """
+ Execute the specified command on the device using adb.
+
+ Parameters:
+
+ :param command: The command to be executed. It should appear exactly
+ as if you were typing it into a shell.
+ :param timeout: Time, in seconds, to wait for adb to return before aborting
+ and raising an error. Defaults to ``AndroidDevice.default_timeout``.
+ :param check_exit_code: If ``True``, the return code of the command on the Device will
+                                be checked and an exception will be raised if it is not 0.
+ Defaults to ``True``.
+ :param background: If ``True``, will execute adb in a subprocess, and will return
+ immediately, not waiting for adb to return. Defaults to ``False``
+ :param busybox: If ``True``, will use busybox to execute the command. Defaults to ``False``.
+
+ Added in version 2.1.3
+
+ .. note:: The device must be rooted to be able to use busybox.
+
+ :param as_root: If ``True``, will attempt to execute command in privileged mode. The device
+ must be rooted, otherwise an error will be raised. Defaults to ``False``.
+
+ Added in version 2.1.3
+
+ :returns: If ``background`` parameter is set to ``True``, the subprocess object will
+ be returned; otherwise, the contents of STDOUT from the device will be returned.
+
+ :raises: DeviceError if adb timed out or if the command returned non-zero exit
+ code on the device, or if attempting to execute a command in privileged mode on an
+ unrooted device.
+
+ """
+ self._check_ready()
+ if as_root and not self.is_rooted:
+ raise DeviceError('Attempting to execute "{}" as root on unrooted device.'.format(command))
+ if busybox:
+ if not self.is_rooted:
+ DeviceError('Attempting to execute "{}" with busybox. '.format(command) +
+ 'Busybox can only be deployed to rooted devices.')
+ command = ' '.join([self.busybox, command])
+ if background:
+ return adb_background_shell(self.adb_name, command, as_root=as_root)
+ else:
+ return adb_shell(self.adb_name, command, timeout, check_exit_code, as_root)
+
+ def kick_off(self, command):
+ """
+ Like execute but closes adb session and returns immediately, leaving the command running on the
+ device (this is different from execute(background=True) which keeps adb connection open and returns
+ a subprocess object).
+
+ .. note:: This relies on busybox's nohup applet and so won't work on unrooted devices.
+
+ Added in version 2.1.4
+
+ """
+ if not self.is_rooted:
+ raise DeviceError('kick_off uses busybox\'s nohup applet and so can only be run a rooted device.')
+ try:
+ command = 'cd {} && busybox nohup {}'.format(self.working_directory, command)
+ output = self.execute(command, timeout=1, as_root=True)
+ except TimeoutError:
+ pass
+ else:
+ raise ValueError('Background command exited before timeout; got "{}"'.format(output))
+
+ def get_properties(self, context):
+ """Captures and saves the information from /system/build.prop and /proc/version"""
+ props = {}
+ props['android_id'] = self.get_android_id()
+ buildprop_file = os.path.join(context.host_working_directory, 'build.prop')
+ if not os.path.isfile(buildprop_file):
+ self.pull_file('/system/build.prop', context.host_working_directory)
+ self._update_build_properties(buildprop_file, props)
+ context.add_run_artifact('build_properties', buildprop_file, 'export')
+
+ version_file = os.path.join(context.host_working_directory, 'version')
+ if not os.path.isfile(version_file):
+ self.pull_file('/proc/version', context.host_working_directory)
+ self._update_versions(version_file, props)
+ context.add_run_artifact('device_version', version_file, 'export')
+ return props
+
+ def getprop(self, prop=None):
+ """Returns parsed output of Android getprop command. If a property is
+ specified, only the value for that property will be returned (with
+        ``None`` returned if the property doesn't exist). Otherwise,
+ ``wlauto.utils.android.AndroidProperties`` will be returned, which is
+ a dict-like object."""
+ props = AndroidProperties(self.execute('getprop'))
+ if prop:
+ return props[prop]
+ return props
+
+ # Android-specific methods. These either rely on specifics of adb or other
+ # Android-only concepts in their interface and/or implementation.
+
+ def forward_port(self, from_port, to_port):
+ """
+ Forward a port on the device to a port on localhost.
+
+ :param from_port: Port on the device which to forward.
+ :param to_port: Port on the localhost to which the device port will be forwarded.
+
+ Ports should be specified using adb spec. See the "adb forward" section in "adb help".
+
+ """
+ adb_command(self.adb_name, 'forward {} {}'.format(from_port, to_port), timeout=self.default_timeout)
+
+ def dump_logcat(self, outfile, filter_spec=None):
+ """
+ Dump the contents of logcat, for the specified filter spec to the
+ specified output file.
+ See http://developer.android.com/tools/help/logcat.html
+
+ :param outfile: Output file on the host into which the contents of the
+ log will be written.
+ :param filter_spec: Logcat filter specification.
+ see http://developer.android.com/tools/debugging/debugging-log.html#filteringOutput
+
+ """
+ if self._logcat_poller:
+ return self._logcat_poller.write_log(outfile)
+ else:
+ if filter_spec:
+ command = 'logcat -d -s {} > {}'.format(filter_spec, outfile)
+ else:
+ command = 'logcat -d > {}'.format(outfile)
+ return adb_command(self.adb_name, command, timeout=self.default_timeout)
+
+ def clear_logcat(self):
+ """Clear (flush) logcat log."""
+ if self._logcat_poller:
+ return self._logcat_poller.clear_buffer()
+ else:
+ return adb_shell(self.adb_name, 'logcat -c', timeout=self.default_timeout)
+
+ def capture_screen(self, filepath):
+        """Captures the current device screen into the specified file in PNG format."""
+ on_device_file = self.path.join(self.working_directory, 'screen_capture.png')
+ self.execute('screencap -p {}'.format(on_device_file))
+ self.pull_file(on_device_file, filepath)
+ self.delete_file(on_device_file)
+
+ def is_screen_on(self):
+ """Returns ``True`` if the device screen is currently on, ``False`` otherwise."""
+ output = self.execute('dumpsys power')
+ match = SCREEN_STATE_REGEX.search(output)
+ if match:
+ return boolean(match.group(1))
+ else:
+ raise DeviceError('Could not establish screen state.')
+
+ def ensure_screen_is_on(self):
+ if not self.is_screen_on():
+ self.execute('input keyevent 26')
+
+ def disable_screen_lock(self):
+ """
+        Attempts to disable the screen lock on the device.
+
+ .. note:: This does not always work...
+
+        Added in version 2.1.4
+
+ """
+ lockdb = '/data/system/locksettings.db'
+ sqlcommand = "update locksettings set value=\\'0\\' where name=\\'screenlock.disabled\\';"
+ self.execute('sqlite3 {} "{}"'.format(lockdb, sqlcommand), as_root=True)
+
+ def disable_selinux(self):
+        # This may be invoked from initialize() so we can't use execute() or the
+ # standard API for doing this.
+ api_level = int(adb_shell(self.adb_name, 'getprop ro.build.version.sdk',
+ timeout=self.default_timeout).strip())
+ # SELinux was added in Android 4.3 (API level 18). Trying to
+ # 'getenforce' in earlier versions will produce an error.
+ if api_level >= 18:
+ se_status = self.execute('getenforce', as_root=True).strip()
+ if se_status == 'Enforcing':
+ self.execute('setenforce 0', as_root=True)
+
+ # Internal methods: do not use outside of the class.
+
+ def _update_build_properties(self, filepath, props):
+ try:
+ with open(filepath) as fh:
+ for line in fh:
+ line = re.sub(r'#.*', '', line).strip()
+ if not line:
+ continue
+ key, value = line.split('=', 1)
+ props[key] = value
+ except ValueError:
+ self.logger.warning('Could not parse build.prop.')
+
+ def _update_versions(self, filepath, props):
+ with open(filepath) as fh:
+ text = fh.read()
+ props['version'] = text
+ text = re.sub(r'#.*', '', text).strip()
+ match = re.search(r'^(Linux version .*?)\s*\((gcc version .*)\)$', text)
+ if match:
+ props['linux_version'] = match.group(1).strip()
+ props['gcc_version'] = match.group(2).strip()
+ else:
+ self.logger.warning('Could not parse version string.')
+
+
+class _LogcatPoller(threading.Thread):
+
+ join_timeout = 5
+
+ def __init__(self, device, period, timeout=None):
+ super(_LogcatPoller, self).__init__()
+ self.adb_device = device.adb_name
+ self.logger = device.logger
+ self.period = period
+ self.timeout = timeout
+ self.stop_signal = threading.Event()
+ self.lock = threading.RLock()
+ self.buffer_file = tempfile.mktemp()
+ self.last_poll = 0
+ self.daemon = True
+ self.exc = None
+
+ def run(self):
+ self.logger.debug('Starting logcat polling.')
+ try:
+ while True:
+ if self.stop_signal.is_set():
+ break
+ with self.lock:
+ current_time = time.time()
+ if (current_time - self.last_poll) >= self.period:
+ self._poll()
+ time.sleep(0.5)
+ except Exception: # pylint: disable=W0703
+ self.exc = WorkerThreadError(self.name, sys.exc_info())
+ self.logger.debug('Logcat polling stopped.')
+
+ def stop(self):
+ self.logger.debug('Stopping logcat polling.')
+ self.stop_signal.set()
+ self.join(self.join_timeout)
+ if self.is_alive():
+ self.logger.error('Could not join logcat poller thread.')
+ if self.exc:
+ raise self.exc # pylint: disable=E0702
+
+ def clear_buffer(self):
+ self.logger.debug('Clearing logcat buffer.')
+ with self.lock:
+ adb_shell(self.adb_device, 'logcat -c', timeout=self.timeout)
+ with open(self.buffer_file, 'w') as _: # NOQA
+ pass
+
+ def write_log(self, outfile):
+ self.logger.debug('Writing logbuffer to {}.'.format(outfile))
+ with self.lock:
+ self._poll()
+ if os.path.isfile(self.buffer_file):
+ shutil.copy(self.buffer_file, outfile)
+ else: # there was no logcat trace at this time
+ with open(outfile, 'w') as _: # NOQA
+ pass
+
+ def close(self):
+ self.logger.debug('Closing logcat poller.')
+ if os.path.isfile(self.buffer_file):
+ os.remove(self.buffer_file)
+
+ def _poll(self):
+ with self.lock:
+ self.last_poll = time.time()
+ adb_command(self.adb_device, 'logcat -d >> {}'.format(self.buffer_file), timeout=self.timeout)
+ adb_command(self.adb_device, 'logcat -c', timeout=self.timeout)
+
+
+class BigLittleDevice(AndroidDevice): # pylint: disable=W0223
+
+ parameters = [
+ Parameter('scheduler', default='hmp', override=True),
+ ]
+
diff --git a/wlauto/common/android/resources.py b/wlauto/common/android/resources.py
new file mode 100644
index 00000000..27231e16
--- /dev/null
+++ b/wlauto/common/android/resources.py
@@ -0,0 +1,36 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto.common.resources import FileResource
+
+
+class ReventFile(FileResource):
+
+ name = 'revent'
+
+ def __init__(self, owner, stage):
+ super(ReventFile, self).__init__(owner)
+ self.stage = stage
+
+
+class JarFile(FileResource):
+
+ name = 'jar'
+
+
+class ApkFile(FileResource):
+
+ name = 'apk'
diff --git a/wlauto/common/android/workload.py b/wlauto/common/android/workload.py
new file mode 100644
index 00000000..ee49c061
--- /dev/null
+++ b/wlauto/common/android/workload.py
@@ -0,0 +1,425 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import sys
+import time
+
+from wlauto.core.extension import Parameter
+from wlauto.core.workload import Workload
+from wlauto.core.resource import NO_ONE
+from wlauto.common.resources import ExtensionAsset, Executable
+from wlauto.exceptions import WorkloadError, ResourceError
+from wlauto.utils.android import ApkInfo
+from wlauto.utils.types import boolean
+import wlauto.common.android.resources
+
+
+DELAY = 5
+
+
class UiAutomatorWorkload(Workload):
    """
    Base class for all workloads that rely on a UI Automator JAR file.

    This class should be subclassed by workloads that rely on android UiAutomator
    to work. This class handles transferring the UI Automator JAR file to the device
    and invoking it to run the workload. By default, it will look for the JAR file in
    the same directory as the .py file for the workload (this can be changed by overriding
    the ``uiauto_file`` property in the subclassing workload).

    To initiate UI Automation, the fully-qualified name of the Java class and the
    corresponding method name are needed. By default, the package part of the class name
    is derived from the class file, and class and method names are ``UiAutomation``
    and ``runUiAutomation`` respectively. If you have generated the boilerplate for the
    UiAutomator code using ``create_workloads`` utility, then everything should be named
    correctly. If you're creating the Java project manually, you need to make sure the names
    match what is expected, or you could override ``uiauto_package``, ``uiauto_class`` and
    ``uiauto_method`` class attributes with the value that match your Java code.

    You can also pass parameters to the JAR file. To do this add the parameters to
    ``self.uiauto_params`` dict inside your class's ``__init__`` or ``setup`` methods.

    """

    supported_platforms = ['android']

    # Java-side entry point; package defaults to the JAR's base name if empty.
    uiauto_package = ''
    uiauto_class = 'UiAutomation'
    uiauto_method = 'runUiAutomation'

    # Can be overridden by subclasses to adjust to run time of specific
    # benchmarks.
    run_timeout = 4 * 60  # seconds

    def __init__(self, device, _call_super=True, **kwargs):  # pylint: disable=W0613
        # _call_super=False supports co-operative multiple inheritance
        # (see AndroidUiAutoBenchmark) where Workload.__init__ was already run.
        if _call_super:
            super(UiAutomatorWorkload, self).__init__(device, **kwargs)
        self.uiauto_file = None         # host-side path to the JAR, set in init_resources()
        self.device_uiauto_file = None  # on-device path the JAR is pushed to
        self.command = None             # full uiautomator invocation, built in setup()
        self.uiauto_params = {}         # extra -e key/value params passed to the JAR

    def init_resources(self, context):
        """Resolve the JAR file and derive on-device path and default package name."""
        self.uiauto_file = context.resolver.get(wlauto.common.android.resources.JarFile(self))
        if not self.uiauto_file:
            raise ResourceError('No UI automation JAR file found for workload {}.'.format(self.name))
        self.device_uiauto_file = self.device.path.join(self.device.working_directory,
                                                        os.path.basename(self.uiauto_file))
        if not self.uiauto_package:
            # Default the Java package to the JAR file's base name.
            self.uiauto_package = os.path.splitext(os.path.basename(self.uiauto_file))[0]

    def setup(self, context):
        """Build the uiautomator command, push the JAR and kill stale instances."""
        method_string = '{}.{}#{}'.format(self.uiauto_package, self.uiauto_class, self.uiauto_method)
        params_dict = self.uiauto_params
        params_dict['workdir'] = self.device.working_directory
        params = ''
        for k, v in self.uiauto_params.iteritems():
            params += ' -e {} {}'.format(k, v)
        self.command = 'uiautomator runtest {}{} -c {}'.format(self.device_uiauto_file, params, method_string)
        self.device.push_file(self.uiauto_file, self.device_uiauto_file)
        # Ensure no previous uiautomator run is still alive.
        self.device.killall('uiautomator')

    def run(self, context):
        """Execute the UI automation; raises WorkloadError if the run reports FAILURE."""
        result = self.device.execute(self.command, self.run_timeout)
        if 'FAILURE' in result:
            raise WorkloadError(result)
        else:
            self.logger.debug(result)
        # Give the device a moment to settle after the automation completes.
        time.sleep(DELAY)

    def update_result(self, context):
        # UI automation itself produces no metrics; subclasses may override.
        pass

    def teardown(self, context):
        """Remove the pushed JAR from the device."""
        self.device.delete_file(self.device_uiauto_file)

    def validate(self):
        if not self.uiauto_file:
            raise WorkloadError('No UI automation JAR file found for workload {}.'.format(self.name))
        if not self.uiauto_package:
            raise WorkloadError('No UI automation package specified for workload {}.'.format(self.name))
+
+
class ApkWorkload(Workload):
    """
    A workload based on an APK file.

    Defines the following attributes:

    :package: The package name of the app. This is usually a Java-style name of the form
              ``com.companyname.appname``.
    :activity: This is the initial activity of the app. This will be used to launch the
               app during the setup.
    :view: The class of the main view pane of the app. This needs to be defined in order
           to collect SurfaceFlinger-derived statistics (such as FPS) for the app, but
           may otherwise be left as ``None``.
    :install_timeout: Timeout for the installation of the APK. This may vary wildly based on
                      the size and nature of a specific APK, and so should be defined on
                      per-workload basis.

                      .. note:: To a lesser extent, this will also vary based on the
                                device and the nature of adb connection (USB vs Ethernet),
                                so, as with all timeouts, some leeway must be included in
                                the specified value.

    .. note:: Both package and activity for a workload may be obtained from the APK using
              the ``aapt`` tool that comes with the ADT (Android Development Tools) bundle.

    """
    package = None
    activity = None
    view = None
    install_timeout = None
    default_install_timeout = 300

    parameters = [
        Parameter('uninstall_apk', kind=boolean, default=False,
                  description="If ``True``, will uninstall workload's APK as part of teardown."),
    ]

    def __init__(self, device, _call_super=True, **kwargs):
        # _call_super=False supports co-operative multiple inheritance
        # (see AndroidUiAutoBenchmark) where Workload.__init__ was already run.
        if _call_super:
            super(ApkWorkload, self).__init__(device, **kwargs)
        self.apk_file = None      # host-side path, resolved in init_resources()
        self.apk_version = None   # set in initialize_package()
        self.logcat_log = None    # host-side logcat dump, set in update_result()
        self.force_reinstall = kwargs.get('force_reinstall', False)
        if not self.install_timeout:
            self.install_timeout = self.default_install_timeout

    def init_resources(self, context):
        """Resolve the APK for this workload (optionally pinned to self.version)."""
        self.apk_file = context.resolver.get(wlauto.common.android.resources.ApkFile(self), version=getattr(self, 'version', None))

    def setup(self, context):
        """Install/reset the app, launch its main activity and clear logcat."""
        self.initialize_package(context)
        self.start_activity()
        self.device.execute('am kill-all')  # kill all *background* activities
        self.device.clear_logcat()

    def initialize_package(self, context):
        """Ensure the device has the same APK version as the host, (re-)installing
        or resetting app state as needed. Records the version in self.apk_version."""
        installed_version = self.device.get_installed_package_version(self.package)
        host_version = ApkInfo(self.apk_file).version_name
        if installed_version != host_version:
            if installed_version:
                message = '{} host version: {}, device version: {}; re-installing...'
                self.logger.debug(message.format(os.path.basename(self.apk_file), host_version, installed_version))
            else:
                message = '{} host version: {}, not found on device; installing...'
                self.logger.debug(message.format(os.path.basename(self.apk_file), host_version))
            self.force_reinstall = True
        else:
            message = '{} version {} found on both device and host.'
            self.logger.debug(message.format(os.path.basename(self.apk_file), host_version))
        if self.force_reinstall:
            if installed_version:
                self.device.uninstall(self.package)
            self.install_apk(context)
        else:
            # Same version already installed -- just clear app state.
            self.reset(context)
        self.apk_version = host_version

    def start_activity(self):
        """Launch the app's main activity, raising WorkloadError on am failure."""
        output = self.device.execute('am start -W -n {}/{}'.format(self.package, self.activity))
        if 'Error:' in output:
            self.device.execute('am force-stop {}'.format(self.package))  # this will dismiss any error dialogs
            raise WorkloadError(output)
        self.logger.debug(output)

    def reset(self, context):  # pylint: disable=W0613
        """Stop the app and clear its data, returning it to a fresh state."""
        self.device.execute('am force-stop {}'.format(self.package))
        self.device.execute('pm clear {}'.format(self.package))

    def install_apk(self, context):
        """Install the APK on the device; tolerates ALREADY_EXISTS, raises otherwise."""
        output = self.device.install(self.apk_file, self.install_timeout)
        if 'Failure' in output:
            if 'ALREADY_EXISTS' in output:
                # Fixed typo in the original message ('unistall').
                self.logger.warn('Using already installed APK (did not uninstall properly?)')
            else:
                raise WorkloadError(output)
        else:
            self.logger.debug(output)
        self.do_post_install(context)

    def do_post_install(self, context):
        """May be overwritten by derived classes."""
        pass

    def run(self, context):
        # Subclasses implement the actual interaction with the app.
        pass

    def update_result(self, context):
        """Dump the device logcat into the output directory and register it as an artifact."""
        self.logcat_log = os.path.join(context.output_directory, 'logcat.log')
        self.device.dump_logcat(self.logcat_log)
        context.add_iteration_artifact(name='logcat',
                                       path='logcat.log',
                                       kind='log',
                                       description='Logcat dump for the run.')

    def teardown(self, context):
        """Stop the app and optionally uninstall it (per the uninstall_apk parameter)."""
        self.device.execute('am force-stop {}'.format(self.package))
        if self.uninstall_apk:
            self.device.uninstall(self.package)

    def validate(self):
        if not self.apk_file:
            raise WorkloadError('No APK file found for workload {}.'.format(self.name))
+
+
+AndroidBenchmark = ApkWorkload # backward compatibility
+
+
class ReventWorkload(Workload):
    """
    Base class for workloads that replay recorded revent input events.

    Two recordings are expected per device: ``<device>.setup.revent``
    (replayed during setup) and ``<device>.run.revent`` (replayed as the
    actual workload run).

    """

    default_setup_timeout = 5 * 60  # in seconds
    default_run_timeout = 10 * 60  # in seconds

    def __init__(self, device, _call_super=True, **kwargs):
        # _call_super=False supports co-operative multiple inheritance
        # (see GameWorkload) where Workload.__init__ was already run.
        if _call_super:
            super(ReventWorkload, self).__init__(device, **kwargs)
        devpath = self.device.path
        # On-device locations for the revent binary and the two recordings.
        self.on_device_revent_binary = devpath.join(self.device.working_directory, 'revent')
        self.on_device_setup_revent = devpath.join(self.device.working_directory, '{}.setup.revent'.format(self.device.name))
        self.on_device_run_revent = devpath.join(self.device.working_directory, '{}.run.revent'.format(self.device.name))
        self.setup_timeout = kwargs.get('setup_timeout', self.default_setup_timeout)
        self.run_timeout = kwargs.get('run_timeout', self.default_run_timeout)
        # Host-side recording paths, resolved in init_resources().
        self.revent_setup_file = None
        self.revent_run_file = None

    def init_resources(self, context):
        """Resolve the host-side revent recordings for this device."""
        self.revent_setup_file = context.resolver.get(wlauto.common.android.resources.ReventFile(self, 'setup'))
        self.revent_run_file = context.resolver.get(wlauto.common.android.resources.ReventFile(self, 'run'))

    def setup(self, context):
        """Deploy revent artifacts and replay the setup recording."""
        self._check_revent_files(context)
        # Make sure no stale replay is still running.
        self.device.killall('revent')
        command = '{} replay {}'.format(self.on_device_revent_binary, self.on_device_setup_revent)
        self.device.execute(command, timeout=self.setup_timeout)

    def run(self, context):
        """Replay the main run recording."""
        command = '{} replay {}'.format(self.on_device_revent_binary, self.on_device_run_revent)
        self.logger.debug('Replaying {}'.format(os.path.basename(self.on_device_run_revent)))
        self.device.execute(command, timeout=self.run_timeout)
        self.logger.debug('Replay completed.')

    def update_result(self, context):
        # revent replay produces no metrics of its own.
        pass

    def teardown(self, context):
        """Remove the pushed recordings from the device."""
        self.device.delete_file(self.on_device_setup_revent)
        self.device.delete_file(self.on_device_run_revent)

    def _check_revent_files(self, context):
        # Verify the revent binary and both recordings exist on the host,
        # then deploy all three to the device.
        revent_binary = context.resolver.get(Executable(NO_ONE, self.device.abi, 'revent'))
        if not os.path.isfile(revent_binary):
            message = '{} does not exist. '.format(revent_binary)
            message += 'Please build revent for your system and place it in that location'
            raise WorkloadError(message)
        if not self.revent_setup_file:
            # pylint: disable=too-few-format-args
            message = '{0}.setup.revent file does not exist, Please provide one for your device, {0}'.format(self.device.name)
            raise WorkloadError(message)
        if not self.revent_run_file:
            # pylint: disable=too-few-format-args
            message = '{0}.run.revent file does not exist, Please provide one for your device, {0}'.format(self.device.name)
            raise WorkloadError(message)

        self.on_device_revent_binary = self.device.install_executable(revent_binary)
        self.device.push_file(self.revent_run_file, self.on_device_run_revent)
        self.device.push_file(self.revent_setup_file, self.on_device_setup_revent)
+
+
class AndroidUiAutoBenchmark(UiAutomatorWorkload, AndroidBenchmark):
    """An APK-based benchmark driven via UI Automator.

    Combines UiAutomatorWorkload and AndroidBenchmark by explicitly
    delegating each lifecycle stage to both bases (UiAutomator first).
    ``_call_super=False`` prevents Workload.__init__ running twice.
    """

    def __init__(self, device, **kwargs):
        UiAutomatorWorkload.__init__(self, device, **kwargs)
        AndroidBenchmark.__init__(self, device, _call_super=False, **kwargs)

    def init_resources(self, context):
        UiAutomatorWorkload.init_resources(self, context)
        AndroidBenchmark.init_resources(self, context)

    def setup(self, context):
        UiAutomatorWorkload.setup(self, context)
        AndroidBenchmark.setup(self, context)

    def update_result(self, context):
        UiAutomatorWorkload.update_result(self, context)
        AndroidBenchmark.update_result(self, context)

    def teardown(self, context):
        UiAutomatorWorkload.teardown(self, context)
        AndroidBenchmark.teardown(self, context)
+
+
class GameWorkload(ApkWorkload, ReventWorkload):
    """
    GameWorkload is the base class for all the workloads that use revent files to
    run.

    For more in depth details on how to record revent files, please see
    :ref:`revent_files_creation`. To subclass this class, please refer to
    :ref:`GameWorkload`.

    Additionally, this class defines the following attributes:

    :asset_file: A tarball containing additional assets for the workload. These are the assets
                 that are not part of the APK but would need to be downloaded by the workload
                 (usually, on first run of the app). Since the presence of a network connection
                 cannot be assumed on some devices, this provides an alternative means of obtaining
                 the assets.
    :saved_state_file: A tarball containing the saved state for a workload. This tarball gets
                       deployed in the same way as the asset file. The only difference being that
                       it is usually much slower and re-deploying the tarball should alone be
                       enough to reset the workload to a known state (without having to reinstall
                       the app or re-deploy the other assets).
    :loading_time: Time it takes for the workload to load after the initial activity has been
                   started.

    """

    # May be optionally overwritten by subclasses
    asset_file = None
    saved_state_file = None
    view = 'SurfaceView'
    install_timeout = 500
    loading_time = 10

    def __init__(self, device, **kwargs):  # pylint: disable=W0613
        # Explicit co-operative init: ApkWorkload runs Workload.__init__;
        # ReventWorkload is told not to repeat it.
        ApkWorkload.__init__(self, device, **kwargs)
        ReventWorkload.__init__(self, device, _call_super=False, **kwargs)
        self.logcat_process = None
        # revent recordings are expected next to the workload's module, in
        # a 'revent_files' subdirectory.
        self.module_dir = os.path.dirname(sys.modules[self.__module__].__file__)
        self.revent_dir = os.path.join(self.module_dir, 'revent_files')

    def init_resources(self, context):
        ApkWorkload.init_resources(self, context)
        ReventWorkload.init_resources(self, context)

    def setup(self, context):
        """Install/launch the game, wait for it to load, then replay the setup recording."""
        ApkWorkload.setup(self, context)
        self.logger.debug('Waiting for the game to load...')
        time.sleep(self.loading_time)
        ReventWorkload.setup(self, context)

    def do_post_install(self, context):
        ApkWorkload.do_post_install(self, context)
        self._deploy_assets(context)

    def reset(self, context):
        # If saved state exists, restore it; if not, do full
        # uninstall/install cycle.
        if self.saved_state_file:
            self._deploy_resource_tarball(context, self.saved_state_file)
        else:
            ApkWorkload.reset(self, context)
            self._deploy_assets(context)

    def run(self, context):
        # The game is driven purely by revent replay.
        ReventWorkload.run(self, context)

    def teardown(self, context):
        if not self.saved_state_file:
            ApkWorkload.teardown(self, context)
        else:
            # Keep app data intact so the saved state remains usable.
            self.device.execute('am force-stop {}'.format(self.package))
        ReventWorkload.teardown(self, context)

    def _deploy_assets(self, context, timeout=300):
        if self.asset_file:
            self._deploy_resource_tarball(context, self.asset_file, timeout)
        if self.saved_state_file:  # must be deployed *after* asset tarball!
            self._deploy_resource_tarball(context, self.saved_state_file, timeout)

    def _deploy_resource_tarball(self, context, resource_file, timeout=300):
        # A resource name may be prefixed 'kind:' (e.g. 'obb:file.tar.gz') to
        # select the destination under Android/<kind>; defaults to 'data'.
        kind = 'data'
        if ':' in resource_file:
            kind, resource_file = resource_file.split(':', 1)
        # Cache the tarball on the device so repeated iterations avoid re-pushing.
        ondevice_cache = self.device.path.join(self.device.resource_cache, self.name, resource_file)
        if not self.device.file_exists(ondevice_cache):
            asset_tarball = context.resolver.get(ExtensionAsset(self, resource_file))
            if not asset_tarball:
                message = 'Could not find resource {} for workload {}.'
                raise WorkloadError(message.format(resource_file, self.name))
            # adb push will create intermediate directories if they don't
            # exist.
            self.device.push_file(asset_tarball, ondevice_cache)

        device_asset_directory = self.device.path.join(self.device.external_storage_directory, 'Android', kind)
        deploy_command = 'cd {} && {} tar -xzf {}'.format(device_asset_directory,
                                                          self.device.busybox,
                                                          ondevice_cache)
        self.device.execute(deploy_command, timeout=timeout, as_root=True)
diff --git a/wlauto/common/bin/arm64/busybox b/wlauto/common/bin/arm64/busybox
new file mode 100755
index 00000000..6d09a079
--- /dev/null
+++ b/wlauto/common/bin/arm64/busybox
Binary files differ
diff --git a/wlauto/common/bin/arm64/revent b/wlauto/common/bin/arm64/revent
new file mode 100755
index 00000000..4d7ee72f
--- /dev/null
+++ b/wlauto/common/bin/arm64/revent
Binary files differ
diff --git a/wlauto/common/bin/armeabi/busybox b/wlauto/common/bin/armeabi/busybox
new file mode 100755
index 00000000..1714d40a
--- /dev/null
+++ b/wlauto/common/bin/armeabi/busybox
Binary files differ
diff --git a/wlauto/common/bin/armeabi/revent b/wlauto/common/bin/armeabi/revent
new file mode 100755
index 00000000..e0fa4d23
--- /dev/null
+++ b/wlauto/common/bin/armeabi/revent
Binary files differ
diff --git a/wlauto/common/linux/__init__.py b/wlauto/common/linux/__init__.py
new file mode 100644
index 00000000..16224d6f
--- /dev/null
+++ b/wlauto/common/linux/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
diff --git a/wlauto/common/linux/device.py b/wlauto/common/linux/device.py
new file mode 100644
index 00000000..ecac286a
--- /dev/null
+++ b/wlauto/common/linux/device.py
@@ -0,0 +1,966 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=E1101
+import os
+import re
+from collections import namedtuple
+from subprocess import CalledProcessError
+
+from wlauto.core.extension import Parameter
+from wlauto.core.device import Device, RuntimeParameter, CoreParameter
+from wlauto.core.resource import NO_ONE
+from wlauto.exceptions import ConfigError, DeviceError, TimeoutError, DeviceNotRespondingError
+from wlauto.common.resources import Executable
+from wlauto.utils.cpuinfo import Cpuinfo
+from wlauto.utils.misc import convert_new_lines, escape_double_quotes
+from wlauto.utils.ssh import SshShell
+from wlauto.utils.types import boolean, list_of_strings
+
+
+# a dict of governor name and a list of it tunables that can't be read
+WRITE_ONLY_TUNABLES = {
+ 'interactive': ['boostpulse']
+}
+
+FstabEntry = namedtuple('FstabEntry', ['device', 'mount_point', 'fs_type', 'options', 'dump_freq', 'pass_num'])
+PsEntry = namedtuple('PsEntry', 'user pid ppid vsize rss wchan pc state name')
+
+
class BaseLinuxDevice(Device):  # pylint: disable=abstract-method
    # Common functionality for Linux-based devices (sysfs access, cpufreq
    # control, process management). Concrete devices subclass this.

    path_module = 'posixpath'
    has_gpu = True

    parameters = [
        Parameter('scheduler', kind=str, default='unknown',
                  allowed_values=['unknown', 'smp', 'hmp', 'iks', 'ea', 'other'],
                  description="""
                  Specifies the type of multi-core scheduling model utilized in the device. The value
                  must be one of the following:

                  :unknown: A generic Device interface is used to interact with the underlying device
                            and the underlying scheduling model is unkown.
                  :smp: A standard single-core or Symmetric Multi-Processing system.
                  :hmp: ARM Heterogeneous Multi-Processing system.
                  :iks: Linaro In-Kernel Switcher.
                  :ea: ARM Energy-Aware scheduler.
                  :other: Any other system not covered by the above.

                  .. note:: most currently-available systems would fall under ``smp`` rather than
                            this value. ``other`` is there to future-proof against new schemes
                            not yet covered by WA.

                  """),
        Parameter('iks_switch_frequency', kind=int, default=None,
                  description="""
                  This is the switching frequency, in kilohertz, of IKS devices. This parameter *MUST NOT*
                  be set for non-IKS device (i.e. ``scheduler != 'iks'``). If left unset for IKS devices,
                  it will default to ``800000``, i.e. 800MHz.
                  """),

    ]

    # Runtime-settable knobs; ${core} entries are expanded per core type.
    runtime_parameters = [
        RuntimeParameter('sysfile_values', 'get_sysfile_values', 'set_sysfile_values', value_name='params'),
        CoreParameter('${core}_cores', 'get_number_of_active_cores', 'set_number_of_active_cores',
                      value_name='number'),
        CoreParameter('${core}_min_frequency', 'get_core_min_frequency', 'set_core_min_frequency',
                      value_name='freq'),
        CoreParameter('${core}_max_frequency', 'get_core_max_frequency', 'set_core_max_frequency',
                      value_name='freq'),
        CoreParameter('${core}_governor', 'get_core_governor', 'set_core_governor',
                      value_name='governor'),
        CoreParameter('${core}_governor_tunables', 'get_core_governor_tunables', 'set_core_governor_tunables',
                      value_name='tunables'),
    ]
+
+ @property
+ def active_cpus(self):
+ val = self.get_sysfile_value('/sys/devices/system/cpu/online')
+ cpus = re.findall(r"([\d]\-[\d]|[\d])", val)
+ active_cpus = []
+ for cpu in cpus:
+ if '-' in cpu:
+ lo, hi = cpu.split('-')
+ active_cpus.extend(range(int(lo), int(hi) + 1))
+ else:
+ active_cpus.append(int(cpu))
+ return active_cpus
+
+ @property
+ def number_of_cores(self):
+ """
+ Added in version 2.1.4.
+
+ """
+ if self._number_of_cores is None:
+ corere = re.compile('^\s*cpu\d+\s*$')
+ output = self.execute('ls /sys/devices/system/cpu')
+ self._number_of_cores = 0
+ for entry in output.split():
+ if corere.match(entry):
+ self._number_of_cores += 1
+ return self._number_of_cores
+
    @property
    def resource_cache(self):
        # On-device directory used to cache pushed workload resources.
        return self.path.join(self.working_directory, '.cache')
+
    @property
    def file_transfer_cache(self):
        # On-device staging directory for file transfers.
        return self.path.join(self.working_directory, '.transfer')
+
    @property
    def cpuinfo(self):
        # Lazily-parsed /proc/cpuinfo, cached after first access.
        if not self._cpuinfo:
            self._cpuinfo = Cpuinfo(self.execute('cat /proc/cpuinfo'))
        return self._cpuinfo
+
    def __init__(self, **kwargs):
        super(BaseLinuxDevice, self).__init__(**kwargs)
        self.busybox = None                      # on-device busybox path/name, set in initialize()
        self._is_initialized = False
        self._is_ready = False
        self._just_rebooted = False
        self._is_rooted = None                   # tri-state: unknown until probed
        self._available_frequencies = {}         # per-cpu caches of cpufreq queries
        self._available_governors = {}
        self._available_governor_tunables = {}
        self._number_of_cores = None             # cached by the number_of_cores property
        self._written_sysfiles = []              # sysfiles modified via set_sysfile_value
        self._cpuinfo = None                     # cached by the cpuinfo property
+
    def validate(self):
        """Check configuration consistency; defaults IKS switch frequency to 800MHz."""
        if len(self.core_names) != len(self.core_clusters):
            raise ConfigError('core_names and core_clusters are of different lengths.')
        if self.iks_switch_frequency is not None and self.scheduler != 'iks':  # pylint: disable=E0203
            raise ConfigError('iks_switch_frequency must NOT be set for non-IKS devices.')
        if self.iks_switch_frequency is None and self.scheduler == 'iks':  # pylint: disable=E0203
            self.iks_switch_frequency = 800000  # pylint: disable=W0201
+
    def initialize(self, context, *args, **kwargs):
        """Create the working directory, ensure busybox is available, then
        delegate device-specific initialization to self.init()."""
        self.execute('mkdir -p {}'.format(self.working_directory))
        if self.is_rooted:
            # Deploy our own busybox only when it isn't already installed.
            if not self.is_installed('busybox'):
                self.busybox = self.deploy_busybox(context)
            else:
                self.busybox = 'busybox'
        self.init(context, *args, **kwargs)
+
    def get_sysfile_value(self, sysfile, kind=None):
        """
        Get the contents of the specified sysfile.

        :param sysfile: The file whose contents will be returned.

        :param kind: The type of value to be expected in the sysfile. This can
                     be any Python callable that takes a single str argument.
                     If not specified or is None, the contents will be returned
                     as a string.

        """
        output = self.execute('cat \'{}\''.format(sysfile), as_root=True).strip()  # pylint: disable=E1103
        if kind:
            return kind(output)
        else:
            return output
+
    def set_sysfile_value(self, sysfile, value, verify=True):
        """
        Set the value of the specified sysfile. By default, the value will be checked afterwards.
        Can be overridden by setting ``verify`` parameter to ``False``.

        :raises DeviceError: if ``verify`` is set and the read-back value differs.

        """
        value = str(value)
        # check_exit_code=False: some sysfs writes report failure spuriously;
        # correctness is established by the read-back below instead.
        self.execute('echo {} > \'{}\''.format(value, sysfile), check_exit_code=False, as_root=True)
        if verify:
            output = self.get_sysfile_value(sysfile)
            if not output.strip() == value:  # pylint: disable=E1103
                message = 'Could not set the value of {} to {}'.format(sysfile, value)
                raise DeviceError(message)
        # Remember what was written so get_sysfile_values() can report it.
        self._written_sysfiles.append(sysfile)
+
+ def get_sysfile_values(self):
+ """
+ Returns a dict mapping paths of sysfiles that were previously set to their
+ current values.
+
+ """
+ values = {}
+ for sysfile in self._written_sysfiles:
+ values[sysfile] = self.get_sysfile_value(sysfile)
+ return values
+
    def set_sysfile_values(self, params):
        """
        The plural version of ``set_sysfile_value``. Takes a single parameter which is a mapping of
        file paths to values to be set. By default, every value written will be verified. This can
        be disabled for individual paths by appending ``'!'`` to them.

        """
        for sysfile, value in params.iteritems():
            # A trailing '!' on the path opts that entry out of verification.
            verify = not sysfile.endswith('!')
            sysfile = sysfile.rstrip('!')
            self.set_sysfile_value(sysfile, value, verify=verify)
+
    def deploy_busybox(self, context, force=False):
        """
        Deploys the busybox binary to the device and returns the path to the
        binary on the device.

        :param context: an instance of ExecutionContext
        :param force: by default, if the binary is already present on the
                      device, it will not be deployed again. Setting force
                      to ``True`` overrides that behavior and ensures that the
                      binary is always copied. Defaults to ``False``.

        :returns: The on-device path to the busybox binary.

        """
        on_device_executable = self.path.join(self.binaries_directory, 'busybox')
        if not force and self.file_exists(on_device_executable):
            return on_device_executable
        # Resolve a host-side busybox built for this device's ABI and install it.
        host_file = context.resolver.get(Executable(NO_ONE, self.abi, 'busybox'))
        return self.install(host_file)
+
+ def list_file_systems(self):
+ output = self.execute('mount')
+ fstab = []
+ for line in output.split('\n'):
+ fstab.append(FstabEntry(*line.split()))
+ return fstab
+
+ # Process query and control
+
    def get_pids_of(self, process_name):
        """Returns a list of PIDs of all processes with the specified name."""
        # Only the last 15 characters are used: the kernel's process comm
        # field is truncated to 15 chars, and ps matches against that.
        result = self.execute('ps {}'.format(process_name[-15:]), check_exit_code=False).strip()
        if result and 'not found' not in result:
            # Skip the header row; PID is the second column.
            return [int(x.split()[1]) for x in result.split('\n')[1:]]
        else:
            return []
+
    def ps(self, **kwargs):
        """
        Returns the list of running processes on the device, as PsEntry tuples.
        Keyword arguments may be used to specify simple equality filters for
        columns (e.g. ``ps(name='init')``).

        Added in version 2.1.4

        """
        lines = iter(convert_new_lines(self.execute('ps')).split('\n'))
        lines.next()  # header
        result = []
        for line in lines:
            parts = line.split()
            if parts:
                # Columns 1-4 (pid, ppid, vsize, rss) are numeric; the rest are strings.
                result.append(PsEntry(*(parts[0:1] + map(int, parts[1:5]) + parts[5:])))
        if not kwargs:
            return result
        else:
            # Keep only entries whose attributes match all supplied filters.
            filtered_result = []
            for entry in result:
                if all(getattr(entry, k) == v for k, v in kwargs.iteritems()):
                    filtered_result.append(entry)
            return filtered_result
+
    def kill(self, pid, signal=None, as_root=False):  # pylint: disable=W0221
        """
        Kill the specified process.

        :param pid: PID of the process to kill.
        :param signal: Specify which signal to send to the process. This must
                       be a valid value for -s option of kill. Defaults to ``None``.
        :param as_root: If ``True``, the kill command is run as root.

        Modified in version 2.1.4: added ``signal`` parameter.

        """
        signal_string = '-s {}'.format(signal) if signal else ''
        self.execute('kill {} {}'.format(signal_string, pid), as_root=as_root)
+
    def killall(self, process_name, signal=None, as_root=False):  # pylint: disable=W0221
        """
        Kill all processes with the specified name.

        :param process_name: The name of the process(es) to kill.
        :param signal: Specify which signal to send to the process. This must
                       be a valid value for -s option of kill. Defaults to ``None``.
        :param as_root: If ``True``, each kill is run as root.

        Modified in version 2.1.5: added ``as_root`` parameter.

        """
        for pid in self.get_pids_of(process_name):
            self.kill(pid, signal=signal, as_root=as_root)
+
+ # cpufreq
+
    def list_available_cpu_governors(self, cpu):
        """Returns a list of governors supported by the cpu, caching the
        result per cpu. ``cpu`` may be an int id or a full name like ``cpu0``."""
        if isinstance(cpu, int):
            cpu = 'cpu{}'.format(cpu)
        if cpu not in self._available_governors:
            cmd = 'cat /sys/devices/system/cpu/{}/cpufreq/scaling_available_governors'.format(cpu)
            output = self.execute(cmd, check_exit_code=True)
            self._available_governors[cpu] = output.strip().split()  # pylint: disable=E1103
        return self._available_governors[cpu]
+
+ def get_cpu_governor(self, cpu):
+ """Returns the governor currently set for the specified CPU."""
+ if isinstance(cpu, int):
+ cpu = 'cpu{}'.format(cpu)
+ sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_governor'.format(cpu)
+ return self.get_sysfile_value(sysfile)
+
    def set_cpu_governor(self, cpu, governor, **kwargs):
        """
        Set the governor for the specified CPU.
        See https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt

        :param cpu: The CPU for which the governor is to be set. This must be
                    the full name as it appears in sysfs, e.g. "cpu0".
        :param governor: The name of the governor to be used. This must be
                         supported by the specific device.

        Additional keyword arguments can be used to specify governor tunables for
        governors that support them.

        :note: On big.LITTLE all cores in a cluster must be using the same governor.
               Setting the governor on any core in a cluster will also set it on all
               other cores in that cluster.

        :raises: ConfigError if governor is not supported by the CPU.
        :raises: DeviceError if, for some reason, the governor could not be set.

        """
        if isinstance(cpu, int):
            cpu = 'cpu{}'.format(cpu)
        # Fail fast if the kernel does not offer this governor for the cpu.
        supported = self.list_available_cpu_governors(cpu)
        if governor not in supported:
            raise ConfigError('Governor {} not supported for cpu {}'.format(governor, cpu))
        sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_governor'.format(cpu)
        self.set_sysfile_value(sysfile, governor)
        # Apply any governor tunables passed as keyword arguments.
        self.set_cpu_governor_tunables(cpu, governor, **kwargs)
+
    def list_available_cpu_governor_tunables(self, cpu):
        """Returns a list of tunables available for the governor on the specified CPU.

        Results are cached per governor. Falls back from the per-cpu tunables
        path to the global one for older kernels, and to an empty list if the
        governor exposes no tunables at all.
        """
        if isinstance(cpu, int):
            cpu = 'cpu{}'.format(cpu)
        governor = self.get_cpu_governor(cpu)
        if governor not in self._available_governor_tunables:
            try:
                tunables_path = '/sys/devices/system/cpu/{}/cpufreq/{}'.format(cpu, governor)
                self._available_governor_tunables[governor] = self.listdir(tunables_path)
            except DeviceError:  # probably an older kernel
                try:
                    tunables_path = '/sys/devices/system/cpu/cpufreq/{}'.format(governor)
                    self._available_governor_tunables[governor] = self.listdir(tunables_path)
                except DeviceError:  # governor does not support tunables
                    self._available_governor_tunables[governor] = []
        return self._available_governor_tunables[governor]
+
+    def get_cpu_governor_tunables(self, cpu):
+        """
+        Return a dict mapping tunable names to their current values for the
+        governor set on the specified CPU. Tunables listed in
+        ``WRITE_ONLY_TUNABLES`` for the governor are skipped (they cannot be
+        read back).
+
+        :param cpu: Full CPU name as in sysfs (e.g. ``cpu0``) or an int index.
+        """
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        governor = self.get_cpu_governor(cpu)
+        tunables = {}
+        for tunable in self.list_available_cpu_governor_tunables(cpu):
+            if tunable not in WRITE_ONLY_TUNABLES.get(governor, []):
+                try:
+                    # Newer kernels: per-cpu tunables location.
+                    path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
+                    tunables[tunable] = self.get_sysfile_value(path)
+                except DeviceError:  # May be an older kernel
+                    # Older kernels: single global tunables location.
+                    path = '/sys/devices/system/cpu/cpufreq/{}/{}'.format(governor, tunable)
+                    tunables[tunable] = self.get_sysfile_value(path)
+        return tunables
+
+    def set_cpu_governor_tunables(self, cpu, governor, **kwargs):
+        """
+        Set tunables for the specified governor. Tunables should be specified as
+        keyword arguments. Which tunables and values are valid depends on the
+        governor.
+
+        :param cpu: The cpu for which the tunables will be set. This must be the
+                    full cpu name as it appears in sysfs, e.g. ``cpu0``, or an
+                    int cpu index.
+        :param governor: The name of the governor. Must be all lower case. Note
+                         that the governor name itself is not validated here --
+                         only the tunable names are checked against what the
+                         current governor exposes.
+
+        The rest should be keyword parameters mapping tunable name onto the value
+        to be set for it.
+
+        :raises: ConfigError if a tunable specified is not valid for the governor
+                 on this cpu.
+        :raises: DeviceError if could not set tunable.
+
+        """
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        valid_tunables = self.list_available_cpu_governor_tunables(cpu)
+        for tunable, value in kwargs.iteritems():
+            if tunable in valid_tunables:
+                try:
+                    # Newer kernels: per-cpu tunables location.
+                    path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
+                    self.set_sysfile_value(path, value)
+                except DeviceError:  # May be an older kernel
+                    # Older kernels: single global tunables location.
+                    path = '/sys/devices/system/cpu/cpufreq/{}/{}'.format(governor, tunable)
+                    self.set_sysfile_value(path, value)
+            else:
+                message = 'Unexpected tunable {} for governor {} on {}.\n'.format(tunable, governor, cpu)
+                message += 'Available tunables are: {}'.format(valid_tunables)
+                raise ConfigError(message)
+
+    def enable_cpu(self, cpu):
+        """
+        Enable (hotplug in) the specified core.
+
+        :param cpu: CPU core to enable. Either the full name as it appears in
+                    sysfs, e.g. ``"cpu0"``, or an int cpu index.
+
+        """
+        self.hotplug_cpu(cpu, online=True)
+
+    def disable_cpu(self, cpu):
+        """
+        Disable (hotplug out) the specified core.
+
+        :param cpu: CPU core to disable. Either the full name as it appears in
+                    sysfs, e.g. ``"cpu0"``, or an int cpu index.
+        """
+        self.hotplug_cpu(cpu, online=False)
+
+    def hotplug_cpu(self, cpu, online):
+        """
+        Hotplug the specified CPU either on or off.
+        See https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
+
+        :param cpu: The CPU to be brought online or taken offline. Either the
+                    full name as it appears in sysfs, e.g. "cpu0", or an int
+                    cpu index. (The original docstring said "for which the
+                    governor is to be set" -- a copy-paste error.)
+        :param online: CPU will be enabled if this value bool()'s to True, and
+                       will be disabled otherwise.
+
+        """
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        # The kernel's online file takes "1" to plug in and "0" to unplug.
+        status = 1 if online else 0
+        sysfile = '/sys/devices/system/cpu/{}/online'.format(cpu)
+        self.set_sysfile_value(sysfile, status)
+
+    def list_available_cpu_frequencies(self, cpu):
+        """
+        Return the list of frequencies (as ints) supported by the cpu, or an
+        empty list if none could be found.
+
+        :param cpu: Full CPU name as in sysfs (e.g. ``cpu0``) or an int index.
+
+        Results are cached per-cpu in ``self._available_frequencies``.
+        """
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        if cpu not in self._available_frequencies:
+            try:
+                cmd = 'cat /sys/devices/system/cpu/{}/cpufreq/scaling_available_frequencies'.format(cpu)
+                output = self.execute(cmd)
+                self._available_frequencies[cpu] = map(int, output.strip().split())  # pylint: disable=E1103
+            except DeviceError:
+                # We return an empty list because on some devices scaling_available_frequencies
+                # is not generated, so an empty list serves as the "unknown" indication.
+                # http://adrynalyne-teachtofish.blogspot.co.uk/2011/11/how-to-enable-scalingavailablefrequenci.html
+                self._available_frequencies[cpu] = []
+        return self._available_frequencies[cpu]
+
+ def get_cpu_min_frequency(self, cpu):
+ """
+ Returns the min frequency currently set for the specified CPU.
+
+ Warning, this method does not check if the cpu is online or not. It will
+ try to read the minimum frequency and the following exception will be
+ raised ::
+
+ :raises: DeviceError if for some reason the frequency could not be read.
+
+ """
+ sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_min_freq'.format(cpu)
+ return self.get_sysfile_value(sysfile)
+
+ def set_cpu_min_frequency(self, cpu, frequency):
+ """
+ Set's the minimum value for CPU frequency. Actual frequency will
+ depend on the Governor used and may vary during execution. The value should be
+ either an int or a string representing an integer. The Value must also be
+ supported by the device. The available frequencies can be obtained by calling
+ get_available_frequencies() or examining
+
+ /sys/devices/system/cpu/cpuX/cpufreq/scaling_available_frequencies
+
+ on the device.
+
+ :raises: ConfigError if the frequency is not supported by the CPU.
+ :raises: DeviceError if, for some reason, frequency could not be set.
+
+ """
+ if isinstance(cpu, int):
+ cpu = 'cpu{}'.format(cpu)
+ available_frequencies = self.list_available_cpu_frequencies(cpu)
+ try:
+ value = int(frequency)
+ if available_frequencies and value not in available_frequencies:
+ raise ConfigError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
+ value,
+ available_frequencies))
+ sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_min_freq'.format(cpu)
+ self.set_sysfile_value(sysfile, value)
+ except ValueError:
+ raise ValueError('value must be an integer; got: "{}"'.format(value))
+
+    def get_cpu_max_frequency(self, cpu):
+        """
+        Return the max frequency currently set for the specified CPU.
+
+        Warning: this method does not check whether the cpu is online; if it is
+        not, reading the sysfs entry will fail.
+
+        :param cpu: Full CPU name as in sysfs (e.g. ``cpu0``) or an int index.
+        :raises: DeviceError if for some reason the frequency could not be read.
+        """
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_max_freq'.format(cpu)
+        return self.get_sysfile_value(sysfile)
+
+ def set_cpu_max_frequency(self, cpu, frequency):
+ """
+ Set's the minimum value for CPU frequency. Actual frequency will
+ depend on the Governor used and may vary during execution. The value should be
+ either an int or a string representing an integer. The Value must also be
+ supported by the device. The available frequencies can be obtained by calling
+ get_available_frequencies() or examining
+
+ /sys/devices/system/cpu/cpuX/cpufreq/scaling_available_frequencies
+
+ on the device.
+
+ :raises: ConfigError if the frequency is not supported by the CPU.
+ :raises: DeviceError if, for some reason, frequency could not be set.
+
+ """
+ if isinstance(cpu, int):
+ cpu = 'cpu{}'.format(cpu)
+ available_frequencies = self.list_available_cpu_frequencies(cpu)
+ try:
+ value = int(frequency)
+ if available_frequencies and value not in available_frequencies:
+ raise DeviceError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
+ value,
+ available_frequencies))
+ sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_max_freq'.format(cpu)
+ self.set_sysfile_value(sysfile, value)
+ except ValueError:
+ raise ValueError('value must be an integer; got: "{}"'.format(value))
+
+ def get_cpuidle_states(self, cpu=0):
+ """
+ Return map of cpuidle states with their descriptive names.
+ """
+ if isinstance(cpu, int):
+ cpu = 'cpu{}'.format(cpu)
+ cpuidle_states = {}
+ statere = re.compile('^\s*state\d+\s*$')
+ output = self.execute("ls /sys/devices/system/cpu/{}/cpuidle".format(cpu))
+ for entry in output.split():
+ if statere.match(entry):
+ cpuidle_states[entry] = self.get_sysfile_value("/sys/devices/system/cpu/{}/cpuidle/{}/desc".format(cpu, entry))
+ return cpuidle_states
+
+ # Core- and cluster-level mapping for the above cpu-level APIs above. The
+ # APIs make the following assumptions, which were True for all devices that
+ # existed at the time of writing:
+ # 1. A cluster can only contain cores of one type.
+ # 2. All cores in a cluster are tied to the same DVFS domain, therefore
+ # changes to cpufreq for a core will affect all other cores on the
+ # same cluster.
+
+ def get_core_clusters(self, core, strict=True):
+ """Returns the list of clusters that contain the specified core. if ``strict``
+ is ``True``, raises ValueError if no clusters has been found (returns empty list
+ if ``strict`` is ``False``)."""
+ core_indexes = [i for i, c in enumerate(self.core_names) if c == core]
+ clusters = sorted(list(set(self.core_clusters[i] for i in core_indexes)))
+ if strict and not clusters:
+ raise ValueError('No active clusters for core {}'.format(core))
+ return clusters
+
+ def get_cluster_cpu(self, cluster):
+ """Returns the first *active* cpu for the cluster. If the entire cluster
+ has been hotplugged, this will raise a ``ValueError``."""
+ cpu_indexes = set([i for i, c in enumerate(self.core_clusters) if c == cluster])
+ active_cpus = sorted(list(cpu_indexes.intersection(self.active_cpus)))
+ if not active_cpus:
+ raise ValueError('All cpus for cluster {} are offline'.format(cluster))
+ return active_cpus[0]
+
+    def list_available_cluster_governors(self, cluster):
+        """Return governors supported by the first active cpu in the cluster."""
+        return self.list_available_cpu_governors(self.get_cluster_cpu(cluster))
+
+    def get_cluster_governor(self, cluster):
+        """Return the governor currently set for the cluster."""
+        return self.get_cpu_governor(self.get_cluster_cpu(cluster))
+
+    def set_cluster_governor(self, cluster, governor, **tunables):
+        """Set the governor (and optional tunables) for the cluster."""
+        return self.set_cpu_governor(self.get_cluster_cpu(cluster), governor, **tunables)
+
+    def list_available_cluster_governor_tunables(self, cluster):
+        """Return tunables supported by the cluster's current governor."""
+        return self.list_available_cpu_governor_tunables(self.get_cluster_cpu(cluster))
+
+    def get_cluster_governor_tunables(self, cluster):
+        """Return current governor tunable values for the cluster."""
+        return self.get_cpu_governor_tunables(self.get_cluster_cpu(cluster))
+
+    def set_cluster_governor_tunables(self, cluster, governor, **tunables):
+        """Set governor tunables for the cluster."""
+        return self.set_cpu_governor_tunables(self.get_cluster_cpu(cluster), governor, **tunables)
+
+    def get_cluster_min_frequency(self, cluster):
+        """Return the minimum frequency currently set for the cluster."""
+        return self.get_cpu_min_frequency(self.get_cluster_cpu(cluster))
+
+    def set_cluster_min_frequency(self, cluster, freq):
+        """Set the minimum frequency for the cluster."""
+        return self.set_cpu_min_frequency(self.get_cluster_cpu(cluster), freq)
+
+    def get_cluster_max_frequency(self, cluster):
+        """Return the maximum frequency currently set for the cluster."""
+        return self.get_cpu_max_frequency(self.get_cluster_cpu(cluster))
+
+    def set_cluster_max_frequency(self, cluster, freq):
+        """Set the maximum frequency for the cluster."""
+        return self.set_cpu_max_frequency(self.get_cluster_cpu(cluster), freq)
+
+ def get_core_cpu(self, core):
+ for cluster in self.get_core_clusters(core):
+ try:
+ return self.get_cluster_cpu(cluster)
+ except ValueError:
+ pass
+ raise ValueError('No active CPUs found for core {}'.format(core))
+
+    def list_available_core_governors(self, core):
+        """Return governors supported by the first active cpu of this core type."""
+        return self.list_available_cpu_governors(self.get_core_cpu(core))
+
+    def get_core_governor(self, core):
+        """Return the governor currently set for this core type."""
+        return self.get_cpu_governor(self.get_core_cpu(core))
+
+    def set_core_governor(self, core, governor, **tunables):
+        """Set the governor on every cluster containing this core type."""
+        for cluster in self.get_core_clusters(core):
+            self.set_cluster_governor(cluster, governor, **tunables)
+
+    def list_available_core_governor_tunables(self, core):
+        """Return tunables supported by this core type's current governor."""
+        return self.list_available_cpu_governor_tunables(self.get_core_cpu(core))
+
+    def get_core_governor_tunables(self, core):
+        """Return current governor tunable values for this core type."""
+        return self.get_cpu_governor_tunables(self.get_core_cpu(core))
+
+    def set_core_governor_tunables(self, core, tunables):
+        """Set governor tunables (a dict) on every cluster containing this core type."""
+        for cluster in self.get_core_clusters(core):
+            governor = self.get_cluster_governor(cluster)
+            self.set_cluster_governor_tunables(cluster, governor, **tunables)
+
+    def get_core_min_frequency(self, core):
+        """Return the minimum frequency currently set for this core type."""
+        return self.get_cpu_min_frequency(self.get_core_cpu(core))
+
+    def set_core_min_frequency(self, core, freq):
+        """Set the minimum frequency on every cluster containing this core type."""
+        for cluster in self.get_core_clusters(core):
+            self.set_cluster_min_frequency(cluster, freq)
+
+    def get_core_max_frequency(self, core):
+        """Return the maximum frequency currently set for this core type."""
+        return self.get_cpu_max_frequency(self.get_core_cpu(core))
+
+    def set_core_max_frequency(self, core, freq):
+        """Set the maximum frequency on every cluster containing this core type."""
+        for cluster in self.get_core_clusters(core):
+            self.set_cluster_max_frequency(cluster, freq)
+
+ def get_number_of_active_cores(self, core):
+ if core not in self.core_names:
+ raise ValueError('Unexpected core: {}; must be in {}'.format(core, list(set(self.core_names))))
+ active_cpus = self.active_cpus
+ num_active_cores = 0
+ for i, c in enumerate(self.core_names):
+ if c == core and i in active_cpus:
+ num_active_cores += 1
+ return num_active_cores
+
+    def set_number_of_active_cores(self, core, number):
+        """
+        Hotplug cpus so that exactly ``number`` cpus of the specified core type
+        are online: the first ``number`` cpus of that type (by index) are
+        enabled and the remainder are disabled.
+
+        :raises: ValueError if ``core`` is not a valid core name, or if
+                 ``number`` exceeds the count of cpus of that type.
+        """
+        if core not in self.core_names:
+            raise ValueError('Unexpected core: {}; must be in {}'.format(core, list(set(self.core_names))))
+        core_ids = [i for i, c in enumerate(self.core_names) if c == core]
+        max_cores = len(core_ids)
+        if number > max_cores:
+            message = 'Attempting to set the number of active {} to {}; maximum is {}'
+            raise ValueError(message.format(core, number, max_cores))
+        for i in xrange(0, number):
+            self.enable_cpu(core_ids[i])
+        for i in xrange(number, max_cores):
+            self.disable_cpu(core_ids[i])
+
+ # internal methods
+
+    def _check_ready(self):
+        # Guard used by methods that require an established connection.
+        if not self._is_ready:
+            raise AttributeError('Device not ready.')
+
+ def _get_core_cluster(self, core):
+ """Returns the first cluster that has cores of the specified type. Raises
+ value error if no cluster for the specified type has been found"""
+ core_indexes = [i for i, c in enumerate(self.core_names) if c == core]
+ core_clusters = set(self.core_clusters[i] for i in core_indexes)
+ if not core_clusters:
+ raise ValueError('No cluster found for core {}'.format(core))
+ return sorted(list(core_clusters))[0]
+
+
+class LinuxDevice(BaseLinuxDevice):
+
+ platform = 'linux'
+
+ default_timeout = 30
+ delay = 2
+ long_delay = 3 * delay
+ ready_timeout = 60
+
+ parameters = [
+ Parameter('host', mandatory=True, description='Host name or IP address for the device.'),
+ Parameter('username', mandatory=True, description='User name for the account on the device.'),
+ Parameter('password', description='Password for the account on the device (for password-based auth).'),
+ Parameter('keyfile', description='Keyfile to be used for key-based authentication.'),
+ Parameter('port', kind=int, description='SSH port number on the device.'),
+
+ Parameter('use_telnet', kind=boolean, default=False,
+ description='Optionally, telnet may be used instead of ssh, though this is discouraged.'),
+
+ Parameter('working_directory', default=None,
+ description='''
+ Working directory to be used by WA. This must be in a location where the specified user
+ has write permissions. This will default to /home/<username>/wa (or to /root/wa, if
+ username is 'root').
+ '''),
+ Parameter('binaries_directory', default='/usr/local/bin',
+ description='Location of executable binaries on this device (must be in PATH).'),
+ Parameter('property_files', kind=list_of_strings,
+ default=['/proc/version', '/etc/debian_version', '/etc/lsb-release', '/etc/arch-release'],
+ description='''
+ A list of paths to files containing static OS properties. These will be pulled into the
+ __meta directory in output for each run in order to provide information about the platfrom.
+ These paths do not have to exist and will be ignored if the path is not present on a
+ particular device.
+ '''),
+ ]
+
+ @property
+ def is_rooted(self):
+ if self._is_rooted is None:
+ try:
+ self.execute('ls /', as_root=True)
+ self._is_rooted = True
+ except DeviceError:
+ self._is_rooted = False
+ return self._is_rooted
+
+    def __init__(self, *args, **kwargs):
+        super(LinuxDevice, self).__init__(*args, **kwargs)
+        self.shell = None  # SshShell; created in connect()
+        self.local_binaries_directory = None  # derived in validate()
+        self._is_rooted = None  # lazily determined by the is_rooted property
+
+    def validate(self):
+        """Check authentication settings and derive default directories."""
+        if not self.password and not self.keyfile:
+            raise ConfigError('Either a password or a keyfile must be provided.')
+        if self.working_directory is None:  # pylint: disable=access-member-before-definition
+            if self.username == 'root':
+                self.working_directory = '/root/wa'  # pylint: disable=attribute-defined-outside-init
+            else:
+                self.working_directory = '/home/{}/wa'.format(self.username)  # pylint: disable=attribute-defined-outside-init
+        self.local_binaries_directory = self.path.join(self.working_directory, 'bin')
+
+    def initialize(self, context, *args, **kwargs):
+        # Create the on-device bin directory and put it on PATH.
+        # NOTE(review): the exported PATH only takes effect if later commands
+        # run in the same shell session -- confirm SshShell keeps one session.
+        self.execute('mkdir -p {}'.format(self.local_binaries_directory))
+        self.execute('export PATH={}:$PATH'.format(self.local_binaries_directory))
+        super(LinuxDevice, self).initialize(context, *args, **kwargs)
+
+ # Power control
+
+    def reset(self):
+        """Reboot the device and mark the connection as no longer ready."""
+        self._is_ready = False
+        self.execute('reboot', as_root=True)
+
+    def hard_reset(self):
+        # Delegates the actual power-cycle to the base class; just invalidates
+        # the connection state here.
+        super(LinuxDevice, self).hard_reset()
+        self._is_ready = False
+
+    def boot(self, **kwargs):
+        # A generic Linux box "boots" by being rebooted.
+        self.reset()
+
+    def connect(self):  # NOQA pylint: disable=R0912
+        """Open the SSH (or telnet) session to the device."""
+        self.shell = SshShell(timeout=self.default_timeout)
+        self.shell.login(self.host, self.username, self.password, self.keyfile, self.port, telnet=self.use_telnet)
+        self._is_ready = True
+
+    def disconnect(self):  # NOQA pylint: disable=R0912
+        """Close the session opened by connect()."""
+        self.shell.logout()
+        self._is_ready = False
+
+ # Execution
+
+ def has_root(self):
+ try:
+ self.execute('ls /', as_root=True)
+ return True
+ except DeviceError as e:
+ if 'not in the sudoers file' not in e.message:
+ raise e
+ return False
+
+    def execute(self, command, timeout=default_timeout, check_exit_code=True, background=False,
+                as_root=False, strip_colors=True, **kwargs):
+        """
+        Execute the specified command on the device over the SSH (or telnet)
+        shell connection. (The original docstring said "using adb" -- a
+        copy-paste from the Android device; this is the Linux device.)
+
+        Parameters:
+
+        :param command: The command to be executed. It should appear exactly
+                        as if you were typing it into a shell.
+        :param timeout: Time, in seconds, to wait for the command to return before
+                        aborting and raising an error. Defaults to
+                        ``LinuxDevice.default_timeout``.
+        :param check_exit_code: If ``True``, the return code of the command on the Device will
+                                be checked and an exception will be raised if it is not 0.
+                                Defaults to ``True``.
+        :param background: If ``True``, will create a new ssh shell rather than using
+                           the default session and will return it immediately. If this is ``True``,
+                           ``timeout``, ``strip_colors`` and (obviously) ``check_exit_code`` will
+                           be ignored; also, with this, ``as_root=True`` is only valid if ``username``
+                           for the device was set to ``root``.
+        :param as_root: If ``True``, will attempt to execute command in privileged mode. The device
+                        must be rooted, otherwise an error will be raised. Defaults to ``False``.
+
+                        Added in version 2.1.3
+
+        :returns: If ``background`` parameter is set to ``True``, the subprocess object will
+                  be returned; otherwise, the contents of STDOUT from the device will be returned.
+
+        """
+        self._check_ready()
+        if background:
+            if as_root and self.username != 'root':
+                raise DeviceError('Cannot execute in background with as_root=True unless user is root.')
+            return self.shell.background(command)
+        else:
+            return self.shell.execute(command, timeout, check_exit_code, as_root, strip_colors)
+
+    def kick_off(self, command):
+        """
+        Like execute(), but returns immediately, leaving the command running
+        detached on the device with its output discarded. This differs from
+        execute(background=True), which keeps the connection open and returns
+        a subprocess object. (The original docstring mentioned "adb" -- a
+        copy-paste from the Android device; this device uses ssh.)
+
+        """
+        self._check_ready()
+        command = 'sh -c "{}" 1>/dev/null 2>/dev/null &'.format(escape_double_quotes(command))
+        return self.shell.execute(command)
+
+ # File management
+
+    def push_file(self, source, dest, as_root=False, timeout=default_timeout):  # pylint: disable=W0221
+        """
+        Copy a file from the host to the device.
+
+        When ``as_root`` is set and the login user is not root, the file is
+        first pushed to the working directory and then copied to ``dest``
+        with sudo.
+        """
+        self._check_ready()
+        if not as_root or self.username == 'root':
+            self.shell.push_file(source, dest, timeout=timeout)
+        else:
+            tempfile = self.path.join(self.working_directory, self.path.basename(dest))
+            self.shell.push_file(source, tempfile, timeout=timeout)
+            self.shell.execute('cp -r {} {}'.format(tempfile, dest), timeout=timeout, as_root=True)
+
+    def pull_file(self, source, dest, as_root=False, timeout=default_timeout):  # pylint: disable=W0221
+        """
+        Copy a file from the device to the host.
+
+        When ``as_root`` is set and the login user is not root, the file is
+        first copied (with sudo) into the working directory, chown'd to the
+        login user so scp can read it, and then pulled from there.
+        """
+        self._check_ready()
+        if not as_root or self.username == 'root':
+            self.shell.pull_file(source, dest, timeout=timeout)
+        else:
+            tempfile = self.path.join(self.working_directory, self.path.basename(source))
+            self.shell.execute('cp -r {} {}'.format(source, tempfile), timeout=timeout, as_root=True)
+            self.shell.execute('chown -R {} {}'.format(self.username, tempfile), timeout=timeout, as_root=True)
+            self.shell.pull_file(tempfile, dest, timeout=timeout)
+
+    def delete_file(self, filepath, as_root=False):  # pylint: disable=W0221
+        """Remove the specified file (or directory tree) from the device."""
+        self.execute('rm -rf {}'.format(filepath), as_root=as_root)
+
+    def file_exists(self, filepath):
+        """Return True if the specified path exists on the device."""
+        output = self.execute('if [ -e \'{}\' ]; then echo 1; else echo 0; fi'.format(filepath))
+        return boolean(output.strip())  # pylint: disable=maybe-no-member
+
+    def listdir(self, path, as_root=False, **kwargs):
+        """Return the entries of the specified device directory (via ``ls -1``)."""
+        contents = self.execute('ls -1 {}'.format(path), as_root=as_root)
+        return [x.strip() for x in contents.split('\n')]  # pylint: disable=maybe-no-member
+
+ def install(self, filepath, timeout=default_timeout, with_name=None): # pylint: disable=W0221
+ if self.is_rooted:
+ destpath = self.path.join(self.binaries_directory,
+ with_name and with_name or self.path.basename(filepath))
+ self.push_file(filepath, destpath, as_root=True)
+ self.execute('chmod a+x {}'.format(destpath), timeout=timeout, as_root=True)
+ else:
+ destpath = self.path.join(self.local_binaries_directory,
+ with_name and with_name or self.path.basename(filepath))
+ self.push_file(filepath, destpath)
+ self.execute('chmod a+x {}'.format(destpath), timeout=timeout)
+ return destpath
+
+ install_executable = install # compatibility
+
+    def uninstall(self, name):
+        """
+        Remove a previously installed executable.
+
+        NOTE(review): this only removes from local_binaries_directory, but
+        install() deploys to binaries_directory on rooted devices -- verify
+        whether rooted installs are expected to be uninstallable here.
+        """
+        path = self.path.join(self.local_binaries_directory, name)
+        self.delete_file(path)
+
+    uninstall_executable = uninstall  # compatibility
+
+ def is_installed(self, name):
+ try:
+ self.execute('which {}'.format(name))
+ return True
+ except DeviceError:
+ return False
+
+ # misc
+
+    def ping(self):
+        """
+        Check that the device is responsive by running a trivial command over
+        the existing shell connection.
+
+        :raises: DeviceNotRespondingError if the command fails or times out.
+        """
+        try:
+            # May be triggered inside initialize()
+            self.shell.execute('ls /', timeout=5)
+        except (TimeoutError, CalledProcessError):
+            raise DeviceNotRespondingError(self.host)
+
+    def capture_screen(self, filepath):
+        """
+        Take a screenshot with scrot (if installed) and pull it to
+        ``filepath`` on the host. Logs and returns quietly if scrot is not
+        installed or no X display can be opened.
+        """
+        if not self.is_installed('scrot'):
+            self.logger.debug('Could not take screenshot as scrot is not installed.')
+            return
+        try:
+            tempfile = self.path.join(self.working_directory, os.path.basename(filepath))
+            self.execute('DISPLAY=:0.0 scrot {}'.format(tempfile))
+            self.pull_file(tempfile, filepath)
+            self.delete_file(tempfile)
+        except DeviceError as e:
+            # NOTE(review): "dispay" looks like a typo but appears to match the
+            # misspelling in scrot/giblib's actual error output -- confirm
+            # against the deployed scrot version before "fixing" this string.
+            if "Can't open X dispay." not in e.message:
+                raise e
+            message = e.message.split('OUTPUT:', 1)[1].strip()
+            self.logger.debug('Could not take screenshot: {}'.format(message))
+
+    def is_screen_on(self):
+        """Stub: screen-state detection is not implemented for generic Linux devices."""
+        pass  # TODO
+
+    def ensure_screen_is_on(self):
+        """Stub: screen control is not implemented for generic Linux devices."""
+        pass  # TODO
+
+ def get_properties(self, context):
+ for propfile in self.property_files:
+ if not self.file_exists(propfile):
+ continue
+ normname = propfile.lstrip(self.path.sep).replace(self.path.sep, '.')
+ outfile = os.path.join(context.host_working_directory, normname)
+ self.pull_file(propfile, outfile)
+ return {}
+
diff --git a/wlauto/common/resources.py b/wlauto/common/resources.py
new file mode 100644
index 00000000..bd841428
--- /dev/null
+++ b/wlauto/common/resources.py
@@ -0,0 +1,64 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+
+from wlauto.core.resource import Resource
+
+
+class FileResource(Resource):
+    """
+    Base class for all resources that are a regular file in the
+    file system.
+
+    """
+
+    def delete(self, instance):
+        # A resolved instance of a file resource is its path on disk.
+        os.remove(instance)
+
+
+class File(FileResource):
+    """A file resource identified by a filesystem path and/or a URL."""
+
+    name = 'file'
+
+    def __init__(self, owner, path, url=None):
+        super(File, self).__init__(owner)
+        self.path = path
+        self.url = url
+
+    def __str__(self):
+        return '<{}\'s {} {}>'.format(self.owner, self.name, self.path or self.url)
+
+
+class ExtensionAsset(File):
+    """A file that ships with an extension; its path is resolved relative to
+    the owning extension's name."""
+
+    name = 'extension_asset'
+
+    def __init__(self, owner, path):
+        super(ExtensionAsset, self).__init__(owner, os.path.join(owner.name, path))
+
+
+class Executable(FileResource):
+    """A binary resource, selected by target platform and file name."""
+
+    name = 'executable'
+
+    def __init__(self, owner, platform, filename):
+        super(Executable, self).__init__(owner)
+        self.platform = platform
+        self.filename = filename
+
+    def __str__(self):
+        return '<{}\'s {} {}>'.format(self.owner, self.platform, self.filename)
diff --git a/wlauto/config_example.py b/wlauto/config_example.py
new file mode 100644
index 00000000..66eed1d8
--- /dev/null
+++ b/wlauto/config_example.py
@@ -0,0 +1,284 @@
+"""
+Default config for Workload Automation. DO NOT MODIFY this file. This file
+gets copied to ~/.workload_automation/config.py on initial run of run_workloads.
+Add your configuration to that file instead.
+
+"""
+# *** WARNING: ***
+# Configuration listed in this file is NOT COMPLETE. This file sets the default
+# configuration for WA and gives EXAMPLES of other configuration available. It
+# is not supposed to be an exhaustive list.
+# PLEASE REFER TO WA DOCUMENTATION FOR THE COMPLETE LIST OF AVAILABLE
+# EXTENSIONS AND THEIR CONFIGURATION.
+
+
+# This defines when the device will be rebooted during Workload Automation execution. #
+# #
+# Valid policies are: #
+# never: The device will never be rebooted. #
+#   as_needed:      The device will only be rebooted if the need arises (e.g. if it                #
+#                   becomes unresponsive).                                                         #
+# initial: The device will be rebooted when the execution first starts, just before executing #
+# the first workload spec. #
+# each_spec: The device will be rebooted before running a new workload spec. #
+# each_iteration: The device will be rebooted before each new iteration. #
+# #
+reboot_policy = 'as_needed'
+
+# Defines the order in which the agenda spec will be executed. At the moment, #
+# the following execution orders are supported: #
+# #
+#   by_iteration: The first iteration of each workload spec is executed one after the other,       #
+# so all workloads are executed before proceeding on to the second iteration. #
+# This is the default if no order is explicitly specified. #
+# If multiple sections were specified, this will also split them up, so that specs #
+# in the same section are further apart in the execution order. #
+#   by_section:   Same as "by_iteration", but runs specs from the same section one after the other #
+# by_spec: All iterations of the first spec are executed before moving on to the next #
+# spec. This may also be specified as ``"classic"``, as this was the way #
+# workloads were executed in earlier versions of WA. #
+#   random:       Randomises the order in which specs run.                                         #
+execution_order = 'by_iteration'
+
+####################################################################################################
+######################################### Device Settings ##########################################
+####################################################################################################
+# Specify the device you want to run workload automation on. This must be a #
+# string with the ID of the device. At the moment, only 'TC2' is supported. #
+# #
+device = 'generic_android'
+
+# Configuration options that will be passed onto the device. These are obviously device-specific, #
+# so check the documentation for the particular device to find out which options and values are #
+# valid. The settings listed below are common to all devices #
+# #
+device_config = dict(
+ # The name used by adb to identify the device. Use "adb devices" in bash to list
+ # the devices currently seen by adb.
+ #adb_name='10.109.173.2:5555',
+
+ # The directory on the device that WA will use to push files to
+ #working_directory='/sdcard/wa-working',
+
+ # This specifies the device's CPU cores. The order must match how they
+ # appear in cpufreq. The example below is for TC2.
+ # core_names = ['a7', 'a7', 'a7', 'a15', 'a15']
+
+ # Specifies cluster mapping for the device's cores.
+ # core_clusters = [0, 0, 0, 1, 1]
+)
+
+
+####################################################################################################
+################################## Instrumentation Configuration ###################################
+####################################################################################################
+# This defines the additional instrumentation that will be enabled during workload execution,      #
+# which in turn determines what additional data (such as /proc/interrupts content or Streamline #
+# traces) will be available in the results directory. #
+# #
+instrumentation = [
+ # Records the time it took to run the workload
+ 'execution_time',
+
+ # Collects /proc/interrupts before and after execution and does a diff.
+ 'interrupts',
+
+    # Collects the contents of /sys/devices/system/cpu before and after execution and does a diff.
+ 'cpufreq',
+
+ # Gets energy usage from the workload form HWMON devices
+ # NOTE: the hardware needs to have the right sensors in order for this to work
+ #'hwmon',
+
+ # Run perf in the background during workload execution and then collect the results. perf is a
+ # standard Linux performance analysis tool.
+ #'perf',
+
+ # Collect Streamline traces during workload execution. Streamline is part of DS-5
+ #'streamline',
+
+ # Collects traces by interacting with Ftrace Linux kernel internal tracer
+ #'trace-cmd',
+
+ # Obtains the power consumption of the target device's core measured by National Instruments
+ # Data Acquisition(DAQ) device.
+ #'daq',
+
+ # Collects CCI counter data.
+ #'cci_pmu_logger',
+
+ # Collects FPS (Frames Per Second) and related metrics (such as jank) from
+ # the View of the workload (Note: only a single View per workload is
+ # supported at the moment, so this is mainly useful for games).
+ #'fps',
+]
+
+
+####################################################################################################
+################################# Result Processors Configuration ##################################
+####################################################################################################
+# Specifies how results will be processed and presented. #
+# #
+result_processors = [
+ # Creates a results.txt file for each iteration that lists all collected metrics
+ # in "name = value (units)" format
+ 'standard',
+
+ # Creates a results.csv that contains metrics for all iterations of all workloads
+ # in the .csv format.
+ 'csv',
+
+    # Creates a summary.csv that contains summary metrics for all iterations of all
+    # workloads in the .csv format. Summary metrics are defined on a per-workload
+    # basis and are typically things like overall scores. The contents of summary.csv
+    # are always a subset of the contents of results.csv (if it is generated).
+ 'summary_csv',
+
+    # Creates a results.json that contains metrics for all iterations of all workloads
+    # in the JSON format
+ #'json',
+
+ # Write results to an sqlite3 database. By default, a new database will be
+ # generated for each run, however it is possible to specify a path to an
+ # existing DB file (see result processor configuration below), in which
+ # case results from multiple runs may be stored in the one file.
+ #'sqlite',
+]
+
+
+####################################################################################################
+################################### Logging output Configuration ###################################
+####################################################################################################
+# Specify the format of logging messages. The format uses the old formatting syntax: #
+# #
+# http://docs.python.org/2/library/stdtypes.html#string-formatting-operations #
+# #
+# The attributes that can be used in formats are listed here:                                      #
+# #
+# http://docs.python.org/2/library/logging.html#logrecord-attributes #
+# #
+logging = {
+ # Log file format
+ 'file format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s',
+ # Verbose console output format
+ 'verbose format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s',
+ # Regular console output format
+ 'regular format': '%(levelname)-8s %(message)s',
+ # Colouring the console output
+ 'colour_enabled': True,
+}
+
+
+####################################################################################################
+#################################### Instruments Configuration #####################################
+####################################################################################################
+# Instrumentation Configuration is related to a specific instrument's settings. Some of the       #
+# instruments require specific settings in order for them to work. These settings are             #
+# specified here.                                                                                 #
+# Note that these settings only take effect if the corresponding instrument is
+# enabled above.
+
+####################################################################################################
+######################################## perf configuration ########################################
+
+# The hardware events such as instructions executed, cache-misses suffered, or branches
+# mispredicted to be reported by perf. Events can be obtained from the device by typing
+# 'perf list'.
+#perf_events = ['migrations', 'cs']
+
+# The perf options, which can be obtained from the man page for perf-record
+#perf_options = '-a -i'
+
+####################################################################################################
+####################################### hwmon configuration ########################################
+
+# The kinds of sensors hwmon instrument will look for
+#hwmon_sensors = ['energy', 'temp']
+
+####################################################################################################
+##################################### streamline configuration #####################################
+
+# The port number on which gatord will listen
+#port = 8080
+
+# Enabling/disabling the run of 'streamline -analyze' on the captured data.
+#streamline_analyze = True
+
+# Enabling/disabling the generation of a CSV report
+#streamline_report_csv = True
+
+####################################################################################################
+###################################### trace-cmd configuration #####################################
+
+# trace-cmd events to be traced. The available events can be listed by running
+# 'trace-cmd list -e' on a rooted device.
+#trace_events = ['power*']
+
+####################################################################################################
+######################################### DAQ configuration ########################################
+
+# The host address of the machine that runs the daq Server with which the instrument communicates
+#daq_server_host = '10.1.17.56'
+
+# The port number of the daq Server with which the daq instrument communicates
+#daq_server_port = 56788
+
+# The values of resistors 1 and 2 (in Ohms) across which the voltages are measured
+#daq_resistor_values = [0.002, 0.002]
+
+####################################################################################################
+################################### cci_pmu_logger configuration ###################################
+
+# The events to be counted by PMU
+# NOTE: The number of events must not exceed the number of counters available (which is 4 for CCI-400)
+#cci_pmu_events = ['0x63', '0x83']
+
+# The name of the events which will be used when reporting PMU counts
+#cci_pmu_event_labels = ['event_0x63', 'event_0x83']
+
+# The period (in jiffies) between counter reads
+#cci_pmu_period = 15
+
+####################################################################################################
+################################### fps configuration ##############################################
+
+# Data points below this FPS will be dropped as not constituting "real" gameplay. The assumption
+# being that while actually running, the FPS in the game will not drop below X frames per second,
+# except on loading screens, menus, etc, which should not contribute to FPS calculation.
+#fps_drop_threshold=5
+
+# If set to True, this will keep the raw dumpsys output in the results directory (this is mainly
+# used for debugging). Note: frames.csv with collected frames data will always be generated
+# regardless of this setting.
+#fps_keep_raw=False
+
+####################################################################################################
+################################# Result Processor Configuration ###################################
+####################################################################################################
+
+# Specifies an alternative database to store results in. If the file does not
+# exist, it will be created (the directory of the file must exist, however). If
+# the file does exist, the results will be added to the existing data set (each
+# run has a UUID, so results won't clash even if identical agendas were used).
+# Note that in order for this to work, the version of the schema used to generate
+# the DB file must match that of the schema used for the current run. Please
+# see the "What's new" section in the WA docs to check if the schema has changed in
+# recent releases of WA.
+#sqlite_database = '/work/results/myresults.sqlite'
+
+# If the file specified by sqlite_database exists, setting this to True will
+# cause that file to be overwritten rather than updated -- existing results in
+# the file will be lost.
+#sqlite_overwrite = False
+
+# distribution: internal
+
+####################################################################################################
+#################################### Resource Getter configuration #################################
+####################################################################################################
+
+# The location on your system where /arm/scratch is mounted. Used by
+# Scratch resource getter.
+#scratch_mount_point = '/arm/scratch'
+
+# end distribution
diff --git a/wlauto/core/__init__.py b/wlauto/core/__init__.py
new file mode 100644
index 00000000..cd5d64d6
--- /dev/null
+++ b/wlauto/core/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
diff --git a/wlauto/core/agenda.py b/wlauto/core/agenda.py
new file mode 100644
index 00000000..ad820c8f
--- /dev/null
+++ b/wlauto/core/agenda.py
@@ -0,0 +1,244 @@
+# Copyright 2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+from copy import copy
+from collections import OrderedDict, defaultdict
+
+from wlauto.exceptions import ConfigError
+from wlauto.utils.misc import load_struct_from_yaml, LoadSyntaxError
+from wlauto.utils.types import counter, reset_counter
+
+import yaml
+
+
+def get_aliased_param(d, aliases, default=None, pop=True):
+ alias_map = [i for i, a in enumerate(aliases) if a in d]
+ if len(alias_map) > 1:
+ message = 'Only one of {} may be specified in a single entry'
+ raise ConfigError(message.format(aliases))
+ elif alias_map:
+ if pop:
+ return d.pop(aliases[alias_map[0]])
+ else:
+ return d[aliases[alias_map[0]]]
+ else:
+ return default
+
+
+class AgendaEntry(object):
+
+ def to_dict(self):
+ return copy(self.__dict__)
+
+
+class AgendaWorkloadEntry(AgendaEntry):
+ """
+ Specifies execution of a workload, including things like the number of
+ iterations, device runtime_parameters configuration, etc.
+
+ """
+
+ def __init__(self, **kwargs):
+ super(AgendaWorkloadEntry, self).__init__()
+ self.id = kwargs.pop('id')
+ self.workload_name = get_aliased_param(kwargs, ['workload_name', 'name'])
+ if not self.workload_name:
+ raise ConfigError('No workload name specified in entry {}'.format(self.id))
+ self.label = kwargs.pop('label', self.workload_name)
+ self.number_of_iterations = kwargs.pop('iterations', None)
+ self.boot_parameters = get_aliased_param(kwargs,
+ ['boot_parameters', 'boot_params'],
+ default=OrderedDict())
+ self.runtime_parameters = get_aliased_param(kwargs,
+ ['runtime_parameters', 'runtime_params'],
+ default=OrderedDict())
+ self.workload_parameters = get_aliased_param(kwargs,
+ ['workload_parameters', 'workload_params', 'params'],
+ default=OrderedDict())
+ self.instrumentation = kwargs.pop('instrumentation', [])
+ self.flash = kwargs.pop('flash', OrderedDict())
+ if kwargs:
+ raise ConfigError('Invalid entry(ies) in workload {}: {}'.format(self.id, ', '.join(kwargs.keys())))
+
+
+class AgendaSectionEntry(AgendaEntry):
+ """
+ Specifies execution of a workload, including things like the number of
+ iterations, device runtime_parameters configuration, etc.
+
+ """
+
+ def __init__(self, agenda, **kwargs):
+ super(AgendaSectionEntry, self).__init__()
+ self.id = kwargs.pop('id')
+ self.number_of_iterations = kwargs.pop('iterations', None)
+ self.boot_parameters = get_aliased_param(kwargs,
+ ['boot_parameters', 'boot_params'],
+ default=OrderedDict())
+ self.runtime_parameters = get_aliased_param(kwargs,
+ ['runtime_parameters', 'runtime_params', 'params'],
+ default=OrderedDict())
+ self.workload_parameters = get_aliased_param(kwargs,
+ ['workload_parameters', 'workload_params'],
+ default=OrderedDict())
+ self.instrumentation = kwargs.pop('instrumentation', [])
+ self.flash = kwargs.pop('flash', OrderedDict())
+ self.workloads = []
+ for w in kwargs.pop('workloads', []):
+ self.workloads.append(agenda.get_workload_entry(w))
+ if kwargs:
+ raise ConfigError('Invalid entry(ies) in section {}: {}'.format(self.id, ', '.join(kwargs.keys())))
+
+ def to_dict(self):
+ d = copy(self.__dict__)
+ d['workloads'] = [w.to_dict() for w in self.workloads]
+ return d
+
+
+class AgendaGlobalEntry(AgendaEntry):
+ """
+ Workload configuration global to all workloads.
+
+ """
+
+ def __init__(self, **kwargs):
+ super(AgendaGlobalEntry, self).__init__()
+ self.number_of_iterations = kwargs.pop('iterations', None)
+ self.boot_parameters = get_aliased_param(kwargs,
+ ['boot_parameters', 'boot_params'],
+ default=OrderedDict())
+ self.runtime_parameters = get_aliased_param(kwargs,
+ ['runtime_parameters', 'runtime_params', 'params'],
+ default=OrderedDict())
+ self.workload_parameters = get_aliased_param(kwargs,
+ ['workload_parameters', 'workload_params'],
+ default=OrderedDict())
+ self.instrumentation = kwargs.pop('instrumentation', [])
+ self.flash = kwargs.pop('flash', OrderedDict())
+ if kwargs:
+ raise ConfigError('Invalid entries in global section: {}'.format(kwargs))
+
+
+class Agenda(object):
+
+ def __init__(self, source=None):
+ self.filepath = None
+ self.config = None
+ self.global_ = None
+ self.sections = []
+ self.workloads = []
+ self._seen_ids = defaultdict(set)
+ if source:
+ try:
+ reset_counter('section')
+ reset_counter('workload')
+ self._load(source)
+ except (ConfigError, LoadSyntaxError, SyntaxError), e:
+ raise ConfigError(str(e))
+
+ def add_workload_entry(self, w):
+ entry = self.get_workload_entry(w)
+ self.workloads.append(entry)
+
+ def get_workload_entry(self, w):
+ if isinstance(w, basestring):
+ w = {'name': w}
+ if not isinstance(w, dict):
+ raise ConfigError('Invalid workload entry: "{}" in {}'.format(w, self.filepath))
+ self._assign_id_if_needed(w, 'workload')
+ return AgendaWorkloadEntry(**w)
+
+ def _load(self, source):
+ raw = self._load_raw_from_source(source)
+ if not isinstance(raw, dict):
+ message = '{} does not contain a valid agenda structure; top level must be a dict.'
+ raise ConfigError(message.format(self.filepath))
+ for k, v in raw.iteritems():
+ if k == 'config':
+ self.config = v
+ elif k == 'global':
+ self.global_ = AgendaGlobalEntry(**v)
+ elif k == 'sections':
+ self._collect_existing_ids(v, 'section')
+ for s in v:
+ if not isinstance(s, dict):
+ raise ConfigError('Invalid section entry: "{}" in {}'.format(s, self.filepath))
+ self._collect_existing_ids(s.get('workloads', []), 'workload')
+ for s in v:
+ self._assign_id_if_needed(s, 'section')
+ self.sections.append(AgendaSectionEntry(self, **s))
+ elif k == 'workloads':
+ self._collect_existing_ids(v, 'workload')
+ for w in v:
+ self.workloads.append(self.get_workload_entry(w))
+ else:
+ raise ConfigError('Unexpected agenda entry "{}" in {}'.format(k, self.filepath))
+
+ def _load_raw_from_source(self, source):
+ if hasattr(source, 'read') and hasattr(source, 'name'): # file-like object
+ self.filepath = source.name
+ raw = load_struct_from_yaml(text=source.read())
+ elif isinstance(source, basestring):
+ if os.path.isfile(source):
+ self.filepath = source
+ raw = load_struct_from_yaml(filepath=self.filepath)
+ else: # assume YAML text
+ raw = load_struct_from_yaml(text=source)
+ else:
+ raise ConfigError('Unknown agenda source: {}'.format(source))
+ return raw
+
+ def _collect_existing_ids(self, ds, pool):
+ # Collection needs to take place first so that auto IDs can be
+ # correctly assigned, e.g. if someone explicitly specified an ID
+ # of '1' for one of the workloads.
+ for d in ds:
+ if isinstance(d, dict) and 'id' in d:
+ did = str(d['id'])
+ if did in self._seen_ids[pool]:
+ raise ConfigError('Duplicate {} ID: {}'.format(pool, did))
+ self._seen_ids[pool].add(did)
+
+ def _assign_id_if_needed(self, d, pool):
+ # Also enforces string IDs
+ if d.get('id') is None:
+ did = str(counter(pool))
+ while did in self._seen_ids[pool]:
+ did = str(counter(pool))
+ d['id'] = did
+ self._seen_ids[pool].add(did)
+ else:
+ d['id'] = str(d['id'])
+
+
+# Modifying the yaml parser to use an OrderedDict, rather than a regular Python
+# dict for mappings. This preserves the order in which the items are
+# specified. See
+# http://stackoverflow.com/a/21048064
+
+_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
+
+
+def dict_representer(dumper, data):
+ return dumper.represent_mapping(_mapping_tag, data.iteritems())
+
+
+def dict_constructor(loader, node):
+ return OrderedDict(loader.construct_pairs(node))
+
+
+yaml.add_representer(OrderedDict, dict_representer)
+yaml.add_constructor(_mapping_tag, dict_constructor)
diff --git a/wlauto/core/bootstrap.py b/wlauto/core/bootstrap.py
new file mode 100644
index 00000000..cfca78bf
--- /dev/null
+++ b/wlauto/core/bootstrap.py
@@ -0,0 +1,195 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+import shutil
+import imp
+import sys
+import re
+from collections import namedtuple, OrderedDict
+
+from wlauto.exceptions import ConfigError
+from wlauto.utils.misc import merge_dicts, normalize, unique
+from wlauto.utils.types import identifier
+
+
+_this_dir = os.path.dirname(__file__)
+_user_home = os.path.expanduser('~')
+
+# loading our external packages over those from the environment
+sys.path.insert(0, os.path.join(_this_dir, '..', 'external'))
+
+
+# Defines extension points for the WA framework. This table is used by the
+# ExtensionLoader (among other places) to identify extensions it should look
+# for.
+# Parameters that need to be specified in a tuple for each extension type:
+# name: The name of the extension type. This will be used to resolve get_
+# and list_methods in the extension loader.
+# class: The base class for the extension type. Extension loader will check
+# whether classes it discovers are subclassed from this.
+# default package: This is the package that will be searched for extensions
+# of that type by default (if not other packages are
+# specified when creating the extension loader). This
+# package *must* exist.
+# default path: This is the subdirectory under the environment_root which
+# will be searched for extensions of this type by default (if
+# no other paths are specified when creating the extension
+# loader). This directory will be automatically created if it
+# does not exist.
+
+#pylint: disable=C0326
+_EXTENSION_TYPE_TABLE = [
+ # name, class, default package, default path
+ ('command', 'wlauto.core.command.Command', 'wlauto.commands', 'commands'),
+ ('device', 'wlauto.core.device.Device', 'wlauto.devices', 'devices'),
+ ('instrument', 'wlauto.core.instrumentation.Instrument', 'wlauto.instrumentation', 'instruments'),
+ ('module', 'wlauto.core.extension.Module', 'wlauto.modules', 'modules'),
+ ('resource_getter', 'wlauto.core.resource.ResourceGetter', 'wlauto.resource_getters', 'resource_getters'),
+ ('result_processor', 'wlauto.core.result.ResultProcessor', 'wlauto.result_processors', 'result_processors'),
+ ('workload', 'wlauto.core.workload.Workload', 'wlauto.workloads', 'workloads'),
+]
+_Extension = namedtuple('_Extension', 'name, cls, default_package, default_path')
+_extensions = [_Extension._make(ext) for ext in _EXTENSION_TYPE_TABLE] # pylint: disable=W0212
+
+
+class ConfigLoader(object):
+ """
+ This class is responsible for loading and validating config files.
+
+ """
+
+ def __init__(self):
+ self._loaded = False
+ self._config = {}
+ self.config_count = 0
+ self._loaded_files = []
+ self.environment_root = None
+ self.output_directory = 'wa_output'
+ self.reboot_after_each_iteration = True
+ self.dependencies_directory = None
+ self.agenda = None
+ self.extension_packages = []
+ self.extension_paths = []
+ self.extensions = []
+ self.verbosity = 0
+ self.debug = False
+ self.package_directory = os.path.dirname(_this_dir)
+ self.commands = {}
+
+ @property
+ def meta_directory(self):
+ return os.path.join(self.output_directory, '__meta')
+
+ @property
+ def log_file(self):
+ return os.path.join(self.output_directory, 'run.log')
+
+ def update(self, source):
+ if isinstance(source, dict):
+ self.update_from_dict(source)
+ else:
+ self.config_count += 1
+ self.update_from_file(source)
+
+ def update_from_file(self, source):
+ try:
+ new_config = imp.load_source('config_{}'.format(self.config_count), source)
+ except SyntaxError, e:
+ message = 'Sytax error in config: {}'.format(str(e))
+ raise ConfigError(message)
+ self._config = merge_dicts(self._config, vars(new_config),
+ list_duplicates='first', match_types=False, dict_type=OrderedDict)
+ self._loaded_files.append(source)
+ self._loaded = True
+
+ def update_from_dict(self, source):
+ normalized_source = dict((identifier(k), v) for k, v in source.iteritems())
+ self._config = merge_dicts(self._config, normalized_source, list_duplicates='first',
+ match_types=False, dict_type=OrderedDict)
+ self._loaded = True
+
+ def get_config_paths(self):
+ return [lf.rstrip('c') for lf in self._loaded_files]
+
+ def _check_loaded(self):
+ if not self._loaded:
+ raise ConfigError('Config file not loaded.')
+
+ def __getattr__(self, name):
+ self._check_loaded()
+ return self._config.get(normalize(name))
+
+
+def init_environment(env_root, dep_dir, extension_paths, overwrite_existing=False): # pylint: disable=R0914
+    """Initialise a fresh user environment, creating the workload automation user directory structure."""
+ if os.path.exists(env_root):
+ if not overwrite_existing:
+ raise ConfigError('Environment {} already exists.'.format(env_root))
+ shutil.rmtree(env_root)
+
+ os.makedirs(env_root)
+ with open(os.path.join(_this_dir, '..', 'config_example.py')) as rf:
+ text = re.sub(r'""".*?"""', '', rf.read(), 1, re.DOTALL)
+ with open(os.path.join(_env_root, 'config.py'), 'w') as wf:
+ wf.write(text)
+
+ os.makedirs(dep_dir)
+ for path in extension_paths:
+ os.makedirs(path)
+
+ # If running with sudo on POSIX, change the ownership to the real user.
+ real_user = os.getenv('SUDO_USER')
+ if real_user:
+ import pwd # done here as module won't import on win32
+ user_entry = pwd.getpwnam(real_user)
+ uid, gid = user_entry.pw_uid, user_entry.pw_gid
+ os.chown(env_root, uid, gid)
+        # why, oh why isn't there a recursive=True option for os.chown?
+ for root, dirs, files in os.walk(env_root):
+ for d in dirs:
+ os.chown(os.path.join(root, d), uid, gid)
+ for f in files: # pylint: disable=W0621
+ os.chown(os.path.join(root, f), uid, gid)
+
+
+_env_root = os.getenv('WA_USER_DIRECTORY', os.path.join(_user_home, '.workload_automation'))
+_dep_dir = os.path.join(_env_root, 'dependencies')
+_extension_paths = [os.path.join(_env_root, ext.default_path) for ext in _extensions]
+_extension_paths.extend(os.getenv('WA_EXTENSION_PATHS', '').split(os.pathsep))
+
+if not os.path.isdir(_env_root):
+ init_environment(_env_root, _dep_dir, _extension_paths)
+elif not os.path.isfile(os.path.join(_env_root, 'config.py')):
+ with open(os.path.join(_this_dir, '..', 'config_example.py')) as f:
+ f_text = re.sub(r'""".*?"""', '', f.read(), 1, re.DOTALL)
+ with open(os.path.join(_env_root, 'config.py'), 'w') as f:
+ f.write(f_text)
+
+settings = ConfigLoader()
+settings.environment_root = _env_root
+settings.dependencies_directory = _dep_dir
+settings.extension_paths = _extension_paths
+settings.extensions = _extensions
+
+_packages_file = os.path.join(_env_root, 'packages')
+if os.path.isfile(_packages_file):
+ with open(_packages_file) as fh:
+ settings.extension_packages = unique(fh.read().split())
+
+_env_config = os.path.join(settings.environment_root, 'config.py')
+settings.update(_env_config)
+
diff --git a/wlauto/core/command.py b/wlauto/core/command.py
new file mode 100644
index 00000000..5822145a
--- /dev/null
+++ b/wlauto/core/command.py
@@ -0,0 +1,67 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import textwrap
+
+from wlauto.core.extension import Extension
+from wlauto.core.entry_point import init_argument_parser
+from wlauto.utils.doc import format_body
+
+
+class Command(Extension):
+ """
+ Defines a Workload Automation command. This will be executed from the command line as
+ ``wa <command> [args ...]``. This defines the name to be used when invoking wa, the
+ code that will actually be executed on invocation and the argument parser to be used
+ to parse the reset of the command line arguments.
+
+ """
+
+ help = None
+ usage = None
+ description = None
+ epilog = None
+ formatter_class = None
+
+ def __init__(self, subparsers):
+ super(Command, self).__init__()
+ self.group = subparsers
+ parser_params = dict(help=(self.help or self.description), usage=self.usage,
+ description=format_body(textwrap.dedent(self.description), 80),
+ epilog=self.epilog)
+ if self.formatter_class:
+ parser_params['formatter_class'] = self.formatter_class
+ self.parser = subparsers.add_parser(self.name, **parser_params)
+ init_argument_parser(self.parser) # propagate top-level options
+ self.initialize()
+
+ def initialize(self):
+ """
+ Perform command-specific initialisation (e.g. adding command-specific options to the command's
+ parser).
+
+ """
+ pass
+
+ def execute(self, args):
+ """
+ Execute this command.
+
+ :args: An ``argparse.Namespace`` containing command line arguments (as returned by
+ ``argparse.ArgumentParser.parse_args()``. This would usually be the result of
+ invoking ``self.parser``.
+
+ """
+ raise NotImplementedError()
diff --git a/wlauto/core/configuration.py b/wlauto/core/configuration.py
new file mode 100644
index 00000000..432e55ae
--- /dev/null
+++ b/wlauto/core/configuration.py
@@ -0,0 +1,756 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+import json
+from copy import copy
+from collections import OrderedDict
+
+from wlauto.exceptions import ConfigError
+from wlauto.utils.misc import merge_dicts, merge_lists, load_struct_from_file
+from wlauto.utils.types import regex_type, identifier
+
+
+class SharedConfiguration(object):
+
+ def __init__(self):
+ self.number_of_iterations = None
+ self.workload_name = None
+ self.label = None
+ self.boot_parameters = OrderedDict()
+ self.runtime_parameters = OrderedDict()
+ self.workload_parameters = OrderedDict()
+ self.instrumentation = []
+
+
+class ConfigurationJSONEncoder(json.JSONEncoder):
+
+ def default(self, obj): # pylint: disable=E0202
+ if isinstance(obj, WorkloadRunSpec):
+ return obj.to_dict()
+ elif isinstance(obj, RunConfiguration):
+ return obj.to_dict()
+ elif isinstance(obj, RebootPolicy):
+ return obj.policy
+ elif isinstance(obj, regex_type):
+ return obj.pattern
+ else:
+ return json.JSONEncoder.default(self, obj)
+
+
+class WorkloadRunSpec(object):
+ """
+ Specifies execution of a workload, including things like the number of
+ iterations, device runtime_parameters configuration, etc.
+
+ """
+
+ # These should be handled by the framework if not explicitly specified
+ # so it's a programming error if they're not
+ framework_mandatory_parameters = ['id', 'number_of_iterations']
+
+ # These *must* be specified by the user (through one mechanism or another)
+ # and it is a configuration error if they're not.
+ mandatory_parameters = ['workload_name']
+
+ def __init__(self,
+ id=None, # pylint: disable=W0622
+ number_of_iterations=None,
+ workload_name=None,
+ boot_parameters=None,
+ label=None,
+ section_id=None,
+ workload_parameters=None,
+ runtime_parameters=None,
+ instrumentation=None,
+ flash=None,
+ ): # pylint: disable=W0622
+ self.id = id
+ self.number_of_iterations = number_of_iterations
+ self.workload_name = workload_name
+ self.label = label or self.workload_name
+ self.section_id = section_id
+ self.boot_parameters = boot_parameters or OrderedDict()
+ self.runtime_parameters = runtime_parameters or OrderedDict()
+ self.workload_parameters = workload_parameters or OrderedDict()
+ self.instrumentation = instrumentation or []
+ self.flash = flash or OrderedDict()
+ self._workload = None
+ self._section = None
+ self.enabled = True
+
+ def set(self, param, value):
+ if param in ['id', 'section_id', 'number_of_iterations', 'workload_name', 'label']:
+ if value is not None:
+ setattr(self, param, value)
+ elif param in ['boot_parameters', 'runtime_parameters', 'workload_parameters', 'flash']:
+ setattr(self, param, merge_dicts(getattr(self, param), value, list_duplicates='last',
+ dict_type=OrderedDict, should_normalize=False))
+ elif param in ['instrumentation']:
+ setattr(self, param, merge_lists(getattr(self, param), value, duplicates='last'))
+ else:
+ raise ValueError('Unexpected workload spec parameter: {}'.format(param))
+
+ def validate(self):
+ for param_name in self.framework_mandatory_parameters:
+ param = getattr(self, param_name)
+ if param is None:
+ msg = '{} not set for workload spec.'
+ raise RuntimeError(msg.format(param_name))
+ for param_name in self.mandatory_parameters:
+ param = getattr(self, param_name)
+ if param is None:
+ msg = '{} not set for workload spec for workload {}'
+ raise ConfigError(msg.format(param_name, self.id))
+
+ def match_selectors(self, selectors):
+ """
+ Returns ``True`` if this spec matches the specified selectors, and
+ ``False`` otherwise. ``selectors`` must be a dict-like object with
+ attribute names mapping onto selector values. At the moment, only equality
+ selection is supported; i.e. the value of the attribute of the spec must
+ match exactly the corresponding value specified in the ``selectors`` dict.
+
+ """
+ if not selectors:
+ return True
+ for k, v in selectors.iteritems():
+ if getattr(self, k, None) != v:
+ return False
+ return True
+
+ @property
+ def workload(self):
+ if self._workload is None:
+ raise RuntimeError("Workload for {} has not been loaded".format(self))
+ return self._workload
+
+ @property
+ def secition(self):
+ if self.section_id and self._section is None:
+ raise RuntimeError("Section for {} has not been loaded".format(self))
+ return self._section
+
+ def load(self, device, ext_loader):
+ """Loads the workload for the specified device using the specified loader.
+ This must be done before attempting to execute the spec."""
+ self._workload = ext_loader.get_workload(self.workload_name, device, **self.workload_parameters)
+
+ def to_dict(self):
+ d = copy(self.__dict__)
+ del d['_workload']
+ del d['_section']
+ return d
+
+ def __str__(self):
+ return '{} {}'.format(self.id, self.label)
+
+ def __cmp__(self, other):
+ if not isinstance(other, WorkloadRunSpec):
+ return cmp('WorkloadRunSpec', other.__class__.__name__)
+ return cmp(self.id, other.id)
+
+
+class _SpecConfig(object):
+    # TODO: This is a bit of a HACK for alias resolution. This formats Alias
+ # params as if they came from config.
+
+ def __init__(self, name, params=None):
+ setattr(self, name, params or {})
+
+
class RebootPolicy(object):
    """
    Encapsulates when the device gets rebooted during execution. The behaviour
    is driven by the policy value passed in on construction, which would
    typically be read from the user's settings. Valid policy values are:

    :never: The device will never be rebooted.
    :as_needed: Only reboot the device if it becomes unresponsive, or needs to be flashed, etc.
    :initial: The device will be rebooted when the execution first starts, just before
              executing the first workload spec.
    :each_spec: The device will be rebooted before running a new workload spec.
    :each_iteration: The device will be rebooted before each new iteration.

    """

    valid_policies = ['never', 'as_needed', 'initial', 'each_spec', 'each_iteration']

    def __init__(self, policy):
        # Normalise user input, e.g. " Each Spec " -> "each_spec".
        normalized = policy.strip().lower().replace(' ', '_')
        if normalized not in self.valid_policies:
            raise ConfigError('Invalid reboot policy {}; must be one of {}'.format(
                normalized, ', '.join(self.valid_policies)))
        self.policy = normalized

    @property
    def can_reboot(self):
        # Every policy except "never" permits rebooting.
        return self.policy != 'never'

    @property
    def perform_initial_boot(self):
        # Policy is guaranteed to be one of valid_policies, so this is the
        # complement of {"never", "as_needed"}.
        return self.policy in ['initial', 'each_spec', 'each_iteration']

    @property
    def reboot_on_each_spec(self):
        # Both "each_spec" and "each_iteration" imply a reboot per spec.
        return self.policy in ['each_spec', 'each_iteration']

    @property
    def reboot_on_each_iteration(self):
        return self.policy == 'each_iteration'

    def __str__(self):
        return self.policy

    __repr__ = __str__

    def __cmp__(self, other):
        # Python 2 ordering: compare against another policy's value, or
        # directly against a plain string.
        other_policy = other.policy if isinstance(other, RebootPolicy) else other
        return cmp(self.policy, other_policy)
+
+
class RunConfigurationItem(object):
    """
    Describes a single predetermined "configuration point" (an individual
    setting) and how multiple user-supplied values for it must be combined
    when encountered.

    """

    # Maps each valid category to the NULL value returned when no values
    # were specified at all.
    valid_categories = {
        'scalar': None,
        'list': [],
        'dict': {},
    }

    # A callable that takes an arbitrary number of positional arguments
    # is also a valid method.
    valid_methods = ['keep', 'replace', 'merge']

    def __init__(self, name, category, method):
        if category not in self.valid_categories:
            raise ValueError('Invalid category: {}'.format(category))
        if not callable(method) and method not in self.valid_methods:
            raise ValueError('Invalid method: {}'.format(method))
        if category == 'scalar' and method == 'merge':
            raise ValueError('Method cannot be "merge" for a scalar')
        self.name = name
        self.category = category
        self.method = method

    def combine(self, *args):
        """
        Combine the provided values according to this item's method. Order
        matters -- values are assumed to be in the order they were specified
        by the user. The result is coerced to the category's NULL value when
        no (non-None) values were given at all.

        """
        values = [v for v in args if v is not None]
        if not values:
            return self.valid_categories[self.category]
        # "keep" takes the earliest value; a single value needs no combining.
        if self.method == 'keep' or len(values) == 1:
            return values[0]
        if self.method == 'replace':
            # Later sources win.
            return values[-1]
        if self.method == 'merge':
            if self.category == 'list':
                return merge_lists(*values, duplicates='last', dict_type=OrderedDict)
            if self.category == 'dict':
                return merge_dicts(*values,
                                   should_merge_lists=True,
                                   should_normalize=False,
                                   list_duplicates='last',
                                   dict_type=OrderedDict)
            raise ValueError('Unexpected category for merge : "{}"'.format(self.category))
        if callable(self.method):
            return self.method(*values)
        raise ValueError('Unexpected method: "{}"'.format(self.method))
+
+
+def _combine_ids(*args):
+ return '_'.join(args)
+
+
class RunConfiguration(object):
    """
    Loads and maintains the unified configuration for this run. This includes configuration
    for WA execution as a whole, and parameters for specific specs.

    WA configuration mechanism aims to be flexible and easy to use, while at the same
    time providing strong validation and early failure on error. To meet these requirements,
    the implementation gets rather complicated. This is going to be a quick overview of
    the underlying mechanics.

    .. note:: You don't need to know this to use WA, or to write extensions for it. From
              the point of view of extension writers, configuration from various sources
              "magically" appears as attributes of their classes. This explanation peels
              back the curtain and is intended for those who, for one reason or another,
              need to understand how the magic works.

    **terminology**

    run

        A single execution of a WA agenda.

    run config(uration) (object)

        An instance of this class. There is one per run.

    config(uration) item

        A single configuration entry or "setting", e.g. the device interface to use. These
        can be for the run as a whole, or for a specific extension.

    (workload) spec

        A specification of a single workload execution. This combines workload configuration
        with things like the number of iterations to run, which instruments to enable, etc.
        More concretely, this is an instance of :class:`WorkloadRunSpec`.

    **overview**

    There are three types of WA configuration:

        1. "Meta" configuration that determines how the rest of the configuration is
           processed (e.g. where extensions get loaded from). Since this does not pertain
           to *run* configuration, it will not be covered further.
        2. Global run configuration, e.g. which workloads, result processors and instruments
           will be enabled for a run.
        3. Per-workload specification configuration, that determines how a particular workload
           instance will get executed (e.g. what workload parameters will be used, how many
           iterations).

    **run configuration**

    Run configuration may appear in a config file (usually ``~/.workload_automation/config.py``),
    or in the ``config`` section of an agenda. Configuration is specified as a nested structure
    of dictionaries (associative arrays, or maps) and lists in the syntax following the format
    implied by the file extension (currently, YAML and Python are supported). If the same
    configuration item appears in more than one source, they are merged with conflicting entries
    taking the value from the last source that specified them.

    In addition to a fixed set of global configuration items, configuration for any WA
    Extension (instrument, result processor, etc) may also be specified, namespaced under
    the extension's name (i.e. the extensions name is a key in the global config with value
    being a dict of parameters and their values). Some Extension parameters also specify a
    "global alias" that may appear at the top-level of the config rather than under the
    Extension's name. It is *not* an error to specify configuration for an Extension that has
    not been enabled for a particular run; such configuration will be ignored.


    **per-workload configuration**

    Per-workload configuration can be specified in three places in the agenda: the
    workload entry in the ``workloads`` list, the ``global`` entry (configuration there
    will be applied to every workload entry), and in a section entry in ``sections`` list
    ( configuration in every section will be applied to every workload entry separately,
    creating a "cross-product" of section and workload configurations; additionally,
    sections may specify their own workload lists).

    If the same configuration item appears in more than one of the above places, they will
    be merged in the following order: ``global``, ``section``, ``workload``, with conflicting
    scalar values in the later overriding those from previous locations.


    **Global parameter aliases**

    As mentioned above, an Extension's parameter may define a global alias, which will be
    specified and picked up from the top-level config, rather than config for that specific
    extension. It is an error to specify the value for a parameter both through a global
    alias and through extension config dict in the same configuration file. It is, however,
    possible to use a global alias in one file, and specify extension configuration for the
    same parameter in another file, in which case, the usual merging rules would apply.

    **Loading and validation of configuration**

    Validation of user-specified configuration happens at several stages of run initialisation,
    to ensure that appropriate context for that particular type of validation is available and
    that meaningful errors can be reported, as early as is feasible.

    - Syntactic validation is performed when configuration is first loaded.
      This is done by the loading mechanism (e.g. YAML parser), rather than WA itself. WA
      propagates any errors encountered as ``ConfigError``\ s.
    - Once a config file is loaded into a Python structure, it is scanned to
      extract settings. Static configuration is validated and added to the config. Extension
      configuration is collected into a collection of "raw" config, and merged as appropriate, but
      is not processed further at this stage.
    - Once all configuration sources have been processed, the configuration as a whole
      is validated (to make sure there are no missing settings, etc).
    - Extensions are loaded through the run config object, which instantiates
      them with appropriate parameters based on the "raw" config collected earlier. When an
      Extension is instantiated in such a way, it's config is "officially" added to run configuration
      tracked by the run config object. Raw config is discarded at the end of the run, so
      that any config that wasn't loaded in this way is not recorded (as it was not actually used).
    - Extension parameters are validated individually (for type, value ranges, etc) as they are
      loaded in the Extension's __init__.
    - An extension's ``validate()`` method is invoked before it is used (exactly when this
      happens depends on the extension's type) to perform any final validation *that does not
      rely on the target being present* (i.e. this would happen before WA connects to the target).
      This can be used to perform inter-parameter validation for an extension (e.g. when valid range for
      one parameter depends on another), and more general WA state assumptions (e.g. a result
      processor can check that an instrument it depends on has been installed).
    - Finally, it is the responsibility of individual extensions to validate any assumptions
      they make about the target device (usually as part of their ``setup()``).

    **Handling of Extension aliases.**

    WA extensions can have zero or more aliases (not to be confused with global aliases for extension
    *parameters*). An extension allows associating an alternative name for the extension with a set
    of parameter values. In other words aliases associate common configurations for an extension with
    a name, providing a shorthand for it. For example, "t-rex_offscreen" is an alias for "glbenchmark"
    workload that specifies that "use_case" should be "t-rex" and "variant" should be "offscreen".

    **special loading rules**

    Note that as a consequence of being able to specify configuration for *any* Extension namespaced
    under the Extension's name in the top-level config, two distinct mechanisms exist for configuring
    devices and workloads. This is valid, however due to their nature, they are handled in a special way.
    This may be counter intuitive, so configuration of devices and workloads creating entries for their
    names in the config is discouraged in favour of using the "normal" mechanisms of configuring them
    (``device_config`` for devices and workload specs in the agenda for workloads).

    In both cases (devices and workloads), "normal" config will always override named extension config
    *irrespective of which file it was specified in*. So a ``adb_name`` name specified in ``device_config``
    inside ``~/.workload_automation/config.py`` will override ``adb_name`` specified for ``juno`` in the
    agenda (even when device is set to "juno").

    Again, this ignores normal loading rules, so the use of named extension configuration for devices
    and workloads is discouraged. There may be some situations where this behaviour is useful however
    (e.g. maintaining configuration for different devices in one config file).

    """

    default_reboot_policy = 'as_needed'
    default_execution_order = 'by_iteration'

    # This is generic top-level configuration.
    general_config = [
        RunConfigurationItem('run_name', 'scalar', 'replace'),
        RunConfigurationItem('project', 'scalar', 'replace'),
        RunConfigurationItem('project_stage', 'dict', 'replace'),
        RunConfigurationItem('execution_order', 'scalar', 'replace'),
        RunConfigurationItem('reboot_policy', 'scalar', 'replace'),
        RunConfigurationItem('device', 'scalar', 'replace'),
        RunConfigurationItem('flashing_config', 'dict', 'replace'),
    ]

    # Configuration specified for each workload spec. "workload_parameters"
    # aren't listed because they are handled separately.
    workload_config = [
        RunConfigurationItem('id', 'scalar', _combine_ids),
        RunConfigurationItem('number_of_iterations', 'scalar', 'replace'),
        RunConfigurationItem('workload_name', 'scalar', 'replace'),
        RunConfigurationItem('label', 'scalar', 'replace'),
        RunConfigurationItem('section_id', 'scalar', 'replace'),
        RunConfigurationItem('boot_parameters', 'dict', 'merge'),
        RunConfigurationItem('runtime_parameters', 'dict', 'merge'),
        RunConfigurationItem('instrumentation', 'list', 'merge'),
        RunConfigurationItem('flash', 'dict', 'merge'),
    ]

    # List of names that may be present in configuration (and it is valid for
    # them to be there) but are not handled by RunConfiguration.
    ignore_names = ['logging']

    def get_reboot_policy(self):
        # Lazily fall back to the default policy if none was configured.
        if not self._reboot_policy:
            self._reboot_policy = RebootPolicy(self.default_reboot_policy)
        return self._reboot_policy

    def set_reboot_policy(self, value):
        # Accept either a ready-made RebootPolicy or a raw policy string.
        if isinstance(value, RebootPolicy):
            self._reboot_policy = value
        else:
            self._reboot_policy = RebootPolicy(value)

    reboot_policy = property(get_reboot_policy, set_reboot_policy)

    @property
    def all_instrumentation(self):
        # Union of instrumentation named by every workload spec in this run.
        result = set()
        for spec in self.workload_specs:
            result = result.union(set(spec.instrumentation))
        return result

    def __init__(self, ext_loader):
        self.ext_loader = ext_loader
        self.device = None
        self.device_config = None
        self.execution_order = None
        self.project = None
        self.project_stage = None
        self.run_name = None
        self.instrumentation = {}
        self.result_processors = {}
        self.workload_specs = []
        self.flashing_config = {}
        self.other_config = {}  # keeps track of used config for extensions other than of the four main kinds.
        self._used_config_items = []
        self._global_instrumentation = []
        self._reboot_policy = None
        self._agenda = None
        self._finalized = False
        # Lookup tables from item name to its RunConfigurationItem descriptor.
        self._general_config_map = {i.name: i for i in self.general_config}
        self._workload_config_map = {i.name: i for i in self.workload_config}
        # Config files may contain static configuration for extensions that
        # would not be part of this run (e.g. DB connection settings
        # for a result processor that has not been enabled). Such settings
        # should not be part of configuration for this run (as they will not
        # be affecting it), but we still need to keep track it in case a later
        # config (e.g. from the agenda) enables the extension.
        # For this reason, all extension config is first loaded into the
        # following dict and when an extension is identified as needed for the
        # run, its config is picked up from this "raw" dict and it becomes part
        # of the run configuration.
        self._raw_config = {'instrumentation': [], 'result_processors': []}

    def get_extension(self, ext_name, *args):
        """Instantiate the named extension with its collected raw config.
        May only be called once the configuration has been finalized."""
        self._check_finalized()
        self._load_default_config_if_necessary(ext_name)
        ext_config = self._raw_config[ext_name]
        ext_cls = self.ext_loader.get_extension_class(ext_name)
        # Config for the four main kinds is tracked elsewhere; anything else
        # is recorded in other_config so it appears in the serialized run config.
        if ext_cls.kind not in ['workload', 'device', 'instrument', 'result_processor']:
            self.other_config[ext_name] = ext_config
        return self.ext_loader.get_extension(ext_name, *args, **ext_config)

    def to_dict(self):
        # Serializable view: drop the loader and private attributes, and
        # expand specs via their own to_dict().
        d = copy(self.__dict__)
        to_remove = ['ext_loader', 'workload_specs'] + [k for k in d.keys() if k.startswith('_')]
        for attr in to_remove:
            del d[attr]
        d['workload_specs'] = [s.to_dict() for s in self.workload_specs]
        d['reboot_policy'] = self.reboot_policy  # this is a property so not in __dict__
        return d

    def load_config(self, source):
        """Load configuration from the specified source. The source must be
        either a path to a valid config file or a dict-like object. Currently,
        config files can be either python modules (.py extension) or YAML documents
        (.yaml extension)."""
        if self._finalized:
            raise ValueError('Attempting to load a config file after run configuration has been finalized.')
        try:
            config_struct = _load_raw_struct(source)
            self._merge_config(config_struct)
        except ConfigError as e:
            # NOTE(review): for a plain file path, getattr(source, 'name', None)
            # yields None rather than the path -- verify intended.
            message = 'Error in {}:\n\t{}'
            raise ConfigError(message.format(getattr(source, 'name', None), e.message))

    def set_agenda(self, agenda, selectors=None):
        """Set the agenda for this run; Unlike with config files, there can only be one agenda."""
        if self._agenda:
            # note: this also guards against loading an agenda after finalized() has been called,
            #       as that would have required an agenda to be set.
            message = 'Attempting to set a second agenda {};\n\talready have agenda {} set'
            raise ValueError(message.format(agenda.filepath, self._agenda.filepath))
        try:
            self._merge_config(agenda.config or {})
            self._load_specs_from_agenda(agenda, selectors)
            self._agenda = agenda
        except ConfigError as e:
            message = 'Error in {}:\n\t{}'
            raise ConfigError(message.format(agenda.filepath, e.message))

    def finalize(self):
        """This must be invoked once all configuration sources have been loaded. This will
        do the final processing, setting instrumentation and result processor configuration
        for the run and making sure that all the mandatory config has been specified."""
        if self._finalized:
            return
        if not self._agenda:
            raise ValueError('Attempting to finalize run configuration before an agenda is loaded.')
        self._finalize_config_list('instrumentation')
        self._finalize_config_list('result_processors')
        if not self.device:
            raise ConfigError('Device not specified in the config.')
        self._finalize_device_config()
        # boot_parameters only take effect on reboot, so they require a policy
        # that reboots at least once per spec.
        if not self.reboot_policy.reboot_on_each_spec:
            for spec in self.workload_specs:
                if spec.boot_parameters:
                    message = 'spec {} specifies boot_parameters; reboot policy must be at least "each_spec"'
                    raise ConfigError(message.format(spec.id))
        # Propagate globally-enabled instruments into each spec, then validate.
        for spec in self.workload_specs:
            for globinst in self._global_instrumentation:
                if globinst not in spec.instrumentation:
                    spec.instrumentation.append(globinst)
            spec.validate()
        self._finalized = True

    def serialize(self, wfh):
        # Write this configuration as JSON to the provided file handle.
        json.dump(self, wfh, cls=ConfigurationJSONEncoder, indent=4)

    def _merge_config(self, config):
        """
        Merge the settings specified by the ``config`` dict-like object into current
        configuration.

        """
        if not isinstance(config, dict):
            raise ValueError('config must be a dict; found {}'.format(config.__class__.__name__))

        for k, v in config.iteritems():
            k = identifier(k)
            if k in self.ext_loader.global_param_aliases:
                self._resolve_global_alias(k, v)
            elif k in self._general_config_map:
                self._set_run_config_item(k, v)
            elif self.ext_loader.has_extension(k):
                self._set_extension_config(k, v)
            elif k == 'device_config':
                self._set_raw_dict(k, v)
            elif k in ['instrumentation', 'result_processors']:
                # Instrumentation can be enabled and disabled by individual
                # workloads, so we need to track it in two places: a list of
                # all instruments for the run (as they will all need to be
                # initialized and installed), and a list of only the "global"
                # instruments which can then be merged into instrumentation
                # lists of individual workload specs.
                self._set_raw_list('_global_{}'.format(k), v)
                self._set_raw_list(k, v)
            elif k in self.ignore_names:
                pass
            else:
                raise ConfigError('Unknown configuration option: {}'.format(k))

    def _resolve_global_alias(self, name, value):
        # Record the aliased value against every extension (and every alias of
        # that extension) whose parameter declares this global alias.
        ga = self.ext_loader.global_param_aliases[name]
        for param, ext in ga.iteritems():
            # NOTE(review): the loop variable shadows the ``name`` argument here.
            for name in [ext.name] + [a.name for a in ext.aliases]:
                self._load_default_config_if_necessary(name)
                self._raw_config[name][param.name] = value

    def _set_run_config_item(self, name, value):
        # Combine the new value with any previously-set one per the item's
        # combine method, and store the result as an attribute of self.
        item = self._general_config_map[name]
        combined_value = item.combine(getattr(self, name, None), value)
        setattr(self, name, combined_value)

    def _set_extension_config(self, name, value):
        default_config = self.ext_loader.get_default_config(name)
        self._set_raw_dict(name, value, default_config)

    def _set_raw_dict(self, name, value, default_config=None):
        # Merge the new dict on top of what has been collected so far
        # (starting from the extension's defaults, if provided).
        existing_config = self._raw_config.get(name, default_config or {})
        new_config = _merge_config_dicts(existing_config, value)
        self._raw_config[name] = new_config

    def _set_raw_list(self, name, value):
        # Append-merge list config, keeping the last occurrence of duplicates.
        old_value = self._raw_config.get(name, [])
        new_value = merge_lists(old_value, value, duplicates='last')
        self._raw_config[name] = new_value

    def _finalize_config_list(self, attr_name):
        """Note: the name is somewhat misleading. This finalizes a list
        from the specified configuration (e.g. "instrumentation"); internal
        representation is actually a dict, not a list..."""
        ext_config = {}
        raw_list = self._raw_config.get(attr_name, [])
        for extname in raw_list:
            default_config = self.ext_loader.get_default_config(extname)
            ext_config[extname] = self._raw_config.get(extname, default_config)
        list_name = '_global_{}'.format(attr_name)
        setattr(self, list_name, raw_list)
        setattr(self, attr_name, ext_config)

    def _finalize_device_config(self):
        # "device_config" entries take precedence over config namespaced
        # under the device's name (see "special loading rules" above).
        self._load_default_config_if_necessary(self.device)
        config = _merge_config_dicts(self._raw_config.get(self.device),
                                     self._raw_config.get('device_config', {}))
        self.device_config = config

    def _load_default_config_if_necessary(self, name):
        # Seed raw config with the extension's defaults on first reference.
        if name not in self._raw_config:
            self._raw_config[name] = self.ext_loader.get_default_config(name)

    def _load_specs_from_agenda(self, agenda, selectors):
        # Build one spec per (section x workload) combination; if there are no
        # sections, one spec per workload entry.
        global_dict = agenda.global_.to_dict() if agenda.global_ else {}
        if agenda.sections:
            for section_entry in agenda.sections:
                section_dict = section_entry.to_dict()
                for workload_entry in agenda.workloads + section_entry.workloads:
                    workload_dict = workload_entry.to_dict()
                    self._load_workload_spec(global_dict, section_dict, workload_dict, selectors)
        else:  # no sections were specified
            for workload_entry in agenda.workloads:
                workload_dict = workload_entry.to_dict()
                self._load_workload_spec(global_dict, {}, workload_dict, selectors)

    def _load_workload_spec(self, global_dict, section_dict, workload_dict, selectors):
        # Combine global/section/workload values for each known config item
        # (merge order documented in the class docstring).
        spec = WorkloadRunSpec()
        for name, config in self._workload_config_map.iteritems():
            value = config.combine(global_dict.get(name), section_dict.get(name), workload_dict.get(name))
            spec.set(name, value)
        if section_dict:
            spec.set('section_id', section_dict.get('id'))

        # The user may have specified an alias (e.g. "t-rex_offscreen");
        # resolve it to the real workload name plus the alias's parameters.
        realname, alias_config = self.ext_loader.resolve_alias(spec.workload_name)
        if not spec.label:
            spec.label = spec.workload_name
        spec.workload_name = realname
        # Workload parameters, lowest to highest precedence.
        dicts = [self.ext_loader.get_default_config(realname),
                 alias_config,
                 self._raw_config.get(spec.workload_name),
                 global_dict.get('workload_parameters'),
                 section_dict.get('workload_parameters'),
                 workload_dict.get('workload_parameters')]
        dicts = [d for d in dicts if d is not None]
        value = _merge_config_dicts(*dicts)
        spec.set('workload_parameters', value)

        if not spec.number_of_iterations:
            spec.number_of_iterations = 1

        # Only keep specs matched by the command-line selectors; make sure all
        # of the spec's instruments are tracked for run-wide initialization.
        if spec.match_selectors(selectors):
            instrumentation_config = self._raw_config['instrumentation']
            for instname in spec.instrumentation:
                if instname not in instrumentation_config:
                    instrumentation_config.append(instname)
            self.workload_specs.append(spec)

    def _check_finalized(self):
        if not self._finalized:
            raise ValueError('Attempting to access configuration before it has been finalized.')
+
+
def _load_raw_struct(source):
    """Load a raw dict config structure from the specified source.

    :param source: either a path to a config file (loaded via
        ``load_struct_from_file``) or a dict-like object used as-is.
    :returns: the raw configuration dict.
    :raises ConfigError: if the path does not exist, or the source is of an
        unsupported type.
    """
    if isinstance(source, basestring):
        if os.path.isfile(source):
            raw = load_struct_from_file(filepath=source)
        else:
            # Fixed message typo: was 'does not exit'.
            raise ConfigError('File "{}" does not exist'.format(source))
    elif isinstance(source, dict):
        raw = source
    else:
        raise ConfigError('Unknown config source: {}'.format(source))
    return raw
+
+
def _merge_config_dicts(*args, **kwargs):
    """Invoke ``merge_dicts`` with a different set of default settings,
    appropriate for merging configuration dicts."""
    merge_kwargs = {
        'should_merge_lists': kwargs.get('should_merge_lists', False),
        'should_normalize': kwargs.get('should_normalize', False),
        'list_duplicates': kwargs.get('list_duplicates', 'last'),
        'dict_type': kwargs.get('dict_type', OrderedDict),
    }
    return merge_dicts(*args, **merge_kwargs)
diff --git a/wlauto/core/device.py b/wlauto/core/device.py
new file mode 100644
index 00000000..bef51fce
--- /dev/null
+++ b/wlauto/core/device.py
@@ -0,0 +1,418 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Base classes for device interfaces.
+
+ :Device: The base class for all devices. This defines the interface that must be
+ implemented by all devices and therefore any workload and instrumentation
+ can always rely on.
+ :AndroidDevice: Implements most of the :class:`Device` interface, and extends it
+ with a number of Android-specific methods.
+ :BigLittleDevice: Subclasses :class:`AndroidDevice` to implement big.LITTLE-specific
+ runtime parameters.
+ :SimpleMulticoreDevice: Subclasses :class:`AndroidDevice` to implement homogeneous cores
+ device runtime parameters.
+
+"""
+
+import os
+import imp
+import string
+from collections import OrderedDict
+from contextlib import contextmanager
+
+from wlauto.core.extension import Extension, ExtensionMeta, AttributeCollection, Parameter
+from wlauto.exceptions import DeviceError, ConfigError
+from wlauto.utils.types import list_of_strings, list_of_integers
+
+
+__all__ = ['RuntimeParameter', 'CoreParameter', 'Device', 'DeviceMeta']
+
+
class RuntimeParameter(object):
    """
    A runtime parameter bundled together with the getter and setter methods
    used to read and write it.

    """

    def __init__(self, name, getter, setter,
                 getter_args=None, setter_args=None,
                 value_name='value', override=False):
        """
        :param name: the name of the parameter.
        :param getter: the getter method which returns the value of this parameter.
        :param setter: the setter method which sets the value of this parameter. The setter
                       always expects to be passed one argument when it is called.
        :param getter_args: keyword arguments to be used when invoking the getter.
        :param setter_args: keyword arguments to be used when invoking the setter.
        :param value_name: name associated with the parameter's value
                           (defaults to ``'value'``).
        :param override: A ``bool`` that specifies whether a parameter of the same name further up the
                         hierarchy should be overridden. If this is ``False`` (the default), an exception
                         will be raised by the ``AttributeCollection`` instead.

        """
        self.name = name
        self.getter = getter
        self.setter = setter
        self.getter_args = getter_args or {}
        self.setter_args = setter_args or {}
        self.value_name = value_name
        self.override = override

    def __str__(self):
        # A runtime parameter is displayed simply by its name.
        return self.name

    __repr__ = __str__
+
+
class CoreParameter(RuntimeParameter):
    """A runtime parameter template that expands into one RuntimeParameter per core type."""

    def get_runtime_parameters(self, core_names):
        """Return a list of RuntimeParameters, one for each distinct core type
        in ``core_names``, with ``$core`` substituted in the templated name,
        getter and setter (e.g. ``$core_frequency`` -> ``a15_frequency``)."""
        expanded = []
        for core in set(core_names):
            name = string.Template(self.name).substitute(core=core)
            getter = string.Template(self.getter).substitute(core=core)
            setter = string.Template(self.setter).substitute(core=core)
            # Copy the base args and add the core, with 'core' overriding any
            # existing entry (same result as the dict(items + [...]) idiom).
            getargs = dict(self.getter_args, core=core)
            setargs = dict(self.setter_args, core=core)
            expanded.append(RuntimeParameter(name, getter, setter, getargs, setargs,
                                             self.value_name, self.override))
        return expanded
+
+
class DeviceMeta(ExtensionMeta):
    # Extends the extension metaclass so that, in addition to the attributes
    # already propagated by ExtensionMeta, ``runtime_parameters`` lists defined
    # on Device subclasses are collected into an AttributeCollection and
    # merged down the class hierarchy.

    to_propagate = ExtensionMeta.to_propagate + [
        ('runtime_parameters', RuntimeParameter, AttributeCollection),
    ]
+
+
+class Device(Extension):
+ """
+ Base class for all devices supported by Workload Automation. Defines
+ the interface the rest of WA uses to interact with devices.
+
+ :name: Unique name used to identify the device.
+ :platform: The name of the device's platform (e.g. ``Android``) this may
+ be used by workloads and instrumentation to assess whether they
+ can run on the device.
+ :working_directory: a string of the directory which is
+ going to be used by the workloads on the device.
+ :binaries_directory: a string of the binary directory for
+ the device.
+    :has_gpu: Should be ``True`` if the device has a separate GPU, and
+              ``False`` if graphics processing is done on a CPU.
+
+ .. note:: Pretty much all devices currently on the market
+ have GPUs, however this may not be the case for some
+ development boards.
+
+ :path_module: The name of one of the modules implementing the os.path
+ interface, e.g. ``posixpath`` or ``ntpath``. You can provide
+ your own implementation rather than relying on one of the
+ standard library modules, in which case you need to specify
+                  the *full* path to your module. e.g. '/home/joebloggs/mypathimp.py'
+ :parameters: A list of RuntimeParameter objects. The order of the objects
+ is very important as the setters and getters will be called
+ in the order the RuntimeParameter objects inserted.
+ :active_cores: This should be a list of all the currently active cpus in
+ the device in ``'/sys/devices/system/cpu/online'``. The
+ returned list should be read from the device at the time
+ of read request.
+
+ """
+ __metaclass__ = DeviceMeta
+
    # User-configurable parameters common to all devices (see the class
    # docstring for the semantics of each).
    parameters = [
        Parameter('core_names', kind=list_of_strings, mandatory=True, default=None,
                  description="""
                  This is a list of all cpu cores on the device with each
                  element being the core type, e.g. ``['a7', 'a7', 'a15']``. The
                  order of the cores must match the order they are listed in
                  ``'/sys/devices/system/cpu'``. So in this case, ``'cpu0'`` must
                  be an A7 core, and ``'cpu2'`` an A15.'
                  """),
        Parameter('core_clusters', kind=list_of_integers, mandatory=True, default=None,
                  description="""
                  This is a list indicating the cluster affinity of the CPU cores,
                  each element correponding to the cluster ID of the core coresponding
                  to it's index. E.g. ``[0, 0, 1]`` indicates that cpu0 and cpu1 are on
                  cluster 0, while cpu2 is on cluster 1.
                  """),
    ]

    # Collected by DeviceMeta into an AttributeCollection and propagated down
    # the class hierarchy; subclasses append RuntimeParameter objects here.
    runtime_parameters = []

    # These must be overwritten by subclasses.
    name = None  # unique name used to identify the device
    platform = None  # name of the device's platform, e.g. ``Android``
    default_working_directory = None  # directory used by workloads on the device
    has_gpu = None  # True if the device has a separate GPU
    path_module = None  # os.path-style module name (see __init__)
    active_cores = None  # list of currently online cpus (read from the device)
+
    def __init__(self, **kwargs):  # pylint: disable=W0613
        """
        Load the ``os.path``-style module named by ``path_module`` so that
        device paths can be manipulated with the target's conventions
        (``self.path``). The module name is resolved relative to the standard
        library directory (so e.g. ``posixpath``/``ntpath`` work); an absolute
        ``.py`` path also works, since ``os.path.join`` keeps an absolute
        second argument.

        :raises NotImplementedError: if the subclass did not set ``path_module``.
        :raises DeviceError: if the path module could not be loaded.
        """
        super(Device, self).__init__(**kwargs)
        if not self.path_module:
            raise NotImplementedError('path_module must be specified by the deriving classes.')
        # Locate the standard library directory via the os module's own file.
        libpath = os.path.dirname(os.__file__)
        modpath = os.path.join(libpath, self.path_module)
        if not modpath.lower().endswith('.py'):
            modpath += '.py'
        try:
            self.path = imp.load_source('device_path', modpath)
        except IOError:
            raise DeviceError('Unsupported path module: {}'.format(self.path_module))
+
    def reset(self):
        """
        Initiate rebooting of the device. Concrete devices must implement this.

        Added in version 2.1.3.

        """
        raise NotImplementedError()
+
    def boot(self, *args, **kwargs):
        """
        Perform the steps necessary to boot the device to the point where it is ready
        to accept other commands. Concrete devices must implement this.

        Changed in version 2.1.3: no longer expected to wait until boot completes.

        """
        raise NotImplementedError()
+
    def connect(self, *args, **kwargs):
        """
        Establish a connection to the device that will be used for subsequent
        commands. Concrete devices must implement this.

        Added in version 2.1.3.
        """
        raise NotImplementedError()
+
    def disconnect(self):
        """Close the established connection to the device. Concrete devices
        must implement this."""
        raise NotImplementedError()
+
    def initialize(self, context, *args, **kwargs):
        """
        Default implementation just calls through to init(). May be overridden by specialised
        abstract sub-classes to implement platform-specific initialization without requiring
        concrete implementations to explicitly invoke parent's init().

        Added in version 2.1.3.

        """
        self.init(context, *args, **kwargs)
+
    def init(self, context, *args, **kwargs):
        """
        Initialize the device. This method *must* be called after a device reboot before
        any other commands can be issued, however it may also be called without rebooting.

        It is up to device-specific implementations to identify what initialisation needs
        to be performed on a particular invocation. Bear in mind that no assumptions can be
        made about the state of the device prior to the initiation of workload execution,
        so full initialisation must be performed at least once, even if no reboot has occurred.
        After that, the device-specific implementation may choose to skip initialization if
        the device has not been rebooted; it is up to the implementation to keep track of
        that, however.

        All arguments are device-specific (see the documentation for your device).

        """
        pass
+
+    def ping(self):
+        """
+        This must return successfully if the device is able to receive commands, or must
+        raise :class:`wlauto.exceptions.DeviceUnresponsiveError` if the device cannot respond.
+
+        """
+        raise NotImplementedError()
+
+    def get_runtime_parameter_names(self):
+        """ Return the names of all runtime parameters, with core parameters expanded. """
+        return [p.name for p in self._expand_runtime_parameters()]
+
+    def get_runtime_parameters(self):
+        """ Returns the runtime parameters that have been set. """
+        # pylint: disable=cell-var-from-loop
+        runtime_parameters = OrderedDict()
+        for rtp in self._expand_runtime_parameters():
+            # Parameters without a getter are write-only and cannot be reported.
+            if not rtp.getter:
+                continue
+            getter = getattr(self, rtp.getter)
+            rtp_value = getter(**rtp.getter_args)
+            runtime_parameters[rtp.name] = rtp_value
+        return runtime_parameters
+
+    def set_runtime_parameters(self, params):
+        """
+        The parameters are taken from the keyword arguments and are specific to
+        a particular device. See the device documentation.
+
+        :raises ConfigError: if ``params`` contains a name that does not match
+                             any declared runtime parameter.
+        """
+        runtime_parameters = self._expand_runtime_parameters()
+        rtp_map = {rtp.name.lower(): rtp for rtp in runtime_parameters}
+
+        # Runtime parameter names are matched case-insensitively.
+        params = OrderedDict((k.lower(), v) for k, v in params.iteritems())
+
+        expected_keys = rtp_map.keys()
+        if not set(params.keys()) <= set(expected_keys):
+            unknown_params = list(set(params.keys()).difference(set(expected_keys)))
+            raise ConfigError('Unknown runtime parameter(s): {}'.format(unknown_params))
+
+        for param in params:
+            rtp = rtp_map[param]
+            setter = getattr(self, rtp.setter)
+            # Merge the parameter's fixed setter arguments with the value
+            # being set, passed under the parameter's declared value_name.
+            args = dict(rtp.setter_args.items() + [(rtp.value_name, params[rtp.name.lower()])])
+            setter(**args)
+
+    def capture_screen(self, filepath):
+        """Captures the current device screen into the specified file in a PNG format."""
+        raise NotImplementedError()
+
+    def get_properties(self, output_path):
+        """Captures and saves the device configuration properties, version and
+        any other relevant information. Returns them in a dict."""
+        raise NotImplementedError()
+
+    def listdir(self, path, **kwargs):
+        """ List the contents of the specified directory. """
+        raise NotImplementedError()
+
+    def push_file(self, source, dest):
+        """ Push a file from the host file system onto the device. """
+        raise NotImplementedError()
+
+    def pull_file(self, source, dest):
+        """ Pull a file from the device file system onto the host file system. """
+        raise NotImplementedError()
+
+    def delete_file(self, filepath):
+        """ Delete the specified file on the device. """
+        raise NotImplementedError()
+
+    def file_exists(self, filepath):
+        """ Check if the specified file or directory exists on the device. """
+        raise NotImplementedError()
+
+    def get_pids_of(self, process_name):
+        """ Returns a list of PIDs of the specified process name. """
+        raise NotImplementedError()
+
+    def kill(self, pid, as_root=False):
+        """ Kill the process with the specified PID. """
+        raise NotImplementedError()
+
+    def killall(self, process_name, as_root=False):
+        """ Kill all running processes with the specified name. """
+        raise NotImplementedError()
+
+    def install(self, filepath, **kwargs):
+        """ Install the specified file on the device. What "install" means is device-specific
+        and may possibly also depend on the type of file."""
+        raise NotImplementedError()
+
+    def uninstall(self, filepath):
+        """ Uninstall the specified file on the device. What "uninstall" means is device-specific
+        and may possibly also depend on the type of file."""
+        raise NotImplementedError()
+
+    def execute(self, command, timeout=None, **kwargs):
+        """
+        Execute the specified command on the device and return the output.
+
+        :param command: Command to be executed on the device.
+        :param timeout: If the command does not return after the specified time,
+                        execute() will abort with an error. If there is no timeout for
+                        the command, this should be set to 0 or None.
+
+        Other device-specific keyword arguments may also be specified.
+
+        :returns: The stdout output from the command.
+
+        """
+        raise NotImplementedError()
+
+    def set_sysfile_value(self, filepath, value, verify=True):
+        """
+        Write the specified value to the specified file on the device
+        and verify that the value has actually been written.
+
+        :param filepath: The file to be modified.
+        :param value: The value to be written to the file. Must be
+                      an int or a string convertible to an int.
+        :param verify: Specifies whether the value should be verified, once written.
+
+        Should raise DeviceError if the value could not be written.
+
+        """
+        raise NotImplementedError()
+
+    def get_sysfile_value(self, sysfile, kind=None):
+        """
+        Get the contents of the specified sysfile.
+
+        :param sysfile: The file whose contents will be returned.
+
+        :param kind: The type of value to be expected in the sysfile. This can
+                     be any Python callable that takes a single str argument.
+                     If not specified or is None, the contents will be returned
+                     as a string.
+
+        """
+        raise NotImplementedError()
+
+    def start(self):
+        """
+        This gets invoked before an iteration is started and is intended to help the
+        device manage any internal supporting functions.
+
+        """
+        pass
+
+    def stop(self):
+        """
+        This gets invoked after iteration execution has completed and is intended to help the
+        device manage any internal supporting functions.
+
+        """
+        pass
+
+    def __str__(self):
+        return 'Device<{}>'.format(self.name)
+
+    __repr__ = __str__
+
+    def _expand_runtime_parameters(self):
+        """ Expand each CoreParameter into one parameter per core name. """
+        expanded_params = []
+        for param in self.runtime_parameters:
+            if isinstance(param, CoreParameter):
+                expanded_params.extend(param.get_runtime_parameters(self.core_names)) # pylint: disable=no-member
+            else:
+                expanded_params.append(param)
+        return expanded_params
+
+    @contextmanager
+    def _check_alive(self):
+        """
+        Context manager that pings the device when the wrapped block raises, so
+        an unresponsive device surfaces as DeviceUnresponsiveError (from ping())
+        rather than as the original, possibly misleading, exception.
+        """
+        try:
+            yield
+        except Exception as e:
+            self.ping()
+            # NOTE(review): ``raise e`` resets the traceback in Python 2; a bare
+            # ``raise`` would preserve the original one -- confirm intended.
+            raise e
+
diff --git a/wlauto/core/entry_point.py b/wlauto/core/entry_point.py
new file mode 100644
index 00000000..a0af5f58
--- /dev/null
+++ b/wlauto/core/entry_point.py
@@ -0,0 +1,75 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import sys
+import argparse
+import logging
+
+from wlauto.core.bootstrap import settings
+from wlauto.core.extension_loader import ExtensionLoader
+from wlauto.exceptions import WAError
+from wlauto.utils.misc import get_traceback
+from wlauto.utils.log import init_logging
+from wlauto.utils.cli import init_argument_parser
+from wlauto.utils.doc import format_body
+
+
+import warnings
+warnings.filterwarnings(action='ignore', category=UserWarning, module='zope')
+
+
+logger = logging.getLogger('command_line')
+
+
+def load_commands(subparsers):
+    """
+    Instantiate all discoverable Command extensions, registering each one's
+    argument sub-parser with ``subparsers`` and caching the command instance
+    in ``settings.commands`` keyed by name.
+    """
+    ext_loader = ExtensionLoader(paths=settings.extension_paths)
+    for command in ext_loader.list_commands():
+        settings.commands[command.name] = ext_loader.get_command(command.name, subparsers=subparsers)
+
+
+def main():
+    """
+    Command-line entry point: parse arguments, apply settings, initialise
+    logging, then dispatch to the selected sub-command.
+
+    Exit codes: the sub-command's own code on success, 1 for known WA errors,
+    2 for unexpected errors, 3 on CTRL-C.
+    """
+    try:
+        description = ("Execute automated workloads on a remote device and process "
+                       "the resulting output.\n\nUse \"wa <subcommand> -h\" to see "
+                       "help for individual subcommands.")
+        parser = argparse.ArgumentParser(description=format_body(description, 80),
+                                         prog='wa',
+                                         formatter_class=argparse.RawDescriptionHelpFormatter,
+                                         )
+        init_argument_parser(parser)
+        load_commands(parser.add_subparsers(dest='command')) # each command will add its own subparser
+        args = parser.parse_args()
+        settings.verbosity = args.verbose
+        settings.debug = args.debug
+        if args.config:
+            settings.update(args.config)
+        init_logging(settings.verbosity)
+
+        command = settings.commands[args.command]
+        sys.exit(command.execute(args))
+
+    except KeyboardInterrupt:
+        logging.info('Got CTRL-C. Aborting.')
+        sys.exit(3)
+    except WAError, e:
+        # Known WA errors are reported without a traceback.
+        logging.critical(e)
+        sys.exit(1)
+    except Exception, e: # pylint: disable=broad-except
+        # Unexpected errors get a full traceback to aid debugging.
+        tb = get_traceback()
+        logging.critical(tb)
+        logging.critical('{}({})'.format(e.__class__.__name__, e))
+        sys.exit(2)
+
diff --git a/wlauto/core/execution.py b/wlauto/core/execution.py
new file mode 100644
index 00000000..c903031c
--- /dev/null
+++ b/wlauto/core/execution.py
@@ -0,0 +1,798 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=no-member
+
+"""
+This module contains the execution logic for Workload Automation. It defines the
+following actors:
+
+ WorkloadSpec: Identifies the workload to be run and defines parameters under
+ which it should be executed.
+
+ Executor: Responsible for the overall execution process. It instantiates
+              and/or initialises the other actors, does any necessary validation
+ and kicks off the whole process.
+
+ Execution Context: Provides information about the current state of run
+ execution to instrumentation.
+
+ RunInfo: Information about the current run.
+
+ Runner: This executes workload specs that are passed to it. It goes through
+ stages of execution, emitting an appropriate signal at each step to
+ allow instrumentation to do its stuff.
+
+"""
+import os
+import uuid
+import logging
+import subprocess
+import random
+from copy import copy
+from datetime import datetime
+from contextlib import contextmanager
+from collections import Counter, defaultdict, OrderedDict
+from itertools import izip_longest
+
+import wlauto.core.signal as signal
+from wlauto.core import instrumentation
+from wlauto.core.bootstrap import settings
+from wlauto.core.extension import Artifact
+from wlauto.core.configuration import RunConfiguration
+from wlauto.core.extension_loader import ExtensionLoader
+from wlauto.core.resolver import ResourceResolver
+from wlauto.core.result import ResultManager, IterationResult, RunResult
+from wlauto.exceptions import (WAError, ConfigError, TimeoutError, InstrumentError,
+ DeviceError, DeviceNotRespondingError)
+from wlauto.utils.misc import ensure_directory_exists as _d, get_traceback, merge_dicts, format_duration
+
+
+# The maximum number of reboot attempts for an iteration.
+MAX_REBOOT_ATTEMPTS = 3
+
+# If something went wrong during device initialization, wait this
+# long (in seconds) before retrying. This is necessary, as retrying
+# immediately may not give the device enough time to recover to be able
+# to reboot.
+REBOOT_DELAY = 3
+
+
+class RunInfo(object):
+    """
+    Information about the current run, such as its unique ID, run
+    time, etc.
+
+    """
+
+    def __init__(self, config):
+        self.config = config
+        self.uuid = uuid.uuid4()
+        self.start_time = None
+        self.end_time = None
+        self.duration = None
+        self.project = config.project
+        self.project_stage = config.project_stage
+        self.run_name = config.run_name
+        self.notes = None
+        self.device_properties = {}
+
+    def to_dict(self):
+        """ Serialise this run info, merged with its configuration, into a plain dict. """
+        d = copy(self.__dict__)
+        d['uuid'] = str(self.uuid)
+        # The config object itself is not directly serialisable; merge in its
+        # dict form instead.
+        del d['config']
+        d = merge_dicts(d, self.config.to_dict())
+        return d
+
+
+class ExecutionContext(object):
+    """
+    Provides a context for instrumentation. Keeps track of things like
+    current workload and iteration.
+
+    This class also provides two status members that can be used by workloads
+    and instrumentation to keep track of arbitrary state. ``result``
+    is reset on each new iteration of a workload; run_status is maintained
+    throughout a Workload Automation run.
+
+    """
+
+    # These are the artifacts generated by the core framework.
+    default_run_artifacts = [
+        Artifact('runlog', 'run.log', 'log', mandatory=True,
+                 description='The log for the entire run.'),
+    ]
+
+    @property
+    def current_iteration(self):
+        # 1-based count of how many times the current job's spec has been run;
+        # None when no job is active.
+        if self.current_job:
+            spec_id = self.current_job.spec.id
+            return self.job_iteration_counts[spec_id]
+        else:
+            return None
+
+    @property
+    def workload(self):
+        # Workload of the active job's spec, or None outside a job.
+        return getattr(self.spec, 'workload', None)
+
+    @property
+    def spec(self):
+        # Spec of the active job, or None outside a job.
+        return getattr(self.current_job, 'spec', None)
+
+    @property
+    def result(self):
+        # Result of the active job, or None outside a job.
+        return getattr(self.current_job, 'result', None)
+
+    def __init__(self, device, config):
+        self.device = device
+        self.config = config
+        self.reboot_policy = config.reboot_policy
+        self.output_directory = None
+        self.current_job = None
+        self.resolver = None
+        self.last_error = None
+        self.run_info = None
+        self.run_result = None
+        self.run_output_directory = settings.output_directory
+        self.host_working_directory = settings.meta_directory
+        self.iteration_artifacts = None
+        self.run_artifacts = copy(self.default_run_artifacts)
+        self.job_iteration_counts = defaultdict(int)
+        self.aborted = False
+        # Record the agenda and every config file used as run-level artifacts
+        # so they are preserved alongside the results.
+        if settings.agenda:
+            self.run_artifacts.append(Artifact('agenda',
+                                               os.path.join(self.host_working_directory,
+                                                            os.path.basename(settings.agenda)),
+                                               'meta',
+                                               mandatory=True,
+                                               description='Agenda for this run.'))
+        for i in xrange(1, settings.config_count + 1):
+            self.run_artifacts.append(Artifact('config_{}'.format(i),
+                                               os.path.join(self.host_working_directory,
+                                                            'config_{}.py'.format(i)),
+                                               kind='meta',
+                                               mandatory=True,
+                                               description='Config file used for the run.'))
+
+    def initialize(self):
+        """ Create the output directory and set up the resolver and run result objects. """
+        if not os.path.isdir(self.run_output_directory):
+            os.makedirs(self.run_output_directory)
+        self.output_directory = self.run_output_directory
+        self.resolver = ResourceResolver(self.config)
+        self.run_info = RunInfo(self.config)
+        self.run_result = RunResult(self.run_info)
+
+    def next_job(self, job):
+        """Invoked by the runner when starting a new iteration of workload execution."""
+        self.current_job = job
+        self.job_iteration_counts[self.spec.id] += 1
+        self.current_job.result.iteration = self.current_iteration
+        if not self.aborted:
+            # Each iteration gets its own output directory: <label>_<id>_<iteration>.
+            outdir_name = '_'.join(map(str, [self.spec.label, self.spec.id, self.current_iteration]))
+            self.output_directory = _d(os.path.join(self.run_output_directory, outdir_name))
+            # Shallow-copy the workload's artifact list so per-iteration
+            # additions do not accumulate on the workload itself.
+            self.iteration_artifacts = [wa for wa in self.workload.artifacts]
+
+    def end_job(self):
+        # An aborted iteration aborts the rest of the run.
+        if self.current_job.result.status == IterationResult.ABORTED:
+            self.aborted = True
+        self.current_job = None
+        self.output_directory = self.run_output_directory
+
+    def add_artifact(self, name, path, kind, *args, **kwargs):
+        """ Add an artifact at run scope if no job is active, otherwise at iteration scope. """
+        if self.current_job is None:
+            self.add_run_artifact(name, path, kind, *args, **kwargs)
+        else:
+            self.add_iteration_artifact(name, path, kind, *args, **kwargs)
+
+    def add_run_artifact(self, name, path, kind, *args, **kwargs):
+        # NOTE(review): the *run* artifact is created with level Artifact.ITERATION
+        # here, while add_iteration_artifact below uses Artifact.RUN -- these look
+        # swapped; confirm against Artifact's level semantics.
+        path = _check_artifact_path(path, self.run_output_directory)
+        self.run_artifacts.append(Artifact(name, path, kind, Artifact.ITERATION, *args, **kwargs))
+
+    def add_iteration_artifact(self, name, path, kind, *args, **kwargs):
+        path = _check_artifact_path(path, self.output_directory)
+        self.iteration_artifacts.append(Artifact(name, path, kind, Artifact.RUN, *args, **kwargs))
+
+    def get_artifact(self, name):
+        """ Return the named artifact (iteration artifacts take precedence), or None. """
+        if self.iteration_artifacts:
+            for art in self.iteration_artifacts:
+                if art.name == name:
+                    return art
+        for art in self.run_artifacts:
+            if art.name == name:
+                return art
+        return None
+
+
+def _check_artifact_path(path, rootpath):
+    """
+    Resolve ``path`` relative to ``rootpath`` and return its absolute form,
+    raising ValueError if the resolved file does not exist.
+    """
+    # NOTE(review): this prefix test runs before rootpath is made absolute
+    # below, and a plain startswith also matches sibling directories such as
+    # rootpath + '2' -- confirm this is acceptable for artifact paths.
+    if path.startswith(rootpath):
+        return os.path.abspath(path)
+    rootpath = os.path.abspath(rootpath)
+    full_path = os.path.join(rootpath, path)
+    if not os.path.isfile(full_path):
+        raise ValueError('Cannot add artifact because {} does not exist.'.format(full_path))
+    return full_path
+
+
+class Executor(object):
+    """
+    The ``Executor``'s job is to set up the execution context and pass to a ``Runner``
+    along with a loaded run specification. Once the ``Runner`` has done its thing,
+    the ``Executor`` performs some final reporting before returning.
+
+    The initial context set up involves combining configuration from various sources,
+    loading of required workloads, loading and installation of instruments and result
+    processors, etc. Static validation of the combined configuration is also performed.
+
+    """
+    # pylint: disable=R0915
+
+    def __init__(self):
+        self.logger = logging.getLogger('Executor')
+        self.error_logged = False
+        self.warning_logged = False
+        self.config = None
+        self.ext_loader = None
+        self.device = None
+        self.context = None
+
+    def execute(self, agenda, selectors=None): # NOQA
+        """
+        Execute the run specified by an agenda. Optionally, selectors may be used to only
+        execute a subset of the specified agenda.
+
+        Params::
+
+            :agenda: an ``Agenda`` instance to be executed.
+            :selectors: A dict mapping selector name to the corresponding values.
+
+        **Selectors**
+
+        Currently, the following selectors are supported:
+
+        ids
+            The value must be a sequence of workload specification IDs to be executed. Note
+            that if sections are specified in the agenda, the workload specification ID will
+            be a combination of the section and workload IDs.
+
+        """
+        signal.connect(self._error_signalled_callback, signal.ERROR_LOGGED)
+        signal.connect(self._warning_signalled_callback, signal.WARNING_LOGGED)
+
+        self.logger.info('Initializing')
+        self.ext_loader = ExtensionLoader(packages=settings.extension_packages,
+                                          paths=settings.extension_paths)
+
+        self.logger.debug('Loading run configuration.')
+        self.config = RunConfiguration(self.ext_loader)
+        for filepath in settings.get_config_paths():
+            self.config.load_config(filepath)
+        self.config.set_agenda(agenda, selectors)
+        self.config.finalize()
+        # Preserve the fully-resolved configuration alongside the results.
+        config_outfile = os.path.join(settings.meta_directory, 'run_config.json')
+        with open(config_outfile, 'w') as wfh:
+            self.config.serialize(wfh)
+
+        self.logger.debug('Initialising device configuration.')
+        if not self.config.device:
+            raise ConfigError('Make sure a device is specified in the config.')
+        self.device = self.ext_loader.get_device(self.config.device, **self.config.device_config)
+        self.device.validate()
+
+        self.context = ExecutionContext(self.device, self.config)
+
+        self.logger.debug('Loading resource discoverers.')
+        self.context.initialize()
+        self.context.resolver.load()
+        self.context.add_artifact('run_config', config_outfile, 'meta')
+
+        self.logger.debug('Installing instrumentation')
+        for name, params in self.config.instrumentation.iteritems():
+            instrument = self.ext_loader.get_instrument(name, self.device, **params)
+            instrumentation.install(instrument)
+        instrumentation.validate()
+
+        self.logger.debug('Installing result processors')
+        result_manager = ResultManager()
+        for name, params in self.config.result_processors.iteritems():
+            processor = self.ext_loader.get_result_processor(name, **params)
+            result_manager.install(processor)
+        result_manager.validate()
+
+        self.logger.debug('Loading workload specs')
+        for workload_spec in self.config.workload_specs:
+            workload_spec.load(self.device, self.ext_loader)
+            workload_spec.workload.init_resources(self.context)
+            workload_spec.workload.validate()
+
+        if self.config.flashing_config:
+            if not self.device.flasher:
+                msg = 'flashing_config specified for {} device that does not support flashing.'
+                raise ConfigError(msg.format(self.device.name))
+            self.logger.debug('Flashing the device')
+            self.device.flasher.flash(self.device)
+
+        self.logger.info('Running workloads')
+        runner = self._get_runner(result_manager)
+        runner.init_queue(self.config.workload_specs)
+        runner.run()
+        self.execute_postamble()
+
+    def execute_postamble(self):
+        """
+        This happens after the run has completed. The overall results of the run are
+        summarised to the user.
+
+        """
+        result = self.context.run_result
+        counter = Counter()
+        for ir in result.iteration_results:
+            counter[ir.status] += 1
+        self.logger.info('Done.')
+        self.logger.info('Run duration: {}'.format(format_duration(self.context.run_info.duration)))
+        status_summary = 'Ran a total of {} iterations: '.format(sum(self.context.job_iteration_counts.values()))
+        parts = []
+        for status in IterationResult.values:
+            if status in counter:
+                parts.append('{} {}'.format(counter[status], status))
+        self.logger.info(status_summary + ', '.join(parts))
+        self.logger.info('Results can be found in {}'.format(settings.output_directory))
+
+        if self.error_logged:
+            self.logger.warn('There were errors during execution.')
+            self.logger.warn('Please see {}'.format(settings.log_file))
+        elif self.warning_logged:
+            self.logger.warn('There were warnings during execution.')
+            self.logger.warn('Please see {}'.format(settings.log_file))
+
+    def _get_runner(self, result_manager):
+        """ Select the Runner subclass matching the configured execution order. """
+        if not self.config.execution_order or self.config.execution_order == 'by_iteration':
+            if self.config.reboot_policy == 'each_spec':
+                self.logger.info('each_spec reboot policy with the default by_iteration execution order is '
+                                 'equivalent to each_iteration policy.')
+            runnercls = ByIterationRunner
+        elif self.config.execution_order in ['classic', 'by_spec']:
+            runnercls = BySpecRunner
+        elif self.config.execution_order == 'by_section':
+            runnercls = BySectionRunner
+        elif self.config.execution_order == 'random':
+            runnercls = RandomRunner
+        else:
+            raise ConfigError('Unexpected execution order: {}'.format(self.config.execution_order))
+        return runnercls(self.device, self.context, result_manager)
+
+    def _error_signalled_callback(self):
+        # Only the first error needs noting; disconnect to avoid repeat work.
+        self.error_logged = True
+        signal.disconnect(self._error_signalled_callback, signal.ERROR_LOGGED)
+
+    def _warning_signalled_callback(self):
+        self.warning_logged = True
+        signal.disconnect(self._warning_signalled_callback, signal.WARNING_LOGGED)
+
+
+class RunnerJob(object):
+    """
+    Represents a single execution of a ``RunnerJobDescription``. There will be one created for each iteration
+    specified by ``RunnerJobDescription.number_of_iterations``.
+
+    """
+
+    def __init__(self, spec):
+        self.spec = spec
+        # Iteration number; set by the runner when the job is finalized.
+        self.iteration = None
+        self.result = IterationResult(self.spec)
+
+
+class Runner(object):
+ """
+ This class is responsible for actually performing a workload automation
+ run. The main responsibility of this class is to emit appropriate signals
+ at the various stages of the run to allow things like traces an other
+ instrumentation to hook into the process.
+
+ This is an abstract base class that defines each step of the run, but not
+ the order in which those steps are executed, which is left to the concrete
+ derived classes.
+
+ """
+ class _RunnerError(Exception):
+ """Internal runner error."""
+ pass
+
+ @property
+ def current_job(self):
+ if self.job_queue:
+ return self.job_queue[0]
+ return None
+
+ @property
+ def previous_job(self):
+ if self.completed_jobs:
+ return self.completed_jobs[-1]
+ return None
+
+ @property
+ def next_job(self):
+ if self.job_queue:
+ if len(self.job_queue) > 1:
+ return self.job_queue[1]
+ return None
+
+ @property
+ def spec_changed(self):
+ if self.previous_job is None and self.current_job is not None: # Start of run
+ return True
+ if self.previous_job is not None and self.current_job is None: # End of run
+ return True
+ return self.current_job.spec.id != self.previous_job.spec.id
+
+ @property
+ def spec_will_change(self):
+ if self.current_job is None and self.next_job is not None: # Start of run
+ return True
+ if self.current_job is not None and self.next_job is None: # End of run
+ return True
+ return self.current_job.spec.id != self.next_job.spec.id
+
+ def __init__(self, device, context, result_manager):
+ self.device = device
+ self.context = context
+ self.result_manager = result_manager
+ self.logger = logging.getLogger('Runner')
+ self.job_queue = []
+ self.completed_jobs = []
+ self._initial_reset = True
+
+ def init_queue(self, specs):
+ raise NotImplementedError()
+
+ def run(self): # pylint: disable=too-many-branches
+ self._send(signal.RUN_START)
+ self._initialize_run()
+
+ try:
+ while self.job_queue:
+ try:
+ self._init_job()
+ self._run_job()
+ except KeyboardInterrupt:
+ self.current_job.result.status = IterationResult.ABORTED
+ raise
+ except Exception, e: # pylint: disable=broad-except
+ self.current_job.result.status = IterationResult.FAILED
+ self.current_job.result.add_event(e.message)
+ if isinstance(e, DeviceNotRespondingError):
+ self.logger.info('Device appears to be unresponsive.')
+ if self.context.reboot_policy.can_reboot and self.device.can('reset_power'):
+ self.logger.info('Attempting to hard-reset the device...')
+ try:
+ self.device.hard_reset()
+ self.device.connect()
+ except DeviceError: # hard_boot not implemented for the device.
+ raise e
+ else:
+ raise e
+ else: # not a DeviceNotRespondingError
+ self.logger.error(e)
+ finally:
+ self._finalize_job()
+ except KeyboardInterrupt:
+ self.logger.info('Got CTRL-C. Finalizing run... (CTRL-C again to abort).')
+ # Skip through the remaining jobs.
+ while self.job_queue:
+ self.context.next_job(self.current_job)
+ self.current_job.result.status = IterationResult.ABORTED
+ self._finalize_job()
+ except DeviceNotRespondingError:
+ self.logger.info('Device unresponsive and recovery not possible. Skipping the rest of the run.')
+ self.context.aborted = True
+ while self.job_queue:
+ self.context.next_job(self.current_job)
+ self.current_job.result.status = IterationResult.SKIPPED
+ self._finalize_job()
+
+ instrumentation.enable_all()
+ self._finalize_run()
+ self._process_results()
+
+ self.result_manager.finalize(self.context)
+ self._send(signal.RUN_END)
+
+ def _initialize_run(self):
+ self.context.run_info.start_time = datetime.utcnow()
+ if self.context.reboot_policy.perform_initial_boot:
+ self.logger.info('\tBooting device')
+ with self._signal_wrap('INITIAL_BOOT'):
+ self._reboot_device()
+ else:
+ self.logger.info('Connecting to device')
+ self.device.connect()
+ self.logger.info('Initializing device')
+ self.device.initialize(self.context)
+
+ props = self.device.get_properties(self.context)
+ self.context.run_info.device_properties = props
+ self.result_manager.initialize(self.context)
+ self._send(signal.RUN_INIT)
+
+ if instrumentation.check_failures():
+ raise InstrumentError('Detected failure(s) during instrumentation initialization.')
+
+ def _init_job(self):
+ self.current_job.result.status = IterationResult.RUNNING
+ self.context.next_job(self.current_job)
+
+ def _run_job(self): # pylint: disable=too-many-branches
+ spec = self.current_job.spec
+ if not spec.enabled:
+ self.logger.info('Skipping workload %s (iteration %s)', spec, self.context.current_iteration)
+ self.current_job.result.status = IterationResult.SKIPPED
+ return
+
+ self.logger.info('Running workload %s (iteration %s)', spec, self.context.current_iteration)
+ if spec.flash:
+ if not self.context.reboot_policy.can_reboot:
+ raise ConfigError('Cannot flash as reboot_policy does not permit rebooting.')
+ if not self.device.can('flash'):
+ raise DeviceError('Device does not support flashing.')
+ self._flash_device(spec.flash)
+ elif not self.completed_jobs:
+ # Never reboot on the very fist job of a run, as we would have done
+ # the initial reboot if a reboot was needed.
+ pass
+ elif self.context.reboot_policy.reboot_on_each_spec and self.spec_changed:
+ self.logger.debug('Rebooting on spec change.')
+ self._reboot_device()
+ elif self.context.reboot_policy.reboot_on_each_iteration:
+ self.logger.debug('Rebooting on iteration.')
+ self._reboot_device()
+
+ instrumentation.disable_all()
+ instrumentation.enable(spec.instrumentation)
+ self.device.start()
+
+ if self.spec_changed:
+ self._send(signal.WORKLOAD_SPEC_START)
+ self._send(signal.ITERATION_START)
+
+ try:
+ setup_ok = False
+ with self._handle_errors('Setting up device parameters'):
+ self.device.set_runtime_parameters(spec.runtime_parameters)
+ setup_ok = True
+
+ if setup_ok:
+ with self._handle_errors('running {}'.format(spec.workload.name)):
+ self.current_job.result.status = IterationResult.RUNNING
+ self._run_workload_iteration(spec.workload)
+ else:
+ self.logger.info('\tSkipping the rest of the iterations for this spec.')
+ spec.enabled = False
+ except KeyboardInterrupt:
+ self._send(signal.ITERATION_END)
+ self._send(signal.WORKLOAD_SPEC_END)
+ raise
+ else:
+ self._send(signal.ITERATION_END)
+ if self.spec_will_change or not spec.enabled:
+ self._send(signal.WORKLOAD_SPEC_END)
+ finally:
+ self.device.stop()
+
+ def _finalize_job(self):
+ self.context.run_result.iteration_results.append(self.current_job.result)
+ self.job_queue[0].iteration = self.context.current_iteration
+ self.completed_jobs.append(self.job_queue.pop(0))
+ self.context.end_job()
+
+ def _finalize_run(self):
+ self.logger.info('Finalizing.')
+ self._send(signal.RUN_FIN)
+
+ with self._handle_errors('Disconnecting from the device'):
+ self.device.disconnect()
+
+ info = self.context.run_info
+ info.end_time = datetime.utcnow()
+ info.duration = info.end_time - info.start_time
+
+ def _process_results(self):
+ self.logger.info('Processing overall results')
+ with self._signal_wrap('OVERALL_RESULTS_PROCESSING'):
+ if instrumentation.check_failures():
+ self.context.run_result.non_iteration_errors = True
+ self.result_manager.process_run_result(self.context.run_result, self.context)
+
+ def _run_workload_iteration(self, workload):
+ self.logger.info('\tSetting up')
+ with self._signal_wrap('WORKLOAD_SETUP'):
+ try:
+ workload.setup(self.context)
+ except:
+ self.logger.info('\tSkipping the rest of the iterations for this spec.')
+ self.current_job.spec.enabled = False
+ raise
+ try:
+
+ self.logger.info('\tExecuting')
+ with self._handle_errors('Running workload'):
+ with self._signal_wrap('WORKLOAD_EXECUTION'):
+ workload.run(self.context)
+
+ self.logger.info('\tProcessing result')
+ self._send(signal.BEFORE_WORKLOAD_RESULT_UPDATE)
+ try:
+ if self.current_job.result.status != IterationResult.FAILED:
+ with self._handle_errors('Processing workload result',
+ on_error_status=IterationResult.PARTIAL):
+ workload.update_result(self.context)
+ self._send(signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE)
+
+ if self.current_job.result.status == IterationResult.RUNNING:
+ self.current_job.result.status = IterationResult.OK
+ finally:
+ self._send(signal.AFTER_WORKLOAD_RESULT_UPDATE)
+
+ finally:
+ self.logger.info('\tTearing down')
+ with self._handle_errors('Tearing down workload',
+ on_error_status=IterationResult.NONCRITICAL):
+ with self._signal_wrap('WORKLOAD_TEARDOWN'):
+ workload.teardown(self.context)
+ self.result_manager.add_result(self.current_job.result, self.context)
+
+ def _flash_device(self, flashing_params):
+ with self._signal_wrap('FLASHING'):
+ self.device.flash(**flashing_params)
+ self.device.connect()
+
    def _reboot_device(self):
        """
        Reboot the device, retrying up to MAX_REBOOT_ATTEMPTS times, then
        reconnect to it.

        :raises DeviceError: if the device did not come back up within the
            maximum number of attempts.
        """
        with self._signal_wrap('BOOT'):
            for reboot_attempts in xrange(MAX_REBOOT_ATTEMPTS):
                if reboot_attempts:
                    self.logger.info('\tRetrying...')
                # _handle_errors swallows WA errors (recording them on the
                # current job), so a failed boot falls through to the next
                # attempt instead of raising out of the loop.
                with self._handle_errors('Rebooting device'):
                    self.device.boot(**self.current_job.spec.boot_parameters)
                    break
            else:
                # for/else: reached only if the loop exhausted all attempts
                # without hitting the break above.
                raise DeviceError('Could not reboot device; max reboot attempts exceeded.')
            self.device.connect()
+
    def _send(self, s):
        """Dispatch signal ``s`` with this runner as the sender and the
        current execution context attached."""
        signal.send(s, self, self.context)
+
+ def _take_screenshot(self, filename):
+ if self.context.output_directory:
+ filepath = os.path.join(self.context.output_directory, filename)
+ else:
+ filepath = os.path.join(settings.output_directory, filename)
+ self.device.capture_screen(filepath)
+
    @contextmanager
    def _handle_errors(self, action, on_error_status=IterationResult.FAILED):
        """
        Context manager that runs its body and converts most exceptions into
        recorded errors on the current job instead of letting them propagate.

        :param action: Short description of what is being attempted, used for
            logging; may be ``None`` to suppress the debug message.
        :param on_error_status: The IterationResult status assigned to the
            current job when an error is caught (default: FAILED).

        KeyboardInterrupt and DeviceNotRespondingError always propagate;
        all other exceptions are logged and swallowed.
        """
        try:
            if action is not None:
                self.logger.debug(action)
            yield
        except (KeyboardInterrupt, DeviceNotRespondingError):
            # These must propagate: the user asked to stop, or the device is
            # gone and there is nothing useful to record here.
            raise
        except (WAError, TimeoutError), we:
            # ping() raises if the device has stopped responding, escalating
            # this into the more serious case handled by the caller.
            self.device.ping()
            if self.current_job:
                self.current_job.result.status = on_error_status
                self.current_job.result.add_event(str(we))
            try:
                self._take_screenshot('error.png')
            except Exception, e:  # pylint: disable=W0703
                # We're already in error state, so the fact that taking a
                # screenshot failed is not surprising...
                pass
            if action:
                # Lower-case the first letter so the message reads naturally.
                action = action[0].lower() + action[1:]
            self.logger.error('Error while {}:\n\t{}'.format(action, we))
        except Exception, e:  # pylint: disable=W0703
            # Unexpected (non-WA) error: record its type and message on the
            # job and log a full traceback to aid debugging.
            error_text = '{}("{}")'.format(e.__class__.__name__, e)
            if self.current_job:
                self.current_job.result.status = on_error_status
                self.current_job.result.add_event(error_text)
            self.logger.error('Error while {}'.format(action))
            self.logger.error(error_text)
            if isinstance(e, subprocess.CalledProcessError):
                self.logger.error('Got:')
                self.logger.error(e.output)
            tb = get_traceback()
            self.logger.error(tb)
+
    @contextmanager
    def _signal_wrap(self, signal_name):
        """Wraps the suite in before/after signals, ensuring
        that after signal is always sent.

        Sends ``BEFORE_<signal_name>`` on entry, ``SUCCESSFUL_<signal_name>``
        only if the body completed without raising, and
        ``AFTER_<signal_name>`` unconditionally (via the finally block).
        """
        before_signal = getattr(signal, 'BEFORE_' + signal_name)
        success_signal = getattr(signal, 'SUCCESSFUL_' + signal_name)
        after_signal = getattr(signal, 'AFTER_' + signal_name)
        try:
            self._send(before_signal)
            yield
            self._send(success_signal)
        finally:
            self._send(after_signal)
+
+
class BySpecRunner(Runner):
    """
    The "classic" scheduling strategy: every iteration of one workload spec
    is executed back-to-back before the next spec is started
    (A1, A2, ..., B1, B2, ...).

    """

    def init_queue(self, specs):
        # One job per requested iteration, grouped by spec in agenda order.
        self.job_queue = [RunnerJob(spec)
                          for spec in specs
                          for _ in xrange(spec.number_of_iterations)]
+
+
class BySectionRunner(Runner):
    """
    Runs the first iteration for all benchmarks first, before proceeding to the next iteration,
    i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2, C1, C2...

    If multiple sections were specified in the agenda, this will run all specs for the first section
    followed by all specs for the second section, etc.

    e.g. given sections X and Y, and global specs A and B, with 2 iterations, this will run

    X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2

    """

    def init_queue(self, specs):
        # Build a per-spec list of jobs, then interleave: the first job of
        # every spec, then the second of every spec, and so on. izip_longest
        # pads shorter lists with None (specs may have different iteration
        # counts), so the trailing ``if j`` filter drops the padding.
        jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs]
        self.job_queue = [j for spec_jobs in izip_longest(*jobs) for j in spec_jobs if j]
+
+
class ByIterationRunner(Runner):
    """
    Runs the first iteration for all benchmarks first, before proceeding to the next iteration,
    i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2, C1, C2...

    If multiple sections were specified in the agenda, this will run all sections for the first global
    spec first, followed by all sections for the second spec, etc.

    e.g. given sections X and Y, and global specs A and B, with 2 iterations, this will run

    X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2

    """

    def init_queue(self, specs):
        # Group the specs by the section they came from, preserving the
        # order in which the sections were first encountered.
        sections = OrderedDict()
        for s in specs:
            if s.section_id not in sections:
                sections[s.section_id] = []
            sections[s.section_id].append(s)
        # First interleave the sections' specs (first spec of each section,
        # then the second, ...), then interleave iterations across the
        # reordered specs. izip_longest pads with None when lengths differ;
        # the ``if s``/``if j`` filters drop the padding.
        specs = [s for section_specs in izip_longest(*sections.values()) for s in section_specs if s]
        jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs]
        self.job_queue = [j for spec_jobs in izip_longest(*jobs) for j in spec_jobs if j]
+
+
class RandomRunner(Runner):
    """
    Executes the queued jobs in a (pseudo-)random order.

    """

    def init_queue(self, specs):
        # Create a job for every requested iteration of every spec, then
        # shuffle the flat list in place.
        shuffled = [RunnerJob(spec)
                    for spec in specs
                    for _ in xrange(spec.number_of_iterations)]
        random.shuffle(shuffled)
        self.job_queue = shuffled
diff --git a/wlauto/core/extension.py b/wlauto/core/extension.py
new file mode 100644
index 00000000..f09f7d8e
--- /dev/null
+++ b/wlauto/core/extension.py
@@ -0,0 +1,652 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=E1101
+import os
+import logging
+import inspect
+from copy import copy
+from collections import OrderedDict
+
+from wlauto.core.bootstrap import settings
+from wlauto.exceptions import ValidationError, ConfigError
+from wlauto.utils.misc import isiterable, ensure_directory_exists as _d, get_article
+from wlauto.utils.types import identifier
+
+
class AttributeCollection(object):
    """
    Accumulator for extension attribute objects (such as Parameters or Artifacts). This will
    replace any class member list accumulating such attributes through the magic of
    metaprogramming\ [*]_.

    .. [*] which is totally safe and not going to backfire in any way...

    """

    @property
    def values(self):
        # The accumulated attribute objects, in insertion order.
        return self._attrs.values()

    def __init__(self, attrcls):
        # attrcls is the class of the collected attributes (e.g. Parameter
        # or Artifact); raw values passed to add() are coerced to it.
        self._attrcls = attrcls
        self._attrs = OrderedDict()

    def add(self, p):
        """Add attribute ``p`` (coerced to the collection's attribute class).
        If an attribute with the same name already exists and ``p`` has
        ``override`` set, ``p``'s non-None fields are merged over a copy of
        the existing one; otherwise the existing attribute is kept."""
        p = self._to_attrcls(p)
        if p.name in self._attrs:
            if p.override:
                newp = copy(self._attrs[p.name])
                for a, v in p.__dict__.iteritems():
                    if v is not None:
                        setattr(newp, a, v)
                self._attrs[p.name] = newp
            else:
                # Duplicate attribute condition is checked elsewhere
                # (_to_attrcls raises for non-override duplicates).
                pass
        else:
            self._attrs[p.name] = p

    append = add  # list-like alias so this can stand in for a plain list

    def __str__(self):
        return 'AC({})'.format(map(str, self._attrs.values()))

    __repr__ = __str__

    def _to_attrcls(self, p):
        # Coerce p to self._attrcls: a string is the positional name, a
        # tuple/list supplies positional args, a dict supplies keyword args.
        if isinstance(p, basestring):
            p = self._attrcls(p)
        elif isinstance(p, tuple) or isinstance(p, list):
            p = self._attrcls(*p)
        elif isinstance(p, dict):
            p = self._attrcls(**p)
        elif not isinstance(p, self._attrcls):
            raise ValueError('Invalid parameter value: {}'.format(p))
        if (p.name in self._attrs and not p.override and
            p.name != 'modules'):  # TODO: HACK due to "diamond dependency" in workloads...
            raise ValueError('Attribute {} has already been defined.'.format(p.name))
        return p

    def __iadd__(self, other):
        for p in other:
            self.add(p)
        return self

    def __iter__(self):
        return iter(self.values)

    def __contains__(self, p):
        # Membership is tested by attribute *name*, not object identity.
        return p in self._attrs

    def __getitem__(self, i):
        return self._attrs[i]

    def __len__(self):
        return len(self._attrs)
+
+
class AliasCollection(AttributeCollection):
    """
    AttributeCollection specialised for Alias objects. Aliases may be given
    as Alias instances or as ``(name, {param: value, ...})`` pairs, and
    duplicate names are never permitted (aliases have no override mechanism).

    """

    def __init__(self):
        super(AliasCollection, self).__init__(Alias)

    def _to_attrcls(self, p):
        if isinstance(p, tuple) or isinstance(p, list):
            # must be in the form (name, {param: value, ...})
            # BUG FIX: the alias name is the *first* element of the pair;
            # the original passed p[1] (the params dict) as the name.
            p = self._attrcls(p[0], **p[1])
        elif not isinstance(p, self._attrcls):
            raise ValueError('Invalid parameter value: {}'.format(p))
        if p.name in self._attrs:
            raise ValueError('Attribute {} has already been defined.'.format(p.name))
        return p
+
+
class ListCollection(list):
    """
    A plain list standing in where an AttributeCollection-style constructor
    is expected: it accepts (and ignores) an attribute class argument so it
    can be instantiated the same way, and always starts out empty.

    """

    def __init__(self, attrcls):  # pylint: disable=unused-argument
        # attrcls is accepted purely for interface compatibility.
        list.__init__(self)
+
+
class Param(object):
    """
    This is a generic parameter for an extension. Extensions instantiate this to declare which parameters
    are supported.

    """

    def __init__(self, name, kind=None, mandatory=None, default=None, override=False,
                 allowed_values=None, description=None, constraint=None, global_alias=None):
        """
        Create a new Parameter object.

        :param name: The name of the parameter. This will become an instance member of the
                     extension object to which the parameter is applied, so it must be a valid
                     python identifier. This is the only mandatory parameter.
        :param kind: The type of parameter this is. This must be a callable that takes an arbitrary
                     object and converts it to the expected type, or raises ``ValueError`` if such
                     conversion is not possible. Most Python standard types -- ``str``, ``int``, ``bool``, etc. --
                     can be used here (though for ``bool``, ``wlauto.utils.misc.as_bool`` is preferred
                     as it intuitively handles strings like ``'false'``). This defaults to ``str`` if
                     not specified.
        :param mandatory: If set to ``True``, then a non-``None`` value for this parameter *must* be
                          provided on extension object construction, otherwise ``ConfigError`` will be
                          raised.
        :param default: The default value for this parameter. If no value is specified on extension
                        construction, this value will be used instead. (Note: if this is specified and
                        is not ``None``, then ``mandatory`` parameter will be ignored).
        :param override: A ``bool`` that specifies whether a parameter of the same name further up the
                         hierarchy should be overridden. If this is ``False`` (the default), an exception
                         will be raised by the ``AttributeCollection`` instead.
        :param allowed_values: This should be the complete list of allowed values for this parameter.
                               Note: ``None`` value will always be allowed, even if it is not in this list.
                               If you want to disallow ``None``, set ``mandatory`` to ``True``.
        :param constraint: If specified, this must be a callable that takes the parameter value
                           as an argument and return a boolean indicating whether the constraint
                           has been satisfied. Alternatively, can be a two-tuple with said callable as
                           the first element and a string describing the constraint as the second.
        :param global_alias: This is an alternative alias for this parameter, unlike the name, this
                             alias will not be namespaced under the owning extension's name (hence the
                             global part). This is introduced primarily for backward compatibility -- so
                             that old extension settings names still work. This should not be used for
                             new parameters.

        """
        self.name = identifier(name)
        if kind is not None and not callable(kind):
            raise ValueError('Kind must be callable.')
        self.kind = kind
        self.mandatory = mandatory
        self.default = default
        self.override = override
        self.allowed_values = allowed_values
        self.description = description
        # kind defaults to str, but only for non-override parameters: an
        # override must leave kind as None so that it does not clobber the
        # overridden parameter's kind when the two are merged.
        if self.kind is None and not self.override:
            self.kind = str
        if constraint is not None and not callable(constraint) and not isinstance(constraint, tuple):
            raise ValueError('Constraint must be callable or a (callable, str) tuple.')
        self.constraint = constraint
        self.global_alias = global_alias

    def set_value(self, obj, value=None):
        """
        Set this parameter's value on ``obj`` as an attribute named after the
        parameter. A ``None`` value falls back to the default; missing
        mandatory values raise. Non-``None`` values are converted via
        ``self.kind``. If the attribute already holds an iterable, the new
        value is appended instead of replacing it (this is how list-typed
        parameters accumulate values from multiple configuration sources).

        :raises ConfigError: if a mandatory parameter has no value, or the
            value cannot be converted by ``self.kind``.
        """
        if value is None:
            if self.default is not None:
                value = self.default
            elif self.mandatory:
                msg = 'No values specified for mandatory parameter {} in {}'
                raise ConfigError(msg.format(self.name, obj.name))
        else:
            try:
                value = self.kind(value)
            except (ValueError, TypeError):
                typename = self.get_type_name()
                msg = 'Bad value "{}" for {}; must be {} {}'
                article = get_article(typename)
                raise ConfigError(msg.format(value, self.name, article, typename))
        current_value = getattr(obj, self.name, None)
        if current_value is None:
            setattr(obj, self.name, value)
        elif not isiterable(current_value):
            setattr(obj, self.name, value)
        else:
            # Existing value is iterable (a list-like parameter):
            # accumulate rather than overwrite.
            new_value = current_value + [value]
            setattr(obj, self.name, new_value)

    def validate(self, obj):
        """
        Validate the value currently set on ``obj`` against allowed_values
        and the constraint; a ``None`` value is only an error if the
        parameter is mandatory.

        :raises ConfigError: on any validation failure.
        """
        value = getattr(obj, self.name, None)
        if value is not None:
            if self.allowed_values:
                self._validate_allowed_values(obj, value)
            if self.constraint:
                self._validate_constraint(obj, value)
        else:
            if self.mandatory:
                msg = 'No value specified for mandatory parameter {} in {}.'
                raise ConfigError(msg.format(self.name, obj.name))

    def get_type_name(self):
        # Extract a human-readable type name from the repr of self.kind,
        # which may look like "<type 'int'>", "<class 'Foo'>" or
        # "<function as_bool at 0x...>".
        typename = str(self.kind)
        if '\'' in typename:
            typename = typename.split('\'')[1]
        elif typename.startswith('<function'):
            typename = typename.split()[1]
        return typename

    def _validate_allowed_values(self, obj, value):
        # For list-typed parameters every element must be allowed;
        # otherwise the value itself must be in the allowed set.
        if 'list' in str(self.kind):
            for v in value:
                if v not in self.allowed_values:
                    msg = 'Invalid value {} for {} in {}; must be in {}'
                    raise ConfigError(msg.format(v, self.name, obj.name, self.allowed_values))
        else:
            if value not in self.allowed_values:
                msg = 'Invalid value {} for {} in {}; must be in {}'
                raise ConfigError(msg.format(value, self.name, obj.name, self.allowed_values))

    def _validate_constraint(self, obj, value):
        msg_vals = {'value': value, 'param': self.name, 'extension': obj.name}
        if isinstance(self.constraint, tuple) and len(self.constraint) == 2:
            constraint, msg = self.constraint  # pylint: disable=unpacking-non-sequence
        elif callable(self.constraint):
            constraint = self.constraint
            msg = '"{value}" failed constraint validation for {param} in {extension}.'
        else:
            raise ValueError('Invalid constraint for {}: must be callable or a 2-tuple'.format(self.name))
        if not constraint(value):
            # BUG FIX: the message is the error's argument; the original
            # passed a stray leading ``value`` positional, producing a
            # garbled two-argument exception.
            raise ConfigError(msg.format(**msg_vals))

    def __repr__(self):
        # description is omitted from the repr as it is typically long prose.
        d = copy(self.__dict__)
        del d['description']
        return 'Param({})'.format(d)

    __str__ = __repr__
+
+
+Parameter = Param
+
+
class Artifact(object):
    """
    A file generated during execution or post-processing of a workload.
    Unlike a metric, this represents a concrete file. Its ``kind`` attribute
    tells WA (and result processors) how to treat it:

    :log: A log file; not part of the "results" proper but useful for
          diagnostics and meta analysis.
    :meta: Metadata; not part of the "results" but may be needed to
           reproduce them (contrast with ``log``, which is not necessary).
    :data: New data not available elsewhere -- part of the "results"
           generated by WA. Most traces fall into this category.
    :export: An exported version of results or another artifact; contains no
             new information and may be safely discarded.
    :raw: A raw dump/log normally processed to extract useful information
          and then discarded; in general it may also be discarded.

    .. note:: whether a file is marked ``log``/``data`` or ``raw`` depends
              on how important it is to preserve it (e.g. when archiving)
              versus how much space it takes. ``export`` artifacts are
              (almost) always ignored by other exporters, as that can never
              lose data; ``raw`` files *may* be processed by an exporter if
              the risk of losing potentially useful data outweighs the
              time/space cost of handling them.

    .. note:: ``kind`` describes the logical function of the artifact, not
              its intended means of processing -- that is left entirely up
              to the result processors.

    """

    RUN = 'run'
    ITERATION = 'iteration'

    valid_kinds = ['log', 'meta', 'data', 'export', 'raw']

    def __init__(self, name, path, kind, level=RUN, mandatory=False, description=None):
        """
        :param name: Name that uniquely identifies this artifact.
        :param path: The *relative* path of the artifact, relative to the run
                     or iteration output directory depending on ``level``.
                     Must be delimited with ``/`` irrespective of the
                     operating system; it is converted to the host separator.
        :param kind: The type of artifact; one of ``valid_kinds`` (see the
                     class docstring). Used as a hint to result processors.
        :param level: Level at which the artifact is generated: ``'run'`` or
                      ``'iteration'``.
        :param mandatory: Whether this artifact must exist at the end of
                          result processing for its level.
        :param description: Free-form description of the artifact.

        :raises ValueError: if ``kind`` is not one of ``valid_kinds``.
        """
        if kind not in self.valid_kinds:
            raise ValueError('Invalid Artifact kind: {}; must be in {}'.format(kind, self.valid_kinds))
        self.name = name
        # Declared with '/' separators; stored using the host OS separator.
        self.path = None if path is None else path.replace('/', os.sep)
        self.kind = kind
        self.level = level
        self.mandatory = mandatory
        self.description = description

    def exists(self, context):
        """Returns ``True`` if artifact exists within the specified context, and
        ``False`` otherwise."""
        return os.path.exists(os.path.join(context.output_directory, self.path))

    def to_dict(self):
        """Return a shallow copy of this artifact's attributes as a dict."""
        return dict(self.__dict__)
+
+
class Alias(object):
    """
    A named set of default parameter values for an extension: configuration
    may refer to the alias name instead of the extension name to have the
    associated parameter values applied automatically.

    """

    def __init__(self, name, **kwargs):
        self.name = name
        self.params = kwargs
        self.extension_name = None  # gets set by the MetaClass

    def validate(self, ext):
        """Check that every parameter named by this alias is declared by
        ``ext``; raise ConfigError otherwise (aliases may originate from
        user configuration, hence a config error rather than ValueError)."""
        valid_names = set(p.name for p in ext.parameters)
        for param in self.params:
            if param not in valid_names:
                msg = 'Parameter {} (defined in alias {}) is invalid for {}'
                raise ConfigError(msg.format(param, self.name, ext.name))
+
+
class ExtensionMeta(type):
    """
    This basically adds some magic to extensions to make implementing new extensions, such as
    workloads less complicated.

    It ensures that certain class attributes (specified by the ``to_propagate``
    attribute of the metaclass) get propagated down the inheritance hierarchy. The assumption
    is that the values of the attributes specified in the class are iterable; if that is not met,
    Bad Things (tm) will happen.

    This also provides virtual method implementation, similar to those in C-derived OO languages,
    and alias specifications.

    """

    # (attribute name, element class, collection class used to accumulate
    # the elements) triples merged down the class hierarchy.
    to_propagate = [
        ('parameters', Parameter, AttributeCollection),
        ('artifacts', Artifact, AttributeCollection),
        ('core_modules', str, ListCollection),
    ]

    # Methods for which *every* implementation in the hierarchy is invoked,
    # base classes first, rather than just the most derived one.
    virtual_methods = ['validate']

    def __new__(mcs, clsname, bases, attrs):
        # Attribute propagation must happen before the class is created so
        # the merged collections end up in the class dict.
        mcs._propagate_attributes(bases, attrs)
        cls = type.__new__(mcs, clsname, bases, attrs)
        mcs._setup_aliases(cls)
        mcs._implement_virtual(cls, bases)
        return cls

    @classmethod
    def _propagate_attributes(mcs, bases, attrs):
        """
        For attributes specified by to_propagate, their values will be a union of
        that specified for cls and its bases (cls values overriding those of bases
        in case of conflicts).

        """
        for prop_attr, attr_cls, attr_collector_cls in mcs.to_propagate:
            should_propagate = False
            propagated = attr_collector_cls(attr_cls)
            # Base-class values are added first, then the new class's own,
            # so derived entries win under the collection's merge semantics.
            for base in bases:
                if hasattr(base, prop_attr):
                    propagated += getattr(base, prop_attr) or []
                    should_propagate = True
            if prop_attr in attrs:
                propagated += attrs[prop_attr] or []
                should_propagate = True
            if should_propagate:
                attrs[prop_attr] = propagated

    @classmethod
    def _setup_aliases(mcs, cls):
        # Replace the class's plain list of aliases with an AliasCollection,
        # validating each alias against the class's parameters and recording
        # which extension the alias belongs to.
        if hasattr(cls, 'aliases'):
            aliases, cls.aliases = cls.aliases, AliasCollection()
            for alias in aliases:
                if isinstance(alias, basestring):
                    alias = Alias(alias)
                alias.validate(cls)
                alias.extension_name = cls.name
                cls.aliases.add(alias)

    @classmethod
    def _implement_virtual(mcs, cls, bases):
        """
        This implements automatic method propagation to the bases, so
        that you don't have to do something like

            super(cls, self).vmname()

        .. note:: current implementation imposes a restriction in that
                  parameters into the function *must* be passed as keyword
                  arguments. There *must not* be positional arguments on
                  virtual method invocation.

        """
        methods = {}
        for vmname in mcs.virtual_methods:
            clsmethod = getattr(cls, vmname, None)
            if clsmethod:
                # Collect distinct base implementations, then append the
                # class's own, so the wrapper calls base versions first.
                basemethods = [getattr(b, vmname) for b in bases if hasattr(b, vmname)]
                methods[vmname] = [bm for bm in basemethods if bm != clsmethod]
                methods[vmname].append(clsmethod)

                # __name is bound at definition time via the default argument
                # so that each generated wrapper looks up its own list.
                def wrapper(self, __name=vmname, **kwargs):
                    for dm in methods[__name]:
                        dm(self, **kwargs)

                setattr(cls, vmname, wrapper)
+
+
class Extension(object):
    """
    Base class for all WA extensions. An extension is basically a plug-in.
    It extends the functionality of WA in some way. Extensions are discovered
    and loaded dynamically by the extension loader upon invocation of WA scripts.
    Adding an extension is a matter of placing a class that implements an appropriate
    interface somewhere it would be discovered by the loader. That "somewhere" is
    typically one of the extension subdirectories under ``~/.workload_automation/``.

    """
    __metaclass__ = ExtensionMeta

    kind = None
    name = None
    parameters = [
        Parameter('modules', kind=list,
                  description="""
                  Lists the modules to be loaded by this extension. A module is a plug-in that
                  further extends functionality of an extension.
                  """),
    ]
    artifacts = []
    aliases = []
    core_modules = []

    @classmethod
    def get_default_config(cls):
        """Return a dict mapping each declared parameter name to its default value."""
        return {p.name: p.default for p in cls.parameters}

    @property
    def dependencies_directory(self):
        # Per-extension dependencies directory; _d() ensures it exists.
        return _d(os.path.join(settings.dependencies_directory, self.name))

    @property
    def _classname(self):
        return self.__class__.__name__

    def __init__(self, **kwargs):
        """
        :param kwargs: values for this extension's declared parameters.

        :raises ConfigError: if a keyword does not name a declared parameter,
            or a mandatory parameter is left unset.
        :raises RuntimeError: if instantiated directly rather than through an
            ExtensionLoader (see __check_from_loader).
        """
        self.__check_from_loader()
        self.logger = logging.getLogger(self._classname)
        self._modules = []
        self.capabilities = getattr(self.__class__, 'capabilities', [])
        for param in self.parameters:
            param.set_value(self, kwargs.get(param.name))
        for key in kwargs:
            # Membership test is by parameter *name* (see
            # AttributeCollection.__contains__).
            if key not in self.parameters:
                message = 'Unexpected parameter "{}" for {}'
                raise ConfigError(message.format(key, self.name))

    def get_config(self):
        """
        Returns current configuration (i.e. parameter values) of this extension.

        """
        config = {}
        for param in self.parameters:
            config[param.name] = getattr(self, param.name, None)
        return config

    def validate(self):
        """
        Perform basic validation to ensure that this extension is capable of running.
        This is intended as an early check to ensure the extension has not been mis-configured,
        rather than a comprehensive check (that may, e.g., require access to the execution
        context).

        This method may also be used to enforce (i.e. set as well as check) inter-parameter
        constraints for the extension (e.g. if valid values for parameter A depend on the value
        of parameter B -- something that is not possible to enforce using ``Parameter``\ 's
        ``constraint`` attribute.

        :raises ValidationError: if this extension has no name set.
        :raises ConfigError: if any parameter value fails its validation.
        """
        if self.name is None:
            raise ValidationError('Name not set for {}'.format(self._classname))
        for param in self.parameters:
            param.validate(self)

    def check_artifacts(self, context, level):
        """
        Make sure that all mandatory artifacts have been generated.

        :param context: execution context providing ``output_directory``.
        :param level: only artifacts declared at this level (``'run'`` or
            ``'iteration'``) are checked.
        :raises ValidationError: if a mandatory artifact at this level does
            not exist on disk.
        """
        for artifact in self.artifacts:
            if artifact.level != level or not artifact.mandatory:
                continue
            fullpath = os.path.join(context.output_directory, artifact.path)
            if not os.path.exists(fullpath):
                message = 'Mandatory "{}" has not been generated for {}.'
                raise ValidationError(message.format(artifact.path, self.name))

    def __getattr__(self, name):
        # Invoked only for attributes not found by normal lookup: delegate
        # unknown attributes to loaded modules. The explicit '_modules'
        # check guards against infinite recursion if this runs before
        # __init__ has set self._modules.
        if name == '_modules':
            raise ValueError('_modules accessed too early!')
        for module in self._modules:
            if hasattr(module, name):
                return getattr(module, name)
        raise AttributeError(name)

    def load_modules(self, loader):
        """
        Load the modules specified by the "modules" Parameter using the provided loader. A loader
        can be any object that has an attribute called "get_module" that implements the following
        signature::

            get_module(name, owner, **kwargs)

        and returns an instance of :class:`wlauto.core.extension.Module`. If the module with the
        specified name is not found, the loader must raise an appropriate exception.

        """
        # Class-declared core_modules come before user-specified modules;
        # both lists are reversed. NOTE(review): the reason for the reversal
        # is not evident from this file -- confirm against loader behaviour.
        modules = list(reversed(self.core_modules)) + list(reversed(self.modules or []))
        if not modules:
            return
        for module_spec in modules:
            if not module_spec:
                continue
            # A spec is either just a name, or a one-entry dict mapping the
            # module name to a dict of parameters for it.
            if isinstance(module_spec, basestring):
                name = module_spec
                params = {}
            elif isinstance(module_spec, dict):
                if len(module_spec) != 1:
                    # NOTE(review): "exctly" is a typo in this user-facing
                    # message; not corrected in this documentation-only pass.
                    message = 'Invalid module spec: {}; dict must have exctly one key -- the module name.'
                    raise ValueError(message.format(module_spec))
                name, params = module_spec.items()[0]
            else:
                message = 'Invalid module spec: {}; must be a string or a one-key dict.'
                raise ValueError(message.format(module_spec))

            if not isinstance(params, dict):
                message = 'Invalid module spec: {}; dict value must also be a dict.'
                raise ValueError(message.format(module_spec))

            module = loader.get_module(name, owner=self, **params)
            module.initialize()
            # Capabilities of loaded modules are surfaced on the owning
            # extension itself.
            for capability in module.capabilities:
                if capability not in self.capabilities:
                    self.capabilities.append(capability)
            self._modules.append(module)

    def has(self, capability):
        """Check if this extension has the specified capability. The alternative method ``can`` is
        identical to this. Which to use is up to the caller depending on what makes semantic sense
        in the context of the capability, e.g. ``can('hard_reset')`` vs ``has('active_cooling')``."""
        return capability in self.capabilities

    can = has

    def __check_from_loader(self):
        """
        There are a few things that need to happen in order to get a valid extension instance.
        Not all of them are currently done through standard Python initialisation mechanisms
        (specifically, the loading of modules and alias resolution). In order to avoid potential
        problems with not fully loaded extensions, make sure that an extension is *only* instantiated
        by the loader.

        :raises RuntimeError: if the call stack shows this instance was not
            created from a function named ``_instantiate``.
        """
        stack = inspect.stack()
        stack.pop(0)  # current frame
        frame = stack.pop(0)
        # skip through the __init__ call chain (subclass __init__s calling
        # super) to find the frame that actually created the instance
        while stack and frame[3] == '__init__':
            frame = stack.pop(0)
        if frame[3] != '_instantiate':
            message = 'Attempting to instantiate {} directly (must be done through an ExtensionLoader)'
            raise RuntimeError(message.format(self.__class__.__name__))
+
+
class Module(Extension):
    """
    This is a "plugin" for an extension this is intended to capture functionality that may be optional
    for an extension, and so may or may not be present in a particular setup; or, conversely, functionality
    that may be reusable between multiple devices, even if they are not with the same inheritance hierarchy.

    In other words, a Module is roughly equivalent to a kernel module and its primary purpose is to
    implement WA "drivers" for various peripherals that may or may not be present in a particular setup.

    .. note:: A module is itself an Extension and can therefore have its own modules.

    """

    capabilities = []

    @property
    def root_owner(self):
        """The top of the owner chain: the first owner that is not itself a
        Module (i.e. the extension that ultimately owns this module stack)."""
        owner = self.owner
        while isinstance(owner, Module) and owner is not self:
            owner = owner.owner
        return owner

    def __init__(self, owner, **kwargs):
        """
        :param owner: The extension (or module) this module was loaded for.

        :raises ValueError: if a module with the same name already appears in
            the owner chain (a circular module import).
        """
        super(Module, self).__init__(**kwargs)
        self.owner = owner
        # Walk up the owner chain looking for a module with the same name,
        # which would mean this module (indirectly) imports itself.
        # BUG FIX: the original loop never advanced ``owner``, so it spun
        # forever whenever the immediate owner was itself a Module.
        while isinstance(owner, Module):
            if owner.name == self.name:
                raise ValueError('Circular module import for {}'.format(self.name))
            owner = owner.owner

    def initialize(self):
        # Hook for subclasses: called by Extension.load_modules() right
        # after the module has been constructed.
        pass
+
diff --git a/wlauto/core/extension_loader.py b/wlauto/core/extension_loader.py
new file mode 100644
index 00000000..0263f830
--- /dev/null
+++ b/wlauto/core/extension_loader.py
@@ -0,0 +1,400 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+import sys
+import inspect
+import imp
+import string
+import logging
+from functools import partial
+from collections import OrderedDict
+
+from wlauto.core.bootstrap import settings
+from wlauto.core.extension import Extension
+from wlauto.exceptions import NotFoundError, LoaderError
+from wlauto.utils.misc import walk_modules, load_class, merge_lists, merge_dicts, get_article
+from wlauto.utils.types import identifier
+
+
# Translation table used to derive valid, unique module names from file paths.
MODNAME_TRANS = string.maketrans(':/\\.', '____')


class ExtensionLoaderItem(object):
    # Lightweight record describing one extension *kind* (device, workload,
    # etc.) as declared in the settings.

    def __init__(self, ext_tuple):
        self.name = ext_tuple.name
        self.default_package = ext_tuple.default_package
        self.default_path = ext_tuple.default_path
        # Resolve the dotted class path eagerly so that a bad entry in the
        # settings surfaces at loader construction time.
        self.cls = load_class(ext_tuple.cls)
+
+
class GlobalParameterAlias(object):
    """
    Represents a "global alias" for an extension parameter. A global alias
    is specified at the top level of config rather than namespaced under an
    extension name.

    Multiple extensions may have parameters with the same global_alias if they are
    part of the same inheritance hierarchy and one parameter is an override of the
    other. This class keeps track of all such cases in its extensions dict.

    """

    def __init__(self, name):
        self.name = name
        self.extensions = {}  # extension name -> extension class

    def iteritems(self):
        # Yield (parameter, extension) pairs for every tracked extension.
        for ext in self.extensions.itervalues():
            yield (self.get_param(ext), ext)

    def get_param(self, ext):
        # Locate the parameter on ``ext`` that declares this global alias.
        for param in ext.parameters:
            if param.global_alias == self.name:
                return param
        message = 'Extension {} does not have a parameter with global alias {}'
        raise ValueError(message.format(ext.name, self.name))

    def update(self, other_ext):
        # Validate before recording, so a bad extension never gets tracked.
        self._validate_ext(other_ext)
        self.extensions[other_ext.name] = other_ext

    def _validate_ext(self, other_ext):
        # A new extension may share this alias with the ones already tracked
        # only if (for unrelated extensions) the parameter kinds agree, and
        # the parameter names match in all cases.
        other_param = self.get_param(other_ext)
        for param, ext in self.iteritems():
            if ((not (issubclass(ext, other_ext) or issubclass(other_ext, ext))) and
                    other_param.kind != param.kind):
                message = 'Duplicate global alias {} declared in {} and {} extensions with different types'
                raise LoaderError(message.format(self.name, ext.name, other_ext.name))
            if not param.name == other_param.name:
                message = 'Two params {} in {} and {} in {} both declare global alias {}'
                raise LoaderError(message.format(param.name, ext.name,
                                                 other_param.name, other_ext.name, self.name))

    def __str__(self):
        text = 'GlobalAlias({} => {})'
        extlist = ', '.join(['{}.{}'.format(e.name, p.name) for p, e in self.iteritems()])
        return text.format(self.name, extlist)
+
+
+class ExtensionLoader(object):
+ """
+ Discovers, enumerates and loads available devices, configs, etc.
+ The loader will attempt to discover things on construction by looking
+ in predetermined set of locations defined by default_paths. Optionally,
+ additional locations may specified through paths parameter that must
+ be a list of additional Python module paths (i.e. dot-delimited).
+
+ """
+
+ _instance = None
+
+ # Singleton
+ def __new__(cls, *args, **kwargs):
+ if not cls._instance:
+ cls._instance = super(ExtensionLoader, cls).__new__(cls, *args, **kwargs)
+ else:
+ for k, v in kwargs.iteritems():
+ if not hasattr(cls._instance, k):
+ raise ValueError('Invalid parameter for ExtensionLoader: {}'.format(k))
+ setattr(cls._instance, k, v)
+ return cls._instance
+
+ def set_load_defaults(self, value):
+ self._load_defaults = value
+ if value:
+ self.packages = merge_lists(self.default_packages, self.packages, duplicates='last')
+
+ def get_load_defaults(self):
+ return self._load_defaults
+
+ load_defaults = property(get_load_defaults, set_load_defaults)
+
+ def __init__(self, packages=None, paths=None, ignore_paths=None, keep_going=False, load_defaults=True):
+ """
+ params::
+
+ :packages: List of packages to load extensions from.
+ :paths: List of paths to be searched for Python modules containing
+ WA extensions.
+ :ignore_paths: List of paths to ignore when search for WA extensions (these would
+ typically be subdirectories of one or more locations listed in
+ ``paths`` parameter.
+ :keep_going: Specifies whether to keep going if an error occurs while loading
+ extensions.
+ :load_defaults: Specifies whether extension should be loaded from default locations
+ (WA package, and user's WA directory) as well as the packages/paths
+ specified explicitly in ``packages`` and ``paths`` parameters.
+
+ """
+ self._load_defaults = None
+ self.logger = logging.getLogger('ExtensionLoader')
+ self.keep_going = keep_going
+ self.extension_kinds = {ext_tuple.name: ExtensionLoaderItem(ext_tuple)
+ for ext_tuple in settings.extensions}
+ self.default_packages = [ext.default_package for ext in self.extension_kinds.values()]
+
+ self.packages = packages or []
+ self.load_defaults = load_defaults
+ self.paths = paths or []
+ self.ignore_paths = ignore_paths or []
+ self.extensions = {}
+ self.aliases = {}
+ self.global_param_aliases = {}
+ # create an empty dict for each extension type to store discovered
+ # extensions.
+ for ext in self.extension_kinds.values():
+ setattr(self, '_' + ext.name, {})
+ self._load_from_packages(self.packages)
+ self._load_from_paths(self.paths, self.ignore_paths)
+
+ def update(self, packages=None, paths=None, ignore_paths=None):
+ """ Load extensions from the specified paths/packages
+ without clearing or reloading existing extension. """
+ if packages:
+ self.packages.extend(packages)
+ self._load_from_packages(packages)
+ if paths:
+ self.paths.extend(paths)
+ self.ignore_paths.extend(ignore_paths or [])
+ self._load_from_paths(paths, ignore_paths or [])
+
+ def clear(self):
+ """ Clear all discovered items. """
+ self.extensions.clear()
+ for ext in self.extension_kinds.values():
+ self._get_store(ext).clear()
+
+ def reload(self):
+ """ Clear all discovered items and re-run the discovery. """
+ self.clear()
+ self._load_from_packages(self.packages)
+ self._load_from_paths(self.paths, self.ignore_paths)
+
+ def get_extension_class(self, name, kind=None):
+ """
+ Return the class for the specified extension if found or raises ``ValueError``.
+
+ """
+ name, _ = self.resolve_alias(name)
+ if kind is None:
+ return self.extensions[name]
+ ext = self.extension_kinds.get(kind)
+ if ext is None:
+ raise ValueError('Unknown extension type: {}'.format(kind))
+ store = self._get_store(ext)
+ if name not in store:
+ raise NotFoundError('Extensions {} is not {} {}.'.format(name, get_article(kind), kind))
+ return store[name]
+
+ def get_extension(self, name, *args, **kwargs):
+ """
+ Return extension of the specified kind with the specified name. Any additional
+ parameters will be passed to the extension's __init__.
+
+ """
+ name, base_kwargs = self.resolve_alias(name)
+ kind = kwargs.pop('kind', None)
+ kwargs = merge_dicts(base_kwargs, kwargs, list_duplicates='last', dict_type=OrderedDict)
+ cls = self.get_extension_class(name, kind)
+ extension = _instantiate(cls, args, kwargs)
+ extension.load_modules(self)
+ return extension
+
+ def get_default_config(self, ext_name):
+ """
+ Returns the default configuration for the specified extension name. The name may be an alias,
+ in which case, the returned config will be augmented with appropriate alias overrides.
+
+ """
+ real_name, alias_config = self.resolve_alias(ext_name)
+ base_default_config = self.get_extension_class(real_name).get_default_config()
+ return merge_dicts(base_default_config, alias_config, list_duplicates='last', dict_type=OrderedDict)
+
+ def list_extensions(self, kind=None):
+ """
+ List discovered extension classes. Optionally, only list extensions of a
+ particular type.
+
+ """
+ if kind is None:
+ return self.extensions.values()
+ if kind not in self.extension_kinds:
+ raise ValueError('Unknown extension type: {}'.format(kind))
+ return self._get_store(self.extension_kinds[kind]).values()
+
+ def has_extension(self, name, kind=None):
+ """
+ Returns ``True`` if an extensions with the specified ``name`` has been
+ discovered by the loader. If ``kind`` was specified, only returns ``True``
+ if the extension has been found, *and* it is of the specified kind.
+
+ """
+ try:
+ self.get_extension_class(name, kind)
+ return True
+ except NotFoundError:
+ return False
+
+ def resolve_alias(self, alias_name):
+ """
+ Try to resolve the specified name as an extension alias. Returns a
+ two-tuple, the first value of which is actual extension name, and the
+ second is a dict of parameter values for this alias. If the name passed
+ is already an extension name, then the result is ``(alias_name, {})``.
+
+ """
+ alias_name = identifier(alias_name.lower())
+ if alias_name in self.extensions:
+ return (alias_name, {})
+ if alias_name in self.aliases:
+ alias = self.aliases[alias_name]
+ return (alias.extension_name, alias.params)
+ raise NotFoundError('Could not find extension or alias "{}"'.format(alias_name))
+
+ # Internal methods.
+
+ def __getattr__(self, name):
+ """
+ This resolves methods for specific extensions types based on corresponding
+ generic extension methods. So it's possible to say things like ::
+
+ loader.get_device('foo')
+
+ instead of ::
+
+ loader.get_extension('foo', kind='device')
+
+ """
+ if name.startswith('get_'):
+ name = name.replace('get_', '', 1)
+ if name in self.extension_kinds:
+ return partial(self.get_extension, kind=name)
+ if name.startswith('list_'):
+ name = name.replace('list_', '', 1).rstrip('s')
+ if name in self.extension_kinds:
+ return partial(self.list_extensions, kind=name)
+ if name.startswith('has_'):
+ name = name.replace('has_', '', 1)
+ if name in self.extension_kinds:
+ return partial(self.has_extension, kind=name)
+ raise AttributeError(name)
+
+ def _get_store(self, ext):
+ name = getattr(ext, 'name', ext)
+ return getattr(self, '_' + name)
+
+ def _load_from_packages(self, packages):
+ try:
+ for package in packages:
+ for module in walk_modules(package):
+ self._load_module(module)
+ except ImportError as e:
+ message = 'Problem loading extensions from extra packages: {}'
+ raise LoaderError(message.format(e.message))
+
+ def _load_from_paths(self, paths, ignore_paths):
+ self.logger.debug('Loading from paths.')
+ for path in paths:
+ self.logger.debug('Checking path %s', path)
+ for root, _, files in os.walk(path):
+ should_skip = False
+ for igpath in ignore_paths:
+ if root.startswith(igpath):
+ should_skip = True
+ break
+ if should_skip:
+ continue
+ for fname in files:
+ if not os.path.splitext(fname)[1].lower() == '.py':
+ continue
+ filepath = os.path.join(root, fname)
+ try:
+ modname = os.path.splitext(filepath[1:])[0].translate(MODNAME_TRANS)
+ module = imp.load_source(modname, filepath)
+ self._load_module(module)
+ except (SystemExit, ImportError), e:
+ if self.keep_going:
+ self.logger.warn('Failed to load {}'.format(filepath))
+ self.logger.warn('Got: {}'.format(e))
+ else:
+ raise LoaderError('Failed to load {}'.format(filepath), sys.exc_info())
+
+ def _load_module(self, module): # NOQA pylint: disable=too-many-branches
+ self.logger.debug('Checking module %s', module.__name__)
+ for obj in vars(module).itervalues():
+ if inspect.isclass(obj):
+ if not issubclass(obj, Extension) or not hasattr(obj, 'name') or not obj.name:
+ continue
+ try:
+ for ext in self.extension_kinds.values():
+ if issubclass(obj, ext.cls):
+ self._add_found_extension(obj, ext)
+ break
+ else: # did not find a matching Extension type
+ message = 'Unknown extension type for {} (type: {})'
+ raise LoaderError(message.format(obj.name, obj.__class__.__name__))
+ except LoaderError as e:
+ if self.keep_going:
+ self.logger.warning(e)
+ else:
+ raise e
+
+ def _add_found_extension(self, obj, ext):
+ """
+ :obj: Found extension class
+ :ext: matching extension item.
+ """
+ self.logger.debug('\tAdding %s %s', ext.name, obj.name)
+ key = identifier(obj.name.lower())
+ obj.kind = ext.name
+ if key in self.extensions or key in self.aliases:
+ raise LoaderError('{} {} already exists.'.format(ext.name, obj.name))
+ # Extensions are tracked both, in a common extensions
+ # dict, and in per-extension kind dict (as retrieving
+ # extensions by kind is a common use case.
+ self.extensions[key] = obj
+ store = self._get_store(ext)
+ store[key] = obj
+ for alias in obj.aliases:
+ if alias in self.extensions or alias in self.aliases:
+ raise LoaderError('{} {} already exists.'.format(ext.name, obj.name))
+ self.aliases[alias.name] = alias
+
+ # Update global aliases list. If a global alias is already in the list,
+ # then make sure this extension is in the same parent/child hierarchy
+ # as the one already found.
+ for param in obj.parameters:
+ if param.global_alias:
+ if param.global_alias not in self.global_param_aliases:
+ ga = GlobalParameterAlias(param.global_alias)
+ ga.update(obj)
+ self.global_param_aliases[ga.name] = ga
+ else: # global alias already exists.
+ self.global_param_aliases[param.global_alias].update(obj)
+
+
+# Utility functions.
+
def _instantiate(cls, args=None, kwargs=None):
    # Instantiate ``cls``, wrapping any constructor failure in a LoaderError
    # that carries the original traceback via sys.exc_info().
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
    try:
        return cls(*args, **kwargs)
    except Exception:
        raise LoaderError('Could not load {}'.format(cls), sys.exc_info())
+
diff --git a/wlauto/core/exttype.py b/wlauto/core/exttype.py
new file mode 100644
index 00000000..ad76bad4
--- /dev/null
+++ b/wlauto/core/exttype.py
@@ -0,0 +1,35 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# Separate module to avoid circular dependencies
+from wlauto.core.bootstrap import settings
+from wlauto.core.extension import Extension
+from wlauto.utils.misc import load_class
+
+
# Map each extension kind's name to its resolved base class, built once at
# import time from the settings.
_extension_bases = {ext.name: load_class(ext.cls) for ext in settings.extensions}


def get_extension_type(ext):
    """Given an instance of ``wlauto.core.Extension``, return a string representing
    the type of the extension (e.g. ``'workload'`` for a Workload subclass instance)."""
    if not isinstance(ext, Extension):
        raise ValueError('{} is not an instance of Extension'.format(ext))
    for kind_name, base_cls in _extension_bases.iteritems():
        if isinstance(ext, base_cls):
            return kind_name
    raise ValueError('Unknown extension type: {}'.format(ext.__class__.__name__))
+
diff --git a/wlauto/core/instrumentation.py b/wlauto/core/instrumentation.py
new file mode 100644
index 00000000..8286e608
--- /dev/null
+++ b/wlauto/core/instrumentation.py
@@ -0,0 +1,374 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+"""
+Adding New Instrument
+=====================
+
+Any new instrument should be a subclass of Instrument and it must have a name.
+When a new instrument is added to Workload Automation, the methods of the new
+instrument will be found automatically and hooked up to the supported signals.
+Once a signal is broadcasted, the corresponding registered method is invoked.
+
+Each method in Instrument must take two arguments, which are self and context.
+Supported signals can be found in [... link to signals ...] To make
+implementations easier and common, the basic steps to add a new instrument are
+similar to the steps to add a new workload.
+
+Hence, the following methods are sufficient to implement to add new instrument:
+
+ - setup: This method is invoked after the workload is setup. All the
+ necessary setups should go inside this method. Setup, includes operations
+ like, pushing the files to the target device, install them, clear logs,
+ etc.
+ - start: It is invoked just before the workload start execution. Here is
+ where instrument measures start being registered/taken.
+ - stop: It is invoked just after the workload execution stops. The measures
+ should stop being taken/registered.
+ - update_result: It is invoked after the workload updated its result.
+ update_result is where the taken measures are added to the result so it
+ can be processed by Workload Automation.
+ - teardown is invoked after the workload is torn down. It is a good place
+ to clean any logs generated by the instrument.
+
+For example, to add an instrument which will trace device errors, we subclass
+Instrument and overwrite the variable name.::
+
+ #BINARY_FILE = os.path.join(os.path.dirname(__file__), 'trace')
+ class TraceErrorsInstrument(Instrument):
+
+ name = 'trace-errors'
+
+ def __init__(self, device):
+ super(TraceErrorsInstrument, self).__init__(device)
+ self.trace_on_device = os.path.join(self.device.working_directory, 'trace')
+
+We then declare and implement the aforementioned methods. For the setup method,
+we want to push the file to the target device and then change the file mode to
+755 ::
+
+ def setup(self, context):
+ self.device.push_file(BINARY_FILE, self.device.working_directory)
+ self.device.execute('chmod 755 {}'.format(self.trace_on_device))
+
+Then we implemented the start method, which will simply run the file to start
+tracing. ::
+
+ def start(self, context):
+ self.device.execute('{} start'.format(self.trace_on_device))
+
+Lastly, we need to stop tracing once the workload stops and this happens in the
+stop method::
+
+ def stop(self, context):
+ self.device.execute('{} stop'.format(self.trace_on_device))
+
+The generated result can be updated inside update_result, or if it is trace, we
+just pull the file to the host device. context has a result variable which
+has add_metric method. It can be used to add the instrumentation results metrics
+to the final result for the workload. The method can be passed 4 params, which
+are metric key, value, unit and lower_is_better, which is a boolean. ::
+
+ def update_result(self, context):
+ # pull the trace file to the device
+ result = os.path.join(self.device.working_directory, 'trace.txt')
+ self.device.pull_file(result, context.working_directory)
+
+ # parse the file if needs to be parsed, or add result to
+ # context.result
+
+At the end, we might want to delete any files generated by the instrumentation
+and the code to clear these file goes in teardown method. ::
+
+ def teardown(self, context):
+ self.device.delete_file(os.path.join(self.device.working_directory, 'trace.txt'))
+
+"""
+
+import logging
+import inspect
+from collections import OrderedDict
+
+import wlauto.core.signal as signal
+from wlauto.core.extension import Extension
+from wlauto.exceptions import WAError, DeviceNotRespondingError, TimeoutError
+from wlauto.utils.misc import get_traceback, isiterable
+
+
+logger = logging.getLogger('instrumentation')
+
+
+# Maps method names onto signals they should be registered to.
+# Note: the begin/end signals are paired -- if a begin_ signal is sent,
+# then the corresponding end_ signal is guaranteed to also be sent.
+# Note: using OrderedDict to preserve logical ordering for the table generated
+# in the documentation
SIGNAL_MAP = OrderedDict([
    # Below are "aliases" for some of the more common signals to allow
    # instrumentation to have similar structure to workloads
    ('initialize', signal.RUN_INIT),
    ('setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
    ('start', signal.BEFORE_WORKLOAD_EXECUTION),
    ('stop', signal.AFTER_WORKLOAD_EXECUTION),
    ('process_workload_result', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
    ('update_result', signal.AFTER_WORKLOAD_RESULT_UPDATE),
    ('teardown', signal.AFTER_WORKLOAD_TEARDOWN),
    ('finalize', signal.RUN_FIN),

    ('on_run_start', signal.RUN_START),
    ('on_run_end', signal.RUN_END),
    ('on_workload_spec_start', signal.WORKLOAD_SPEC_START),
    ('on_workload_spec_end', signal.WORKLOAD_SPEC_END),
    ('on_iteration_start', signal.ITERATION_START),
    ('on_iteration_end', signal.ITERATION_END),

    ('before_initial_boot', signal.BEFORE_INITIAL_BOOT),
    ('on_successful_initial_boot', signal.SUCCESSFUL_INITIAL_BOOT),
    ('after_initial_boot', signal.AFTER_INITIAL_BOOT),
    ('before_first_iteration_boot', signal.BEFORE_FIRST_ITERATION_BOOT),
    ('on_successful_first_iteration_boot', signal.SUCCESSFUL_FIRST_ITERATION_BOOT),
    ('after_first_iteration_boot', signal.AFTER_FIRST_ITERATION_BOOT),
    ('before_boot', signal.BEFORE_BOOT),
    ('on_successful_boot', signal.SUCCESSFUL_BOOT),
    ('after_boot', signal.AFTER_BOOT),

    ('on_spec_init', signal.SPEC_INIT),
    ('on_run_init', signal.RUN_INIT),
    ('on_iteration_init', signal.ITERATION_INIT),

    ('before_workload_setup', signal.BEFORE_WORKLOAD_SETUP),
    ('on_successful_workload_setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
    ('after_workload_setup', signal.AFTER_WORKLOAD_SETUP),
    ('before_workload_execution', signal.BEFORE_WORKLOAD_EXECUTION),
    ('on_successful_workload_execution', signal.SUCCESSFUL_WORKLOAD_EXECUTION),
    ('after_workload_execution', signal.AFTER_WORKLOAD_EXECUTION),
    ('before_workload_result_update', signal.BEFORE_WORKLOAD_RESULT_UPDATE),
    ('on_successful_workload_result_update', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
    ('after_workload_result_update', signal.AFTER_WORKLOAD_RESULT_UPDATE),
    ('before_workload_teardown', signal.BEFORE_WORKLOAD_TEARDOWN),
    ('on_successful_workload_teardown', signal.SUCCESSFUL_WORKLOAD_TEARDOWN),
    ('after_workload_teardown', signal.AFTER_WORKLOAD_TEARDOWN),

    ('before_overall_results_processing', signal.BEFORE_OVERALL_RESULTS_PROCESSING),
    ('on_successful_overall_results_processing', signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING),
    ('after_overall_results_processing', signal.AFTER_OVERALL_RESULTS_PROCESSING),

    ('on_error', signal.ERROR_LOGGED),
    ('on_warning', signal.WARNING_LOGGED),
])

# Method-name prefixes recognised by install() that control the priority with
# which a callback is connected to its signal (higher runs earlier).
PRIORITY_MAP = OrderedDict([
    ('very_fast_', 20),
    ('fast_', 10),
    ('normal_', 0),
    ('slow_', -10),
    ('very_slow_', -20),
])

# Module-level registry of currently installed Instrument instances.
installed = []
+
+
def is_installed(instrument):
    """
    Report whether ``instrument`` -- given as an Instrument instance, an
    Instrument class, or a name string -- matches a currently installed
    instrument.
    """
    names = [i.name for i in installed]
    if isinstance(instrument, Instrument):
        # An instance matches by identity or by name.
        return instrument in installed or instrument.name in names
    if isinstance(instrument, type):
        # A class matches only by exact class.
        return instrument in [i.__class__ for i in installed]
    # assume string
    return instrument in names
+
+
# Module-level flag set by ManagedCallback when an instrument callback fails.
failures_detected = False


def reset_failures():
    # Clear the failure flag.
    global failures_detected  # pylint: disable=W0603
    failures_detected = False


def check_failures():
    # Report whether any instrument callback has failed since the last check,
    # resetting the flag in the process.
    had_failures = failures_detected
    reset_failures()
    return had_failures
+
+
class ManagedCallback(object):
    """
    This wraps instruments' callbacks to ensure that errors do not interfere
    with run execution.

    """

    def __init__(self, instrument, callback):
        self.instrument = instrument
        self.callback = callback

    def __call__(self, context):
        # Only invoke the callback while the owning instrument is enabled;
        # any failure is logged and flagged rather than aborting the run.
        if self.instrument.is_enabled:
            try:
                self.callback(context)
            except (KeyboardInterrupt, DeviceNotRespondingError, TimeoutError):  # pylint: disable=W0703
                raise
            except Exception as e:  # pylint: disable=W0703
                # (typo fixed in the log message: "insturment")
                logger.error('Error in instrument {}'.format(self.instrument.name))
                global failures_detected  # pylint: disable=W0603
                failures_detected = True
                if isinstance(e, WAError):
                    logger.error(e)
                else:
                    tb = get_traceback()
                    logger.error(tb)
                    logger.error('{}({})'.format(e.__class__.__name__, e))
                if not context.current_iteration:
                    # Error occurred outside of an iteration (most likely
                    # during initial setup or teardown). Since this would affect
                    # the rest of the run, mark the instrument as broken so that
                    # it doesn't get re-enabled for subsequent iterations.
                    self.instrument.is_broken = True
                disable(self.instrument)
+
+
# Need this to keep track of callbacks, because the dispatcher only keeps
# weak references, so if the callbacks aren't referenced elsewhere, they will
# be deallocated before they've had a chance to be invoked.
_callbacks = []


def install(instrument):
    """
    This will look for methods (or any callable members) with specific names
    in the instrument and hook them up to the corresponding signals.

    :param instrument: Instrument instance to install.

    """
    logger.debug('Installing instrument %s.', instrument)
    if is_installed(instrument):
        raise ValueError('Instrument {} is already installed.'.format(instrument.name))
    for attr_name in dir(instrument):
        # A callback name may carry a priority prefix (e.g. "fast_setup");
        # strip it off and note the corresponding connection priority.
        priority = 0
        stripped_attr_name = attr_name
        for key, value in PRIORITY_MAP.iteritems():
            if attr_name.startswith(key):
                stripped_attr_name = attr_name[len(key):]
                priority = value
                break
        if stripped_attr_name in SIGNAL_MAP:
            attr = getattr(instrument, attr_name)
            if not callable(attr):
                raise ValueError('Attribute {} not callable in {}.'.format(attr_name, instrument))
            # Callbacks must take exactly (self, context).
            arg_num = len(inspect.getargspec(attr).args)
            if not arg_num == 2:
                raise ValueError('{} must take exactly 2 arguments; {} given.'.format(attr_name, arg_num))

            logger.debug('\tConnecting %s to %s', attr.__name__, SIGNAL_MAP[stripped_attr_name])
            mc = ManagedCallback(instrument, attr)
            # Keep a strong reference -- the dispatcher holds weak refs only.
            _callbacks.append(mc)
            signal.connect(mc, SIGNAL_MAP[stripped_attr_name], priority=priority)
    installed.append(instrument)
+
+
def uninstall(instrument):
    # Accepts an instance or a name; resolve to the installed instance first
    # (get_instrument raises ValueError if it is not installed).
    installed.remove(get_instrument(instrument))
+
+
def validate():
    # Give every installed instrument a chance to validate its configuration.
    for inst in installed:
        inst.validate()
+
+
def get_instrument(inst):
    """Resolve ``inst`` (an Instrument instance or a name) to an installed
    instrument instance, raising ``ValueError`` if it is not installed."""
    if isinstance(inst, Instrument):
        return inst
    matches = [i for i in installed if i.name == inst]
    if matches:
        return matches[0]
    raise ValueError('Instrument {} is not installed'.format(inst))
+
+
def disable_all():
    # Disable every installed instrument.
    for inst in installed:
        _disable_instrument(inst)
+
+
def enable_all():
    # Enable every installed instrument (broken ones stay disabled).
    for inst in installed:
        _enable_instrument(inst)
+
+
def enable(to_enable):
    # Accept a single instrument (instance or name) or an iterable of them.
    targets = to_enable if isiterable(to_enable) else [to_enable]
    for inst in targets:
        _enable_instrument(inst)
+
+
def disable(to_disable):
    # Accept a single instrument (instance or name) or an iterable of them.
    targets = to_disable if isiterable(to_disable) else [to_disable]
    for inst in targets:
        _disable_instrument(inst)
+
+
def _enable_instrument(inst):
    inst = get_instrument(inst)
    if inst.is_broken:
        # A broken instrument must stay disabled for the rest of the run.
        logger.debug('Not enabling broken instrument {}'.format(inst.name))
    else:
        logger.debug('Enabling instrument {}'.format(inst.name))
        inst.is_enabled = True
+
+
def _disable_instrument(inst):
    inst = get_instrument(inst)
    if not inst.is_enabled:
        return
    logger.debug('Disabling instrument {}'.format(inst.name))
    inst.is_enabled = False
+
+
def get_enabled():
    # Installed instruments that are currently active.
    return [i for i in installed if i.is_enabled]
+
+
def get_disabled():
    # Installed instruments that are currently inactive.
    return [i for i in installed if not i.is_enabled]
+
+
class Instrument(Extension):
    """
    Base class for instrumentation implementations.
    """

    def __init__(self, device, **kwargs):
        super(Instrument, self).__init__(**kwargs)
        self.device = device
        # Enabled by default; toggled via the module-level enable()/disable().
        self.is_enabled = True
        # Set when a callback fails outside of an iteration; a broken
        # instrument is never re-enabled.
        self.is_broken = False

    def __str__(self):
        return self.name

    def __repr__(self):
        return 'Instrument({})'.format(self.name)
+
diff --git a/wlauto/core/resolver.py b/wlauto/core/resolver.py
new file mode 100644
index 00000000..4a0d0295
--- /dev/null
+++ b/wlauto/core/resolver.py
@@ -0,0 +1,109 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+"""
+Defines infrastructure for resource resolution. This is used to find
+various dependencies/assets/etc that WA objects rely on in a flexible way.
+
+"""
+import logging
+from collections import defaultdict
+
+# Note: this is the modified louie library in wlauto/external.
+# prioritylist does not exist in vanilla louie.
+from louie.prioritylist import PriorityList # pylint: disable=E0611,F0401
+
+from wlauto.exceptions import ResourceError
+
+
class ResourceResolver(object):
    """
    Discovers and registers getters, and then handles requests for
    resources using registered getters.

    """

    def __init__(self, config):
        self.logger = logging.getLogger(self.__class__.__name__)
        # resource kind -> PriorityList of getters registered for that kind.
        self.getters = defaultdict(PriorityList)
        self.config = config

    def load(self):
        """
        Discover getters under the specified source. The source could
        be either a python package/module or a path.

        """
        for rescls in self.config.ext_loader.list_resource_getters():
            getter = self.config.get_extension(rescls.name, self)
            getter.register()

    def get(self, resource, strict=True, *args, **kwargs):
        """
        Uses registered getters to attempt to discover a resource of the specified
        kind and matching the specified criteria. Returns path to the resource that
        has been discovered. If a resource has not been discovered, this will raise
        a ``ResourceError`` or, if ``strict`` has been set to ``False``, will return
        ``None``.

        """
        self.logger.debug('Resolving {}'.format(resource))
        # Getters are tried in priority order; the first non-None wins.
        for getter in self.getters[resource.name]:
            self.logger.debug('Trying {}'.format(getter))
            result = getter.get(resource, *args, **kwargs)
            if result is not None:
                self.logger.debug('Resource {} found using {}'.format(resource, getter))
                return result
        if strict:
            raise ResourceError('{} could not be found'.format(resource))
        self.logger.debug('Resource {} not found.'.format(resource))
        return None

    def register(self, getter, kind, priority=0):
        """
        Register the specified resource getter as being able to discover a resource
        of the specified kind with the specified priority.

        This method would typically be invoked by a getter inside its __init__.
        The idea being that getters register themselves for resources they know
        they can discover.

        *priorities*

        getters that are registered with the highest priority will be invoked first. If
        multiple getters are registered under the same priority, they will be invoked
        in the order they were registered (i.e. in the order they were discovered). This is
        essentially non-deterministic.

        Generally getters that are more likely to find a resource, or would find a
        "better" version of the resource should register with higher (positive) priorities.
        Fall-back getters that should only be invoked if a resource is not found by usual
        means should register with lower (negative) priorities.

        """
        self.logger.debug('Registering {}'.format(getter.name))
        self.getters[kind].add(getter, priority)

    def unregister(self, getter, kind):
        """
        Unregister a getter that has been registered earlier.

        """
        self.logger.debug('Unregistering {}'.format(getter.name))
        try:
            self.getters[kind].remove(getter)
        except ValueError:
            raise ValueError('Resource getter {} is not installed.'.format(getter.name))
diff --git a/wlauto/core/resource.py b/wlauto/core/resource.py
new file mode 100644
index 00000000..661fefec
--- /dev/null
+++ b/wlauto/core/resource.py
@@ -0,0 +1,182 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from wlauto.core.extension import Extension
+
+
+class GetterPriority(object):
+ """
+ Enumerates standard ResourceGetter priorities. In general, getters should register
+ under one of these, rather than specifying other priority values.
+
+
+ :cached: The cached version of the resource. Look here first. This priority also implies
+ that the resource at this location is a "cache" and is not the only version of the
+ resource, so it may be cleared without losing access to the resource.
+ :preferred: Take this resource in favour of the environment resource.
+ :environment: Found somewhere under ~/.workload_automation/ or equivalent, or
+ from environment variables, external configuration files, etc.
+ These will override resource supplied with the package.
+ :external_package: Resource provided by another package.
+ :package: Resource provided with the package.
+ :remote: Resource will be downloaded from a remote location (such as an HTTP server
+ or a samba share). Try this only if no other getter was successful.
+
+ """
+ cached = 20
+ preferred = 10
+ environment = 0
+ external_package = -5
+ package = -10
+ remote = -20
+
+
+class Resource(object):
+ """
+ Represents a resource that needs to be resolved. This can be pretty much
+ anything: a file, environment variable, a Python object, etc. The only thing
+ a resource *has* to have is an owner (which would normally be the
+ Workload/Instrument/Device/etc object that needs the resource). In addition,
+ a resource may have any number of identifying attributes, but all of them are resource
+ type specific.
+
+ """
+
+ name = None
+
+ def __init__(self, owner):
+ self.owner = owner
+
+ def delete(self, instance):
+ """
+ Delete an instance of this resource type. This must be implemented by the concrete
+ subclasses based on what the resource looks like, e.g. deleting a file or a directory
+ tree, or removing an entry from a database.
+
+ :note: Implementation should *not* contain any logic for deciding whether or not
+ a resource should be deleted, only the actual deletion. The assumption is
+ that if this method is invoked, then the decision has already been made.
+
+ """
+ raise NotImplementedError()
+
+ def __str__(self):
+ return '<{}\'s {}>'.format(self.owner, self.name)
+
+
+class ResourceGetter(Extension):
+ """
+ Base class for implementing resolvers. Defines resolver interface. Resolvers are
+ responsible for discovering resources (such as particular kinds of files) they know
+ about based on the parameters that are passed to them. Each resolver also has a dict of
+ attributes that describe its operation, and may be used to determine which get invoked.
+ There is no pre-defined set of attributes and resolvers may define their own.
+
+ Class attributes:
+
+ :name: Name that uniquely identifies this getter. Must be set by any concrete subclass.
+ :resource_type: Identifies resource type(s) that this getter can handle. This must
+ be either a string (for a single type) or a list of strings for
+ multiple resource types. This must be set by any concrete subclass.
+ :priority: Priority with which this getter will be invoked. This should be one of
+ the standard priorities specified in ``GetterPriority`` enumeration. If not
+ set, this will default to ``GetterPriority.environment``.
+
+ """
+
+ name = None
+ resource_type = None
+ priority = GetterPriority.environment
+
+ def __init__(self, resolver, **kwargs):
+ super(ResourceGetter, self).__init__(**kwargs)
+ self.resolver = resolver
+
+ def register(self):
+ """
+ Registers with a resource resolver. Concrete implementations must override this
+ to invoke ``self.resolver.register()`` method to register ``self`` for specific
+ resource types.
+
+ """
+ if self.resource_type is None:
+ raise ValueError('No resource type specified for {}'.format(self.name))
+ elif isinstance(self.resource_type, list):
+ for rt in self.resource_type:
+ self.resolver.register(self, rt, self.priority)
+ else:
+ self.resolver.register(self, self.resource_type, self.priority)
+
+ def unregister(self):
+ """Unregister from a resource resolver."""
+ if self.resource_type is None:
+ raise ValueError('No resource type specified for {}'.format(self.name))
+ elif isinstance(self.resource_type, list):
+ for rt in self.resource_type:
+ self.resolver.unregister(self, rt)
+ else:
+ self.resolver.unregister(self, self.resource_type)
+
+ def get(self, resource, **kwargs):
+ """
+ This will get invoked by the resolver when attempting to resolve a resource, passing
+ in the resource to be resolved as the first parameter. Any additional parameters would
+ be specific to a particular resource type.
+
+ This method will only be invoked for resource types that the getter has registered for.
+
+ :param resource: an instance of :class:`wlauto.core.resource.Resource`.
+
+ :returns: Implementations of this method must return either the discovered resource or
+ ``None`` if the resource could not be discovered.
+
+ """
+ raise NotImplementedError()
+
+ def delete(self, resource, *args, **kwargs):
+ """
+ Delete the resource if it is discovered. All arguments are passed to a call
+ to ``self.get()``. If that call returns a resource, it is deleted.
+
+ :returns: ``True`` if the specified resource has been discovered and deleted,
+ and ``False`` otherwise.
+
+ """
+ discovered = self.get(resource, *args, **kwargs)
+ if discovered:
+ resource.delete(discovered)
+ return True
+ else:
+ return False
+
+ def __str__(self):
+ return '<ResourceGetter {}>'.format(self.name)
+
+
+class __NullOwner(object):
+ """Represents an owner for a resource not owned by anyone."""
+
+ name = 'noone'
+
+ def __getattr__(self, name):
+ return None
+
+ def __str__(self):
+ return 'no-one'
+
+ __repr__ = __str__
+
+
+NO_ONE = __NullOwner()
diff --git a/wlauto/core/result.py b/wlauto/core/result.py
new file mode 100644
index 00000000..900cbeb9
--- /dev/null
+++ b/wlauto/core/result.py
@@ -0,0 +1,321 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=no-member
+
+"""
+This module defines the classes used to handle result
+processing inside Workload Automation. There will be a
+:class:`wlauto.core.workload.WorkloadResult` object generated for
+every workload iteration executed. This object will have a list of
+:class:`wlauto.core.workload.WorkloadMetric` objects. This list will be
+populated by the workload itself and may also be updated by instrumentation
+(e.g. to add power measurements). Once the result object has been fully
+populated, it will be passed into the ``process_iteration_result`` method of
+:class:`ResultProcessor`. Once the entire run has completed, a list containing
+result objects from all iterations will be passed into ``process_results``
+ method of :class:`ResultProcessor`.
+
+Which result processors will be active is defined by the ``result_processors``
+list in the ``~/.workload_automation/config.py``. Only the result_processors
+ whose names appear in this list will be used.
+
+A :class:`ResultsManager` keeps track of active results processors.
+
+"""
+import logging
+import traceback
+from copy import copy
+from contextlib import contextmanager
+from datetime import datetime
+
+from wlauto.core.extension import Extension
+from wlauto.exceptions import WAError
+from wlauto.utils.types import numeric
+from wlauto.utils.misc import enum_metaclass
+
+
+class ResultManager(object):
+ """
+ Keeps track of result processors and passes on the results onto the individual processors.
+
+ """
+
+ def __init__(self):
+ self.logger = logging.getLogger('ResultsManager')
+ self.processors = []
+ self._bad = []
+
+ def install(self, processor):
+ self.logger.debug('Installing results processor %s', processor.name)
+ self.processors.append(processor)
+
+ def uninstall(self, processor):
+ if processor in self.processors:
+ self.logger.debug('Uninstalling results processor %s', processor.name)
+ self.processors.remove(processor)
+ else:
+ self.logger.warning('Attempting to uninstall results processor %s, which is not installed.',
+ processor.name)
+
+ def initialize(self, context):
+ # Errors aren't handled at this stage, because this gets executed
+        # before workload execution starts and we just want to propagate them
+ # and terminate (so that error can be corrected and WA restarted).
+ for processor in self.processors:
+ processor.initialize(context)
+
+ def add_result(self, result, context):
+ with self._manage_processors(context):
+ for processor in self.processors:
+ with self._handle_errors(processor):
+ processor.process_iteration_result(result, context)
+ for processor in self.processors:
+ with self._handle_errors(processor):
+ processor.export_iteration_result(result, context)
+
+ def process_run_result(self, result, context):
+ with self._manage_processors(context):
+ for processor in self.processors:
+ with self._handle_errors(processor):
+ processor.process_run_result(result, context)
+ for processor in self.processors:
+ with self._handle_errors(processor):
+ processor.export_run_result(result, context)
+
+ def finalize(self, context):
+ with self._manage_processors(context):
+ for processor in self.processors:
+ with self._handle_errors(processor):
+ processor.finalize(context)
+
+ def validate(self):
+ for processor in self.processors:
+ processor.validate()
+
+ @contextmanager
+ def _manage_processors(self, context, finalize_bad=True):
+ yield
+ for processor in self._bad:
+ if finalize_bad:
+ processor.finalize(context)
+ self.uninstall(processor)
+ self._bad = []
+
+ @contextmanager
+ def _handle_errors(self, processor):
+ try:
+ yield
+ except KeyboardInterrupt, e:
+ raise e
+ except WAError, we:
+ self.logger.error('"{}" result processor has encountered an error'.format(processor.name))
+ self.logger.error('{}("{}")'.format(we.__class__.__name__, we.message))
+ self._bad.append(processor)
+ except Exception, e: # pylint: disable=W0703
+ self.logger.error('"{}" result processor has encountered an error'.format(processor.name))
+ self.logger.error('{}("{}")'.format(e.__class__.__name__, e))
+ self.logger.error(traceback.format_exc())
+ self._bad.append(processor)
+
+
+class ResultProcessor(Extension):
+ """
+ Base class for result processors. Defines an interface that should be implemented
+ by the subclasses. A result processor can be used to do any kind of post-processing
+ of the results, from writing them out to a file, to uploading them to a database,
+ performing calculations, generating plots, etc.
+
+ """
+
+ def initialize(self, context):
+ pass
+
+ def process_iteration_result(self, result, context):
+ pass
+
+ def export_iteration_result(self, result, context):
+ pass
+
+ def process_run_result(self, result, context):
+ pass
+
+ def export_run_result(self, result, context):
+ pass
+
+ def finalize(self, context):
+ pass
+
+
+class RunResult(object):
+ """
+ Contains overall results for a run.
+
+ """
+
+ __metaclass__ = enum_metaclass('values', return_name=True)
+
+ values = [
+ 'OK',
+ 'OKISH',
+ 'PARTIAL',
+ 'FAILED',
+ 'UNKNOWN',
+ ]
+
+ @property
+ def status(self):
+ if not self.iteration_results or all([s.status == IterationResult.FAILED for s in self.iteration_results]):
+ return self.FAILED
+ elif any([s.status == IterationResult.FAILED for s in self.iteration_results]):
+ return self.PARTIAL
+ elif any([s.status == IterationResult.ABORTED for s in self.iteration_results]):
+ return self.PARTIAL
+ elif (any([s.status == IterationResult.PARTIAL for s in self.iteration_results]) or
+ self.non_iteration_errors):
+ return self.OKISH
+ elif all([s.status == IterationResult.OK for s in self.iteration_results]):
+ return self.OK
+ else:
+ return self.UNKNOWN # should never happen
+
+ def __init__(self, run_info):
+ self.info = run_info
+ self.iteration_results = []
+ self.artifacts = []
+ self.events = []
+ self.non_iteration_errors = False
+
+
+class RunEvent(object):
+ """
+    An event that occurred during a run.
+
+ """
+ def __init__(self, message):
+ self.timestamp = datetime.utcnow()
+ self.message = message
+
+ def to_dict(self):
+ return copy(self.__dict__)
+
+ def __str__(self):
+ return '{} {}'.format(self.timestamp, self.message)
+
+ __repr__ = __str__
+
+
+class IterationResult(object):
+ """
+ Contains the result of running a single iteration of a workload. It is the
+    responsibility of a workload to instantiate an IterationResult, populate it,
+    and return it from its get_result() method.
+
+ Status explanations:
+
+ :NOT_STARTED: This iteration has not yet started.
+ :RUNNING: This iteration is currently running and no errors have been detected.
+ :OK: This iteration has completed and no errors have been detected
+ :PARTIAL: One or more instruments have failed (the iteration may still be running).
+ :FAILED: The workload itself has failed.
+    :ABORTED: The user interrupted the workload.
+    :SKIPPED: The iteration was skipped due to a previous failure.
+
+ """
+
+ __metaclass__ = enum_metaclass('values', return_name=True)
+
+ values = [
+ 'NOT_STARTED',
+ 'RUNNING',
+
+ 'OK',
+ 'NONCRITICAL',
+ 'PARTIAL',
+ 'FAILED',
+ 'ABORTED',
+ 'SKIPPED',
+ ]
+
+ def __init__(self, spec):
+ self.spec = spec
+ self.id = spec.id
+ self.workload = spec.workload
+ self.iteration = None
+ self.status = self.NOT_STARTED
+ self.events = []
+ self.metrics = []
+ self.artifacts = []
+
+ def add_metric(self, name, value, units=None, lower_is_better=False):
+ self.metrics.append(Metric(name, value, units, lower_is_better))
+
+ def has_metric(self, name):
+ for metric in self.metrics:
+ if metric.name == name:
+ return True
+ return False
+
+ def add_event(self, message):
+ self.events.append(RunEvent(message))
+
+ def to_dict(self):
+ d = copy(self.__dict__)
+ d['events'] = [e.to_dict() for e in self.events]
+ return d
+
+ def __iter__(self):
+ return iter(self.metrics)
+
+ def __getitem__(self, name):
+ for metric in self.metrics:
+ if metric.name == name:
+ return metric
+ raise KeyError('Metric {} not found.'.format(name))
+
+
+class Metric(object):
+ """
+ This is a single metric collected from executing a workload.
+
+ :param name: the name of the metric. Uniquely identifies the metric
+ within the results.
+ :param value: The numerical value of the metric for this execution of
+ a workload. This can be either an int or a float.
+ :param units: Units for the collected value. Can be None if the value
+ has no units (e.g. it's a count or a standardised score).
+ :param lower_is_better: Boolean flag indicating where lower values are
+ better than higher ones. Defaults to False.
+
+ """
+
+ def __init__(self, name, value, units=None, lower_is_better=False):
+ self.name = name
+ self.value = numeric(value)
+ self.units = units
+ self.lower_is_better = lower_is_better
+
+ def to_dict(self):
+ return self.__dict__
+
+ def __str__(self):
+ result = '{}: {}'.format(self.name, self.value)
+ if self.units:
+ result += ' ' + self.units
+ result += ' ({})'.format('-' if self.lower_is_better else '+')
+ return '<{}>'.format(result)
+
+ __repr__ = __str__
+
diff --git a/wlauto/core/signal.py b/wlauto/core/signal.py
new file mode 100644
index 00000000..012bf0fd
--- /dev/null
+++ b/wlauto/core/signal.py
@@ -0,0 +1,189 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+"""
+This module wraps louie signalling mechanism. It relies on a modified version of louie
+that has prioritization added to handler invocation.
+
+"""
+from louie import dispatcher # pylint: disable=F0401
+
+
+class Signal(object):
+ """
+ This class implements the signals to be used for notifiying callbacks
+ registered to respond to different states and stages of the execution of workload
+ automation.
+
+ """
+
+ def __init__(self, name, invert_priority=False):
+ """
+ Instantiates a Signal.
+
+ :param name: name is the identifier of the Signal object. Signal instances with
+ the same name refer to the same execution stage/stage.
+ :param invert_priority: boolean parameter that determines whether multiple
+ callbacks for the same signal should be ordered with
+ ascending or descending priorities. Typically this flag
+ should be set to True if the Signal is triggered AFTER an
+ a state/stage has been reached. That way callbacks with high
+ priorities will be called right after the event has occured.
+ """
+ self.name = name
+ self.invert_priority = invert_priority
+
+ def __str__(self):
+ return self.name
+
+ __repr__ = __str__
+
+ def __hash__(self):
+ return id(self.name)
+
+
+# These are paired events -- if the before_event is sent, the after_ signal is
+# guaranteed to also be sent. In particular, the after_ signals will be sent
+# even if there is an error, so you cannot assume in the handler that the
+# device has booted successfully. In most cases, you should instead use the
+# non-paired signals below.
+BEFORE_FLASHING = Signal('before-flashing-signal', invert_priority=True)
+SUCCESSFUL_FLASHING = Signal('successful-flashing-signal')
+AFTER_FLASHING = Signal('after-flashing-signal')
+
+BEFORE_BOOT = Signal('before-boot-signal', invert_priority=True)
+SUCCESSFUL_BOOT = Signal('successful-boot-signal')
+AFTER_BOOT = Signal('after-boot-signal')
+
+BEFORE_INITIAL_BOOT = Signal('before-initial-boot-signal', invert_priority=True)
+SUCCESSFUL_INITIAL_BOOT = Signal('successful-initial-boot-signal')
+AFTER_INITIAL_BOOT = Signal('after-initial-boot-signal')
+
+BEFORE_FIRST_ITERATION_BOOT = Signal('before-first-iteration-boot-signal', invert_priority=True)
+SUCCESSFUL_FIRST_ITERATION_BOOT = Signal('successful-first-iteration-boot-signal')
+AFTER_FIRST_ITERATION_BOOT = Signal('after-first-iteration-boot-signal')
+
+BEFORE_WORKLOAD_SETUP = Signal('before-workload-setup-signal', invert_priority=True)
+SUCCESSFUL_WORKLOAD_SETUP = Signal('successful-workload-setup-signal')
+AFTER_WORKLOAD_SETUP = Signal('after-workload-setup-signal')
+
+BEFORE_WORKLOAD_EXECUTION = Signal('before-workload-execution-signal', invert_priority=True)
+SUCCESSFUL_WORKLOAD_EXECUTION = Signal('successful-workload-execution-signal')
+AFTER_WORKLOAD_EXECUTION = Signal('after-workload-execution-signal')
+
+BEFORE_WORKLOAD_RESULT_UPDATE = Signal('before-iteration-result-update-signal', invert_priority=True)
+SUCCESSFUL_WORKLOAD_RESULT_UPDATE = Signal('successful-iteration-result-update-signal')
+AFTER_WORKLOAD_RESULT_UPDATE = Signal('after-iteration-result-update-signal')
+
+BEFORE_WORKLOAD_TEARDOWN = Signal('before-workload-teardown-signal', invert_priority=True)
+SUCCESSFUL_WORKLOAD_TEARDOWN = Signal('successful-workload-teardown-signal')
+AFTER_WORKLOAD_TEARDOWN = Signal('after-workload-teardown-signal')
+
+BEFORE_OVERALL_RESULTS_PROCESSING = Signal('before-overall-results-process-signal', invert_priority=True)
+SUCCESSFUL_OVERALL_RESULTS_PROCESSING = Signal('successful-overall-results-process-signal')
+AFTER_OVERALL_RESULTS_PROCESSING = Signal('after-overall-results-process-signal')
+
+# These are the not-paired signals; they are emitted independently. E.g. the
+# fact that RUN_START was emitted does not mean run end will be.
+RUN_START = Signal('start-signal', invert_priority=True)
+RUN_END = Signal('end-signal')
+WORKLOAD_SPEC_START = Signal('workload-spec-start-signal', invert_priority=True)
+WORKLOAD_SPEC_END = Signal('workload-spec-end-signal')
+ITERATION_START = Signal('iteration-start-signal', invert_priority=True)
+ITERATION_END = Signal('iteration-end-signal')
+
+RUN_INIT = Signal('run-init-signal')
+SPEC_INIT = Signal('spec-init-signal')
+ITERATION_INIT = Signal('iteration-init-signal')
+
+RUN_FIN = Signal('run-fin-signal')
+
+# These signals are used by the LoggerFilter to tell about logging events
+ERROR_LOGGED = Signal('error_logged')
+WARNING_LOGGED = Signal('warning_logged')
+
+
+def connect(handler, signal, sender=dispatcher.Any, priority=0):
+ """
+ Connects a callback to a signal, so that the callback will be automatically invoked
+ when that signal is sent.
+
+ Parameters:
+
+    :handler: This can be any callable that takes the right arguments for
+              the signal. For most signals this means a single argument that
+              will be an ``ExecutionContext`` instance. But please see documentation
+ for individual signals in the :ref:`signals reference <instrumentation_method_map>`.
+    :signal: The signal to which the handler will be subscribed. Please see
+ :ref:`signals reference <instrumentation_method_map>` for the list of standard WA
+ signals.
+
+ .. note:: There is nothing that prevents instrumentation from sending their
+ own signals that are not part of the standard set. However the signal
+ must always be an :class:`wlauto.core.signal.Signal` instance.
+
+ :sender: The handler will be invoked only for the signals emitted by this sender. By
+ default, this is set to :class:`louie.dispatcher.Any`, so the handler will
+             be invoked for signals from any sender.
+    :priority: An integer (positive or negative) that specifies the priority of the handler.
+ Handlers with higher priority will be called before handlers with lower
+ priority. The call order of handlers with the same priority is not specified.
+ Defaults to 0.
+
+ .. note:: Priorities for some signals are inverted (so highest priority
+ handlers get executed last). Please see :ref:`signals reference <instrumentation_method_map>`
+ for details.
+
+ """
+ if signal.invert_priority:
+ dispatcher.connect(handler, signal, sender, priority=-priority) # pylint: disable=E1123
+ else:
+ dispatcher.connect(handler, signal, sender, priority=priority) # pylint: disable=E1123
+
+
+def disconnect(handler, signal, sender=dispatcher.Any):
+ """
+    Disconnect a previously connected handler from the specified signal, optionally, only
+ for the specified sender.
+
+ Parameters:
+
+ :handler: The callback to be disconnected.
+    :signal: The signal the handler is to be disconnected from. It will
+ be an :class:`wlauto.core.signal.Signal` instance.
+ :sender: If specified, the handler will only be disconnected from the signal
+ sent by this sender.
+
+ """
+ dispatcher.disconnect(handler, signal, sender)
+
+
+def send(signal, sender, *args, **kwargs):
+ """
+ Sends a signal, causing connected handlers to be invoked.
+
+    Parameters:
+
+ :signal: Signal to be sent. This must be an instance of :class:`wlauto.core.signal.Signal`
+ or its subclasses.
+ :sender: The sender of the signal (typically, this would be ``self``). Some handlers may only
+ be subscribed to signals from a particular sender.
+
+    The rest of the parameters will be passed on as arguments to the handler.
+
+ """
+ dispatcher.send(signal, sender, *args, **kwargs)
+
diff --git a/wlauto/core/version.py b/wlauto/core/version.py
new file mode 100644
index 00000000..1ae12231
--- /dev/null
+++ b/wlauto/core/version.py
@@ -0,0 +1,26 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from collections import namedtuple
+
+VersionTuple = namedtuple('Version', ['major', 'minor', 'revision'])
+
+version = VersionTuple(2, 3, 0)
+
+
+def get_wa_version():
+ version_string = '{}.{}.{}'.format(version.major, version.minor, version.revision)
+ return version_string
diff --git a/wlauto/core/workload.py b/wlauto/core/workload.py
new file mode 100644
index 00000000..dad52aaa
--- /dev/null
+++ b/wlauto/core/workload.py
@@ -0,0 +1,94 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+"""
+A workload is the unit of execution. It represents a set of activities that are performed
+and measured together, as well as the necessary setup and teardown procedures. A single
+execution of a workload produces one :class:`wlauto.core.result.WorkloadResult` that is populated with zero or more
+:class:`wlauto.core.result.WorkloadMetric`\ s and/or
+:class:`wlauto.core.result.Artifact`\s by the workload and active instrumentation.
+
+"""
+from wlauto.core.extension import Extension
+from wlauto.exceptions import WorkloadError
+
+
+class Workload(Extension):
+ """
+ This is the base class for the workloads executed by the framework.
+ Each of the methods throwing NotImplementedError *must* be implemented
+ by the derived classes.
+
+ """
+
+ supported_devices = []
+ supported_platforms = []
+ summary_metrics = []
+
+ def __init__(self, device, **kwargs):
+ """
+ Creates a new Workload.
+
+ :param device: the Device on which the workload will be executed.
+ """
+ super(Workload, self).__init__(**kwargs)
+ if self.supported_devices and device.name not in self.supported_devices:
+ raise WorkloadError('Workload {} does not support device {}'.format(self.name, device.name))
+ if self.supported_platforms and device.platform not in self.supported_platforms:
+ raise WorkloadError('Workload {} does not support platform {}'.format(self.name, device.platform))
+ self.device = device
+
+ def init_resources(self, context):
+ """
+ May be optionally overridden by concrete instances in order to discover and initialise
+ necessary resources. This method will be invoked at most once during the execution:
+ before running any workloads, and before invocation of ``validate()``, but after it is
+ clear that this workload will run (i.e. this method will not be invoked for workloads
+ that have been discovered but have not been scheduled run in the agenda).
+
+ """
+ pass
+
+ def setup(self, context):
+ """
+        Perform the setup necessary to run the workload, such as copying the necessary files
+ to the device, configuring the environments, etc.
+
+ This is also the place to perform any on-device checks prior to attempting to execute
+ the workload.
+
+ """
+ pass
+
+ def run(self, context):
+ """Execute the workload. This is the method that performs the actual "work" of the"""
+ pass
+
+ def update_result(self, context):
+ """
+ Update the result within the specified execution context with the metrics
+        from this workload iteration.
+
+ """
+ pass
+
+ def teardown(self, context):
+ """ Perform any final clean up for the Workload. """
+ pass
+
+ def __str__(self):
+ return '<Workload {}>'.format(self.name)
+
diff --git a/wlauto/devices/__init__.py b/wlauto/devices/__init__.py
new file mode 100644
index 00000000..16224d6f
--- /dev/null
+++ b/wlauto/devices/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
diff --git a/wlauto/devices/android/__init__.py b/wlauto/devices/android/__init__.py
new file mode 100644
index 00000000..cd5d64d6
--- /dev/null
+++ b/wlauto/devices/android/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
diff --git a/wlauto/devices/android/generic/__init__.py b/wlauto/devices/android/generic/__init__.py
new file mode 100644
index 00000000..51a43948
--- /dev/null
+++ b/wlauto/devices/android/generic/__init__.py
@@ -0,0 +1,37 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto import AndroidDevice, Parameter
+
+
+class GenericDevice(AndroidDevice):
+ name = 'generic_android'
+ description = """
+ Generic Android device. Use this if you do not have a device file for
+ your device.
+
+ This implements the minimum functionality that should be supported by
+ all android devices.
+
+ """
+
+ default_working_directory = '/storage/sdcard0/working'
+ has_gpu = True
+
+ parameters = [
+ Parameter('core_names', default=[], override=True),
+ Parameter('core_clusters', default=[], override=True),
+ ]
diff --git a/wlauto/devices/android/juno/__init__.py b/wlauto/devices/android/juno/__init__.py
new file mode 100644
index 00000000..712c4e1d
--- /dev/null
+++ b/wlauto/devices/android/juno/__init__.py
@@ -0,0 +1,173 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=E1101
+import os
+import re
+import time
+
+import pexpect
+
+from wlauto import BigLittleDevice, Parameter
+from wlauto.exceptions import DeviceError
+from wlauto.utils.serial_port import open_serial_connection, pulse_dtr
+from wlauto.utils.android import adb_connect, adb_disconnect, adb_list_devices
+from wlauto.utils.uefi import UefiMenu
+
+
# Message printed by the board firmware on the serial console just before
# auto-boot; used to detect where in the boot sequence the board is.
AUTOSTART_MESSAGE = 'Press Enter to stop auto boot...'
+
+
class Juno(BigLittleDevice):
    # Juno is driven over two channels: a serial console (via pexpect) for
    # boot, reset and IP-address discovery, and adb-over-TCP for everything
    # else once Android is up.

    name = 'juno'
    description = """
    ARM Juno next generation big.LITTLE development platform.
    """

    capabilities = ['reset_power']

    has_gpu = True

    modules = [
        'vexpress',
    ]

    parameters = [
        Parameter('retries', kind=int, default=2,
                  description="""Specifies the number of times the device will attempt to recover
                  (normally, with a hard reset) if it detects that something went wrong."""),

        # VExpress flasher expects a device to have these:
        Parameter('uefi_entry', default='WA',
                  description='The name of the entry to use (will be created if does not exist).'),
        Parameter('microsd_mount_point', default='/media/JUNO',
                  description='Location at which the device\'s MicroSD card will be mounted.'),
        Parameter('port', default='/dev/ttyS0', description='Serial port on which the device is connected.'),
        Parameter('baudrate', kind=int, default=115200, description='Serial connection baud.'),
        Parameter('timeout', kind=int, default=300, description='Serial connection timeout.'),
        Parameter('core_names', default=['a53', 'a53', 'a53', 'a53', 'a57', 'a57'], override=True),
        Parameter('core_clusters', default=[0, 0, 0, 0, 1, 1], override=True),
    ]

    short_delay = 1
    firmware_prompt = 'Cmd>'
    # this is only used if there is no UEFI entry and one has to be created.
    kernel_arguments = 'console=ttyAMA0,115200 earlyprintk=pl011,0x7ff80000 verbose debug init=/init root=/dev/sda1 rw ip=dhcp rootwait'

    def boot(self, **kwargs):
        # Reset the board, then drive the UEFI menu over the serial console
        # to select (creating first, if necessary) the WA boot entry, and
        # finally wait for the Android shell prompt.
        self.logger.debug('Resetting the device.')
        self.reset()
        with open_serial_connection(port=self.port,
                                    baudrate=self.baudrate,
                                    timeout=self.timeout,
                                    init_dtr=0) as target:
            menu = UefiMenu(target)
            self.logger.debug('Waiting for UEFI menu...')
            menu.open(timeout=120)
            try:
                menu.select(self.uefi_entry)
            except LookupError:
                # The configured entry does not exist yet; create it via the
                # flasher module using the default kernel arguments.
                self.logger.debug('{} UEFI entry not found.'.format(self.uefi_entry))
                self.logger.debug('Attempting to create one using default flasher configuration.')
                self.flasher.image_args = self.kernel_arguments
                # NOTE(review): 'create_uefi_enty' looks misspelled -- confirm
                # the vexpress flasher module defines this exact attribute name.
                self.flasher.create_uefi_enty(self, menu)
                menu.select(self.uefi_entry)
            self.logger.debug('Waiting for the Android prompt.')
            target.expect(self.android_prompt, timeout=self.timeout)

    def connect(self):
        # Discover the board's IP address over serial (if not already known),
        # then attach adb over TCP and defer to the base class to finish
        # the connection.
        if not self._is_ready:
            if not self.adb_name:  # pylint: disable=E0203
                with open_serial_connection(timeout=self.timeout,
                                            port=self.port,
                                            baudrate=self.baudrate,
                                            init_dtr=0) as target:
                    target.sendline('')
                    self.logger.debug('Waiting for android prompt.')
                    target.expect(self.android_prompt)

                    self.logger.debug('Waiting for IP address...')
                    wait_start_time = time.time()
                    while True:
                        # Poll the eth0 interface until DHCP has assigned an
                        # address; adb then connects to <ip>:5555.
                        # NOTE(review): the dots in this pattern are unescaped,
                        # so they match any character; looser than intended but
                        # works for 'inet a.b.c.d' output.
                        target.sendline('ip addr list eth0')
                        time.sleep(1)
                        try:
                            target.expect('inet ([1-9]\d*.\d+.\d+.\d+)', timeout=10)
                            self.adb_name = target.match.group(1) + ':5555'  # pylint: disable=W0201
                            break
                        except pexpect.TIMEOUT:
                            pass  # We have our own timeout -- see below.
                        if (time.time() - wait_start_time) > self.ready_timeout:
                            raise DeviceError('Could not acquire IP address.')

            # Drop any stale connection to the same address before connecting.
            if self.adb_name in adb_list_devices():
                adb_disconnect(self.adb_name)
            adb_connect(self.adb_name, timeout=self.timeout)
            super(Juno, self).connect()  # wait for boot to complete etc.
            self._is_ready = True

    def disconnect(self):
        if self._is_ready:
            super(Juno, self).disconnect()
            adb_disconnect(self.adb_name)
            self._is_ready = False

    def reset(self):
        # Currently, reboot is not working in Android on Juno, so
        # perform a hard reset instead.
        self.hard_reset()

    def get_cpuidle_states(self, cpu=0):
        # No cpuidle state information is reported for Juno; always returns
        # an empty dict.
        return {}

    def hard_reset(self):
        # Power-cycle the board by pulsing DTR on the serial line, then watch
        # the console to see whether we landed in auto-boot or at the
        # firmware prompt (in which case an explicit 'reboot' is needed).
        self.disconnect()
        self.adb_name = None  # Force re-acquire IP address on reboot. pylint: disable=attribute-defined-outside-init
        with open_serial_connection(port=self.port,
                                    baudrate=self.baudrate,
                                    timeout=self.timeout,
                                    init_dtr=0,
                                    get_conn=True) as (target, conn):
            pulse_dtr(conn, state=True, duration=0.1)  # TRM specifies a pulse of >=100ms

            # expect() returns the index of the matched pattern: 0 for the
            # auto-boot message, 1 for the firmware prompt.
            i = target.expect([AUTOSTART_MESSAGE, self.firmware_prompt])
            if i:
                self.logger.debug('Saw firmware prompt.')
                time.sleep(self.short_delay)
                target.sendline('reboot')
            else:
                self.logger.debug('Saw auto boot message.')

    def wait_for_microsd_mount_point(self, target, timeout=100):
        # Block until the board's MicroSD card is mounted on the host at
        # microsd_mount_point (detected via the presence of config.txt),
        # rebooting the board between attempts; raises DeviceError if the
        # mount never appears.
        attempts = 1 + self.retries
        if os.path.exists(os.path.join(self.microsd_mount_point, 'config.txt')):
            return

        self.logger.debug('Waiting for VExpress MicroSD to mount...')
        for i in xrange(attempts):
            if i:  # Do not reboot on the first attempt.
                target.sendline('reboot')
            for _ in xrange(timeout):
                time.sleep(self.short_delay)
                if os.path.exists(os.path.join(self.microsd_mount_point, 'config.txt')):
                    return
        raise DeviceError('Did not detect MicroSD mount on {}'.format(self.microsd_mount_point))

    def get_android_id(self):
        # Android ID currently not set properly in Juno Android builds, so
        # return a fixed placeholder value.
        return 'abad1deadeadbeef'
+
diff --git a/wlauto/devices/android/nexus10/__init__.py b/wlauto/devices/android/nexus10/__init__.py
new file mode 100644
index 00000000..ad6f2555
--- /dev/null
+++ b/wlauto/devices/android/nexus10/__init__.py
@@ -0,0 +1,48 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import time
+
+from wlauto import AndroidDevice, Parameter
+
+
class Nexus10Device(AndroidDevice):

    name = 'Nexus10'
    description = """
    Nexus10 is a 10 inch tablet device, which has dual-core A15.

    To be able to use Nexus10 in WA, the following must be true:

        - USB Debugging Mode is enabled.
        - Generate USB debugging authorisation for the host machine

    """

    default_working_directory = '/sdcard/working'
    has_gpu = True
    max_cores = 2

    parameters = [
        Parameter('core_names', default=['A15', 'A15'], override=True),
        Parameter('core_clusters', default=[0, 0], override=True),
    ]

    def init(self, context, *args, **kwargs):
        # Give the device time to settle, keep the screen on while plugged
        # in, and send key event 82 (menu) to dismiss the keyguard.
        time.sleep(self.long_delay)
        self.execute('svc power stayon true', check_exit_code=False)
        time.sleep(self.long_delay)
        self.execute('input keyevent 82')
diff --git a/wlauto/devices/android/nexus5/__init__.py b/wlauto/devices/android/nexus5/__init__.py
new file mode 100644
index 00000000..cd2f09db
--- /dev/null
+++ b/wlauto/devices/android/nexus5/__init__.py
@@ -0,0 +1,40 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto import AndroidDevice, Parameter
+
+
class Nexus5Device(AndroidDevice):

    name = 'Nexus5'
    description = """
    Adapter for Nexus 5.

    To be able to use Nexus5 in WA, the following must be true:

        - USB Debugging Mode is enabled.
        - Generate USB debugging authorisation for the host machine

    """

    default_working_directory = '/storage/sdcard0/working'
    has_gpu = True
    max_cores = 4

    # Quad-core Krait 400 in a single cluster.
    parameters = [
        Parameter('core_names', default=['krait400', 'krait400', 'krait400', 'krait400'], override=True),
        Parameter('core_clusters', default=[0, 0, 0, 0], override=True),
    ]
diff --git a/wlauto/devices/android/note3/__init__.py b/wlauto/devices/android/note3/__init__.py
new file mode 100644
index 00000000..9c8f42ae
--- /dev/null
+++ b/wlauto/devices/android/note3/__init__.py
@@ -0,0 +1,76 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import time
+
+from wlauto import AndroidDevice, Parameter
+from wlauto.exceptions import TimeoutError
+from wlauto.utils.android import adb_shell
+
+
class Note3Device(AndroidDevice):

    name = 'Note3'
    description = """
    Adapter for Galaxy Note 3.

    To be able to use Note3 in WA, the following must be true:

        - USB Debugging Mode is enabled.
        - Generate USB debugging authorisation for the host machine

    """

    parameters = [
        Parameter('core_names', default=['A15', 'A15', 'A15', 'A15'], override=True),
        Parameter('core_clusters', default=[0, 0, 0, 0], override=True),
        Parameter('working_directory', default='/storage/sdcard0/wa-working', override=True),
    ]

    def __init__(self, **kwargs):
        super(Note3Device, self).__init__(**kwargs)
        # Tracks whether the last reset/hard_reset has not yet been followed
        # by a connect(), so connect() knows to wait out the adb restart.
        self._just_rebooted = False

    def init(self, context):
        # Keep the screen on while the device is plugged in.
        self.execute('svc power stayon true', check_exit_code=False)

    def reset(self):
        super(Note3Device, self).reset()
        self._just_rebooted = True

    def hard_reset(self):
        super(Note3Device, self).hard_reset()
        self._just_rebooted = True

    def connect(self):  # NOQA pylint: disable=R0912
        super(Note3Device, self).connect()
        if self._just_rebooted:
            self.logger.debug('Waiting for boot to complete...')
            # On the Note 3, adb connection gets reset some time after booting.
            # This causes errors during execution. To prevent this, open a shell
            # session and wait for it to be killed. Once its killed, give adb
            # enough time to restart, and then the device should be ready.
            try:
                adb_shell(self.adb_name, '', timeout=20)  # pylint: disable=no-member
                time.sleep(5)  # give adb time to re-initialize
            except TimeoutError:
                pass  # timed out waiting for the session to be killed -- assume not going to be.

            self.logger.debug('Boot completed.')
            self._just_rebooted = False
        # Swipe upwards to unlock the screen.
        # (The trailing space in the command string is preserved as-shipped.)
        time.sleep(self.long_delay)
        self.execute('input touchscreen swipe 540 1600 560 800 ')
diff --git a/wlauto/devices/android/odroidxu3/__init__.py b/wlauto/devices/android/odroidxu3/__init__.py
new file mode 100644
index 00000000..60f780b7
--- /dev/null
+++ b/wlauto/devices/android/odroidxu3/__init__.py
@@ -0,0 +1,38 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto import AndroidDevice, Parameter
+
+
class OdroidXU3(AndroidDevice):

    name = "odroidxu3"
    description = 'HardKernel Odroid XU3 development board.'

    # Fan control is handled by a dedicated module rather than in-line here.
    core_modules = [
        'odroidxu3-fan',
    ]

    parameters = [
        # NOTE(review): the default adb_name appears to be the fixed serial
        # reported by XU3 boards -- confirm, and override if yours differs.
        Parameter('adb_name', default='BABABEEFBABABEEF', override=True),
        Parameter('working_directory', default='/data/local/wa-working', override=True),
        # big.LITTLE topology: four A7s (cluster 0) and four A15s (cluster 1).
        Parameter('core_names', default=['a7', 'a7', 'a7', 'a7', 'a15', 'a15', 'a15', 'a15'], override=True),
        Parameter('core_clusters', default=[0, 0, 0, 0, 1, 1, 1, 1], override=True),
        Parameter('port', default='/dev/ttyUSB0', kind=str,
                  description='Serial port on which the device is connected'),
        Parameter('baudrate', default=115200, kind=int, description='Serial connection baud rate'),
    ]
+
diff --git a/wlauto/devices/android/tc2/__init__.py b/wlauto/devices/android/tc2/__init__.py
new file mode 100644
index 00000000..9d3f92b9
--- /dev/null
+++ b/wlauto/devices/android/tc2/__init__.py
@@ -0,0 +1,847 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+import sys
+import re
+import string
+import shutil
+import time
+from collections import Counter
+
+import pexpect
+
+from wlauto import BigLittleDevice, RuntimeParameter, Parameter, settings
+from wlauto.exceptions import ConfigError, DeviceError
+from wlauto.utils.android import adb_connect, adb_disconnect, adb_list_devices
+from wlauto.utils.serial_port import open_serial_connection
+from wlauto.utils.misc import merge_dicts
+from wlauto.utils.types import boolean
+
+
# SCC register value and number of reboot attempts needed for each supported
# TC2 boot firmware.
BOOT_FIRMWARE = {
    'uefi': {
        'SCC_0x010': '0x000003E0',
        'reboot_attempts': 0,
    },
    'bootmon': {
        'SCC_0x010': '0x000003D0',
        'reboot_attempts': 2,
    },
}

# Per boot-mode configuration: which images/dtb/initrd/kernel files to use,
# the SCC_0x700 register value, and the CPU topology ('cpus') visible to the
# OS in that mode.
MODES = {
    'mp_a7_only': {
        'images_file': 'images_mp.txt',
        'dtb': 'mp_a7',
        'initrd': 'init_mp',
        'kernel': 'kern_mp',
        'SCC_0x700': '0x1032F003',
        'cpus': ['a7', 'a7', 'a7'],
    },
    'mp_a7_bootcluster': {
        'images_file': 'images_mp.txt',
        'dtb': 'mp_a7bc',
        'initrd': 'init_mp',
        'kernel': 'kern_mp',
        'SCC_0x700': '0x1032F003',
        'cpus': ['a7', 'a7', 'a7', 'a15', 'a15'],
    },
    'mp_a15_only': {
        'images_file': 'images_mp.txt',
        'dtb': 'mp_a15',
        'initrd': 'init_mp',
        'kernel': 'kern_mp',
        'SCC_0x700': '0x0032F003',
        'cpus': ['a15', 'a15'],
    },
    'mp_a15_bootcluster': {
        'images_file': 'images_mp.txt',
        'dtb': 'mp_a15bc',
        'initrd': 'init_mp',
        'kernel': 'kern_mp',
        'SCC_0x700': '0x0032F003',
        'cpus': ['a15', 'a15', 'a7', 'a7', 'a7'],
    },
    'iks_cpu': {
        'images_file': 'images_iks.txt',
        'dtb': 'iks',
        'initrd': 'init_iks',
        'kernel': 'kern_iks',
        'SCC_0x700': '0x1032F003',
        'cpus': ['a7', 'a7'],
    },
    'iks_a15': {
        'images_file': 'images_iks.txt',
        'dtb': 'iks',
        'initrd': 'init_iks',
        'kernel': 'kern_iks',
        'SCC_0x700': '0x0032F003',
        'cpus': ['a15', 'a15'],
    },
    'iks_a7': {
        'images_file': 'images_iks.txt',
        'dtb': 'iks',
        'initrd': 'init_iks',
        'kernel': 'kern_iks',
        'SCC_0x700': '0x0032F003',
        'cpus': ['a7', 'a7'],
    },
    'iks_ns_a15': {
        'images_file': 'images_iks.txt',
        'dtb': 'iks',
        'initrd': 'init_iks',
        'kernel': 'kern_iks',
        'SCC_0x700': '0x0032F003',
        'cpus': ['a7', 'a7', 'a7', 'a15', 'a15'],
    },
    'iks_ns_a7': {
        'images_file': 'images_iks.txt',
        'dtb': 'iks',
        'initrd': 'init_iks',
        'kernel': 'kern_iks',
        'SCC_0x700': '0x0032F003',
        'cpus': ['a7', 'a7', 'a7', 'a15', 'a15'],
    },
}

# Modes in which only one of the two clusters is active.
A7_ONLY_MODES = ['mp_a7_only', 'iks_a7', 'iks_cpu']
A15_ONLY_MODES = ['mp_a15_only', 'iks_a15']

# Default cpufreq governor tunables for each cluster; user-supplied values
# are merged on top of these by _TC2DeviceConfig.
DEFAULT_A7_GOVERNOR_TUNABLES = {
    'interactive': {
        'above_hispeed_delay': 80000,
        'go_hispeed_load': 85,
        'hispeed_freq': 800000,
        'min_sample_time': 80000,
        'timer_rate': 20000,
    },
    'ondemand': {
        'sampling_rate': 50000,
    },
}

DEFAULT_A15_GOVERNOR_TUNABLES = {
    'interactive': {
        'above_hispeed_delay': 80000,
        'go_hispeed_load': 85,
        'hispeed_freq': 1000000,
        'min_sample_time': 80000,
        'timer_rate': 20000,
    },
    'ondemand': {
        'sampling_rate': 50000,
    },
}

# Timeout (in seconds) for adb shell commands issued to the device.
ADB_SHELL_TIMEOUT = 30
+
+
+class _TC2DeviceConfig(object):
+
+ name = 'TC2 Configuration'
+ device_name = 'TC2'
+
+ def __init__(self, # pylint: disable=R0914,W0613
+ root_mount='/media/VEMSD',
+
+ disable_boot_configuration=False,
+ boot_firmware=None,
+ mode=None,
+
+ fs_medium='usb',
+
+ device_working_directory='/data/local/usecase',
+
+ bm_image='bm_v519r.axf',
+
+ serial_device='/dev/ttyS0',
+ serial_baud=38400,
+ serial_max_timeout=600,
+ serial_log=sys.stdout,
+
+ init_timeout=120,
+
+ always_delete_uefi_entry=True,
+ psci_enable=True,
+
+ host_working_directory=None,
+
+ a7_governor_tunables=None,
+ a15_governor_tunables=None,
+
+ adb_name=None,
+ # Compatibility with other android devices.
+ enable_screen_check=None, # pylint: disable=W0613
+ **kwargs
+ ):
+ self.root_mount = root_mount
+ self.disable_boot_configuration = disable_boot_configuration
+ if not disable_boot_configuration:
+ self.boot_firmware = boot_firmware or 'uefi'
+ self.default_mode = mode or 'mp_a7_bootcluster'
+ elif boot_firmware or mode:
+ raise ConfigError('boot_firmware and/or mode cannot be specified when disable_boot_configuration is enabled.')
+
+ self.mode = self.default_mode
+ self.working_directory = device_working_directory
+ self.serial_device = serial_device
+ self.serial_baud = serial_baud
+ self.serial_max_timeout = serial_max_timeout
+ self.serial_log = serial_log
+ self.bootmon_prompt = re.compile('^([KLM]:\\\)?>', re.MULTILINE)
+
+ self.fs_medium = fs_medium.lower()
+
+ self.bm_image = bm_image
+
+ self.init_timeout = init_timeout
+
+ self.always_delete_uefi_entry = always_delete_uefi_entry
+ self.psci_enable = psci_enable
+
+ self.resource_dir = os.path.join(os.path.dirname(__file__), 'resources')
+ self.board_dir = os.path.join(self.root_mount, 'SITE1', 'HBI0249A')
+ self.board_file = 'board.txt'
+ self.board_file_bak = 'board.bak'
+ self.images_file = 'images.txt'
+
+ self.host_working_directory = host_working_directory or settings.meta_directory
+
+ if not a7_governor_tunables:
+ self.a7_governor_tunables = DEFAULT_A7_GOVERNOR_TUNABLES
+ else:
+ self.a7_governor_tunables = merge_dicts(DEFAULT_A7_GOVERNOR_TUNABLES, a7_governor_tunables)
+
+ if not a15_governor_tunables:
+ self.a15_governor_tunables = DEFAULT_A15_GOVERNOR_TUNABLES
+ else:
+ self.a15_governor_tunables = merge_dicts(DEFAULT_A15_GOVERNOR_TUNABLES, a15_governor_tunables)
+
+ self.adb_name = adb_name
+
+ @property
+ def src_images_template_file(self):
+ return os.path.join(self.resource_dir, MODES[self.mode]['images_file'])
+
+ @property
+ def src_images_file(self):
+ return os.path.join(self.host_working_directory, 'images.txt')
+
+ @property
+ def src_board_template_file(self):
+ return os.path.join(self.resource_dir, 'board_template.txt')
+
+ @property
+ def src_board_file(self):
+ return os.path.join(self.host_working_directory, 'board.txt')
+
+ @property
+ def kernel_arguments(self):
+ kernel_args = ' console=ttyAMA0,38400 androidboot.console=ttyAMA0 selinux=0'
+ if self.fs_medium == 'usb':
+ kernel_args += ' androidboot.hardware=arm-versatileexpress-usb'
+ if 'iks' in self.mode:
+ kernel_args += ' no_bL_switcher=0'
+ return kernel_args
+
+ @property
+ def kernel(self):
+ return MODES[self.mode]['kernel']
+
+ @property
+ def initrd(self):
+ return MODES[self.mode]['initrd']
+
+ @property
+ def dtb(self):
+ return MODES[self.mode]['dtb']
+
+ @property
+ def SCC_0x700(self):
+ return MODES[self.mode]['SCC_0x700']
+
+ @property
+ def SCC_0x010(self):
+ return BOOT_FIRMWARE[self.boot_firmware]['SCC_0x010']
+
+ @property
+ def reboot_attempts(self):
+ return BOOT_FIRMWARE[self.boot_firmware]['reboot_attempts']
+
+ def validate(self):
+ valid_modes = MODES.keys()
+ if self.mode not in valid_modes:
+ message = 'Invalid mode: {}; must be in {}'.format(
+ self.mode, valid_modes)
+ raise ConfigError(message)
+
+ valid_boot_firmware = BOOT_FIRMWARE.keys()
+ if self.boot_firmware not in valid_boot_firmware:
+ message = 'Invalid boot_firmware: {}; must be in {}'.format(
+ self.boot_firmware,
+ valid_boot_firmware)
+ raise ConfigError(message)
+
+ if self.fs_medium not in ['usb', 'sdcard']:
+ message = 'Invalid filesystem medium: {} allowed values : usb, sdcard '.format(self.fs_medium)
+ raise ConfigError(message)
+
+
class TC2Device(BigLittleDevice):

    name = 'TC2'
    description = """
    TC2 is a development board, which has three A7 cores and two A15 cores.

    TC2 has a number of boot parameters which are:

    :root_mount: Defaults to '/media/VEMSD'
    :boot_firmware: It has only two boot firmware options, which are
                    uefi and bootmon. Defaults to 'uefi'.
    :fs_medium: Defaults to 'usb'.
    :device_working_directory: The direcitory that WA will be using to copy
                               files to. Defaults to 'data/local/usecase'
    :serial_device: The serial device which TC2 is connected to. Defaults to
                    '/dev/ttyS0'.
    :serial_baud: Defaults to 38400.
    :serial_max_timeout: Serial timeout value in seconds. Defaults to 600.
    :serial_log: Defaults to standard output.
    :init_timeout: The timeout in seconds to init the device. Defaults set
                   to 30.
    :always_delete_uefi_entry: If true, it will delete the ufi entry.
                               Defaults to True.
    :psci_enable: Enabling the psci. Defaults to True.
    :host_working_directory: The host working directory. Defaults to None.
    :disable_boot_configuration: Disables boot configuration through images.txt and board.txt. When
                                 this is ``True``, those two files will not be overwritten in VEMSD.
                                 This option may be necessary if the firmware version in the ``TC2``
                                 is not compatible with the templates in WA. Please note that enabling
                                 this will prevent you form being able to set ``boot_firmware`` and
                                 ``mode`` parameters. Defaults to ``False``.

    TC2 can also have a number of different booting mode, which are:

    :mp_a7_only: Only the A7 cluster.
    :mp_a7_bootcluster: Both A7 and A15 clusters, but it boots on A7
                        cluster.
    :mp_a15_only: Only the A15 cluster.
    :mp_a15_bootcluster: Both A7 and A15 clusters, but it boots on A15
                         clusters.
    :iks_cpu: Only A7 cluster with only 2 cpus.
    :iks_a15: Only A15 cluster.
    :iks_a7: Same as iks_cpu
    :iks_ns_a15: Both A7 and A15 clusters.
    :iks_ns_a7: Both A7 and A15 clusters.

    The difference between mp and iks is the scheduling policy.

    TC2 takes the following runtime parameters

    :a7_cores: Number of active A7 cores.
    :a15_cores: Number of active A15 cores.
    :a7_governor: CPUFreq governor for the A7 cluster.
    :a15_governor: CPUFreq governor for the A15 cluster.
    :a7_min_frequency: Minimum CPU frequency for the A7 cluster.
    :a15_min_frequency: Minimum CPU frequency for the A15 cluster.
    :a7_max_frequency: Maximum CPU frequency for the A7 cluster.
    :a15_max_frequency: Maximum CPU frequency for the A7 cluster.
    :irq_affinity: lambda x: Which cluster will receive IRQs.
    :cpuidle: Whether idle states should be enabled.
    :sysfile_values: A dict mapping a complete file path to the value that
                     should be echo'd into it. By default, the file will be
                     subsequently read to verify that the value was written
                     into it with DeviceError raised otherwise. For write-only
                     files, this check can be disabled by appending a ``!`` to
                     the end of the file path.

    """

    has_gpu = False
    a15_only_modes = A15_ONLY_MODES
    a7_only_modes = A7_ONLY_MODES
    # IKS modes whose core count cannot be changed at runtime.
    not_configurable_modes = ['iks_a7', 'iks_cpu', 'iks_a15']

    # core_names/core_clusters are derived from the boot mode (see the
    # properties below), so the standard parameters are accepted but ignored.
    parameters = [
        Parameter('core_names', mandatory=False, override=True,
                  description='This parameter will be ignored for TC2'),
        Parameter('core_clusters', mandatory=False, override=True,
                  description='This parameter will be ignored for TC2'),
    ]

    # NOTE(review): the irq_affinity getter takes no arguments while the
    # cpuidle getter takes the device -- confirm RuntimeParameter tolerates
    # zero-argument getters.
    runtime_parameters = [
        RuntimeParameter('irq_affinity', lambda d, x: d.set_irq_affinity(x.lower()), lambda: None),
        RuntimeParameter('cpuidle', lambda d, x: d.enable_idle_states() if boolean(x) else d.disable_idle_states(),
                         lambda d: d.get_cpuidle())
    ]
+
    def get_mode(self):
        # Accessor backing the ``mode`` property below.
        return self.config.mode

    def set_mode(self, mode):
        # The boot mode can only be changed before the board has booted;
        # ``None`` selects the configured default mode.
        if self._has_booted:
            raise DeviceError('Attempting to set boot mode when already booted.')
        valid_modes = MODES.keys()
        if mode is None:
            mode = self.config.default_mode
        if mode not in valid_modes:
            message = 'Invalid mode: {}; must be in {}'.format(mode, valid_modes)
            raise ConfigError(message)
        self.config.mode = mode

    mode = property(get_mode, set_mode)
+
    def _get_core_names(self):
        # Core names are fully determined by the active boot mode.
        return MODES[self.mode]['cpus']

    def _set_core_names(self, value):
        # Read-only in practice: assignments from the framework are ignored.
        pass

    core_names = property(_get_core_names, _set_core_names)
+
+ def _get_core_clusters(self):
+ seen = set([])
+ core_clusters = []
+ cluster_id = -1
+ for core in MODES[self.mode]['cpus']:
+ if core not in seen:
+ seen.add(core)
+ cluster_id += 1
+ core_clusters.append(cluster_id)
+ return core_clusters
+
    def _set_core_clusters(self, value):
        # Read-only in practice: assignments from the framework are ignored.
        pass

    core_clusters = property(_get_core_clusters, _set_core_clusters)

    @property
    def cpu_cores(self):
        # Alias for the per-mode core name list.
        return MODES[self.mode]['cpus']
+
    @property
    def max_a7_cores(self):
        # Number of A7 cores available in the current boot mode.
        return Counter(MODES[self.mode]['cpus'])['a7']

    @property
    def max_a15_cores(self):
        # Number of A15 cores available in the current boot mode.
        return Counter(MODES[self.mode]['cpus'])['a15']

    @property
    def a7_governor_tunables(self):
        # Merged (defaults + user-supplied) governor tunables from the config.
        return self.config.a7_governor_tunables

    @property
    def a15_governor_tunables(self):
        return self.config.a15_governor_tunables
+
    def __init__(self, **kwargs):
        # All TC2-specific options are parsed into a separate config object.
        # NOTE(review): kwargs are consumed by _TC2DeviceConfig and not
        # forwarded to the base class -- confirm this is intentional.
        super(TC2Device, self).__init__()
        self.config = _TC2DeviceConfig(**kwargs)
        self.working_directory = self.config.working_directory
        self._serial = None
        self._has_booted = None
+
    def boot(self, **kwargs):  # NOQA
        # Boot (or reboot) the board into the requested ``os_mode``, driving
        # the entire sequence over the serial console, then wait until
        # Android reports boot-complete and has a DHCP address.
        mode = kwargs.get('os_mode', None)
        self._is_ready = False
        self._has_booted = False

        self.mode = mode
        self.logger.debug('Booting in {} mode'.format(self.mode))

        with open_serial_connection(timeout=self.config.serial_max_timeout,
                                    port=self.config.serial_device,
                                    baudrate=self.config.serial_baud) as target:
            if self.config.boot_firmware == 'bootmon':
                self._boot_using_bootmon(target)
            elif self.config.boot_firmware == 'uefi':
                self._boot_using_uefi(target)
            else:
                message = 'Unexpected boot firmware: {}'.format(self.config.boot_firmware)
                raise ConfigError(message)

            try:
                target.sendline('')
                self.logger.debug('Waiting for the Android prompt.')
                target.expect(self.android_prompt, timeout=40)  # pylint: disable=E1101
            except pexpect.TIMEOUT:
                # Try a second time before giving up.
                self.logger.debug('Did not get Android prompt, retrying...')
                target.sendline('')
                target.expect(self.android_prompt, timeout=10)  # pylint: disable=E1101

            self.logger.debug('Waiting for OS to initialize...')
            started_waiting_time = time.time()
            time.sleep(20)  # we know it's not going to take less time than this.
            boot_completed, got_ip_address = False, False
            while True:
                try:
                    # expect() returns the index of the matched pattern, so
                    # boot_completed flips to 1 (truthy) once getprop prints 1.
                    if not boot_completed:
                        target.sendline('getprop sys.boot_completed')
                        boot_completed = target.expect(['0.*', '1.*'], timeout=10)
                    if not got_ip_address:
                        target.sendline('getprop dhcp.eth0.ipaddress')
                        # regexes are processed in order, so ip regex has to
                        # come first (as we only want to match new line if we
                        # don't match the IP). We do a "not" make the logic
                        # consistent with boot_completed.
                        got_ip_address = not target.expect(['[1-9]\d*.\d+.\d+.\d+', '\n'], timeout=10)
                except pexpect.TIMEOUT:
                    pass  # We have our own timeout -- see below.
                if boot_completed and got_ip_address:
                    break
                time.sleep(5)
                if (time.time() - started_waiting_time) > self.config.init_timeout:
                    raise DeviceError('Timed out waiting for the device to initialize.')

        self._has_booted = True
+
    def connect(self):
        # Establish the adb-over-TCP connection, discovering the board's IP
        # address over serial unless an explicit adb_name was configured.
        if not self._is_ready:
            if self.config.adb_name:
                self.adb_name = self.config.adb_name  # pylint: disable=attribute-defined-outside-init
            else:
                with open_serial_connection(timeout=self.config.serial_max_timeout,
                                            port=self.config.serial_device,
                                            baudrate=self.config.serial_baud) as target:
                    # Get IP address and push the Gator and PMU logger.
                    target.sendline('su')  # as of Android v5.0.2, Linux does not boot into root shell
                    target.sendline('netcfg')
                    ipaddr_re = re.compile('eth0 +UP +(.+)/.+', re.MULTILINE)
                    target.expect(ipaddr_re)
                    output = target.after
                    match = re.search('eth0 +UP +(.+)/.+', output)
                    if not match:
                        raise DeviceError('Could not get adb IP address.')
                    ipaddr = match.group(1)

                    # Connect to device using adb.
                    target.expect(self.android_prompt)  # pylint: disable=E1101
                    self.adb_name = ipaddr + ":5555"  # pylint: disable=W0201

            # Drop any stale connection to the same address before connecting.
            if self.adb_name in adb_list_devices():
                adb_disconnect(self.adb_name)
            adb_connect(self.adb_name)
            self._is_ready = True
            # Wake the screen and keep it on while plugged in.
            self.execute("input keyevent 82", timeout=ADB_SHELL_TIMEOUT)
            self.execute("svc power stayon true", timeout=ADB_SHELL_TIMEOUT)
+
    def disconnect(self):
        # Drop the adb-over-TCP connection; the serial channel is managed
        # separately by boot()/connect().
        adb_disconnect(self.adb_name)
        self._is_ready = False
+
    # TC2-specific methods. You should avoid calling these in
    # Workloads/Instruments as that would tie them to TC2 (and if that is
    # the case, then you should set the supported_devices parameter in the
    # Workload/Instrument accordingly). Most of these can be replaced with a
    # call to set_runtime_parameters.

    def get_cpuidle(self):
        # Returns the current 'disable' flag of the C1 (cluster shutdown)
        # idle state on cpu0.
        return self.get_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable')

    def enable_idle_states(self):
        """
        Fully enables idle states on TC2.
        See http://wiki.arm.com/Research/TC2SetupAndUsage ("Enabling Idle Modes" section)
        and http://wiki.arm.com/ASD/ControllingPowerManagementInLinaroKernels

        """
        # Enable C1 (cluster shutdown).
        self.set_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable', 0, verify=False)
        # Enable C0 on A15 cluster.
        self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 0, verify=False)
        # Enable C0 on A7 cluster.
        self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 1, verify=False)

    def disable_idle_states(self):
        """
        Disable idle states on TC2.
        See http://wiki.arm.com/Research/TC2SetupAndUsage ("Enabling Idle Modes" section)
        and http://wiki.arm.com/ASD/ControllingPowerManagementInLinaroKernels

        """
        # Disable C1 (cluster shutdown).
        self.set_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable', 1, verify=False)
        # Disable C0.
        self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 0xFF, verify=False)
+
    def set_irq_affinity(self, cluster):
        """
        Sets IRQ affinity to the specified cluster.

        This method will only work if the device mode is mp_a7_bootcluster or
        mp_a15_bootcluster. This operation does not make sense if there is only one
        cluster active (all IRQs will obviously go to that), and it will not work for
        IKS kernel because clusters are not exposed to sysfs.

        :param cluster: must be either 'a15' or 'a7'.

        """
        if self.config.mode not in ('mp_a7_bootcluster', 'mp_a15_bootcluster'):
            raise ConfigError('Cannot set IRQ affinity with mode {}'.format(self.config.mode))
        # CPU masks per the branches below: 0xc07 targets the A7 cluster,
        # 0xc0f the A15 cluster.
        if cluster == 'a7':
            self.execute('/sbin/set_irq_affinity.sh 0xc07', check_exit_code=False)
        elif cluster == 'a15':
            self.execute('/sbin/set_irq_affinity.sh 0xc0f', check_exit_code=False)
        else:
            raise ConfigError('cluster must either "a15" or "a7"; got {}'.format(cluster))
+
+ def _boot_using_uefi(self, target):
+ self.logger.debug('Booting using UEFI.')
+ self._wait_for_vemsd_mount(target)
+ self._setup_before_reboot()
+ self._perform_uefi_reboot(target)
+
+ # Get to the UEFI menu.
+ self.logger.debug('Waiting for UEFI default selection.')
+ target.sendline('reboot')
+ target.expect('The default boot selection will start in'.rstrip())
+ time.sleep(1)
+ target.sendline(''.rstrip())
+
+ # If delete every time is specified, try to delete entry.
+ if self.config.always_delete_uefi_entry:
+ self._delete_uefi_entry(target, entry='workload_automation_MP')
+ self.config.always_delete_uefi_entry = False
+
+ # Specify argument to be passed specifying that psci is (or is not) enabled
+ if self.config.psci_enable:
+ psci_enable = ' psci=enable'
+ else:
+ psci_enable = ''
+
+ # Identify the workload automation entry.
+ selection_pattern = r'\[([0-9]*)\] '
+
+ try:
+ target.expect(re.compile(selection_pattern + 'workload_automation_MP'), timeout=5)
+ wl_menu_item = target.match.group(1)
+ except pexpect.TIMEOUT:
+ self._create_uefi_entry(target, psci_enable, entry_name='workload_automation_MP')
+ # At this point the board should be rebooted so we need to retry to boot
+ self._boot_using_uefi(target)
+ else: # Did not time out.
+ try:
+ #Identify the boot manager menu item
+ target.expect(re.compile(selection_pattern + 'Boot Manager'))
+ boot_manager_menu_item = target.match.group(1)
+
+ #Update FDT
+ target.sendline(boot_manager_menu_item)
+ target.expect(re.compile(selection_pattern + 'Update FDT path'), timeout=15)
+ update_fdt_menu_item = target.match.group(1)
+ target.sendline(update_fdt_menu_item)
+ target.expect(re.compile(selection_pattern + 'NOR Flash .*'), timeout=15)
+ bootmonfs_menu_item = target.match.group(1)
+ target.sendline(bootmonfs_menu_item)
+ target.expect('File path of the FDT blob:')
+ target.sendline(self.config.dtb)
+
+ #Return to main menu and boot from wl automation
+ target.expect(re.compile(selection_pattern + 'Return to main menu'), timeout=15)
+ return_to_main_menu_item = target.match.group(1)
+ target.sendline(return_to_main_menu_item)
+ target.sendline(wl_menu_item)
+ except pexpect.TIMEOUT:
+ raise DeviceError('Timed out')
+
+ def _setup_before_reboot(self):
+ if not self.config.disable_boot_configuration:
+ self.logger.debug('Performing pre-boot setup.')
+ substitution = {
+ 'SCC_0x010': self.config.SCC_0x010,
+ 'SCC_0x700': self.config.SCC_0x700,
+ }
+ with open(self.config.src_board_template_file, 'r') as fh:
+ template_board_txt = string.Template(fh.read())
+ with open(self.config.src_board_file, 'w') as wfh:
+ wfh.write(template_board_txt.substitute(substitution))
+
+ with open(self.config.src_images_template_file, 'r') as fh:
+ template_images_txt = string.Template(fh.read())
+ with open(self.config.src_images_file, 'w') as wfh:
+ wfh.write(template_images_txt.substitute({'bm_image': self.config.bm_image}))
+
+ shutil.copyfile(self.config.src_board_file,
+ os.path.join(self.config.board_dir, self.config.board_file))
+ shutil.copyfile(self.config.src_images_file,
+ os.path.join(self.config.board_dir, self.config.images_file))
+ os.system('sync') # make sure everything is flushed to microSD
+ else:
+ self.logger.debug('Boot configuration disabled proceeding with existing board.txt and images.txt.')
+
+ def _delete_uefi_entry(self, target, entry): # pylint: disable=R0201
+ """
+ This method deletes the entry specified as a parameter.
+ As a precondition, serial port input needs to be parsed AT MOST up to
+ the point BEFORE recognizing this entry (both entry and boot manager have
+ not yet been parsed).
+
+ """
+ try:
+ selection_pattern = r'\[([0-9]+)\] *'
+
+ try:
+ target.expect(re.compile(selection_pattern + entry), timeout=5)
+ wl_menu_item = target.match.group(1)
+ except pexpect.TIMEOUT:
+ return # Entry does not exist, nothing to delete here...
+
+ # Identify and select boot manager menu item
+ target.expect(selection_pattern + 'Boot Manager', timeout=15)
+ bootmanager_item = target.match.group(1)
+ target.sendline(bootmanager_item)
+
+ # Identify and select 'Remove entry'
+ target.expect(selection_pattern + 'Remove Boot Device Entry', timeout=15)
+ new_entry_item = target.match.group(1)
+ target.sendline(new_entry_item)
+
+ # Delete entry
+ target.expect(re.compile(selection_pattern + entry), timeout=5)
+ wl_menu_item = target.match.group(1)
+ target.sendline(wl_menu_item)
+
+ # Return to main menu
+ target.expect(re.compile(selection_pattern + 'Return to main menu'), timeout=15)
+ return_to_main_menu_item = target.match.group(1)
+ target.sendline(return_to_main_menu_item)
+ except pexpect.TIMEOUT:
+ raise DeviceError('Timed out while deleting UEFI entry.')
+
+ def _create_uefi_entry(self, target, psci_enable, entry_name):
+ """
+ Creates the default boot entry that is expected when booting in uefi mode.
+
+ """
+ self._wait_for_vemsd_mount(target)
+ try:
+ selection_pattern = '\[([0-9]+)\] *'
+
+ # Identify and select boot manager menu item.
+ target.expect(selection_pattern + 'Boot Manager', timeout=15)
+ bootmanager_item = target.match.group(1)
+ target.sendline(bootmanager_item)
+
+ # Identify and select 'add new entry'.
+ target.expect(selection_pattern + 'Add Boot Device Entry', timeout=15)
+ new_entry_item = target.match.group(1)
+ target.sendline(new_entry_item)
+
+ # Identify and select BootMonFs.
+ target.expect(selection_pattern + 'NOR Flash .*', timeout=15)
+ BootMonFs_item = target.match.group(1)
+ target.sendline(BootMonFs_item)
+
+ # Specify the parameters of the new entry.
+ target.expect('.+the kernel', timeout=5)
+ target.sendline(self.config.kernel) # kernel path
+ target.expect('Has FDT support\?.*\[y\/n\].*', timeout=5)
+ time.sleep(0.5)
+ target.sendline('y') # Has Fdt support? -> y
+ target.expect('Add an initrd.*\[y\/n\].*', timeout=5)
+ time.sleep(0.5)
+ target.sendline('y') # add an initrd? -> y
+ target.expect('.+the initrd.*', timeout=5)
+ time.sleep(0.5)
+ target.sendline(self.config.initrd) # initrd path
+ target.expect('.+to the binary.*', timeout=5)
+ time.sleep(0.5)
+ _slow_sendline(target, self.config.kernel_arguments + psci_enable) # arguments to pass to binary
+ time.sleep(0.5)
+ target.expect('.+new Entry.+', timeout=5)
+ _slow_sendline(target, entry_name) # Entry name
+ target.expect('Choice.+', timeout=15)
+ time.sleep(2)
+ except pexpect.TIMEOUT:
+ raise DeviceError('Timed out while creating UEFI entry.')
+ self._perform_uefi_reboot(target)
+
+ def _perform_uefi_reboot(self, target):
+ self._wait_for_vemsd_mount(target)
+ open(os.path.join(self.config.root_mount, 'reboot.txt'), 'a').close()
+
+ def _wait_for_vemsd_mount(self, target, timeout=100):
+ attempts = 1 + self.config.reboot_attempts
+ if os.path.exists(os.path.join(self.config.root_mount, 'config.txt')):
+ return
+
+ self.logger.debug('Waiting for VEMSD to mount...')
+ for i in xrange(attempts):
+ if i: # Do not reboot on the first attempt.
+ target.sendline('reboot')
+ target.sendline('usb_on')
+ for _ in xrange(timeout):
+ time.sleep(1)
+ if os.path.exists(os.path.join(self.config.root_mount, 'config.txt')):
+ return
+
+ raise DeviceError('Timed out waiting for VEMSD to mount.')
+
+ def _boot_using_bootmon(self, target):
+ """
+ This method boots TC2 using the bootmon interface.
+ """
+ self.logger.debug('Booting using bootmon.')
+
+ try:
+ self._wait_for_vemsd_mount(target, timeout=20)
+ except DeviceError:
+ # OK, something's wrong. Reboot the board and try again.
+ self.logger.debug('VEMSD not mounted, attempting to power cycle device.')
+ target.sendline(' ')
+ state = target.expect(['Cmd> ', self.config.bootmon_prompt, self.android_prompt]) # pylint: disable=E1101
+
+ if state == 0 or state == 1:
+ # Reboot - Bootmon
+ target.sendline('reboot')
+ target.expect('Powering up system...')
+ elif state == 2:
+ target.sendline('reboot -n')
+ target.expect('Powering up system...')
+ else:
+ raise DeviceError('Unexpected board state {}; should be 0, 1 or 2'.format(state))
+
+ self._wait_for_vemsd_mount(target)
+
+ self._setup_before_reboot()
+
+ # Reboot - Bootmon
+ self.logger.debug('Rebooting into bootloader...')
+ open(os.path.join(self.config.root_mount, 'reboot.txt'), 'a').close()
+ target.expect('Powering up system...')
+ target.expect(self.config.bootmon_prompt)
+
+ # Wait for VEMSD to mount
+ self._wait_for_vemsd_mount(target)
+
+ #Boot Linux - Bootmon
+ target.sendline('fl linux fdt ' + self.config.dtb)
+ target.expect(self.config.bootmon_prompt)
+ target.sendline('fl linux initrd ' + self.config.initrd)
+ target.expect(self.config.bootmon_prompt)
+ target.sendline('fl linux boot ' + self.config.kernel + self.config.kernel_arguments)
+
+
+# Utility functions.
+
+def _slow_sendline(target, line):
+ for c in line:
+ target.send(c)
+ time.sleep(0.1)
+ target.sendline('')
+
diff --git a/wlauto/devices/android/tc2/resources/board_template.txt b/wlauto/devices/android/tc2/resources/board_template.txt
new file mode 100644
index 00000000..39535d13
--- /dev/null
+++ b/wlauto/devices/android/tc2/resources/board_template.txt
@@ -0,0 +1,96 @@
+BOARD: HBI0249
+TITLE: V2P-CA15_A7 Configuration File
+
+[DCCS]
+TOTALDCCS: 1 ;Total Number of DCCS
+M0FILE: dbb_v110.ebf ;DCC0 Filename
+M0MODE: MICRO ;DCC0 Programming Mode
+
+[FPGAS]
+TOTALFPGAS: 0 ;Total Number of FPGAs
+
+[TAPS]
+TOTALTAPS: 3 ;Total Number of TAPs
+T0NAME: STM32TMC ;TAP0 Device Name
+T0FILE: NONE ;TAP0 Filename
+T0MODE: NONE ;TAP0 Programming Mode
+T1NAME: STM32CM3 ;TAP1 Device Name
+T1FILE: NONE ;TAP1 Filename
+T1MODE: NONE ;TAP1 Programming Mode
+T2NAME: CORTEXA15 ;TAP2 Device Name
+T2FILE: NONE ;TAP2 Filename
+T2MODE: NONE ;TAP2 Programming Mode
+
+[OSCCLKS]
+TOTALOSCCLKS: 9 ;Total Number of OSCCLKS
+OSC0: 50.0 ;CPUREFCLK0 A15 CPU (20:1 - 1.0GHz)
+OSC1: 50.0 ;CPUREFCLK1 A15 CPU (20:1 - 1.0GHz)
+OSC2: 40.0 ;CPUREFCLK0 A7 CPU (20:1 - 800MHz)
+OSC3: 40.0 ;CPUREFCLK1 A7 CPU (20:1 - 800MHz)
+OSC4: 40.0 ;HSBM AXI (40MHz)
+OSC5: 23.75 ;HDLCD (23.75MHz - TC PLL is in bypass)
+OSC6: 50.0 ;SMB (50MHz)
+OSC7: 50.0 ;SYSREFCLK (20:1 - 1.0GHz, ACLK - 500MHz)
+OSC8: 50.0 ;DDR2 (8:1 - 400MHz)
+
+[SCC REGISTERS]
+TOTALSCCS: 33 ;Total Number of SCC registers
+
+;SCC: 0x010 0x000003D0 ;Remap to NOR0
+SCC: 0x010 $SCC_0x010 ;Switch between NOR0/NOR1
+SCC: 0x01C 0xFF00FF00 ;CFGRW3 - SMC CS6/7 N/U
+SCC: 0x118 0x01CD1011 ;CFGRW17 - HDLCD PLL external bypass
+;SCC: 0x700 0x00320003 ;CFGRW48 - [25:24]Boot CPU [28]Boot Cluster (default CA7_0)
+SCC: 0x700 $SCC_0x700 ;CFGRW48 - [25:24]Boot CPU [28]Boot Cluster (default CA7_0)
+ ; Bootmon configuration:
+ ; [15]: A7 Event stream generation (default: disabled)
+ ; [14]: A15 Event stream generation (default: disabled)
+ ; [13]: Power down the non-boot cluster (default: disabled)
+ ; [12]: Use per-cpu mailboxes for power management (default: disabled)
+ ; [11]: A15 executes WFEs as nops (default: disabled)
+
+SCC: 0x400 0x33330c00 ;CFGREG41 - A15 configuration register 0 (Default 0x33330c80)
+ ; [29:28] SPNIDEN
+ ; [25:24] SPIDEN
+ ; [21:20] NIDEN
+ ; [17:16] DBGEN
+ ; [13:12] CFGTE
+ ; [9:8] VINITHI_CORE
+ ; [7] IMINLN
+ ; [3:0] CLUSTER_ID
+
+ ;Set the CPU clock PLLs
+SCC: 0x120 0x022F1010 ;CFGRW19 - CA15_0 PLL control - 20:1 (lock OFF)
+SCC: 0x124 0x0011710D ;CFGRW20 - CA15_0 PLL value
+SCC: 0x128 0x022F1010 ;CFGRW21 - CA15_1 PLL control - 20:1 (lock OFF)
+SCC: 0x12C 0x0011710D ;CFGRW22 - CA15_1 PLL value
+SCC: 0x130 0x022F1010 ;CFGRW23 - CA7_0 PLL control - 20:1 (lock OFF)
+SCC: 0x134 0x0011710D ;CFGRW24 - CA7_0 PLL value
+SCC: 0x138 0x022F1010 ;CFGRW25 - CA7_1 PLL control - 20:1 (lock OFF)
+SCC: 0x13C 0x0011710D ;CFGRW26 - CA7_1 PLL value
+
+ ;Power management interface
+SCC: 0xC00 0x00000005 ;Control: [0]PMI_EN [1]DBG_EN [2]SPC_SYSCFG
+SCC: 0xC04 0x060E0356 ;Latency in uS max: [15:0]DVFS [31:16]PWRUP
+SCC: 0xC08 0x00000000 ;Reserved
+SCC: 0xC0C 0x00000000 ;Reserved
+
+ ;CA15 performance values: 0xVVVFFFFF
+SCC: 0xC10 0x384061A8 ;CA15 PERFVAL0, 900mV, 20,000*20= 500MHz
+SCC: 0xC14 0x38407530 ;CA15 PERFVAL1, 900mV, 25,000*20= 600MHz
+SCC: 0xC18 0x384088B8 ;CA15 PERFVAL2, 900mV, 30,000*20= 700MHz
+SCC: 0xC1C 0x38409C40 ;CA15 PERFVAL3, 900mV, 35,000*20= 800MHz
+SCC: 0xC20 0x3840AFC8 ;CA15 PERFVAL4, 900mV, 40,000*20= 900MHz
+SCC: 0xC24 0x3840C350 ;CA15 PERFVAL5, 900mV, 45,000*20=1000MHz
+SCC: 0xC28 0x3CF0D6D8 ;CA15 PERFVAL6, 975mV, 50,000*20=1100MHz
+SCC: 0xC2C 0x41A0EA60 ;CA15 PERFVAL7, 1050mV, 55,000*20=1200MHz
+
+ ;CA7 performance values: 0xVVVFFFFF
+SCC: 0xC30 0x3840445C ;CA7 PERFVAL0, 900mV, 10,000*20= 350MHz
+SCC: 0xC34 0x38404E20 ;CA7 PERFVAL1, 900mV, 15,000*20= 400MHz
+SCC: 0xC38 0x384061A8 ;CA7 PERFVAL2, 900mV, 20,000*20= 500MHz
+SCC: 0xC3C 0x38407530 ;CA7 PERFVAL3, 900mV, 25,000*20= 600MHz
+SCC: 0xC40 0x384088B8 ;CA7 PERFVAL4, 900mV, 30,000*20= 700MHz
+SCC: 0xC44 0x38409C40 ;CA7 PERFVAL5, 900mV, 35,000*20= 800MHz
+SCC: 0xC48 0x3CF0AFC8 ;CA7 PERFVAL6, 975mV, 40,000*20= 900MHz
+SCC: 0xC4C 0x41A0C350 ;CA7 PERFVAL7, 1050mV, 45,000*20=1000MHz
diff --git a/wlauto/devices/android/tc2/resources/images_iks.txt b/wlauto/devices/android/tc2/resources/images_iks.txt
new file mode 100644
index 00000000..05707092
--- /dev/null
+++ b/wlauto/devices/android/tc2/resources/images_iks.txt
@@ -0,0 +1,25 @@
+TITLE: Versatile Express Images Configuration File
+
+[IMAGES]
+TOTALIMAGES: 4 ;Number of Images (Max : 32)
+NOR0UPDATE: AUTO ;Image Update:NONE/AUTO/FORCE
+NOR0ADDRESS: BOOT ;Image Flash Address
+NOR0FILE: \SOFTWARE\$bm_image ;Image File Name
+
+NOR1UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
+NOR1ADDRESS: 0x00000000 ;Image Flash Address
+NOR1FILE: \SOFTWARE\kern_iks.bin ;Image File Name
+NOR1LOAD: 0x80008000
+NOR1ENTRY: 0x80008000
+
+NOR2UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
+NOR2ADDRESS: 0x00000000 ;Image Flash Address
+NOR2FILE: \SOFTWARE\iks.dtb ;Image File Name for booting in A7 cluster
+NOR2LOAD: 0x84000000
+NOR2ENTRY: 0x84000000
+
+NOR3UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
+NOR3ADDRESS: 0x00000000 ;Image Flash Address
+NOR3FILE: \SOFTWARE\init_iks.bin ;Image File Name
+NOR3LOAD: 0x90100000
+NOR3ENTRY: 0x90100000
diff --git a/wlauto/devices/android/tc2/resources/images_mp.txt b/wlauto/devices/android/tc2/resources/images_mp.txt
new file mode 100644
index 00000000..e671a74b
--- /dev/null
+++ b/wlauto/devices/android/tc2/resources/images_mp.txt
@@ -0,0 +1,55 @@
+TITLE: Versatile Express Images Configuration File
+[IMAGES]
+TOTALIMAGES: 9 ;Number of Images (Max: 32)
+NOR0UPDATE: AUTO ;Image Update:NONE/AUTO/FORCE
+NOR0ADDRESS: BOOT ;Image Flash Address
+NOR0FILE: \SOFTWARE\$bm_image ;Image File Name
+
+NOR1UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
+NOR1ADDRESS: 0x0E000000 ;Image Flash Address
+NOR1FILE: \SOFTWARE\kern_mp.bin ;Image File Name
+NOR1LOAD: 0x80008000
+NOR1ENTRY: 0x80008000
+
+NOR2UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
+NOR2ADDRESS: 0x0E800000 ;Image Flash Address
+NOR2FILE: \SOFTWARE\mp_a7.dtb ;Image File Name for booting in A7 cluster
+NOR2LOAD: 0x84000000
+NOR2ENTRY: 0x84000000
+
+NOR3UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
+NOR3ADDRESS: 0x0E900000 ;Image Flash Address
+NOR3FILE: \SOFTWARE\mp_a15.dtb ;Image File Name
+NOR3LOAD: 0x84000000
+NOR3ENTRY: 0x84000000
+
+NOR4UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
+NOR4ADDRESS: 0x0EA00000 ;Image Flash Address
+NOR4FILE: \SOFTWARE\mp_a7bc.dtb ;Image File Name
+NOR4LOAD: 0x84000000
+NOR4ENTRY: 0x84000000
+
+NOR5UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
+NOR5ADDRESS: 0x0EB00000 ;Image Flash Address
+NOR5FILE: \SOFTWARE\mp_a15bc.dtb ;Image File Name
+NOR5LOAD: 0x84000000
+NOR5ENTRY: 0x84000000
+
+NOR6UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
+NOR6ADDRESS: 0x0EC00000 ;Image Flash Address
+NOR6FILE: \SOFTWARE\init_mp.bin ;Image File Name
+NOR6LOAD: 0x85000000
+NOR6ENTRY: 0x85000000
+
+NOR7UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
+NOR7ADDRESS: 0x0C000000 ;Image Flash Address
+NOR7FILE: \SOFTWARE\tc2_sec.bin ;Image File Name
+NOR7LOAD: 0
+NOR7ENTRY: 0
+
+NOR8UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
+NOR8ADDRESS: 0x0D000000 ;Image Flash Address
+NOR8FILE: \SOFTWARE\tc2_uefi.bin ;Image File Name
+NOR8LOAD: 0
+NOR8ENTRY: 0
+
diff --git a/wlauto/devices/linux/__init__.py b/wlauto/devices/linux/__init__.py
new file mode 100644
index 00000000..16224d6f
--- /dev/null
+++ b/wlauto/devices/linux/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
diff --git a/wlauto/devices/linux/generic/__init__.py b/wlauto/devices/linux/generic/__init__.py
new file mode 100644
index 00000000..d6fb67a5
--- /dev/null
+++ b/wlauto/devices/linux/generic/__init__.py
@@ -0,0 +1,37 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto import LinuxDevice, Parameter
+
+
+class GenericDevice(LinuxDevice):
+ name = 'generic_linux'
+ description = """
+ Generic Linux device. Use this if you do not have a device file for
+ your device.
+
+ This implements the minimum functionality that should be supported by
+ all Linux devices.
+
+ """
+
+ abi = 'armeabi'
+ has_gpu = True
+
+ parameters = [
+ Parameter('core_names', default=[], override=True),
+ Parameter('core_clusters', default=[], override=True),
+ ]
diff --git a/wlauto/devices/linux/odroidxu3_linux/__init__.py b/wlauto/devices/linux/odroidxu3_linux/__init__.py
new file mode 100644
index 00000000..f174950a
--- /dev/null
+++ b/wlauto/devices/linux/odroidxu3_linux/__init__.py
@@ -0,0 +1,35 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto import LinuxDevice, Parameter
+
+
+class OdroidXU3LinuxDevice(LinuxDevice):
+
+ name = "odroidxu3_linux"
+ description = 'HardKernel Odroid XU3 development board (Ubuntu image).'
+
+ core_modules = [
+ 'odroidxu3-fan',
+ ]
+
+ parameters = [
+ Parameter('core_names', default=['a7', 'a7', 'a7', 'a7', 'a15', 'a15', 'a15', 'a15'], override=True),
+ Parameter('core_clusters', default=[0, 0, 0, 0, 1, 1, 1, 1], override=True),
+ ]
+
+ abi = 'armeabi'
+
diff --git a/wlauto/exceptions.py b/wlauto/exceptions.py
new file mode 100644
index 00000000..36f3050a
--- /dev/null
+++ b/wlauto/exceptions.py
@@ -0,0 +1,143 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto.utils.misc import get_traceback, TimeoutError # NOQA pylint: disable=W0611
+
+
+class WAError(Exception):
+ """Base class for all Workload Automation exceptions."""
+ pass
+
+
+class NotFoundError(WAError):
+ """Raised when the specified item is not found."""
+ pass
+
+
+class ValidationError(WAError):
+ """Raised on failure to validate an extension."""
+ pass
+
+
+class DeviceError(WAError):
+ """General Device error."""
+ pass
+
+
+class DeviceNotRespondingError(WAError):
+ """The device is not responding."""
+
+ def __init__(self, device):
+ super(DeviceNotRespondingError, self).__init__('Device {} is not responding.'.format(device))
+
+
+class WorkloadError(WAError):
+ """General Workload error."""
+ pass
+
+
+class HostError(WAError):
+ """Problem with the host on which WA is running."""
+ pass
+
+
+class ModuleError(WAError):
+ """
+ Problem with a module.
+
+ .. note:: Modules for specific extension types should raise exceptions
+ appropriate to that extension. E.g. a ``Device`` module should raise
+ ``DeviceError``. This is intended for situations where a module is
+ unsure (and/or doesn't care) what its owner is.
+
+ """
+ pass
+
+
+class InstrumentError(WAError):
+ """General Instrument error."""
+ pass
+
+
+class ResultProcessorError(WAError):
+ """General ResultProcessor error."""
+ pass
+
+
+class ResourceError(WAError):
+ """General Resolver error."""
+ pass
+
+
+class CommandError(WAError):
+ """Raised by commands when they have encountered an error condition
+ during execution."""
+ pass
+
+
+class ToolError(WAError):
+ """Raised by tools when they have encountered an error condition
+ during execution."""
+ pass
+
+
+class LoaderError(WAError):
+ """Raised when there is an error loading an extension or
+ an external resource. Apart from the usual message, the __init__
+ takes an exc_info parameter which should be the result of
+ sys.exc_info() for the original exception (if any) that
+ caused the error."""
+
+ def __init__(self, message, exc_info=None):
+ super(LoaderError, self).__init__(message)
+ self.exc_info = exc_info
+
+ def __str__(self):
+ if self.exc_info:
+ orig = self.exc_info[1]
+ orig_name = type(orig).__name__
+ if isinstance(orig, WAError):
+ reason = 'because of:\n{}: {}'.format(orig_name, orig)
+ else:
+ reason = 'because of:\n{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig)
+ return '\n'.join([self.message, reason])
+ else:
+ return self.message
+
+
+class ConfigError(WAError):
+ """Raised when configuration provided is invalid. This error suggests that
+ the user should modify their config and try again."""
+ pass
+
+
+class WorkerThreadError(WAError):
+ """
+ This should get raised in the main thread if a non-WAError-derived exception occurs on
+ a worker/background thread. If a WAError-derived exception is raised in the worker, then
+ that exception should be re-raised on the main thread directly -- the main point of this is
+ to preserve the backtrace in the output, and backtrace doesn't get output for WAErrors.
+
+ """
+
+ def __init__(self, thread, exc_info):
+ self.thread = thread
+ self.exc_info = exc_info
+ orig = self.exc_info[1]
+ orig_name = type(orig).__name__
+ message = 'Exception of type {} occured on thread {}:\n'.format(orig_name, thread)
+ message += '{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig)
+ super(WorkerThreadError, self).__init__(message)
diff --git a/wlauto/external/README b/wlauto/external/README
new file mode 100644
index 00000000..16d22a8e
--- /dev/null
+++ b/wlauto/external/README
@@ -0,0 +1,74 @@
+This directory contains external libraries and standalone utilities which have
+been written/modified to work with Workload Automation (and thus need to be
+included with WA rather than obtained from original sources).
+
+
+bbench_server
+=============
+
+This is a small server that is used to detect when ``bbench`` workload has completed.
+``bbench`` navigates through a bunch of web pages in a browser using javascript.
+It will cause the browser to send a GET request to the port the bbench_server is
+listening on, indicating the end of workload.
+
+
+daq_server
+==========
+
+Contains Daq server files that will run on a Windows machine. Please refer to
+daq instrument documentation.
+
+
+louie (third party)
+=====
+
+Python package that is itself a fork (and now, a replacement for) pydispatcher.
+This library provides a signal dispatching mechanism. This has been modified for
+WA to add prioritization to callbacks.
+
+
+pmu_logger
+==========
+
+Source for the kernel driver that enable the logging of CCI counters to ftrace
+on periodic basis. This driver is required by the ``cci_pmu_logger`` instrument.
+
+
+readenergy
+==========
+
+Outputs Juno internal energy/power/voltage/current measurements by reading APB
+registers from memory. This is used by ``juno_energy`` instrument.
+
+
+revent
+======
+
+This is a tool that is used to both record and playback key press and screen tap
+events. It is used to record UI manipulation for some workloads (such as games)
+where it is not possible to use the Android UI Automator.
+
+The tool is also included in binary form in wlauto/common/. In order to build
+the tool from source, you will need to have Android NDK in your PATH.
+
+
+stacktracer.py (third party)
+==============
+
+A module based on an ActiveState recipe that allows tracing thread stacks during
+execution of a Python program. This is used through the ``--debug`` flag in WA
+to ease debugging multi-threaded parts of the code.
+
+
+terminalsize.py (third party)
+===============
+
+Implements a platform-agnostic way of determining terminal window size. Taken
+from a public Github gist.
+
+
+uiauto
+======
+
+Contains the utilities library for UI automation.
+
diff --git a/wlauto/external/bbench_server/build.sh b/wlauto/external/bbench_server/build.sh
new file mode 100755
index 00000000..0c36467a
--- /dev/null
+++ b/wlauto/external/bbench_server/build.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+BUILD_COMMAND=ndk-build
+
+if [[ $(which $BUILD_COMMAND) ]] ; then
+ $BUILD_COMMAND
+ if [[ $? ]]; then
+ echo Coping to ../../workloads/bbench/
+ cp libs/armeabi/bbench_server ../../workloads/bbench/bin/armeabi/bbench_server
+ fi
+else
+ echo Please make sure you have Android NDK in your PATH.
+ exit 1
+fi
+
diff --git a/wlauto/external/bbench_server/jni/Android.mk b/wlauto/external/bbench_server/jni/Android.mk
new file mode 100644
index 00000000..d6d40a08
--- /dev/null
+++ b/wlauto/external/bbench_server/jni/Android.mk
@@ -0,0 +1,9 @@
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES:= bbench_server.cpp
+LOCAL_MODULE := bbench_server
+LOCAL_MODULE_TAGS := optional
+LOCAL_STATIC_LIBRARIES := libc
+LOCAL_SHARED_LIBRARIES :=
+include $(BUILD_EXECUTABLE)
diff --git a/wlauto/external/bbench_server/jni/bbench_server.cpp b/wlauto/external/bbench_server/jni/bbench_server.cpp
new file mode 100755
index 00000000..9b1e87d4
--- /dev/null
+++ b/wlauto/external/bbench_server/jni/bbench_server.cpp
@@ -0,0 +1,151 @@
+/* Copyright 2012-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+/**************************************************************************/
+/* Simple HTTP server program that will return on accepting connection */
+/**************************************************************************/
+
+/* Tested on Android ICS browser and FireFox browser */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netdb.h>
+#include <arpa/inet.h>
+#include <sys/wait.h>
+
+#define SERVERPORT "3030"
+
+void ExitOnError(int condition, const char *msg)
+{
+ if(condition) { printf("Server: %s\n", msg); exit(1);}
+}
+
+void *GetInetAddr(struct sockaddr *sa)
+{
+ if (sa->sa_family == AF_INET)
+ {
+ return &(((struct sockaddr_in*)sa)->sin_addr);
+ }
+ else
+ {
+ return &(((struct sockaddr_in6*)sa)->sin6_addr);
+ }
+}
+
+int main(int argc, char *argv[])
+{
+
+ socklen_t addr_size;
+ struct addrinfo hints, *res;
+ int server_fd, client_fd;
+ int retval;
+ int timeout_in_seconds;
+
+ // Get the timeout value in seconds
+ if(argc < 2)
+ {
+ printf("Usage %s <timeout in seconds>\n", argv[0]);
+ exit(1);
+ }
+ else
+ {
+ timeout_in_seconds = atoi(argv[1]);
+ printf("Server: Waiting for connection on port %s with timeout of %d seconds\n", SERVERPORT, timeout_in_seconds);
+
+ }
+
+ /**************************************************************************/
+ /* Listen to a socket */
+ /**************************************************************************/
+ memset(&hints, 0, sizeof hints);
+ hints.ai_family = AF_UNSPEC; // use IPv4 or IPv6, whichever
+ hints.ai_socktype = SOCK_STREAM;
+ hints.ai_flags = AI_PASSIVE; // fill in my IP for me
+
+ getaddrinfo(NULL, SERVERPORT, &hints, &res);
+
+
+ server_fd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
+ ExitOnError(server_fd < 0, "Socket creation failed");
+
+ retval = bind(server_fd, res->ai_addr, res->ai_addrlen);
+ ExitOnError(retval < 0, "Bind failed");
+
+ retval = listen(server_fd, 10);
+ ExitOnError(retval < 0, "Listen failed");
+
+ /**************************************************************************/
+ /* Wait for connection to arrive or time out */
+ /**************************************************************************/
+ fd_set readfds;
+ FD_ZERO(&readfds);
+ FD_SET(server_fd, &readfds);
+
+ // Timeout parameter
+ timeval tv;
+ tv.tv_sec = timeout_in_seconds;
+ tv.tv_usec = 0;
+
+ int ret = select(server_fd+1, &readfds, NULL, NULL, &tv);
+ ExitOnError(ret <= 0, "No connection established, timed out");
+ ExitOnError(FD_ISSET(server_fd, &readfds) == 0, "Error occured in select");
+
+ /**************************************************************************/
+ /* Accept connection and print the information */
+ /**************************************************************************/
+ {
+ struct sockaddr_storage client_addr;
+ char client_addr_string[INET6_ADDRSTRLEN];
+ addr_size = sizeof client_addr;
+ client_fd = accept(server_fd, (struct sockaddr *)&client_addr, &addr_size);
+ ExitOnError(client_fd < 0, "Accept failed");
+
+ inet_ntop(client_addr.ss_family,
+ GetInetAddr((struct sockaddr *)&client_addr),
+ client_addr_string,
+ sizeof client_addr_string);
+ printf("Server: Received connection from %s\n", client_addr_string);
+ }
+
+
+ /**************************************************************************/
+ /* Send a acceptable HTTP response */
+ /**************************************************************************/
+ {
+
+ char response[] = "HTTP/1.1 200 OK\r\n"
+ "Content-Type: text/html\r\n"
+ "Connection: close\r\n"
+ "\r\n"
+ "<html>"
+ "<head>Local Server: Connection Accepted</head>"
+ "<body></body>"
+ "</html>";
+ int bytes_sent;
+ bytes_sent = send(client_fd, response, strlen(response), 0);
+ ExitOnError(bytes_sent < 0, "Sending Response failed");
+ }
+
+
+ close(client_fd);
+ close(server_fd);
+ return 0;
+}
diff --git a/wlauto/external/daq_server/daqpower-1.0.1.tar.gz b/wlauto/external/daq_server/daqpower-1.0.1.tar.gz
new file mode 100644
index 00000000..671a45e8
--- /dev/null
+++ b/wlauto/external/daq_server/daqpower-1.0.1.tar.gz
Binary files differ
diff --git a/wlauto/external/daq_server/src/MANIFEST.in b/wlauto/external/daq_server/src/MANIFEST.in
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/wlauto/external/daq_server/src/MANIFEST.in
diff --git a/wlauto/external/daq_server/src/README b/wlauto/external/daq_server/src/README
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/wlauto/external/daq_server/src/README
diff --git a/wlauto/external/daq_server/src/build.sh b/wlauto/external/daq_server/src/build.sh
new file mode 100755
index 00000000..ef3be06e
--- /dev/null
+++ b/wlauto/external/daq_server/src/build.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# Build the daqpower source distribution and move the resulting tarball into
+# the parent directory (wlauto/external/daq_server), then clean up artefacts.
+python setup.py sdist
+rm -rf build
+rm -f MANIFEST
+if [[ -d dist ]]; then
+	# setup.py puts the tarball under dist/; relocate it and drop the directory.
+	mv dist/*.tar.gz ..
+	rm -rf dist
+fi
+# Remove stale compiled bytecode.
+find . -iname \*.pyc -delete
diff --git a/wlauto/external/daq_server/src/daqpower/__init__.py b/wlauto/external/daq_server/src/daqpower/__init__.py
new file mode 100644
index 00000000..ed442117
--- /dev/null
+++ b/wlauto/external/daq_server/src/daqpower/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# Package version; kept in sync with the sdist tarball produced by build.sh.
+__version__ = '1.0.1'
diff --git a/wlauto/external/daq_server/src/daqpower/client.py b/wlauto/external/daq_server/src/daqpower/client.py
new file mode 100644
index 00000000..b129dc77
--- /dev/null
+++ b/wlauto/external/daq_server/src/daqpower/client.py
@@ -0,0 +1,380 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=E1101,E1103
+import os
+import sys
+
+from twisted.internet import reactor
+from twisted.internet.protocol import Protocol, ClientFactory, ReconnectingClientFactory
+from twisted.internet.error import ConnectionLost, ConnectionDone
+from twisted.protocols.basic import LineReceiver
+
+if __name__ == '__main__': # for debugging
+ sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
+from daqpower import log
+from daqpower.common import DaqServerRequest, DaqServerResponse, Status
+from daqpower.config import get_config_parser
+
+
+__all__ = ['execute_command', 'run_send_command', 'Status']
+
+
+class Command(object):
+    """A command to be sent to the DAQ server: a name plus keyword parameters."""
+
+    def __init__(self, name, **params):
+        self.name = name
+        self.params = params
+
+
+class CommandResult(object):
+    """Outcome of executing a Command: a Status, a human-readable message,
+    and optional payload data returned by the server."""
+
+    def __init__(self):
+        self.status = None
+        self.message = None
+        self.data = None
+
+    def __str__(self):
+        return '{} {}'.format(self.status, self.message)
+
+
+class CommandExecutorProtocol(Protocol):
+
+ def __init__(self, command, timeout=10, retries=1):
+ self.command = command
+ self.sent_request = None
+ self.waiting_for_response = False
+ self.keep_going = None
+ self.ports_to_pull = None
+ self.factory = None
+ self.timeoutCallback = None
+ self.timeout = timeout
+ self.retries = retries
+ self.retry_count = 0
+
+ def connectionMade(self):
+ if self.command.name == 'get_data':
+ self.sendRequest('list_port_files')
+ else:
+ self.sendRequest(self.command.name, **self.command.params)
+
+ def connectionLost(self, reason=ConnectionDone):
+ if isinstance(reason, ConnectionLost):
+ self.errorOut('connection lost: {}'.format(reason))
+ elif self.waiting_for_response:
+ self.errorOut('Server closed connection without sending a response.')
+ else:
+ log.debug('connection terminated.')
+
+ def sendRequest(self, command, **params):
+ self.sent_request = DaqServerRequest(command, params)
+ request_string = self.sent_request.serialize()
+ log.debug('sending request: {}'.format(request_string))
+ self.transport.write(''.join([request_string, '\r\n']))
+ self.timeoutCallback = reactor.callLater(self.timeout, self.requestTimedOut)
+ self.waiting_for_response = True
+
+ def dataReceived(self, data):
+ self.keep_going = False
+ if self.waiting_for_response:
+ self.waiting_for_response = False
+ self.timeoutCallback.cancel()
+ try:
+ response = DaqServerResponse.deserialize(data)
+ except Exception, e: # pylint: disable=W0703
+ self.errorOut('Invalid response: {} ({})'.format(data, e))
+ else:
+ if response.status != Status.ERROR:
+ self.processResponse(response) # may set self.keep_going
+ if not self.keep_going:
+ self.commandCompleted(response.status, response.message, response.data)
+ else:
+ self.errorOut(response.message)
+ else:
+ self.errorOut('unexpected data received: {}\n'.format(data))
+
+ def processResponse(self, response):
+ if self.sent_request.command in ['list_ports', 'list_port_files']:
+ self.processPortsResponse(response)
+ elif self.sent_request.command == 'list_devices':
+ self.processDevicesResponse(response)
+ elif self.sent_request.command == 'pull':
+ self.processPullResponse(response)
+
+ def processPortsResponse(self, response):
+ if 'ports' not in response.data:
+ self.errorOut('Response did not containt ports data: {} ({}).'.format(response, response.data))
+ ports = response.data['ports']
+ response.data = ports
+ if self.command.name == 'get_data':
+ if ports:
+ self.ports_to_pull = ports
+ self.sendPullRequest(self.ports_to_pull.pop())
+ else:
+ response.status = Status.OKISH
+ response.message = 'No ports were returned.'
+
+ def processDevicesResponse(self, response):
+ if 'devices' not in response.data:
+ self.errorOut('Response did not containt devices data: {} ({}).'.format(response, response.data))
+ ports = response.data['devices']
+ response.data = ports
+
+ def sendPullRequest(self, port_id):
+ self.sendRequest('pull', port_id=port_id)
+ self.keep_going = True
+
+ def processPullResponse(self, response):
+ if 'port_number' not in response.data:
+ self.errorOut('Response does not contain port number: {} ({}).'.format(response, response.data))
+ port_number = response.data.pop('port_number')
+ filename = self.sent_request.params['port_id'] + '.csv'
+ self.factory.initiateFileTransfer(filename, port_number)
+ if self.ports_to_pull:
+ self.sendPullRequest(self.ports_to_pull.pop())
+
+ def commandCompleted(self, status, message=None, data=None):
+ self.factory.result.status = status
+ self.factory.result.message = message
+ self.factory.result.data = data
+ self.transport.loseConnection()
+
+ def requestTimedOut(self):
+ self.retry_count += 1
+ if self.retry_count > self.retries:
+ self.errorOut("Request timed out; server failed to respond.")
+ else:
+ log.debug('Retrying...')
+ self.connectionMade()
+
+ def errorOut(self, message):
+ self.factory.errorOut(message)
+
+
+class CommandExecutorFactory(ClientFactory):
+
+ protocol = CommandExecutorProtocol
+ wait_delay = 1
+
+ def __init__(self, config, command, timeout=10, retries=1):
+ self.config = config
+ self.command = command
+ self.timeout = timeout
+ self.retries = retries
+ self.result = CommandResult()
+ self.done = False
+ self.transfers_in_progress = {}
+ if command.name == 'get_data':
+ if 'output_directory' not in command.params:
+ self.errorOut('output_directory not specifed for get_data command.')
+ self.output_directory = command.params['output_directory']
+ if not os.path.isdir(self.output_directory):
+ log.debug('Creating output directory {}'.format(self.output_directory))
+ os.makedirs(self.output_directory)
+
+ def buildProtocol(self, addr):
+ protocol = CommandExecutorProtocol(self.command, self.timeout, self.retries)
+ protocol.factory = self
+ return protocol
+
+ def initiateFileTransfer(self, filename, port):
+ log.debug('Downloading {} from port {}'.format(filename, port))
+ filepath = os.path.join(self.output_directory, filename)
+ session = FileReceiverFactory(filepath, self)
+ connector = reactor.connectTCP(self.config.host, port, session)
+ self.transfers_in_progress[session] = connector
+
+ def transferComplete(self, session):
+ connector = self.transfers_in_progress[session]
+ log.debug('Transfer on port {} complete.'.format(connector.port))
+ del self.transfers_in_progress[session]
+
+ def clientConnectionLost(self, connector, reason):
+ if self.transfers_in_progress:
+ log.debug('Waiting for the transfer(s) to complete.')
+ self.waitForTransfersToCompleteAndExit()
+
+ def clientConnectionFailed(self, connector, reason):
+ self.result.status = Status.ERROR
+ self.result.message = 'Could not connect to server.'
+ self.waitForTransfersToCompleteAndExit()
+
+ def waitForTransfersToCompleteAndExit(self):
+ if self.transfers_in_progress:
+ reactor.callLater(self.wait_delay, self.waitForTransfersToCompleteAndExit)
+ else:
+ log.debug('Stopping the reactor.')
+ reactor.stop()
+
+ def errorOut(self, message):
+ self.result.status = Status.ERROR
+ self.result.message = message
+ reactor.crash()
+
+ def __str__(self):
+ return '<CommandExecutorProtocol {}>'.format(self.command.name)
+
+ __repr__ = __str__
+
+
+class FileReceiver(LineReceiver): # pylint: disable=W0223
+    """Writes lines received over the wire into a local file, normalising
+    line endings to LF. Any pre-existing file at the path is replaced."""
+
+    def __init__(self, path):
+        self.path = path
+        self.fh = None
+        self.factory = None
+
+    def connectionMade(self):
+        if os.path.isfile(self.path):
+            log.warning('overriding existing file.')
+            os.remove(self.path)
+        self.fh = open(self.path, 'w')
+
+    def connectionLost(self, reason=ConnectionDone):
+        if self.fh:
+            self.fh.close()
+
+    def lineReceived(self, line):
+        line = line.rstrip('\r\n') + '\n'
+        self.fh.write(line)
+
+
+class FileReceiverFactory(ReconnectingClientFactory):
+    """Factory for FileReceiver connections; notifies its owner
+    (a CommandExecutorFactory) when a transfer completes."""
+
+    def __init__(self, path, owner):
+        self.path = path
+        self.owner = owner
+
+    def buildProtocol(self, addr):
+        protocol = FileReceiver(self.path)
+        protocol.factory = self
+        self.resetDelay()
+        return protocol
+
+    def clientConnectionLost(self, conector, reason):
+        # NOTE(review): twisted passes 'reason' as a Failure, not an exception
+        # instance, so this isinstance check likely never matches --
+        # reason.check(ConnectionLost) may have been intended; confirm.
+        if isinstance(reason, ConnectionLost):
+            log.error('Connection lost: {}'.format(reason))
+            ReconnectingClientFactory.clientConnectionLost(self, conector, reason)
+        else:
+            self.owner.transferComplete(self)
+
+    def clientConnectionFailed(self, conector, reason):
+        # Same caveat as clientConnectionLost regarding the Failure check.
+        if isinstance(reason, ConnectionLost):
+            log.error('Connection failed: {}'.format(reason))
+            ReconnectingClientFactory.clientConnectionFailed(self, conector, reason)
+
+    def __str__(self):
+        return '<FileReceiver {}>'.format(self.path)
+
+    __repr__ = __str__
+
+
+def execute_command(server_config, command, **kwargs):
+    """Execute a single command against the DAQ server described by
+    server_config and return a CommandResult. ``command`` may be a Command
+    instance or a command-name string (kwargs become its parameters).
+    A fresh twisted reactor is force-installed for each invocation."""
+    before_fds = _get_open_fds() # see the comment in the finally clause below
+    if isinstance(command, basestring):
+        command = Command(command, **kwargs)
+    # Long-running commands get a more generous response timeout.
+    timeout = 300 if command.name in ['stop', 'pull'] else 10
+    factory = CommandExecutorFactory(server_config, command, timeout)
+
+    # reactors aren't designed to be re-startable. In order to be
+    # able to call execute_command multiple times, we need to force
+    # re-installation of the reactor; hence this hackery.
+    # TODO: look into implementing restartable reactors. According to the
+    #       Twisted FAQ, there is no good reason why there isn't one:
+    #       http://twistedmatrix.com/trac/wiki/FrequentlyAskedQuestions#WhycanttheTwistedsreactorberestarted
+    from twisted.internet import default
+    del sys.modules['twisted.internet.reactor']
+    default.install()
+    global reactor # pylint: disable=W0603
+    reactor = sys.modules['twisted.internet.reactor']
+
+    try:
+        reactor.connectTCP(server_config.host, server_config.port, factory)
+        reactor.run()
+        return factory.result
+    finally:
+        # re-startable reactor hack part 2.
+        # twisted hijacks SIGINT and doesn't bother to un-hijack it when the reactor
+        # stops. So we have to do it for it *rolls eye*.
+        import signal
+        signal.signal(signal.SIGINT, signal.default_int_handler)
+        # OK, the reactor is also leaking file descriptors. Tracking down all
+        # of them is non trivial, so instead we're just comparing the before
+        # and after lists of open FDs for the current process, and closing all
+        # new ones, as execute_command should never leave anything open after
+        # it exits (even when downloading data files from the server).
+        # TODO: This is way too hacky even compared to the rest of this function.
+        #       Additionally, the current implementation ties this to UNIX,
+        #       so in the long run, we need to do this properly and get the FDs
+        #       from the reactor.
+        after_fds = _get_open_fds()
+        # lsof -Ff reports descriptors as e.g. 'f12'; strip the leading 'f'.
+        for fd in (after_fds - before_fds):
+            try:
+                os.close(int(fd[1:]))
+            except OSError:
+                pass
+        # Below is the alternative code that gets FDs from the reactor, however
+        # at the moment it doesn't seem to get everything, which is why code
+        # above is used instead.
+        #for fd in reactor._selectables:
+        #    os.close(fd)
+        #reactor._poller.close()
+
+
+def _get_open_fds():
+ if os.name == 'posix':
+ import subprocess
+ pid = os.getpid()
+ procs = subprocess.check_output(
+ [ "lsof", '-w', '-Ff', "-p", str( pid ) ] )
+ return set(procs.split())
+ else:
+ # TODO: Implement the Windows equivalent.
+ return []
+
+
+def run_send_command():
+ """Main entry point when running as a script -- should not be invoked form another module."""
+ parser = get_config_parser()
+ parser.add_argument('command')
+ parser.add_argument('-o', '--output-directory', metavar='DIR', default='.',
+ help='Directory used to output data files (defaults to the current directory).')
+ parser.add_argument('--verbose', help='Produce verobose output.', action='store_true', default=False)
+ args = parser.parse_args()
+ if not args.device_config.labels:
+ args.device_config.labels = ['PORT_{}'.format(i) for i in xrange(len(args.device_config.resistor_values))]
+
+ if args.verbose:
+ log.start_logging('DEBUG')
+ else:
+ log.start_logging('INFO', fmt='%(levelname)-8s %(message)s')
+
+ if args.command == 'configure':
+ args.device_config.validate()
+ command = Command(args.command, config=args.device_config)
+ elif args.command == 'get_data':
+ command = Command(args.command, output_directory=args.output_directory)
+ else:
+ command = Command(args.command)
+
+ result = execute_command(args.server_config, command)
+ print result
+ if result.data:
+ print result.data
+
+
+if __name__ == '__main__':
+ run_send_command()
diff --git a/wlauto/external/daq_server/src/daqpower/common.py b/wlauto/external/daq_server/src/daqpower/common.py
new file mode 100644
index 00000000..3e64c16e
--- /dev/null
+++ b/wlauto/external/daq_server/src/daqpower/common.py
@@ -0,0 +1,99 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=E1101
+import json
+
+
+class Serializer(json.JSONEncoder):
+    """JSON encoder that also handles Serializable objects and Enum
+    entries (the latter are encoded as their name string)."""
+
+    def default(self, o): # pylint: disable=E0202
+        if isinstance(o, Serializable):
+            return o.serialize()
+        if isinstance(o, Enum.EnumEntry):
+            return o.name
+        return json.JSONEncoder.default(self, o)
+
+
+class Serializable(object):
+    """Mixin providing JSON (de)serialization: deserialize() constructs an
+    instance from a JSON object's keys, serialize() dumps __dict__ (or a
+    supplied dict) using the Serializer encoder."""
+
+    @classmethod
+    def deserialize(cls, text):
+        return cls(**json.loads(text))
+
+    def serialize(self, d=None):
+        if d is None:
+            d = self.__dict__
+        return json.dumps(d, cls=Serializer)
+
+
+class DaqServerRequest(Serializable):
+    """A request sent to the DAQ server: a command name plus its parameters."""
+
+    def __init__(self, command, params=None): # pylint: disable=W0231
+        self.command = command
+        self.params = params or {}
+
+
+class DaqServerResponse(Serializable):
+    """A response from the DAQ server: a Status, an optional single-line
+    message, and an optional data payload."""
+
+    def __init__(self, status, message=None, data=None): # pylint: disable=W0231
+        self.status = status
+        # Collapse CRLFs so the message stays on one line on the wire.
+        self.message = message.strip().replace('\r\n', ' ') if message else ''
+        self.data = data or {}
+
+    def __str__(self):
+        return '{} {}'.format(self.status, self.message or '')
+
+
+class Enum(object):
+    """
+    Assuming MyEnum = Enum('A', 'B'),
+
+    MyEnum.A and MyEnum.B are valid values.
+
+    a = MyEnum.A
+    (a == MyEnum.A) == True
+    (a in MyEnum) == True
+
+    MyEnum('A') == MyEnum.A
+
+    str(MyEnum.A) == 'A'
+
+    """
+
+    class EnumEntry(object):
+        # A single named member; compares equal to its name string.
+        def __init__(self, name):
+            self.name = name
+        def __str__(self):
+            return self.name
+        def __cmp__(self, other):
+            return cmp(self.name, str(other))
+
+    def __init__(self, *args):
+        # Each argument name becomes an attribute holding an EnumEntry.
+        for a in args:
+            setattr(self, a, self.EnumEntry(a))
+
+    def __call__(self, value):
+        # Look up an entry by name: MyEnum('A') -> MyEnum.A.
+        if value not in self.__dict__:
+            raise ValueError('Not enum value: {}'.format(value))
+        return self.__dict__[value]
+
+    def __iter__(self):
+        for e in self.__dict__:
+            yield self.__dict__[e]
+
+
+# Tri-state result status used by DaqServerResponse and client results.
+Status = Enum('OK', 'OKISH', 'ERROR')
diff --git a/wlauto/external/daq_server/src/daqpower/config.py b/wlauto/external/daq_server/src/daqpower/config.py
new file mode 100644
index 00000000..bfc3280f
--- /dev/null
+++ b/wlauto/external/daq_server/src/daqpower/config.py
@@ -0,0 +1,154 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import argparse
+
+from daqpower.common import Serializable
+
+
+class ConfigurationError(Exception):
+    """Raised when configuration passed into DaqServer is invalid."""
+    pass
+
+
+class DeviceConfiguration(Serializable):
+    """Encapsulates configuration for the DAQ, typically passed from
+    the client."""
+
+    # Settings that may be supplied via command line options (see get_config_parser).
+    valid_settings = ['device_id', 'v_range', 'dv_range', 'sampling_rate', 'resistor_values', 'labels']
+
+    default_device_id = 'Dev1'
+    default_v_range = 2.5
+    default_dv_range = 0.2
+    default_sampling_rate = 10000
+    # Channel map used in DAQ 6363 and similar.
+    default_channel_map = (0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23)
+
+    @property
+    def number_of_ports(self):
+        # One measured port per shunt resistor.
+        return len(self.resistor_values)
+
+    def __init__(self, **kwargs): # pylint: disable=W0231
+        try:
+            self.device_id = kwargs.pop('device_id') or self.default_device_id
+            self.v_range = float(kwargs.pop('v_range') or self.default_v_range)
+            self.dv_range = float(kwargs.pop('dv_range') or self.default_dv_range)
+            self.sampling_rate = int(kwargs.pop('sampling_rate') or self.default_sampling_rate)
+            self.resistor_values = kwargs.pop('resistor_values') or []
+            self.channel_map = kwargs.pop('channel_map') or self.default_channel_map
+            # NOTE(review): this default includes a '.csv' suffix, whereas the
+            # client-side default in client.py uses bare 'PORT_{}' labels and
+            # SampleProcessor appends '.csv' itself -- confirm which is intended.
+            self.labels = (kwargs.pop('labels') or
+                           ['PORT_{}.csv'.format(i) for i in xrange(len(self.resistor_values))])
+        except KeyError, e:
+            raise ConfigurationError('Missing config: {}'.format(e.message))
+        if kwargs:
+            raise ConfigurationError('Unexpected config: {}'.format(kwargs))
+
+    def validate(self):
+        if not self.number_of_ports:
+            raise ConfigurationError('No resistor values were specified.')
+        if not len(self.resistor_values) == len(self.labels):
+            message = 'The number of resistors ({}) does not match the number of labels ({})'
+            raise ConfigurationError(message.format(len(self.resistor_values), len(self.labels)))
+
+    def __str__(self):
+        return self.serialize()
+
+    __repr__ = __str__
+
+
+class ServerConfiguration(object):
+    """Client-side server configuration."""
+
+    # Settings that may be supplied via command line options.
+    valid_settings = ['host', 'port']
+
+    default_host = '127.0.0.1'
+    default_port = 45677
+
+    def __init__(self, **kwargs):
+        self.host = kwargs.pop('host', None) or self.default_host
+        self.port = kwargs.pop('port', None) or self.default_port
+        if kwargs:
+            raise ConfigurationError('Unexpected config: {}'.format(kwargs))
+
+    def validate(self):
+        if not self.host:
+            raise ConfigurationError('Server host not specified.')
+        if not self.port:
+            raise ConfigurationError('Server port not specified.')
+        elif not isinstance(self.port, int):
+            raise ConfigurationError('Server port must be an integer.')
+
+
+class UpdateDeviceConfig(argparse.Action):
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ setting = option_string.strip('-').replace('-', '_')
+ if setting not in DeviceConfiguration.valid_settings:
+ raise ConfigurationError('Unkown option: {}'.format(option_string))
+ setattr(namespace._device_config, setting, values)
+
+
+class UpdateServerConfig(argparse.Action):
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ setting = option_string.strip('-').replace('-', '_')
+ if setting not in namespace.server_config.valid_settings:
+ raise ConfigurationError('Unkown option: {}'.format(option_string))
+ setattr(namespace.server_config, setting, values)
+
+
+class ConfigNamespace(object):
+    """argparse namespace that accumulates raw device settings on a private
+    holder and exposes them as a constructed DeviceConfiguration."""
+
+    class _N(object):
+        # Bare holder for raw settings assigned by UpdateDeviceConfig.
+        def __init__(self):
+            self.device_id = None
+            self.v_range = None
+            self.dv_range = None
+            self.sampling_rate = None
+            self.resistor_values = None
+            self.labels = None
+            self.channel_map = None
+
+    @property
+    def device_config(self):
+        return DeviceConfiguration(**self._device_config.__dict__)
+
+    def __init__(self):
+        self._device_config = self._N()
+        self.server_config = ServerConfiguration()
+
+
+class ConfigArgumentParser(argparse.ArgumentParser):
+    """ArgumentParser that always parses into a fresh ConfigNamespace."""
+
+    def parse_args(self, *args, **kwargs):
+        kwargs['namespace'] = ConfigNamespace()
+        return super(ConfigArgumentParser, self).parse_args(*args, **kwargs)
+
+
+def get_config_parser(server=True, device=True):
+    """Build a ConfigArgumentParser with the standard device and/or server
+    options (both groups included by default)."""
+    parser = ConfigArgumentParser()
+    if device:
+        parser.add_argument('--device-id', action=UpdateDeviceConfig)
+        parser.add_argument('--v-range', action=UpdateDeviceConfig, type=float)
+        parser.add_argument('--dv-range', action=UpdateDeviceConfig, type=float)
+        parser.add_argument('--sampling-rate', action=UpdateDeviceConfig, type=int)
+        parser.add_argument('--resistor-values', action=UpdateDeviceConfig, type=float, nargs='*')
+        parser.add_argument('--labels', action=UpdateDeviceConfig, nargs='*')
+    if server:
+        parser.add_argument('--host', action=UpdateServerConfig)
+        parser.add_argument('--port', action=UpdateServerConfig, type=int)
+    return parser
+
diff --git a/wlauto/external/daq_server/src/daqpower/daq.py b/wlauto/external/daq_server/src/daqpower/daq.py
new file mode 100644
index 00000000..12689541
--- /dev/null
+++ b/wlauto/external/daq_server/src/daqpower/daq.py
@@ -0,0 +1,265 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Creates a new DAQ device class. This class assumes that there is a
+DAQ connected and mapped as Dev1. It assumes a specific wiring configuration on the DAQ (it is not
+meant to be a generic DAQ interface). The following diagram shows the wiring for one DaqDevice
+port::
+
+Port 0
+========
+| A0+ <--- Vr -------------------------|
+| |
+| A0- <--- GND -------------------// |
+| |
+| A1+ <--- V+ ------------|-------V+ |
+| r | |
+| A1- <--- Vr --/\/\/\----| |
+| | |
+| | |
+| |--------------------------|
+========
+
+:number_of_ports: The number of ports connected on the DAQ. Each port requires 2 DAQ Channels
+ one for the source voltage and one for the Voltage drop over the
+ resistor r (V+ - Vr) allows us to detect the current.
+:resistor_value: The resistance of r. Typically a few milliOhm
+:downsample: The number of samples combined to create one Power point. If set to one
+ each sample corresponds to one reported power point.
+:sampling_rate: The rate at which DAQ takes a sample from each channel.
+
+"""
+# pylint: disable=F0401,E1101,W0621
+import os
+import sys
+import csv
+import time
+import threading
+from Queue import Queue, Empty
+
+import numpy
+
+from PyDAQmx import Task
+from PyDAQmx.DAQmxFunctions import DAQmxGetSysDevNames
+from PyDAQmx.DAQmxTypes import int32, byref, create_string_buffer
+from PyDAQmx.DAQmxConstants import (DAQmx_Val_Diff, DAQmx_Val_Volts, DAQmx_Val_GroupByScanNumber, DAQmx_Val_Auto,
+ DAQmx_Val_Acquired_Into_Buffer, DAQmx_Val_Rising, DAQmx_Val_ContSamps)
+
+from daqpower import log
+
+def list_available_devices():
+    """Returns the list of DAQ devices visible to the driver."""
+    bufsize = 2048 # Should be plenty for all but the most pathological of situations.
+    buf = create_string_buffer('\000' * bufsize)
+    DAQmxGetSysDevNames(buf, bufsize)
+    return buf.value.split(',')
+
+
+class ReadSamplesTask(Task):
+    """PyDAQmx Task that continuously samples two voltage channels per port
+    (source voltage and the drop across the shunt resistor) and forwards the
+    raw sample buffers to a consumer via its write() method."""
+
+    def __init__(self, config, consumer):
+        Task.__init__(self)
+        self.config = config
+        self.consumer = consumer
+        # Room for roughly one second of data across all channels (2 per port).
+        self.sample_buffer_size = (self.config.sampling_rate + 1) * self.config.number_of_ports * 2
+        self.samples_read = int32()
+        self.remainder = []
+        # create voltage channels
+        for i in xrange(0, 2 * self.config.number_of_ports, 2):
+            self.CreateAIVoltageChan('{}/ai{}'.format(config.device_id, config.channel_map[i]),
+                                     '', DAQmx_Val_Diff,
+                                     -config.v_range, config.v_range,
+                                     DAQmx_Val_Volts, None)
+            self.CreateAIVoltageChan('{}/ai{}'.format(config.device_id, config.channel_map[i + 1]),
+                                     '', DAQmx_Val_Diff,
+                                     -config.dv_range, config.dv_range,
+                                     DAQmx_Val_Volts, None)
+        # configure sampling rate
+        self.CfgSampClkTiming('',
+                              self.config.sampling_rate,
+                              DAQmx_Val_Rising,
+                              DAQmx_Val_ContSamps,
+                              self.config.sampling_rate)
+        # register callbacks
+        self.AutoRegisterEveryNSamplesEvent(DAQmx_Val_Acquired_Into_Buffer, self.config.sampling_rate // 2, 0)
+        self.AutoRegisterDoneEvent(0)
+
+    def EveryNCallback(self):
+        # Read whatever is buffered and hand it off with the actual count read.
+        samples_buffer = numpy.zeros((self.sample_buffer_size,), dtype=numpy.float64)
+        self.ReadAnalogF64(DAQmx_Val_Auto, 0.0, DAQmx_Val_GroupByScanNumber, samples_buffer,
+                           self.sample_buffer_size, byref(self.samples_read), None)
+        self.consumer.write((samples_buffer, self.samples_read.value))
+
+    def DoneCallback(self, status): # pylint: disable=W0613,R0201
+        return 0 # The function should return an integer
+
+
+class AsyncWriter(threading.Thread):
+    """Daemon thread that decouples producers from a (potentially slow) sink.
+    Items passed to write() are queued; the thread calls do_write() (to be
+    implemented by subclasses) on each. stop() drains the queue first."""
+
+    def __init__(self, wait_period=1):
+        super(AsyncWriter, self).__init__()
+        self.daemon = True
+        self.wait_period = wait_period
+        self.running = threading.Event()
+        self._stop_signal = threading.Event()
+        self._queue = Queue()
+
+    def write(self, stuff):
+        if self._stop_signal.is_set():
+            raise IOError('Attempting to writer to {} after it has been closed.'.format(self.__class__.__name__))
+        self._queue.put(stuff)
+
+    def do_write(self, stuff):
+        # Subclasses implement the actual handling of one queued item.
+        raise NotImplementedError()
+
+    def run(self):
+        self.running.set()
+        while True:
+            # Exit only once a stop was requested AND the queue is drained.
+            if self._stop_signal.is_set() and self._queue.empty():
+                break
+            try:
+                self.do_write(self._queue.get(block=True, timeout=self.wait_period))
+            except Empty:
+                pass # carry on
+        self.running.clear()
+
+    def stop(self):
+        # Request the worker to exit once the queue is empty.
+        self._stop_signal.set()
+
+    def wait(self):
+        # Block until the worker thread has drained the queue and exited.
+        while self.running.is_set():
+            time.sleep(self.wait_period)
+
+
+class PortWriter(object):
+    """Writes (power, voltage) sample rows for one port to a CSV file.
+    The file is opened unbuffered so data survives abrupt termination."""
+
+    def __init__(self, path):
+        self.path = path
+        self.fh = open(path, 'w', 0)
+        self.writer = csv.writer(self.fh)
+        self.writer.writerow(['power', 'voltage'])
+
+    def write(self, row):
+        self.writer.writerow(row)
+
+    def close(self):
+        self.fh.close()
+
+    def __del__(self):
+        self.close()
+
+
+class SamplePorcessorError(Exception):
+    """Raised on invalid SampleProcessor configuration or port lookups.
+    (The misspelled class name is kept, as other modules may reference it.)"""
+    pass
+
+
+class SampleProcessor(AsyncWriter):
+    """Consumes raw (samples, count) tuples produced by ReadSamplesTask and
+    writes per-port (power, voltage) rows via one PortWriter per label."""
+
+    def __init__(self, resistor_values, output_directory, labels):
+        super(SampleProcessor, self).__init__()
+        self.resistor_values = resistor_values
+        self.output_directory = output_directory
+        self.labels = labels
+        self.number_of_ports = len(resistor_values)
+        if len(self.labels) != self.number_of_ports:
+            message = 'Number of labels ({}) does not match number of ports ({}).'
+            raise SamplePorcessorError(message.format(len(self.labels), self.number_of_ports))
+        self.port_writers = []
+
+    def do_write(self, sample_tuple):
+        # Samples are interleaved two channels per port (V then DV); power is
+        # P = V * I, with current I = DV / R for that port's shunt resistor.
+        samples, number_of_samples = sample_tuple
+        for i in xrange(0, number_of_samples * self.number_of_ports * 2, self.number_of_ports * 2):
+            for j in xrange(self.number_of_ports):
+                V = float(samples[i + 2 * j])
+                DV = float(samples[i + 2 * j + 1])
+                P = V * (DV / self.resistor_values[j])
+                self.port_writers[j].write([P, V])
+
+    def start(self):
+        # Open one CSV writer per label before the consumer thread starts.
+        for label in self.labels:
+            port_file = self.get_port_file_path(label)
+            writer = PortWriter(port_file)
+            self.port_writers.append(writer)
+        super(SampleProcessor, self).start()
+
+    def stop(self):
+        # Drain the queue, then close all port files.
+        super(SampleProcessor, self).stop()
+        self.wait()
+        for writer in self.port_writers:
+            writer.close()
+
+    def get_port_file_path(self, port_id):
+        if port_id in self.labels:
+            return os.path.join(self.output_directory, port_id + '.csv')
+        else:
+            raise SamplePorcessorError('Invalid port ID: {}'.format(port_id))
+
+    def __del__(self):
+        self.stop()
+
+
+class DaqRunner(object):
+    """Ties together a ReadSamplesTask (producer) and a SampleProcessor
+    (consumer) for a single data-collection session."""
+
+    @property
+    def number_of_ports(self):
+        return self.config.number_of_ports
+
+    def __init__(self, config, output_directory):
+        self.config = config
+        self.processor = SampleProcessor(config.resistor_values, output_directory, config.labels)
+        self.task = ReadSamplesTask(config, self.processor)
+        self.is_running = False
+
+    def start(self):
+        # Start the consumer first so no samples are dropped.
+        log.debug('Starting sample processor.')
+        self.processor.start()
+        log.debug('Starting DAQ Task.')
+        self.task.StartTask()
+        self.is_running = True
+        log.debug('Runner started.')
+
+    def stop(self):
+        # Stop the producer first, then drain and close the processor.
+        self.is_running = False
+        log.debug('Stopping DAQ Task.')
+        self.task.StopTask()
+        log.debug('Stopping sample processor.')
+        self.processor.stop()
+        log.debug('Runner stopped.')
+
+    def get_port_file_path(self, port_id):
+        return self.processor.get_port_file_path(port_id)
+
+
+if __name__ == '__main__':
+ from collections import namedtuple
+ DeviceConfig = namedtuple('DeviceConfig', ['device_id', 'channel_map', 'resistor_values',
+ 'v_range', 'dv_range', 'sampling_rate',
+ 'number_of_ports', 'labels'])
+ channel_map = (0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23)
+ resistor_values = [0.005]
+ labels = ['PORT_0']
+ dev_config = DeviceConfig('Dev1', channel_map, resistor_values, 2.5, 0.2, 10000, len(resistor_values), labels)
+ if not len(sys.argv) == 3:
+ print 'Usage: {} OUTDIR DURATION'.format(os.path.basename(__file__))
+ sys.exit(1)
+ output_directory = sys.argv[1]
+ duration = float(sys.argv[2])
+
+ print "Avialable devices:", list_availabe_devices()
+ runner = DaqRunner(dev_config, output_directory)
+ runner.start()
+ time.sleep(duration)
+ runner.stop()
diff --git a/wlauto/external/daq_server/src/daqpower/log.py b/wlauto/external/daq_server/src/daqpower/log.py
new file mode 100644
index 00000000..c9b215ae
--- /dev/null
+++ b/wlauto/external/daq_server/src/daqpower/log.py
@@ -0,0 +1,53 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import logging
+
+from twisted.python import log
+
+__all__ = ['debug', 'info', 'warning', 'error', 'critical', 'start_logging']
+
+debug = lambda x: log.msg(x, logLevel=logging.DEBUG)
+info = lambda x: log.msg(x, logLevel=logging.INFO)
+warning = lambda x: log.msg(x, logLevel=logging.WARNING)
+error = lambda x: log.msg(x, logLevel=logging.ERROR)
+critical = lambda x: log.msg(x, logLevel=logging.CRITICAL)
+
+
+class CustomLoggingObserver(log.PythonLoggingObserver):
+
+ def emit(self, eventDict):
+ if 'logLevel' in eventDict:
+ level = eventDict['logLevel']
+ elif eventDict['isError']:
+ level = logging.ERROR
+ else:
+            # All of that just to override this one line from
+ # default INFO level...
+ level = logging.DEBUG
+ text = log.textFromEventDict(eventDict)
+ if text is None:
+ return
+ self.logger.log(level, text)
+
+
+logObserver = CustomLoggingObserver()
+logObserver.start()
+
+
+def start_logging(level, fmt='%(asctime)s %(levelname)-8s: %(message)s'):
+ logging.basicConfig(level=getattr(logging, level), format=fmt)
+
diff --git a/wlauto/external/daq_server/src/daqpower/server.py b/wlauto/external/daq_server/src/daqpower/server.py
new file mode 100644
index 00000000..9aac51a2
--- /dev/null
+++ b/wlauto/external/daq_server/src/daqpower/server.py
@@ -0,0 +1,480 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=E1101,W0613
+from __future__ import division
+import os
+import sys
+import socket
+import argparse
+import shutil
+import time
+from datetime import datetime
+
+from zope.interface import implements
+from twisted.protocols.basic import LineReceiver
+from twisted.internet.protocol import Factory, Protocol
+from twisted.internet import reactor, interfaces
+from twisted.internet.error import ConnectionLost, ConnectionDone
+
+
+if __name__ == "__main__": # for debugging
+ sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
+from daqpower import log
+from daqpower.config import DeviceConfiguration
+from daqpower.common import DaqServerRequest, DaqServerResponse, Status
+try:
+ from daqpower.daq import DaqRunner, list_available_devices
+except ImportError:
+ # May be using debug mode.
+ DaqRunner = None
+ list_available_devices = lambda : ['Dev1']
+
+
+class ProtocolError(Exception):
+ pass
+
+
+class DummyDaqRunner(object):
+ """Dummy stub used when running in debug mode."""
+
+ num_rows = 200
+
+ @property
+ def number_of_ports(self):
+ return self.config.number_of_ports
+
+ def __init__(self, config, output_directory):
+ log.info('Creating runner with {} {}'.format(config, output_directory))
+ self.config = config
+ self.output_directory = output_directory
+ self.is_running = False
+
+ def start(self):
+ import csv, random
+ log.info('runner started')
+ for i in xrange(self.config.number_of_ports):
+ rows = [['power', 'voltage']] + [[random.gauss(1.0, 1.0), random.gauss(1.0, 0.1)]
+ for j in xrange(self.num_rows)]
+ with open(self.get_port_file_path(self.config.labels[i]), 'wb') as wfh:
+ writer = csv.writer(wfh)
+ writer.writerows(rows)
+
+ self.is_running = True
+
+ def stop(self):
+ self.is_running = False
+ log.info('runner stopped')
+
+ def get_port_file_path(self, port_id):
+ if port_id in self.config.labels:
+ return os.path.join(self.output_directory, '{}.csv'.format(port_id))
+ else:
+ raise Exception('Invalid port id: {}'.format(port_id))
+
+
+class DaqServer(object):
+
+ def __init__(self, base_output_directory):
+ self.base_output_directory = os.path.abspath(base_output_directory)
+ if os.path.isdir(self.base_output_directory):
+ log.info('Using output directory: {}'.format(self.base_output_directory))
+ else:
+ log.info('Creating new output directory: {}'.format(self.base_output_directory))
+ os.makedirs(self.base_output_directory)
+ self.runner = None
+ self.output_directory = None
+ self.labels = None
+
+ def configure(self, config_string):
+ message = None
+ if self.runner:
+ message = 'Configuring a new session before previous session has been terminated.'
+ log.warning(message)
+ if self.runner.is_running:
+ self.runner.stop()
+ config = DeviceConfiguration.deserialize(config_string)
+ config.validate()
+ self.output_directory = self._create_output_directory()
+ self.labels = config.labels
+ log.info('Writing port files to {}'.format(self.output_directory))
+ self.runner = DaqRunner(config, self.output_directory)
+ return message
+
+ def start(self):
+ if self.runner:
+ if not self.runner.is_running:
+ self.runner.start()
+ else:
+ message = 'Calling start() before stop() has been called. Data up to this point will be lost.'
+ log.warning(message)
+ self.runner.stop()
+ self.runner.start()
+ return message
+ else:
+ raise ProtocolError('Start called before a session has been configured.')
+
+ def stop(self):
+ if self.runner:
+ if self.runner.is_running:
+ self.runner.stop()
+ else:
+ message = 'Attempting to stop() before start() was invoked.'
+ log.warning(message)
+ self.runner.stop()
+ return message
+ else:
+ raise ProtocolError('Stop called before a session has been configured.')
+
+ def list_devices(self):
+ return list_available_devices()
+
+ def list_ports(self):
+ return self.labels
+
+ def list_port_files(self):
+ if not self.runner:
+ raise ProtocolError('Attempting to list port files before session has been configured.')
+ ports_with_files = []
+ for port_id in self.labels:
+ path = self.get_port_file_path(port_id)
+ if os.path.isfile(path):
+ ports_with_files.append(port_id)
+ return ports_with_files
+
+ def get_port_file_path(self, port_id):
+ if not self.runner:
+ raise ProtocolError('Attepting to get port file path before session has been configured.')
+ return self.runner.get_port_file_path(port_id)
+
+ def terminate(self):
+ message = None
+ if self.runner:
+ if self.runner.is_running:
+ message = 'Terminating session before runner has been stopped.'
+ log.warning(message)
+ self.runner.stop()
+ self.runner = None
+ if self.output_directory and os.path.isdir(self.output_directory):
+ shutil.rmtree(self.output_directory)
+ self.output_directory = None
+ log.info('Session terminated.')
+ else: # Runner has not been created.
+ message = 'Attempting to close session before it has been configured.'
+ log.warning(message)
+ return message
+
+ def _create_output_directory(self):
+ basename = datetime.now().strftime('%Y-%m-%d_%H%M%S%f')
+ dirname = os.path.join(self.base_output_directory, basename)
+ os.makedirs(dirname)
+ return dirname
+
+ def __del__(self):
+ if self.runner:
+ self.runner.stop()
+
+ def __str__(self):
+ return '({})'.format(self.base_output_directory)
+
+ __repr__ = __str__
+
+
+class DaqControlProtocol(LineReceiver): # pylint: disable=W0223
+
+ def __init__(self, daq_server):
+ self.daq_server = daq_server
+ self.factory = None
+
+ def lineReceived(self, line):
+ line = line.strip()
+ log.info('Received: {}'.format(line))
+ try:
+ request = DaqServerRequest.deserialize(line)
+ except Exception, e: # pylint: disable=W0703
+ self.sendError('Received bad request ({}: {})'.format(e.__class__.__name__, e.message))
+ else:
+ self.processRequest(request)
+
+ def processRequest(self, request):
+ try:
+ if request.command == 'configure':
+ self.configure(request)
+ elif request.command == 'start':
+ self.start(request)
+ elif request.command == 'stop':
+ self.stop(request)
+ elif request.command == 'list_devices':
+ self.list_devices(request)
+ elif request.command == 'list_ports':
+ self.list_ports(request)
+ elif request.command == 'list_port_files':
+ self.list_port_files(request)
+ elif request.command == 'pull':
+ self.pull_port_data(request)
+ elif request.command == 'close':
+ self.terminate(request)
+ else:
+ self.sendError('Received unknown command: {}'.format(request.command))
+ except Exception, e: # pylint: disable=W0703
+ self.sendError('{}: {}'.format(e.__class__.__name__, e.message))
+
+ def configure(self, request):
+ if 'config' in request.params:
+ result = self.daq_server.configure(request.params['config'])
+ if not result:
+ self.sendResponse(Status.OK)
+ else:
+ self.sendResponse(Status.OKISH, message=result)
+ else:
+ self.sendError('Invalid config; config string not provided.')
+
+ def start(self, request):
+ result = self.daq_server.start()
+ if not result:
+ self.sendResponse(Status.OK)
+ else:
+ self.sendResponse(Status.OKISH, message=result)
+
+ def stop(self, request):
+ result = self.daq_server.stop()
+ if not result:
+ self.sendResponse(Status.OK)
+ else:
+ self.sendResponse(Status.OKISH, message=result)
+
+ def pull_port_data(self, request):
+ if 'port_id' in request.params:
+ port_id = request.params['port_id']
+ port_file = self.daq_server.get_port_file_path(port_id)
+ if os.path.isfile(port_file):
+ port = self._initiate_file_transfer(port_file)
+ self.sendResponse(Status.OK, data={'port_number': port})
+ else:
+ self.sendError('File for port {} does not exist.'.format(port_id))
+ else:
+ self.sendError('Invalid pull request; port id not provided.')
+
+ def list_devices(self, request):
+ devices = self.daq_server.list_devices()
+ self.sendResponse(Status.OK, data={'devices': devices})
+
+ def list_ports(self, request):
+ port_labels = self.daq_server.list_ports()
+ self.sendResponse(Status.OK, data={'ports': port_labels})
+
+ def list_port_files(self, request):
+ port_labels = self.daq_server.list_port_files()
+ self.sendResponse(Status.OK, data={'ports': port_labels})
+
+ def terminate(self, request):
+ status = Status.OK
+ message = ''
+ if self.factory.transfer_sessions:
+ message = 'Terminating with file tranfer sessions in progress. '
+ log.warning(message)
+ for session in self.factory.transfer_sessions:
+ self.factory.transferComplete(session)
+ message += self.daq_server.terminate() or ''
+ if message:
+ status = Status.OKISH
+ self.sendResponse(status, message)
+
+ def sendError(self, message):
+ log.error(message)
+ self.sendResponse(Status.ERROR, message)
+
+ def sendResponse(self, status, message=None, data=None):
+ response = DaqServerResponse(status, message=message, data=data)
+ self.sendLine(response.serialize())
+
+ def sendLine(self, line):
+ log.info('Responding: {}'.format(line))
+ LineReceiver.sendLine(self, line.replace('\r\n',''))
+
+ def _initiate_file_transfer(self, filepath):
+ sender_factory = FileSenderFactory(filepath, self.factory)
+ connector = reactor.listenTCP(0, sender_factory)
+ self.factory.transferInitiated(sender_factory, connector)
+ return connector.getHost().port
+
+
+class DaqFactory(Factory):
+
+ protocol = DaqControlProtocol
+ check_alive_period = 5 * 60
+ max_transfer_lifetime = 30 * 60
+
+ def __init__(self, server):
+ self.server = server
+ self.transfer_sessions = {}
+
+ def buildProtocol(self, addr):
+ proto = DaqControlProtocol(self.server)
+ proto.factory = self
+ reactor.callLater(self.check_alive_period, self.pulse)
+ return proto
+
+ def clientConnectionLost(self, connector, reason):
+ log.msg('client connection lost: {}.'.format(reason))
+ if not isinstance(reason, ConnectionLost):
+ log.msg('ERROR: Client terminated connection mid-transfer.')
+ for session in self.transfer_sessions:
+ self.transferComplete(session)
+
+ def transferInitiated(self, session, connector):
+ self.transfer_sessions[session] = (time.time(), connector)
+
+ def transferComplete(self, session, reason='OK'):
+ if reason != 'OK':
+ log.error(reason)
+ self.transfer_sessions[session][1].stopListening()
+ del self.transfer_sessions[session]
+
+ def pulse(self):
+        """Close down any file transfer sessions that have been open for too long."""
+ current_time = time.time()
+ for session in self.transfer_sessions:
+ start_time, conn = self.transfer_sessions[session]
+ if (current_time - start_time) > self.max_transfer_lifetime:
+ message = '{} session on port {} timed out'
+ self.transferComplete(session, message.format(session, conn.getHost().port))
+ if self.transfer_sessions:
+ reactor.callLater(self.check_alive_period, self.pulse)
+
+ def __str__(self):
+ return '<DAQ {}>'.format(self.server)
+
+ __repr__ = __str__
+
+
+class FileReader(object):
+
+ implements(interfaces.IPushProducer)
+
+ def __init__(self, filepath):
+ self.fh = open(filepath)
+ self.proto = None
+ self.done = False
+ self._paused = True
+
+ def setProtocol(self, proto):
+ self.proto = proto
+
+ def resumeProducing(self):
+ if not self.proto:
+ raise ProtocolError('resumeProducing called with no protocol set.')
+ self._paused = False
+ try:
+ while not self._paused:
+ line = self.fh.next().rstrip('\n') + '\r\n'
+ self.proto.transport.write(line)
+ except StopIteration:
+ log.debug('Sent everything.')
+ self.stopProducing()
+
+ def pauseProducing(self):
+ self._paused = True
+
+ def stopProducing(self):
+ self.done = True
+ self.fh.close()
+ self.proto.transport.unregisterProducer()
+ self.proto.transport.loseConnection()
+
+
+class FileSenderProtocol(Protocol):
+
+ def __init__(self, reader):
+ self.reader = reader
+ self.factory = None
+
+ def connectionMade(self):
+ self.transport.registerProducer(self.reader, True)
+ self.reader.resumeProducing()
+
+ def connectionLost(self, reason=ConnectionDone):
+ if self.reader.done:
+ self.factory.transferComplete()
+ else:
+ self.reader.pauseProducing()
+ self.transport.unregisterProducer()
+
+
+class FileSenderFactory(Factory):
+
+ @property
+ def done(self):
+ if self.reader:
+ return self.reader.done
+ else:
+ return None
+
+ def __init__(self, path, owner):
+ self.path = os.path.abspath(path)
+ self.reader = None
+ self.owner = owner
+
+ def buildProtocol(self, addr):
+ if not self.reader:
+ self.reader = FileReader(self.path)
+ proto = FileSenderProtocol(self.reader)
+ proto.factory = self
+ self.reader.setProtocol(proto)
+ return proto
+
+ def transferComplete(self):
+ self.owner.transferComplete(self)
+
+ def __hash__(self):
+ return hash(self.path)
+
+ def __str__(self):
+ return '<FileSender {}>'.format(self.path)
+
+ __repr__ = __str__
+
+
+def run_server():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-d', '--directory', help='Working directory', metavar='DIR', default='.')
+ parser.add_argument('-p', '--port', help='port the server will listen on.',
+ metavar='PORT', default=45677, type=int)
+ parser.add_argument('--debug', help='Run in debug mode (no DAQ connected).',
+ action='store_true', default=False)
+ parser.add_argument('--verbose', help='Produce verobose output.', action='store_true', default=False)
+ args = parser.parse_args()
+
+ if args.debug:
+ global DaqRunner # pylint: disable=W0603
+ DaqRunner = DummyDaqRunner
+ else:
+ if not DaqRunner:
+ raise ImportError('DaqRunner')
+ if args.verbose or args.debug:
+ log.start_logging('DEBUG')
+ else:
+ log.start_logging('INFO')
+
+ server = DaqServer(args.directory)
+ reactor.listenTCP(args.port, DaqFactory(server)).getHost()
+ hostname = socket.gethostbyname(socket.gethostname())
+ log.info('Listening on {}:{}'.format(hostname, args.port))
+ reactor.run()
+
+
+if __name__ == "__main__":
+ run_server()
diff --git a/wlauto/external/daq_server/src/scripts/run-daq-server b/wlauto/external/daq_server/src/scripts/run-daq-server
new file mode 100644
index 00000000..b20d6caf
--- /dev/null
+++ b/wlauto/external/daq_server/src/scripts/run-daq-server
@@ -0,0 +1,3 @@
+#!/usr/bin/env python
+from daqpower.server import run_server
+run_server()
diff --git a/wlauto/external/daq_server/src/scripts/send-daq-command b/wlauto/external/daq_server/src/scripts/send-daq-command
new file mode 100644
index 00000000..a4656a67
--- /dev/null
+++ b/wlauto/external/daq_server/src/scripts/send-daq-command
@@ -0,0 +1,3 @@
+#!/usr/bin/env python
+from daqpower.client import run_send_command
+run_send_command()
diff --git a/wlauto/external/daq_server/src/setup.py b/wlauto/external/daq_server/src/setup.py
new file mode 100644
index 00000000..3c892aa8
--- /dev/null
+++ b/wlauto/external/daq_server/src/setup.py
@@ -0,0 +1,52 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import warnings
+from distutils.core import setup
+
+import daqpower
+
+
+warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'")
+
+params = dict(
+ name='daqpower',
+ version=daqpower.__version__,
+ packages=[
+ 'daqpower',
+ ],
+ scripts=[
+ 'scripts/run-daq-server',
+ 'scripts/send-daq-command',
+ ],
+ url='N/A',
+ maintainer='workload-automation',
+ maintainer_email='workload-automation@arm.com',
+ install_requires=[
+ 'twisted',
+ 'PyDAQmx',
+ ],
+ # https://pypi.python.org/pypi?%3Aaction=list_classifiers
+ classifiers=[
+ 'Development Status :: 3 - Alpha',
+ 'Environment :: Console',
+ 'License :: Other/Proprietary License',
+ 'Operating System :: Unix',
+ 'Programming Language :: Python :: 2.7',
+ ],
+)
+
+setup(**params)
diff --git a/wlauto/external/louie/LICENSE b/wlauto/external/louie/LICENSE
new file mode 100644
index 00000000..5b432357
--- /dev/null
+++ b/wlauto/external/louie/LICENSE
@@ -0,0 +1,12 @@
+This directory contains the Louie package, which has been modified by ARM Ltd.
+The original Louie package is licensed under the BSD license. ARM Ltd. changes
+are licensed under the Apache version 2 license.
+
+Original Louie package may be found here:
+
+https://pypi.python.org/pypi/Louie/1.1
+
+The text of the BSD License may be viewed here:
+
+http://opensource.org/licenses/bsd-license.php
+
diff --git a/wlauto/external/louie/__init__.py b/wlauto/external/louie/__init__.py
new file mode 100644
index 00000000..c269dd27
--- /dev/null
+++ b/wlauto/external/louie/__init__.py
@@ -0,0 +1,46 @@
+__all__ = [
+ 'dispatcher',
+ 'error',
+ 'plugin',
+ 'robustapply',
+ 'saferef',
+ 'sender',
+ 'signal',
+ 'version',
+
+ 'connect',
+ 'disconnect',
+ 'get_all_receivers',
+ 'reset',
+ 'send',
+ 'send_exact',
+ 'send_minimal',
+ 'send_robust',
+
+ 'install_plugin',
+ 'remove_plugin',
+ 'Plugin',
+ 'QtWidgetPlugin',
+ 'TwistedDispatchPlugin',
+
+ 'Anonymous',
+ 'Any',
+
+ 'All',
+ 'Signal',
+ ]
+
+import louie.dispatcher, louie.error, louie.plugin, louie.robustapply, \
+ louie.saferef, louie.sender, louie.signal, louie.version
+
+from louie.dispatcher import \
+ connect, disconnect, get_all_receivers, reset, \
+ send, send_exact, send_minimal, send_robust
+
+from louie.plugin import \
+ install_plugin, remove_plugin, Plugin, \
+ QtWidgetPlugin, TwistedDispatchPlugin
+
+from louie.sender import Anonymous, Any
+
+from louie.signal import All, Signal
diff --git a/wlauto/external/louie/dispatcher.py b/wlauto/external/louie/dispatcher.py
new file mode 100644
index 00000000..1136b3f1
--- /dev/null
+++ b/wlauto/external/louie/dispatcher.py
@@ -0,0 +1,591 @@
+"""Multiple-producer-multiple-consumer signal-dispatching.
+
+``dispatcher`` is the core of Louie, providing the primary API and the
+core logic for the system.
+
+Internal attributes:
+
+- ``WEAKREF_TYPES``: Tuple of types/classes which represent weak
+ references to receivers, and thus must be dereferenced on retrieval
+ to retrieve the callable object
+
+- ``connections``::
+
+ { senderkey (id) : { signal : [receivers...] } }
+
+- ``senders``: Used for cleaning up sender references on sender
+ deletion::
+
+ { senderkey (id) : weakref(sender) }
+
+- ``senders_back``: Used for cleaning up receiver references on receiver
+ deletion::
+
+ { receiverkey (id) : [senderkey (id)...] }
+"""
+
+import os
+import weakref
+
+try:
+ set
+except NameError:
+ from sets import Set as set, ImmutableSet as frozenset
+
+from louie import error
+from louie import robustapply
+from louie import saferef
+from louie.sender import Any, Anonymous
+from louie.signal import All
+from prioritylist import PriorityList
+
+
+# Support for statistics.
+if __debug__:
+ connects = 0
+ disconnects = 0
+ sends = 0
+
+ def print_stats():
+ print ('\n'
+ 'Louie connects: %i\n'
+ 'Louie disconnects: %i\n'
+ 'Louie sends: %i\n'
+ '\n') % (connects, disconnects, sends)
+
+ if 'PYDISPATCH_STATS' in os.environ:
+ import atexit
+ atexit.register(print_stats)
+
+
+
+WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref)
+
+
+connections = {}
+senders = {}
+senders_back = {}
+plugins = []
+
+def reset():
+ """Reset the state of Louie.
+
+ Useful during unit testing. Should be avoided otherwise.
+ """
+ global connections, senders, senders_back, plugins
+ connections = {}
+ senders = {}
+ senders_back = {}
+ plugins = []
+
+
+def connect(receiver, signal=All, sender=Any, weak=True, priority=0):
+ """Connect ``receiver`` to ``sender`` for ``signal``.
+
+ - ``receiver``: A callable Python object which is to receive
+ messages/signals/events. Receivers must be hashable objects.
+
+ If weak is ``True``, then receiver must be weak-referencable (more
+ precisely ``saferef.safe_ref()`` must be able to create a
+ reference to the receiver).
+
+ Receivers are fairly flexible in their specification, as the
+ machinery in the ``robustapply`` module takes care of most of the
+ details regarding figuring out appropriate subsets of the sent
+ arguments to apply to a given receiver.
+
+ Note: If ``receiver`` is itself a weak reference (a callable), it
+ will be de-referenced by the system's machinery, so *generally*
+ weak references are not suitable as receivers, though some use
+ might be found for the facility whereby a higher-level library
+ passes in pre-weakrefed receiver references.
+
+ - ``signal``: The signal to which the receiver should respond.
+
+ If ``All``, receiver will receive all signals from the indicated
+ sender (which might also be ``All``, but is not necessarily
+ ``All``).
+
+ Otherwise must be a hashable Python object other than ``None``
+ (``DispatcherError`` raised on ``None``).
+
+ - ``sender``: The sender to which the receiver should respond.
+
+ If ``Any``, receiver will receive the indicated signals from any
+ sender.
+
+ If ``Anonymous``, receiver will only receive indicated signals
+ from ``send``/``send_exact`` which do not specify a sender, or
+ specify ``Anonymous`` explicitly as the sender.
+
+ Otherwise can be any python object.
+
+ - ``weak``: Whether to use weak references to the receiver.
+
+ By default, the module will attempt to use weak references to
+ the receiver objects. If this parameter is ``False``, then strong
+ references will be used.
+
+    - ``priority``: specifies the priority by which a receiver should
+ get notified
+
+ Returns ``None``, may raise ``DispatcherTypeError``.
+ """
+ if signal is None:
+ raise error.DispatcherTypeError(
+ 'Signal cannot be None (receiver=%r sender=%r)'
+ % (receiver, sender))
+ if weak:
+ receiver = saferef.safe_ref(receiver, on_delete=_remove_receiver)
+ senderkey = id(sender)
+ if connections.has_key(senderkey):
+ signals = connections[senderkey]
+ else:
+ connections[senderkey] = signals = {}
+ # Keep track of senders for cleanup.
+ # Is Anonymous something we want to clean up?
+ if sender not in (None, Anonymous, Any):
+ def remove(object, senderkey=senderkey):
+ _remove_sender(senderkey=senderkey)
+ # Skip objects that can not be weakly referenced, which means
+ # they won't be automatically cleaned up, but that's too bad.
+ try:
+ weak_sender = weakref.ref(sender, remove)
+ senders[senderkey] = weak_sender
+ except:
+ pass
+ receiver_id = id(receiver)
+ # get current set, remove any current references to
+ # this receiver in the set, including back-references
+ if signals.has_key(signal):
+ receivers = signals[signal]
+ _remove_old_back_refs(senderkey, signal, receiver, receivers)
+ else:
+ receivers = signals[signal] = PriorityList()
+ try:
+ current = senders_back.get(receiver_id)
+ if current is None:
+ senders_back[receiver_id] = current = []
+ if senderkey not in current:
+ current.append(senderkey)
+ except:
+ pass
+ receivers.add(receiver, priority)
+ # Update stats.
+ if __debug__:
+ global connects
+ connects += 1
+
+
+def disconnect(receiver, signal=All, sender=Any, weak=True):
+ """Disconnect ``receiver`` from ``sender`` for ``signal``.
+
+ - ``receiver``: The registered receiver to disconnect.
+
+ - ``signal``: The registered signal to disconnect.
+
+ - ``sender``: The registered sender to disconnect.
+
+ - ``weak``: The weakref state to disconnect.
+
+ ``disconnect`` reverses the process of ``connect``, the semantics for
+ the individual elements are logically equivalent to a tuple of
+ ``(receiver, signal, sender, weak)`` used as a key to be deleted
+ from the internal routing tables. (The actual process is slightly
+ more complex but the semantics are basically the same).
+
+ Note: Using ``disconnect`` is not required to cleanup routing when
+ an object is deleted; the framework will remove routes for deleted
+ objects automatically. It's only necessary to disconnect if you
+ want to stop routing to a live object.
+
+ Returns ``None``, may raise ``DispatcherTypeError`` or
+ ``DispatcherKeyError``.
+ """
+ if signal is None:
+ raise error.DispatcherTypeError(
+ 'Signal cannot be None (receiver=%r sender=%r)'
+ % (receiver, sender))
+ if weak:
+ receiver = saferef.safe_ref(receiver)
+ senderkey = id(sender)
+ try:
+ signals = connections[senderkey]
+ receivers = signals[signal]
+ except KeyError:
+ raise error.DispatcherKeyError(
+ 'No receivers found for signal %r from sender %r'
+ % (signal, sender)
+ )
+ try:
+ # also removes from receivers
+ _remove_old_back_refs(senderkey, signal, receiver, receivers)
+ except ValueError:
+ raise error.DispatcherKeyError(
+ 'No connection to receiver %s for signal %s from sender %s'
+ % (receiver, signal, sender)
+ )
+ _cleanup_connections(senderkey, signal)
+ # Update stats.
+ if __debug__:
+ global disconnects
+ disconnects += 1
+
+
+def get_receivers(sender=Any, signal=All):
+ """Get list of receivers from global tables.
+
+ This function allows you to retrieve the raw list of receivers
+ from the connections table for the given sender and signal pair.
+
+ Note: There is no guarantee that this is the actual list stored in
+ the connections table, so the value should be treated as a simple
+    iterable/truth value rather than, for instance, a list to which you
+ might append new records.
+
+ Normally you would use ``live_receivers(get_receivers(...))`` to
+ retrieve the actual receiver objects as an iterable object.
+ """
+ try:
+ return connections[id(sender)][signal]
+ except KeyError:
+ return []
+
+
+def live_receivers(receivers):
+ """Filter sequence of receivers to get resolved, live receivers.
+
+ This is a generator which will iterate over the passed sequence,
+ checking for weak references and resolving them, then returning
+ all live receivers.
+ """
+ for receiver in receivers:
+ if isinstance(receiver, WEAKREF_TYPES):
+ # Dereference the weak reference.
+ receiver = receiver()
+ if receiver is not None:
+ # Check installed plugins to make sure this receiver is
+ # live.
+ live = True
+ for plugin in plugins:
+ if not plugin.is_live(receiver):
+ live = False
+ break
+ if live:
+ yield receiver
+
+
+def get_all_receivers(sender=Any, signal=All):
+ """Get list of all receivers from global tables.
+
+ This gets all receivers which should receive the given signal from
+ sender, each receiver should be produced only once by the
+ resulting generator.
+ """
+ yielded = set()
+ for receivers in (
+ # Get receivers that receive *this* signal from *this* sender.
+ get_receivers(sender, signal),
+ # Add receivers that receive *all* signals from *this* sender.
+ get_receivers(sender, All),
+ # Add receivers that receive *this* signal from *any* sender.
+ get_receivers(Any, signal),
+ # Add receivers that receive *all* signals from *any* sender.
+ get_receivers(Any, All),
+ ):
+ for receiver in receivers:
+ if receiver: # filter out dead instance-method weakrefs
+ try:
+ if not receiver in yielded:
+ yielded.add(receiver)
+ yield receiver
+ except TypeError:
+ # dead weakrefs raise TypeError on hash...
+ pass
+
+
+def send(signal=All, sender=Anonymous, *arguments, **named):
+ """Send ``signal`` from ``sender`` to all connected receivers.
+
+ - ``signal``: (Hashable) signal value; see ``connect`` for details.
+
+ - ``sender``: The sender of the signal.
+
+ If ``Any``, only receivers registered for ``Any`` will receive the
+ message.
+
+ If ``Anonymous``, only receivers registered to receive messages
+ from ``Anonymous`` or ``Any`` will receive the message.
+
+ Otherwise can be any Python object (normally one registered with
+ a connect if you actually want something to occur).
+
+ - ``arguments``: Positional arguments which will be passed to *all*
+ receivers. Note that this may raise ``TypeError`` if the receivers
+ do not allow the particular arguments. Note also that arguments
+ are applied before named arguments, so they should be used with
+ care.
+
+ - ``named``: Named arguments which will be filtered according to the
+ parameters of the receivers to only provide those acceptable to
+ the receiver.
+
+ Return a list of tuple pairs ``[(receiver, response), ...]``
+
+ If any receiver raises an error, the error propagates back through
+ send, terminating the dispatch loop, so it is quite possible to
+    not have all receivers called if one raises an error.
+ """
+ # Call each receiver with whatever arguments it can accept.
+ # Return a list of tuple pairs [(receiver, response), ... ].
+ responses = []
+ for receiver in live_receivers(get_all_receivers(sender, signal)):
+ # Wrap receiver using installed plugins.
+ original = receiver
+ for plugin in plugins:
+ receiver = plugin.wrap_receiver(receiver)
+ response = robustapply.robust_apply(
+ receiver, original,
+ signal=signal,
+ sender=sender,
+ *arguments,
+ **named
+ )
+ responses.append((receiver, response))
+ # Update stats.
+ if __debug__:
+ global sends
+ sends += 1
+ return responses
+
+
+def send_minimal(signal=All, sender=Anonymous, *arguments, **named):
+ """Like ``send``, but does not attach ``signal`` and ``sender``
+ arguments to the call to the receiver."""
+ # Call each receiver with whatever arguments it can accept.
+ # Return a list of tuple pairs [(receiver, response), ... ].
+ responses = []
+ for receiver in live_receivers(get_all_receivers(sender, signal)):
+ # Wrap receiver using installed plugins.
+ original = receiver
+ for plugin in plugins:
+ receiver = plugin.wrap_receiver(receiver)
+ response = robustapply.robust_apply(
+ receiver, original,
+ *arguments,
+ **named
+ )
+ responses.append((receiver, response))
+ # Update stats.
+ if __debug__:
+ global sends
+ sends += 1
+ return responses
+
+
+def send_exact(signal=All, sender=Anonymous, *arguments, **named):
+ """Send ``signal`` only to receivers registered for exact message.
+
+ ``send_exact`` allows for avoiding ``Any``/``Anonymous`` registered
+ handlers, sending only to those receivers explicitly registered
+ for a particular signal on a particular sender.
+ """
+ responses = []
+ for receiver in live_receivers(get_receivers(sender, signal)):
+ # Wrap receiver using installed plugins.
+ original = receiver
+ for plugin in plugins:
+ receiver = plugin.wrap_receiver(receiver)
+ response = robustapply.robust_apply(
+ receiver, original,
+ signal=signal,
+ sender=sender,
+ *arguments,
+ **named
+ )
+ responses.append((receiver, response))
+ return responses
+
+
+def send_robust(signal=All, sender=Anonymous, *arguments, **named):
+ """Send ``signal`` from ``sender`` to all connected receivers catching
+ errors
+
+ - ``signal``: (Hashable) signal value, see connect for details
+
+ - ``sender``: The sender of the signal.
+
+ If ``Any``, only receivers registered for ``Any`` will receive the
+ message.
+
+ If ``Anonymous``, only receivers registered to receive messages
+ from ``Anonymous`` or ``Any`` will receive the message.
+
+ Otherwise can be any Python object (normally one registered with
+ a connect if you actually want something to occur).
+
+ - ``arguments``: Positional arguments which will be passed to *all*
+ receivers. Note that this may raise ``TypeError`` if the receivers
+ do not allow the particular arguments. Note also that arguments
+ are applied before named arguments, so they should be used with
+ care.
+
+ - ``named``: Named arguments which will be filtered according to the
+ parameters of the receivers to only provide those acceptable to
+ the receiver.
+
+ Return a list of tuple pairs ``[(receiver, response), ... ]``
+
+ If any receiver raises an error (specifically, any subclass of
+ ``Exception``), the error instance is returned as the result for
+ that receiver.
+ """
+ # Call each receiver with whatever arguments it can accept.
+ # Return a list of tuple pairs [(receiver, response), ... ].
+ responses = []
+ for receiver in live_receivers(get_all_receivers(sender, signal)):
+ original = receiver
+ for plugin in plugins:
+ receiver = plugin.wrap_receiver(receiver)
+ try:
+ response = robustapply.robust_apply(
+ receiver, original,
+ signal=signal,
+ sender=sender,
+ *arguments,
+ **named
+ )
+ except Exception, err:
+ responses.append((receiver, err))
+ else:
+ responses.append((receiver, response))
+ return responses
+
+
+def _remove_receiver(receiver):
+ """Remove ``receiver`` from connections."""
+ if not senders_back:
+ # During module cleanup the mapping will be replaced with None.
+ return False
+ backKey = id(receiver)
+ for senderkey in senders_back.get(backKey, ()):
+ try:
+ signals = connections[senderkey].keys()
+ except KeyError:
+ pass
+ else:
+ for signal in signals:
+ try:
+ receivers = connections[senderkey][signal]
+ except KeyError:
+ pass
+ else:
+ try:
+ receivers.remove(receiver)
+ except Exception:
+ pass
+ _cleanup_connections(senderkey, signal)
+ try:
+ del senders_back[backKey]
+ except KeyError:
+ pass
+
+
+def _cleanup_connections(senderkey, signal):
+ """Delete empty signals for ``senderkey``. Delete ``senderkey`` if
+ empty."""
+ try:
+ receivers = connections[senderkey][signal]
+ except:
+ pass
+ else:
+ if not receivers:
+ # No more connected receivers. Therefore, remove the signal.
+ try:
+ signals = connections[senderkey]
+ except KeyError:
+ pass
+ else:
+ del signals[signal]
+ if not signals:
+ # No more signal connections. Therefore, remove the sender.
+ _remove_sender(senderkey)
+
+
+def _remove_sender(senderkey):
+ """Remove ``senderkey`` from connections."""
+ _remove_back_refs(senderkey)
+ try:
+ del connections[senderkey]
+ except KeyError:
+ pass
+ # Senderkey will only be in senders dictionary if sender
+ # could be weakly referenced.
+ try:
+ del senders[senderkey]
+ except:
+ pass
+
+
+def _remove_back_refs(senderkey):
+ """Remove all back-references to this ``senderkey``."""
+ try:
+ signals = connections[senderkey]
+ except KeyError:
+ signals = None
+ else:
+ for signal, receivers in signals.iteritems():
+ for receiver in receivers:
+ _kill_back_ref(receiver, senderkey)
+
+
+def _remove_old_back_refs(senderkey, signal, receiver, receivers):
+ """Kill old ``senders_back`` references from ``receiver``.
+
+ This guards against multiple registration of the same receiver for
+ a given signal and sender leaking memory as old back reference
+ records build up.
+
+ Also removes old receiver instance from receivers.
+ """
+ try:
+ index = receivers.index(receiver)
+ # need to scan back references here and remove senderkey
+ except ValueError:
+ return False
+ else:
+ old_receiver = receivers[index]
+ del receivers[index]
+ found = 0
+ signals = connections.get(signal)
+ if signals is not None:
+ for sig, recs in connections.get(signal, {}).iteritems():
+ if sig != signal:
+ for rec in recs:
+ if rec is old_receiver:
+ found = 1
+ break
+ if not found:
+ _kill_back_ref(old_receiver, senderkey)
+ return True
+ return False
+
+
+def _kill_back_ref(receiver, senderkey):
+ """Do actual removal of back reference from ``receiver`` to
+ ``senderkey``."""
+ receiverkey = id(receiver)
+ senders = senders_back.get(receiverkey, ())
+ while senderkey in senders:
+ try:
+ senders.remove(senderkey)
+ except:
+ break
+ if not senders:
+ try:
+ del senders_back[receiverkey]
+ except KeyError:
+ pass
+ return True
+
+
diff --git a/wlauto/external/louie/error.py b/wlauto/external/louie/error.py
new file mode 100644
index 00000000..04f98ea6
--- /dev/null
+++ b/wlauto/external/louie/error.py
@@ -0,0 +1,22 @@
+"""Error types for Louie."""
+
+
+class LouieError(Exception):
+ """Base class for all Louie errors"""
+
+
+class DispatcherError(LouieError):
+ """Base class for all Dispatcher errors"""
+
+
+class DispatcherKeyError(KeyError, DispatcherError):
+ """Error raised when unknown (sender, signal) specified"""
+
+
+class DispatcherTypeError(TypeError, DispatcherError):
+ """Error raised when inappropriate signal-type specified (None)"""
+
+
+class PluginTypeError(TypeError, LouieError):
+    """Error raised when trying to install more than one plugin of a
+ certain type."""
diff --git a/wlauto/external/louie/plugin.py b/wlauto/external/louie/plugin.py
new file mode 100644
index 00000000..c186f2f9
--- /dev/null
+++ b/wlauto/external/louie/plugin.py
@@ -0,0 +1,108 @@
+"""Common plugins for Louie."""
+
+from louie import dispatcher
+from louie import error
+
+
+def install_plugin(plugin):
+ cls = plugin.__class__
+ for p in dispatcher.plugins:
+ if p.__class__ is cls:
+ raise error.PluginTypeError(
+ 'Plugin of type %r already installed.' % cls)
+ dispatcher.plugins.append(plugin)
+
+def remove_plugin(plugin):
+ dispatcher.plugins.remove(plugin)
+
+
+class Plugin(object):
+ """Base class for Louie plugins.
+
+ Plugins are used to extend or alter the behavior of Louie
+ in a uniform way without having to modify the Louie code
+ itself.
+ """
+
+ def is_live(self, receiver):
+ """Return True if the receiver is still live.
+
+ Only called for receivers who have already been determined to
+ be live by default Louie semantics.
+ """
+ return True
+
+ def wrap_receiver(self, receiver):
+ """Return a callable that passes arguments to the receiver.
+
+ Useful when you want to change the behavior of all receivers.
+ """
+ return receiver
+
+
+class QtWidgetPlugin(Plugin):
+ """A Plugin for Louie that knows how to handle Qt widgets
+ when using PyQt built with SIP 4 or higher.
+
+ Weak references are not useful when dealing with QWidget
+ instances, because even after a QWidget is closed and destroyed,
+ only the C++ object is destroyed. The Python 'shell' object
+ remains, but raises a RuntimeError when an attempt is made to call
+ an underlying QWidget method.
+
+ This plugin alleviates this behavior, and if a QWidget instance is
+ found that is just an empty shell, it prevents Louie from
+ dispatching to any methods on those objects.
+ """
+
+ def __init__(self):
+ try:
+ import qt
+ except ImportError:
+ self.is_live = self._is_live_no_qt
+ else:
+ self.qt = qt
+
+ def is_live(self, receiver):
+ """If receiver is a method on a QWidget, only return True if
+ it hasn't been destroyed."""
+ if (hasattr(receiver, 'im_self') and
+ isinstance(receiver.im_self, self.qt.QWidget)
+ ):
+ try:
+ receiver.im_self.x()
+ except RuntimeError:
+ return False
+ return True
+
+ def _is_live_no_qt(self, receiver):
+ return True
+
+
+class TwistedDispatchPlugin(Plugin):
+ """Plugin for Louie that wraps all receivers in callables
+ that return Twisted Deferred objects.
+
+ When the wrapped receiver is called, it adds a call to the actual
+ receiver to the reactor event loop, and returns a Deferred that is
+ called back with the result.
+ """
+
+ def __init__(self):
+ # Don't import reactor ourselves, but make access to it
+ # easier.
+ from twisted import internet
+ from twisted.internet.defer import Deferred
+ self._internet = internet
+ self._Deferred = Deferred
+
+ def wrap_receiver(self, receiver):
+ def wrapper(*args, **kw):
+ d = self._Deferred()
+ def called(dummy):
+ return receiver(*args, **kw)
+ d.addCallback(called)
+ self._internet.reactor.callLater(0, d.callback, None)
+ return d
+ return wrapper
+
diff --git a/wlauto/external/louie/prioritylist.py b/wlauto/external/louie/prioritylist.py
new file mode 100644
index 00000000..7a6f51eb
--- /dev/null
+++ b/wlauto/external/louie/prioritylist.py
@@ -0,0 +1,128 @@
+"""OrderedList class
+
+This class keeps its elements ordered according to their priority.
+"""
+from collections import defaultdict
+import numbers
+from bisect import insort
+
+class PriorityList(object):
+
+ def __init__(self):
+ """
+        Returns an OrderedReceivers object that externally behaves
+ like a list but it maintains the order of its elements
+ according to their priority.
+ """
+ self.elements = defaultdict(list)
+ self.is_ordered = True
+ self.priorities = []
+ self.size = 0
+ self._cached_elements = None
+
+ def __del__(self):
+ pass
+
+ def __iter__(self):
+ """
+ this method makes PriorityList class iterable
+ """
+ self._order_elements()
+ for priority in reversed(self.priorities): # highest priority first
+ for element in self.elements[priority]:
+ yield element
+
+ def __getitem__(self, index):
+ self._order_elements()
+ return self._to_list()[index]
+
+ def __delitem__(self, index):
+ self._order_elements()
+ if isinstance(index, numbers.Integral):
+ index = int(index)
+ if index < 0:
+ index_range = [len(self)+index]
+ else:
+ index_range = [index]
+ elif isinstance(index, slice):
+ index_range = range(index.start or 0, index.stop, index.step or 1)
+ else:
+ raise ValueError('Invalid index {}'.format(index))
+ current_global_offset = 0
+ priority_counts = {priority : count for (priority, count) in
+ zip(self.priorities, [len(self.elements[p]) for p in self.priorities])}
+ for priority in self.priorities:
+ if not index_range:
+ break
+ priority_offset = 0
+ while index_range:
+ del_index = index_range[0]
+ if priority_counts[priority] + current_global_offset <= del_index:
+ current_global_offset += priority_counts[priority]
+ break
+ within_priority_index = del_index - (current_global_offset + priority_offset)
+ self._delete(priority, within_priority_index)
+ priority_offset += 1
+ index_range.pop(0)
+
+ def __len__(self):
+ return self.size
+
+ def add(self, new_element, priority=0, force_ordering=True):
+ """
+ adds a new item in the list.
+
+ - ``new_element`` the element to be inserted in the PriorityList
+ - ``priority`` is the priority of the element which specifies its
+          order within the List
+ - ``force_ordering`` indicates whether elements should be ordered
+ right now. If set to False, ordering happens on demand (lazy)
+ """
+ self._add_element(new_element, priority)
+ if priority not in self.priorities:
+ self._add_priority(priority, force_ordering)
+
+ def index(self, element):
+ return self._to_list().index(element)
+
+ def remove(self, element):
+ index = self.index(element)
+ self.__delitem__(index)
+
+ def _order_elements(self):
+ if not self.is_ordered:
+ self.priorities = sorted(self.priorities)
+ self.is_ordered = True
+
+ def _to_list(self):
+ if self._cached_elements == None:
+ self._order_elements()
+ self._cached_elements = []
+ for priority in self.priorities:
+ self._cached_elements += self.elements[priority]
+ return self._cached_elements
+
+ def _add_element(self, element, priority):
+ self.elements[priority].append(element)
+ self.size += 1
+ self._cached_elements = None
+
+ def _delete(self, priority, priority_index):
+ del self.elements[priority][priority_index]
+ self.size -= 1
+ if len(self.elements[priority]) == 0:
+ self.priorities.remove(priority)
+ self._cached_elements = None
+
+ def _add_priority(self, priority, force_ordering):
+ if force_ordering and self.is_ordered:
+ insort(self.priorities, priority)
+ elif not force_ordering:
+ self.priorities.append(priority)
+ self.is_ordered = False
+ elif not self.is_ordered:
+ self.priorities.append(priority)
+ self._order_elements()
+ else:
+ raise AssertionError('Should never get here.')
+
diff --git a/wlauto/external/louie/robustapply.py b/wlauto/external/louie/robustapply.py
new file mode 100644
index 00000000..f932b875
--- /dev/null
+++ b/wlauto/external/louie/robustapply.py
@@ -0,0 +1,58 @@
+"""Robust apply mechanism.
+
+Provides a function 'robust_apply', which can sort out what arguments a given
+callable object can take, and subset the given arguments to match only
+those which are acceptable.
+"""
+
+def function(receiver):
+ """Get function-like callable object for given receiver.
+
+ returns (function_or_method, codeObject, fromMethod)
+
+ If fromMethod is true, then the callable already has its first
+ argument bound.
+ """
+ if hasattr(receiver, '__call__'):
+ # receiver is a class instance; assume it is callable.
+ # Reassign receiver to the actual method that will be called.
+ c = receiver.__call__
+ if hasattr(c, 'im_func') or hasattr(c, 'im_code'):
+ receiver = c
+ if hasattr(receiver, 'im_func'):
+ # receiver is an instance-method.
+ return receiver, receiver.im_func.func_code, 1
+ elif not hasattr(receiver, 'func_code'):
+ raise ValueError(
+ 'unknown reciever type %s %s' % (receiver, type(receiver)))
+ return receiver, receiver.func_code, 0
+
+
+def robust_apply(receiver, signature, *arguments, **named):
+ """Call receiver with arguments and appropriate subset of named.
+ ``signature`` is the callable used to determine the call signature
+ of the receiver, in case ``receiver`` is a callable wrapper of the
+ actual receiver."""
+ signature, code_object, startIndex = function(signature)
+ acceptable = code_object.co_varnames[
+ startIndex + len(arguments):
+ code_object.co_argcount
+ ]
+ for name in code_object.co_varnames[
+ startIndex:startIndex + len(arguments)
+ ]:
+ if named.has_key(name):
+ raise TypeError(
+ 'Argument %r specified both positionally '
+ 'and as a keyword for calling %r'
+ % (name, signature)
+ )
+ if not (code_object.co_flags & 8):
+ # fc does not have a **kwds type parameter, therefore
+ # remove unacceptable arguments.
+ for arg in named.keys():
+ if arg not in acceptable:
+ del named[arg]
+ return receiver(*arguments, **named)
+
+
diff --git a/wlauto/external/louie/saferef.py b/wlauto/external/louie/saferef.py
new file mode 100644
index 00000000..c3e98c0a
--- /dev/null
+++ b/wlauto/external/louie/saferef.py
@@ -0,0 +1,179 @@
+"""Refactored 'safe reference' from dispatcher.py"""
+
+import weakref
+import traceback
+
+
+def safe_ref(target, on_delete=None):
+ """Return a *safe* weak reference to a callable target.
+
+ - ``target``: The object to be weakly referenced, if it's a bound
+ method reference, will create a BoundMethodWeakref, otherwise
+ creates a simple weakref.
+
+ - ``on_delete``: If provided, will have a hard reference stored to
+ the callable to be called after the safe reference goes out of
+ scope with the reference object, (either a weakref or a
+ BoundMethodWeakref) as argument.
+ """
+ if hasattr(target, 'im_self'):
+ if target.im_self is not None:
+ # Turn a bound method into a BoundMethodWeakref instance.
+ # Keep track of these instances for lookup by disconnect().
+ assert hasattr(target, 'im_func'), (
+ "safe_ref target %r has im_self, but no im_func, "
+ "don't know how to create reference"
+ % target
+ )
+ reference = BoundMethodWeakref(target=target, on_delete=on_delete)
+ return reference
+ if callable(on_delete):
+ return weakref.ref(target, on_delete)
+ else:
+ return weakref.ref(target)
+
+
+class BoundMethodWeakref(object):
+ """'Safe' and reusable weak references to instance methods.
+
+ BoundMethodWeakref objects provide a mechanism for referencing a
+ bound method without requiring that the method object itself
+ (which is normally a transient object) is kept alive. Instead,
+ the BoundMethodWeakref object keeps weak references to both the
+ object and the function which together define the instance method.
+
+ Attributes:
+
+ - ``key``: The identity key for the reference, calculated by the
+ class's calculate_key method applied to the target instance method.
+
+ - ``deletion_methods``: Sequence of callable objects taking single
+ argument, a reference to this object which will be called when
+ *either* the target object or target function is garbage
+ collected (i.e. when this object becomes invalid). These are
+ specified as the on_delete parameters of safe_ref calls.
+
+ - ``weak_self``: Weak reference to the target object.
+
+ - ``weak_func``: Weak reference to the target function.
+
+ Class Attributes:
+
+ - ``_all_instances``: Class attribute pointing to all live
+ BoundMethodWeakref objects indexed by the class's
+ calculate_key(target) method applied to the target objects.
+ This weak value dictionary is used to short-circuit creation so
+ that multiple references to the same (object, function) pair
+ produce the same BoundMethodWeakref instance.
+ """
+
+ _all_instances = weakref.WeakValueDictionary()
+
+ def __new__(cls, target, on_delete=None, *arguments, **named):
+ """Create new instance or return current instance.
+
+ Basically this method of construction allows us to
+        short-circuit creation of references to already-referenced
+ instance methods. The key corresponding to the target is
+ calculated, and if there is already an existing reference,
+ that is returned, with its deletion_methods attribute updated.
+ Otherwise the new instance is created and registered in the
+ table of already-referenced methods.
+ """
+ key = cls.calculate_key(target)
+ current = cls._all_instances.get(key)
+ if current is not None:
+ current.deletion_methods.append(on_delete)
+ return current
+ else:
+ base = super(BoundMethodWeakref, cls).__new__(cls)
+ cls._all_instances[key] = base
+ base.__init__(target, on_delete, *arguments, **named)
+ return base
+
+ def __init__(self, target, on_delete=None):
+ """Return a weak-reference-like instance for a bound method.
+
+ - ``target``: The instance-method target for the weak reference,
+ must have im_self and im_func attributes and be
+ reconstructable via the following, which is true of built-in
+ instance methods::
+
+ target.im_func.__get__( target.im_self )
+
+ - ``on_delete``: Optional callback which will be called when
+ this weak reference ceases to be valid (i.e. either the
+ object or the function is garbage collected). Should take a
+ single argument, which will be passed a pointer to this
+ object.
+ """
+ def remove(weak, self=self):
+ """Set self.isDead to True when method or instance is destroyed."""
+ methods = self.deletion_methods[:]
+ del self.deletion_methods[:]
+ try:
+ del self.__class__._all_instances[self.key]
+ except KeyError:
+ pass
+ for function in methods:
+ try:
+ if callable(function):
+ function(self)
+ except Exception:
+ try:
+ traceback.print_exc()
+ except AttributeError, e:
+ print ('Exception during saferef %s '
+ 'cleanup function %s: %s' % (self, function, e))
+ self.deletion_methods = [on_delete]
+ self.key = self.calculate_key(target)
+ self.weak_self = weakref.ref(target.im_self, remove)
+ self.weak_func = weakref.ref(target.im_func, remove)
+ self.self_name = str(target.im_self)
+ self.func_name = str(target.im_func.__name__)
+
+ def calculate_key(cls, target):
+ """Calculate the reference key for this reference.
+
+ Currently this is a two-tuple of the id()'s of the target
+ object and the target function respectively.
+ """
+ return (id(target.im_self), id(target.im_func))
+ calculate_key = classmethod(calculate_key)
+
+ def __str__(self):
+ """Give a friendly representation of the object."""
+ return "%s(%s.%s)" % (
+ self.__class__.__name__,
+ self.self_name,
+ self.func_name,
+ )
+
+ __repr__ = __str__
+
+ def __nonzero__(self):
+ """Whether we are still a valid reference."""
+ return self() is not None
+
+ def __cmp__(self, other):
+ """Compare with another reference."""
+ if not isinstance(other, self.__class__):
+ return cmp(self.__class__, type(other))
+ return cmp(self.key, other.key)
+
+ def __call__(self):
+ """Return a strong reference to the bound method.
+
+ If the target cannot be retrieved, then will return None,
+ otherwise returns a bound instance method for our object and
+ function.
+
+ Note: You may call this method any number of times, as it does
+ not invalidate the reference.
+ """
+ target = self.weak_self()
+ if target is not None:
+ function = self.weak_func()
+ if function is not None:
+ return function.__get__(target)
+ return None
diff --git a/wlauto/external/louie/sender.py b/wlauto/external/louie/sender.py
new file mode 100644
index 00000000..aac6c79c
--- /dev/null
+++ b/wlauto/external/louie/sender.py
@@ -0,0 +1,39 @@
+"""Sender classes."""
+
+
+class _SENDER(type):
+ """Base metaclass for sender classes."""
+
+ def __str__(cls):
+ return '<Sender: %s>' % (cls.__name__, )
+
+
+class Any(object):
+    """Used to represent 'any sender'.
+
+ The Any class can be used with connect, disconnect, send, or
+    sendExact to denote that the sender parameter should react to any
+ sender, not just a particular sender.
+ """
+
+ __metaclass__ = _SENDER
+
+
+class Anonymous(object):
+ """Singleton used to signal 'anonymous sender'.
+
+ The Anonymous class is used to signal that the sender of a message
+ is not specified (as distinct from being 'any sender').
+ Registering callbacks for Anonymous will only receive messages
+ sent without senders. Sending with anonymous will only send
+ messages to those receivers registered for Any or Anonymous.
+
+ Note: The default sender for connect is Any, while the default
+ sender for send is Anonymous. This has the effect that if you do
+ not specify any senders in either function then all messages are
+ routed as though there was a single sender (Anonymous) being used
+ everywhere.
+ """
+
+ __metaclass__ = _SENDER
+
diff --git a/wlauto/external/louie/signal.py b/wlauto/external/louie/signal.py
new file mode 100644
index 00000000..0379151a
--- /dev/null
+++ b/wlauto/external/louie/signal.py
@@ -0,0 +1,30 @@
+"""Signal class.
+
+This class is provided as a way to consistently define and document
+signal types. Signal classes also have a useful string
+representation.
+
+Louie does not require you to use a subclass of Signal for signals.
+"""
+
+
+class _SIGNAL(type):
+ """Base metaclass for signal classes."""
+
+ def __str__(cls):
+ return '<Signal: %s>' % (cls.__name__, )
+
+
+class Signal(object):
+
+ __metaclass__ = _SIGNAL
+
+
+class All(Signal):
+ """Used to represent 'all signals'.
+
+ The All class can be used with connect, disconnect, send, or
+ sendExact to denote that the signal should react to all signals,
+ not just a particular signal.
+ """
+
diff --git a/wlauto/external/louie/test/__init__.py b/wlauto/external/louie/test/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/wlauto/external/louie/test/__init__.py
diff --git a/wlauto/external/louie/test/conftest.py b/wlauto/external/louie/test/conftest.py
new file mode 100644
index 00000000..3b241af8
--- /dev/null
+++ b/wlauto/external/louie/test/conftest.py
@@ -0,0 +1,5 @@
+import sys
+import os
+
+sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
+
diff --git a/wlauto/external/louie/test/fixture.py b/wlauto/external/louie/test/fixture.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/wlauto/external/louie/test/fixture.py
diff --git a/wlauto/external/louie/test/test_dispatcher.py b/wlauto/external/louie/test/test_dispatcher.py
new file mode 100644
index 00000000..b58f1c9f
--- /dev/null
+++ b/wlauto/external/louie/test/test_dispatcher.py
@@ -0,0 +1,154 @@
+import unittest
+
+import louie
+from louie import dispatcher
+
+
+def x(a):
+ return a
+
+
+class Dummy(object):
+ pass
+
+
+class Callable(object):
+
+ def __call__(self, a):
+ return a
+
+ def a(self, a):
+ return a
+
+
+class TestDispatcher(unittest.TestCase):
+
+ def setUp(self):
+ louie.reset()
+
+ def _isclean(self):
+ """Assert that everything has been cleaned up automatically"""
+ assert len(dispatcher.senders_back) == 0, dispatcher.senders_back
+ assert len(dispatcher.connections) == 0, dispatcher.connections
+ assert len(dispatcher.senders) == 0, dispatcher.senders
+
+ def test_Exact(self):
+ a = Dummy()
+ signal = 'this'
+ louie.connect(x, signal, a)
+ expected = [(x, a)]
+ result = louie.send('this', a, a=a)
+ assert result == expected, (
+ "Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
+ % (expected, result))
+ louie.disconnect(x, signal, a)
+ assert len(list(louie.get_all_receivers(a, signal))) == 0
+ self._isclean()
+
+ def test_AnonymousSend(self):
+ a = Dummy()
+ signal = 'this'
+ louie.connect(x, signal)
+ expected = [(x, a)]
+ result = louie.send(signal, None, a=a)
+ assert result == expected, (
+ "Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
+ % (expected, result))
+ louie.disconnect(x, signal)
+ assert len(list(louie.get_all_receivers(None, signal))) == 0
+ self._isclean()
+
+ def test_AnyRegistration(self):
+ a = Dummy()
+ signal = 'this'
+ louie.connect(x, signal, louie.Any)
+ expected = [(x, a)]
+ result = louie.send('this', object(), a=a)
+ assert result == expected, (
+ "Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
+ % (expected, result))
+ louie.disconnect(x, signal, louie.Any)
+ expected = []
+ result = louie.send('this', object(), a=a)
+ assert result == expected, (
+ "Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
+ % (expected, result))
+ assert len(list(louie.get_all_receivers(louie.Any, signal))) == 0
+ self._isclean()
+
+ def test_AllRegistration(self):
+ a = Dummy()
+ signal = 'this'
+ louie.connect(x, louie.All, a)
+ expected = [(x, a)]
+ result = louie.send('this', a, a=a)
+ assert result == expected, (
+ "Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
+ % (expected, result))
+ louie.disconnect(x, louie.All, a)
+ assert len(list(louie.get_all_receivers(a, louie.All))) == 0
+ self._isclean()
+
+ def test_GarbageCollected(self):
+ a = Callable()
+ b = Dummy()
+ signal = 'this'
+ louie.connect(a.a, signal, b)
+ expected = []
+ del a
+ result = louie.send('this', b, a=b)
+ assert result == expected, (
+ "Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
+ % (expected, result))
+ assert len(list(louie.get_all_receivers(b, signal))) == 0, (
+ "Remaining handlers: %s" % (louie.get_all_receivers(b, signal),))
+ self._isclean()
+
+ def test_GarbageCollectedObj(self):
+ class x:
+ def __call__(self, a):
+ return a
+ a = Callable()
+ b = Dummy()
+ signal = 'this'
+ louie.connect(a, signal, b)
+ expected = []
+ del a
+ result = louie.send('this', b, a=b)
+ assert result == expected, (
+ "Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
+ % (expected, result))
+ assert len(list(louie.get_all_receivers(b, signal))) == 0, (
+ "Remaining handlers: %s" % (louie.get_all_receivers(b, signal),))
+ self._isclean()
+
+ def test_MultipleRegistration(self):
+ a = Callable()
+ b = Dummy()
+ signal = 'this'
+ louie.connect(a, signal, b)
+ louie.connect(a, signal, b)
+ louie.connect(a, signal, b)
+ louie.connect(a, signal, b)
+ louie.connect(a, signal, b)
+ louie.connect(a, signal, b)
+ result = louie.send('this', b, a=b)
+ assert len(result) == 1, result
+ assert len(list(louie.get_all_receivers(b, signal))) == 1, (
+ "Remaining handlers: %s" % (louie.get_all_receivers(b, signal),))
+ del a
+ del b
+ del result
+ self._isclean()
+
+ def test_robust(self):
+ """Test the sendRobust function."""
+ def fails():
+ raise ValueError('this')
+ a = object()
+ signal = 'this'
+ louie.connect(fails, louie.All, a)
+ result = louie.send_robust('this', a, a=a)
+ err = result[0][1]
+ assert isinstance(err, ValueError)
+ assert err.args == ('this', )
diff --git a/wlauto/external/louie/test/test_plugin.py b/wlauto/external/louie/test/test_plugin.py
new file mode 100644
index 00000000..d8321d31
--- /dev/null
+++ b/wlauto/external/louie/test/test_plugin.py
@@ -0,0 +1,145 @@
+"""Louie plugin tests."""
+
+import unittest
+
+import louie
+
+try:
+ import qt
+ if not hasattr(qt.qApp, 'for_testing'):
+ _app = qt.QApplication([])
+ _app.for_testing = True
+ qt.qApp = _app
+except ImportError:
+ qt = None
+
+
+class ReceiverBase(object):
+
+ def __init__(self):
+ self.args = []
+ self.live = True
+
+ def __call__(self, arg):
+ self.args.append(arg)
+
+class Receiver1(ReceiverBase):
+ pass
+
+class Receiver2(ReceiverBase):
+ pass
+
+
+class Plugin1(louie.Plugin):
+
+ def is_live(self, receiver):
+ """ReceiverBase instances are only live if their `live`
+ attribute is True"""
+ if isinstance(receiver, ReceiverBase):
+ return receiver.live
+ return True
+
+
+class Plugin2(louie.Plugin):
+
+ def is_live(self, receiver):
+ """Pretend all Receiver2 instances are not live."""
+ if isinstance(receiver, Receiver2):
+ return False
+ return True
+
+
+def test_only_one_instance():
+ louie.reset()
+ plugin1a = Plugin1()
+ plugin1b = Plugin1()
+ louie.install_plugin(plugin1a)
+ # XXX: Move these tests into test cases so we can use unittest's
+ # 'assertRaises' method.
+ try:
+ louie.install_plugin(plugin1b)
+ except louie.error.PluginTypeError:
+ pass
+ else:
+ raise Exception('PluginTypeError not raised')
+
+
+def test_is_live():
+ louie.reset()
+ # Create receivers.
+ receiver1a = Receiver1()
+ receiver1b = Receiver1()
+ receiver2a = Receiver2()
+ receiver2b = Receiver2()
+ # Connect signals.
+ louie.connect(receiver1a, 'sig')
+ louie.connect(receiver1b, 'sig')
+ louie.connect(receiver2a, 'sig')
+ louie.connect(receiver2b, 'sig')
+ # Check reception without plugins.
+ louie.send('sig', arg='foo')
+ assert receiver1a.args == ['foo']
+ assert receiver1b.args == ['foo']
+ assert receiver2a.args == ['foo']
+ assert receiver2b.args == ['foo']
+ # Install plugin 1.
+ plugin1 = Plugin1()
+ louie.install_plugin(plugin1)
+ # Make some receivers not live.
+ receiver1a.live = False
+ receiver2b.live = False
+ # Check reception.
+ louie.send('sig', arg='bar')
+ assert receiver1a.args == ['foo']
+ assert receiver1b.args == ['foo', 'bar']
+ assert receiver2a.args == ['foo', 'bar']
+ assert receiver2b.args == ['foo']
+ # Remove plugin 1, install plugin 2.
+ plugin2 = Plugin2()
+ louie.remove_plugin(plugin1)
+ louie.install_plugin(plugin2)
+ # Check reception.
+ louie.send('sig', arg='baz')
+ assert receiver1a.args == ['foo', 'baz']
+ assert receiver1b.args == ['foo', 'bar', 'baz']
+ assert receiver2a.args == ['foo', 'bar']
+ assert receiver2b.args == ['foo']
+ # Install plugin 1 alongside plugin 2.
+ louie.install_plugin(plugin1)
+ # Check reception.
+ louie.send('sig', arg='fob')
+ assert receiver1a.args == ['foo', 'baz']
+ assert receiver1b.args == ['foo', 'bar', 'baz', 'fob']
+ assert receiver2a.args == ['foo', 'bar']
+ assert receiver2b.args == ['foo']
+
+
+if qt is not None:
+ def test_qt_plugin():
+ louie.reset()
+ # Create receivers.
+ class Receiver(qt.QWidget):
+ def __init__(self):
+ qt.QObject.__init__(self)
+ self.args = []
+ def receive(self, arg):
+ self.args.append(arg)
+ receiver1 = Receiver()
+ receiver2 = Receiver()
+ # Connect signals.
+ louie.connect(receiver1.receive, 'sig')
+ louie.connect(receiver2.receive, 'sig')
+ # Destroy receiver2 so only a shell is left.
+ receiver2.close(True)
+ # Check reception without plugins.
+ louie.send('sig', arg='foo')
+ assert receiver1.args == ['foo']
+ assert receiver2.args == ['foo']
+ # Install plugin.
+ plugin = louie.QtWidgetPlugin()
+ louie.install_plugin(plugin)
+ # Check reception with plugins.
+ louie.send('sig', arg='bar')
+ assert receiver1.args == ['foo', 'bar']
+ assert receiver2.args == ['foo']
+
diff --git a/wlauto/external/louie/test/test_prioritydispatcher.py b/wlauto/external/louie/test/test_prioritydispatcher.py
new file mode 100644
index 00000000..061ed07b
--- /dev/null
+++ b/wlauto/external/louie/test/test_prioritydispatcher.py
@@ -0,0 +1,41 @@
+import unittest
+
+import louie
+from louie import dispatcher
+
+class Callable(object):
+
+ def __init__(self, val):
+ self.val = val
+
+ def __call__(self):
+ return self.val
+
+
+one = Callable(1)
+two = Callable(2)
+three = Callable(3)
+
+class TestPriorityDispatcher(unittest.TestCase):
+
+ def test_ConnectNotify(self):
+ louie.connect(
+ two,
+ 'one',
+ priority=200
+ )
+ louie.connect(
+ one,
+ 'one',
+ priority=100
+ )
+ louie.connect(
+ three,
+ 'one',
+ priority=300
+ )
+ result = [ i[1] for i in louie.send('one')]
+ if not result == [1, 2, 3]:
+ print result
+ assert(False)
+
diff --git a/wlauto/external/louie/test/test_prioritylist.py b/wlauto/external/louie/test/test_prioritylist.py
new file mode 100644
index 00000000..7dccc7d4
--- /dev/null
+++ b/wlauto/external/louie/test/test_prioritylist.py
@@ -0,0 +1,62 @@
+import unittest
+
+import louie.prioritylist
+from louie.prioritylist import PriorityList
+
+#def populate_list(plist):
+
+class TestPriorityList(unittest.TestCase):
+
+ def test_Insert(self):
+ pl = PriorityList()
+ elements = {3: "element 3",
+ 2: "element 2",
+ 1: "element 1",
+ 5: "element 5",
+ 4: "element 4"
+ }
+ for key in elements:
+ pl.add(elements[key], priority=key)
+
+ match = zip(sorted(elements.values()), pl[:])
+ for pair in match:
+ assert(pair[0]==pair[1])
+
+ def test_Delete(self):
+ pl = PriorityList()
+ elements = {2: "element 3",
+ 1: "element 2",
+ 0: "element 1",
+ 4: "element 5",
+ 3: "element 4"
+ }
+ for key in elements:
+ pl.add(elements[key], priority=key)
+ del elements[2]
+ del pl[2]
+ match = zip(sorted(elements.values()) , pl[:])
+ for pair in match:
+ assert(pair[0]==pair[1])
+
+ def test_Multiple(self):
+ pl = PriorityList()
+ pl.add('1', 1)
+ pl.add('2.1', 2)
+ pl.add('3', 3)
+ pl.add('2.2', 2)
+ it = iter(pl)
+ assert(it.next() == '1')
+ assert(it.next() == '2.1')
+ assert(it.next() == '2.2')
+ assert(it.next() == '3')
+
+ def test_IteratorBreak(self):
+ pl = PriorityList()
+ pl.add('1', 1)
+ pl.add('2.1', 2)
+ pl.add('3', 3)
+ pl.add('2.2', 2)
+ for i in pl:
+ if i == '2.1':
+ break
+ assert(pl.index('3') == 3)
diff --git a/wlauto/external/louie/test/test_robustapply.py b/wlauto/external/louie/test/test_robustapply.py
new file mode 100644
index 00000000..ce2d9cc6
--- /dev/null
+++ b/wlauto/external/louie/test/test_robustapply.py
@@ -0,0 +1,34 @@
+import unittest
+
+from louie.robustapply import robust_apply
+
+
+def no_argument():
+ pass
+
+
+def one_argument(blah):
+ pass
+
+
+def two_arguments(blah, other):
+ pass
+
+
+class TestRobustApply(unittest.TestCase):
+
+ def test_01(self):
+ robust_apply(no_argument, no_argument)
+
+ def test_02(self):
+ self.assertRaises(TypeError, robust_apply, no_argument, no_argument,
+ 'this' )
+
+ def test_03(self):
+ self.assertRaises(TypeError, robust_apply, one_argument, one_argument)
+
+ def test_04(self):
+ """Raise error on duplication of a particular argument"""
+ self.assertRaises(TypeError, robust_apply, one_argument, one_argument,
+ 'this', blah='that')
+
diff --git a/wlauto/external/louie/test/test_saferef.py b/wlauto/external/louie/test/test_saferef.py
new file mode 100644
index 00000000..778c1c6e
--- /dev/null
+++ b/wlauto/external/louie/test/test_saferef.py
@@ -0,0 +1,83 @@
+import unittest
+
+from louie.saferef import safe_ref
+
+
+class _Sample1(object):
+ def x(self):
+ pass
+
+
+def _sample2(obj):
+ pass
+
+
+class _Sample3(object):
+ def __call__(self, obj):
+ pass
+
+
+class TestSaferef(unittest.TestCase):
+
+ # XXX: The original tests had a test for closure, and it had an
+ # off-by-one problem, perhaps due to scope issues. It has been
+ # removed from this test suite.
+
+ def setUp(self):
+ ts = []
+ ss = []
+ for x in xrange(5000):
+ t = _Sample1()
+ ts.append(t)
+ s = safe_ref(t.x, self._closure)
+ ss.append(s)
+ ts.append(_sample2)
+ ss.append(safe_ref(_sample2, self._closure))
+ for x in xrange(30):
+ t = _Sample3()
+ ts.append(t)
+ s = safe_ref(t, self._closure)
+ ss.append(s)
+ self.ts = ts
+ self.ss = ss
+ self.closure_count = 0
+
+ def tearDown(self):
+ if hasattr(self, 'ts'):
+ del self.ts
+ if hasattr(self, 'ss'):
+ del self.ss
+
+ def test_In(self):
+ """Test the `in` operator for safe references (cmp)"""
+ for t in self.ts[:50]:
+ assert safe_ref(t.x) in self.ss
+
+ def test_Valid(self):
+ """Test that the references are valid (return instance methods)"""
+ for s in self.ss:
+ assert s()
+
+ def test_ShortCircuit(self):
+ """Test that creation short-circuits to reuse existing references"""
+ sd = {}
+ for s in self.ss:
+ sd[s] = 1
+ for t in self.ts:
+ if hasattr(t, 'x'):
+ assert sd.has_key(safe_ref(t.x))
+ else:
+ assert sd.has_key(safe_ref(t))
+
+ def test_Representation(self):
+ """Test that the reference object's representation works
+
+ XXX Doesn't currently check the results, just that no error
+ is raised
+ """
+ repr(self.ss[-1])
+
+ def _closure(self, ref):
+ """Dumb utility mechanism to increment deletion counter"""
+ self.closure_count += 1
+
diff --git a/wlauto/external/louie/version.py b/wlauto/external/louie/version.py
new file mode 100644
index 00000000..e3b0f6a7
--- /dev/null
+++ b/wlauto/external/louie/version.py
@@ -0,0 +1,8 @@
+"""Louie version information."""
+
+
+NAME = 'Louie'
+DESCRIPTION = 'Signal dispatching mechanism'
+VERSION = '1.1'
+
+
diff --git a/wlauto/external/pmu_logger/Makefile b/wlauto/external/pmu_logger/Makefile
new file mode 100755
index 00000000..ca7b2674
--- /dev/null
+++ b/wlauto/external/pmu_logger/Makefile
@@ -0,0 +1,7 @@
+# To build the pmu_logger module use the following command line
+# make ARCH=arm CROSS_COMPILE=arm-linux-gnueabi- -C ../kernel/out SUBDIRS=$PWD modules
+# where
+# CROSS_COMPILE - prefix of the arm linux compiler
+# -C - location of the configured kernel source tree
+
+obj-m := pmu_logger.o \ No newline at end of file
diff --git a/wlauto/external/pmu_logger/README b/wlauto/external/pmu_logger/README
new file mode 100755
index 00000000..9f3952a2
--- /dev/null
+++ b/wlauto/external/pmu_logger/README
@@ -0,0 +1,35 @@
+The pmu_logger module provides the ability to periodically trace CCI PMU counters. The trace destinations can be ftrace buffer and/or kernel logs. This file gives a quick overview of the functionality provided by the module and how to use it.
+
+The pmu_logger module creates a directory in the debugfs filesystem called cci_pmu_logger which can be used to enable/disable the counters and control the events that are counted.
+
+To configure the events being counted write the corresponding event id to the counter* files. The list of CCI PMU events can be found at http://arminfo.emea.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0470d/CJHICFBF.html.
+
+The "period_jiffies" can be used to control the periodicity of tracing. It accepts values in kernel jiffies.
+
+To enable tracing, write a 1 to "control". To disable write another 1 to "control". The files "enable_console" and "enable_ftrace" control where the trace is written to. To check if the counters are currently running or not, you can read the control file.
+
+The current values of the counters can be read from the "values" file.
+
+Eg. To trace A15 and A7 snoop hit rate every 10 jiffies, the following commands are required -
+
+
+trace-cmd reset
+
+echo 0x63 > counter0
+echo 0x6A > counter1
+echo 0x83 > counter2
+echo 0x8A > counter3
+
+echo 10 > period_jiffies
+
+trace-cmd start -b 20000 -e "sched:sched_wakeup"
+
+echo 1 > control
+
+# perform the activity for which you would like to collect the CCI PMU trace.
+
+trace-cmd stop && trace-cmd extract
+
+echo 1 > control
+
+trace-cmd report trace.dat | grep print # shows the trace of the CCI PMU counters along with the cycle counter values. \ No newline at end of file
diff --git a/wlauto/external/pmu_logger/pmu_logger.c b/wlauto/external/pmu_logger/pmu_logger.c
new file mode 100755
index 00000000..47497a10
--- /dev/null
+++ b/wlauto/external/pmu_logger/pmu_logger.c
@@ -0,0 +1,294 @@
+/* Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+/*
+ * pmu_logger.c - Kernel module to log the CCI PMU counters
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/timer.h>
+#include <asm/io.h>
+
+#define MODULE_NAME "cci_pmu_logger"
+
+// CCI_BASE needs to be modified to point to the mapped location of CCI in
+// memory on your device.
+#define CCI_BASE 0x2C090000 // TC2
+//#define CCI_BASE 0x10D20000
+#define CCI_SIZE 0x00010000
+
+#define PMCR 0x100
+
+#define PMCR_CEN (1 << 0)
+#define PMCR_RST (1 << 1)
+#define PMCR_CCR (1 << 2)
+#define PMCR_CCD (1 << 3)
+#define PMCR_EX (1 << 4)
+#define PMCR_DP (1 << 5)
+
+#define CC_BASE 0x9000
+#define PC0_BASE 0xA000
+#define PC1_BASE 0xB000
+#define PC2_BASE 0xC000
+#define PC3_BASE 0xD000
+
+#define PC_ESR 0x0
+#define CNT_VALUE 0x4
+#define CNT_CONTROL 0x8
+
+#define CNT_ENABLE (1 << 0)
+
+u32 counter0_event = 0x6A;
+u32 counter1_event = 0x63;
+u32 counter2_event = 0x8A;
+u32 counter3_event = 0x83;
+
+u32 enable_console = 0;
+u32 enable_ftrace = 1;
+
+void *cci_base = 0;
+
+static struct dentry *module_debugfs_root;
+static int enabled = false;
+
+u32 delay = 10; //jiffies. This translates to 1 sample every 100 ms
+struct timer_list timer;
+
+static void call_after_delay(void)
+{
+ timer.expires = jiffies + delay;
+ add_timer(&timer);
+}
+
+
+static void setup_and_call_after_delay(void (*fn)(unsigned long))
+{
+ init_timer(&timer);
+ timer.data = (unsigned long)&timer;
+ timer.function = fn;
+
+ call_after_delay();
+}
+
+static void print_counter_configuration(void)
+{
+ if (enable_ftrace)
+ trace_printk("Counter_0: %02x Counter_1: %02x Counter_2: %02x Counter_3: %02x\n", \
+ counter0_event, counter1_event, counter2_event, counter3_event);
+
+ if (enable_console)
+ printk("Counter_0: %02x Counter_1: %02x Counter_2: %02x Counter_3: %02x\n", \
+ counter0_event, counter1_event, counter2_event, counter3_event);
+}
+
+static void initialize_cci_pmu(void)
+{
+ u32 val;
+
+ // Select the events counted
+ iowrite32(counter0_event, cci_base + PC0_BASE + PC_ESR);
+ iowrite32(counter1_event, cci_base + PC1_BASE + PC_ESR);
+ iowrite32(counter2_event, cci_base + PC2_BASE + PC_ESR);
+ iowrite32(counter3_event, cci_base + PC3_BASE + PC_ESR);
+
+ // Enable the individual PMU counters
+ iowrite32(CNT_ENABLE, cci_base + PC0_BASE + CNT_CONTROL);
+ iowrite32(CNT_ENABLE, cci_base + PC1_BASE + CNT_CONTROL);
+ iowrite32(CNT_ENABLE, cci_base + PC2_BASE + CNT_CONTROL);
+ iowrite32(CNT_ENABLE, cci_base + PC3_BASE + CNT_CONTROL);
+ iowrite32(CNT_ENABLE, cci_base + CC_BASE + CNT_CONTROL);
+
+ // Reset the counters and configure the Cycle Count Divider
+ val = ioread32(cci_base + PMCR);
+ iowrite32(val | PMCR_RST | PMCR_CCR | PMCR_CCD, cci_base + PMCR);
+}
+
+static void enable_cci_pmu_counters(void)
+{
+ u32 val = ioread32(cci_base + PMCR);
+ iowrite32(val | PMCR_CEN, cci_base + PMCR);
+}
+
+static void disable_cci_pmu_counters(void)
+{
+ u32 val = ioread32(cci_base + PMCR);
+ iowrite32(val & ~PMCR_CEN, cci_base + PMCR);
+}
+
+static void trace_values(unsigned long arg)
+{
+ u32 cycles;
+ u32 counter[4];
+
+ cycles = ioread32(cci_base + CC_BASE + CNT_VALUE);
+ counter[0] = ioread32(cci_base + PC0_BASE + CNT_VALUE);
+ counter[1] = ioread32(cci_base + PC1_BASE + CNT_VALUE);
+ counter[2] = ioread32(cci_base + PC2_BASE + CNT_VALUE);
+ counter[3] = ioread32(cci_base + PC3_BASE + CNT_VALUE);
+
+ if (enable_ftrace)
+ trace_printk("Cycles: %08x Counter_0: %08x"
+ " Counter_1: %08x Counter_2: %08x Counter_3: %08x\n", \
+ cycles, counter[0], counter[1], counter[2], counter[3]);
+
+ if (enable_console)
+ printk("Cycles: %08x Counter_0: %08x"
+ " Counter_1: %08x Counter_2: %08x Counter_3: %08x\n", \
+ cycles, counter[0], counter[1], counter[2], counter[3]);
+
+ if (enabled) {
+ u32 val;
+ // Reset the counters
+ val = ioread32(cci_base + PMCR);
+ iowrite32(val | PMCR_RST | PMCR_CCR, cci_base + PMCR);
+
+ call_after_delay();
+ }
+}
+
+static ssize_t read_control(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ char status[16];
+ /* printk(KERN_DEBUG "%s\n", __func__); */
+
+ if (enabled)
+ snprintf(status, 16, "enabled\n");
+ else
+ snprintf(status, 16, "disabled\n");
+
+ return simple_read_from_buffer(buf, count, ppos, status, strlen(status));
+}
+
+static ssize_t write_control(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+{
+ if (enabled) {
+ disable_cci_pmu_counters();
+ enabled = false;
+ } else {
+ initialize_cci_pmu();
+ enable_cci_pmu_counters();
+ enabled = true;
+
+ print_counter_configuration();
+ setup_and_call_after_delay(trace_values);
+ }
+
+ return count;
+}
+
+static ssize_t read_values(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ char values[256];
+ /* u32 val; */
+
+ snprintf(values, 256, "Cycles: %08x Counter_0: %08x"
+ " Counter_1: %08x Counter_2: %08x Counter_3: %08x\n", \
+ ioread32(cci_base + CC_BASE + CNT_VALUE), \
+ ioread32(cci_base + PC0_BASE + CNT_VALUE), \
+ ioread32(cci_base + PC1_BASE + CNT_VALUE), \
+ ioread32(cci_base + PC2_BASE + CNT_VALUE), \
+ ioread32(cci_base + PC3_BASE + CNT_VALUE));
+
+ return simple_read_from_buffer(buf, count, ppos, values, strlen(values));
+}
+
+static const struct file_operations control_fops = {
+ .owner = THIS_MODULE,
+ .read = read_control,
+ .write = write_control,
+};
+
+static const struct file_operations value_fops = {
+ .owner = THIS_MODULE,
+ .read = read_values,
+};
+
+static int __init pmu_logger_init(void)
+{
+ struct dentry *retval;
+
+ module_debugfs_root = debugfs_create_dir(MODULE_NAME, NULL);
+ if (!module_debugfs_root || IS_ERR(module_debugfs_root)) {
+ printk(KERN_ERR "error creating debugfs dir.\n");
+ goto out;
+ }
+
+ retval = debugfs_create_file("control", S_IRUGO | S_IWUGO, module_debugfs_root, NULL, &control_fops);
+ if (!retval)
+ goto out;
+
+ retval = debugfs_create_file("values", S_IRUGO, module_debugfs_root, NULL, &value_fops);
+ if (!retval)
+ goto out;
+
+ retval = debugfs_create_bool("enable_console", S_IRUGO | S_IWUGO, module_debugfs_root, &enable_console);
+ if (!retval)
+ goto out;
+
+ retval = debugfs_create_bool("enable_ftrace", S_IRUGO | S_IWUGO, module_debugfs_root, &enable_ftrace);
+ if (!retval)
+ goto out;
+
+ retval = debugfs_create_u32("period_jiffies", S_IRUGO | S_IWUGO, module_debugfs_root, &delay);
+ if (!retval)
+ goto out;
+
+ retval = debugfs_create_x32("counter0", S_IRUGO | S_IWUGO, module_debugfs_root, &counter0_event);
+ if (!retval)
+ goto out;
+ retval = debugfs_create_x32("counter1", S_IRUGO | S_IWUGO, module_debugfs_root, &counter1_event);
+ if (!retval)
+ goto out;
+ retval = debugfs_create_x32("counter2", S_IRUGO | S_IWUGO, module_debugfs_root, &counter2_event);
+ if (!retval)
+ goto out;
+ retval = debugfs_create_x32("counter3", S_IRUGO | S_IWUGO, module_debugfs_root, &counter3_event);
+ if (!retval)
+ goto out;
+
+ cci_base = ioremap(CCI_BASE, CCI_SIZE);
+ if (!cci_base)
+ goto out;
+
+ printk(KERN_INFO "CCI PMU Logger loaded.\n");
+ return 0;
+
+out:
+ debugfs_remove_recursive(module_debugfs_root);
+ return 1;
+}
+
+static void __exit pmu_logger_exit(void)
+{
+ if (module_debugfs_root) {
+ debugfs_remove_recursive(module_debugfs_root);
+ module_debugfs_root = NULL;
+ }
+ if (cci_base)
+ iounmap(cci_base);
+
+ printk(KERN_INFO "CCI PMU Logger removed.\n");
+}
+
+module_init(pmu_logger_init);
+module_exit(pmu_logger_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Punit Agrawal");
+MODULE_DESCRIPTION("logger for CCI PMU counters");
diff --git a/wlauto/external/pmu_logger/pmu_logger.ko b/wlauto/external/pmu_logger/pmu_logger.ko
new file mode 100644
index 00000000..84164383
--- /dev/null
+++ b/wlauto/external/pmu_logger/pmu_logger.ko
Binary files differ
diff --git a/wlauto/external/readenergy/Makefile b/wlauto/external/readenergy/Makefile
new file mode 100644
index 00000000..76a25594
--- /dev/null
+++ b/wlauto/external/readenergy/Makefile
@@ -0,0 +1,11 @@
+# To build:
+#
+# CROSS_COMPILE=aarch64-linux-gnu- make
+#
+CROSS_COMPILE?=aarch64-linux-gnu-
+CC=$(CROSS_COMPILE)gcc
+CFLAGS='-Wl,-static -Wl,-lc'
+
+readenergy: readenergy.c
+ $(CC) $(CFLAGS) readenergy.c -o readenergy
+ cp readenergy ../../instrumentation/juno_energy/readenergy
diff --git a/wlauto/external/readenergy/readenergy b/wlauto/external/readenergy/readenergy
new file mode 100755
index 00000000..c26991c2
--- /dev/null
+++ b/wlauto/external/readenergy/readenergy
Binary files differ
diff --git a/wlauto/external/readenergy/readenergy.c b/wlauto/external/readenergy/readenergy.c
new file mode 100644
index 00000000..cc945f7f
--- /dev/null
+++ b/wlauto/external/readenergy/readenergy.c
@@ -0,0 +1,345 @@
+/* Copyright 2014-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+/*
+ * readenergy.c
+ *
+ * Reads APB energy registers in Juno and outputs the measurements (converted to appropriate units).
+ *
+*/
+#include <errno.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+
+// The following values obtained from Juno TRM 2014/03/04 section 4.5
+
+// Location of APB registers in memory
+#define APB_BASE_MEMORY 0x1C010000
+// APB energy counters start at offset 0xD0 from the base APB address.
+#define BASE_INDEX 0xD0 / 4
+// the one-past last APB counter
+#define APB_SIZE 0x120
+
+// Masks specifying the bits that contain the actual counter values
+#define CMASK 0xFFF
+#define VMASK 0xFFF
+#define PMASK 0xFFFFFF
+
+// Scaling factor (divisor) for getting measured values from counters
+#define SYS_ADC_CH0_PM1_SYS_SCALE 761
+#define SYS_ADC_CH1_PM2_A57_SCALE 381
+#define SYS_ADC_CH2_PM3_A53_SCALE 761
+#define SYS_ADC_CH3_PM4_GPU_SCALE 381
+#define SYS_ADC_CH4_VSYS_SCALE 1622
+#define SYS_ADC_CH5_VA57_SCALE 1622
+#define SYS_ADC_CH6_VA53_SCALE 1622
+#define SYS_ADC_CH7_VGPU_SCALE 1622
+#define SYS_POW_CH04_SYS_SCALE (SYS_ADC_CH0_PM1_SYS_SCALE * SYS_ADC_CH4_VSYS_SCALE)
+#define SYS_POW_CH15_A57_SCALE (SYS_ADC_CH1_PM2_A57_SCALE * SYS_ADC_CH5_VA57_SCALE)
+#define SYS_POW_CH26_A53_SCALE (SYS_ADC_CH2_PM3_A53_SCALE * SYS_ADC_CH6_VA53_SCALE)
+#define SYS_POW_CH37_GPU_SCALE (SYS_ADC_CH3_PM4_GPU_SCALE * SYS_ADC_CH7_VGPU_SCALE)
+#define SYS_ENM_CH0_SYS_SCALE 12348030000
+#define SYS_ENM_CH1_A57_SCALE 6174020000
+#define SYS_ENM_CH0_A53_SCALE 12348030000
+#define SYS_ENM_CH0_GPU_SCALE 6174020000
+
+// Original values prior to re-calibration.
+/*#define SYS_ADC_CH0_PM1_SYS_SCALE 819.2*/
+/*#define SYS_ADC_CH1_PM2_A57_SCALE 409.6*/
+/*#define SYS_ADC_CH2_PM3_A53_SCALE 819.2*/
+/*#define SYS_ADC_CH3_PM4_GPU_SCALE 409.6*/
+/*#define SYS_ADC_CH4_VSYS_SCALE 1638.4*/
+/*#define SYS_ADC_CH5_VA57_SCALE 1638.4*/
+/*#define SYS_ADC_CH6_VA53_SCALE 1638.4*/
+/*#define SYS_ADC_CH7_VGPU_SCALE 1638.4*/
+/*#define SYS_POW_CH04_SYS_SCALE (SYS_ADC_CH0_PM1_SYS_SCALE * SYS_ADC_CH4_VSYS_SCALE)*/
+/*#define SYS_POW_CH15_A57_SCALE (SYS_ADC_CH1_PM2_A57_SCALE * SYS_ADC_CH5_VA57_SCALE)*/
+/*#define SYS_POW_CH26_A53_SCALE (SYS_ADC_CH2_PM3_A53_SCALE * SYS_ADC_CH6_VA53_SCALE)*/
+/*#define SYS_POW_CH37_GPU_SCALE (SYS_ADC_CH3_PM4_GPU_SCALE * SYS_ADC_CH7_VGPU_SCALE)*/
+/*#define SYS_ENM_CH0_SYS_SCALE 13421772800.0*/
+/*#define SYS_ENM_CH1_A57_SCALE 6710886400.0*/
+/*#define SYS_ENM_CH0_A53_SCALE 13421772800.0*/
+/*#define SYS_ENM_CH0_GPU_SCALE 6710886400.0*/
+
+// Ignore individual errors, but if we see too many, abort.
+#define ERROR_THRESHOLD 10
+
+// Default counter poll period (in milliseconds).
+#define DEFAULT_PERIOD 100
+
+// A single reading from the energy meter. The values are the proper readings converted
+// to appropriate units (e.g. Watts for power); they are *not* raw counter values.
+struct reading
+{
+ double sys_adc_ch0_pm1_sys;
+ double sys_adc_ch1_pm2_a57;
+ double sys_adc_ch2_pm3_a53;
+ double sys_adc_ch3_pm4_gpu;
+ double sys_adc_ch4_vsys;
+ double sys_adc_ch5_va57;
+ double sys_adc_ch6_va53;
+ double sys_adc_ch7_vgpu;
+ double sys_pow_ch04_sys;
+ double sys_pow_ch15_a57;
+ double sys_pow_ch26_a53;
+ double sys_pow_ch37_gpu;
+ double sys_enm_ch0_sys;
+ double sys_enm_ch1_a57;
+ double sys_enm_ch0_a53;
+ double sys_enm_ch0_gpu;
+};
+
+inline uint64_t join_64bit_register(uint32_t *buffer, int index)
+{
+ uint64_t result = 0;
+ result |= buffer[index];
+ result |= (uint64_t)(buffer[index+1]) << 32;
+ return result;
+}
+
+int nsleep(const struct timespec *req, struct timespec *rem)
+{
+ struct timespec temp_rem;
+ if (nanosleep(req, rem) == -1)
+ {
+ if (errno == EINTR)
+ {
+ nsleep(rem, &temp_rem);
+ }
+ else
+ {
+ return errno;
+ }
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+void print_help()
+{
+ fprintf(stderr, "Usage: readenergy [-t PERIOD] -o OUTFILE\n\n"
+ "Read Juno energy counters every PERIOD milliseconds, writing them\n"
+ "to OUTFILE in CSV format until SIGTERM is received.\n\n"
+ "Parameters:\n"
+ " PERIOD is the counter poll period in milliseconds.\n"
+ " (Defaults to 100 milliseconds.)\n"
+ " OUTFILE is the output file path\n");
+}
+
+// debugging only...
+inline void dprint(char *msg)
+{
+ fprintf(stderr, "%s\n", msg);
+ sync();
+}
+
+// -------------------------------------- config ----------------------------------------------------
+
+struct config
+{
+ struct timespec period;
+ char *output_file;
+};
+
+void config_init_period_from_millis(struct config *this, long millis)
+{
+ this->period.tv_sec = (time_t)(millis / 1000);
+ this->period.tv_nsec = (millis % 1000) * 1000000;
+}
+
+void config_init(struct config *this, int argc, char *argv[])
+{
+ this->output_file = NULL;
+ config_init_period_from_millis(this, DEFAULT_PERIOD);
+
+ int opt;
+ while ((opt = getopt(argc, argv, "ht:o:")) != -1)
+ {
+ switch(opt)
+ {
+ case 't':
+ config_init_period_from_millis(this, atol(optarg));
+ break;
+ case 'o':
+ this->output_file = optarg;
+ break;
+ case 'h':
+ print_help();
+ exit(EXIT_SUCCESS);
+ break;
+ default:
+ fprintf(stderr, "ERROR: Unexpected option %s\n\n", opt);
+ print_help();
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ if (this->output_file == NULL)
+ {
+ fprintf(stderr, "ERROR: Mandatory -o option not specified.\n\n");
+ print_help();
+ exit(EXIT_FAILURE);
+ }
+}
+
+// -------------------------------------- /config ---------------------------------------------------
+
+// -------------------------------------- emeter ----------------------------------------------------
+
+struct emeter
+{
+ int fd;
+ FILE *out;
+ void *mmap_base;
+};
+
+void emeter_init(struct emeter *this, char *outfile)
+{
+ this->out = fopen(outfile, "w");
+ if (this->out == NULL)
+ {
+ fprintf(stderr, "ERROR: Could not open output file %s; got %s\n", outfile, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ this->fd = open("/dev/mem", O_RDONLY);
+ if(this->fd < 0)
+ {
+ fprintf(stderr, "ERROR: Can't open /dev/mem; got %s\n", strerror(errno));
+ fclose(this->out);
+ exit(EXIT_FAILURE);
+ }
+
+ this->mmap_base = mmap(NULL, APB_SIZE, PROT_READ, MAP_SHARED, this->fd, APB_BASE_MEMORY);
+ if (this->mmap_base == MAP_FAILED)
+ {
+ fprintf(stderr, "ERROR: mmap failed; got %s\n", strerror(errno));
+ close(this->fd);
+ fclose(this->out);
+ exit(EXIT_FAILURE);
+ }
+
+ fprintf(this->out, "sys_curr,a57_curr,a53_curr,gpu_curr,"
+ "sys_volt,a57_volt,a53_volt,gpu_volt,"
+ "sys_pow,a57_pow,a53_pow,gpu_pow,"
+ "sys_cenr,a57_cenr,a53_cenr,gpu_cenr\n");
+}
+
+void emeter_read_measurements(struct emeter *this, struct reading *reading)
+{
+ uint32_t *buffer = (uint32_t *)this->mmap_base;
+ reading->sys_adc_ch0_pm1_sys = (double)(CMASK & buffer[BASE_INDEX+0]) / SYS_ADC_CH0_PM1_SYS_SCALE;
+ reading->sys_adc_ch1_pm2_a57 = (double)(CMASK & buffer[BASE_INDEX+1]) / SYS_ADC_CH1_PM2_A57_SCALE;
+ reading->sys_adc_ch2_pm3_a53 = (double)(CMASK & buffer[BASE_INDEX+2]) / SYS_ADC_CH2_PM3_A53_SCALE;
+ reading->sys_adc_ch3_pm4_gpu = (double)(CMASK & buffer[BASE_INDEX+3]) / SYS_ADC_CH3_PM4_GPU_SCALE;
+ reading->sys_adc_ch4_vsys = (double)(VMASK & buffer[BASE_INDEX+4]) / SYS_ADC_CH4_VSYS_SCALE;
+ reading->sys_adc_ch5_va57 = (double)(VMASK & buffer[BASE_INDEX+5]) / SYS_ADC_CH5_VA57_SCALE;
+ reading->sys_adc_ch6_va53 = (double)(VMASK & buffer[BASE_INDEX+6]) / SYS_ADC_CH6_VA53_SCALE;
+ reading->sys_adc_ch7_vgpu = (double)(VMASK & buffer[BASE_INDEX+7]) / SYS_ADC_CH7_VGPU_SCALE;
+ reading->sys_pow_ch04_sys = (double)(PMASK & buffer[BASE_INDEX+8]) / SYS_POW_CH04_SYS_SCALE;
+ reading->sys_pow_ch15_a57 = (double)(PMASK & buffer[BASE_INDEX+9]) / SYS_POW_CH15_A57_SCALE;
+ reading->sys_pow_ch26_a53 = (double)(PMASK & buffer[BASE_INDEX+10]) / SYS_POW_CH26_A53_SCALE;
+ reading->sys_pow_ch37_gpu = (double)(PMASK & buffer[BASE_INDEX+11]) / SYS_POW_CH37_GPU_SCALE;
+ reading->sys_enm_ch0_sys = (double)join_64bit_register(buffer, BASE_INDEX+12) / SYS_ENM_CH0_SYS_SCALE;
+ reading->sys_enm_ch1_a57 = (double)join_64bit_register(buffer, BASE_INDEX+14) / SYS_ENM_CH1_A57_SCALE;
+ reading->sys_enm_ch0_a53 = (double)join_64bit_register(buffer, BASE_INDEX+16) / SYS_ENM_CH0_A53_SCALE;
+ reading->sys_enm_ch0_gpu = (double)join_64bit_register(buffer, BASE_INDEX+18) / SYS_ENM_CH0_GPU_SCALE;
+}
+
+void emeter_take_reading(struct emeter *this)
+{
+ static struct reading reading;
+ int error_count = 0;
+ emeter_read_measurements(this, &reading);
+ int ret = fprintf(this->out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",
+ reading.sys_adc_ch0_pm1_sys,
+ reading.sys_adc_ch1_pm2_a57,
+ reading.sys_adc_ch2_pm3_a53,
+ reading.sys_adc_ch3_pm4_gpu,
+ reading.sys_adc_ch4_vsys,
+ reading.sys_adc_ch5_va57,
+ reading.sys_adc_ch6_va53,
+ reading.sys_adc_ch7_vgpu,
+ reading.sys_pow_ch04_sys,
+ reading.sys_pow_ch15_a57,
+ reading.sys_pow_ch26_a53,
+ reading.sys_pow_ch37_gpu,
+ reading.sys_enm_ch0_sys,
+ reading.sys_enm_ch1_a57,
+ reading.sys_enm_ch0_a53,
+ reading.sys_enm_ch0_gpu);
+ if (ret < 0)
+ {
+ fprintf(stderr, "ERROR: while writing a meter reading: %s\n", strerror(errno));
+ if (++error_count > ERROR_THRESHOLD)
+ exit(EXIT_FAILURE);
+ }
+}
+
+void emeter_finalize(struct emeter *this)
+{
+ if (munmap(this->mmap_base, APB_SIZE) == -1)
+ {
+ // Report the error but don't bother doing anything else, as we're not gonna do
+ // anything with emeter after this point anyway.
+ fprintf(stderr, "ERROR: munmap failed; got %s\n", strerror(errno));
+ }
+ close(this->fd);
+ fclose(this->out);
+}
+
+// -------------------------------------- /emeter ----------------------------------------------------
+
+int done = 0;
+
+void term_handler(int signum)
+{
+ done = 1;
+}
+
+int main(int argc, char *argv[])
+{
+ struct sigaction action;
+ memset(&action, 0, sizeof(struct sigaction));
+ action.sa_handler = term_handler;
+ sigaction(SIGTERM, &action, NULL);
+
+ struct config config;
+ struct emeter emeter;
+ config_init(&config, argc, argv);
+ emeter_init(&emeter, config.output_file);
+
+ struct timespec remaining;
+ while (!done)
+ {
+ emeter_take_reading(&emeter);
+ nsleep(&config.period, &remaining);
+ }
+
+ emeter_finalize(&emeter);
+ return EXIT_SUCCESS;
+}
diff --git a/wlauto/external/revent/Makefile b/wlauto/external/revent/Makefile
new file mode 100644
index 00000000..dbbfea75
--- /dev/null
+++ b/wlauto/external/revent/Makefile
@@ -0,0 +1,12 @@
+# CROSS_COMPILE=aarch64-linux-gnu- make
+#
+CC=gcc
+CFLAGS=-static -lc
+
+revent: revent.c
+ $(CROSS_COMPILE)$(CC) $(CFLAGS) revent.c -o revent
+
+clean:
+ rm -rf revent
+
+.PHONY: clean
diff --git a/wlauto/external/revent/revent.c b/wlauto/external/revent/revent.c
new file mode 100644
index 00000000..368e0617
--- /dev/null
+++ b/wlauto/external/revent/revent.c
@@ -0,0 +1,598 @@
+/* Copyright 2012-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <linux/input.h>
+
+#ifdef ANDROID
+#include <android/log.h>
+#endif
+
+
+#define die(args...) do { \
+ fprintf(stderr, "ERROR: "); \
+ fprintf(stderr, args); \
+ exit(EXIT_FAILURE); \
+} while(0)
+
+#define dprintf(args...) if (verbose) printf(args)
+
+
+#define INPDEV_MAX_DEVICES 16
+#define INPDEV_MAX_PATH 30
+
+
+#ifndef ANDROID
/*
 * Bounded string copy shim for platforms without a native strlcpy (bionic
 * provides one, hence the #ifndef ANDROID guard around this definition).
 * Copies at most size-1 bytes and always NUL-terminates dest when size > 0.
 * Returns size, preserving this shim's original contract (all callers in
 * this file ignore the return value).  The original wrote dest[-1] and
 * passed (size_t)-1 to strncpy when size == 0; that case is now a no-op.
 */
int strlcpy(char *dest, const char *source, size_t size)
{
    if (size == 0)
        return 0;  /* cannot safely write anything, not even the terminator */
    strncpy(dest, source, size - 1);
    dest[size - 1] = '\0';
    return size;
}
+#endif
+
+typedef enum {
+ FALSE=0,
+ TRUE
+} bool_t;
+
+typedef enum {
+ RECORD=0,
+ REPLAY,
+ DUMP,
+ INFO,
+ INVALID
+} revent_mode_t;
+
+typedef struct {
+ revent_mode_t mode;
+ int record_time;
+ int device_number;
+ char *file;
+} revent_args_t;
+
+typedef struct {
+ size_t id_pathc; /* Count of total paths so far. */
+ char id_pathv[INPDEV_MAX_DEVICES][INPDEV_MAX_PATH]; /* List of paths matching pattern. */
+} inpdev_t;
+
+typedef struct {
+ int dev_idx;
+ struct input_event event;
+} replay_event_t;
+
+typedef struct {
+ int num_fds;
+ int num_events;
+ int *fds;
+ replay_event_t *events;
+} replay_buffer_t;
+
+
+bool_t verbose = FALSE;
+
+
+bool_t is_numeric(char *string)
+{
+ int len = strlen(string);
+
+ int i = 0;
+ while(i < len)
+ {
+ if(!isdigit(string[i]))
+ return FALSE;
+ i++;
+ }
+
+ return TRUE;
+}
+
+off_t get_file_size(const char *filename) {
+ struct stat st;
+
+ if (stat(filename, &st) == 0)
+ return st.st_size;
+
+ die("Cannot determine size of %s: %s\n", filename, strerror(errno));
+}
+
+int inpdev_init(inpdev_t **inpdev, int devid)
+{
+ int i;
+ int fd;
+ int num_devices;
+
+ *inpdev = malloc(sizeof(inpdev_t));
+ (*inpdev)->id_pathc = 0;
+
+ if (devid == -1) {
+ // device id was not specified so we want to record from all available input devices.
+ for(i = 0; i < INPDEV_MAX_DEVICES; ++i)
+ {
+ sprintf((*inpdev)->id_pathv[(*inpdev)->id_pathc], "/dev/input/event%d", i);
+ fd = open((*inpdev)->id_pathv[(*inpdev)->id_pathc], O_RDONLY);
+ if(fd > 0)
+ {
+ close(fd);
+ dprintf("opened %s\n", (*inpdev)->id_pathv[(*inpdev)->id_pathc]);
+ (*inpdev)->id_pathc++;
+ }
+ else
+ {
+ dprintf("could not open %s\n", (*inpdev)->id_pathv[(*inpdev)->id_pathc]);
+ }
+ }
+ }
+ else {
+ // device id was specified so record just that device.
+ sprintf((*inpdev)->id_pathv[0], "/dev/input/event%d", devid);
+ fd = open((*inpdev)->id_pathv[0], O_RDONLY);
+ if(fd > 0)
+ {
+ close(fd);
+ dprintf("opened %s\n", (*inpdev)->id_pathv[0]);
+ (*inpdev)->id_pathc++;
+ }
+ else
+ {
+ die("could not open %s\n", (*inpdev)->id_pathv[0]);
+ }
+ }
+
+ return 0;
+}
+
+int inpdev_close(inpdev_t *inpdev)
+{
+ free(inpdev);
+ return 0;
+}
+
/* Query an evdev node for its self-reported name and print it to stdout.
 * Fatal (die) if the node cannot be opened or the ioctl is rejected. */
void printDevProperties(const char* aDev)
{
    char name[256] = "Unknown";

    int fd = open(aDev, O_RDONLY);
    if (fd < 0)
        die("could not open %s\n", aDev);

    if (ioctl(fd, EVIOCGNAME(sizeof(name)), name) < 0)
        die("evdev ioctl failed on %s\n", aDev);

    printf("The device on %s says its name is %s\n", aDev, name);
    close(fd);
}
+
+void dump(const char *logfile)
+{
+ int fdin = open(logfile, O_RDONLY);
+ if (fdin < 0) die("Could not open eventlog %s\n", logfile);
+
+ int nfds;
+ size_t rb = read(fdin, &nfds, sizeof(nfds));
+ if (rb != sizeof(nfds)) die("problems reading eventlog\n");
+ int *fds = malloc(sizeof(int)*nfds);
+ if (!fds) die("out of memory\n");
+
+ int len;
+ int i;
+ char buf[INPDEV_MAX_PATH];
+
+ inpdev_t *inpdev = malloc(sizeof(inpdev_t));
+ inpdev->id_pathc = 0;
+ for (i=0; i<nfds; i++) {
+ memset(buf, 0, sizeof(buf));
+ rb = read(fdin, &len, sizeof(len));
+ if (rb != sizeof(len)) die("problems reading eventlog\n");
+ rb = read(fdin, &buf[0], len);
+ if (rb != len) die("problems reading eventlog\n");
+ strlcpy(inpdev->id_pathv[inpdev->id_pathc], buf, INPDEV_MAX_PATH);
+ inpdev->id_pathv[inpdev->id_pathc][INPDEV_MAX_PATH-1] = '\0';
+ inpdev->id_pathc++;
+ }
+
+ struct input_event ev;
+ int count = 0;
+ while(1) {
+ int idx;
+ rb = read(fdin, &idx, sizeof(idx));
+ if (rb != sizeof(idx)) break;
+ rb = read(fdin, &ev, sizeof(ev));
+ if (rb < (int)sizeof(ev)) break;
+
+ printf("%10u.%-6u %30s type %2d code %3d value %4d\n",
+ (unsigned int)ev.time.tv_sec, (unsigned int)ev.time.tv_usec,
+ inpdev->id_pathv[idx], ev.type, ev.code, ev.value);
+ count++;
+ }
+
+ printf("\nTotal: %d events\n", count);
+ close(fdin);
+ free(inpdev);
+}
+
+int replay_buffer_init(replay_buffer_t **buffer, const char *logfile)
+{
+ *buffer = malloc(sizeof(replay_buffer_t));
+ replay_buffer_t *buff = *buffer;
+ off_t fsize = get_file_size(logfile);
+ buff->events = (replay_event_t *)malloc((size_t)fsize);
+ if (!buff->events)
+ die("out of memory\n");
+
+ int fdin = open(logfile, O_RDONLY);
+ if (fdin < 0)
+ die("Could not open eventlog %s\n", logfile);
+
+ size_t rb = read(fdin, &(buff->num_fds), sizeof(buff->num_fds));
+ if (rb!=sizeof(buff->num_fds))
+ die("problems reading eventlog\n");
+
+ buff->fds = malloc(sizeof(int) * buff->num_fds);
+ if (!buff->fds)
+ die("out of memory\n");
+
+ int len, i;
+ char path_buff[256]; // should be more than enough
+ for (i = 0; i < buff->num_fds; i++) {
+ memset(path_buff, 0, sizeof(path_buff));
+ rb = read(fdin, &len, sizeof(len));
+ if (rb!=sizeof(len))
+ die("problems reading eventlog\n");
+
+ rb = read(fdin, &path_buff[0], len);
+ if (rb != len)
+ die("problems reading eventlog\n");
+
+ buff->fds[i] = open(path_buff, O_WRONLY | O_NDELAY);
+ if (buff->fds[i] < 0)
+ die("could not open device file %s\n", path_buff);
+ }
+
+ struct timeval start_time;
+ replay_event_t rep_ev;
+ buff->num_events = 0;
+ while(1) {
+ int idx;
+ rb = read(fdin, &rep_ev, sizeof(rep_ev));
+ if (rb < (int)sizeof(rep_ev))
+ break;
+
+ if (buff->num_events == 0) {
+ start_time = rep_ev.event.time;
+ }
+ timersub(&(rep_ev.event.time), &start_time, &(rep_ev.event.time));
+ memcpy(&(buff->events[buff->num_events]), &rep_ev, sizeof(rep_ev));
+ buff->num_events++;
+ }
+ close(fdin);
+ return 0;
+}
+
+int replay_buffer_close(replay_buffer_t *buff)
+{
+ free(buff->fds);
+ free(buff->events);
+ free(buff);
+ return 0;
+}
+
+int replay_buffer_play(replay_buffer_t *buff)
+{
+ int i = 0, rb;
+ struct timeval start_time, now, desired_time, last_event_delta, delta;
+ memset(&last_event_delta, 0, sizeof(struct timeval));
+ gettimeofday(&start_time, NULL);
+
+ while (i < buff->num_events) {
+ gettimeofday(&now, NULL);
+ timeradd(&start_time, &last_event_delta, &desired_time);
+
+ if (timercmp(&desired_time, &now, >)) {
+ timersub(&desired_time, &now, &delta);
+ useconds_t d = (useconds_t)delta.tv_sec * 1000000 + delta.tv_usec;
+ dprintf("now %u.%u desiredtime %u.%u sleeping %u uS\n",
+ (unsigned int)now.tv_sec, (unsigned int)now.tv_usec,
+ (unsigned int)desired_time.tv_sec, (unsigned int)desired_time.tv_usec, d);
+ usleep(d);
+ }
+
+ int idx = (buff->events[i]).dev_idx;
+ struct input_event ev = (buff->events[i]).event;
+ while((i < buff->num_events) && !timercmp(&ev.time, &last_event_delta, !=)) {
+ rb = write(buff->fds[idx], &ev, sizeof(ev));
+ if (rb!=sizeof(ev))
+ die("problems writing\n");
+ dprintf("replayed event: type %d code %d value %d\n", ev.type, ev.code, ev.value);
+
+ i++;
+ idx = (buff->events[i]).dev_idx;
+ ev = (buff->events[i]).event;
+ }
+ last_event_delta = ev.time;
+ }
+}
+
+void replay(const char *logfile)
+{
+ replay_buffer_t *replay_buffer;
+ replay_buffer_init(&replay_buffer, logfile);
+#ifdef ANDROID
+ __android_log_write(ANDROID_LOG_INFO, "REVENT", "Replay starting");
+#endif
+ replay_buffer_play(replay_buffer);
+#ifdef ANDROID
+ __android_log_write(ANDROID_LOG_INFO, "REVENT", "Replay complete");
+#endif
+ replay_buffer_close(replay_buffer);
+}
+
/*
 * Record input events from every device in inpdev into logfile.
 *
 * Log format written here (and consumed by dump()/replay_buffer_init()):
 * int id_pathc; then per device (int len, len path bytes); then a stream of
 * (int device_index, struct input_event) pairs.
 *
 * Recording ends either when select() times out after `delay` seconds with
 * no events, or when the user presses return on stdin -- in which case a
 * synthetic KEY_ENTER release plus SYN event is appended so that replaying
 * the log does not leave the enter key logically held down.
 */
void record(inpdev_t *inpdev, int delay, const char *logfile)
{
    fd_set readfds;
    FILE* fdout;
    struct input_event ev;
    int i;
    int maxfd = 0;
    int keydev=0;  /* device index that last produced a KEY_ENTER press */

    int* fds = malloc(sizeof(int)*inpdev->id_pathc);
    if (!fds) die("out of memory\n");

    fdout = fopen(logfile, "wb");
    if (!fdout) die("Could not open eventlog %s\n", logfile);

    /* Header: device count followed by each length-prefixed device path. */
    fwrite(&inpdev->id_pathc, sizeof(inpdev->id_pathc), 1, fdout);
    for (i=0; i<inpdev->id_pathc; i++) {
        int len = strlen(inpdev->id_pathv[i]);
        fwrite(&len, sizeof(len), 1, fdout);
        fwrite(inpdev->id_pathv[i], len, 1, fdout);
    }

    for (i=0; i < inpdev->id_pathc; i++)
    {
        fds[i] = open(inpdev->id_pathv[i], O_RDONLY);
        if (fds[i]>maxfd) maxfd = fds[i];
        dprintf("opened %s with %d\n", inpdev->id_pathv[i], fds[i]);
        /* NOTE(review): "\%s" has a stray backslash (prints as %s but draws a
           compiler warning), and this failure check arguably belongs before
           the two lines above that already use fds[i]. */
        if (fds[i]<0) die("could not open \%s\n", inpdev->id_pathv[i]);
    }

    int count =0;
    struct timeval tout;
    while(1)
    {
        /* select() mutates both the fd set and the timeout, so rebuild them
           on every iteration. */
        FD_ZERO(&readfds);
        FD_SET(STDIN_FILENO, &readfds);
        for (i=0; i < inpdev->id_pathc; i++)
            FD_SET(fds[i], &readfds);
        /* wait for input */
        tout.tv_sec = delay;
        tout.tv_usec = 0;
        int r = select(maxfd+1, &readfds, NULL, NULL, &tout);
        /* dprintf("got %d (err %d)\n", r, errno); */
        if (!r) break;  /* timed out: no events for `delay` seconds */
        if (FD_ISSET(STDIN_FILENO, &readfds)) {
            // in this case the key down for the return key will be recorded
            // so we need to up the key up
            memset(&ev, 0, sizeof(ev));
            ev.type = EV_KEY;
            ev.code = KEY_ENTER;
            ev.value = 0;  /* 0 == key release */
            gettimeofday(&ev.time, NULL);
            fwrite(&keydev, sizeof(keydev), 1, fdout);
            fwrite(&ev, sizeof(ev), 1, fdout);
            memset(&ev, 0, sizeof(ev)); // SYN
            gettimeofday(&ev.time, NULL);
            fwrite(&keydev, sizeof(keydev), 1, fdout);
            fwrite(&ev, sizeof(ev), 1, fdout);
            dprintf("added fake return exiting...\n");
            break;
        }

        for (i=0; i < inpdev->id_pathc; i++)
        {
            if (FD_ISSET(fds[i], &readfds))
            {
                dprintf("Got event from %s\n", inpdev->id_pathv[i]);
                memset(&ev, 0, sizeof(ev));
                size_t rb = read(fds[i], (void*) &ev, sizeof(ev));
                dprintf("%d event: type %d code %d value %d\n",
                        (unsigned int)rb, ev.type, ev.code, ev.value);
                /* Track which device produces KEY_ENTER presses so the
                   synthetic release above is attributed to the same device. */
                if (ev.type == EV_KEY && ev.code == KEY_ENTER && ev.value == 1)
                    keydev = i;
                fwrite(&i, sizeof(i), 1, fdout);
                fwrite(&ev, sizeof(ev), 1, fdout);
                count++;
            }
        }
    }

    for (i=0; i < inpdev->id_pathc; i++)
    {
        close(fds[i]);
    }

    fclose(fdout);
    free(fds);
    dprintf("Recorded %d events\n", count);
}
+
+
/* Print command line usage to stdout.  Fixes user-facing typos in the
 * original text ("specifed", "form which", "recoreded") and the replay FILE
 * description, which wrongly said events would be recorded into it. */
void usage()
{
    printf("usage:\n revent [-h] [-v] COMMAND [OPTIONS] \n"
           "\n"
           "   Options:\n"
           "       -h      print this help message and quit.\n"
           "       -v      enable verbose output.\n"
           "\n"
           "   Commands:\n"
           "       record [-t SECONDS] [-d DEVICE] FILE\n"
           "           Record input events. Stops after return on STDIN (or, optionally, \n"
           "           a fixed delay)\n"
           "\n"
           "           FILE       file into which events will be recorded.\n"
           "           -t SECONDS time, in seconds, for which to record events.\n"
           "                      if not specified, recording will continue until\n"
           "                      return key is pressed.\n"
           "           -d DEVICE  the number of the input device from which\n"
           "                      events will be recorded. If not specified, \n"
           "                      all available inputs will be used.\n"
           "\n"
           "       replay FILE\n"
           "           replays previously recorded events from the specified file.\n"
           "\n"
           "           FILE       file from which events will be replayed.\n"
           "\n"
           "       dump FILE\n"
           "           dumps the contents of the specified event log to STDOUT in\n"
           "           human-readable form.\n"
           "\n"
           "           FILE       event log which will be dumped.\n"
           "\n"
           "       info\n"
           "           shows info about each event char device\n"
           "\n"
           );
}
+
+void revent_args_init(revent_args_t **rargs, int argc, char** argv)
+{
+ *rargs = malloc(sizeof(revent_args_t));
+ revent_args_t *revent_args = *rargs;
+ revent_args->mode = INVALID;
+ revent_args->record_time = INT_MAX;
+ revent_args->device_number = -1;
+ revent_args->file = NULL;
+
+ int opt;
+ while ((opt = getopt(argc, argv, "ht:d:v")) != -1)
+ {
+ switch (opt) {
+ case 'h':
+ usage();
+ exit(0);
+ break;
+ case 't':
+ if (is_numeric(optarg)) {
+ revent_args->record_time = atoi(optarg);
+ dprintf("timeout: %d\n", revent_args->record_time);
+ } else {
+ die("-t parameter must be numeric; got %s.\n", optarg);
+ }
+ break;
+ case 'd':
+ if (is_numeric(optarg)) {
+ revent_args->device_number = atoi(optarg);
+ dprintf("device: %d\n", revent_args->device_number);
+ } else {
+ die("-d parameter must be numeric; got %s.\n", optarg);
+ }
+ break;
+ case 'v':
+ verbose = TRUE;
+ break;
+ default:
+ die("Unexpected option: %c", opt);
+ }
+ }
+
+ int next_arg = optind;
+ if (next_arg == argc) {
+ usage();
+ die("Must specify a command.\n");
+ }
+ if (!strcmp(argv[next_arg], "record"))
+ revent_args->mode = RECORD;
+ else if (!strcmp(argv[next_arg], "replay"))
+ revent_args->mode = REPLAY;
+ else if (!strcmp(argv[next_arg], "dump"))
+ revent_args->mode = DUMP;
+ else if (!strcmp(argv[next_arg], "info"))
+ revent_args->mode = INFO;
+ else {
+ usage();
+ die("Unknown command -- %s\n", argv[next_arg]);
+ }
+ next_arg++;
+
+ if (next_arg != argc) {
+ revent_args->file = argv[next_arg];
+ dprintf("file: %s\n", revent_args->file);
+ next_arg++;
+ if (next_arg != argc) {
+ die("Trailling arguments (use -h for help).\n");
+ }
+ }
+
+ if ((revent_args->mode != RECORD) && (revent_args->record_time != INT_MAX)) {
+ die("-t parameter is only valid for \"record\" command.\n");
+ }
+ if ((revent_args->mode != RECORD) && (revent_args->device_number != -1)) {
+ die("-d parameter is only valid for \"record\" command.\n");
+ }
+ if ((revent_args->mode == INFO) && (revent_args->file != NULL)) {
+ die("File path cannot be specified for \"info\" command.\n");
+ }
+ if (((revent_args->mode == RECORD) || (revent_args->mode == REPLAY)) && (revent_args->file == NULL)) {
+ die("Must specify a file for recording/replaying (use -h for help).\n");
+ }
+}
+
+int revent_args_close(revent_args_t *rargs)
+{
+ free(rargs);
+ return 0;
+}
+
+int main(int argc, char** argv)
+{
+ int i;
+ char *logfile = NULL;
+
+ revent_args_t *rargs;
+ revent_args_init(&rargs, argc, argv);
+
+ inpdev_t *inpdev;
+ inpdev_init(&inpdev, rargs->device_number);
+
+ switch(rargs->mode) {
+ case RECORD:
+ record(inpdev, rargs->record_time, rargs->file);
+ break;
+ case REPLAY:
+ replay(rargs->file);
+ break;
+ case DUMP:
+ dump(rargs->file);
+ break;
+ case INFO:
+ for (i = 0; i < inpdev->id_pathc; i++) {
+ printDevProperties(inpdev->id_pathv[i]);
+ }
+ };
+
+ inpdev_close(inpdev);
+ revent_args_close(rargs);
+ return 0;
+}
+
diff --git a/wlauto/external/terminalsize.py b/wlauto/external/terminalsize.py
new file mode 100644
index 00000000..32231020
--- /dev/null
+++ b/wlauto/external/terminalsize.py
@@ -0,0 +1,92 @@
+# Taken from
+# https://gist.github.com/jtriley/1108174
+import os
+import shlex
+import struct
+import platform
+import subprocess
+
+
def get_terminal_size():
    """Return the terminal size as a ``(width, height)`` tuple.

    Works on Linux, OS X, Windows and Cygwin, falling back to the default
    of ``(80, 25)`` when the size cannot be determined.

    originally retrieved from:
    http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
    """
    current_os = platform.system()
    tuple_xy = None
    if current_os == 'Windows':
        tuple_xy = _get_terminal_size_windows()
        if tuple_xy is None:
            # needed for window's python in cygwin's xterm!
            tuple_xy = _get_terminal_size_tput()
    if current_os in ['Linux', 'Darwin'] or current_os.startswith('CYGWIN'):
        tuple_xy = _get_terminal_size_linux()
    if tuple_xy is None:
        # Fix: the original also printed a stray "default" debug line to
        # stdout here, polluting the output of any caller.
        tuple_xy = (80, 25)  # default value
    return tuple_xy
+
+
def _get_terminal_size_windows():
    # Query the Windows console size via the Win32 API; returns (cols, rows)
    # or implicitly None on any failure (non-Windows, no console, etc.).
    try:
        from ctypes import windll, create_string_buffer
        # stdin handle is -10
        # stdout handle is -11
        # stderr handle is -12
        h = windll.kernel32.GetStdHandle(-12)
        # CONSOLE_SCREEN_BUFFER_INFO is a 22-byte struct; unpacked below.
        csbi = create_string_buffer(22)
        res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
        if res:
            (bufx, bufy, curx, cury, wattr,
             left, top, right, bottom,
             maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
            # The visible window rectangle, not the (possibly larger) buffer.
            sizex = right - left + 1
            sizey = bottom - top + 1
            return sizex, sizey
    except:
        # Deliberate best-effort: any failure means "size unknown" and the
        # caller falls through to the next detection strategy.
        pass
+
+
+def _get_terminal_size_tput():
+ # get terminal width
+ # src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
+ try:
+ cols = int(subprocess.check_call(shlex.split('tput cols')))
+ rows = int(subprocess.check_call(shlex.split('tput lines')))
+ return (cols, rows)
+ except:
+ pass
+
+
def _get_terminal_size_linux():
    # POSIX terminal size detection; returns (cols, rows) or None.
    # Strategy: TIOCGWINSZ ioctl on stdin/stdout/stderr, then on the
    # controlling terminal, then the LINES/COLUMNS environment variables.
    def ioctl_GWINSZ(fd):
        # Returns (rows, cols) -- note the reversed order -- or None.
        try:
            import fcntl
            import termios
            cr = struct.unpack('hh',
                               fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
            return cr
        except:
            # fd is not a tty, or the platform lacks the ioctl.
            pass
    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        # All three standard fds may be redirected; ask the controlling
        # terminal directly.
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            cr = ioctl_GWINSZ(fd)
            os.close(fd)
        except:
            pass
    if not cr:
        # Last resort: environment variables (strings, cast below).
        try:
            cr = (os.environ['LINES'], os.environ['COLUMNS'])
        except:
            return None
    # cr is (rows, cols); swap into the (cols, rows) order callers expect.
    return int(cr[1]), int(cr[0])
+
+
# Manual check: print the detected terminal size when run directly.
if __name__ == "__main__":
    sizex, sizey = get_terminal_size()
    print 'width =', sizex, 'height =', sizey
+
diff --git a/wlauto/external/uiauto/build.sh b/wlauto/external/uiauto/build.sh
new file mode 100755
index 00000000..96b8b7f2
--- /dev/null
+++ b/wlauto/external/uiauto/build.sh
@@ -0,0 +1,21 @@
#!/bin/bash
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Build the uiautomator base classes and copy the result into the common
# directory for workloads to use.

# Fix: abort on any failure so a broken ant build does not silently copy a
# stale (or missing) class file.
set -e

ant build

cp bin/classes/com/arm/wlauto/uiauto/BaseUiAutomation.class ../../common
diff --git a/wlauto/external/uiauto/build.xml b/wlauto/external/uiauto/build.xml
new file mode 100644
index 00000000..478a86cc
--- /dev/null
+++ b/wlauto/external/uiauto/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="com.arm.wlauto.uiauto" default="help">
+
+ <!-- The local.properties file is created and updated by the 'android' tool.
+ It contains the path to the SDK. It should *NOT* be checked into
+ Version Control Systems. -->
+ <property file="local.properties" />
+
+ <!-- The ant.properties file can be created by you. It is only edited by the
+ 'android' tool to add properties to it.
+ This is the place to change some Ant specific build properties.
+ Here are some properties you may want to change/update:
+
+ source.dir
+ The name of the source directory. Default is 'src'.
+ out.dir
+ The name of the output directory. Default is 'bin'.
+
+ For other overridable properties, look at the beginning of the rules
+ files in the SDK, at tools/ant/build.xml
+
+ Properties related to the SDK location or the project target should
+ be updated using the 'android' tool with the 'update' action.
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems.
+
+ -->
+ <property file="ant.properties" />
+
+ <!-- if sdk.dir was not set from one of the property file, then
+ get it from the ANDROID_HOME env var.
+ This must be done before we load project.properties since
+ the proguard config can use sdk.dir -->
+ <property environment="env" />
+ <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+ <isset property="env.ANDROID_HOME" />
+ </condition>
+
+ <!-- The project.properties file is created and updated by the 'android'
+ tool, as well as ADT.
+
+ This contains project specific properties such as project target, and library
+ dependencies. Lower level build properties are stored in ant.properties
+ (or in .classpath for Eclipse projects).
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems. -->
+ <loadproperties srcFile="project.properties" />
+
+ <!-- quick check on sdk.dir -->
+ <fail
+ message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+ unless="sdk.dir"
+ />
+
+ <!--
+ Import per project custom build rules if present at the root of the project.
+ This is the place to put custom intermediary targets such as:
+ -pre-build
+ -pre-compile
+ -post-compile (This is typically used for code obfuscation.
+ Compiled code location: ${out.classes.absolute.dir}
+ If this is not done in place, override ${out.dex.input.absolute.dir})
+ -post-package
+ -post-build
+ -pre-clean
+ -->
+ <import file="custom_rules.xml" optional="true" />
+
+ <!-- Import the actual build file.
+
+ To customize existing targets, there are two options:
+ - Customize only one target:
+ - copy/paste the target into this file, *before* the
+ <import> task.
+ - customize it to your needs.
+ - Customize the whole content of build.xml
+ - copy/paste the content of the rules files (minus the top node)
+ into this file, replacing the <import> task.
+ - customize to your needs.
+
+ ***********************
+ ****** IMPORTANT ******
+ ***********************
+ In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+ in order to avoid having your file be overridden by tools such as "android update project"
+ -->
+ <!-- version-tag: VERSION_TAG -->
+ <import file="${sdk.dir}/tools/ant/uibuild.xml" />
+
+</project>
diff --git a/wlauto/external/uiauto/project.properties b/wlauto/external/uiauto/project.properties
new file mode 100644
index 00000000..a3ee5ab6
--- /dev/null
+++ b/wlauto/external/uiauto/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=android-17
diff --git a/wlauto/external/uiauto/src/com/arm/wlauto/uiauto/BaseUiAutomation.java b/wlauto/external/uiauto/src/com/arm/wlauto/uiauto/BaseUiAutomation.java
new file mode 100644
index 00000000..4d26100b
--- /dev/null
+++ b/wlauto/external/uiauto/src/com/arm/wlauto/uiauto/BaseUiAutomation.java
@@ -0,0 +1,113 @@
+/* Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+package com.arm.wlauto.uiauto;
+
+import java.io.File;
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.util.concurrent.TimeoutException;
+
+import android.app.Activity;
+import android.os.Bundle;
+
+// Import the uiautomator libraries
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+
+public class BaseUiAutomation extends UiAutomatorTestCase {
+
+
+ public void sleep(int second) {
+ super.sleep(second * 1000);
+ }
+
+ public boolean takeScreenshot(String name) {
+ Bundle params = getParams();
+ String png_dir = params.getString("workdir");
+
+ try {
+ return getUiDevice().takeScreenshot(new File(png_dir, name + ".png"));
+ } catch(NoSuchMethodError e) {
+ return true;
+ }
+ }
+
+ public void waitText(String text) throws UiObjectNotFoundException {
+ waitText(text, 600);
+ }
+
+ public void waitText(String text, int second) throws UiObjectNotFoundException {
+ UiSelector selector = new UiSelector();
+ UiObject text_obj = new UiObject(selector.text(text)
+ .className("android.widget.TextView"));
+ waitObject(text_obj, second);
+ }
+
+ public void waitObject(UiObject obj) throws UiObjectNotFoundException {
+ waitObject(obj, 600);
+ }
+
+ public void waitObject(UiObject obj, int second) throws UiObjectNotFoundException {
+ if (! obj.waitForExists(second * 1000)){
+ throw new UiObjectNotFoundException("UiObject is not found: "
+ + obj.getSelector().toString());
+ }
+ }
+
+ public boolean waitUntilNoObject(UiObject obj, int second) {
+ return obj.waitUntilGone(second * 1000);
+ }
+
+ public void clearLogcat() throws Exception {
+ Runtime.getRuntime().exec("logcat -c");
+ }
+
+ public void waitForLogcatText(String searchText, long timeout) throws Exception {
+ long startTime = System.currentTimeMillis();
+ Process process = Runtime.getRuntime().exec("logcat");
+ BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()));
+ String line;
+
+ long currentTime = System.currentTimeMillis();
+ boolean found = false;
+ while ((currentTime - startTime) < timeout){
+ sleep(2); // poll every two seconds
+
+ while((line=reader.readLine())!=null) {
+ if (line.contains(searchText)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found) {
+ break;
+ }
+ currentTime = System.currentTimeMillis();
+ }
+
+ process.destroy();
+
+ if ((currentTime - startTime) >= timeout) {
+ throw new TimeoutException("Timed out waiting for Logcat text \"%s\"".format(searchText));
+ }
+ }
+}
+
diff --git a/wlauto/instrumentation/__init__.py b/wlauto/instrumentation/__init__.py
new file mode 100644
index 00000000..094b8fa6
--- /dev/null
+++ b/wlauto/instrumentation/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from wlauto.core import instrumentation
+
+
def instrument_is_installed(instrument):
    """Returns ``True`` if the specified instrument is installed, and ``False``
    otherwise. The instrument may be specified either as a name or a subclass (or
    instance of subclass) of :class:`wlauto.core.Instrument`."""
    return instrumentation.is_installed(instrument)
+
+
def clear_instrumentation():
    # Drop every installed instrument from the global registry kept by
    # wlauto.core.instrumentation (primarily used by tests for a clean slate).
    instrumentation.installed = []
diff --git a/wlauto/instrumentation/coreutil/__init__.py b/wlauto/instrumentation/coreutil/__init__.py
new file mode 100644
index 00000000..e63f8c3e
--- /dev/null
+++ b/wlauto/instrumentation/coreutil/__init__.py
@@ -0,0 +1,278 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+import sys
+import re
+import time
+import shutil
+import logging
+import threading
+import subprocess
+import tempfile
+import csv
+
+from wlauto import Instrument, Parameter
+from wlauto.core.execution import ExecutionContext
+from wlauto.exceptions import InstrumentError, WorkerThreadError
+from wlauto.core import signal
+
+
+class CoreUtilization(Instrument):
+
+ name = 'coreutil'
+ description = """
+ Measures CPU core activity during workload execution in terms of the percentage of time a number
+ of cores were utilized above the specfied threshold.
+
+ This workload generates ``coreutil.csv`` report in the workload's output directory. The report is
+ formatted as follows::
+
+ <threshold,1core,2core,3core,4core
+ 18.098132,38.650248000000005,10.736180000000001,3.6809760000000002,28.834312000000001
+
+ Interpretation of the result:
+
+ - 38.65% of total time only single core is running above or equal to threshold value
+ - 10.736% of total time two cores are running simultaneously above or equal to threshold value
+ - 3.6809% of total time three cores are running simultaneously above or equal to threshold value
+ - 28.8314% of total time four cores are running simultaneously above or equal to threshold value
+ - 18.098% of time all core are running below threshold value.
+
+ ..note : This instrument doesn't work on ARM big.LITTLE IKS implementation
+
+ """
+
+ parameters = [
+ Parameter('threshold', kind=int, default=50,
+ constraint=lambda x: 0 < x <= 100,
+ description='Cores with percentage utilization above this value will be considered '
+ 'as "utilized". This value may need to be adjusted based on the background '
+ 'activity and the intensity of the workload being instrumented (e.g. it may '
+ 'need to be lowered for low-intensity workloads such as video playback).'
+ )
+ ]
+
+ def __init__(self, device, **kwargs):
+ super(CoreUtilization, self).__init__(device, **kwargs)
+ self.collector = None
+ self.output_dir = None
+ self.cores = None
+ self.output_artifact_registered = False
+
+ def setup(self, context):
+ ''' Calls ProcCollect class '''
+ self.output_dir = context.output_directory
+ self.collector = ProcCollect(self.device, self.logger, self.output_dir)
+ self.cores = self.device.number_of_cores
+
+ def start(self, context): # pylint: disable=W0613
+ ''' Starts collecting data once the workload starts '''
+ self.logger.debug('Starting to collect /proc/stat data')
+ self.collector.start()
+
+ def stop(self, context): # pylint: disable=W0613
+ ''' Stops collecting data once the workload stops '''
+ self.logger.debug('Stopping /proc/stat data collection')
+ self.collector.stop()
+
+ def update_result(self, context):
+ ''' updates result into coreutil.csv '''
+ self.collector.join() # wait for "proc.txt" to generate.
+ context.add_artifact('proctxt', 'proc.txt', 'raw')
+ calc = Calculator(self.cores, self.threshold, context) # pylint: disable=E1101
+ calc.calculate()
+ if not self.output_artifact_registered:
+ context.add_run_artifact('cpuutil', 'coreutil.csv', 'data')
+ self.output_artifact_registered = True
+
+
+class ProcCollect(threading.Thread):
+ ''' Dumps data into proc.txt '''
+
+ def __init__(self, device, logger, out_dir):
+ super(ProcCollect, self).__init__()
+ self.device = device
+ self.logger = logger
+ self.dire = out_dir
+ self.stop_signal = threading.Event()
+ self.command = 'cat /proc/stat'
+ self.exc = None
+
+ def run(self):
+ try:
+ self.stop_signal.clear()
+ _, temp_file = tempfile.mkstemp()
+ self.logger.debug('temp file : {}'.format(temp_file))
+ with open(temp_file, 'wb') as tempfp:
+ while not self.stop_signal.is_set():
+ tempfp.write(self.device.execute(self.command))
+ tempfp.write('\n')
+ time.sleep(0.5)
+ raw_file = os.path.join(self.dire, 'proc.txt')
+ shutil.copy(temp_file, raw_file)
+ os.unlink(temp_file)
+ except Exception, error: # pylint: disable=W0703
+ self.logger.warning('Exception on collector thread : {}({})'.format(error.__class__.__name__, error))
+ self.exc = WorkerThreadError(self.name, sys.exc_info())
+
+ def stop(self):
+ '''Executed once the workload stops'''
+ self.stop_signal.set()
+ if self.exc is not None:
+ raise self.exc # pylint: disable=E0702
+
+
+class Calculator(object):
+    """
+    Read /proc/stat and dump data into ``proc.txt`` which is parsed to generate ``coreutil.csv``
+    Sample output from 'proc.txt' ::
+
+        ----------------------------------------------------------------------
+        cpu  9853753 51448 3248855 12403398 4241 111 14996 0 0 0
+        cpu0 1585220 7756 1103883 4977224 552 97 10505 0 0 0
+        cpu1 2141168 7243 564347 972273 504 4 1442 0 0 0
+        cpu2 1940681 7994 651946 1005534 657 3 1424 0 0 0
+        cpu3 1918013 8833 667782 1012249 643 3 1326 0 0 0
+        cpu4 165429 5363 50289 1118910 474 0 148 0 0 0
+        cpu5 1661299 4910 126654 1104018 480 0 53 0 0 0
+        cpu6 333642 4657 48296 1102531 482 2 55 0 0 0
+        cpu7 108299 4691 35656 1110658 448 0 41 0 0 0
+        ----------------------------------------------------------------------
+    Description:
+
+    1st column : cpu_id( cpu0, cpu1, cpu2,......)
+    Next all column represents the amount of time, measured in units of USER_HZ
+    2nd column : Time spent in user mode
+    3rd column : Time spent in user mode with low priority
+    4th column : Time spent in system mode
+    5th column : Time spent in idle task
+    6th column : Time waiting for i/o to complete
+    7th column : Time servicing interrupts
+    8th column : Time servicing softirqs
+    9th column : Stolen time is the time spent in other operating systems
+    10th column : Time spent running a virtual CPU
+    11th column : Time spent running a niced guest
+
+    ----------------------------------------------------------------------------
+
+    Procedure to calculate instantaneous CPU utilization:
+
+    1) Subtract two consecutive samples for every column( except 1st )
+    2) Sum all the values except "Time spent in idle task"
+    3) CPU utilization(%) = ( value obtained in 2 )/sum of all the values)*100
+
+    """
+
+    idle_time_index = 3  # offset of the idle-time value within a sample's per-cpu time fields
+
+    def __init__(self, cores, threshold, context):
+        self.cores = cores          # number of cores on the device
+        self.threshold = threshold  # utilization (%) above which a core counts as "utilized"
+        self.context = context
+        self.cpu_util = None   # per-core list of instantaneous CPU utilization percentages
+        self.active = None     # per-core list of active time samples (total time - idle)
+        self.total = None      # per-core list of total time samples (in USER_HZ)
+        self.output = None     # histogram: [below-threshold, 1 core, 2 cores, ...], later scaled to %
+        self.cpuid_regex = re.compile(r'cpu(\d+)')
+        self.outfile = os.path.join(context.run_output_directory, 'coreutil.csv')
+        self.infile = os.path.join(context.output_directory, 'proc.txt')
+
+    def calculate(self):  # full pipeline: parse samples, compute utilization, append CSV row
+        self.calculate_total_active()
+        self.calculate_core_utilization()
+        self.generate_csv(self.context)
+
+    def calculate_total_active(self):
+        """ Read proc.txt file and calculate 'self.active' and 'self.total' """
+        all_cores = set(xrange(self.cores))
+        self.total = [[] for _ in all_cores]
+        self.active = [[] for _ in all_cores]
+        with open(self.infile, "r") as fh:
+            # parsing logic:
+            #  - keep spinning through lines until see the cpu summary line
+            #    (taken to indicate start of new record).
+            #  - extract values for individual cores after the summary line,
+            #    keeping track of seen cores until no more lines match 'cpu\d+'
+            #    pattern.
+            #  - For every core not seen in this record, pad zeros.
+            #  - Loop
+            try:
+                while True:
+                    line = fh.next()
+                    if not line.startswith('cpu '):
+                        continue
+
+                    seen_cores = set([])
+                    line = fh.next()
+                    match = self.cpuid_regex.match(line)
+                    while match:
+                        cpu_id = int(match.group(1))
+                        seen_cores.add(cpu_id)
+                        times = map(int, line.split()[1:])  # first column is the cpu_id
+                        self.total[cpu_id].append(sum(times))
+                        self.active[cpu_id].append(sum(times) - times[self.idle_time_index])
+                        line = fh.next()
+                        match = self.cpuid_regex.match(line)
+
+                    for unseen_core in all_cores - seen_cores:
+                        self.total[unseen_core].append(0)
+                        self.active[unseen_core].append(0)
+            except StopIteration:  # EOF
+                pass
+
+    def calculate_core_utilization(self):
+        """Compute per-core utilization (%) from deltas of consecutive samples."""
+        diff_active = [[] for _ in xrange(self.cores)]
+        diff_total = [[] for _ in xrange(self.cores)]
+        self.cpu_util = [[] for _ in xrange(self.cores)]
+        for i in xrange(self.cores):
+            for j in xrange(len(self.active[i]) - 1):
+                temp = self.active[i][j + 1] - self.active[i][j]
+                diff_active[i].append(temp)
+                diff_total[i].append(self.total[i][j + 1] - self.total[i][j])
+                if diff_total[i][j] == 0:
+                    self.cpu_util[i].append(0)  # no time elapsed for this core (e.g. zero-padded sample)
+                else:
+                    temp = float(diff_active[i][j]) / diff_total[i][j]
+                    self.cpu_util[i].append(round((float(temp)) * 100, 2))
+
+    def generate_csv(self, context):
+        """Append this iteration's utilization distribution as a row of ``coreutil.csv``."""
+        self.output = [0 for _ in xrange(self.cores + 1)]
+        for i in range(len(self.cpu_util[0])):
+            count = 0
+            for j in xrange(len(self.cpu_util)):
+                if self.cpu_util[j][i] > round(float(self.threshold), 2):
+                    count = count + 1
+            self.output[count] += 1  # tally samples in which exactly 'count' cores were above threshold
+        if self.cpu_util[0]:
+            scale_factor = round((float(1) / len(self.cpu_util[0])) * 100, 6)  # converts tallies to percentages
+        else:
+            scale_factor = 0
+        for i in xrange(len(self.output)):
+            self.output[i] = self.output[i] * scale_factor
+        with open(self.outfile, 'a+') as tem:
+            writer = csv.writer(tem)
+            reader = csv.reader(tem)
+            if sum(1 for row in reader) == 0:  # file is empty, so write the header row first
+                row = ['workload', 'iteration', '<threshold']
+                for i in xrange(1, self.cores + 1):
+                    row.append('{}core'.format(i))
+                writer.writerow(row)
+            row = [context.result.workload.name, context.result.iteration]
+            row.extend(self.output)
+            writer.writerow(row)
diff --git a/wlauto/instrumentation/daq/__init__.py b/wlauto/instrumentation/daq/__init__.py
new file mode 100644
index 00000000..a0f5bbd1
--- /dev/null
+++ b/wlauto/instrumentation/daq/__init__.py
@@ -0,0 +1,221 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,E1101,access-member-before-definition,attribute-defined-outside-init
+from __future__ import division
+import os
+import sys
+import csv
+from collections import OrderedDict
+
+from wlauto import Instrument, Parameter
+from wlauto.exceptions import ConfigError, InstrumentError
+from wlauto.utils.misc import ensure_directory_exists as _d
+from wlauto.utils.types import list_of_ints, list_of_strs
+
+daqpower_path = os.path.join(os.path.dirname(__file__), '..', '..', 'external', 'daq_server', 'src')
+sys.path.insert(0, daqpower_path)  # temporarily expose the bundled daqpower sources for import
+try:
+    import daqpower.client as daq  # pylint: disable=F0401
+    from daqpower.config import DeviceConfiguration, ServerConfiguration, ConfigurationError  # pylint: disable=F0401
+except ImportError, e:
+    daq, DeviceConfiguration, ServerConfiguration, ConfigurationError = None, None, None, None  # daqpower unavailable; Daq.validate() reports the error
+    import_error_mesg = e.message
+sys.path.pop(0)  # undo the temporary sys.path modification
+
+
+UNITS = {
+    'power': 'Watts',
+    'voltage': 'Volts',
+}
+
+
+class Daq(Instrument):
+
+ name = 'daq'
+ description = """
+ DAQ instrument obtains the power consumption of the target device's core
+ measured by National Instruments Data Acquisition(DAQ) device.
+
+ WA communicates with a DAQ device server running on a Windows machine
+ (Please refer to :ref:`daq_setup`) over a network. You must specify the IP
+ address and port the server is listening on in the config file as follows ::
+
+ daq_server_host = '10.1.197.176'
+ daq_server_port = 45677
+
+ These values will be output by the server when you run it on Windows.
+
+ You must also specify the values of resistors (in Ohms) across which the
+ voltages are measured (Please refer to :ref:`daq_setup`). The values should be
+ specified as a list with an entry for each resistor, e.g.::
+
+ daq_resistor_values = [0.005, 0.005]
+
+ In addition to this mandatory configuration, you can also optionally specify the
+ following::
+
+ :daq_labels: Labels to be used for ports. Defaults to ``'PORT_<pnum>'``, where
+ 'pnum' is the number of the port.
+ :daq_device_id: The ID under which the DAQ is registered with the driver.
+ Defaults to ``'Dev1'``.
+ :daq_v_range: Specifies the voltage range for the SOC voltage channel on the DAQ
+ (please refer to :ref:`daq_setup` for details). Defaults to ``2.5``.
+ :daq_dv_range: Specifies the voltage range for the resistor voltage channel on
+ the DAQ (please refer to :ref:`daq_setup` for details).
+ Defaults to ``0.2``.
+ :daq_sampling_rate: DAQ sampling rate. DAQ will take this many samples each
+ second. Please note that this maybe limitted by your DAQ model
+ and then number of ports you're measuring (again, see
+ :ref:`daq_setup`). Defaults to ``10000``.
+ :daq_channel_map: Represents mapping from logical AI channel number to physical
+ connector on the DAQ (varies between DAQ models). The default
+ assumes DAQ 6363 and similar with AI channels on connectors
+ 0-7 and 16-23.
+
+ """
+
+ parameters = [
+ Parameter('server_host', kind=str, default='localhost',
+ description='The host address of the machine that runs the daq Server which the '
+ 'insturment communicates with.'),
+ Parameter('server_port', kind=int, default=56788,
+ description='The port number for daq Server in which daq insturment communicates '
+ 'with.'),
+ Parameter('device_id', kind=str, default='Dev1',
+ description='The ID under which the DAQ is registered with the driver.'),
+ Parameter('v_range', kind=float, default=2.5,
+ description='Specifies the voltage range for the SOC voltage channel on the DAQ '
+ '(please refer to :ref:`daq_setup` for details).'),
+ Parameter('dv_range', kind=float, default=0.2,
+ description='Specifies the voltage range for the resistor voltage channel on '
+ 'the DAQ (please refer to :ref:`daq_setup` for details).'),
+ Parameter('sampling_rate', kind=int, default=10000,
+ description='DAQ sampling rate. DAQ will take this many samples each '
+ 'second. Please note that this maybe limitted by your DAQ model '
+ 'and then number of ports you\'re measuring (again, see '
+ ':ref:`daq_setup`)'),
+ Parameter('resistor_values', kind=list, mandatory=True,
+ description='The values of resistors (in Ohms) across which the voltages are measured on '
+ 'each port.'),
+ Parameter('channel_map', kind=list_of_ints, default=(0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23),
+ description='Represents mapping from logical AI channel number to physical '
+ 'connector on the DAQ (varies between DAQ models). The default '
+ 'assumes DAQ 6363 and similar with AI channels on connectors '
+ '0-7 and 16-23.'),
+ Parameter('labels', kind=list_of_strs,
+ description='List of port labels. If specified, the lenght of the list must match '
+ 'the length of ``resistor_values``. Defaults to "PORT_<pnum>", where '
+ '"pnum" is the number of the port.')
+ ]
+
+ def initialize(self, context):
+ devices = self._execute_command('list_devices')
+ if not devices:
+ raise InstrumentError('DAQ: server did not report any devices registered with the driver.')
+ self._results = OrderedDict()
+
+ def setup(self, context):
+ self.logger.debug('Initialising session.')
+ self._execute_command('configure', config=self.device_config)
+
+ def slow_start(self, context):
+ self.logger.debug('Starting collecting measurements.')
+ self._execute_command('start')
+
+ def slow_stop(self, context):
+ self.logger.debug('Stopping collecting measurements.')
+ self._execute_command('stop')
+
+ def update_result(self, context): # pylint: disable=R0914
+ self.logger.debug('Downloading data files.')
+ output_directory = _d(os.path.join(context.output_directory, 'daq'))
+ self._execute_command('get_data', output_directory=output_directory)
+ for entry in os.listdir(output_directory):
+ context.add_iteration_artifact('DAQ_{}'.format(os.path.splitext(entry)[0]),
+ path=os.path.join('daq', entry),
+ kind='data',
+ description='DAQ power measurments.')
+ port = os.path.splitext(entry)[0]
+ path = os.path.join(output_directory, entry)
+ key = (context.spec.id, context.workload.name, context.current_iteration)
+ if key not in self._results:
+ self._results[key] = {}
+ with open(path) as fh:
+ reader = csv.reader(fh)
+ metrics = reader.next()
+ data = [map(float, d) for d in zip(*list(reader))]
+ n = len(data[0])
+ means = [s / n for s in map(sum, data)]
+ for metric, value in zip(metrics, means):
+ metric_name = '{}_{}'.format(port, metric)
+ context.result.add_metric(metric_name, round(value, 3), UNITS[metric])
+ self._results[key][metric_name] = round(value, 3)
+
+ def teardown(self, context):
+ self.logger.debug('Terminating session.')
+ self._execute_command('close')
+
+ def validate(self):
+ if not daq:
+ raise ImportError(import_error_mesg)
+ self._results = None
+ if self.labels:
+ if not (len(self.labels) == len(self.resistor_values)): # pylint: disable=superfluous-parens
+ raise ConfigError('Number of DAQ port labels does not match the number of resistor values.')
+ else:
+ self.labels = ['PORT_{}'.format(i) for i, _ in enumerate(self.resistor_values)]
+ self.server_config = ServerConfiguration(host=self.server_host,
+ port=self.server_port)
+ self.device_config = DeviceConfiguration(device_id=self.device_id,
+ v_range=self.v_range,
+ dv_range=self.dv_range,
+ sampling_rate=self.sampling_rate,
+ resistor_values=self.resistor_values,
+ channel_map=self.channel_map,
+ labels=self.labels)
+ try:
+ self.server_config.validate()
+ self.device_config.validate()
+ except ConfigurationError, ex:
+ raise ConfigError('DAQ configuration: ' + ex.message) # Re-raise as a WA error
+
+ def before_overall_results_processing(self, context):
+ if self._results:
+ headers = ['id', 'workload', 'iteration']
+ metrics = sorted(self._results.iteritems().next()[1].keys())
+ headers += metrics
+ rows = [headers]
+ for key, value in self._results.iteritems():
+ rows.append(list(key) + [value[m] for m in metrics])
+
+ outfile = os.path.join(context.output_directory, 'daq_power.csv')
+ with open(outfile, 'wb') as fh:
+ writer = csv.writer(fh)
+ writer.writerows(rows)
+
+ def _execute_command(self, command, **kwargs):
+ # pylint: disable=E1101
+ result = daq.execute_command(self.server_config, command, **kwargs)
+ if result.status == daq.Status.OK:
+ pass # all good
+ elif result.status == daq.Status.OKISH:
+ self.logger.debug(result.message)
+ elif result.status == daq.Status.ERROR:
+ raise InstrumentError('DAQ: {}'.format(result.message))
+ else:
+ raise InstrumentError('DAQ: Unexpected result: {} - {}'.format(result.status, result.message))
+ return result.data
diff --git a/wlauto/instrumentation/delay/__init__.py b/wlauto/instrumentation/delay/__init__.py
new file mode 100644
index 00000000..e942520e
--- /dev/null
+++ b/wlauto/instrumentation/delay/__init__.py
@@ -0,0 +1,181 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+#pylint: disable=W0613,E1101,E0203,W0201
+import time
+
+from wlauto import Instrument, Parameter
+from wlauto.exceptions import ConfigError, InstrumentError
+from wlauto.utils.types import boolean
+
+
+class DelayInstrument(Instrument):
+
+ name = 'delay'
+ description = """
+ This instrument introduces a delay before executing either an iteration
+ or all iterations for a spec.
+
+ The delay may be specified as either a fixed period or a temperature
+ threshold that must be reached.
+
+ Optionally, if an active cooling solution is employed to speed up temperature drop between
+ runs, it may be controlled using this instrument.
+
+ """
+
+ parameters = [
+ Parameter('temperature_file', default='/sys/devices/virtual/thermal/thermal_zone0/temp',
+ global_alias='thermal_temp_file',
+ description="""Full path to the sysfile on the device that contains the device's
+ temperature."""),
+ Parameter('temperature_timeout', kind=int, default=600,
+ global_alias='thermal_timeout',
+ description="""
+ The timeout after which the instrument will stop waiting even if the specified threshold
+ temperature is not reached. If this timeout is hit, then a warning will be logged stating
+ the actual temperature at which the timeout has ended.
+ """),
+ Parameter('temperature_poll_period', kind=int, default=5,
+ global_alias='thermal_sleep_time',
+ description="""How long to sleep (in seconds) between polling current device temperature."""),
+ Parameter('temperature_between_specs', kind=int, default=None,
+ global_alias='thermal_threshold_between_specs',
+ description="""
+ Temperature (in device-specific units) the device must cool down to before
+ the iteration spec will be run.
+
+ .. note:: This cannot be specified at the same time as ``fixed_between_specs``
+
+ """),
+ Parameter('temperature_between_iterations', kind=int, default=None,
+ global_alias='thermal_threshold_between_iterations',
+ description="""
+ Temperature (in device-specific units) the device must cool down to before
+ the next spec will be run.
+
+ .. note:: This cannot be specified at the same time as ``fixed_between_iterations``
+
+ """),
+ Parameter('temperature_before_start', kind=int, default=None,
+ global_alias='thermal_threshold_before_start',
+ description="""
+ Temperature (in device-specific units) the device must cool down to just before
+ the actual workload execution (after setup has been performed).
+
+ .. note:: This cannot be specified at the same time as ``fixed_between_iterations``
+
+ """),
+ Parameter('fixed_between_specs', kind=int, default=None,
+ global_alias='fixed_delay_between_specs',
+ description="""
+ How long to sleep (in seconds) after all iterations for a workload spec have
+ executed.
+
+ .. note:: This cannot be specified at the same time as ``temperature_between_specs``
+
+ """),
+ Parameter('fixed_between_iterations', kind=int, default=None,
+ global_alias='fixed_delay_between_iterations',
+ description="""
+ How long to sleep (in seconds) after each iterations for a workload spec has
+ executed.
+
+ .. note:: This cannot be specified at the same time as ``temperature_between_iterations``
+
+ """),
+ Parameter('active_cooling', kind=boolean, default=False,
+ global_alias='thermal_active_cooling',
+ description="""
+ This instrument supports an active cooling solution while waiting for the device temperature
+ to drop to the threshold. The solution involves an mbed controlling a fan. The mbed is signaled
+ over a serial port. If this solution is present in the setup, this should be set to ``True``.
+ """),
+ ]
+
+ def initialize(self, context):
+ if self.temperature_between_iterations == 0:
+ temp = self.device.get_sysfile_value(self.temperature_file, int)
+ self.logger.debug('Setting temperature threshold between iterations to {}'.format(temp))
+ self.temperature_between_iterations = temp
+ if self.temperature_between_specs == 0:
+ temp = self.device.get_sysfile_value(self.temperature_file, int)
+ self.logger.debug('Setting temperature threshold between workload specs to {}'.format(temp))
+ self.temperature_between_specs = temp
+
+ def slow_on_iteration_start(self, context):
+ if self.active_cooling:
+ self.device.stop_active_cooling()
+ if self.fixed_between_iterations:
+ self.logger.debug('Waiting for a fixed period after iteration...')
+ time.sleep(self.fixed_between_iterations)
+ elif self.temperature_between_iterations:
+ self.logger.debug('Waiting for temperature drop before iteration...')
+ self.wait_for_temperature(self.temperature_between_iterations)
+
+ def slow_on_spec_start(self, context):
+ if self.active_cooling:
+ self.device.stop_active_cooling()
+ if self.fixed_between_specs:
+ self.logger.debug('Waiting for a fixed period after spec execution...')
+ time.sleep(self.fixed_between_specs)
+ elif self.temperature_between_specs:
+ self.logger.debug('Waiting for temperature drop before spec execution...')
+ self.wait_for_temperature(self.temperature_between_specs)
+
+ def very_slow_start(self, context):
+ if self.active_cooling:
+ self.device.stop_active_cooling()
+ if self.temperature_before_start:
+ self.logger.debug('Waiting for temperature drop before commencing execution...')
+ self.wait_for_temperature(self.temperature_before_start)
+
+ def wait_for_temperature(self, temperature):
+ if self.active_cooling:
+ self.device.start_active_cooling()
+ self.do_wait_for_temperature(temperature)
+ self.device.stop_active_cooling()
+ else:
+ self.do_wait_for_temperature(temperature)
+
+ def do_wait_for_temperature(self, temperature):
+ reading = self.device.get_sysfile_value(self.temperature_file, int)
+ waiting_start_time = time.time()
+ while reading > temperature:
+ self.logger.debug('Device temperature: {}'.format(reading))
+ if time.time() - waiting_start_time > self.temperature_timeout:
+ self.logger.warning('Reached timeout; current temperature: {}'.format(reading))
+ break
+ time.sleep(self.temperature_poll_period)
+ reading = self.device.get_sysfile_value(self.temperature_file, int)
+
+ def validate(self):
+ if (self.temperature_between_specs is not None and
+ self.fixed_between_specs is not None):
+ raise ConfigError('Both fixed delay and thermal threshold specified for specs.')
+
+ if (self.temperature_between_iterations is not None and
+ self.fixed_between_iterations is not None):
+ raise ConfigError('Both fixed delay and thermal threshold specified for iterations.')
+
+ if not any([self.temperature_between_specs, self.fixed_between_specs, self.temperature_before_start,
+ self.temperature_between_iterations, self.fixed_between_iterations]):
+ raise ConfigError('delay instrument is enabled, but no delay is specified.')
+
+ if self.active_cooling and not self.device.has('active_cooling'):
+ message = 'Your device does not support active cooling. Did you configure it with an approprite module?'
+ raise InstrumentError(message)
+
diff --git a/wlauto/instrumentation/dmesg/__init__.py b/wlauto/instrumentation/dmesg/__init__.py
new file mode 100644
index 00000000..2603d8a4
--- /dev/null
+++ b/wlauto/instrumentation/dmesg/__init__.py
@@ -0,0 +1,62 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+
+from wlauto import Instrument, Parameter
+from wlauto.utils.misc import ensure_file_directory_exists as _f
+
+
+class DmesgInstrument(Instrument):
+ # pylint: disable=no-member,attribute-defined-outside-init
+ """
+ Collected dmesg output before and during the run.
+
+ """
+
+ name = 'dmesg'
+
+ parameters = [
+ Parameter('loglevel', kind=int, allowed_values=range(8),
+ description='Set loglevel for console output.')
+ ]
+
+ loglevel_file = '/proc/sys/kernel/printk'
+
+ def setup(self, context):
+ if self.loglevel:
+ self.old_loglevel = self.device.get_sysfile_value(self.loglevel_file)
+ self.device.set_sysfile_value(self.loglevel_file, self.loglevel, verify=False)
+ self.before_file = _f(os.path.join(context.output_directory, 'dmesg', 'before'))
+ self.after_file = _f(os.path.join(context.output_directory, 'dmesg', 'after'))
+
+ def slow_start(self, context):
+ with open(self.before_file, 'w') as wfh:
+ wfh.write(self.device.execute('dmesg'))
+ context.add_artifact('dmesg_before', self.before_file, kind='data')
+ if self.device.is_rooted:
+ self.device.execute('dmesg -c', as_root=True)
+
+ def slow_stop(self, context):
+ with open(self.after_file, 'w') as wfh:
+ wfh.write(self.device.execute('dmesg'))
+ context.add_artifact('dmesg_after', self.after_file, kind='data')
+
+ def teardown(self, context): # pylint: disable=unused-argument
+ if self.loglevel:
+ self.device.set_sysfile_value(self.loglevel_file, self.old_loglevel, verify=False)
+
+
diff --git a/wlauto/instrumentation/energy_probe/__init__.py b/wlauto/instrumentation/energy_probe/__init__.py
new file mode 100644
index 00000000..2a5466c8
--- /dev/null
+++ b/wlauto/instrumentation/energy_probe/__init__.py
@@ -0,0 +1,145 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,E1101,access-member-before-definition,attribute-defined-outside-init
+import os
+import subprocess
+import signal
+import struct
+import csv
+try:
+ import pandas
+except ImportError:
+ pandas = None
+
+from wlauto import Instrument, Parameter, Executable
+from wlauto.exceptions import InstrumentError, ConfigError
+from wlauto.utils.types import list_of_numbers
+
+
class EnergyProbe(Instrument):

    name = 'energy_probe'
    description = """Collects power traces using the ARM energy probe.

    This instrument requires ``caiman`` utility to be installed in the workload automation
    host and be in the PATH. Caiman is part of DS-5 and should be in ``/path/to/DS-5/bin/`` .
    Energy probe can simultaneously collect energy from up to 3 power rails.

    To connect the energy probe on a rail, connect the white wire to the pin that is closer to the
    Voltage source and the black wire to the pin that is closer to the load (the SoC or the device
    you are probing). Between the pins there should be a shunt resistor of known resistance in the
    range of 5 to 20 mOhm. The resistance of the shunt resistors is a mandatory parameter
    ``resistor_values``.

    .. note:: This instrument can process results a lot faster if python pandas is installed.
    """

    parameters = [
        Parameter('resistor_values', kind=list_of_numbers, default=[],
                  description="""The value of shunt resistors. This is a mandatory parameter."""),
        Parameter('labels', kind=list, default=[],
                  description="""Meaningful labels for each of the monitored rails."""),
    ]

    # The energy probe hardware supports at most three simultaneous channels.
    MAX_CHANNELS = 3

    def __init__(self, device, **kwargs):
        super(EnergyProbe, self).__init__(device, **kwargs)
        # Each raw sample carries (power, voltage, current) per channel,
        # each encoded as a 4-byte unsigned int.
        self.attributes_per_sample = 3
        self.bytes_per_sample = self.attributes_per_sample * 4
        self.attributes = ['power', 'voltage', 'current']
        # Scale configured resistor values by 1000 for caiman's -r option
        # (presumably mOhm -> uOhm -- TODO confirm against caiman docs).
        for i, val in enumerate(self.resistor_values):
            self.resistor_values[i] = int(1000 * float(val))

    def validate(self):
        """Check that caiman is available and the channel configuration is sane."""
        # `which` returns non-zero when caiman is not on the PATH.
        if subprocess.call('which caiman', stdout=subprocess.PIPE, shell=True):
            raise InstrumentError('caiman not in PATH. Cannot enable energy probe')
        if not self.resistor_values:
            raise ConfigError('At least one resistor value must be specified')
        if len(self.resistor_values) > self.MAX_CHANNELS:
            raise ConfigError('{} Channels where specified when Energy Probe supports up to {}'
                              .format(len(self.resistor_values), self.MAX_CHANNELS))
        if pandas is None:
            self.logger.warning("pandas package will significantly speed up this instrument")
            self.logger.warning("to install it try: pip install pandas")

    def setup(self, context):
        # Default labels to PORT_<n> when none were configured.
        if not self.labels:
            self.labels = ["PORT_{}".format(channel) for channel, _ in enumerate(self.resistor_values)]
        self.output_directory = os.path.join(context.output_directory, 'energy_probe')
        rstring = ""
        for i, rval in enumerate(self.resistor_values):
            rstring += '-r {}:{} '.format(i, rval)
        self.command = 'caiman -l {} {}'.format(rstring, self.output_directory)
        os.makedirs(self.output_directory)

    def start(self, context):
        # Launch caiman in its own process group so the whole group can be
        # signalled in stop().
        self.logger.debug(self.command)
        self.caiman = subprocess.Popen(self.command,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE,
                                       stdin=subprocess.PIPE,
                                       preexec_fn=os.setpgrp,
                                       shell=True)

    def stop(self, context):
        # Terminate caiman's entire process group (shell=True means the
        # command runs under a shell child).
        os.killpg(self.caiman.pid, signal.SIGTERM)

    def update_result(self, context):  # pylint: disable=too-many-locals
        """Parse caiman's raw binary output and write one CSV per monitored rail."""
        num_of_channels = len(self.resistor_values)
        processed_data = [[] for _ in xrange(num_of_channels)]
        filenames = [os.path.join(self.output_directory, '{}.csv'.format(label)) for label in self.labels]
        # One row = attributes_per_sample unsigned ints for every channel.
        struct_format = '{}I'.format(num_of_channels * self.attributes_per_sample)
        not_a_full_row_seen = False
        with open(os.path.join(self.output_directory, "0000000000"), "rb") as bfile:
            while True:
                data = bfile.read(num_of_channels * self.bytes_per_sample)
                if not data:
                    break
                try:
                    unpacked_data = struct.unpack(struct_format, data)
                except struct.error:
                    # One truncated row at the end of the file is tolerated
                    # (caiman may have been killed mid-write); any further
                    # short rows suggest misaligned data.
                    if not_a_full_row_seen:
                        self.logger.warn('possibly missaligned caiman raw data, row contained {} bytes'.format(len(data)))
                    else:
                        not_a_full_row_seen = True
                    # BUGFIX: previously fell through and processed stale (or
                    # undefined) unpacked_data; skip the partial row instead.
                    continue
                for i in xrange(num_of_channels):
                    index = i * self.attributes_per_sample
                    processed_data[i].append({attr: val for attr, val in
                                              zip(self.attributes, unpacked_data[index:index + self.attributes_per_sample])})
        for i, path in enumerate(filenames):
            with open(path, 'w') as f:
                if pandas is not None:
                    self._pandas_produce_csv(processed_data[i], f)
                else:
                    self._slow_produce_csv(processed_data[i], f)

    # pylint: disable=R0201
    def _pandas_produce_csv(self, data, f):
        # Fast path: vectorised scaling (raw values are in thousandths).
        dframe = pandas.DataFrame(data)
        dframe = dframe / 1000.0
        dframe.to_csv(f)

    def _slow_produce_csv(self, data, f):
        # Fallback used when pandas is not installed; same scaling as above.
        new_data = []
        for entry in data:
            new_data.append({key: val / 1000.0 for key, val in entry.items()})
        writer = csv.DictWriter(f, self.attributes)
        writer.writeheader()
        writer.writerows(new_data)
+
diff --git a/wlauto/instrumentation/fps/__init__.py b/wlauto/instrumentation/fps/__init__.py
new file mode 100644
index 00000000..ecdd1bb6
--- /dev/null
+++ b/wlauto/instrumentation/fps/__init__.py
@@ -0,0 +1,298 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,E1101
+from __future__ import division
+import os
+import sys
+import time
+import csv
+import shutil
+import threading
+import errno
+import tempfile
+
+from distutils.version import LooseVersion
+
+
+from wlauto import Instrument, Parameter, IterationResult
+from wlauto.instrumentation import instrument_is_installed
+from wlauto.exceptions import (InstrumentError, WorkerThreadError, ConfigError,
+ DeviceNotRespondingError, TimeoutError)
+from wlauto.utils.types import boolean, numeric
+
+try:
+ import pandas as pd
+except ImportError:
+ pd = None
+
+
+VSYNC_INTERVAL = 16666667
+EPSYLON = 0.0001
+
+
class FpsInstrument(Instrument):

    name = 'fps'
    description = """
    Measures Frames Per Second (FPS) and associated metrics for a workload's main View.

    .. note:: This instrument depends on pandas Python library (which is not part of standard
              WA dependencies), so you will need to install that first, before you can use it.

    The view is specified by the workload as ``view`` attribute. This defaults
    to ``'SurfaceView'`` for game workloads, and ``None`` for non-game
    workloads (as for them FPS mesurement usually doesn't make sense).
    Individual workloads may override this.

    This instrument adds four metrics to the results:

        :FPS: Frames Per Second. This is the frame rate of the workload.
        :frames: The total number of frames rendered during the execution of
                 the workload.
        :janks: The number of "janks" that occured during execution of the
                workload. Janks are sudden shifts in frame rate. They result
                in a "stuttery" UI. See http://jankfree.org/jank-busters-io
        :not_at_vsync: The number of frames that did not render in a single
                       vsync cycle.

    """

    parameters = [
        Parameter('drop_threshold', kind=numeric, default=5,
                  description='Data points below this FPS will be dropped as they '
                              'do not constitute "real" gameplay. The assumption '
                              'being that while actually running, the FPS in the '
                              'game will not drop below X frames per second, '
                              'except on loading screens, menus, etc, which '
                              'should not contribute to FPS calculation. '),
        Parameter('keep_raw', kind=boolean, default=False,
                  description='If set to True, this will keep the raw dumpsys output '
                              'in the results directory (this is maily used for debugging) '
                              'Note: frames.csv with collected frames data will always be '
                              'generated regardless of this setting.'),
        Parameter('crash_check', kind=boolean, default=True,
                  description="""
                  Specifies wither the instrument should check for crashed content by examining
                  frame data. If this is set, ``execution_time`` instrument must also be installed.
                  The check is performed by using the measured FPS and exection time to estimate the expected
                  frames cound and comparing that against the measured frames count. The the ratio of
                  measured/expected is too low, then it is assumed that the content has crashed part way
                  during the run. What is "too low" is determined by ``crash_threshold``.

                  .. note:: This is not 100\% fool-proof. If the crash occurs sufficiently close to
                            workload's termination, it may not be detected. If this is expected, the
                            threshold may be adjusted up to compensate.
                  """),
        Parameter('crash_threshold', kind=float, default=0.7,
                  description="""
                  Specifies the threshold used to decided whether a measured/expected frames ration indicates
                  a content crash. E.g. a value of ``0.75`` means the number of actual frames counted is a
                  quarter lower than expected, it will treated as a content crash.
                  """),
    ]

    # Command used in setup() to flush SurfaceFlinger's latency buffers.
    clear_command = 'dumpsys SurfaceFlinger --latency-clear '

    def __init__(self, device, **kwargs):
        super(FpsInstrument, self).__init__(device, **kwargs)
        self.collector = None   # LatencyCollector thread; created in setup()
        self.outfile = None     # path to frames.csv in the output directory
        self.is_enabled = True  # cleared in setup() if the workload has no view

    def validate(self):
        # pandas >= 0.13.1 is required by the stats processing in _update_stats().
        if not pd or LooseVersion(pd.__version__) < LooseVersion('0.13.1'):
            message = ('fps instrument requires pandas Python package (version 0.13.1 or higher) to be installed.\n'
                       'You can install it with pip, e.g. "sudo pip install pandas"')
            raise InstrumentError(message)
        # Crash detection needs the execution_time metric, so that instrument
        # must also be installed.
        if self.crash_check and not instrument_is_installed('execution_time'):
            raise ConfigError('execution_time instrument must be installed in order to check for content crash.')

    def setup(self, context):
        # Collection only makes sense for workloads that expose a ``view``
        # attribute; otherwise the instrument disables itself for this run.
        workload = context.workload
        if hasattr(workload, 'view'):
            self.outfile = os.path.join(context.output_directory, 'frames.csv')
            self.collector = LatencyCollector(self.outfile, self.device, workload.view or '', self.keep_raw, self.logger)
            # Flush SurfaceFlinger's buffers so frames from before the run
            # are not collected.
            self.device.execute(self.clear_command)
        else:
            self.logger.debug('Workload does not contain a view; disabling...')
            self.is_enabled = False

    def start(self, context):
        # Kick off the background dumpsys-polling thread.
        if self.is_enabled:
            self.logger.debug('Starting SurfaceFlinger collection...')
            self.collector.start()

    def stop(self, context):
        if self.is_enabled and self.collector.is_alive():
            self.logger.debug('Stopping SurfaceFlinger collection...')
            self.collector.stop()

    def update_result(self, context):
        # Derive FPS metrics from the collected frames.csv; if nothing was
        # collected, report NaN FPS and zeroed counts.
        if self.is_enabled:
            data = pd.read_csv(self.outfile)
            if not data.empty:  # pylint: disable=maybe-no-member
                self._update_stats(context, data)
            else:
                context.result.add_metric('FPS', float('nan'))
                context.result.add_metric('frame_count', 0)
                context.result.add_metric('janks', 0)
                context.result.add_metric('not_at_vsync', 0)

    def slow_update_result(self, context):
        # Content-crash heuristic: compare the measured frame count against
        # the count expected from FPS * execution_time; a ratio below
        # crash_threshold marks the iteration as FAILED.
        result = context.result
        if result.has_metric('execution_time'):
            self.logger.debug('Checking for crashed content.')
            exec_time = result['execution_time'].value
            fps = result['FPS'].value
            frames = result['frame_count'].value
            if all([exec_time, fps, frames]):
                expected_frames = fps * exec_time
                ratio = frames / expected_frames
                self.logger.debug('actual/expected frames: {:.2}'.format(ratio))
                if ratio < self.crash_threshold:
                    self.logger.error('Content for {} appears to have crashed.'.format(context.spec.label))
                    result.status = IterationResult.FAILED
                    result.add_event('Content crash detected (actual/expected frames: {:.2}).'.format(ratio))

    def _update_stats(self, context, data):
        """Compute FPS, frame_count, janks and not_at_vsync from raw frame timings."""
        vsync_interval = self.collector.refresh_period
        # Delta between consecutive actual-present timestamps, expressed as a
        # rounded number of vsync periods each frame took to compose.
        actual_present_time_deltas = (data.actual_present_time - data.actual_present_time.shift()).drop(0)  # pylint: disable=E1103
        vsyncs_to_compose = (actual_present_time_deltas / vsync_interval).apply(lambda x: int(round(x, 0)))
        # drop values lower than drop_threshold FPS as real in-game frame
        # rate is unlikely to drop below that (except on loading screens
        # etc, which should not be factored in frame rate calculation).
        keep_filter = (1.0 / (vsyncs_to_compose * (vsync_interval / 1e9))) > self.drop_threshold
        filtered_vsyncs_to_compose = vsyncs_to_compose[keep_filter]
        if not filtered_vsyncs_to_compose.empty:
            total_vsyncs = filtered_vsyncs_to_compose.sum()
            if total_vsyncs:
                frame_count = filtered_vsyncs_to_compose.size
                fps = 1e9 * frame_count / (vsync_interval * total_vsyncs)
                context.result.add_metric('FPS', fps)
                context.result.add_metric('frame_count', frame_count)
            else:
                context.result.add_metric('FPS', float('nan'))
                context.result.add_metric('frame_count', 0)

            # janks counts changes in vsyncs-per-frame between consecutive
            # kept frames; not_at_vsync counts frames that did not compose in
            # exactly one vsync cycle (EPSYLON absorbs float noise).
            vtc_deltas = filtered_vsyncs_to_compose - filtered_vsyncs_to_compose.shift()
            vtc_deltas.index = range(0, vtc_deltas.size)
            vtc_deltas = vtc_deltas.drop(0).abs()
            janks = vtc_deltas.apply(lambda x: (x > EPSYLON) and 1 or 0).sum()
            not_at_vsync = vsyncs_to_compose.apply(lambda x: (abs(x - 1.0) > EPSYLON) and 1 or 0).sum()
            context.result.add_metric('janks', janks)
            context.result.add_metric('not_at_vsync', not_at_vsync)
        else:  # no filtered_vsyncs_to_compose
            context.result.add_metric('FPS', float('nan'))
            context.result.add_metric('frame_count', 0)
            context.result.add_metric('janks', 0)
            context.result.add_metric('not_at_vsync', 0)
+
+
class LatencyCollector(threading.Thread):
    """
    Background thread that periodically dumps SurfaceFlinger frame latency
    data for a given activity, parses it into frame records, and writes them
    out as CSV when the thread finishes.
    """

    # Note: the size of the frames buffer for a particular surface is defined
    # by NUM_FRAME_RECORDS inside android/services/surfaceflinger/FrameTracker.h.
    # At the time of writing, this was hard-coded to 128. So at 60 fps
    # (and there is no reason to go above that, as it matches vsync rate
    # on pretty much all phones), there is just over 2 seconds' worth of
    # frames in there. Hence the sleep time of 2 seconds between dumps.
    #command_template = 'while (true); do dumpsys SurfaceFlinger --latency {}; sleep 2; done'
    command_template = 'dumpsys SurfaceFlinger --latency {}'

    def __init__(self, outfile, device, activity, keep_raw, logger):
        super(LatencyCollector, self).__init__()
        self.outfile = outfile    # destination CSV for the parsed frame data
        self.device = device
        self.command = self.command_template.format(activity)
        self.keep_raw = keep_raw  # if True, keep the raw dump alongside the CSV
        self.logger = logger
        self.stop_signal = threading.Event()
        self.frames = []          # parsed (desired, actual, ready) tuples
        self.last_ready_time = 0  # used to skip duplicate frames across dumps
        self.refresh_period = VSYNC_INTERVAL  # overwritten by the dump's refresh-period line
        self.drop_threshold = self.refresh_period * 1000  # bogus-frame cut-off
        self.exc = None           # exception captured on this thread; re-raised in stop()
        self.unresponsive_count = 0

    def run(self):
        """Thread body: poll dumpsys until stopped, then parse and emit CSV."""
        try:
            self.logger.debug('SurfaceFlinger collection started.')
            self.stop_signal.clear()
            fd, temp_file = tempfile.mkstemp()
            self.logger.debug('temp file: {}'.format(temp_file))
            wfh = os.fdopen(fd, 'wb')
            try:
                # Dump every 2 seconds -- see the note on command_template
                # above regarding SurfaceFlinger's frame buffer depth.
                while not self.stop_signal.is_set():
                    wfh.write(self.device.execute(self.command))
                    time.sleep(2)
            finally:
                wfh.close()
            # TODO: this can happen after the run during results processing
            with open(temp_file) as fh:
                # Normalise line endings before parsing.
                text = fh.read().replace('\r\n', '\n').replace('\r', '\n')
                for line in text.split('\n'):
                    line = line.strip()
                    if line:
                        self._process_trace_line(line)
            if self.keep_raw:
                raw_file = os.path.join(os.path.dirname(self.outfile), 'surfaceflinger.raw')
                shutil.copy(temp_file, raw_file)
            os.unlink(temp_file)
        except (DeviceNotRespondingError, TimeoutError):  # pylint: disable=W0703
            raise
        except Exception, e:  # pylint: disable=W0703
            # Capture the exception so stop() can re-raise it on the caller's
            # thread.
            self.logger.warning('Exception on collector thread: {}({})'.format(e.__class__.__name__, e))
            self.exc = WorkerThreadError(self.name, sys.exc_info())
        self.logger.debug('SurfaceFlinger collection stopped.')

        # Whatever happened above, write out the frames collected so far.
        with open(self.outfile, 'w') as wfh:
            writer = csv.writer(wfh)
            writer.writerow(['desired_present_time', 'actual_present_time', 'frame_ready_time'])
            writer.writerows(self.frames)
        self.logger.debug('Frames data written.')

    def stop(self):
        """Signal the collection loop to stop, join, and surface any captured error."""
        self.stop_signal.set()
        self.join()
        if self.unresponsive_count:
            message = 'SurfaceFlinger was unrepsonsive {} times.'.format(self.unresponsive_count)
            if self.unresponsive_count > 10:
                self.logger.warning(message)
            else:
                self.logger.debug(message)
        if self.exc:
            raise self.exc  # pylint: disable=E0702
        self.logger.debug('FSP collection complete.')

    def _process_trace_line(self, line):
        # Parse one line of dumpsys output; three columns form a frame record.
        parts = line.split()
        if len(parts) == 3:
            desired_present_time, actual_present_time, frame_ready_time = map(int, parts)
            if frame_ready_time <= self.last_ready_time:
                return  # duplicate frame
            if (frame_ready_time - desired_present_time) > self.drop_threshold:
                self.logger.debug('Dropping bogus frame {}.'.format(line))
                return  # bogus data
            self.last_ready_time = frame_ready_time
            self.frames.append((desired_present_time, actual_present_time, frame_ready_time))
        elif len(parts) == 1:
            # A lone value is the display's refresh period.
            # NOTE(review): threshold here uses * 10 while __init__ uses
            # * 1000 -- confirm which factor is intended.
            self.refresh_period = int(parts[0])
            self.drop_threshold = self.refresh_period * 10
        elif 'SurfaceFlinger appears to be unresponsive, dumping anyways' in line:
            self.unresponsive_count += 1
        else:
            self.logger.warning('Unexpected SurfaceFlinger dump output: {}'.format(line))
diff --git a/wlauto/instrumentation/hwmon/__init__.py b/wlauto/instrumentation/hwmon/__init__.py
new file mode 100644
index 00000000..598564f0
--- /dev/null
+++ b/wlauto/instrumentation/hwmon/__init__.py
@@ -0,0 +1,120 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,E1101
+from __future__ import division
+from collections import OrderedDict
+
+from wlauto import Parameter, Instrument
+from wlauto.exceptions import InstrumentError, ConfigError
+from wlauto.utils.hwmon import discover_sensors
+from wlauto.utils.types import list_of_strs
+
+
+# sensor_kind: (report_type, units, conversion)
+HWMON_SENSORS = {
+ 'energy': ('diff', 'Joules', lambda x: x / 10 ** 6),
+ 'temp': ('before/after', 'Celsius', lambda x: x / 10 ** 3),
+}
+
+HWMON_SENSOR_PRIORITIES = ['energy', 'temp']
+
+
+class HwmonInstrument(Instrument):
+
+ name = 'hwmon'
+ description = """
+ Hardware Monitor (hwmon) is a generic Linux kernel subsystem,
+ providing access to hardware monitoring components like temperature or
+ voltage/current sensors.
+
+ The following web page has more information:
+
+ http://blogs.arm.com/software-enablement/925-linux-hwmon-power-management-and-arm-ds-5-streamline/
+
+ You can specify which sensors HwmonInstrument looks for by specifying
+ hwmon_sensors in your config.py, e.g. ::
+
+ hwmon_sensors = ['energy', 'temp']
+
+ If this setting is not specified, it will look for all sensors it knows about.
+ Current valid values are::
+
+ :energy: Collect energy measurements and report energy consumed
+ during run execution (the diff of before and after readings)
+ in Joules.
+ :temp: Collect temperature measurements and report the before and
+ after readings in degrees Celsius.
+
+ """
+
+ parameters = [
+ Parameter('sensors', kind=list_of_strs, default=['energy', 'temp'],
+ description='The kinds of sensors hwmon instrument will look for')
+ ]
+
+ def __init__(self, device, **kwargs):
+ super(HwmonInstrument, self).__init__(device, **kwargs)
+
+ if self.sensors:
+ self.sensor_kinds = {}
+ for kind in self.sensors:
+ if kind in HWMON_SENSORS:
+ self.sensor_kinds[kind] = HWMON_SENSORS[kind]
+ else:
+ message = 'Unexpected sensor type: {}; must be in {}'.format(kind, HWMON_SENSORS.keys())
+ raise ConfigError(message)
+ else:
+ self.sensor_kinds = HWMON_SENSORS
+
+ self.sensors = []
+
+ def setup(self, context):
+ self.sensors = []
+ self.logger.debug('Searching for HWMON sensors.')
+ discovered_sensors = discover_sensors(self.device, self.sensor_kinds.keys())
+ for sensor in sorted(discovered_sensors, key=lambda s: HWMON_SENSOR_PRIORITIES.index(s.kind)):
+ self.logger.debug('Adding {}'.format(sensor.filepath))
+ self.sensors.append(sensor)
+ for sensor in self.sensors:
+ sensor.clear_readings()
+
+ def fast_start(self, context):
+ for sensor in reversed(self.sensors):
+ sensor.take_reading()
+
+ def fast_stop(self, context):
+ for sensor in self.sensors:
+ sensor.take_reading()
+
+ def update_result(self, context):
+ for sensor in self.sensors:
+ try:
+ report_type, units, conversion = HWMON_SENSORS[sensor.kind]
+ if report_type == 'diff':
+ before, after = sensor.readings
+ diff = conversion(after - before)
+ context.result.add_metric(sensor.label, diff, units)
+ elif report_type == 'before/after':
+ before, after = sensor.readings
+ context.result.add_metric(sensor.label + ' before', conversion(before), units)
+ context.result.add_metric(sensor.label + ' after', conversion(after), units)
+ else:
+ raise InstrumentError('Unexpected report_type: {}'.format(report_type))
+ except ValueError, e:
+ self.logger.error('Could not collect all {} readings for {}'.format(sensor.kind, sensor.label))
+ self.logger.error('Got: {}'.format(e))
+
diff --git a/wlauto/instrumentation/juno_energy/__init__.py b/wlauto/instrumentation/juno_energy/__init__.py
new file mode 100644
index 00000000..4c1a4a4c
--- /dev/null
+++ b/wlauto/instrumentation/juno_energy/__init__.py
@@ -0,0 +1,77 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,W0201
+import os
+import csv
+import time
+import threading
+import logging
+from operator import itemgetter
+
+from wlauto import Instrument, File, Parameter
+from wlauto.exceptions import InstrumentError
+
+
class JunoEnergy(Instrument):

    name = 'juno_energy'
    description = """
    Collects internal energy meter measurements from Juno development board.

    This instrument was created because (at the time of creation) Juno's energy
    meter measurements aren't exposed through HWMON or similar standardized mechanism,
    necessitating a dedicated instrument to access them.

    This instrument, and the ``readenergy`` executable it relies on are very much tied
    to the Juno platform and are not expected to work on other boards.

    """

    parameters = [
        # NOTE(review): 'period' is declared but not used anywhere in this
        # class (the readenergy command line does not include it) -- confirm
        # whether readenergy applies a default or the option was omitted.
        Parameter('period', kind=float, default=0.1,
                  description='Specifies the time, in Seconds, between polling energy counters.'),
    ]

    def on_run_init(self, context):
        # Deploy the readenergy binary to the device, first killing any stale
        # instance left over from a previous run.
        local_file = context.resolver.get(File(self, 'readenergy'))
        self.device.killall('readenergy', as_root=True)
        self.readenergy = self.device.install(local_file)

    def setup(self, context):
        # Build the collection command and make sure no readenergy instance
        # is already running before the iteration starts.
        self.host_output_file = os.path.join(context.output_directory, 'energy.csv')
        self.device_output_file = self.device.path.join(self.device.working_directory, 'energy.csv')
        self.command = '{} -o {}'.format(self.readenergy, self.device_output_file)
        self.device.killall('readenergy', as_root=True)

    def start(self, context):
        # kick_off runs the command detached, so collection continues in the
        # background while the workload executes.
        self.device.kick_off(self.command)

    def stop(self, context):
        self.device.killall('readenergy', signal='TERM', as_root=True)

    def update_result(self, context):
        # Pull the collected CSV from the device and register it as a run
        # artifact.
        self.device.pull_file(self.device_output_file, self.host_output_file)
        context.add_artifact('junoenergy', self.host_output_file, 'data')

    def teardown(self, context):  # parameter name fixed (was 'conetext')
        self.device.delete_file(self.device_output_file)

    def validate(self):
        # This instrument is Juno-specific; refuse to run on anything else.
        if self.device.name.lower() != 'juno':
            message = 'juno_energy instrument is only supported on juno devices; found {}'
            raise InstrumentError(message.format(self.device.name))
+
diff --git a/wlauto/instrumentation/juno_energy/readenergy b/wlauto/instrumentation/juno_energy/readenergy
new file mode 100755
index 00000000..c26991c2
--- /dev/null
+++ b/wlauto/instrumentation/juno_energy/readenergy
Binary files differ
diff --git a/wlauto/instrumentation/misc/__init__.py b/wlauto/instrumentation/misc/__init__.py
new file mode 100644
index 00000000..6fc55de9
--- /dev/null
+++ b/wlauto/instrumentation/misc/__init__.py
@@ -0,0 +1,365 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,no-member,attribute-defined-outside-init
+"""
+
+Some "standard" instruments to collect additional info about workload execution.
+
+.. note:: The run() method of a Workload may perform some "boilerplate" as well as
+ the actual execution of the workload (e.g. it may contain UI automation
+ needed to start the workload). This "boilerplate" execution will also
+          be measured by these instruments. As such, they are not suitable for collecting
+          precise data about specific operations.
+"""
+import os
+import re
+import logging
+import time
+import tarfile
+from itertools import izip, izip_longest
+from subprocess import CalledProcessError
+
+from wlauto import Instrument, Parameter
+from wlauto.core import signal
+from wlauto.exceptions import DeviceError
+from wlauto.utils.misc import diff_tokens, write_table, check_output, as_relative
+from wlauto.utils.misc import ensure_file_directory_exists as _f
+from wlauto.utils.misc import ensure_directory_exists as _d
+from wlauto.utils.android import ApkInfo
+from wlauto.utils.types import list_of_strings
+
+
+logger = logging.getLogger(__name__)
+
+
+class SysfsExtractor(Instrument):
+
+ name = 'sysfs_extractor'
+ description = """
+ Collects the contest of a set of directories, before and after workload execution
+ and diffs the result.
+
+ """
+
+ mount_command = 'mount -t tmpfs -o size={} tmpfs {}'
+ extract_timeout = 30
+ tarname = 'sysfs.tar.gz'
+
+ parameters = [
+ Parameter('paths', kind=list_of_strings, mandatory=True,
+ description="""A list of paths to be pulled from the device. These could be directories
+ as well as files.""",
+ global_alias='sysfs_extract_dirs'),
+ Parameter('tmpfs_mount_point', default=None,
+ description="""Mount point for tmpfs partition used to store snapshots of paths."""),
+ Parameter('tmpfs_size', default='32m',
+ description="""Size of the tempfs partition."""),
+ ]
+
+ def initialize(self, context):
+ if self.device.is_rooted:
+ self.on_device_before = self.device.path.join(self.tmpfs_mount_point, 'before')
+ self.on_device_after = self.device.path.join(self.tmpfs_mount_point, 'after')
+
+ if not self.device.file_exists(self.tmpfs_mount_point):
+ self.device.execute('mkdir -p {}'.format(self.tmpfs_mount_point), as_root=True)
+ self.device.execute(self.mount_command.format(self.tmpfs_size, self.tmpfs_mount_point),
+ as_root=True)
+
+ def setup(self, context):
+ self.before_dirs = [
+ _d(os.path.join(context.output_directory, 'before', self._local_dir(d)))
+ for d in self.paths
+ ]
+ self.after_dirs = [
+ _d(os.path.join(context.output_directory, 'after', self._local_dir(d)))
+ for d in self.paths
+ ]
+ self.diff_dirs = [
+ _d(os.path.join(context.output_directory, 'diff', self._local_dir(d)))
+ for d in self.paths
+ ]
+
+ if self.device.is_rooted:
+ for d in self.paths:
+ before_dir = self.device.path.join(self.on_device_before,
+ self.device.path.dirname(as_relative(d)))
+ after_dir = self.device.path.join(self.on_device_after,
+ self.device.path.dirname(as_relative(d)))
+ if self.device.file_exists(before_dir):
+ self.device.execute('rm -rf {}'.format(before_dir), as_root=True)
+ self.device.execute('mkdir -p {}'.format(before_dir), as_root=True)
+ if self.device.file_exists(after_dir):
+ self.device.execute('rm -rf {}'.format(after_dir), as_root=True)
+ self.device.execute('mkdir -p {}'.format(after_dir), as_root=True)
+
+ def slow_start(self, context):
+ if self.device.is_rooted:
+ for d in self.paths:
+ dest_dir = self.device.path.join(self.on_device_before, as_relative(d))
+ if '*' in dest_dir:
+ dest_dir = self.device.path.dirname(dest_dir)
+ self.device.execute('busybox cp -Hr {} {}'.format(d, dest_dir),
+ as_root=True, check_exit_code=False)
+ else: # not rooted
+ for dev_dir, before_dir in zip(self.paths, self.before_dirs):
+ self.device.pull_file(dev_dir, before_dir)
+
+ def slow_stop(self, context):
+ if self.device.is_rooted:
+ for d in self.paths:
+ dest_dir = self.device.path.join(self.on_device_after, as_relative(d))
+ if '*' in dest_dir:
+ dest_dir = self.device.path.dirname(dest_dir)
+ self.device.execute('busybox cp -Hr {} {}'.format(d, dest_dir),
+ as_root=True, check_exit_code=False)
+ else: # not rooted
+ for dev_dir, after_dir in zip(self.paths, self.after_dirs):
+ self.device.pull_file(dev_dir, after_dir)
+
+ def update_result(self, context):
+ if self.device.is_rooted:
+ on_device_tarball = self.device.path.join(self.device.working_directory, self.tarname)
+ on_host_tarball = self.device.path.join(context.output_directory, self.tarname)
+ self.device.execute('busybox tar czf {} -C {} .'.format(on_device_tarball, self.tmpfs_mount_point),
+ as_root=True)
+ self.device.execute('chmod 0777 {}'.format(on_device_tarball), as_root=True)
+ self.device.pull_file(on_device_tarball, on_host_tarball)
+ with tarfile.open(on_host_tarball, 'r:gz') as tf:
+ tf.extractall(context.output_directory)
+ self.device.delete_file(on_device_tarball)
+ os.remove(on_host_tarball)
+
+ for after_dir in self.after_dirs:
+ if not os.listdir(after_dir):
+ self.logger.error('sysfs files were not pulled from the device.')
+ return
+ for diff_dir, before_dir, after_dir in zip(self.diff_dirs, self.before_dirs, self.after_dirs):
+ _diff_sysfs_dirs(before_dir, after_dir, diff_dir)
+
+ def teardown(self, context):
+ self._one_time_setup_done = []
+
+ def finalize(self, context):
+ if self.device.is_rooted:
+ try:
+ self.device.execute('umount {}'.format(self.tmpfs_mount_point), as_root=True)
+ except (DeviceError, CalledProcessError):
+ # assume a directory but not mount point
+ pass
+ self.device.execute('rm -rf {}'.format(self.tmpfs_mount_point), as_root=True)
+
+ def validate(self):
+ if not self.tmpfs_mount_point: # pylint: disable=access-member-before-definition
+ self.tmpfs_mount_point = self.device.path.join(self.device.working_directory, 'temp-fs')
+
+ def _local_dir(self, directory):
+ return os.path.dirname(as_relative(directory).replace(self.device.path.sep, os.sep))
+
+
+class ExecutionTimeInstrument(Instrument):
+
+    name = 'execution_time'
+    description = """
+    Measure how long it took to execute the run() methods of a Workload.
+
+    """
+
+    # Priority used when connecting to the execution signals below.
+    priority = 15
+
+    def __init__(self, device, **kwargs):
+        super(ExecutionTimeInstrument, self).__init__(device, **kwargs)
+        self.start_time = None
+        self.end_time = None
+
+    def on_run_start(self, context):
+        # Connect directly to the before/after execution signals (rather than
+        # using the standard start/stop methods) so the recorded interval
+        # brackets only workload execution.
+        signal.connect(self.get_start_time, signal.BEFORE_WORKLOAD_EXECUTION, priority=self.priority)
+        signal.connect(self.get_stop_time, signal.AFTER_WORKLOAD_EXECUTION, priority=self.priority)
+
+    def get_start_time(self, context):
+        # Wall-clock timestamp taken just before the workload's run().
+        self.start_time = time.time()
+
+    def get_stop_time(self, context):
+        # Wall-clock timestamp taken just after the workload's run().
+        self.end_time = time.time()
+
+    def update_result(self, context):
+        # Report elapsed wall-clock time for the execution phase.
+        execution_time = self.end_time - self.start_time
+        context.result.add_metric('execution_time', execution_time, 'seconds')
+
+
+class ApkVersion(Instrument):
+
+    name = 'apk_version'
+    description = """
+    Extracts APK versions for workloads that have them.
+
+    """
+
+    def __init__(self, device, **kwargs):
+        super(ApkVersion, self).__init__(device, **kwargs)
+        self.apk_info = None
+
+    def setup(self, context):
+        # Only APK-based workloads expose an apk_file attribute; other
+        # workloads simply produce no version metric.
+        if hasattr(context.workload, 'apk_file'):
+            self.apk_info = ApkInfo(context.workload.apk_file)
+        else:
+            self.apk_info = None
+
+    def update_result(self, context):
+        # Report the APK's version name (a string-valued metric), if found.
+        if self.apk_info:
+            context.result.add_metric(self.name, self.apk_info.version_name)
+
+
+class InterruptStatsInstrument(Instrument):
+
+    name = 'interrupts'
+    description = """
+    Pulls the ``/proc/interrupts`` file before and after workload execution and diffs them
+    to show what interrupts occurred during that time.
+
+    """
+
+    def __init__(self, device, **kwargs):
+        super(InterruptStatsInstrument, self).__init__(device, **kwargs)
+        self.before_file = None
+        self.after_file = None
+        self.diff_file = None
+
+    def setup(self, context):
+        # Host-side locations for the before/after snapshots and their diff.
+        self.before_file = os.path.join(context.output_directory, 'before', 'proc', 'interrupts')
+        self.after_file = os.path.join(context.output_directory, 'after', 'proc', 'interrupts')
+        self.diff_file = os.path.join(context.output_directory, 'diff', 'proc', 'interrupts')
+
+    def start(self, context):
+        # Snapshot /proc/interrupts just before the workload runs.
+        with open(_f(self.before_file), 'w') as wfh:
+            wfh.write(self.device.execute('cat /proc/interrupts'))
+
+    def stop(self, context):
+        # Snapshot again immediately after the workload finishes.
+        with open(_f(self.after_file), 'w') as wfh:
+            wfh.write(self.device.execute('cat /proc/interrupts'))
+
+    def update_result(self, context):
+        # If workload execution failed, the after_file may not have been created.
+        if os.path.isfile(self.after_file):
+            _diff_interrupt_files(self.before_file, self.after_file, _f(self.diff_file))
+
+
+class DynamicFrequencyInstrument(SysfsExtractor):
+
+ name = 'cpufreq'
+ description = """
+ Collects dynamic frequency (DVFS) settings before and after workload execution.
+
+ """
+
+ tarname = 'cpufreq.tar.gz'
+
+ parameters = [
+ Parameter('paths', mandatory=False, override=True),
+ ]
+
+ def setup(self, context):
+ self.paths = ['/sys/devices/system/cpu']
+ if self.device.is_rooted:
+ self.paths.append('/sys/class/devfreq/*') # the '*' would cause problems for adb pull.
+ super(DynamicFrequencyInstrument, self).setup(context)
+
+ def validate(self):
+ # temp-fs would have been set in super's validate, if not explicitly specified.
+ if not self.tmpfs_mount_point.endswith('-cpufreq'): # pylint: disable=access-member-before-definition
+ self.tmpfs_mount_point += '-cpufreq'
+
+
+def _diff_interrupt_files(before, after, result):  # pylint: disable=R0914
+    """Diff two /proc/interrupts snapshots, writing an aligned table of
+    per-counter differences to ``result``. Rows present only in the after
+    file (interrupt categories that appeared mid-run) are prefixed with '>'.
+    """
+    output_lines = []
+    with open(before) as bfh:
+        with open(after) as ofh:
+            for bline, aline in izip(bfh, ofh):
+                bchunks = bline.strip().split()
+                while True:
+                    achunks = aline.strip().split()
+                    if achunks[0] == bchunks[0]:
+                        # Matching row label: diff the counter columns.
+                        diffchunks = ['']
+                        diffchunks.append(achunks[0])
+                        diffchunks.extend([diff_tokens(b, a) for b, a
+                                           in zip(bchunks[1:], achunks[1:])])
+                        output_lines.append(diffchunks)
+                        break
+                    else:  # new category appeared in the after file
+                        # Emit the new row as-is and keep advancing the after
+                        # file until its row labels re-synchronise.
+                        diffchunks = ['>'] + achunks
+                        output_lines.append(diffchunks)
+                        try:
+                            aline = ofh.next()
+                        except StopIteration:
+                            break
+
+    # Offset heading columns by one to allow for row labels on subsequent
+    # lines.
+    output_lines[0].insert(0, '')
+
+    # Any "columns" that do not have headings in the first row are not actually
+    # columns -- they are a single column where space-separated words got
+    # split. Merge them back together to prevent them from being
+    # column-aligned by write_table.
+    table_rows = [output_lines[0]]
+    num_cols = len(output_lines[0])
+    for row in output_lines[1:]:
+        table_row = row[:num_cols]
+        table_row.append(' '.join(row[num_cols:]))
+        table_rows.append(table_row)
+
+    with open(result, 'w') as wfh:
+        write_table(table_rows, wfh)
+
+
+def _diff_sysfs_dirs(before, after, result):  # pylint: disable=R0914
+    """Token-by-token diff of two sysfs snapshot directory trees.
+
+    For every file under ``before``, the file at the same relative path under
+    ``after`` is diffed line by line, and the result written to the same
+    relative path under ``result``. (Python 2 only: os.path.walk, izip_longest.)
+    """
+    before_files = []
+    os.path.walk(before,
+                 lambda arg, dirname, names: arg.extend([os.path.join(dirname, f) for f in names]),
+                 before_files
+                 )
+    before_files = filter(os.path.isfile, before_files)
+    files = [os.path.relpath(f, before) for f in before_files]
+    after_files = [os.path.join(after, f) for f in files]
+    diff_files = [os.path.join(result, f) for f in files]
+
+    for bfile, afile, dfile in zip(before_files, after_files, diff_files):
+        if not os.path.isfile(afile):
+            logger.debug('sysfs_diff: {} does not exist or is not a file'.format(afile))
+            continue
+
+        with open(bfile) as bfh, open(afile) as afh:  # pylint: disable=C0321
+            with open(_f(dfile), 'w') as dfh:
+                for i, (bline, aline) in enumerate(izip_longest(bfh, afh), 1):
+                    if aline is None:
+                        logger.debug('Lines missing from {}'.format(afile))
+                        break
+                    # Split into alternating word/separator tokens so that the
+                    # original separators are preserved in the diff output.
+                    bchunks = re.split(r'(\W+)', bline)
+                    achunks = re.split(r'(\W+)', aline)
+                    if len(bchunks) != len(achunks):
+                        logger.debug('Token length mismatch in {} on line {}'.format(bfile, i))
+                        dfh.write('xxx ' + bline)
+                        continue
+                    if ((len([c for c in bchunks if c.strip()]) == len([c for c in achunks if c.strip()]) == 2) and
+                            (bchunks[0] == achunks[0])):
+                        # if there are only two columns and the first column is the
+                        # same, assume it's a "header" column and do not diff it.
+                        dchunks = [bchunks[0]] + [diff_tokens(b, a) for b, a in zip(bchunks[1:], achunks[1:])]
+                    else:
+                        dchunks = [diff_tokens(b, a) for b, a in zip(bchunks, achunks)]
+                    dfh.write(''.join(dchunks))
+
diff --git a/wlauto/instrumentation/perf/LICENSE b/wlauto/instrumentation/perf/LICENSE
new file mode 100644
index 00000000..99f70b0d
--- /dev/null
+++ b/wlauto/instrumentation/perf/LICENSE
@@ -0,0 +1,9 @@
+perf binaries included here are part of the Linux kernel and are distributed
+under GPL version 2; The full text of the license may be viewed here:
+
+http://www.gnu.org/licenses/gpl-2.0.html
+
+Source for these binaries is part of Linux Kernel source tree. This may be obtained
+from Linaro here:
+
+https://git.linaro.org/arm/big.LITTLE/mp.git
diff --git a/wlauto/instrumentation/perf/__init__.py b/wlauto/instrumentation/perf/__init__.py
new file mode 100644
index 00000000..523ae2e0
--- /dev/null
+++ b/wlauto/instrumentation/perf/__init__.py
@@ -0,0 +1,176 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,E1101,W0201
+import os
+import re
+import itertools
+
+
+from wlauto import Instrument, Executable, Parameter
+from wlauto.exceptions import ConfigError
+from wlauto.utils.misc import ensure_file_directory_exists as _f
+from wlauto.utils.types import list_or_string, list_of_strs
+
+PERF_COMMAND_TEMPLATE = '{} stat {} {} sleep 1000 > {} 2>&1 '
+
+DEVICE_RESULTS_FILE = '/data/local/perf_results.txt'
+HOST_RESULTS_FILE_BASENAME = 'perf.txt'
+
+PERF_COUNT_REGEX = re.compile(r'^\s*(\d+)\s*(.*?)\s*(\[\s*\d+\.\d+%\s*\])?\s*$')
+
+
+class PerfInstrument(Instrument):
+
+ name = 'perf'
+ description = """
+ Perf is a Linux profiling with performance counters.
+
+ Performance counters are CPU hardware registers that count hardware events
+ such as instructions executed, cache-misses suffered, or branches
+ mispredicted. They form a basis for profiling applications to trace dynamic
+ control flow and identify hotspots.
+
+ pref accepts options and events. If no option is given the default '-a' is
+ used. For events, the default events are migrations and cs. They both can
+ be specified in the config file.
+
+ Events must be provided as a list that contains them and they will look like
+ this ::
+
+ perf_events = ['migrations', 'cs']
+
+ Events can be obtained by typing the following in the command line on the
+ device ::
+
+ perf list
+
+ Whereas options, they can be provided as a single string as following ::
+
+ perf_options = '-a -i'
+
+ Options can be obtained by running the following in the command line ::
+
+ man perf-record
+ """
+
+ parameters = [
+ Parameter('events', kind=list_of_strs, default=['migrations', 'cs'],
+ constraint=(lambda x: x, 'must not be empty.'),
+ description="""Specifies the events to be counted."""),
+ Parameter('optionstring', kind=list_or_string, default='-a',
+ description="""Specifies options to be used for the perf command. This
+ may be a list of option strings, in which case, multiple instances of perf
+ will be kicked off -- one for each option string. This may be used to e.g.
+ collected different events from different big.LITTLE clusters.
+ """),
+ Parameter('labels', kind=list_of_strs, default=None,
+ description="""Provides labels for pref output. If specified, the number of
+ labels must match the number of ``optionstring``\ s.
+ """),
+ ]
+
+ def on_run_init(self, context):
+ if not self.device.is_installed('perf'):
+ binary = context.resolver.get(Executable(self, self.device.abi, 'perf'))
+ self.device.install(binary)
+ self.commands = self._build_commands()
+
+ def setup(self, context):
+ self._clean_device()
+
+ def start(self, context):
+ for command in self.commands:
+ self.device.kick_off(command)
+
+ def stop(self, context):
+ self.device.killall('sleep')
+
+ def update_result(self, context):
+ for label in self.labels:
+ device_file = self._get_device_outfile(label)
+ host_relpath = os.path.join('perf', os.path.basename(device_file))
+ host_file = _f(os.path.join(context.output_directory, host_relpath))
+ self.device.pull_file(device_file, host_file)
+ context.add_iteration_artifact(label, kind='raw', path=host_relpath)
+ with open(host_file) as fh:
+ in_results_section = False
+ for line in fh:
+ if 'Performance counter stats' in line:
+ in_results_section = True
+ fh.next() # skip the following blank line
+ if in_results_section:
+ if not line.strip(): # blank line
+ in_results_section = False
+ break
+ else:
+ line = line.split('#')[0] # comment
+ match = PERF_COUNT_REGEX.search(line)
+ if match:
+ count = int(match.group(1))
+ metric = '{}_{}'.format(label, match.group(2))
+ context.result.add_metric(metric, count)
+
+ def teardown(self, context): # pylint: disable=R0201
+ self._clean_device()
+
+ def validate(self):
+ if isinstance(self.optionstring, list):
+ self.optionstrings = self.optionstring
+ else:
+ self.optionstrings = [self.optionstring]
+ if isinstance(self.events[0], list): # we know events are non-empty due to param constraint pylint: disable=access-member-before-definition
+ self.events = self.events
+ else:
+ self.events = [self.events]
+ if not self.labels: # pylint: disable=E0203
+ self.labels = ['perf_{}'.format(i) for i in xrange(len(self.optionstrings))]
+ if not len(self.labels) == len(self.optionstrings):
+ raise ConfigError('The number of labels must match the number of optstrings provided for perf.')
+
+ def _build_commands(self):
+ events = itertools.cycle(self.events)
+ commands = []
+ for opts, label in itertools.izip(self.optionstrings, self.labels):
+ commands.append(self._build_perf_command(opts, events.next(), label))
+ return commands
+
+ def _clean_device(self):
+ for label in self.labels:
+ filepath = self._get_device_outfile(label)
+ self.device.delete_file(filepath)
+
+ def _get_device_outfile(self, label):
+ return self.device.path.join(self.device.working_directory, '{}.out'.format(label))
+
+ def _build_perf_command(self, options, events, label):
+ event_string = ' '.join(['-e {}'.format(e) for e in events])
+ command = PERF_COMMAND_TEMPLATE.format('perf',
+ options or '',
+ event_string,
+ self._get_device_outfile(label))
+ return command
+
+
+class CCIPerfEvent(object):
+    """Helper that renders a CCI PMU event descriptor in the
+    'CCI/config=<n>,name=<name>/' form accepted by perf's -e option."""
+
+    def __init__(self, name, config):
+        self.name = name
+        self.config = config
+
+    def __str__(self):
+        return 'CCI/config={config},name={name}/'.format(**self.__dict__)
+
diff --git a/wlauto/instrumentation/perf/bin/arm64/perf b/wlauto/instrumentation/perf/bin/arm64/perf
new file mode 100755
index 00000000..5ec37c76
--- /dev/null
+++ b/wlauto/instrumentation/perf/bin/arm64/perf
Binary files differ
diff --git a/wlauto/instrumentation/perf/bin/armeabi/perf b/wlauto/instrumentation/perf/bin/armeabi/perf
new file mode 100755
index 00000000..5a52db56
--- /dev/null
+++ b/wlauto/instrumentation/perf/bin/armeabi/perf
Binary files differ
diff --git a/wlauto/instrumentation/pmu_logger/__init__.py b/wlauto/instrumentation/pmu_logger/__init__.py
new file mode 100644
index 00000000..1a9a0adb
--- /dev/null
+++ b/wlauto/instrumentation/pmu_logger/__init__.py
@@ -0,0 +1,148 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,E1101,W0201
+import os
+import re
+import csv
+
+from wlauto import Instrument, settings, Parameter
+from wlauto.instrumentation import instrument_is_installed
+from wlauto.exceptions import ConfigError
+from wlauto.utils.types import boolean
+
+
+NUMBER_OF_CCI_PMU_COUNTERS = 4
+DEFAULT_EVENTS = ['0x63', '0x6A', '0x83', '0x8A']
+DEFAULT_PERIOD = 10 # in jiffies
+
+CPL_BASE = '/sys/kernel/debug/cci_pmu_logger/'
+CPL_CONTROL_FILE = CPL_BASE + 'control'
+CPL_PERIOD_FILE = CPL_BASE + 'period_jiffies'
+
+DRIVER = 'pmu_logger.ko'
+
+REGEX = re.compile(r'(\d+(?:\.\d+)?):\s+bprint:.*Cycles:\s*(\S+)\s*Counter_0:\s*(\S+)\s*Counter_1:\s*(\S+)\s*Counter_2:\s*(\S+)\s*Counter_3:\s*(\S+)')
+
+
+class CciPmuLogger(Instrument):
+
+ name = "cci_pmu_logger"
+ description = """
+ This instrument allows collecting CCI counter data.
+
+ It relies on the pmu_logger.ko kernel driver, the source for which is
+ included with Workload Automation (see inside ``wlauto/external`` directory).
+ You will need to build this against your specific kernel. Once compiled, it needs
+ to be placed in the dependencies directory (usually ``~/.workload_uatomation/dependencies``).
+
+ .. note:: When compling pmu_logger.ko for a new hardware platform, you may need to
+ modify CCI_BASE inside pmu_logger.c to contain the base address of where
+ CCI is mapped in memory on your device.
+
+ This instrument relies on ``trace-cmd`` instrument to also be enabled. You should enable
+ at least ``'bprint'`` trace event.
+
+ """
+
+ parameters = [
+ Parameter('events', kind=list, default=DEFAULT_EVENTS,
+ description="""
+ A list of strings, each representing an event to be counted. The length
+ of the list cannot exceed the number of PMU counters available (4 in CCI-400).
+ If this is not specified, shareable read transactions and snoop hits on both
+ clusters will be counted by default. E.g. ``['0x63', '0x83']``.
+ """),
+ Parameter('event_labels', kind=list, default=[],
+ description="""
+ A list of labels to be used when reporting PMU counts. If specified,
+ this must be of the same length as ``cci_pmu_events``. If not specified,
+ events will be labeled "event_<event_number>".
+ """),
+ Parameter('period', kind=int, default=10,
+ description='The period (in jiffies) between counter reads.'),
+ Parameter('install_module', kind=boolean, default=True,
+ description="""
+ Specifies whether pmu_logger has been compiled as a .ko module that needs
+ to be installed by the instrument. (.ko binary must be in {}). If this is set
+ to ``False``, it will be assumed that pmu_logger has been compiled into the kernel,
+ or that it has been installed prior to the invocation of WA.
+ """.format(settings.dependencies_directory)),
+ ]
+
+ def on_run_init(self, context):
+ if self.install_module:
+ self.device_driver_file = self.device.path.join(self.device.working_directory, DRIVER)
+ host_driver_file = os.path.join(settings.dependencies_directory, DRIVER)
+ self.device.push_file(host_driver_file, self.device_driver_file)
+
+ def setup(self, context):
+ if self.install_module:
+ self.device.execute('insmod {}'.format(self.device_driver_file), check_exit_code=False)
+ self.device.set_sysfile_value(CPL_PERIOD_FILE, self.period)
+ for i, event in enumerate(self.events):
+ counter = CPL_BASE + 'counter{}'.format(i)
+ self.device.set_sysfile_value(counter, event, verify=False)
+
+ def start(self, context):
+ self.device.set_sysfile_value(CPL_CONTROL_FILE, 1, verify=False)
+
+ def stop(self, context):
+ self.device.set_sysfile_value(CPL_CONTROL_FILE, 1, verify=False)
+
+ # Doing result processing inside teardown because need to make sure that
+ # trace-cmd has processed its results and generated the trace.txt
+ def teardown(self, context):
+ trace_file = os.path.join(context.output_directory, 'trace.txt')
+ rows = [['timestamp', 'cycles'] + self.event_labels]
+ with open(trace_file) as fh:
+ for line in fh:
+ match = REGEX.search(line)
+ if match:
+ rows.append([
+ float(match.group(1)),
+ int(match.group(2), 16),
+ int(match.group(3), 16),
+ int(match.group(4), 16),
+ int(match.group(5), 16),
+ int(match.group(6), 16),
+ ])
+ output_file = os.path.join(context.output_directory, 'cci_counters.txt')
+ with open(output_file, 'wb') as wfh:
+ writer = csv.writer(wfh)
+ writer.writerows(rows)
+ context.add_iteration_artifact('cci_counters', path='cci_counters.txt', kind='data',
+ description='CCI PMU counter data.')
+
+ # summary metrics
+ sums = map(sum, zip(*(r[1:] for r in rows[1:])))
+ labels = ['cycles'] + self.event_labels
+ for label, value in zip(labels, sums):
+ context.result.add_metric('cci ' + label, value, lower_is_better=True)
+
+ # actual teardown
+ if self.install_module:
+ self.device.execute('rmmod pmu_logger', check_exit_code=False)
+
+ def validate(self):
+ if not instrument_is_installed('trace-cmd'):
+ raise ConfigError('To use cci_pmu_logger, trace-cmd instrument must also be enabled.')
+ if not self.event_labels: # pylint: disable=E0203
+ self.event_labels = ['event_{}'.format(e) for e in self.events]
+ elif not len(self.events) == len(self.event_labels):
+ raise ConfigError('cci_pmu_events and cci_pmu_event_labels must be of the same length.')
+ if len(self.events) > NUMBER_OF_CCI_PMU_COUNTERS:
+ raise ConfigError('The number cci_pmu_counters must be at most {}'.format(NUMBER_OF_CCI_PMU_COUNTERS))
diff --git a/wlauto/instrumentation/streamline/__init__.py b/wlauto/instrumentation/streamline/__init__.py
new file mode 100644
index 00000000..841c44b0
--- /dev/null
+++ b/wlauto/instrumentation/streamline/__init__.py
@@ -0,0 +1,298 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,E1101
+import os
+import signal
+import shutil
+import subprocess
+import logging
+import re
+
+from wlauto import settings, Instrument, Parameter, ResourceGetter, GetterPriority, File
+from wlauto.exceptions import InstrumentError, DeviceError, ResourceError
+from wlauto.utils.misc import ensure_file_directory_exists as _f
+from wlauto.utils.types import boolean
+from wlauto.utils.log import StreamLogger, LogWriter, LineLogWriter
+
+
+SESSION_TEXT_TEMPLATE = ('<?xml version="1.0" encoding="US-ASCII" ?>'
+ '<session'
+ ' version="1"'
+ ' output_path="x"'
+ ' call_stack_unwinding="no"'
+ ' parse_debug_info="no"'
+ ' high_resolution="no"'
+ ' buffer_mode="streaming"'
+ ' sample_rate="none"'
+ ' duration="0"'
+ ' target_host="127.0.0.1"'
+ ' target_port="{}"'
+ ' energy_cmd_line="{}">'
+ '</session>')
+
+VERSION_REGEX = re.compile(r'\(DS-5 v(.*?)\)')
+
+
class StreamlineResourceGetter(ResourceGetter):
    """Resolves file resources owned by the streamline instrument.

    Looks in the streamline dependencies directory first, then falls back to
    the legacy location directly under the environment root.
    """

    name = 'streamline_resource'
    resource_type = 'file'
    priority = GetterPriority.environment + 1  # run before standard environment resolvers.

    dependencies_directory = os.path.join(settings.dependencies_directory, 'streamline')
    old_dependencies_directory = os.path.join(settings.environment_root, 'streamline')  # backwards compatibility

    def get(self, resource, **kwargs):
        # Only handle resources requested by the streamline instrument itself;
        # returns None (implicitly) when the file cannot be found.
        if resource.owner.name != 'streamline':
            return None
        search_dirs = (self.dependencies_directory, self.old_dependencies_directory)
        for directory in search_dirs:
            candidate = _f(os.path.join(directory, resource.path))
            if os.path.isfile(candidate):
                return candidate
+
+
class StreamlineInstrument(Instrument):

    name = 'streamline'
    description = """
    Collect Streamline traces from the device.

    .. note:: This instrument supports streamline that comes with DS-5 5.17 and later
              earlier versions of streamline may not work correctly (or at all).

    This Instrument allows collecting streamline traces (such as PMU counter values) from
    the device. It assumes you have DS-5 (which Streamline is part of) installed on your
    system, and that streamline command is somewhere in PATH.

    Streamline works by connecting to gator service on the device. gator comes in two parts
    a driver (gator.ko) and daemon (gatord). The driver needs to be compiled against your
    kernel and both driver and daemon need to be compatible with your version of Streamline.
    The best way to ensure compatibility is to build them from source which came with your
    DS-5. gator source can be found in ::

        /usr/local/DS-5/arm/gator

    (the exact path may vary depending of where you have installed DS-5.) Please refer to the
    README the accompanies the source for instructions on how to build it.

    Once you have built the driver and the daemon, place the binaries into your
    ~/.workload_automation/streamline/ directory (if you haven't tried running WA with
    this instrument before, the streamline/ subdirectory might not exist, in which
    case you will need to create it.

    In order to specify which events should be captured, you need to provide a
    configuration.xml for the gator. The easiest way to obtain this file is to export it
    from event configuration dialog in DS-5 streamline GUI. The file should be called
    "configuration.xml" and it be placed in the same directory as the gator binaries.

    With that done, you can enable streamline traces by adding the following entry to
    instrumentation list in your ~/.workload_automation/config.py

    ::

        instrumentation = [
            # ...
            'streamline',
            # ...
        ]

    You can also specify the following (optional) configuration in the same config file:

    """
    supported_platforms = ['android']

    parameters = [
        Parameter('port', default='8080',
                  description='Specifies the port on which streamline will connect to gator'),
        Parameter('configxml', default=None,
                  description='streamline configuration XML file to be used. This must be '
                              'an absolute path, though it may count the user home symbol (~)'),
        Parameter('report', kind=boolean, default=False, global_alias='streamline_report_csv',
                  description='Specifies whether a report should be generated from streamline data.'),
        Parameter('report_options', kind=str, default='-format csv',
                  description='A string with options that will be added to stramline -report command.'),
    ]

    # Names of the on-device gator daemon, driver, and events configuration.
    daemon = 'gatord'
    driver = 'gator.ko'
    configuration_file_name = 'configuration.xml'

    def __init__(self, device, **kwargs):
        super(StreamlineInstrument, self).__init__(device, **kwargs)
        self.streamline = None          # host-side streamline capture process
        self.session_file = None        # generated streamline session XML (host)
        self.capture_file = None        # per-iteration .apc capture output
        self.analysis_file = None
        self.report_file = None         # per-iteration CSV report (if report=True)
        self.configuration_file = None  # host-side gator configuration.xml
        self.on_device_config = None    # device-side configuration.xml path
        self.daemon_process = None      # background gatord process handle
        self.enabled = False
        self.resource_getter = None

        self.host_daemon_file = None
        self.host_driver_file = None
        self.device_driver_file = None

        # Fail early: the streamline binary needs a working display to run.
        self._check_has_valid_display()

    def on_run_start(self, context):
        # Verify caiman (DS-5's command-line capture tool) is in PATH and
        # recent enough (DS-5 v5.17+) before enabling this instrument.
        if subprocess.call('which caiman', stdout=subprocess.PIPE, shell=True):
            raise InstrumentError('caiman not in PATH. Cannot enable Streamline tracing.')
        p = subprocess.Popen('caiman --version 2>&1', stdout=subprocess.PIPE, shell=True)
        out, _ = p.communicate()
        match = VERSION_REGEX.search(out)
        if not match:
            raise InstrumentError('caiman not in PATH. Cannot enable Streamline tracing.')
        version_tuple = tuple(map(int, match.group(1).split('.')))
        if version_tuple < (5, 17):
            raise InstrumentError('Need DS-5 v5.17 or greater; found v{}'.format(match.group(1)))
        self.enabled = True
        # Register the streamline-specific resource getter for this run only;
        # it is unregistered again in on_run_end.
        self.resource_getter = StreamlineResourceGetter(context.resolver)
        self.resource_getter.register()

    def on_run_end(self, context):
        self.enabled = False
        self.resource_getter.unregister()

    def on_run_init(self, context):
        # Deploy gatord and gator.ko if host-side copies are resolvable;
        # otherwise fall back to whatever is already present on the device.
        try:
            self.host_daemon_file = context.resolver.get(File(self, self.daemon))
            self.logger.debug('Using daemon from {}.'.format(self.host_daemon_file))
            self.device.killall(self.daemon)  # in case a version is already running
            self.device.install(self.host_daemon_file)
        except ResourceError:
            self.logger.debug('Using on-device daemon.')

        try:
            self.host_driver_file = context.resolver.get(File(self, self.driver))
            self.logger.debug('Using driver from {}.'.format(self.host_driver_file))
            self.device_driver_file = self.device.install(self.host_driver_file)
        except ResourceError:
            self.logger.debug('Using on-device driver.')

        try:
            # An explicit configxml parameter takes precedence over a
            # resolver-located configuration.xml dependency.
            self.configuration_file = (os.path.expanduser(self.configxml or '') or
                                       context.resolver.get(File(self, self.configuration_file_name)))
            self.logger.debug('Using {}'.format(self.configuration_file))
            self.on_device_config = self.device.path.join(self.device.working_directory, 'configuration.xml')
            shutil.copy(self.configuration_file, settings.meta_directory)
        except ResourceError:
            self.logger.debug('No configuration file was specfied.')

        # Generate the streamline session file pointing at the forwarded
        # local port, with caiman configured as the energy capture command.
        caiman_path = subprocess.check_output('which caiman', shell=True).strip()  # pylint: disable=E1103
        self.session_file = os.path.join(context.host_working_directory, 'streamline_session.xml')
        with open(self.session_file, 'w') as wfh:
            wfh.write(SESSION_TEXT_TEMPLATE.format(self.port, caiman_path))

    def setup(self, context):
        # Note: the config file needs to be copies on each iteration's setup
        # because gator appears to "consume" it on invocation...
        if self.configuration_file:
            self.device.push_file(self.configuration_file, self.on_device_config)
        self._initialize_daemon()
        self.capture_file = _f(os.path.join(context.output_directory, 'streamline', 'capture.apc'))
        self.report_file = _f(os.path.join(context.output_directory, 'streamline', 'streamline.csv'))

    def start(self, context):
        if self.enabled:
            command = ['streamline', '-capture', self.session_file, '-output', self.capture_file]
            # os.setpgrp puts streamline into its own process group so the
            # whole group can be SIGTERMed together in stop().
            self.streamline = subprocess.Popen(command,
                                               stdout=subprocess.PIPE,
                                               stderr=subprocess.PIPE,
                                               stdin=subprocess.PIPE,
                                               preexec_fn=os.setpgrp)
            outlogger = StreamLogger('streamline', self.streamline.stdout, klass=LineLogWriter)
            errlogger = StreamLogger('streamline', self.streamline.stderr, klass=LineLogWriter)
            outlogger.start()
            errlogger.start()

    def stop(self, context):
        if self.enabled:
            # Terminate the entire streamline process group created in start().
            os.killpg(self.streamline.pid, signal.SIGTERM)

    def update_result(self, context):
        if self.enabled:
            self._kill_daemon()
            if self.report:
                # Post-process the .apc capture into a CSV report on the host.
                self.logger.debug('Creating report...')
                command = ['streamline', '-report', self.capture_file, '-output', self.report_file]
                command += self.report_options.split()
                _run_streamline_command(command)
                context.add_artifact('streamlinecsv', self.report_file, 'data')

    def teardown(self, context):
        self.device.delete_file(self.on_device_config)

    def _check_has_valid_display(self):  # pylint: disable=R0201
        # streamline is a GUI tool and will not run without a display server.
        # NOTE(review): on non-POSIX hosts this falls through to the xhost
        # check, which will raise OSError if xhost is absent -- confirm
        # whether non-POSIX hosts are meant to be supported at all.
        reason = None
        if os.name == 'posix' and not os.getenv('DISPLAY'):
            reason = 'DISPLAY is not set.'
        else:
            p = subprocess.Popen('xhost', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            _, error = p.communicate()
            if p.returncode:
                reason = 'Invalid DISPLAY; xhost returned: "{}".'.format(error.strip())  # pylint: disable=E1103
        if reason:
            raise InstrumentError('{}\nstreamline binary requires a valid display server to be running.'.format(reason))

    def _initialize_daemon(self):
        # Load the gator driver (tolerating "already loaded"), start gatord,
        # then forward the gator TCP port so streamline can reach 127.0.0.1.
        if self.device_driver_file:
            try:
                self.device.execute('insmod {}'.format(self.device_driver_file))
            except DeviceError, e:
                if 'File exists' not in e.message:
                    raise
                self.logger.debug('Driver was already installed.')
        self._start_daemon()
        port_spec = 'tcp:{}'.format(self.port)
        self.device.forward_port(port_spec, port_spec)

    def _start_daemon(self):
        self.logger.debug('Starting gatord')
        self.device.killall('gatord', as_root=True)
        # Pass the events configuration to gatord only if one was resolved.
        if self.configuration_file:
            command = '{} -c {}'.format(self.daemon, self.on_device_config)
        else:
            command = '{}'.format(self.daemon)

        self.daemon_process = self.device.execute(command, as_root=True, background=True)
        outlogger = StreamLogger('gatord', self.daemon_process.stdout)
        errlogger = StreamLogger('gatord', self.daemon_process.stderr, logging.ERROR)
        outlogger.start()
        errlogger.start()
        if self.daemon_process.poll() is not None:
            # If adb returned, something went wrong.
            raise InstrumentError('Could not start gatord.')

    def _kill_daemon(self):
        self.logger.debug('Killing daemon process.')
        self.daemon_process.kill()
+
+
def _run_streamline_command(command):
    # Run the given streamline command to completion, routing its stdout
    # and stderr through the 'streamline' log.
    proc = subprocess.Popen(command,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    for text in (out, err):
        LogWriter('streamline').write(text).close()
+
diff --git a/wlauto/instrumentation/trace_cmd/LICENSE b/wlauto/instrumentation/trace_cmd/LICENSE
new file mode 100644
index 00000000..9d46c1a5
--- /dev/null
+++ b/wlauto/instrumentation/trace_cmd/LICENSE
@@ -0,0 +1,39 @@
+Included trace-cmd binaries are Free Software distributed under GPLv2:
+
+/*
+ * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+The full text of the license may be viewed here:
+
+http://www.gnu.org/licenses/gpl-2.0.html
+
+Source code for trace-cmd may be obtained here:
+
+git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/trace-cmd.git
+
+Binaries included here contain modifications by ARM that, at the time of writing,
+have not yet made it into the above repository. The patches for these modifications
+are available here:
+
+http://article.gmane.org/gmane.linux.kernel/1869111
+http://article.gmane.org/gmane.linux.kernel/1869112
+
+
+
diff --git a/wlauto/instrumentation/trace_cmd/__init__.py b/wlauto/instrumentation/trace_cmd/__init__.py
new file mode 100644
index 00000000..fdd4f76b
--- /dev/null
+++ b/wlauto/instrumentation/trace_cmd/__init__.py
@@ -0,0 +1,322 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,E1101
+from __future__ import division
+import os
+import time
+import subprocess
+from collections import defaultdict
+
+from wlauto import Instrument, Parameter, Executable
+from wlauto.exceptions import InstrumentError, ConfigError
+from wlauto.core import signal
+from wlauto.utils.types import boolean
+
+OUTPUT_TRACE_FILE = 'trace.dat'
+OUTPUT_TEXT_FILE = '{}.txt'.format(os.path.splitext(OUTPUT_TRACE_FILE)[0])
+TIMEOUT = 180
+
+
class TraceCmdInstrument(Instrument):

    name = 'trace-cmd'
    description = """
    trace-cmd is an instrument which interacts with Ftrace Linux kernel internal
    tracer

    From trace-cmd man page:

    trace-cmd command interacts with the Ftrace tracer that is built inside the
    Linux kernel. It interfaces with the Ftrace specific files found in the
    debugfs file system under the tracing directory.

    trace-cmd reads a list of events it will trace, which can be specified in
    the config file as follows ::

        trace_events = ['irq*', 'power*']

    If no event is specified in the config file, trace-cmd traces the following events:

        - sched*
        - irq*
        - power*
        - cpufreq_interactive*

    The list of available events can be obtained by rooting and running the following
    command line on the device ::

        trace-cmd list

    You may also specify ``trace_buffer_size`` setting which must be an integer that will
    be used to set the ftrace buffer size. It will be interpreted as KB::

        trace_cmd_buffer_size = 8000

    The maximum buffer size varies from device to device, but there is a maximum and trying
    to set buffer size beyound that will fail. If you plan on collecting a lot of trace over
    long periods of time, the buffer size will not be enough and you will only get trace for
    the last portion of your run. To deal with this you can set the ``trace_mode`` setting to
    ``'record'`` (the default is ``'start'``)::

        trace_cmd_mode = 'record'

    This will cause trace-cmd to trace into file(s) on disk, rather than the buffer, and so the
    limit for the max size of the trace is set by the storage available on device. Bear in mind
    that ``'record'`` mode *is* more instrusive than the default, so if you do not plan on
    generating a lot of trace, it is best to use the default ``'start'`` mode.

    .. note:: Mode names correspend to the underlying trace-cmd exectuable's command used to
              implement them. You can find out more about what is happening in each case from
              trace-cmd documentation: https://lwn.net/Articles/341902/.

    This instrument comes with an Android trace-cmd binary that will be copied and used on the
    device, however post-processing will be done on-host and you must have trace-cmd installed and
    in your path. On Ubuntu systems, this may be done with::

        sudo apt-get install trace-cmd

    """

    parameters = [
        Parameter('events', kind=list, default=['sched*', 'irq*', 'power*', 'cpufreq_interactive*'],
                  global_alias='trace_events',
                  description="""
                  Specifies the list of events to be traced. Each event in the list will be passed to
                  trace-cmd with -e parameter and must be in the format accepted by trace-cmd.
                  """),
        Parameter('mode', default='start', allowed_values=['start', 'record'],
                  global_alias='trace_mode',
                  description="""
                  Trace can be collected using either 'start' or 'record' trace-cmd
                  commands. In 'start' mode, trace will be collected into the ftrace buffer;
                  in 'record' mode, trace will be written into a file on the device's file
                  system. 'start' mode is (in theory) less intrusive than 'record' mode, however
                  it is limited by the size of the ftrace buffer (which is configurable --
                  see ``buffer_size`` -- but only up to a point) and that may overflow
                  for long-running workloads, which will result in dropped events.
                  """),
        Parameter('buffer_size', kind=int, default=None,
                  global_alias='trace_buffer_size',
                  description="""
                  Attempt to set ftrace buffer size to the specified value (in KB). Default buffer size
                  may need to be increased for long-running workloads, or if a large number
                  of events have been enabled. Note: there is a maximum size that the buffer can
                  be set, and that varies from device to device. Attempting to set buffer size higher
                  than this will fail. In that case, this instrument will set the size to the highest
                  possible value by going down from the specified size in ``buffer_size_step`` intervals.
                  """),
        Parameter('buffer_size_step', kind=int, default=1000,
                  global_alias='trace_buffer_size_step',
                  description="""
                  Defines the decremental step used if the specified ``buffer_size`` could not be set.
                  This will be subtracted form the buffer size until set succeeds or size is reduced to
                  1MB.
                  """),
        Parameter('buffer_size_file', default='/d/tracing/buffer_size_kb',
                  description="""
                  Path to the debugs file that may be used to set ftrace buffer size. This should need
                  to be modified for the vast majority devices.
                  """),
        Parameter('report', kind=boolean, default=True,
                  description="""
                  Specifies whether host-side reporting should be performed once the binary trace has been
                  pulled form the device.

                  .. note:: This requires the latest version of trace-cmd to be installed on the host (the
                            one in your distribution's repos may be too old).

                  """),
        Parameter('no_install', kind=boolean, default=False,
                  description="""
                  Do not install the bundled trace-cmd and use the one on the device instead. If there is
                  not already a trace-cmd on the device, an error is raised.

                  """),
    ]

    def __init__(self, device, **kwargs):
        super(TraceCmdInstrument, self).__init__(device, **kwargs)
        self.trace_cmd = None  # device-side trace-cmd invocation; set in on_run_init
        # Pre-rendered "-e <event> -e <event> ..." switch string.
        self.event_string = _build_trace_events(self.events)
        self.output_file = os.path.join(self.device.working_directory, OUTPUT_TRACE_FILE)
        self.temp_trace_file = self.device.path.join(self.device.working_directory, OUTPUT_TRACE_FILE)

    def on_run_init(self, context):
        # Ftrace lives under debugfs, so a rooted device is required.
        if not self.device.is_rooted:
            raise InstrumentError('trace-cmd instrument cannot be used on an unrooted device.')
        if not self.no_install:
            host_file = context.resolver.get(Executable(self, self.device.abi, 'trace-cmd'))
            self.trace_cmd = self.device.install_executable(host_file)
        else:
            if not self.device.is_installed('trace-cmd'):
                raise ConfigError('No trace-cmd found on device and no_install=True is specified.')
            self.trace_cmd = 'trace-cmd'
        # Register ourselves as absolute last event before and
        # first after so we can mark the trace at the right time
        signal.connect(self.insert_start_mark, signal.BEFORE_WORKLOAD_EXECUTION, priority=11)
        signal.connect(self.insert_end_mark, signal.AFTER_WORKLOAD_EXECUTION, priority=11)

    def setup(self, context):
        if self.mode == 'start':
            if self.buffer_size:
                self._set_buffer_size()
            # Clear any trace state left over from a previous run.
            self.device.execute('{} reset'.format(self.trace_cmd), as_root=True, timeout=180)
        elif self.mode == 'record':
            pass
        else:
            raise ValueError('Bad mode: {}'.format(self.mode))  # should never get here

    def start(self, context):
        self.start_time = time.time()  # pylint: disable=attribute-defined-outside-init
        if self.mode == 'start':
            self.device.execute('{} start {}'.format(self.trace_cmd, self.event_string), as_root=True)
        elif self.mode == 'record':
            # kick_off() detaches the command so recording continues for the
            # duration of the workload.
            self.device.kick_off('{} record -o {} {}'.format(self.trace_cmd, self.output_file, self.event_string))
        else:
            raise ValueError('Bad mode: {}'.format(self.mode))  # should never get here

    def stop(self, context):
        self.stop_time = time.time()  # pylint: disable=attribute-defined-outside-init
        if self.mode == 'start':
            self.device.execute('{} stop'.format(self.trace_cmd), timeout=60, as_root=True)
        elif self.mode == 'record':
            # There will be a trace-cmd worker process per CPU core plus a main
            # control trace-cmd process. Interrupting the control process will
            # trigger the generation of the single binary trace file.
            trace_cmds = self.device.ps(name=self.trace_cmd)
            if not trace_cmds:
                raise InstrumentError('Could not find running trace-cmd on device.')
            # The workers will have their PPID set to the PID of control.
            parent_map = defaultdict(list)
            for entry in trace_cmds:
                parent_map[entry.ppid].append(entry.pid)
            # A control process appears exactly once as a child (of its shell)
            # and also as a parent (of its workers).
            controls = [v[0] for _, v in parent_map.iteritems()
                        if len(v) == 1 and v[0] in parent_map]
            if len(controls) > 1:
                self.logger.warning('More than one trace-cmd instance found; stopping all of them.')
            for c in controls:
                self.device.kill(c, signal='INT', as_root=True)
        else:
            raise ValueError('Bad mode: {}'.format(self.mode))  # should never get here

    def update_result(self, context):  # NOQA pylint: disable=R0912
        if self.mode == 'start':
            self.device.execute('{} extract -o {}'.format(self.trace_cmd, self.output_file),
                                timeout=TIMEOUT, as_root=True)
        elif self.mode == 'record':
            self.logger.debug('Waiting for trace.dat to be generated.')
            while self.device.ps(name=self.trace_cmd):
                time.sleep(2)
        else:
            raise ValueError('Bad mode: {}'.format(self.mode))  # should never get here

        # The size of trace.dat will depend on how long trace-cmd was running.
        # Therefore timout for the pull command must also be adjusted
        # accordingly.
        # NOTE(review): for very short iterations this timeout can be close to
        # zero seconds, which may cause the pull to fail -- consider a floor.
        pull_timeout = (self.stop_time - self.start_time)
        self.device.pull_file(self.output_file, context.output_directory, timeout=pull_timeout)
        context.add_iteration_artifact('bintrace', OUTPUT_TRACE_FILE, kind='data',
                                       description='trace-cmd generated ftrace dump.')

        local_trace_file = os.path.join(context.output_directory, OUTPUT_TRACE_FILE)
        local_txt_trace_file = os.path.join(context.output_directory, OUTPUT_TEXT_FILE)

        if self.report:
            # To get the output of trace.dat, trace-cmd must be installed
            # This is done host-side because the generated file is very large
            # NOTE(review): if trace.dat is missing only a warning is logged
            # and the report is still attempted below -- confirm intended.
            if not os.path.isfile(local_trace_file):
                self.logger.warning('Not generating trace.txt, as trace.bin does not exist.')
            try:
                command = 'trace-cmd report {} > {}'.format(local_trace_file, local_txt_trace_file)
                self.logger.debug(command)
                process = subprocess.Popen(command, stderr=subprocess.PIPE, shell=True)
                _, error = process.communicate()
                if process.returncode:
                    raise InstrumentError('trace-cmd returned non-zero exit code {}'.format(process.returncode))
                if error:
                    # logged at debug level, as trace-cmd always outputs some
                    # errors that seem benign.
                    self.logger.debug(error)
                if os.path.isfile(local_txt_trace_file):
                    context.add_iteration_artifact('txttrace', OUTPUT_TEXT_FILE, kind='export',
                                                   description='trace-cmd generated ftrace dump.')
                    self.logger.debug('Verifying traces.')
                    # Scan the text dump for ring-buffer overflow markers.
                    with open(local_txt_trace_file) as fh:
                        for line in fh:
                            if 'EVENTS DROPPED' in line:
                                self.logger.warning('Dropped events detected.')
                                break
                        else:
                            self.logger.debug('Trace verified.')
                else:
                    self.logger.warning('Could not generate trace.txt.')
            except OSError:
                raise InstrumentError('Could not find trace-cmd. Please make sure it is installed and is in PATH.')

    def teardown(self, context):
        self.device.delete_file(os.path.join(self.device.working_directory, OUTPUT_TRACE_FILE))

    def on_run_end(self, context):
        pass

    def validate(self):
        # The host-side trace-cmd binary is only needed when report=True.
        if self.report and os.system('which trace-cmd > /dev/null'):
            raise InstrumentError('trace-cmd is not in PATH; is it installed?')
        if self.buffer_size:
            if self.mode == 'record':
                self.logger.debug('trace_buffer_size specified with record mode; it will be ignored.')
            else:
                try:
                    int(self.buffer_size)
                except ValueError:
                    raise ConfigError('trace_buffer_size must be an int.')

    def insert_start_mark(self, context):
        # trace marker appears in ftrace as an ftrace/print event with TRACE_MARKER_START in info field
        self.device.set_sysfile_value("/sys/kernel/debug/tracing/trace_marker", "TRACE_MARKER_START", verify=False)

    def insert_end_mark(self, context):
        # trace marker appears in ftrace as an ftrace/print event with TRACE_MARKER_STOP in info field
        self.device.set_sysfile_value("/sys/kernel/debug/tracing/trace_marker", "TRACE_MARKER_STOP", verify=False)

    def _set_buffer_size(self):
        # Try to set the requested ftrace buffer size; if the kernel rejects
        # it, walk down in buffer_size_step decrements until a set sticks,
        # then walk back up to find the largest value that is accepted.
        target_buffer_size = self.buffer_size
        attempt_buffer_size = target_buffer_size
        buffer_size = 0
        floor = 1000 if target_buffer_size > 1000 else target_buffer_size
        while attempt_buffer_size >= floor:
            self.device.set_sysfile_value(self.buffer_size_file, attempt_buffer_size, verify=False)
            buffer_size = self.device.get_sysfile_value(self.buffer_size_file, kind=int)
            if buffer_size == attempt_buffer_size:
                break
            else:
                attempt_buffer_size -= self.buffer_size_step
        if buffer_size == target_buffer_size:
            return
        while attempt_buffer_size < target_buffer_size:
            attempt_buffer_size += self.buffer_size_step
            self.device.set_sysfile_value(self.buffer_size_file, attempt_buffer_size, verify=False)
            buffer_size = self.device.get_sysfile_value(self.buffer_size_file, kind=int)
            if attempt_buffer_size != buffer_size:
                self.logger.warning('Failed to set trace buffer size to {}, value set was {}'.format(target_buffer_size, buffer_size))
                break
+
+
+def _build_trace_events(events):
+ event_string = ' '.join(['-e {}'.format(e) for e in events])
+ return event_string
+
diff --git a/wlauto/instrumentation/trace_cmd/bin/arm64/trace-cmd b/wlauto/instrumentation/trace_cmd/bin/arm64/trace-cmd
new file mode 100755
index 00000000..0d025d0d
--- /dev/null
+++ b/wlauto/instrumentation/trace_cmd/bin/arm64/trace-cmd
Binary files differ
diff --git a/wlauto/instrumentation/trace_cmd/bin/armeabi/trace-cmd b/wlauto/instrumentation/trace_cmd/bin/armeabi/trace-cmd
new file mode 100755
index 00000000..a4456627
--- /dev/null
+++ b/wlauto/instrumentation/trace_cmd/bin/armeabi/trace-cmd
Binary files differ
diff --git a/wlauto/modules/__init__.py b/wlauto/modules/__init__.py
new file mode 100644
index 00000000..16224d6f
--- /dev/null
+++ b/wlauto/modules/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
diff --git a/wlauto/modules/active_cooling.py b/wlauto/modules/active_cooling.py
new file mode 100644
index 00000000..7f9fbb03
--- /dev/null
+++ b/wlauto/modules/active_cooling.py
@@ -0,0 +1,64 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto import Module, Parameter
+from wlauto.utils.serial_port import open_serial_connection
+
+
class MbedFanActiveCooling(Module):
    # Active cooling implemented as a fan driven by an mbed microcontroller
    # reached over a serial connection; fan on/off is toggled by sending
    # "motor_<pin>_<1|0>" commands.

    name = 'mbed-fan'

    capabilities = ['active_cooling']

    parameters = [
        Parameter('port', default='/dev/ttyACM0',
                  description="""The serial port for the active cooling solution (see above)."""),
        # NOTE(review): 'buad' is a typo for 'baud', but it is the public
        # configuration parameter name (and is read as self.buad below), so
        # renaming it would break existing user configs -- kept as-is.
        Parameter('buad', kind=int, default=115200,
                  description="""Baud for the serial port (see above)."""),
        Parameter('fan_pin', kind=int, default=0,
                  description="""Which controller pin on the mbed the fan for the active cooling solution is
                  connected to (controller pin 0 is physical pin 22 on the mbed)."""),
    ]

    # Serial connection timeout in seconds.
    timeout = 30

    def start_active_cooling(self):
        # motor_<pin>_1 switches the fan on.
        with open_serial_connection(timeout=self.timeout,
                                    port=self.port,
                                    baudrate=self.buad) as target:
            target.sendline('motor_{}_1'.format(self.fan_pin))

    def stop_active_cooling(self):
        # motor_<pin>_0 switches the fan off.
        with open_serial_connection(timeout=self.timeout,
                                    port=self.port,
                                    baudrate=self.buad) as target:
            target.sendline('motor_{}_0'.format(self.fan_pin))
+
+
class OdroidXU3ctiveCooling(Module):
    # Active cooling via the Odroid XU3's on-board fan, controlled through
    # sysfs. NOTE(review): class name is missing an 'A' (OdroidXU3ActiveCooling);
    # kept as-is since renaming a public identifier could break external
    # references -- the module is looked up via ``name`` below anyway.

    name = 'odroidxu3-fan'

    capabilities = ['active_cooling']

    def start_active_cooling(self):
        # fan_mode 0 presumably selects manual PWM control -- TODO confirm
        # against Odroid XU3 kernel docs; pwm_duty 255 is full speed.
        self.owner.set_sysfile_value('/sys/devices/odroid_fan.15/fan_mode', 0, verify=False)
        self.owner.set_sysfile_value('/sys/devices/odroid_fan.15/pwm_duty', 255, verify=False)

    def stop_active_cooling(self):
        # pwm_duty 1 effectively stops the fan; fan_mode is left at 0 rather
        # than being restored -- NOTE(review): confirm this is intended.
        self.owner.set_sysfile_value('/sys/devices/odroid_fan.15/fan_mode', 0, verify=False)
        self.owner.set_sysfile_value('/sys/devices/odroid_fan.15/pwm_duty', 1, verify=False)
diff --git a/wlauto/modules/flashing.py b/wlauto/modules/flashing.py
new file mode 100644
index 00000000..38cf95ce
--- /dev/null
+++ b/wlauto/modules/flashing.py
@@ -0,0 +1,253 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=attribute-defined-outside-init
+import os
+import time
+import tarfile
+import tempfile
+import shutil
+
+from wlauto import Module, Parameter
+from wlauto.exceptions import ConfigError, DeviceError
+from wlauto.utils.android import fastboot_flash_partition, fastboot_command
+from wlauto.utils.serial_port import open_serial_connection
+from wlauto.utils.uefi import UefiMenu
+from wlauto.utils.types import boolean
+from wlauto.utils.misc import merge_dicts
+
+
+class Flasher(Module):
+    """
+    Implements a mechanism for flashing a device. The images to be flashed can be
+    specified either as a tarball "image bundle" (in which case instructions for
+    flashing are provided as flasher-specific metadata also in the bundle), or as
+    individual image files, in which case instructions for flashing as specified
+    as part of flashing config.
+
+    .. note:: It is important that when resolving configuration, concrete flasher
+              implementations prioritise settings specified in the config over those
+              in the bundle (if they happen to clash).
+
+    """
+
+    capabilities = ['flash']
+
+    def flash(self, image_bundle=None, images=None):
+        """
+        Flashes the specified device using the specified config. As a post condition,
+        the device must be ready to run workloads upon returning from this method (e.g.
+        it must be fully-booted into the OS).
+
+        :param image_bundle: optional path to a tarball containing the images plus
+                             flasher-specific metadata describing how to flash them.
+        :param images: optional mapping of individual images to flash.
+        :raises NotImplementedError: always; concrete flashers must override this.
+        """
+        raise NotImplementedError()
+
+
+class FastbootFlasher(Flasher):
+
+ name = 'fastboot'
+ description = """
+ Enables automated flashing of images using the fastboot utility.
+ To use this flasher, a set of image files to be flused are required.
+ In addition a mapping between partitions and image file is required. There are two ways
+ to specify those requirements:
+
+ - Image mapping: In this mode, a mapping between partitions and images is given in the agenda.
+ - Image Bundle: In This mode a tarball is specified, which must contain all image files as well
+ as well as a partition file, named ``partitions.txt`` which contains the mapping between
+ partitions and images.
+
+ The format of ``partitions.txt`` defines one mapping per line as such: ::
+
+ kernel zImage-dtb
+ ramdisk ramdisk_image
+
+ """
+
+ delay = 0.5
+ serial_timeout = 30
+ partitions_file_name = 'partitions.txt'
+
+ def flash(self, image_bundle=None, images=None):
+ self.prelude_done = False
+ to_flash = {}
+ if image_bundle: # pylint: disable=access-member-before-definition
+ image_bundle = expand_path(image_bundle)
+ to_flash = self._bundle_to_images(image_bundle)
+ to_flash = merge_dicts(to_flash, images or {}, should_normalize=False)
+ for partition, image_path in to_flash.iteritems():
+ self.logger.debug('flashing {}'.format(partition))
+ self._flash_image(self.owner, partition, expand_path(image_path))
+ fastboot_command('reboot')
+
+ def _validate_image_bundle(self, image_bundle):
+ if not tarfile.is_tarfile(image_bundle):
+ raise ConfigError('File {} is not a tarfile'.format(image_bundle))
+ with tarfile.open(image_bundle) as tar:
+ files = [tf.name for tf in tar.getmembers()]
+ if not any(pf in files for pf in (self.partitions_file_name, '{}/{}'.format(files[0], self.partitions_file_name))):
+ ConfigError('Image bundle does not contain the required partition file (see documentation)')
+
+ def _bundle_to_images(self, image_bundle):
+ """
+ Extracts the bundle to a temporary location and creates a mapping between the contents of the bundle
+ and images to be flushed.
+ """
+ self._validate_image_bundle(image_bundle)
+ extract_dir = tempfile.mkdtemp()
+ with tarfile.open(image_bundle) as tar:
+ tar.extractall(path=extract_dir)
+ files = [tf.name for tf in tar.getmembers()]
+ if self.partitions_file_name not in files:
+ extract_dir = os.path.join(extract_dir, files[0])
+ partition_file = os.path.join(extract_dir, self.partitions_file_name)
+ return get_mapping(extract_dir, partition_file)
+
+ def _flash_image(self, device, partition, image_path):
+ if not self.prelude_done:
+ self._fastboot_prelude(device)
+ fastboot_flash_partition(partition, image_path)
+ time.sleep(self.delay)
+
+ def _fastboot_prelude(self, device):
+ with open_serial_connection(port=device.port,
+ baudrate=device.baudrate,
+ timeout=self.serial_timeout,
+ init_dtr=0,
+ get_conn=False) as target:
+ device.reset()
+ time.sleep(self.delay)
+ target.sendline(' ')
+ time.sleep(self.delay)
+ target.sendline('fast')
+ time.sleep(self.delay)
+ self.prelude_done = True
+
+
+class VersatileExpressFlasher(Flasher):
+
+ name = 'vexpress'
+
+ parameters = [
+ Parameter('image_name', default='Image',
+ description='The name of the kernel image to boot.'),
+ Parameter('image_args', default=None,
+ description='Kernel arguments with which the image will be booted.'),
+ Parameter('fdt_support', kind=boolean, default=True,
+ description='Specifies whether the image has device tree support.'),
+ Parameter('initrd', default=None,
+ description='If the kernel image uses an INITRD, this can be used to specify it.'),
+ Parameter('fdt_path', default=None,
+ description='If specified, this will be set as the FDT path.'),
+ ]
+
+ def flash(self, image_bundle=None, images=None):
+ device = self.owner
+ if not hasattr(device, 'port') or not hasattr(device, 'microsd_mount_point'):
+ msg = 'Device {} does not appear to support VExpress flashing.'
+ raise ConfigError(msg.format(device.name))
+ with open_serial_connection(port=device.port,
+ baudrate=device.baudrate,
+ timeout=device.timeout,
+ init_dtr=0) as target:
+ target.sendline('usb_on') # this will cause the MicroSD to be mounted on the host
+ device.wait_for_microsd_mount_point(target)
+ self.deploy_images(device, image_bundle, images)
+
+ self.logger.debug('Resetting the device.')
+ device.hard_reset(target)
+
+ with open_serial_connection(port=device.port,
+ baudrate=device.baudrate,
+ timeout=device.timeout,
+ init_dtr=0) as target:
+ menu = UefiMenu(target)
+ menu.open(timeout=120)
+ if menu.has_option(device.uefi_entry):
+ self.logger.debug('Deleting existing device entry.')
+ menu.delete_entry(device.uefi_entry)
+ self.create_uefi_enty(device, menu)
+ menu.select(device.uefi_entry)
+ target.expect(device.android_prompt, timeout=device.timeout)
+
+ def create_uefi_enty(self, device, menu):
+ menu.create_entry(device.uefi_entry,
+ self.image_name,
+ self.image_args,
+ self.fdt_support,
+ self.initrd,
+ self.fdt_path)
+
+ def deploy_images(self, device, image_bundle=None, images=None):
+ try:
+ if image_bundle:
+ self.deploy_image_bundle(device, image_bundle)
+ if images:
+ self.overlay_images(device, images)
+ os.system('sync')
+ except (IOError, OSError), e:
+ msg = 'Could not deploy images to {}; got: {}'
+ raise DeviceError(msg.format(device.microsd_mount_point, e))
+
+ def deploy_image_bundle(self, device, bundle):
+ self.logger.debug('Validating {}'.format(bundle))
+ validate_image_bundle(bundle)
+ self.logger.debug('Extracting {} into {}...'.format(bundle, device.microsd_mount_point))
+ with tarfile.open(bundle) as tar:
+ tar.extractall(device.microsd_mount_point)
+
+ def overlay_images(self, device, images):
+ for dest, src in images.iteritems():
+ dest = os.path.join(device.microsd_mount_point, dest)
+ self.logger.debug('Copying {} to {}'.format(src, dest))
+ shutil.copy(src, dest)
+
+
+# utility functions
+
+def get_mapping(base_dir, partition_file):
+ mapping = {}
+ with open(partition_file) as pf:
+ for line in pf:
+ pair = line.split()
+ if len(pair) != 2:
+ ConfigError('partitions.txt is not properly formated')
+ image_path = os.path.join(base_dir, pair[1])
+ if not os.path.isfile(expand_path(image_path)):
+ ConfigError('file {} was not found in the bundle or was misplaced'.format(pair[1]))
+ mapping[pair[0]] = image_path
+ return mapping
+
+
+def expand_path(original_path):
+ path = os.path.abspath(os.path.expanduser(original_path))
+ if not os.path.exists(path):
+ raise ConfigError('{} does not exist.'.format(path))
+ return path
+
+
+def validate_image_bundle(bundle):
+ if not tarfile.is_tarfile(bundle):
+ raise ConfigError('Image bundle {} does not appear to be a valid TAR file.'.format(bundle))
+ with tarfile.open(bundle) as tar:
+ try:
+ tar.getmember('config.txt')
+ except KeyError:
+ try:
+ tar.getmember('./config.txt')
+ except KeyError:
+ msg = 'Tarball {} does not appear to be a valid image bundle (did not see config.txt).'
+ raise ConfigError(msg.format(bundle))
+
diff --git a/wlauto/modules/reset.py b/wlauto/modules/reset.py
new file mode 100644
index 00000000..31003f33
--- /dev/null
+++ b/wlauto/modules/reset.py
@@ -0,0 +1,52 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import time
+
+from wlauto import Module, Parameter
+from wlauto.exceptions import DeviceError
+from wlauto.utils.netio import KshellConnection
+
+
+class NetioSwitchReset(Module):
+
+ #pylint: disable=E1101
+ name = 'netio_switch'
+ capabilities = ['reset_power']
+
+ parameters = [
+ Parameter('host', default='ippowerbar',
+ description='IP address or DNS name of the Netio power switch.'),
+ Parameter('port', kind=int, default=1234,
+ description='Port on which KSHELL is listening.'),
+ Parameter('username', default='admin',
+ description='User name for the administrator on the Netio.'),
+ Parameter('password', default='admin',
+ description='User name for the administrator on the Netio.'),
+ Parameter('psu', kind=int, default=1,
+ description='The device port number on the Netio, i.e. which '
+ 'PSU port the device is connected to.'),
+ ]
+
+ def hard_reset(self):
+ try:
+ conn = KshellConnection(host=self.host, port=self.port)
+ conn.login(self.username, self.password)
+ conn.disable_port(self.psu)
+ time.sleep(2)
+ conn.enable_port(self.psu)
+ conn.close()
+ except Exception as e:
+ raise DeviceError('Could not reset power: {}'.format(e))
diff --git a/wlauto/resource_getters/__init__.py b/wlauto/resource_getters/__init__.py
new file mode 100644
index 00000000..cd5d64d6
--- /dev/null
+++ b/wlauto/resource_getters/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
diff --git a/wlauto/resource_getters/standard.py b/wlauto/resource_getters/standard.py
new file mode 100644
index 00000000..4de6d753
--- /dev/null
+++ b/wlauto/resource_getters/standard.py
@@ -0,0 +1,350 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+"""
+This module contains the standard set of resource getters used by Workload Automation.
+
+"""
+import os
+import sys
+import glob
+import shutil
+import inspect
+
+from wlauto import ResourceGetter, GetterPriority, Parameter, NO_ONE, settings, __file__ as __base_filepath
+from wlauto.exceptions import ResourceError
+from wlauto.utils.misc import ensure_directory_exists as _d
+from wlauto.utils.types import boolean
+
+
+class PackageFileGetter(ResourceGetter):
+
+    name = 'package_file'
+    description = """
+    Looks for exactly one file with the specified extension in the owner's directory. If a version
+    is specified on invocation of get, it will filter the discovered file based on that version.
+    Versions are treated as case-insensitive.
+    """
+
+    # Subclasses set this to the file extension they resolve (e.g. 'apk').
+    extension = None
+
+    def register(self):
+        # Register for the resource type matching our extension, at package priority.
+        self.resolver.register(self, self.extension, GetterPriority.package)
+
+    def get(self, resource, **kwargs):
+        # Search the directory of the module that defines the resource's owner.
+        resource_dir = os.path.dirname(sys.modules[resource.owner.__module__].__file__)
+        version = kwargs.get('version')
+        return get_from_location_by_extension(resource, resource_dir, self.extension, version)
+
+
+class EnvironmentFileGetter(ResourceGetter):
+
+    name = 'environment_file'
+    description = """Looks for exactly one file with the specified extension in the owner's directory. If a version
+    is specified on invocation of get, it will filter the discovered file based on that version.
+    Versions are treated as case-insensitive."""
+
+    # Subclasses set this to the file extension they resolve (e.g. 'apk').
+    extension = None
+
+    def register(self):
+        # Register for the resource type matching our extension, at environment priority.
+        self.resolver.register(self, self.extension, GetterPriority.environment)
+
+    def get(self, resource, **kwargs):
+        # Search the owner's dependencies directory in the user environment.
+        resource_dir = resource.owner.dependencies_directory
+        version = kwargs.get('version')
+        return get_from_location_by_extension(resource, resource_dir, self.extension, version)
+
+
+class ReventGetter(ResourceGetter):
+ """Implements logic for identifying revent files."""
+
+ def get_base_location(self, resource):
+ raise NotImplementedError()
+
+ def register(self):
+ self.resolver.register(self, 'revent', GetterPriority.package)
+
+ def get(self, resource, **kwargs):
+ filename = '.'.join([resource.owner.device.name, resource.stage, 'revent']).lower()
+ location = _d(os.path.join(self.get_base_location(resource), 'revent_files'))
+ for candidate in os.listdir(location):
+ if candidate.lower() == filename.lower():
+ return os.path.join(location, candidate)
+
+
+class PackageApkGetter(PackageFileGetter):
+    # Resolves .apk files shipped inside the owning extension's package directory.
+    name = 'package_apk'
+    extension = 'apk'
+
+
+class PackageJarGetter(PackageFileGetter):
+    # Resolves .jar files shipped inside the owning extension's package directory.
+    name = 'package_jar'
+    extension = 'jar'
+
+
+class PackageReventGetter(ReventGetter):
+
+    name = 'package_revent'
+
+    def get_base_location(self, resource):
+        # revent files ship alongside the owning extension's module.
+        return _get_owner_path(resource)
+
+
+class EnvironmentApkGetter(EnvironmentFileGetter):
+    # Resolves .apk files from the owner's environment dependencies directory.
+    name = 'environment_apk'
+    extension = 'apk'
+
+
+class EnvironmentJarGetter(EnvironmentFileGetter):
+    # Resolves .jar files from the owner's environment dependencies directory.
+    name = 'environment_jar'
+    extension = 'jar'
+
+
+class EnvironmentReventGetter(ReventGetter):
+
+    # NOTE(review): 'enviroment' is a typo for 'environment', but this is the
+    # user-visible getter name; renaming it could break existing references.
+    name = 'enviroment_revent'
+
+    def get_base_location(self, resource):
+        # revent files live in the owner's dependencies directory.
+        return resource.owner.dependencies_directory
+
+
+class ExecutableGetter(ResourceGetter):
+
+    name = 'exe_getter'
+    resource_type = 'executable'
+    priority = GetterPriority.environment
+
+    def get(self, resource, **kwargs):
+        # Look up the executable in the configured binaries repository under a
+        # per-platform subdirectory. Implicitly returns None if the repository
+        # is not configured or the file is missing.
+        if settings.binaries_repository:
+            path = os.path.join(settings.binaries_repository, resource.platform, resource.filename)
+            if os.path.isfile(path):
+                return path
+
+
+class PackageExecutableGetter(ExecutableGetter):
+
+    name = 'package_exe_getter'
+    priority = GetterPriority.package
+
+    def get(self, resource, **kwargs):
+        # Look under the owning extension's bin/<platform>/ directory.
+        path = os.path.join(_get_owner_path(resource), 'bin', resource.platform, resource.filename)
+        if os.path.isfile(path):
+            return path
+
+
+class EnvironmentExecutableGetter(ExecutableGetter):
+
+    name = 'env_exe_getter'
+
+    def get(self, resource, **kwargs):
+        # Look under <environment_root>/bin/<platform>/ in the user environment.
+        path = os.path.join(settings.environment_root, 'bin', resource.platform, resource.filename)
+        if os.path.isfile(path):
+            return path
+
+
+class DependencyFileGetter(ResourceGetter):
+
+    name = 'filer'
+    description = """
+    Gets resources from the specified mount point. Copies them the local dependencies
+    directory, and returns the path to the local copy.
+
+    """
+    resource_type = 'file'
+    relative_path = ''  # May be overridden by subclasses.
+
+    default_mount_point = '/'
+    priority = GetterPriority.remote
+
+    parameters = [
+        Parameter('mount_point', default='/', global_alias='filer_mount_point',
+                  description='Local mount point for the remote filer.'),
+    ]
+
+    def __init__(self, resolver, **kwargs):
+        super(DependencyFileGetter, self).__init__(resolver, **kwargs)
+        # NOTE(review): this overwrites the declared 'mount_point' parameter
+        # value with the global setting (or the class default) -- confirm that
+        # shadowing the per-instance parameter is intentional.
+        self.mount_point = settings.filer_mount_point or self.default_mount_point
+
+    def get(self, resource, **kwargs):
+        # Copy the file from the (locally mounted) filer into the owner's
+        # dependencies directory, unless a cached copy already exists.
+        # 'force' re-fetches even when a local copy is present.
+        force = kwargs.get('force')
+        remote_path = os.path.join(self.mount_point, self.relative_path, resource.path)
+        local_path = os.path.join(resource.owner.dependencies_directory, os.path.basename(resource.path))
+
+        if not os.path.isfile(local_path) or force:
+            if not os.path.isfile(remote_path):
+                return None
+            self.logger.debug('Copying {} to {}'.format(remote_path, local_path))
+            shutil.copy(remote_path, local_path)
+
+        return local_path
+
+
+class PackageCommonDependencyGetter(ResourceGetter):
+
+    name = 'packaged_common_dependency'
+    resource_type = 'file'
+    priority = GetterPriority.package - 1  # check after owner-specific locations
+
+    def get(self, resource, **kwargs):
+        # Shared (non-owner-specific) dependencies live under <package>/common/.
+        path = os.path.join(settings.package_directory, 'common', resource.path)
+        if os.path.exists(path):
+            return path
+
+
+class EnvironmentCommonDependencyGetter(ResourceGetter):
+
+    name = 'environment_common_dependency'
+    resource_type = 'file'
+    priority = GetterPriority.environment - 1  # check after owner-specific locations
+
+    def get(self, resource, **kwargs):
+        # Look directly in the shared environment dependencies directory,
+        # matching only on the file's base name.
+        path = os.path.join(settings.dependencies_directory,
+                            os.path.basename(resource.path))
+        if os.path.exists(path):
+            return path
+
+
+class PackageDependencyGetter(ResourceGetter):
+
+    name = 'packaged_dependency'
+    resource_type = 'file'
+    priority = GetterPriority.package
+
+    def get(self, resource, **kwargs):
+        # Resolve the file relative to the directory containing the module
+        # that defines the resource's owner class.
+        owner_path = inspect.getfile(resource.owner.__class__)
+        path = os.path.join(os.path.dirname(owner_path), resource.path)
+        if os.path.exists(path):
+            return path
+
+
+class EnvironmentDependencyGetter(ResourceGetter):
+
+    name = 'environment_dependency'
+    resource_type = 'file'
+    priority = GetterPriority.environment
+
+    def get(self, resource, **kwargs):
+        # Look in the owner's dependencies directory, matching on base name only.
+        path = os.path.join(resource.owner.dependencies_directory, os.path.basename(resource.path))
+        if os.path.exists(path):
+            return path
+
+
+class ExtensionAssetGetter(DependencyFileGetter):
+
+    # Fetches extension assets from the 'workload_automation/assets' tree on
+    # the filer mount point (behaviour inherited from DependencyFileGetter).
+    name = 'extension_asset'
+    resource_type = 'extension_asset'
+    relative_path = 'workload_automation/assets'
+
+
+class RemoteFilerGetter(ResourceGetter):
+
+ name = 'filer_assets'
+ description = """
+ Finds resources on a (locally mounted) remote filer and caches them locally.
+
+ This assumes that the filer is mounted on the local machine (e.g. as a samba share).
+
+ """
+ priority = GetterPriority.remote
+ resource_type = ['apk', 'file', 'jar', 'revent']
+
+ parameters = [
+ Parameter('remote_path', global_alias='remote_assets_path', default='',
+ description="""Path, on the local system, where the assets are located."""),
+ Parameter('always_fetch', kind=boolean, default=False, global_alias='always_fetch_remote_assets',
+ description="""If ``True``, will always attempt to fetch assets from the remote, even if
+ a local cached copy is available."""),
+ ]
+
+ def get(self, resource, **kwargs):
+ version = kwargs.get('version')
+ if resource.owner:
+ remote_path = os.path.join(self.remote_path, resource.owner.name)
+ local_path = os.path.join(settings.environment_root, resource.owner.dependencies_directory)
+ return self.try_get_resource(resource, version, remote_path, local_path)
+ else:
+ result = None
+ for entry in os.listdir(remote_path):
+ remote_path = os.path.join(self.remote_path, entry)
+ local_path = os.path.join(settings.environment_root, settings.dependencies_directory, entry)
+ result = self.try_get_resource(resource, version, remote_path, local_path)
+ if result:
+ break
+ return result
+
+ def try_get_resource(self, resource, version, remote_path, local_path):
+ if not self.always_fetch:
+ result = self.get_from(resource, version, local_path)
+ if result:
+ return result
+ if remote_path:
+ # Didn't find it cached locally; now check the remoted
+ result = self.get_from(resource, version, remote_path)
+ if not result:
+ return result
+ else: # remote path is not set
+ return None
+ # Found it remotely, cache locally, then return it
+ local_full_path = os.path.join(_d(local_path), os.path.basename(result))
+ self.logger.debug('cp {} {}'.format(result, local_full_path))
+ shutil.copy(result, local_full_path)
+ return local_full_path
+
+ def get_from(self, resource, version, location): # pylint: disable=no-self-use
+ if resource.name in ['apk', 'jar']:
+ return get_from_location_by_extension(resource, location, resource.name, version)
+ elif resource.name == 'file':
+ filepath = os.path.join(location, resource.path)
+ if os.path.exists(filepath):
+ return filepath
+ elif resource.name == 'revent':
+ filename = '.'.join([resource.owner.device.name, resource.stage, 'revent']).lower()
+ alternate_location = os.path.join(location, 'revent_files')
+ # There tends to be some confusion as to where revent files should
+ # be placed. This looks both in the extension's directory, and in
+ # 'revent_files' subdirectory under it, if it exists.
+ if os.path.isdir(alternate_location):
+ for candidate in os.listdir(alternate_location):
+ if candidate.lower() == filename.lower():
+ return os.path.join(alternate_location, candidate)
+ for candidate in os.listdir(location):
+ if candidate.lower() == filename.lower():
+ return os.path.join(location, candidate)
+ else:
+ raise ValueError('Unexpected resource type: {}'.format(resource.name))
+
+
+# Utility functions
+
+def get_from_location_by_extension(resource, location, extension, version=None):
+ found_files = glob.glob(os.path.join(location, '*.{}'.format(extension)))
+ if version:
+ found_files = [ff for ff in found_files if version.lower() in os.path.basename(ff).lower()]
+ if len(found_files) == 1:
+ return found_files[0]
+ elif not found_files:
+ return None
+ else:
+ raise ResourceError('More than one .{} found in {} for {}.'.format(extension,
+ location,
+ resource.owner.name))
+
+
+def _get_owner_path(resource):
+ if resource.owner is NO_ONE:
+ return os.path.join(os.path.dirname(__base_filepath), 'common')
+ else:
+ return os.path.dirname(sys.modules[resource.owner.__module__].__file__)
diff --git a/wlauto/result_processors/__init__.py b/wlauto/result_processors/__init__.py
new file mode 100644
index 00000000..cd5d64d6
--- /dev/null
+++ b/wlauto/result_processors/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
diff --git a/wlauto/result_processors/dvfs.py b/wlauto/result_processors/dvfs.py
new file mode 100644
index 00000000..b5a865e0
--- /dev/null
+++ b/wlauto/result_processors/dvfs.py
@@ -0,0 +1,375 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import csv
+import re
+
+from wlauto import ResultProcessor, settings, instrumentation
+from wlauto.exceptions import ConfigError, ResultProcessorError
+
+
+class DVFS(ResultProcessor):
+ name = 'dvfs'
+ description = """
+ Reports DVFS state residency data form ftrace power events.
+
+ This generates a ``dvfs.csv`` in the top-level results directory that,
+ for each workload iteration, reports the percentage of time each CPU core
+ spent in each of the DVFS frequency states (P-states), as well as percentage
+ of the time spent in idle, during the execution of the workload.
+
+ .. note:: ``trace-cmd`` instrument *MUST* be enabled in the instrumentation,
+ and at least ``'power*'`` events must be enabled.
+
+
+ """
+
+    def __init__(self, **kwargs):
+        super(DVFS, self).__init__(**kwargs)
+        self.device = None  # set from context in initialize()
+        self.infile = None  # per-iteration trace.txt path
+        self.outfile = None  # dvfs.csv in the top-level output directory
+        self.current_cluster = None
+        self.currentstates_of_clusters = []
+        self.current_frequency_of_clusters = []
+        self.timestamp = []
+        self.state_time_map = {}  # hold state at timestamp
+        self.cpuid_time_map = {}  # hold cpuid at timestamp
+        self.cpu_freq_time_spent = {}  # per-cpu mapping of state/freq -> time
+        self.cpuids_of_clusters = []
+        self.power_state = [0, 1, 2, 3]
+        # Value reported for an unknown power state (0xFFFFFFFF).
+        self.UNKNOWNSTATE = 4294967295
+        # 2 for IKS devices (core count is doubled -- see initialize()), else 1.
+        self.multiply_factor = None
+        self.corename_of_clusters = []
+        self.numberofcores_in_cluster = []
+        self.minimum_frequency_cluster = []
+        self.idlestate_description = {}
+
+ def validate(self):
+ if not instrumentation.instrument_is_installed('trace-cmd'):
+ raise ConfigError('"dvfs" works only if "trace_cmd" in enabled in instrumentation')
+
+ def initialize(self, context): # pylint: disable=R0912
+ self.device = context.device
+ if not self.device.core_names:
+ message = 'Device does not specify its core types (core_names/core_clusters not set in device_config).'
+ raise ResultProcessorError(message)
+ number_of_clusters = max(self.device.core_clusters) + 1
+ # In IKS devices, actual number of cores is double
+ # from what we get from device.number_of_cores
+ if self.device.scheduler == 'iks':
+ self.multiply_factor = 2
+ elif self.device.scheduler == 'unknown':
+ # Device doesn't specify its scheduler type. It could be IKS, in
+ # which case reporeted values would be wrong, so error out.
+ message = ('The Device doesn not specify it\'s scheduler type. If you are '
+ 'using a generic device interface, please make sure to set the '
+ '"scheduler" parameter in the device config.')
+ raise ResultProcessorError(message)
+ else:
+ self.multiply_factor = 1
+ # separate out the cores in each cluster
+ # It is list of list of cores in cluster
+ listof_cores_clusters = []
+ for cluster in range(number_of_clusters):
+ listof_cores_clusters.append([core for core in self.device.core_clusters if core == cluster])
+ # Extract minimum frequency of each cluster and
+ # the idle power state with its descriptive name
+ #
+ total_cores = 0
+ current_cores = 0
+ for cluster, cores_list in enumerate(listof_cores_clusters):
+ self.corename_of_clusters.append(self.device.core_names[total_cores])
+ if self.device.scheduler != 'iks':
+ self.idlestate_description.update(self.device.get_cpuidle_states(total_cores))
+ else:
+ self.idlestate_description.update(self.device.get_cpuidle_states())
+ total_cores += len(cores_list)
+ self.numberofcores_in_cluster.append(len(cores_list))
+ for i in range(current_cores, total_cores):
+ if i in self.device.active_cpus:
+ self.minimum_frequency_cluster.append(int(self.device.get_cpu_min_frequency("cpu{}".format(i))))
+ break
+ current_cores = total_cores
+ length_frequency_cluster = len(self.minimum_frequency_cluster)
+ if length_frequency_cluster != number_of_clusters:
+ diff = number_of_clusters - length_frequency_cluster
+ offline_value = -1
+ for i in range(diff):
+ if self.device.scheduler != 'iks':
+ self.minimum_frequency_cluster.append(offline_value)
+ else:
+ self.minimum_frequency_cluster.append(self.device.iks_switch_frequency)
+
+    def process_iteration_result(self, result, context):
+        """
+        Parse the trace.txt for each iteration, calculate DVFS residency state/frequencies
+        and dump the result in csv and flush the data for next iteration.
+        """
+        # trace.txt is produced per-iteration by the trace-cmd instrument.
+        self.infile = os.path.join(context.output_directory, 'trace.txt')
+        if os.path.isfile(self.infile):
+            self.logger.debug('Running result_processor "dvfs"')
+            # dvfs.csv lives in the top-level results directory (settings),
+            # not the per-iteration directory, so it accumulates all iterations.
+            self.outfile = os.path.join(settings.output_directory, 'dvfs.csv')
+            self.flush_parse_initialize()
+            self.calculate()
+            self.percentage()
+            self.generate_csv(context)
+            self.logger.debug('Completed result_processor "dvfs"')
+        else:
+            # Silently skip the iteration if tracing did not produce output.
+            self.logger.debug('trace.txt not found.')
+
+    def flush_parse_initialize(self):
+        """
+        Store state, cpu_id for each timestamp from trace.txt and flush all the values for
+        next iterations.
+        """
+        # Reset all per-iteration state before re-parsing.
+        self.current_cluster = 0
+        self.current_frequency_of_clusters = []
+        self.timestamp = []
+        self.currentstates_of_clusters = []
+        self.state_time_map = {}
+        self.cpuid_time_map = {}
+        self.cpu_freq_time_spent = {}
+        self.cpuids_of_clusters = []
+        self.parse()  # Parse trace.txt generated from trace-cmd instrumentation
+        # Initialize the states of each core of clusters and frequency of
+        # each clusters with its minimum freq
+        # cpu_id is assigned for each of clusters.
+        # For IKS devices cpuid remains same in other clusters
+        # and for other it will increment by 1
+        count = 0
+        for cluster, cores_number in enumerate(self.numberofcores_in_cluster):
+            # -1 marks a core whose state is not yet known (treated as offline).
+            self.currentstates_of_clusters.append([-1 for dummy in range(cores_number)])
+            self.current_frequency_of_clusters.append(self.minimum_frequency_cluster[cluster])
+            if self.device.scheduler == 'iks':
+                self.cpuids_of_clusters.append([j for j in range(cores_number)])
+            else:
+                self.cpuids_of_clusters.append(range(count, count + cores_number))
+                count += cores_number
+
+        # Initialize the time spent in each state/frequency for each core.
+        for i in range(self.device.number_of_cores * self.multiply_factor):
+            self.cpu_freq_time_spent["cpu{}".format(i)] = {}
+            for j in self.unique_freq():
+                self.cpu_freq_time_spent["cpu{}".format(i)][j] = 0
+            # To determine offline -1 state is added
+            offline_value = -1
+            self.cpu_freq_time_spent["cpu{}".format(i)][offline_value] = 0
+            # Ensure a bucket exists for state 0 even if it never appears in the trace.
+            if 0 not in self.unique_freq():
+                self.cpu_freq_time_spent["cpu{}".format(i)][0] = 0
+
+    def update_cluster_freq(self, state, cpu_id):
+        """ Update the cluster frequency and current cluster"""
+        # For IKS devices cluster changes only possible when
+        # freq changes, for other it is determine by cpu_id.
+        if self.device.scheduler != 'iks':
+            self.current_cluster = self.get_cluster(cpu_id, state)
+        # On a frequency-change event, re-derive the cluster and record the
+        # new frequency against it. (get_state_name is defined later in this
+        # class, outside this view.)
+        if self.get_state_name(state) == "freqstate":
+            self.current_cluster = self.get_cluster(cpu_id, state)
+            self.current_frequency_of_clusters[self.current_cluster] = state
+
+ def get_cluster(self, cpu_id, state):
+ # For IKS if current state is greater than switch
+ # freq then it is in cluster2 else cluster1
+ # For other, Look the current cpu_id and check this id
+ # belong to which cluster.
+ if self.device.scheduler == 'iks':
+ return 1 if state >= self.device.iks_switch_frequency else 0
+ else:
+ for cluster, cpuids_list in enumerate(self.cpuids_of_clusters):
+ if cpu_id in cpuids_list:
+ return cluster
+
+    def get_cluster_freq(self):
+        # Frequency currently recorded for the cluster of the last event.
+        return self.current_frequency_of_clusters[self.current_cluster]
+
+ def update_state(self, state, cpu_id): # pylint: disable=R0912
+ """
+ Update state of each cores in every cluster.
+ This is done for each timestamp.
+ """
+ POWERDOWN = 2
+ offline_value = -1
+ # if state is in unknowstate, then change state of current cpu_id
+ # with cluster freq of current cluster.
+ # if state is in powerstate then change state with that power state.
+ if self.get_state_name(state) in ["unknownstate", "powerstate"]:
+ for i in range(len(self.cpuids_of_clusters[self.current_cluster])):
+ if cpu_id == self.cpuids_of_clusters[self.current_cluster][i]:
+ if self.get_state_name(state) == "unknownstate":
+ self.currentstates_of_clusters[self.current_cluster][i] = self.current_frequency_of_clusters[self.current_cluster]
+ elif self.get_state_name(state) == "powerstate":
+ self.currentstates_of_clusters[self.current_cluster][i] = state
+ # If state is in freqstate then update the state with current state.
+ # For IKS, if all cores is in power down and current state is freqstate
+ # then update the all the cores in current cluster to current state
+ # and other state cluster changed to Power down.
+ if self.get_state_name(state) == "freqstate":
+ for i, j in enumerate(self.currentstates_of_clusters[self.current_cluster]):
+ if j != offline_value:
+ self.currentstates_of_clusters[self.current_cluster][i] = state
+ if cpu_id == self.cpuids_of_clusters[self.current_cluster][i]:
+ self.currentstates_of_clusters[self.current_cluster][i] = state
+ if self.device.scheduler == 'iks':
+ check = False # All core in cluster is power down
+ for i in range(len(self.currentstates_of_clusters[self.current_cluster])):
+ if self.currentstates_of_clusters[self.current_cluster][i] != POWERDOWN:
+ check = True
+ break
+ if not check:
+ for i in range(len(self.currentstates_of_clusters[self.current_cluster])):
+ self.currentstates_of_clusters[self.current_cluster][i] = self.current_frequency_of_clusters[self.current_cluster]
+ for cluster, state_list in enumerate(self.currentstates_of_clusters):
+ if cluster != self.current_cluster:
+ for j in range(len(state_list)):
+ self.currentstates_of_clusters[i][j] = POWERDOWN
+
+ def unique_freq(self):
+ """ Determine the unique Frequency and state"""
+ unique_freq = []
+ for i in self.timestamp:
+ if self.state_time_map[i] not in unique_freq and self.state_time_map[i] != self.UNKNOWNSTATE:
+ unique_freq.append(self.state_time_map[i])
+ for i in self.minimum_frequency_cluster:
+ if i not in unique_freq:
+ unique_freq.append(i)
+ return unique_freq
+
+ def parse(self):
+ """
+ Parse the trace.txt ::
+
+ store timestamp, state, cpu_id
+ ---------------------------------------------------------------------------------
+ |timestamp| |state| |cpu_id|
+ <idle>-0 [001] 294.554380: cpu_idle: state=4294967295 cpu_id=1
+ <idle>-0 [001] 294.554454: power_start: type=1 state=0 cpu_id=1
+ <idle>-0 [001] 294.554458: cpu_idle: state=0 cpu_id=1
+ <idle>-0 [001] 294.554464: power_end: cpu_id=1
+ <idle>-0 [001] 294.554471: cpu_idle: state=4294967295 cpu_id=1
+ <idle>-0 [001] 294.554590: power_start: type=1 state=0 cpu_id=1
+ <idle>-0 [001] 294.554593: cpu_idle: state=0 cpu_id=1
+ <idle>-0 [001] 294.554636: power_end: cpu_id=1
+ <idle>-0 [001] 294.554639: cpu_idle: state=4294967295 cpu_id=1
+ <idle>-0 [001] 294.554669: power_start: type=1 state=0 cpu_id=1
+
+
+ """
+ pattern = re.compile(r'\s+(?P<time>\S+)\S+\s*(?P<desc>(cpu_idle:|cpu_frequency:))\s*state=(?P<state>\d+)\s*cpu_id=(?P<cpu_id>\d+)')
+ start_trace = False
+ stop_trace = False
+ with open(self.infile, 'r') as f:
+ for line in f:
+ #Start collecting data from label "TRACE_MARKER_START" and
+ #stop with label "TRACE_MARKER_STOP"
+ if line.find("TRACE_MARKER_START") != -1:
+ start_trace = True
+ if line.find("TRACE_MARKER_STOP") != -1:
+ stop_trace = True
+ if start_trace and not stop_trace:
+ match = pattern.search(line)
+ if match:
+ self.timestamp.append(float(match.group('time')))
+ self.state_time_map[float(match.group('time'))] = int(match.group('state'))
+ self.cpuid_time_map[float(match.group('time'))] = int(match.group('cpu_id'))
+
+ def get_state_name(self, state):
+ if state in self.power_state:
+ return "powerstate"
+ elif state == self.UNKNOWNSTATE:
+ return "unknownstate"
+ else:
+ return "freqstate"
+
+ def populate(self, time1, time2):
+ diff = time2 - time1
+ for cluster, states_list in enumerate(self.currentstates_of_clusters):
+ for k, j in enumerate(states_list):
+ if self.device.scheduler == 'iks' and cluster == 1:
+ self.cpu_freq_time_spent["cpu{}".format(self.cpuids_of_clusters[cluster][k] + len(self.currentstates_of_clusters[0]))][j] += diff
+ else:
+ self.cpu_freq_time_spent["cpu{}".format(self.cpuids_of_clusters[cluster][k])][j] += diff
+
+ def calculate(self):
+ for i in range(len(self.timestamp) - 1):
+ self.update_cluster_freq(self.state_time_map[self.timestamp[i]], self.cpuid_time_map[self.timestamp[i]])
+ self.update_state(self.state_time_map[self.timestamp[i]], self.cpuid_time_map[self.timestamp[i]])
+ self.populate(self.timestamp[i], self.timestamp[i + 1])
+
+ def percentage(self):
+ """Normalize the result with total execution time."""
+ temp = self.cpu_freq_time_spent.copy()
+ for i in self.cpu_freq_time_spent:
+ total = 0
+ for j in self.cpu_freq_time_spent[i]:
+ total += self.cpu_freq_time_spent[i][j]
+ for j in self.cpu_freq_time_spent[i]:
+ if total != 0:
+ temp[i][j] = self.cpu_freq_time_spent[i][j] * 100 / total
+ else:
+ temp[i][j] = 0
+ return temp
+
    def generate_csv(self, context):  # pylint: disable=R0912,R0914
        """Append the per-core time-in-state percentages to the dvfs CSV.

        One row per idle state / frequency with one column per core, plus an
        extra OFFLINE row when any core spent more than 1% of the run
        offline.  The header row is only written when the file is empty.
        """
        temp = self.percentage()
        total_state = self.unique_freq()
        offline_value = -1
        # Frequencies are reported by the kernel in kHz.
        ghz_conversion = 1000000
        mhz_conversion = 1000
        with open(self.outfile, 'a+') as f:
            writer = csv.writer(f, delimiter=',')
            reader = csv.reader(f)
            # Create the header in the format below
            # workload name, iteration, state, A7 CPU0,A7 CPU1,A7 CPU2,A7 CPU3,A15 CPU4,A15 CPU5
            # NOTE(review): with mode 'a+' the initial read position is
            # platform-dependent; if it starts at EOF this count is always 0
            # and the header may be duplicated -- verify on target platform.
            if sum(1 for row in reader) == 0:
                header_row = ['workload', 'iteration', 'state']
                count = 0
                for cluster, states_list in enumerate(self.currentstates_of_clusters):
                    for dummy_index in range(len(states_list)):
                        header_row.append("{} CPU{}".format(self.corename_of_clusters[cluster], count))
                        count += 1
                writer.writerow(header_row)
            if offline_value in total_state:
                total_state.remove(offline_value)  # remove the offline state
            for i in sorted(total_state):
                temprow = []
                temprow.extend([context.result.spec.label, context.result.iteration])
                # Idle states get their human-readable description; frequency
                # states are rendered in GHz or MHz.
                if "state{}".format(i) in self.idlestate_description:
                    temprow.append(self.idlestate_description["state{}".format(i)])
                else:
                    state_value = float(i)
                    if state_value / ghz_conversion >= 1:
                        temprow.append("{} Ghz".format(state_value / ghz_conversion))
                    else:
                        temprow.append("{} Mhz".format(state_value / mhz_conversion))
                for j in range(self.device.number_of_cores * self.multiply_factor):
                    temprow.append("{0:.3f}".format(temp["cpu{}".format(j)][i]))
                writer.writerow(temprow)
            # check_off stays True when no core spent more than 1% offline;
            # only in the False case is the OFFLINE row emitted.
            check_off = True  # Checking whether core is OFFLINE
            for i in range(self.device.number_of_cores * self.multiply_factor):
                temp_val = "{0:.3f}".format(temp["cpu{}".format(i)][offline_value])
                if float(temp_val) > 1:
                    check_off = False
                    break
            if check_off is False:
                temprow = []
                temprow.extend([context.result.spec.label, context.result.iteration])
                temprow.append("OFFLINE")
                for i in range(self.device.number_of_cores * self.multiply_factor):
                    temprow.append("{0:.3f}".format(temp["cpu{}".format(i)][offline_value]))
                writer.writerow(temprow)
+
diff --git a/wlauto/result_processors/mongodb.py b/wlauto/result_processors/mongodb.py
new file mode 100644
index 00000000..054ccab6
--- /dev/null
+++ b/wlauto/result_processors/mongodb.py
@@ -0,0 +1,235 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+#pylint: disable=E1101,W0201
+import os
+import re
+import string
+import tarfile
+
+try:
+ import pymongo
+ from bson.objectid import ObjectId
+ from gridfs import GridFS
+except ImportError:
+ pymongo = None
+
+from wlauto import ResultProcessor, Parameter, Artifact
+from wlauto.exceptions import ResultProcessorError
+from wlauto.utils.misc import as_relative
+
+
# Characters that are not allowed in MongoDB key names; they are replaced
# with underscores (via KEY_TRANS_TABLE) before documents are inserted.
__bad_chars = '$.'
# NOTE(review): string.maketrans is Python 2 only; a Python 3 port would
# need str.maketrans instead.
KEY_TRANS_TABLE = string.maketrans(__bad_chars, '_' * len(__bad_chars))
# Name of the per-run tarball of raw/untracked files uploaded to GridFS.
BUNDLE_NAME = 'files.tar.gz'
+
+
class MongodbUploader(ResultProcessor):

    name = 'mongodb'
    description = """
    Uploads run results to a MongoDB instance.

    MongoDB is a popular document-based data store (NoSQL database).

    """

    parameters = [
        Parameter('uri', kind=str, default=None,
                  description="""Connection URI. If specified, this will be used for connecting
                  to the backend, and host/port parameters will be ignored."""),
        Parameter('host', kind=str, default='localhost', mandatory=True,
                  description='IP address/name of the machine hosting the MongoDB server.'),
        Parameter('port', kind=int, default=27017, mandatory=True,
                  description='Port on which the MongoDB server is listening.'),
        Parameter('db', kind=str, default='wa', mandatory=True,
                  description='Database on the server used to store WA results.'),
        Parameter('extra_params', kind=dict, default={},
                  description='''Additional connection parameters may be specified using this (see
                  pymongo documentation).'''),
        Parameter('authentication', kind=dict, default={},
                  description='''If specified, this will be passed to db.authenticate() upon connection;
                  see pymongo documentation authentication examples for detail.'''),
    ]

    def initialize(self, context):
        """Connect to the server, insert the initial run document, and pick a
        unique GridFS "directory" under which this run's files are stored."""
        if pymongo is None:
            raise ResultProcessorError('mongodb result processor requires pymongo package to be installed.')
        try:
            self.client = pymongo.MongoClient(self.host, self.port, **self.extra_params)
        except pymongo.errors.PyMongoError as e:
            # BUG FIX: this used '{}'.fromat(e), which raised AttributeError
            # and masked the underlying connection error.
            raise ResultProcessorError('Error connecting to mongod: {}'.format(e))
        self.dbc = self.client[self.db]
        self.fs = GridFS(self.dbc)
        if self.authentication:
            if not self.dbc.authenticate(**self.authentication):
                raise ResultProcessorError('Authentication to database {} failed.'.format(self.db))

        self.run_result_dbid = ObjectId()
        run_doc = context.run_info.to_dict()

        # '$' and '.' are not permitted in MongoDB keys, so device property
        # names are sanitised through KEY_TRANS_TABLE before embedding.
        wa_adapter = run_doc['device']
        devprops = dict((k.translate(KEY_TRANS_TABLE), v)
                        for k, v in run_doc['device_properties'].iteritems())
        run_doc['device'] = devprops
        run_doc['device']['wa_adapter'] = wa_adapter
        del run_doc['device_properties']

        run_doc['output_directory'] = os.path.abspath(context.output_directory)
        run_doc['artifacts'] = []
        run_doc['workloads'] = context.config.to_dict()['workload_specs']
        for workload in run_doc['workloads']:
            workload['name'] = workload['workload_name']
            del workload['workload_name']
            workload['results'] = []
        self.run_dbid = self.dbc.runs.insert(run_doc)

        prefix = context.run_info.project if context.run_info.project else '[NOPROJECT]'
        run_part = context.run_info.run_name or context.run_info.uuid.hex
        self.gridfs_dir = os.path.join(prefix, run_part)
        # If a directory with this name already exists, append '-<i>' for the
        # first free index.  BUG FIX: the original stripped exactly two
        # characters ('[:-2]') before retrying, which mangles the name once
        # the suffix index reaches double digits.
        base_dir = self.gridfs_dir
        i = 0
        while self.gridfs_directory_exists(self.gridfs_dir):
            self.gridfs_dir = '{}-{}'.format(base_dir, i)
            i += 1

        # Keep track of all generated artifacts, so that we know what to
        # include in the tarball. The tarball will contain raw artifacts
        # (other kinds would have been uploaded directly or do not contain
        # new data) and all files in the results dir that have not been
        # marked as artifacts.
        self.artifacts = []

    def export_iteration_result(self, result, context):
        """Push this iteration's status, events, metrics and artifacts into
        the matching workload entry of the run document."""
        r = {}
        r['iteration'] = context.current_iteration
        r['status'] = result.status
        r['events'] = [e.to_dict() for e in result.events]
        r['metrics'] = []
        for m in result.metrics:
            md = m.to_dict()
            md['is_summary'] = m.name in context.workload.summary_metrics
            r['metrics'].append(md)
        iteration_artefacts = [self.upload_artifact(context, a) for a in context.iteration_artifacts]
        r['artifacts'] = [e for e in iteration_artefacts if e is not None]
        self.dbc.runs.update({'_id': self.run_dbid, 'workloads.id': context.spec.id},
                             {'$push': {'workloads.$.results': r}})

    def export_run_result(self, result, context):
        """Upload run-level artifacts plus a bundle of everything else, then
        finalise the run document with status, events and timing."""
        run_artifacts = [self.upload_artifact(context, a) for a in context.run_artifacts]
        self.logger.debug('Generating results bundle...')
        bundle = self.generate_bundle(context)
        if bundle:
            run_artifacts.append(self.upload_artifact(context, bundle))
        else:
            self.logger.debug('No untracked files found.')
        run_stats = {
            'status': result.status,
            'events': [e.to_dict() for e in result.events],
            'end_time': context.run_info.end_time,
            'duration': context.run_info.duration.total_seconds(),
            'artifacts': [e for e in run_artifacts if e is not None],
        }
        self.dbc.runs.update({'_id': self.run_dbid}, {'$set': run_stats})

    def finalize(self, context):
        self.client.close()

    def validate(self):
        if self.uri:
            has_warned = False
            if self.host != self.parameters['host'].default:
                self.logger.warning('both uri and host specified; host will be ignored')
                has_warned = True
            if self.port != self.parameters['port'].default:
                self.logger.warning('both uri and port specified; port will be ignored')
                has_warned = True
            if has_warned:
                self.logger.warning('To suppress this warning, please remove either uri or '
                                    'host/port from your config.')

    def upload_artifact(self, context, artifact):
        """Upload a single artifact's file to GridFS; return its metadata
        entry, or None if the file is missing or is left to the bundle."""
        artifact_path = os.path.join(context.output_directory, artifact.path)
        self.artifacts.append((artifact_path, artifact))
        if not os.path.exists(artifact_path):
            self.logger.debug('Artifact {} has not been generated'.format(artifact_path))
            return
        elif artifact.kind in ['raw', 'export']:
            # raw files go into the bundle; exports carry no new information.
            self.logger.debug('Ignoring {} artifact {}'.format(artifact.kind, artifact_path))
            return
        else:
            self.logger.debug('Uploading artifact {}'.format(artifact_path))
            entry = artifact.to_dict()
            path = entry['path']
            del entry['path']
            del entry['name']
            del entry['level']
            del entry['mandatory']

            if context.workload is None:
                entry['filename'] = os.path.join(self.gridfs_dir, as_relative(path))
            else:
                entry['filename'] = os.path.join(self.gridfs_dir,
                                                 '{}-{}-{}'.format(context.spec.id,
                                                                   context.spec.label,
                                                                   context.current_iteration),
                                                 as_relative(path))
            with open(artifact_path, 'rb') as fh:
                fsid = self.fs.put(fh, **entry)
            entry['gridfs_id'] = fsid

            return entry

    def gridfs_directory_exists(self, path):
        """Return True if any GridFS filename starts with ``path``."""
        regex = re.compile('^{}'.format(path))
        return self.fs.exists({'filename': regex})

    def generate_bundle(self, context):  # pylint: disable=R0914
        """
        The bundle will contain files generated during the run that have not
        already been processed. This includes all files for which there isn't an
        explicit artifact as well as "raw" artifacts that aren't uploaded individually.
        Basically, this ensures that everything that is not explicitly marked as an
        "export" (which means it's guaranteed not to contain information not accessible
        from other artifacts/scores) is available in the DB. The bundle is compressed,
        so it shouldn't take up too much space, however it also means that it's not
        easy to query for or get individual files (a trade off between space and
        convenience).

        """
        to_upload = []
        artpaths = []
        outdir = context.output_directory
        for artpath, artifact in self.artifacts:
            artpaths.append(os.path.relpath(artpath, outdir))
            if artifact.kind == 'raw':
                to_upload.append((artpath, os.path.relpath(artpath, outdir)))
        for root, _, files in os.walk(outdir):
            for f in files:
                path = os.path.relpath(os.path.join(root, f), outdir)
                if path not in artpaths:
                    to_upload.append((os.path.join(outdir, path), path))

        if not to_upload:
            # Nothing unexpected/unprocessed has been generated during the run.
            return None
        else:
            archive_path = os.path.join(outdir, BUNDLE_NAME)
            with tarfile.open(archive_path, 'w:gz') as tf:
                for fpath, arcpath in to_upload:
                    tf.add(fpath, arcpath)
            return Artifact('mongo_bundle', BUNDLE_NAME, 'data',
                            description='bundle to be uploaded to mongodb.')
diff --git a/wlauto/result_processors/sqlite.py b/wlauto/result_processors/sqlite.py
new file mode 100644
index 00000000..7a2690dc
--- /dev/null
+++ b/wlauto/result_processors/sqlite.py
@@ -0,0 +1,183 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=attribute-defined-outside-init
+
+import os
+import sqlite3
+import json
+import uuid
+from datetime import datetime, timedelta
+from contextlib import contextmanager
+
+from wlauto import ResultProcessor, settings, Parameter
+from wlauto.exceptions import ResultProcessorError
+from wlauto.utils.types import boolean
+
+
# IMPORTANT: when updating this schema, make sure to bump the version!
SCHEMA_VERSION = '0.0.2'
# DDL statements executed in order when a fresh results database is created.
# The final INSERT records the schema version so that later runs can refuse
# to write into an incompatible database (see _validate_schema_version).
SCHEMA = [
    '''CREATE TABLE runs (
        uuid text,
        start_time datetime,
        end_time datetime,
        duration integer
    )''',
    '''CREATE TABLE workload_specs (
        id text,
        run_oid text,
        number_of_iterations integer,
        label text,
        workload_name text,
        boot_parameters text,
        runtime_parameters text,
        workload_parameters text
    )''',
    '''CREATE TABLE metrics (
        spec_oid int,
        iteration integer,
        metric text,
        value text,
        units text,
        lower_is_better integer
    )''',
    '''CREATE VIEW results AS
       SELECT uuid as run_uuid, spec_id, label as workload, iteration, metric, value, units, lower_is_better
       FROM metrics AS m INNER JOIN (
           SELECT ws.OID as spec_oid, ws.id as spec_id, uuid, label
           FROM workload_specs AS ws INNER JOIN runs AS r ON ws.run_oid = r.OID
       ) AS wsr ON wsr.spec_oid = m.spec_oid
    ''',
    '''CREATE TABLE __meta (
        schema_version text
    )''',
    '''INSERT INTO __meta VALUES ("{}")'''.format(SCHEMA_VERSION),
]


# Teach sqlite3 how to store these Python types: datetimes as ISO strings,
# timedeltas as a float number of seconds, and UUIDs as their string form.
sqlite3.register_adapter(datetime, lambda x: x.isoformat())
sqlite3.register_adapter(timedelta, lambda x: x.total_seconds())
sqlite3.register_adapter(uuid.UUID, str)
+
+
class SqliteResultProcessor(ResultProcessor):

    # NOTE: the original declared ``name = 'sqlite'`` twice; the duplicate
    # has been removed.
    name = 'sqlite'
    description = """
    Stores results in an sqlite database.

    This may be used to accumulate results of multiple runs in a single file.

    """

    parameters = [
        Parameter('database', default=None,
                  description=""" Full path to the sqlite database to be used. If this is not specified then
                  a new database file will be created in the output directory. This setting can be
                  used to accumulate results from multiple runs in a single database. If the
                  specified file does not exist, it will be created, however the directory of the
                  file must exist.

                  .. note:: The value must resolve to an absolute path,
                            relative paths are not allowed; however the
                            value may contain environment variables and/or
                            the home reference ~.
                  """),
        Parameter('overwrite', kind=boolean, default=False,
                  description="""If ``True``, this will overwrite the database file
                  if it already exists. If ``False`` (the default) data
                  will be added to the existing file (provided schema
                  versions match -- otherwise an error will be raised).
                  """),

    ]

    def initialize(self, context):
        """Create or open the database and insert this run's record.

        A missing file is initialised with the current schema; an existing
        file is either removed and re-created (when ``overwrite`` is set) or
        validated against the current schema version.
        """
        self._last_spec = None
        self._run_oid = None
        self._spec_oid = None
        if not os.path.exists(self.database):
            self._initdb()
        elif self.overwrite:  # pylint: disable=no-member
            os.remove(self.database)
            self._initdb()
        else:
            self._validate_schema_version()
        self._update_run(context.run_info.uuid)

    def process_iteration_result(self, result, context):
        """Insert one metrics row per metric for the finished iteration."""
        if self._last_spec != context.spec:
            self._update_spec(context.spec)
        metrics = [(self._spec_oid, context.current_iteration, m.name, str(m.value), m.units, int(m.lower_is_better))
                   for m in result.metrics]
        with self._open_connection() as conn:
            conn.executemany('INSERT INTO metrics VALUES (?,?,?,?,?,?)', metrics)

    def process_run_result(self, result, context):
        """Fill in the run row's timing columns once the run has completed."""
        info = context.run_info
        with self._open_connection() as conn:
            conn.execute('''UPDATE runs SET start_time=?, end_time=?, duration=?
                            WHERE OID=?''', (info.start_time, info.end_time, info.duration, self._run_oid))

    def validate(self):
        # Default the database location to the output directory and expand
        # environment variables / '~' in user-supplied paths.
        if not self.database:  # pylint: disable=access-member-before-definition
            self.database = os.path.join(settings.output_directory, 'results.sqlite')
        self.database = os.path.expandvars(os.path.expanduser(self.database))

    def _initdb(self):
        # Execute the DDL statements that create a fresh results database.
        with self._open_connection() as conn:
            for command in SCHEMA:
                conn.execute(command)

    def _validate_schema_version(self):
        # Refuse to write into a database created with a different schema.
        with self._open_connection() as conn:
            try:
                c = conn.execute('SELECT schema_version FROM __meta')
                found_version = c.fetchone()[0]
            except sqlite3.OperationalError:
                message = '{} does not appear to be a valid WA results database.'.format(self.database)
                raise ResultProcessorError(message)
            if found_version != SCHEMA_VERSION:
                message = 'Schema version in {} ({}) does not match current version ({}).'
                raise ResultProcessorError(message.format(self.database, found_version, SCHEMA_VERSION))

    def _update_run(self, run_uuid):
        # Insert the run row and cache its OID for later updates.
        with self._open_connection() as conn:
            conn.execute('INSERT INTO runs (uuid) VALUES (?)', (run_uuid,))
            conn.commit()
            c = conn.execute('SELECT OID FROM runs WHERE uuid=?', (run_uuid,))
            self._run_oid = c.fetchone()[0]

    def _update_spec(self, spec):
        # Insert the workload spec row and cache its OID; invoked whenever
        # the current iteration belongs to a different spec than the last.
        self._last_spec = spec
        spec_tuple = (spec.id, self._run_oid, spec.number_of_iterations, spec.label, spec.workload_name,
                      json.dumps(spec.boot_parameters), json.dumps(spec.runtime_parameters),
                      json.dumps(spec.workload_parameters))
        with self._open_connection() as conn:
            conn.execute('INSERT INTO workload_specs VALUES (?,?,?,?,?,?,?,?)', spec_tuple)
            conn.commit()
            c = conn.execute('SELECT OID FROM workload_specs WHERE run_oid=? AND id=?', (self._run_oid, spec.id))
            self._spec_oid = c.fetchone()[0]

    @contextmanager
    def _open_connection(self):
        # BUG FIX: the original (misspelled ``_open_connecton``) committed in
        # the ``finally`` clause but never closed the connection, leaking a
        # file handle on every call.
        conn = sqlite3.connect(self.database)
        try:
            yield conn
        finally:
            try:
                conn.commit()
            finally:
                conn.close()
diff --git a/wlauto/result_processors/standard.py b/wlauto/result_processors/standard.py
new file mode 100644
index 00000000..f0f5c8cd
--- /dev/null
+++ b/wlauto/result_processors/standard.py
@@ -0,0 +1,124 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=R0201
+"""
+This module contains a few "standard" result processors that write results to
+text files in various formats.
+
+"""
+import os
+import csv
+import json
+
+from wlauto import ResultProcessor, settings
+
+
class StandardProcessor(ResultProcessor):

    name = 'standard'
    description = """
    Creates a ``result.txt`` file for every iteration that contains metrics
    for that iteration.

    The metrics are written in ::

        metric = value [units]

    format.

    """

    def process_iteration_result(self, result, context):
        """Write one ``metric = value [units]`` line per metric."""
        outfile = os.path.join(context.output_directory, 'result.txt')
        with open(outfile, 'w') as wfh:
            for metric in result.metrics:
                parts = ['{} = {}'.format(metric.name, metric.value)]
                if metric.units:
                    parts.append(metric.units)
                wfh.write(' '.join(parts) + '\n')
        context.add_artifact('iteration_result', 'result.txt', 'export')
+
+
class CsvReportProcessor(ResultProcessor):
    """
    Creates a ``results.csv`` in the output directory containing results for
    all iterations in CSV format, each line containing a single metric.

    """

    name = 'csv'

    def process_run_result(self, result, context):
        """Dump every metric of every iteration as one CSV row."""
        outfile = os.path.join(settings.output_directory, 'results.csv')
        with open(outfile, 'wb') as wfh:
            writer = csv.writer(wfh)
            writer.writerow(['id', 'workload', 'iteration', 'metric', 'value', 'units'])
            # 'ir' rather than re-binding the 'result' parameter as the
            # original did.
            for ir in result.iteration_results:
                for metric in ir.metrics:
                    writer.writerow([ir.id, ir.spec.label, ir.iteration,
                                     metric.name, str(metric.value), metric.units or ''])
        context.add_artifact('run_result_csv', 'results.csv', 'export')
+
+
class JsonReportProcessor(ResultProcessor):
    """
    Creates a ``results.json`` in the output directory containing results for
    all iterations in JSON format.

    """

    name = 'json'

    def process_run_result(self, result, context):
        """Serialise all iterations' metrics into a single JSON document."""
        outfile = os.path.join(settings.output_directory, 'results.json')
        with open(outfile, 'wb') as wfh:
            output = []
            for ir in result.iteration_results:
                # Public attributes of each metric only; private/internal
                # ones (leading underscore) are dropped.
                metrics = []
                for m in ir.metrics:
                    metrics.append(dict((k, v) for k, v in m.__dict__.iteritems()
                                        if not k.startswith('_')))
                output.append({
                    'id': ir.id,
                    'workload': ir.workload.name,
                    'iteration': ir.iteration,
                    'metrics': metrics,
                })
            json.dump(output, wfh, indent=4)
        context.add_artifact('run_result_json', 'results.json', 'export')
+
+
class SummaryCsvProcessor(ResultProcessor):
    """
    Similar to csv result processor, but only contains workloads' summary metrics.

    """

    name = 'summary_csv'

    def process_run_result(self, result, context):
        """Write only the metrics each workload declares as summary metrics."""
        outfile = os.path.join(settings.output_directory, 'summary.csv')
        with open(outfile, 'wb') as wfh:
            writer = csv.writer(wfh)
            writer.writerow(['id', 'workload', 'iteration', 'metric', 'value', 'units'])
            for ir in result.iteration_results:
                summary_names = ir.workload.summary_metrics
                for metric in ir.metrics:
                    if metric.name in summary_names:
                        writer.writerow([ir.id, ir.workload.name, ir.iteration,
                                         metric.name, str(metric.value), metric.units or ''])
        context.add_artifact('run_result_summary', 'summary.csv', 'export')
diff --git a/wlauto/result_processors/status.py b/wlauto/result_processors/status.py
new file mode 100644
index 00000000..37819175
--- /dev/null
+++ b/wlauto/result_processors/status.py
@@ -0,0 +1,51 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=R0201
+import os
+import time
+from collections import Counter
+from wlauto import ResultProcessor
+from wlauto.utils.misc import write_table
+
+
class StatusTxtReporter(ResultProcessor):
    name = 'status'
    description = """
    Outputs a txt file containing general status information about which runs
    failed and which were successful

    """

    def process_run_result(self, result, context):
        """Write status.txt summarising overall and per-iteration status."""
        status_counts = Counter(ir.status for ir in result.iteration_results)

        outfile = os.path.join(context.run_output_directory, 'status.txt')
        self.logger.info('Status available in {}'.format(outfile))
        with open(outfile, 'w') as wfh:
            wfh.write('Run name: {}\n'.format(context.run_info.run_name))
            wfh.write('Run status: {}\n'.format(context.run_result.status))
            wfh.write('Date: {}\n'.format(time.strftime("%c")))
            wfh.write('{}/{} iterations completed without error\n'.format(
                status_counts['OK'], len(result.iteration_results)))
            wfh.write('\n')
            status_lines = []
            for ir in result.iteration_results:
                # First line of the first event's message, if there was one.
                first_event = ir.events and ir.events[0].message.split('\n')[0] or ''
                status_lines.append([str(ir.id), str(ir.spec.label), str(ir.iteration),
                                     str(ir.status), str(first_event)])
            write_table(status_lines, wfh, align='<<>><')
        context.add_artifact('run_status_summary', 'status.txt', 'export')
+
diff --git a/wlauto/result_processors/syeg.py b/wlauto/result_processors/syeg.py
new file mode 100644
index 00000000..e6e9fdb4
--- /dev/null
+++ b/wlauto/result_processors/syeg.py
@@ -0,0 +1,150 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+#pylint: disable=E1101,W0201
+import os
+import csv
+import math
+import re
+
+from wlauto import ResultProcessor, Parameter, File
+from wlauto.utils.misc import get_meansd
+
+
class SyegResultProcessor(ResultProcessor):

    name = 'syeg_csv'
    description = """
    Generates a CSV results file in the format expected by SYEG toolchain.

    Multiple iterations get parsed into columns, adds additional columns for mean
    and standard deviation, append number of threads to metric names (where
    applicable) and add some metadata based on external mapping files.

    """

    parameters = [
        Parameter('outfile', kind=str, default='syeg_out.csv',
                  description='The name of the output CSV file.'),
    ]

    def initialize(self, context):
        # Load the two external CSV mapping files through the resource
        # resolver. A missing file only produces a warning (see _read_map);
        # the corresponding output columns will then be empty.
        self.levelmap = self._read_map(context, 'final_sub.csv',
                                       'Could not find metrics level mapping.')
        self.typemap = self._read_map(context, 'types.csv',
                                      'Could not find benchmark suite types mapping.')

    def process_run_result(self, result, context):
        # Pivot the per-iteration metrics into one SyegResult row per
        # (spec label, metric name) pair, with one column per iteration.
        syeg_results = {}
        max_iterations = max(ir.iteration for ir in result.iteration_results)
        for ir in result.iteration_results:
            for metric in ir.metrics:
                key = ir.spec.label + metric.name
                if key not in syeg_results:
                    # First sighting of this (suite, metric): create the row
                    # and populate its metadata.
                    syeg_result = SyegResult(max_iterations)
                    syeg_result.suite = ir.spec.label
                    syeg_result.version = getattr(ir.workload, 'apk_version', None)
                    syeg_result.test = metric.name
                    if hasattr(ir.workload, 'number_of_threads'):
                        # Thread count is part of the test identity for
                        # multi-threaded benchmarks.
                        syeg_result.test += ' NT {} (Iterations/sec)'.format(ir.workload.number_of_threads)
                    syeg_result.final_sub = self.levelmap.get(metric.name)
                    syeg_result.lower_is_better = metric.lower_is_better
                    syeg_result.device = context.device.name
                    syeg_result.type = self._get_type(ir.workload.name, metric.name)
                    syeg_results[key] = syeg_result
                # Iteration numbers are 1-based; the runs list is 0-based.
                syeg_results[key].runs[ir.iteration - 1] = metric.value

        # Column order matters: this is the layout the SYEG toolchain expects.
        columns = ['device', 'suite', 'test', 'version', 'final_sub', 'best', 'average', 'deviation']
        columns += ['run{}'.format(i + 1) for i in xrange(max_iterations)]
        columns += ['type', 'suite_version']

        outfile = os.path.join(context.output_directory, self.outfile)
        # Binary mode is the correct way to drive the csv module on Python 2.
        with open(outfile, 'wb') as wfh:
            writer = csv.writer(wfh)
            writer.writerow(columns)
            for syeg_result in syeg_results.values():
                # The 'runN' columns are served by SyegResult.__getattr__.
                writer.writerow([getattr(syeg_result, c) for c in columns])
        context.add_artifact('syeg_csv', outfile, 'export')

    def _get_type(self, workload, metric):
        # Resolve a benchmark's type from the types mapping; suites marked
        # 'mixed' are disambiguated per metric name. If a metric mentions
        # both native and java/dalvik, 'dalvik' wins (it is checked last).
        metric = metric.lower()
        type_ = self.typemap.get(workload)
        if type_ == 'mixed':
            if 'native' in metric:
                type_ = 'native'
            if ('java' in metric) or ('dalvik' in metric):
                type_ = 'dalvik'
        return type_

    def _read_map(self, context, filename, errormsg):
        # Each mapping file is a two-column CSV; return {key: value} with
        # surrounding whitespace stripped, or {} (with a warning) if the
        # file cannot be resolved.
        mapfile = context.resolver.get(File(self, filename))
        if mapfile:
            with open(mapfile) as fh:
                reader = csv.reader(fh)
                return dict([c.strip() for c in r] for r in reader)
        else:
            self.logger.warning(errormsg)
            return {}
+
+
class SyegResult(object):
    """A single metric's results across all iterations of a workload spec.

    ``runs`` holds one slot per iteration; iterations that did not report
    this metric remain NaN and are excluded from the derived statistics.
    Dynamic ``run1``, ``run2``, ... attributes (see ``__getattr__``) expose
    the raw per-iteration values for CSV column generation.
    """

    @property
    def average(self):
        # Lazily compute and cache mean/standard deviation. An explicit
        # ``is None`` check is required here: the previous truthiness test
        # silently recomputed the statistics on every access whenever the
        # cached value happened to be 0.0.
        if self._mean is None:
            self._mean, self._sd = get_meansd(self.run_values)
        return self._mean

    @property
    def deviation(self):
        if self._sd is None:
            self._mean, self._sd = get_meansd(self.run_values)
        return self._sd

    @property
    def run_values(self):
        # Only values actually reported (NaN marks a missing iteration).
        return [r for r in self.runs if not math.isnan(r)]

    @property
    def best(self):
        # NOTE(review): raises ValueError when no iteration reported a
        # value; presumably a row is only created once at least one metric
        # value exists -- confirm against the caller.
        if self.lower_is_better:
            return min(self.run_values)
        else:
            return max(self.run_values)

    @property
    def suite_version(self):
        return ' '.join(map(str, [self.suite, self.version]))

    def __init__(self, max_iter):
        # All slots start as NaN ("not reported") until filled in.
        self.runs = [float('nan') for _ in xrange(max_iter)]
        self.device = None
        self.suite = None
        self.test = None
        self.version = None
        self.final_sub = None
        self.lower_is_better = None
        self.type = None
        # None means "not yet computed" for the cached statistics.
        self._mean = None
        self._sd = None

    def __getattr__(self, name):
        # Serve the dynamic 'runN' attributes (1-based); anything else is a
        # genuine attribute error. Only reached when normal lookup fails.
        match = re.search(r'run(\d+)', name)
        if not match:
            raise AttributeError(name)
        return self.runs[int(match.group(1)) - 1]
+
+
diff --git a/wlauto/tests/README b/wlauto/tests/README
new file mode 100644
index 00000000..d5168289
--- /dev/null
+++ b/wlauto/tests/README
@@ -0,0 +1,12 @@
+To run these tests you need to have nose package installed. You can get it from PyPI by using pip:
+
+ pip install nose
+
+Or, if you're on Ubuntu, you can get it from distribution repositories:
+
+ sudo apt-get install python-nose
+
+Once you have it, you can run the tests by executing the following from the project's top-level directory (the
+one with setup.py):
+
+ nosetests
diff --git a/wlauto/tests/__init__.py b/wlauto/tests/__init__.py
new file mode 100644
index 00000000..cd5d64d6
--- /dev/null
+++ b/wlauto/tests/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
diff --git a/wlauto/tests/data/extensions/devices/test_device.py b/wlauto/tests/data/extensions/devices/test_device.py
new file mode 100644
index 00000000..75eeb1f3
--- /dev/null
+++ b/wlauto/tests/data/extensions/devices/test_device.py
@@ -0,0 +1,49 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto import Device
+
+
class TestDevice(Device):
    # Minimal Device stub used by the unit tests: every overridden device
    # operation is a no-op that just counts how many times it was invoked,
    # so tests can assert on call counts.

    name = 'test-device'

    def __init__(self, *args, **kwargs):
        # NOTE(review): deliberately does not call Device.__init__, so this
        # stub carries no real device state -- confirm the tests using it
        # never rely on base-class initialisation.
        self.modules = []
        self.boot_called = 0
        self.push_file_called = 0
        self.pull_file_called = 0
        self.execute_called = 0
        self.set_sysfile_int_called = 0
        self.close_called = 0

    def boot(self):
        self.boot_called += 1

    def push_file(self, source, dest):
        self.push_file_called += 1

    def pull_file(self, source, dest):
        self.pull_file_called += 1

    def execute(self, command):
        self.execute_called += 1

    def set_sysfile_int(self, file, value):
        self.set_sysfile_int_called += 1

    # NOTE(review): close() taking a 'command' argument looks odd for a
    # close operation -- verify against the Device interface.
    def close(self, command):
        self.close_called += 1
diff --git a/wlauto/tests/data/interrupts/after b/wlauto/tests/data/interrupts/after
new file mode 100755
index 00000000..93145098
--- /dev/null
+++ b/wlauto/tests/data/interrupts/after
@@ -0,0 +1,98 @@
+ CPU0 CPU1 CPU2 CPU3 CPU4 CPU5 CPU6 CPU7
+ 65: 0 0 0 0 0 0 0 0 GIC dma-pl330.2
+ 66: 0 0 0 0 0 0 0 0 GIC dma-pl330.0
+ 67: 0 0 0 0 0 0 0 0 GIC dma-pl330.1
+ 74: 0 0 0 0 0 0 0 0 GIC s3c2410-wdt
+ 85: 2 0 0 0 0 0 0 0 GIC exynos4210-uart
+ 89: 368 0 0 0 0 0 0 0 GIC s3c2440-i2c.1
+ 90: 0 0 0 0 0 0 0 0 GIC s3c2440-i2c.2
+ 92: 1294 0 0 0 0 0 0 0 GIC exynos5-hs-i2c.0
+ 95: 831 0 0 0 0 0 0 0 GIC exynos5-hs-i2c.3
+103: 1 0 0 0 0 0 0 0 GIC ehci_hcd:usb1, ohci_hcd:usb2
+104: 7304 0 0 0 0 0 0 0 GIC xhci_hcd:usb3, exynos-ss-udc.0
+105: 0 0 0 0 0 0 0 0 GIC xhci_hcd:usb5
+106: 0 0 0 0 0 0 0 0 GIC mali.0
+107: 16429 0 0 0 0 0 0 0 GIC dw-mci
+108: 1 0 0 0 0 0 0 0 GIC dw-mci
+109: 0 0 0 0 0 0 0 0 GIC dw-mci
+114: 28074 0 0 0 0 0 0 0 GIC mipi-dsi
+117: 0 0 0 0 0 0 0 0 GIC exynos-gsc
+118: 0 0 0 0 0 0 0 0 GIC exynos-gsc
+121: 0 0 0 0 0 0 0 0 GIC exynos5-jpeg-hx
+123: 7 0 0 0 0 0 0 0 GIC s5p-fimg2d
+126: 0 0 0 0 0 0 0 0 GIC s5p-mixer
+127: 0 0 0 0 0 0 0 0 GIC hdmi-int
+128: 0 0 0 0 0 0 0 0 GIC s5p-mfc-v6
+142: 0 0 0 0 0 0 0 0 GIC dma-pl330.3
+146: 0 0 0 0 0 0 0 0 GIC s5p-tvout-cec
+149: 1035 0 0 0 0 0 0 0 GIC mali.0
+152: 26439 0 0 0 0 0 0 0 GIC mct_tick0
+153: 0 2891 0 0 0 0 0 0 GIC mct_tick1
+154: 0 0 3969 0 0 0 0 0 GIC mct_tick2
+155: 0 0 0 2385 0 0 0 0 GIC mct_tick3
+160: 0 0 0 0 8038 0 0 0 GIC mct_tick4
+161: 0 0 0 0 0 8474 0 0 GIC mct_tick5
+162: 0 0 0 0 0 0 7842 0 GIC mct_tick6
+163: 0 0 0 0 0 0 0 7827 GIC mct_tick7
+200: 0 0 0 0 0 0 0 0 GIC exynos5-jpeg-hx
+201: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.29
+218: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.25
+220: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.27
+224: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.19
+251: 320 0 0 0 0 0 0 0 GIC mali.0
+252: 0 0 0 0 0 0 0 0 GIC exynos5-scaler
+253: 0 0 0 0 0 0 0 0 GIC exynos5-scaler
+254: 0 0 0 0 0 0 0 0 GIC exynos5-scaler
+272: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.5
+274: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.6
+280: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.11
+282: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.30
+284: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.12
+286: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.17
+288: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.4
+290: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.20
+294: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9
+296: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9
+298: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9
+300: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9
+302: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16
+306: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.0
+316: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.2
+325: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.0
+332: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16
+340: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16
+342: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9
+344: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16
+405: 327 0 0 0 0 0 0 0 combiner s3c_fb
+409: 0 0 0 0 0 0 0 0 combiner mcuctl
+414: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.28
+434: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.22
+436: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.23
+438: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.26
+443: 12 0 0 0 0 0 0 0 combiner mct_comp_irq
+446: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.21
+449: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.13
+453: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.15
+474: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.24
+512: 0 0 0 0 0 0 0 0 exynos-eint gpio-keys: KEY_POWER
+518: 0 0 0 0 0 0 0 0 exynos-eint drd_switch_vbus
+524: 0 0 0 0 0 0 0 0 exynos-eint gpio-keys: KEY_HOMEPAGE
+526: 1 0 0 0 0 0 0 0 exynos-eint HOST_DETECT
+527: 1 0 0 0 0 0 0 0 exynos-eint drd_switch_id
+531: 1 0 0 0 0 0 0 0 exynos-eint drd_switch_vbus
+532: 1 0 0 0 0 0 0 0 exynos-eint drd_switch_id
+537: 3 0 0 0 0 0 0 0 exynos-eint mxt540e_ts
+538: 0 0 0 0 0 0 0 0 exynos-eint sec-pmic-irq
+543: 1 0 0 0 0 0 0 0 exynos-eint hdmi-ext
+544: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_VOLUMEDOWN
+545: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_VOLUMEUP
+546: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_MENU
+547: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_BACK
+655: 0 0 0 0 0 0 0 0 sec-pmic rtc-alarm0
+IPI0: 0 0 0 0 0 0 0 0 Timer broadcast interrupts
+IPI1: 8823 7185 4642 5652 2370 2069 1452 1351 Rescheduling interrupts
+IPI2: 4 7 8 6 8 7 8 8 Function call interrupts
+IPI3: 1 0 0 0 0 0 0 0 Single function call interrupts
+IPI4: 0 0 0 0 0 0 0 0 CPU stop interrupts
+IPI5: 0 0 0 0 0 0 0 0 CPU backtrace
+Err: 0
diff --git a/wlauto/tests/data/interrupts/before b/wlauto/tests/data/interrupts/before
new file mode 100755
index 00000000..a332b8e9
--- /dev/null
+++ b/wlauto/tests/data/interrupts/before
@@ -0,0 +1,97 @@
+ CPU0 CPU1 CPU2 CPU3 CPU4 CPU5 CPU6 CPU7
+ 65: 0 0 0 0 0 0 0 0 GIC dma-pl330.2
+ 66: 0 0 0 0 0 0 0 0 GIC dma-pl330.0
+ 67: 0 0 0 0 0 0 0 0 GIC dma-pl330.1
+ 74: 0 0 0 0 0 0 0 0 GIC s3c2410-wdt
+ 85: 2 0 0 0 0 0 0 0 GIC exynos4210-uart
+ 89: 368 0 0 0 0 0 0 0 GIC s3c2440-i2c.1
+ 90: 0 0 0 0 0 0 0 0 GIC s3c2440-i2c.2
+ 92: 1204 0 0 0 0 0 0 0 GIC exynos5-hs-i2c.0
+ 95: 831 0 0 0 0 0 0 0 GIC exynos5-hs-i2c.3
+103: 1 0 0 0 0 0 0 0 GIC ehci_hcd:usb1, ohci_hcd:usb2
+104: 7199 0 0 0 0 0 0 0 GIC xhci_hcd:usb3, exynos-ss-udc.0
+105: 0 0 0 0 0 0 0 0 GIC xhci_hcd:usb5
+106: 0 0 0 0 0 0 0 0 GIC mali.0
+107: 16429 0 0 0 0 0 0 0 GIC dw-mci
+108: 1 0 0 0 0 0 0 0 GIC dw-mci
+109: 0 0 0 0 0 0 0 0 GIC dw-mci
+114: 26209 0 0 0 0 0 0 0 GIC mipi-dsi
+117: 0 0 0 0 0 0 0 0 GIC exynos-gsc
+118: 0 0 0 0 0 0 0 0 GIC exynos-gsc
+121: 0 0 0 0 0 0 0 0 GIC exynos5-jpeg-hx
+123: 7 0 0 0 0 0 0 0 GIC s5p-fimg2d
+126: 0 0 0 0 0 0 0 0 GIC s5p-mixer
+127: 0 0 0 0 0 0 0 0 GIC hdmi-int
+128: 0 0 0 0 0 0 0 0 GIC s5p-mfc-v6
+142: 0 0 0 0 0 0 0 0 GIC dma-pl330.3
+146: 0 0 0 0 0 0 0 0 GIC s5p-tvout-cec
+149: 1004 0 0 0 0 0 0 0 GIC mali.0
+152: 26235 0 0 0 0 0 0 0 GIC mct_tick0
+153: 0 2579 0 0 0 0 0 0 GIC mct_tick1
+154: 0 0 3726 0 0 0 0 0 GIC mct_tick2
+155: 0 0 0 2262 0 0 0 0 GIC mct_tick3
+161: 0 0 0 0 0 2554 0 0 GIC mct_tick5
+162: 0 0 0 0 0 0 1911 0 GIC mct_tick6
+163: 0 0 0 0 0 0 0 1928 GIC mct_tick7
+200: 0 0 0 0 0 0 0 0 GIC exynos5-jpeg-hx
+201: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.29
+218: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.25
+220: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.27
+224: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.19
+251: 312 0 0 0 0 0 0 0 GIC mali.0
+252: 0 0 0 0 0 0 0 0 GIC exynos5-scaler
+253: 0 0 0 0 0 0 0 0 GIC exynos5-scaler
+254: 0 0 0 0 0 0 0 0 GIC exynos5-scaler
+272: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.5
+274: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.6
+280: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.11
+282: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.30
+284: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.12
+286: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.17
+288: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.4
+290: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.20
+294: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9
+296: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9
+298: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9
+300: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9
+302: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16
+306: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.0
+316: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.2
+325: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.0
+332: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16
+340: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16
+342: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9
+344: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16
+405: 322 0 0 0 0 0 0 0 combiner s3c_fb
+409: 0 0 0 0 0 0 0 0 combiner mcuctl
+414: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.28
+434: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.22
+436: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.23
+438: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.26
+443: 12 0 0 0 0 0 0 0 combiner mct_comp_irq
+446: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.21
+449: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.13
+453: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.15
+474: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.24
+512: 0 0 0 0 0 0 0 0 exynos-eint gpio-keys: KEY_POWER
+518: 0 0 0 0 0 0 0 0 exynos-eint drd_switch_vbus
+524: 0 0 0 0 0 0 0 0 exynos-eint gpio-keys: KEY_HOMEPAGE
+526: 1 0 0 0 0 0 0 0 exynos-eint HOST_DETECT
+527: 1 0 0 0 0 0 0 0 exynos-eint drd_switch_id
+531: 1 0 0 0 0 0 0 0 exynos-eint drd_switch_vbus
+532: 1 0 0 0 0 0 0 0 exynos-eint drd_switch_id
+537: 3 0 0 0 0 0 0 0 exynos-eint mxt540e_ts
+538: 0 0 0 0 0 0 0 0 exynos-eint sec-pmic-irq
+543: 1 0 0 0 0 0 0 0 exynos-eint hdmi-ext
+544: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_VOLUMEDOWN
+545: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_VOLUMEUP
+546: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_MENU
+547: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_BACK
+655: 0 0 0 0 0 0 0 0 sec-pmic rtc-alarm0
+IPI0: 0 0 0 0 0 0 0 0 Timer broadcast interrupts
+IPI1: 8751 7147 4615 5623 2334 2066 1449 1348 Rescheduling interrupts
+IPI2: 3 6 7 6 7 6 7 7 Function call interrupts
+IPI3: 1 0 0 0 0 0 0 0 Single function call interrupts
+IPI4: 0 0 0 0 0 0 0 0 CPU stop interrupts
+IPI5: 0 0 0 0 0 0 0 0 CPU backtrace
+Err: 0
diff --git a/wlauto/tests/data/interrupts/result b/wlauto/tests/data/interrupts/result
new file mode 100755
index 00000000..b9ec2dd1
--- /dev/null
+++ b/wlauto/tests/data/interrupts/result
@@ -0,0 +1,98 @@
+ CPU0 CPU1 CPU2 CPU3 CPU4 CPU5 CPU6 CPU7
+ 65: 0 0 0 0 0 0 0 0 GIC dma-pl330.2
+ 66: 0 0 0 0 0 0 0 0 GIC dma-pl330.0
+ 67: 0 0 0 0 0 0 0 0 GIC dma-pl330.1
+ 74: 0 0 0 0 0 0 0 0 GIC s3c2410-wdt
+ 85: 0 0 0 0 0 0 0 0 GIC exynos4210-uart
+ 89: 0 0 0 0 0 0 0 0 GIC s3c2440-i2c.1
+ 90: 0 0 0 0 0 0 0 0 GIC s3c2440-i2c.2
+ 92: 90 0 0 0 0 0 0 0 GIC exynos5-hs-i2c.0
+ 95: 0 0 0 0 0 0 0 0 GIC exynos5-hs-i2c.3
+ 103: 0 0 0 0 0 0 0 0 GIC ehci_hcd:usb1, ohci_hcd:usb2
+ 104: 105 0 0 0 0 0 0 0 GIC xhci_hcd:usb3, exynos-ss-udc.0
+ 105: 0 0 0 0 0 0 0 0 GIC xhci_hcd:usb5
+ 106: 0 0 0 0 0 0 0 0 GIC mali.0
+ 107: 0 0 0 0 0 0 0 0 GIC dw-mci
+ 108: 0 0 0 0 0 0 0 0 GIC dw-mci
+ 109: 0 0 0 0 0 0 0 0 GIC dw-mci
+ 114: 1865 0 0 0 0 0 0 0 GIC mipi-dsi
+ 117: 0 0 0 0 0 0 0 0 GIC exynos-gsc
+ 118: 0 0 0 0 0 0 0 0 GIC exynos-gsc
+ 121: 0 0 0 0 0 0 0 0 GIC exynos5-jpeg-hx
+ 123: 0 0 0 0 0 0 0 0 GIC s5p-fimg2d
+ 126: 0 0 0 0 0 0 0 0 GIC s5p-mixer
+ 127: 0 0 0 0 0 0 0 0 GIC hdmi-int
+ 128: 0 0 0 0 0 0 0 0 GIC s5p-mfc-v6
+ 142: 0 0 0 0 0 0 0 0 GIC dma-pl330.3
+ 146: 0 0 0 0 0 0 0 0 GIC s5p-tvout-cec
+ 149: 31 0 0 0 0 0 0 0 GIC mali.0
+ 152: 204 0 0 0 0 0 0 0 GIC mct_tick0
+ 153: 0 312 0 0 0 0 0 0 GIC mct_tick1
+ 154: 0 0 243 0 0 0 0 0 GIC mct_tick2
+ 155: 0 0 0 123 0 0 0 0 GIC mct_tick3
+> 160: 0 0 0 0 8038 0 0 0 GIC mct_tick4
+ 161: 0 0 0 0 0 5920 0 0 GIC mct_tick5
+ 162: 0 0 0 0 0 0 5931 0 GIC mct_tick6
+ 163: 0 0 0 0 0 0 0 5899 GIC mct_tick7
+ 200: 0 0 0 0 0 0 0 0 GIC exynos5-jpeg-hx
+ 201: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.29
+ 218: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.25
+ 220: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.27
+ 224: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.19
+ 251: 8 0 0 0 0 0 0 0 GIC mali.0
+ 252: 0 0 0 0 0 0 0 0 GIC exynos5-scaler
+ 253: 0 0 0 0 0 0 0 0 GIC exynos5-scaler
+ 254: 0 0 0 0 0 0 0 0 GIC exynos5-scaler
+ 272: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.5
+ 274: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.6
+ 280: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.11
+ 282: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.30
+ 284: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.12
+ 286: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.17
+ 288: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.4
+ 290: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.20
+ 294: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9
+ 296: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9
+ 298: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9
+ 300: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9
+ 302: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16
+ 306: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.0
+ 316: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.2
+ 325: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.0
+ 332: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16
+ 340: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16
+ 342: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9
+ 344: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16
+ 405: 5 0 0 0 0 0 0 0 combiner s3c_fb
+ 409: 0 0 0 0 0 0 0 0 combiner mcuctl
+ 414: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.28
+ 434: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.22
+ 436: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.23
+ 438: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.26
+ 443: 0 0 0 0 0 0 0 0 combiner mct_comp_irq
+ 446: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.21
+ 449: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.13
+ 453: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.15
+ 474: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.24
+ 512: 0 0 0 0 0 0 0 0 exynos-eint gpio-keys: KEY_POWER
+ 518: 0 0 0 0 0 0 0 0 exynos-eint drd_switch_vbus
+ 524: 0 0 0 0 0 0 0 0 exynos-eint gpio-keys: KEY_HOMEPAGE
+ 526: 0 0 0 0 0 0 0 0 exynos-eint HOST_DETECT
+ 527: 0 0 0 0 0 0 0 0 exynos-eint drd_switch_id
+ 531: 0 0 0 0 0 0 0 0 exynos-eint drd_switch_vbus
+ 532: 0 0 0 0 0 0 0 0 exynos-eint drd_switch_id
+ 537: 0 0 0 0 0 0 0 0 exynos-eint mxt540e_ts
+ 538: 0 0 0 0 0 0 0 0 exynos-eint sec-pmic-irq
+ 543: 0 0 0 0 0 0 0 0 exynos-eint hdmi-ext
+ 544: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_VOLUMEDOWN
+ 545: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_VOLUMEUP
+ 546: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_MENU
+ 547: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_BACK
+ 655: 0 0 0 0 0 0 0 0 sec-pmic rtc-alarm0
+ IPI0: 0 0 0 0 0 0 0 0 Timer broadcast interrupts
+ IPI1: 72 38 27 29 36 3 3 3 Rescheduling interrupts
+ IPI2: 1 1 1 0 1 1 1 1 Function call interrupts
+ IPI3: 0 0 0 0 0 0 0 0 Single function call interrupts
+ IPI4: 0 0 0 0 0 0 0 0 CPU stop interrupts
+ IPI5: 0 0 0 0 0 0 0 0 CPU backtrace
+ Err: 0
diff --git a/wlauto/tests/data/logcat.2.log b/wlauto/tests/data/logcat.2.log
new file mode 100644
index 00000000..eafed2b8
--- /dev/null
+++ b/wlauto/tests/data/logcat.2.log
@@ -0,0 +1,14 @@
+--------- beginning of /dev/log/main
+D/TextView( 2468): 7:07
+D/TextView( 2468): 7:07
+D/TextView( 2468): Thu, June 27
+--------- beginning of /dev/log/system
+D/TextView( 3099): CaffeineMark results
+D/TextView( 3099): Overall score:
+D/TextView( 3099): Rating
+D/TextView( 3099): Rank
+D/TextView( 3099): 0
+D/TextView( 3099): Details
+D/TextView( 3099): Publish
+D/TextView( 3099): Top 10
+D/TextView( 3099): 3672
diff --git a/wlauto/tests/data/logcat.log b/wlauto/tests/data/logcat.log
new file mode 100644
index 00000000..48703402
--- /dev/null
+++ b/wlauto/tests/data/logcat.log
@@ -0,0 +1,10 @@
+--------- beginning of /dev/log/main
+--------- beginning of /dev/log/system
+D/TextView( 2462): 5:05
+D/TextView( 2462): 5:05
+D/TextView( 2462): Mon, June 24
+D/TextView( 3072): Stop Test
+D/TextView( 3072): Testing CPU and memory…
+D/TextView( 3072): 0%
+D/TextView( 3072): Testing CPU and memory…
+
diff --git a/wlauto/tests/data/test-agenda.yaml b/wlauto/tests/data/test-agenda.yaml
new file mode 100644
index 00000000..85163a40
--- /dev/null
+++ b/wlauto/tests/data/test-agenda.yaml
@@ -0,0 +1,25 @@
+global:
+ iterations: 8
+ boot_parameters:
+ os_mode: mp_a15_bootcluster
+ runtime_parameters:
+ a7_governor: Interactive
+ a15_governor: Interactive2
+ a7_cores: 3
+ a15_cores: 2
+workloads:
+ - id: 1c
+ workload_name: bbench_with_audio
+ - id: 1d
+ workload_name: Bbench_with_audio
+ runtime_parameters:
+ os_mode: mp_a7_only
+ a7_cores: 0
+ iterations: 4
+ - id: 1e
+ workload_name: audio
+ - id: 1f
+ workload_name: antutu
+ runtime_parameters:
+ a7_cores: 1
+ a15_cores: 1
diff --git a/wlauto/tests/data/test-config.py b/wlauto/tests/data/test-config.py
new file mode 100644
index 00000000..56c3288b
--- /dev/null
+++ b/wlauto/tests/data/test-config.py
@@ -0,0 +1,17 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+device = 'TEST'
diff --git a/wlauto/tests/test_agenda.py b/wlauto/tests/test_agenda.py
new file mode 100644
index 00000000..feadee87
--- /dev/null
+++ b/wlauto/tests/test_agenda.py
@@ -0,0 +1,195 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=E0611
+# pylint: disable=R0201
+import os
+from StringIO import StringIO
+from unittest import TestCase
+
+from nose.tools import assert_equal, assert_in, raises
+
+from wlauto.core.agenda import Agenda
+from wlauto.exceptions import ConfigError
+
+
# Path to the on-disk agenda fixture shipped under tests/data.
YAML_TEST_FILE = os.path.join(os.path.dirname(__file__), 'data', 'test-agenda.yaml')

# Agenda whose workload entry lacks a name -- used to check that the
# mandatory-field validation fires.
invalid_agenda_text = """
workloads:
    - id: 1
      workload_parameters:
          test: 1
"""
invalid_agenda = StringIO(invalid_agenda_text)
invalid_agenda.name = 'invalid1'  # Agenda reports the stream's name in errors

# Two workload entries share id '1' -- must raise a "duplicate" ConfigError.
duplicate_agenda_text = """
global:
    iterations: 1
workloads:
    - id: 1
      workload_name: antutu
      workload_parameters:
          test: 1
    - id: 1
      workload_name: andebench
"""
duplicate_agenda = StringIO(duplicate_agenda_text)
duplicate_agenda.name = 'invalid2'

# Minimal inline-list form; everything else defaulted.
short_agenda_text = """
workloads: [antutu, linpack, andebench]
"""
short_agenda = StringIO(short_agenda_text)
short_agenda.name = 'short'

# Mix of entries with and without explicit ids, exercising default id
# assignment (explicit ids '1' and 'test' must be skipped over).
default_ids_agenda_text = """
workloads:
    - antutu
    - id: 1
      name: linpack
    - id: test
      name: andebench
      params:
          number_of_threads: 1
    - vellamo
"""
default_ids_agenda = StringIO(default_ids_agenda_text)
default_ids_agenda.name = 'default_ids'

# Sections with per-section and per-workload runtime_params overrides.
sectioned_agenda_text = """
sections:
    - id: sec1
      runtime_params:
          dp: one
      workloads:
          - antutu
          - andebench
          - name: linpack
            runtime_params:
                dp: two
    - id: sec2
      runtime_params:
          dp: three
      workloads:
          - antutu
workloads:
    - nenamark
"""
sectioned_agenda = StringIO(sectioned_agenda_text)
sectioned_agenda.name = 'sectioned'

# Duplicate section ids -- must raise ConfigError.
dup_sectioned_agenda_text = """
sections:
    - id: sec1
      workloads:
          - antutu
    - id: sec1
      workloads:
          - andebench
workloads:
    - nenamark
"""
dup_sectioned_agenda = StringIO(dup_sectioned_agenda_text)
dup_sectioned_agenda.name = 'dup-sectioned'

# sysfile_values keys contain mixed case and embedded spaces; they must be
# preserved verbatim through agenda parsing.
caps_agenda_text = """
config:
    device: TC2
global:
    runtime_parameters:
        sysfile_values:
            /sys/test/MyFile: 1
            /sys/test/other file: 2
workloads:
    - id: 1
      name: linpack
"""
caps_agenda = StringIO(caps_agenda_text)
caps_agenda.name = 'caps'

# The literal tab (\t) below is deliberate: it makes the YAML syntactically
# invalid, which must surface as a ConfigError rather than a raw parse error.
bad_syntax_agenda_text = """
config:
    # tab on the following line
\treboot_policy: never
workloads:
    - antutu
"""
bad_syntax_agenda = StringIO(bad_syntax_agenda_text)
bad_syntax_agenda.name = 'bad_syntax'

# Sections without workloads of their own; used for section id handling.
section_ids_test_text = """
config:
    device: TC2
    reboot_policy: never
workloads:
    - name: bbench
      id: bbench
    - name: audio
sections:
    - id: foo
    - id: bar
"""
section_ids_agenda = StringIO(section_ids_test_text)
section_ids_agenda.name = 'section_ids'
+
+
class AgendaTest(TestCase):
    # Exercises Agenda parsing: loading from file and from StringIO,
    # default id assignment, sections, and the validation errors raised
    # for malformed agendas (fixtures are defined at module level).

    def test_yaml_load(self):
        # The on-disk fixture defines four workload entries.
        agenda = Agenda(YAML_TEST_FILE)
        assert_equal(len(agenda.workloads), 4)

    def test_duplicate_id(self):
        try:
            Agenda(duplicate_agenda)
        except ConfigError, e:
            assert_in('duplicate', e.message.lower())  # pylint: disable=E1101
        else:
            raise Exception('ConfigError was not raised for an agenda with duplicate ids.')

    def test_yaml_missing_field(self):
        # NOTE(review): passes the raw text rather than the StringIO wrapper
        # (invalid_agenda) defined at module level -- both forms work, but
        # confirm the wrapper being unused here is intentional.
        try:
            Agenda(invalid_agenda_text)
        except ConfigError, e:
            assert_in('workload name', e.message)
        else:
            raise Exception('ConfigError was not raised for an invalid agenda.')

    def test_defaults(self):
        # Bare workload names get sequential string ids starting at '1'.
        agenda = Agenda(short_agenda)
        assert_equal(len(agenda.workloads), 3)
        assert_equal(agenda.workloads[0].workload_name, 'antutu')
        assert_equal(agenda.workloads[0].id, '1')

    def test_default_id_assignment(self):
        # Entries without explicit ids receive the next free numeric id;
        # the explicit ids '1' and 'test' are skipped over.
        agenda = Agenda(default_ids_agenda)
        assert_equal(agenda.workloads[0].id, '2')
        assert_equal(agenda.workloads[3].id, '3')

    def test_sections(self):
        agenda = Agenda(sectioned_agenda)
        assert_equal(agenda.sections[0].workloads[0].workload_name, 'antutu')
        assert_equal(agenda.sections[1].runtime_parameters['dp'], 'three')

    @raises(ConfigError)
    def test_dup_sections(self):
        Agenda(dup_sectioned_agenda)

    @raises(ConfigError)
    def test_bad_syntax(self):
        Agenda(bad_syntax_agenda)
diff --git a/wlauto/tests/test_config.py b/wlauto/tests/test_config.py
new file mode 100644
index 00000000..a7e9a5d2
--- /dev/null
+++ b/wlauto/tests/test_config.py
@@ -0,0 +1,151 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=E0611,R0201
+import os
+import tempfile
+from unittest import TestCase
+
+from nose.tools import assert_equal, assert_in, raises
+
+from wlauto.core.bootstrap import ConfigLoader
+from wlauto.core.agenda import AgendaWorkloadEntry, AgendaGlobalEntry
+from wlauto.core.configuration import RunConfiguration
+from wlauto.exceptions import ConfigError
+
+
# Directory containing the on-disk test fixtures.
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')

# Deliberately malformed config source: the first string literal is
# unterminated, which must surface as a ConfigError when loaded.
BAD_CONFIG_TEXT = """device = 'TEST
device_config = 'TEST-CONFIG'"""
+
+
class MockExtensionLoader(object):
    # Stand-in for the real ExtensionLoader: resolves any extension name
    # to a NamedMock, reports no aliases and no default config.

    def __init__(self):
        self.aliases = {}
        self.global_param_aliases = {}
        self.extensions = {}

    def get_extension_class(self, name, kind=None):  # pylint: disable=unused-argument
        # NOTE(review): despite the name, this returns *instances*, not
        # classes -- presumably sufficient for how RunConfiguration uses
        # the loader; confirm against RunConfiguration.
        if name == 'defaults_workload':
            return DefaultsWorkload()
        else:
            return NamedMock(name)

    def resolve_alias(self, name):
        # No aliasing: every name resolves to itself with no extra config.
        return name, {}

    def get_default_config(self, name):  # pylint: disable=unused-argument
        return {}

    def has_extension(self, name):
        return name in self.aliases or name in self.extensions
+
+
class MockAgenda(object):
    # Bare-bones Agenda substitute: the workload entries are whatever is
    # passed to the constructor; no config, sections, or global overrides.

    def __init__(self, *args):
        self.config = {}
        self.global_ = AgendaGlobalEntry()
        self.sections = []
        self.workloads = args
+
+
class NamedMock(object):
    """Self-propagating attribute mock.

    Any attribute not set explicitly resolves, on first access, to a fresh
    child ``NamedMock`` named after the attribute; later accesses return
    the same cached child. ``global_alias`` is pre-seeded as None, and
    ``parameters`` starts as an empty list.
    """

    def __init__(self, name):
        self.__attrs = {'global_alias': None}
        self.name = name
        self.parameters = []

    def __getattr__(self, attr):
        # Only reached when normal attribute lookup fails; __attrs itself
        # is always found normally, so there is no recursion.
        cache = self.__attrs
        if attr not in cache:
            cache[attr] = NamedMock(attr)
        return cache[attr]
+
+
class DefaultsWorkload(object):
    # Workload stub exposing a single parameter whose default is a *list*;
    # used to test how list-valued defaults merge with agenda-supplied values.

    def __init__(self):
        self.name = 'defaults_workload'
        self.parameters = [NamedMock('param')]
        self.parameters[0].default = [1, 2]
+
+
class ConfigLoaderTest(TestCase):

    def setUp(self):
        # Write the malformed config to a throw-away file; removed in
        # tearDown. NOTE(review): tempfile.mktemp() is race-prone
        # (mkstemp would be safer), though it hardly matters in a test.
        self.filepath = tempfile.mktemp()
        with open(self.filepath, 'w') as wfh:
            wfh.write(BAD_CONFIG_TEXT)

    def test_load(self):
        # A well-formed Python config file populates loader attributes.
        test_cfg_file = os.path.join(DATA_DIR, 'test-config.py')
        config = ConfigLoader()
        config.update(test_cfg_file)
        assert_equal(config.device, 'TEST')

    @raises(ConfigError)
    def test_load_bad(self):
        # The unterminated string in BAD_CONFIG_TEXT must raise ConfigError.
        config_loader = ConfigLoader()
        config_loader.update(self.filepath)

    def test_load_duplicate(self):
        # Updating twice with the same instrumentation list must not
        # produce duplicate entries.
        config_loader = ConfigLoader()
        config_loader.update(dict(instrumentation=['test']))
        config_loader.update(dict(instrumentation=['test']))
        assert_equal(config_loader.instrumentation, ['test'])

    def tearDown(self):
        os.unlink(self.filepath)
+
+
class ConfigTest(TestCase):

    def setUp(self):
        # RunConfiguration is driven entirely through the mock loader, so
        # no real extensions are required.
        self.config = RunConfiguration(MockExtensionLoader())
        self.config.load_config({'device': 'MockDevice'})

    def test_case(self):
        # sysfile_values keys contain mixed case and an embedded space; they
        # must survive agenda -> workload-spec translation unmangled.
        devparams = {
            'sysfile_values': {
                '/sys/test/MyFile': 1,
                '/sys/test/other file': 2,
            }
        }
        ws = AgendaWorkloadEntry(id='a', iterations=1, name='linpack', runtime_parameters=devparams)
        self.config.set_agenda(MockAgenda(ws))
        spec = self.config.workload_specs[0]
        assert_in('/sys/test/MyFile', spec.runtime_parameters['sysfile_values'])
        assert_in('/sys/test/other file', spec.runtime_parameters['sysfile_values'])

    def test_list_defaults_params(self):
        # An explicit agenda value replaces (not extends) a list-valued
        # parameter default ([1, 2] from DefaultsWorkload).
        ws = AgendaWorkloadEntry(id='a', iterations=1,
                                 name='defaults_workload', workload_parameters={'param':[3]})
        self.config.set_agenda(MockAgenda(ws))
        spec = self.config.workload_specs[0]
        assert_equal(spec.workload_parameters, {'param': [3]})

    def test_global_instrumentation(self):
        # Spec-local instrumentation is listed before globally-configured
        # instrumentation in the finalized spec.
        self.config.load_config({'instrumentation': ['global_instrument']})
        ws = AgendaWorkloadEntry(id='a', iterations=1, name='linpack', instrumentation=['local_instrument'])
        self.config.set_agenda(MockAgenda(ws))
        self.config.finalize()
        assert_equal(self.config.workload_specs[0].instrumentation,
                     ['local_instrument', 'global_instrument'])
diff --git a/wlauto/tests/test_device.py b/wlauto/tests/test_device.py
new file mode 100644
index 00000000..7bd1d69b
--- /dev/null
+++ b/wlauto/tests/test_device.py
@@ -0,0 +1,99 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=abstract-method,no-self-use,no-name-in-module
+from collections import defaultdict, OrderedDict
+from unittest import TestCase
+
+from nose.tools import raises, assert_equal
+
+from wlauto import Device, Parameter, RuntimeParameter, CoreParameter
+from wlauto.exceptions import ConfigError
+
+
+class TestDevice(Device):
+
+ name = 'test-device'
+ path_module = 'posixpath'
+
+ parameters = [
+ Parameter('core_names', default=['a7', 'a7', 'a15'], override=True),
+ Parameter('core_clusters', default=[0, 0, 1], override=True),
+ ]
+
+ runtime_parameters = [
+ RuntimeParameter('test_param', 'getter', 'setter'),
+ RuntimeParameter('test_param2', 'getter', 'setter'),
+ CoreParameter('${core}_param', 'core_getter', 'core_setter'),
+ ]
+
+ def __init__(self, *args, **kwargs):
+ super(TestDevice, self).__init__(*args, **kwargs)
+ self.value = None
+ self.core_values = defaultdict()
+
+ def getter(self):
+ return self.value
+
+ def setter(self, value):
+ if self.value is None:
+ self.value = value
+
+ def core_getter(self, core):
+ return self.core_values.get(core)
+
+ def core_setter(self, core, value):
+ self.core_values[core] = value
+
+
+class RuntimeParametersTest(TestCase):
+
+ def test_runtime_param(self):
+ device = _instantiate(TestDevice)
+ device.set_runtime_parameters(dict(test_param=5))
+ assert_equal(device.value, 5)
+ assert_equal(device.get_runtime_parameters().get('test_param'), 5)
+
+ def test_core_param(self):
+ device = _instantiate(TestDevice)
+ device.set_runtime_parameters(dict(a15_param=1, a7_param=2))
+ assert_equal(device.core_values, {'a15': 1, 'a7': 2})
+ assert_equal(device.get_runtime_parameters().get('a15_param'), 1)
+ assert_equal(device.get_runtime_parameters().get('a7_param'), 2)
+
+ @raises(ConfigError)
+ def test_bad_runtime_param(self):
+ device = _instantiate(TestDevice)
+ device.set_runtime_parameters(dict(bad_param=1))
+
+ def test_get_unset_runtime_params(self):
+ device = _instantiate(TestDevice)
+ expected = {'test_param': None, 'test_param2': None, 'a15_param': None, 'a7_param': None}
+ assert_equal(device.get_runtime_parameters(), expected)
+
+ def test_param_set_order(self):
+ device = _instantiate(TestDevice)
+ device.set_runtime_parameters(OrderedDict([('test_param2', 1), ('test_param', 5)]))
+ assert_equal(device.value, 1)
+ device.value = None
+ device.set_runtime_parameters(OrderedDict([('test_param', 5), ('test_param2', 1)]))
+ assert_equal(device.value, 5)
+
+
+def _instantiate(cls, *args, **kwargs):
+ # Needed to get around Extension's __init__ checks
+ return cls(*args, **kwargs)
+
diff --git a/wlauto/tests/test_diff.py b/wlauto/tests/test_diff.py
new file mode 100644
index 00000000..cc1683cc
--- /dev/null
+++ b/wlauto/tests/test_diff.py
@@ -0,0 +1,44 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=E0611
+# pylint: disable=R0201
+import os
+import tempfile
+from unittest import TestCase
+
+from nose.tools import assert_equal
+
+from wlauto.instrumentation.misc import _diff_interrupt_files
+
+
+class InterruptDiffTest(TestCase):
+
+ def test_interrupt_diff(self):
+ file_dir = os.path.join(os.path.dirname(__file__), 'data', 'interrupts')
+ before_file = os.path.join(file_dir, 'before')
+ after_file = os.path.join(file_dir, 'after')
+ expected_result_file = os.path.join(file_dir, 'result')
+ output_file = tempfile.mktemp()
+
+ _diff_interrupt_files(before_file, after_file, output_file)
+ with open(output_file) as fh:
+ output_diff = fh.read()
+ with open(expected_result_file) as fh:
+ expected_diff = fh.read()
+ assert_equal(output_diff, expected_diff)
+
+
diff --git a/wlauto/tests/test_execution.py b/wlauto/tests/test_execution.py
new file mode 100644
index 00000000..fb18f24b
--- /dev/null
+++ b/wlauto/tests/test_execution.py
@@ -0,0 +1,1035 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=E0611
+# pylint: disable=R0201
+# pylint: disable=protected-access
+# pylint: disable=abstract-method
+# pylint: disable=attribute-defined-outside-init
+# pylint: disable=no-member
+from unittest import TestCase
+from nose.tools import assert_equal, assert_raises, raises
+
+from wlauto.core.execution import BySpecRunner, ByIterationRunner
+from wlauto.exceptions import DeviceError
+from wlauto.core.configuration import WorkloadRunSpec, RebootPolicy
+from wlauto.core.instrumentation import Instrument
+from wlauto.core.device import Device
+from wlauto.core import instrumentation, signal
+from wlauto.core.workload import Workload
+from wlauto.core.result import IterationResult
+from wlauto.core.signal import Signal
+
+
+class SignalCatcher(Instrument):
+ name = 'Signal Catcher'
+
+ def __init__(self):
+ Instrument.__init__(self, None)
+ self.signals_received = []
+ for sig in signal.__dict__.values():
+ if isinstance(sig, Signal):
+ signal.connect(self.handler, sig)
+
+ def handler(self, *_, **kwargs):
+ self.signals_received.append(kwargs.pop('signal').name)
+
+
+class Mock(object):
+ def __init__(self):
+ self.__members = {}
+
+ def __getattr__(self, name):
+ if name not in self.__members:
+ self.__members[name] = Mock()
+ return self.__members[name]
+
+ def __call__(self, *args, **kwargs):
+ pass
+
+
+class BadDevice(Device):
+
+ def __init__(self, when_to_fail, exception=DeviceError):
+ #pylint: disable=super-init-not-called
+ self.when_to_fail = when_to_fail
+ self.exception = exception
+
+ def connect(self):
+ if 'connect' == self.when_to_fail:
+ raise self.exception("Connection failure")
+
+ def initialize(self, _):
+ if 'initialize' == self.when_to_fail:
+ raise self.exception("Initialisation failure")
+
+ def get_properties(self, _):
+ if 'get_properties' == self.when_to_fail:
+ raise self.exception("Failure getting properties")
+
+ def start(self):
+ if 'start' == self.when_to_fail:
+ raise self.exception("Start failure")
+
+ def set_device_parameters(self, **_):
+ if 'set_device_parameters' == self.when_to_fail:
+ raise self.exception("Failure setting parameter")
+
+ def stop(self):
+ if 'stop' == self.when_to_fail:
+ raise self.exception("Stop failure")
+
+ def disconnect(self):
+ if 'disconnect' == self.when_to_fail:
+ raise self.exception("Disconnection failure")
+
+ def ping(self):
+ return True
+
+
+class BadWorkload(Workload):
+
+ def __init__(self, exception, when_to_fail):
+ #pylint: disable=super-init-not-called
+ self.exception = exception
+ self.when_to_fail = when_to_fail
+
+ def setup(self, _):
+ if "setup" in self.when_to_fail:
+ raise self.exception("Setup failed")
+
+ def run(self, _):
+ if "run" in self.when_to_fail:
+ raise self.exception("Run failed")
+
+ def update_result(self, _):
+ if "update_result" in self.when_to_fail:
+ raise self.exception("Result update failed")
+
+ def teardown(self, _):
+ if "teardown" in self.when_to_fail:
+ raise self.exception("Teardown failed")
+
+
+class RunnerTest(TestCase):
+
+ errors = 0
+
+ def signal_check(self, expected_signals, workloads, reboot_policy="never", runner_class=BySpecRunner):
+ context = Mock()
+ context.reboot_policy = RebootPolicy(reboot_policy)
+ context.config.workload_specs = workloads
+
+ instrument = _instantiate(SignalCatcher)
+ instrumentation.install(instrument)
+
+ runner = runner_class(Mock(), context, Mock())
+ runner.init_queue(context.config.workload_specs)
+
+ try:
+ runner.run()
+ finally:
+ instrumentation.uninstall(instrument)
+
+ assert_equal(instrument.signals_received, expected_signals)
+
+ def test_single_run(self):
+ expected_signals = [
+ signal.RUN_START.name,
+ signal.RUN_INIT.name,
+ signal.WORKLOAD_SPEC_START.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.WORKLOAD_SPEC_END.name,
+ signal.RUN_FIN.name,
+ signal.BEFORE_OVERALL_RESULTS_PROCESSING.name,
+ signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING.name,
+ signal.AFTER_OVERALL_RESULTS_PROCESSING.name,
+ signal.RUN_END.name
+ ]
+ workloads = [WorkloadRunSpec(id='1', number_of_iterations=1, instrumentation=['Signal Catcher'])]
+ workloads[0]._workload = Mock()
+
+ self.signal_check(expected_signals, workloads)
+
+ def test_multiple_run_byspec(self):
+ expected_signals = [
+ signal.RUN_START.name,
+ signal.RUN_INIT.name,
+ signal.WORKLOAD_SPEC_START.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.WORKLOAD_SPEC_END.name,
+ signal.WORKLOAD_SPEC_START.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.WORKLOAD_SPEC_END.name,
+ signal.WORKLOAD_SPEC_START.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.WORKLOAD_SPEC_END.name,
+ signal.RUN_FIN.name,
+ signal.BEFORE_OVERALL_RESULTS_PROCESSING.name,
+ signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING.name,
+ signal.AFTER_OVERALL_RESULTS_PROCESSING.name,
+ signal.RUN_END.name
+ ]
+ workloads = [
+ WorkloadRunSpec(id='1', number_of_iterations=1, instrumentation=['Signal Catcher']),
+ WorkloadRunSpec(id='2', number_of_iterations=2, instrumentation=['Signal Catcher']),
+ WorkloadRunSpec(id='3', number_of_iterations=3, instrumentation=['Signal Catcher'])
+ ]
+ workloads[0]._workload = Mock()
+ workloads[1]._workload = Mock()
+ workloads[2]._workload = Mock()
+
+ self.signal_check(expected_signals, workloads)
+
+ def test_multiple_run_byiteration(self):
+ expected_signals = [
+ signal.RUN_START.name,
+ signal.RUN_INIT.name,
+ signal.WORKLOAD_SPEC_START.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.WORKLOAD_SPEC_END.name,
+ signal.WORKLOAD_SPEC_START.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.WORKLOAD_SPEC_END.name,
+ signal.WORKLOAD_SPEC_START.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.WORKLOAD_SPEC_END.name,
+ signal.WORKLOAD_SPEC_START.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.WORKLOAD_SPEC_END.name,
+ signal.WORKLOAD_SPEC_START.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.WORKLOAD_SPEC_END.name,
+ signal.RUN_FIN.name,
+ signal.BEFORE_OVERALL_RESULTS_PROCESSING.name,
+ signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING.name,
+ signal.AFTER_OVERALL_RESULTS_PROCESSING.name,
+ signal.RUN_END.name
+ ]
+ workloads = [
+ WorkloadRunSpec(id='1', number_of_iterations=1, instrumentation=['Signal Catcher']),
+ WorkloadRunSpec(id='2', number_of_iterations=2, instrumentation=['Signal Catcher']),
+ WorkloadRunSpec(id='3', number_of_iterations=3, instrumentation=['Signal Catcher']),
+ ]
+ workloads[0]._workload = Mock()
+ workloads[1]._workload = Mock()
+ workloads[2]._workload = Mock()
+
+ self.signal_check(expected_signals, workloads, runner_class=ByIterationRunner)
+
+ def test_reboot_policies(self):
+ expected_never = [
+ signal.RUN_START.name,
+ signal.RUN_INIT.name,
+ signal.WORKLOAD_SPEC_START.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.WORKLOAD_SPEC_END.name,
+ signal.RUN_FIN.name,
+ signal.BEFORE_OVERALL_RESULTS_PROCESSING.name,
+ signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING.name,
+ signal.AFTER_OVERALL_RESULTS_PROCESSING.name,
+ signal.RUN_END.name
+ ]
+
+ expected_initial = [
+ signal.RUN_START.name,
+ signal.BEFORE_INITIAL_BOOT.name,
+ signal.BEFORE_BOOT.name,
+ signal.SUCCESSFUL_BOOT.name,
+ signal.AFTER_BOOT.name,
+ signal.SUCCESSFUL_INITIAL_BOOT.name,
+ signal.AFTER_INITIAL_BOOT.name,
+ signal.RUN_INIT.name,
+ signal.WORKLOAD_SPEC_START.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.WORKLOAD_SPEC_END.name,
+ signal.RUN_FIN.name,
+ signal.BEFORE_OVERALL_RESULTS_PROCESSING.name,
+ signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING.name,
+ signal.AFTER_OVERALL_RESULTS_PROCESSING.name,
+ signal.RUN_END.name
+ ]
+
+ expected_each_spec = [
+ signal.RUN_START.name,
+ signal.BEFORE_INITIAL_BOOT.name,
+ signal.BEFORE_BOOT.name,
+ signal.SUCCESSFUL_BOOT.name,
+ signal.AFTER_BOOT.name,
+ signal.SUCCESSFUL_INITIAL_BOOT.name,
+ signal.AFTER_INITIAL_BOOT.name,
+ signal.RUN_INIT.name,
+ signal.WORKLOAD_SPEC_START.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.WORKLOAD_SPEC_END.name,
+ signal.BEFORE_BOOT.name,
+ signal.SUCCESSFUL_BOOT.name,
+ signal.AFTER_BOOT.name,
+ signal.WORKLOAD_SPEC_START.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.WORKLOAD_SPEC_END.name,
+ signal.RUN_FIN.name,
+ signal.BEFORE_OVERALL_RESULTS_PROCESSING.name,
+ signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING.name,
+ signal.AFTER_OVERALL_RESULTS_PROCESSING.name,
+ signal.RUN_END.name
+ ]
+
+ expected_each_iteration = [
+ signal.RUN_START.name,
+ signal.BEFORE_INITIAL_BOOT.name,
+ signal.BEFORE_BOOT.name,
+ signal.SUCCESSFUL_BOOT.name,
+ signal.AFTER_BOOT.name,
+ signal.SUCCESSFUL_INITIAL_BOOT.name,
+ signal.AFTER_INITIAL_BOOT.name,
+ signal.RUN_INIT.name,
+ signal.WORKLOAD_SPEC_START.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.WORKLOAD_SPEC_END.name,
+ signal.BEFORE_BOOT.name,
+ signal.SUCCESSFUL_BOOT.name,
+ signal.AFTER_BOOT.name,
+ signal.WORKLOAD_SPEC_START.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.BEFORE_BOOT.name,
+ signal.SUCCESSFUL_BOOT.name,
+ signal.AFTER_BOOT.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.WORKLOAD_SPEC_END.name,
+ signal.RUN_FIN.name,
+ signal.BEFORE_OVERALL_RESULTS_PROCESSING.name,
+ signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING.name,
+ signal.AFTER_OVERALL_RESULTS_PROCESSING.name,
+ signal.RUN_END.name
+ ]
+
+ workloads = [
+ WorkloadRunSpec(id='1', number_of_iterations=1, instrumentation=['Signal Catcher']),
+ WorkloadRunSpec(id='2', number_of_iterations=1, instrumentation=['Signal Catcher']),
+ WorkloadRunSpec(id='3', number_of_iterations=2, instrumentation=['Signal Catcher'])
+ ]
+ workloads[0]._workload = Mock()
+ workloads[1]._workload = Mock()
+ workloads[2]._workload = Mock()
+
+ self.signal_check(expected_never, workloads[0:1], reboot_policy="never")
+ self.signal_check(expected_initial, workloads[0:1], reboot_policy="initial")
+ self.signal_check(expected_each_spec, workloads[0:2], reboot_policy="each_spec")
+ self.signal_check(expected_each_iteration, workloads[1:3], reboot_policy="each_iteration")
+
+ def test_spec_skipping(self):
+ expected_signals = [
+ signal.RUN_START.name,
+ signal.RUN_INIT.name,
+ signal.WORKLOAD_SPEC_START.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.WORKLOAD_SPEC_END.name,
+ signal.RUN_FIN.name,
+ signal.BEFORE_OVERALL_RESULTS_PROCESSING.name,
+ signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING.name,
+ signal.AFTER_OVERALL_RESULTS_PROCESSING.name,
+ signal.RUN_END.name
+ ]
+
+ workloads = [
+ WorkloadRunSpec(id='1', number_of_iterations=5, instrumentation=['Signal Catcher']),
+ WorkloadRunSpec(id='2', number_of_iterations=1, instrumentation=['Signal Catcher']),
+ WorkloadRunSpec(id='3', number_of_iterations=4, instrumentation=['Signal Catcher'])
+ ]
+
+ workloads[0]._workload = Mock()
+ workloads[1]._workload = Mock()
+ workloads[2]._workload = Mock()
+ workloads[0].enabled = False
+ workloads[2].enabled = False
+
+ self.signal_check(expected_signals, workloads)
+
+ def test_bad_workload_status(self):
+ workloads = [
+ WorkloadRunSpec(id='1', number_of_iterations=2, instrumentation=['Signal Catcher']),
+ WorkloadRunSpec(id='2', number_of_iterations=2, instrumentation=['Signal Catcher']),
+ WorkloadRunSpec(id='3', number_of_iterations=2, instrumentation=['Signal Catcher']),
+ WorkloadRunSpec(id='4', number_of_iterations=2, instrumentation=['Signal Catcher']),
+ WorkloadRunSpec(id='5', number_of_iterations=2, instrumentation=['Signal Catcher'])
+ ]
+
+ workloads[0]._workload = BadWorkload(Exception, ["setup"])
+ workloads[1]._workload = BadWorkload(Exception, ["run"])
+ workloads[2]._workload = BadWorkload(Exception, ["update_result"])
+ workloads[3]._workload = BadWorkload(Exception, ["teardown"])
+ workloads[4]._workload = Mock()
+
+ context = Mock()
+ context.reboot_policy = RebootPolicy("never")
+ context.config.workload_specs = workloads
+
+ runner = BySpecRunner(Mock(), context, Mock())
+ runner.init_queue(context.config.workload_specs)
+
+ instrument = _instantiate(SignalCatcher)
+ instrumentation.install(instrument)
+
+ try:
+ runner.run()
+ finally:
+ instrumentation.uninstall(instrument)
+
+ # Check queue was handled correctly
+ assert_equal(len(runner.completed_jobs), 10)
+ assert_equal(len(runner.job_queue), 0)
+
+ # Check job statuses
+ expected_status = [
+ IterationResult.FAILED, IterationResult.SKIPPED,
+ IterationResult.FAILED, IterationResult.FAILED,
+ IterationResult.PARTIAL, IterationResult.PARTIAL,
+ IterationResult.NONCRITICAL, IterationResult.NONCRITICAL,
+ IterationResult.OK, IterationResult.OK
+ ]
+ for i in range(0, len(runner.completed_jobs)):
+ assert_equal(runner.completed_jobs[i].result.status, expected_status[i])
+
+ #Check signals were sent correctly
+ expected_signals = [
+ signal.RUN_START.name,
+ signal.RUN_INIT.name,
+ signal.WORKLOAD_SPEC_START.name, #Fail Setup
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.ITERATION_END.name,
+ #Skipped iteration
+ signal.WORKLOAD_SPEC_END.name,
+ signal.WORKLOAD_SPEC_START.name, #Fail Run
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ #signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name, - not sent because run failed
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ #signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name, - not sent because run failed
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.WORKLOAD_SPEC_END.name,
+ signal.WORKLOAD_SPEC_START.name, # Fail Result Update
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.WORKLOAD_SPEC_END.name,
+ signal.WORKLOAD_SPEC_START.name, # Fail Teardown
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.WORKLOAD_SPEC_END.name,
+ signal.WORKLOAD_SPEC_START.name, #OK
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.ITERATION_START.name,
+ signal.BEFORE_WORKLOAD_SETUP.name,
+ signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+ signal.AFTER_WORKLOAD_SETUP.name,
+ signal.BEFORE_WORKLOAD_EXECUTION.name,
+ signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+ signal.AFTER_WORKLOAD_EXECUTION.name,
+ signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+ signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+ signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+ signal.BEFORE_WORKLOAD_TEARDOWN.name,
+ signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+ signal.AFTER_WORKLOAD_TEARDOWN.name,
+ signal.ITERATION_END.name,
+ signal.WORKLOAD_SPEC_END.name,
+ signal.RUN_FIN.name,
+ signal.BEFORE_OVERALL_RESULTS_PROCESSING.name,
+ signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING.name,
+ signal.AFTER_OVERALL_RESULTS_PROCESSING.name,
+ signal.RUN_END.name
+ ]
+
+ assert_equal(expected_signals, instrument.signals_received)
+
+    def test_CTRL_C(self):
+        """Simulate the user hitting Ctrl-C (KeyboardInterrupt) in each workload
+        stage in turn, and verify that the runner aborts the run cleanly:
+        the job queue is drained, remaining iterations are marked ABORTED,
+        and only the wind-down signals for stages already entered are sent.
+        """
+        # One spec per stage; each raises KeyboardInterrupt from a different
+        # workload method (see BadWorkload assignments below).
+        workloads = [
+            WorkloadRunSpec(id='1', number_of_iterations=2, instrumentation=['Signal Catcher']),
+            WorkloadRunSpec(id='2', number_of_iterations=2, instrumentation=['Signal Catcher']),
+            WorkloadRunSpec(id='3', number_of_iterations=2, instrumentation=['Signal Catcher']),
+            WorkloadRunSpec(id='4', number_of_iterations=2, instrumentation=['Signal Catcher']),
+        ]
+
+        workloads[0]._workload = BadWorkload(KeyboardInterrupt, ["setup"])
+        workloads[1]._workload = BadWorkload(KeyboardInterrupt, ["run"])
+        workloads[2]._workload = BadWorkload(KeyboardInterrupt, ["update_result"])
+        workloads[3]._workload = BadWorkload(KeyboardInterrupt, ["teardown"])
+
+        # Both queued iterations end up ABORTED, whichever stage the
+        # interrupt came from.
+        expected_status = [IterationResult.ABORTED, IterationResult.ABORTED]
+
+        # Expected signal sequence per workload spec above (indexed by i in
+        # the loop below). Note the second iteration never starts.
+        expected_signals = [
+            [
+                # Interrupt in setup: no SUCCESSFUL_WORKLOAD_SETUP, no teardown.
+                signal.RUN_START.name,
+                signal.RUN_INIT.name,
+                signal.WORKLOAD_SPEC_START.name,
+                signal.ITERATION_START.name,
+                signal.BEFORE_WORKLOAD_SETUP.name,
+                signal.AFTER_WORKLOAD_SETUP.name,
+                signal.ITERATION_END.name,
+                signal.WORKLOAD_SPEC_END.name,
+                signal.RUN_FIN.name,
+                signal.BEFORE_OVERALL_RESULTS_PROCESSING.name,
+                signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING.name,
+                signal.AFTER_OVERALL_RESULTS_PROCESSING.name,
+                signal.RUN_END.name
+            ],
+            [
+                # Interrupt in run: execution unsuccessful, result update
+                # skipped, but teardown still happens.
+                signal.RUN_START.name,
+                signal.RUN_INIT.name,
+                signal.WORKLOAD_SPEC_START.name,
+                signal.ITERATION_START.name,
+                signal.BEFORE_WORKLOAD_SETUP.name,
+                signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+                signal.AFTER_WORKLOAD_SETUP.name,
+                signal.BEFORE_WORKLOAD_EXECUTION.name,
+                signal.AFTER_WORKLOAD_EXECUTION.name,
+                signal.BEFORE_WORKLOAD_TEARDOWN.name,
+                signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+                signal.AFTER_WORKLOAD_TEARDOWN.name,
+                signal.ITERATION_END.name,
+                signal.WORKLOAD_SPEC_END.name,
+                signal.RUN_FIN.name,
+                signal.BEFORE_OVERALL_RESULTS_PROCESSING.name,
+                signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING.name,
+                signal.AFTER_OVERALL_RESULTS_PROCESSING.name,
+                signal.RUN_END.name
+            ],
+            [
+                # Interrupt in update_result: no SUCCESSFUL_WORKLOAD_RESULT_UPDATE,
+                # teardown still happens.
+                signal.RUN_START.name,
+                signal.RUN_INIT.name,
+                signal.WORKLOAD_SPEC_START.name,
+                signal.ITERATION_START.name,
+                signal.BEFORE_WORKLOAD_SETUP.name,
+                signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+                signal.AFTER_WORKLOAD_SETUP.name,
+                signal.BEFORE_WORKLOAD_EXECUTION.name,
+                signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+                signal.AFTER_WORKLOAD_EXECUTION.name,
+                signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+                signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+                signal.BEFORE_WORKLOAD_TEARDOWN.name,
+                signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+                signal.AFTER_WORKLOAD_TEARDOWN.name,
+                signal.ITERATION_END.name,
+                signal.WORKLOAD_SPEC_END.name,
+                signal.RUN_FIN.name,
+                signal.BEFORE_OVERALL_RESULTS_PROCESSING.name,
+                signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING.name,
+                signal.AFTER_OVERALL_RESULTS_PROCESSING.name,
+                signal.RUN_END.name
+            ],
+            [
+                # Interrupt in teardown: everything up to teardown succeeded;
+                # no SUCCESSFUL_WORKLOAD_TEARDOWN.
+                signal.RUN_START.name,
+                signal.RUN_INIT.name,
+                signal.WORKLOAD_SPEC_START.name,
+                signal.ITERATION_START.name,
+                signal.BEFORE_WORKLOAD_SETUP.name,
+                signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+                signal.AFTER_WORKLOAD_SETUP.name,
+                signal.BEFORE_WORKLOAD_EXECUTION.name,
+                signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
+                signal.AFTER_WORKLOAD_EXECUTION.name,
+                signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+                signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
+                signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+                signal.BEFORE_WORKLOAD_TEARDOWN.name,
+                signal.AFTER_WORKLOAD_TEARDOWN.name,
+                signal.ITERATION_END.name,
+                signal.WORKLOAD_SPEC_END.name,
+                signal.RUN_FIN.name,
+                signal.BEFORE_OVERALL_RESULTS_PROCESSING.name,
+                signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING.name,
+                signal.AFTER_OVERALL_RESULTS_PROCESSING.name,
+                signal.RUN_END.name
+            ],
+        ]
+
+        # Each spec gets its own fresh runner/context/instrument so the
+        # recorded signal sequences do not bleed into each other.
+        for i in xrange(0, len(workloads)):
+            context = Mock()
+            context.reboot_policy = RebootPolicy("never")
+            context.config.workload_specs = [workloads[i]]
+
+            runner = BySpecRunner(Mock(), context, Mock())
+            runner.init_queue(context.config.workload_specs)
+
+            instrument = _instantiate(SignalCatcher)
+            instrumentation.install(instrument)
+
+            # Always uninstall, even if the runner re-raises, so the next
+            # loop iteration starts with clean instrumentation.
+            try:
+                runner.run()
+            finally:
+                instrumentation.uninstall(instrument)
+
+            # Check queue was handled correctly: both queued iterations were
+            # consumed (completed or aborted), nothing left pending.
+            assert_equal(len(runner.completed_jobs), 2)
+            assert_equal(len(runner.job_queue), 0)
+
+            # Check the correct signals were sent for this abort point.
+            assert_equal(expected_signals[i], instrument.signals_received)
+
+            # Check job statuses: all remaining jobs were marked ABORTED.
+            for j in range(0, len(runner.completed_jobs)):
+                assert_equal(runner.completed_jobs[j].result.status, expected_status[j])
+
+    def test_no_teardown_after_setup_fail(self):
+        """If a workload's setup() raises, teardown() must not be invoked:
+        no teardown signals may appear after the failed setup."""
+        expected_signals = [
+            signal.RUN_START.name,
+            signal.RUN_INIT.name,
+            signal.WORKLOAD_SPEC_START.name,
+            signal.ITERATION_START.name,
+            signal.BEFORE_WORKLOAD_SETUP.name,
+            # No SUCCESSFUL_WORKLOAD_SETUP (setup raised) and no teardown
+            # signals at all.
+            signal.AFTER_WORKLOAD_SETUP.name,
+            signal.ITERATION_END.name,
+            signal.WORKLOAD_SPEC_END.name,
+            signal.RUN_FIN.name,
+            signal.BEFORE_OVERALL_RESULTS_PROCESSING.name,
+            signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING.name,
+            signal.AFTER_OVERALL_RESULTS_PROCESSING.name,
+            signal.RUN_END.name
+        ]
+
+        workloads = [WorkloadRunSpec(id='1', number_of_iterations=1, instrumentation=['Signal Catcher'])]
+        workloads[0]._workload = BadWorkload(Exception, ["setup"])
+
+        self.signal_check(expected_signals, workloads)
+
+    def test_teardown_on_run_and_result_update_fail(self):
+        """Even when both run() and update_result() raise, teardown() must
+        still be executed so the device is left in a clean state."""
+        expected_signals = [
+            signal.RUN_START.name,
+            signal.RUN_INIT.name,
+            signal.WORKLOAD_SPEC_START.name,
+            signal.ITERATION_START.name,
+            signal.BEFORE_WORKLOAD_SETUP.name,
+            signal.SUCCESSFUL_WORKLOAD_SETUP.name,
+            signal.AFTER_WORKLOAD_SETUP.name,
+            signal.BEFORE_WORKLOAD_EXECUTION.name,
+            # run() raised: no SUCCESSFUL_WORKLOAD_EXECUTION.
+            signal.AFTER_WORKLOAD_EXECUTION.name,
+            signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
+            # update_result() raised: no SUCCESSFUL_WORKLOAD_RESULT_UPDATE.
+            signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
+            # Teardown runs regardless, and succeeds.
+            signal.BEFORE_WORKLOAD_TEARDOWN.name,
+            signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
+            signal.AFTER_WORKLOAD_TEARDOWN.name,
+            signal.ITERATION_END.name,
+            signal.WORKLOAD_SPEC_END.name,
+            signal.RUN_FIN.name,
+            signal.BEFORE_OVERALL_RESULTS_PROCESSING.name,
+            signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING.name,
+            signal.AFTER_OVERALL_RESULTS_PROCESSING.name,
+            signal.RUN_END.name
+        ]
+        workloads = [WorkloadRunSpec(id='1', number_of_iterations=1, instrumentation=['Signal Catcher'])]
+        workloads[0]._workload = BadWorkload(Exception, ["run", "update_result"])
+
+        self.signal_check(expected_signals, workloads)
+
+    def bad_device(self, method):
+        """Run a single mocked workload on a BadDevice that raises DeviceError
+        from the named device ``method``. Any error the runner does not absorb
+        propagates to the caller, which is what the test_bad_* tests check."""
+        workloads = [WorkloadRunSpec(id='1', number_of_iterations=1, instrumentation=[])]
+        workloads[0]._workload = Mock()
+
+        context = Mock()
+        context.reboot_policy = RebootPolicy("never")
+        context.config.workload_specs = workloads
+
+        runner = BySpecRunner(BadDevice(method), context, Mock())
+        runner.init_queue(context.config.workload_specs)
+        runner.run()
+
+ @raises(DeviceError)
+ def test_bad_connect(self):
+ assert_raises(DeviceError, self.bad_device('connect'))
+
+ @raises(DeviceError)
+ def test_bad_initialize(self):
+ assert_raises(DeviceError, self.bad_device('initialize'))
+
+    def test_bad_start(self):
+        """Errors from Device.start() are non-fatal for the run."""
+        self.bad_device('start')  # error must not propagate
+
+    def test_bad_stop(self):
+        """Errors from Device.stop() are non-fatal for the run."""
+        self.bad_device('stop')  # error must not propagate
+
+    def test_bad_disconnect(self):
+        """Errors from Device.disconnect() are non-fatal for the run."""
+        self.bad_device('disconnect')  # error must not propagate
+
+ @raises(DeviceError)
+ def test_bad_get_properties(self):
+ assert_raises(DeviceError, self.bad_device('get_properties'))
+
+
+def _instantiate(cls, *args, **kwargs):
+    # Instantiate directly, bypassing the extension loader; needed to get
+    # around Extension's __init__ checks.
+    return cls(*args, **kwargs)
+
diff --git a/wlauto/tests/test_extension.py b/wlauto/tests/test_extension.py
new file mode 100644
index 00000000..41794f93
--- /dev/null
+++ b/wlauto/tests/test_extension.py
@@ -0,0 +1,286 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=E0611,R0201,E1101
+from unittest import TestCase
+
+from nose.tools import assert_equal, raises, assert_true
+
+from wlauto.core.extension import Extension, Parameter, Param, ExtensionMeta, Module
+from wlauto.utils.types import list_of_ints
+from wlauto.exceptions import ConfigError
+
+
+class MyMeta(ExtensionMeta):
+    # Metaclass registering two extra virtual methods so the tests can verify
+    # that virtuals propagate down the inheritance chain.
+
+    virtual_methods = ['validate', 'virtual1', 'virtual2']
+
+
+class MyBaseExtension(Extension):
+    # Root fixture: one parameter ('base') and counters recording how many
+    # times each virtual was invoked (v1/v2) and which class ran it (v3).
+
+    __metaclass__ = MyMeta
+
+    name = 'base'
+
+    parameters = [
+        Parameter('base'),
+    ]
+
+    def __init__(self, **kwargs):
+        super(MyBaseExtension, self).__init__(**kwargs)
+        self.v1 = 0   # base virtual1 invocation count
+        self.v2 = 0   # base virtual2 invocation count
+        self.v3 = ''  # name of the class whose virtual1 ran last
+
+    def virtual1(self):
+        self.v1 += 1
+        self.v3 = 'base'
+
+    def virtual2(self):
+        self.v2 += 1
+
+
+class MyAcidExtension(MyBaseExtension):
+    # Subclass fixture exercising the three accepted parameter spec forms:
+    # a full Parameter, a bare name string, and a (name, kind) tuple.
+
+    name = 'acid'
+
+    parameters = [
+        Parameter('hydrochloric', kind=list_of_ints, default=[1, 2]),
+        'citric',
+        ('carbonic', int),
+    ]
+
+    def __init__(self, **kwargs):
+        super(MyAcidExtension, self).__init__(**kwargs)
+        self.vv1 = 0  # subclass virtual1 invocation count
+        self.vv2 = 0  # subclass virtual2 invocation count
+
+    def virtual1(self):
+        self.vv1 += 1
+        self.v3 = 'acid'
+
+    def virtual2(self):
+        self.vv2 += 1
+
+
+class MyOtherExtension(MyBaseExtension):
+    # Fixture with a mandatory parameter and a restricted-choice parameter,
+    # used by the validation tests below.
+
+    name = 'other'
+
+    parameters = [
+        Param('mandatory', mandatory=True),
+        Param('optional', allowed_values=['test', 'check']),
+    ]
+
+class MyOtherOtherExtension(MyOtherExtension):
+    # Overrides 'mandatory' without re-declaring it mandatory; instantiation
+    # is still expected to fail validation (see
+    # test_validation_no_mandatory_in_derived).
+
+    name = 'otherother'
+
+    parameters = [
+        Param('mandatory', override=True),
+    ]
+
+
+class MyOverridingExtension(MyAcidExtension):
+    # Overrides an inherited parameter's default value.
+
+    name = 'overriding'
+
+    parameters = [
+        Parameter('hydrochloric', override=True, default=[3, 4]),
+    ]
+
+
+class MyThirdTeerExtension(MyOverridingExtension):
+    # Third-tier subclass: checks that overridden defaults survive a further
+    # level of inheritance.
+    # NOTE(review): 'Teer' is a typo for 'Tier'; renaming would also require
+    # updating ParametersTest.test_default_override, so it is left as-is here.
+
+    name = 'thirdteer'
+
+
+class MultiValueParamExt(Extension):
+    # Fixture whose list-valued parameter constrains each element via
+    # allowed_values.
+
+    name = 'multivalue'
+
+    parameters = [
+        Parameter('test', kind=list_of_ints, allowed_values=[42, 7, 73]),
+    ]
+
+
+class MyCoolModule(Module):
+    # Module keeping its fizzle count in its own state.
+
+    name = 'cool_module'
+
+    capabilities = ['fizzle']
+
+    def initialize(self):
+        self.fizzle_factor = 0  # pylint: disable=attribute-defined-outside-init
+
+    def fizzle(self):
+        self.fizzle_factor += 1
+
+
+class MyEvenCoolerModule(Module):
+    # Module that mutates state on its *owner* extension instead of itself.
+
+    name = 'even_cooler_module'
+
+    capabilities = ['fizzle']
+
+    def fizzle(self):
+        self.owner.self_fizzle_factor += 2
+
+
+class MyModularExtension(Extension):
+    # Extension that loads a single module by overriding the inherited
+    # 'modules' parameter's default.
+
+    name = 'modular'
+
+    parameters = [
+        Parameter('modules', override=True, default=['cool_module']),
+    ]
+
+
+class MyOtherModularExtension(Extension):
+    # Extension loading two modules that both provide 'fizzle'; the later
+    # one (even_cooler_module) is expected to win (see test_self_fizzle).
+
+    name = 'other_modular'
+
+    parameters = [
+        Parameter('modules', override=True, default=[
+            'cool_module',
+            'even_cooler_module',
+        ]),
+    ]
+
+    def __init__(self, **kwargs):
+        super(MyOtherModularExtension, self).__init__(**kwargs)
+        self.self_fizzle_factor = 0  # incremented by MyEvenCoolerModule.fizzle
+
+
+class FakeLoader(object):
+    # Minimal stand-in for the extension loader: resolves module names
+    # against a fixed list instead of scanning the filesystem.
+
+    modules = [
+        MyCoolModule,
+        MyEvenCoolerModule,
+    ]
+
+    def get_module(self, name, owner, **kwargs):  # pylint: disable=unused-argument
+        # Returns None for unknown names, mirroring a failed lookup.
+        for module in self.modules:
+            if module.name == name:
+                return _instantiate(module, owner)
+
+
+class ExtensionMetaTest(TestCase):
+    """Tests for ExtensionMeta behaviour: parameter propagation/overriding
+    across the inheritance chain, and virtual method dispatch."""
+
+    def test_propagation(self):
+        # Parameters accumulate down the hierarchy; 'modules' comes from the
+        # Extension base class, 'base' from MyBaseExtension, the rest from
+        # MyAcidExtension, in declaration order.
+        acid_params = [p.name for p in MyAcidExtension.parameters]
+        assert_equal(acid_params, ['modules', 'base', 'hydrochloric', 'citric', 'carbonic'])
+
+    @raises(ValueError)
+    def test_duplicate_param_spec(self):
+        # Re-declaring an inherited parameter without override=True is an error.
+        class BadExtension(MyBaseExtension):  # pylint: disable=W0612
+            parameters = [
+                Parameter('base'),
+            ]
+
+    def test_param_override(self):
+        # With override=True the subclass may replace the inherited default.
+        class OverridingExtension(MyBaseExtension):  # pylint: disable=W0612
+            parameters = [
+                Parameter('base', override=True, default='cheese'),
+            ]
+        assert_equal(OverridingExtension.parameters['base'].default, 'cheese')
+
+    @raises(ValueError)
+    def test_invalid_param_spec(self):
+        # Anything that is not a Parameter/str/tuple spec is rejected.
+        class BadExtension(MyBaseExtension):  # pylint: disable=W0612
+            parameters = [
+                7,
+            ]
+
+    def test_virtual_methods(self):
+        # Calling a virtual once runs both the base and subclass versions
+        # (v*/vv* counters each advance together).
+        acid = _instantiate(MyAcidExtension)
+        acid.virtual1()
+        assert_equal(acid.v1, 1)
+        assert_equal(acid.vv1, 1)
+        assert_equal(acid.v2, 0)
+        assert_equal(acid.vv2, 0)
+        # v3 records the subclass as the last virtual1 implementation run.
+        assert_equal(acid.v3, 'acid')
+        acid.virtual2()
+        acid.virtual2()
+        assert_equal(acid.v1, 1)
+        assert_equal(acid.vv1, 1)
+        assert_equal(acid.v2, 2)
+        assert_equal(acid.vv2, 2)
+
+
+class ParametersTest(TestCase):
+    """Tests for parameter kind coercion, defaults, overrides and validation."""
+
+    def test_setting(self):
+        myext = _instantiate(MyAcidExtension, hydrochloric=[5, 6], citric=5, carbonic=42)
+        assert_equal(myext.hydrochloric, [5, 6])
+        # 'citric' was declared as a bare name, so its value is coerced to a
+        # string; 'carbonic' was declared with kind=int and stays an int.
+        assert_equal(myext.citric, '5')
+        assert_equal(myext.carbonic, 42)
+
+    def test_validation_ok(self):
+        myext = _instantiate(MyOtherExtension, mandatory='check', optional='check')
+        myext.validate()
+
+    def test_default_override(self):
+        # Overridden default applies to the overriding class and survives a
+        # further level of inheritance (MyThirdTeerExtension).
+        myext = _instantiate(MyOverridingExtension)
+        assert_equal(myext.hydrochloric, [3, 4])
+        myotherext = _instantiate(MyThirdTeerExtension)
+        assert_equal(myotherext.hydrochloric, [3, 4])
+
+    def test_multivalue_param(self):
+        # Every element of a list-kind parameter must be in allowed_values.
+        myext = _instantiate(MultiValueParamExt, test=[7, 42])
+        myext.validate()
+        assert_equal(myext.test, [7, 42])
+
+    @raises(ConfigError)
+    def test_bad_multivalue_param(self):
+        # 5 is not in allowed_values ([42, 7, 73]).
+        myext = _instantiate(MultiValueParamExt, test=[5])
+        myext.validate()
+
+    @raises(ConfigError)
+    def test_validation_no_mandatory(self):
+        myext = _instantiate(MyOtherExtension, optional='check')
+        myext.validate()
+
+    @raises(ConfigError)
+    def test_validation_no_mandatory_in_derived(self):
+        # Mandatory-ness carries through the derived class's override.
+        _instantiate(MyOtherOtherExtension)
+
+    @raises(ConfigError)
+    def test_validation_bad_value(self):
+        # 'invalid' is outside the optional parameter's allowed_values.
+        myext = _instantiate(MyOtherExtension, mandatory=1, optional='invalid')
+        myext.validate()
+
+
+class ModuleTest(TestCase):
+    """Tests for module loading and capability dispatch on extensions."""
+
+    def test_fizzle(self):
+        # A loaded module exposes its capability through the owning extension.
+        myext = _instantiate(MyModularExtension)
+        myext.load_modules(FakeLoader())
+        assert_true(myext.can('fizzle'))
+        myext.fizzle()
+        assert_equal(myext.fizzle_factor, 1)
+
+    def test_self_fizzle(self):
+        # With two modules providing 'fizzle', the later one in the modules
+        # list (even_cooler_module, which mutates owner state) takes effect.
+        myext = _instantiate(MyOtherModularExtension)
+        myext.load_modules(FakeLoader())
+        myext.fizzle()
+        assert_equal(myext.self_fizzle_factor, 2)
+
+
+def _instantiate(cls, *args, **kwargs):
+    # Instantiate directly, bypassing the extension loader; needed to get
+    # around Extension's __init__ checks.
+    return cls(*args, **kwargs)
+
diff --git a/wlauto/tests/test_extension_loader.py b/wlauto/tests/test_extension_loader.py
new file mode 100644
index 00000000..7db4c73b
--- /dev/null
+++ b/wlauto/tests/test_extension_loader.py
@@ -0,0 +1,51 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=E0611,R0201
+import os
+from unittest import TestCase
+
+from nose.tools import assert_equal, assert_greater
+
+from wlauto.core.extension_loader import ExtensionLoader
+
+
+EXTDIR = os.path.join(os.path.dirname(__file__), 'data', 'extensions')
+
+
+class ExtensionLoaderTest(TestCase):
+    """Tests for ExtensionLoader discovery, using the extensions bundled under
+    tests/data/extensions (which provides a single 'test-device' device)."""
+
+    def test_load_device(self):
+        # load_defaults=False restricts discovery to the test data directory.
+        loader = ExtensionLoader(paths=[EXTDIR, ], load_defaults=False)
+        device = loader.get_device('test-device')
+        assert_equal(device.name, 'test-device')
+
+    def test_list_by_kind(self):
+        loader = ExtensionLoader(paths=[EXTDIR, ], load_defaults=False)
+        exts = loader.list_devices()
+        assert_equal(len(exts), 1)
+        assert_equal(exts[0].name, 'test-device')
+
+    def test_clear_and_reload(self):
+        # Default loader sees the bundled WA devices; after clear() + update()
+        # with only the test path, just the single test extension remains.
+        loader = ExtensionLoader()
+        assert_greater(len(loader.list_devices()), 1)
+        loader.clear()
+        loader.update(paths=[EXTDIR, ])
+        devices = loader.list_devices()
+        assert_equal(len(devices), 1)
+        assert_equal(devices[0].name, 'test-device')
+        assert_equal(len(loader.list_extensions()), 1)
+
diff --git a/wlauto/tests/test_instrumentation.py b/wlauto/tests/test_instrumentation.py
new file mode 100644
index 00000000..6dd22689
--- /dev/null
+++ b/wlauto/tests/test_instrumentation.py
@@ -0,0 +1,226 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0231,W0613,E0611,W0603,R0201
+from unittest import TestCase
+
+from nose.tools import assert_equal, raises, assert_true, assert_false
+
+from wlauto import Instrument
+from wlauto.core import signal, instrumentation
+from wlauto.instrumentation import instrument_is_installed, clear_instrumentation
+
+
+class MockInstrument(Instrument):
+    # Counts invocations of the before/after workload-execution callbacks.
+
+    name = 'mock'
+
+    def __init__(self):
+        Instrument.__init__(self, None)
+        self.before = 0
+        self.after = 0
+
+    def before_workload_execution(self, context):
+        self.before += 1
+
+    def after_workload_execution(self, context):
+        self.after += 1
+
+
+class MockInstrument2(Instrument):
+    # Like MockInstrument, but also counts workload-result-update callbacks.
+
+    name = 'mock_2'
+
+    def __init__(self):
+        Instrument.__init__(self, None)
+        self.before = 0
+        self.after = 0
+        self.result = 0
+
+    def before_workload_execution(self, context):
+        self.before += 1
+
+    def after_workload_execution(self, context):
+        self.after += 1
+
+    def after_workload_result_update(self, context):
+        self.result += 1
+
+
+class MockInstrument3(Instrument):
+    # Callback with a 'slow_' priority prefix; bumps the module-level counter.
+
+    name = 'mock_3'
+
+    def __init__(self):
+        Instrument.__init__(self, None)
+
+    def slow_before_workload_execution(self, context):
+        global counter
+        counter += 1
+
+
+class MockInstrument4(Instrument):
+    # 'slow_' priority callback: overwrites counter with 4. Together with
+    # mock_5/mock_6 it pins the callback ordering (see
+    # test_priority_prefix_instrument: 4 * 10 + 2 == 42 requires
+    # slow -> unprefixed -> fast for this 'before' signal).
+
+    name = 'mock_4'
+
+    def __init__(self):
+        Instrument.__init__(self, None)
+
+    def slow_before_first_iteration_boot(self, context):
+        global counter
+        counter = 4
+
+
+class MockInstrument5(Instrument):
+    # 'fast_' priority callback: adds 2 to counter.
+
+    name = 'mock_5'
+
+    def __init__(self):
+        Instrument.__init__(self, None)
+
+    def fast_before_first_iteration_boot(self, context):
+        global counter
+        counter += 2
+
+
+class MockInstrument6(Instrument):
+    # Unprefixed (default-priority) callback: multiplies counter by 10.
+
+    name = 'mock_6'
+
+    def __init__(self):
+        Instrument.__init__(self, None)
+
+    def before_first_iteration_boot(self, context):
+        global counter
+        counter *= 10
+
+
+class BadInstrument(Instrument):
+    # Deliberately broken: teardown() omits the required context argument,
+    # which install() must reject (see test_bad_argspec).
+
+    name = 'bad'
+
+    def __init__(self):
+        pass
+
+    # Not specifying the context argument.
+    def teardown(self):
+        pass
+
+
+counter = 0
+
+
+class InstrumentationTest(TestCase):
+    """Tests for instrument installation, enable/disable filtering, callback
+    priority prefixes, and argspec checking."""
+
+    def tearDown(self):
+        # Reset global instrumentation state between tests.
+        clear_instrumentation()
+
+    def test_install(self):
+        # Installed instruments receive exactly the signals they declare
+        # handlers for.
+        instrument = _instantiate(MockInstrument)
+        instrument2 = _instantiate(MockInstrument2)
+        instrumentation.install(instrument)
+        instrumentation.install(instrument2)
+        signal.send(signal.BEFORE_WORKLOAD_EXECUTION, self, context=None)
+        signal.send(signal.AFTER_WORKLOAD_EXECUTION, self, context=None)
+        signal.send(signal.AFTER_WORKLOAD_RESULT_UPDATE, self, context=None)
+        assert_equal(instrument.before, 1)
+        assert_equal(instrument.after, 1)
+        assert_equal(instrument2.before, 1)
+        assert_equal(instrument2.after, 1)
+        assert_equal(instrument2.result, 1)
+
+    def test_enable_disable(self):
+        instrument = _instantiate(MockInstrument)
+        instrument2 = _instantiate(MockInstrument2)
+        instrumentation.install(instrument)
+        instrumentation.install(instrument2)
+
+        # While disabled, no callbacks fire.
+        instrumentation.disable_all()
+        signal.send(signal.BEFORE_WORKLOAD_EXECUTION, self, context=None)
+        signal.send(signal.AFTER_WORKLOAD_EXECUTION, self, context=None)
+        signal.send(signal.AFTER_WORKLOAD_RESULT_UPDATE, self, context=None)
+        assert_equal(instrument.before, 0)
+        assert_equal(instrument.after, 0)
+        assert_equal(instrument2.before, 0)
+        assert_equal(instrument2.after, 0)
+        assert_equal(instrument2.result, 0)
+
+        # Re-enabling a single instrument affects only that instrument.
+        instrumentation.enable(instrument)
+        signal.send(signal.BEFORE_WORKLOAD_EXECUTION, self, context=None)
+        signal.send(signal.AFTER_WORKLOAD_EXECUTION, self, context=None)
+        signal.send(signal.AFTER_WORKLOAD_RESULT_UPDATE, self, context=None)
+        assert_equal(instrument.before, 1)
+        assert_equal(instrument.after, 1)
+        assert_equal(instrument2.before, 0)
+        assert_equal(instrument2.after, 0)
+        assert_equal(instrument2.result, 0)
+
+        # enable_all() restores everyone.
+        instrumentation.enable_all()
+        signal.send(signal.BEFORE_WORKLOAD_EXECUTION, self, context=None)
+        signal.send(signal.AFTER_WORKLOAD_EXECUTION, self, context=None)
+        signal.send(signal.AFTER_WORKLOAD_RESULT_UPDATE, self, context=None)
+        assert_equal(instrument.before, 2)
+        assert_equal(instrument.after, 2)
+        assert_equal(instrument2.before, 1)
+        assert_equal(instrument2.after, 1)
+        assert_equal(instrument2.result, 1)
+
+    def test_local_instrument(self):
+        global counter
+        counter = 0
+        self.install_local_instrument()
+        signal.send(signal.BEFORE_WORKLOAD_EXECUTION, self, context=None)
+        assert_equal(counter, 1)
+
+    def test_priority_prefix_instrument(self):
+        # For this 'before' signal the callbacks must run slow (=4), then
+        # unprefixed (*10), then fast (+2): 4 * 10 + 2 == 42. Any other
+        # ordering produces a different value.
+        global counter
+        counter = 0
+        instrument1 = _instantiate(MockInstrument4)
+        instrument2 = _instantiate(MockInstrument5)
+        instrument3 = _instantiate(MockInstrument6)
+        instrumentation.install(instrument1)
+        instrumentation.install(instrument2)
+        instrumentation.install(instrument3)
+        signal.send(signal.BEFORE_FIRST_ITERATION_BOOT, self, context=None)
+        assert_equal(counter, 42)
+
+    @raises(ValueError)
+    def test_bad_argspec(self):
+        # BadInstrument.teardown() lacks the context parameter.
+        instrument = _instantiate(BadInstrument)
+        instrumentation.install(instrument)
+
+    def test_check_installed(self):
+        # instrument_is_installed accepts both a name and a class.
+        instrumentation.install(_instantiate(MockInstrument))
+        assert_true(instrument_is_installed('mock'))
+        assert_true(instrument_is_installed(MockInstrument))
+        assert_false(instrument_is_installed(MockInstrument2))
+
+    def install_local_instrument(self):
+        # Helper (not a test): installs MockInstrument3 from a nested scope.
+        instrument = _instantiate(MockInstrument3)
+        instrumentation.install(instrument)
+
+    @raises(ValueError)
+    def test_duplicate_install(self):
+        # Two instances of the same instrument name cannot both be installed.
+        instrument = _instantiate(MockInstrument)
+        instrument2 = _instantiate(MockInstrument)
+        instrumentation.install(instrument)
+        instrumentation.install(instrument2)
+
+
+def _instantiate(cls):
+    # Instantiate directly, bypassing the extension loader; needed to get
+    # around Extension's __init__ checks.
+    return cls()
+
diff --git a/wlauto/tests/test_results_manager.py b/wlauto/tests/test_results_manager.py
new file mode 100644
index 00000000..955b8655
--- /dev/null
+++ b/wlauto/tests/test_results_manager.py
@@ -0,0 +1,130 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0231,W0613,E0611,W0603,R0201
+from unittest import TestCase
+
+from nose.tools import assert_equal, assert_true, assert_false, assert_raises
+
+from wlauto.core.result import ResultProcessor, ResultManager
+from wlauto.exceptions import WAError
+
+
+class MockResultProcessor1(ResultProcessor):
+    # Raises a generic Exception from both processing hooks; the manager is
+    # expected to absorb it and continue with the remaining processors.
+
+    name = 'result_processor_with_exception'
+
+    def process_iteration_result(self, result, context):
+        raise Exception()
+
+    def process_run_result(self, result, context):
+        raise Exception()
+
+
+class MockResultProcessor2(ResultProcessor):
+    # Raises a WAError from both processing hooks; also expected to be
+    # absorbed by the manager.
+
+    name = 'result_processor_with_wa_error'
+
+    def process_iteration_result(self, result, context):
+        raise WAError()
+
+    def process_run_result(self, result, context):
+        raise WAError()
+
+
+class MockResultProcessor3(ResultProcessor):
+
+ name = 'result_processor_with_keybaord_interrupt'
+
+ def process_iteration_result(self, result, context):
+ raise KeyboardInterrupt()
+
+ def process_run_result(self, result, context):
+ raise KeyboardInterrupt()
+
+
+class MockResultProcessor4(ResultProcessor):
+    # Well-behaved processor; records that its hooks were actually invoked so
+    # the tests can confirm processing continued past the failing processors.
+
+    name = 'result_processor'
+
+    def __init__(self):
+        super(MockResultProcessor4, self).__init__()
+        self.is_invoked = False
+
+    def process_iteration_result(self, result, context):
+        self.is_invoked = True
+
+    def process_run_result(self, result, context):
+        self.is_invoked = True
+
+
+class ResultManagerTest(TestCase):
+    """Tests that ResultManager absorbs processor errors but lets
+    KeyboardInterrupt propagate, and keeps invoking later processors after an
+    earlier one fails."""
+
+    def test_keyboard_interrupt(self):
+        processor_keyboard_interrupt = _instantiate(MockResultProcessor3)
+
+        # create an empty result manager
+        manager = ResultManager()
+        assert_false(manager.processors)
+
+        # adding the results processor to the result manager
+        manager.install(processor_keyboard_interrupt)
+
+        # KeyboardInterrupt must NOT be swallowed like other errors.
+        assert_equal(len(manager.processors), 1)
+        assert_raises(KeyboardInterrupt, manager.add_result, None, None)
+
+    def test_add_result(self):
+        processor_generic_exception = _instantiate(MockResultProcessor1)
+        processor_wa_error = _instantiate(MockResultProcessor2)
+        processor = _instantiate(MockResultProcessor4)
+
+        # create an empty result manager
+        manager = ResultManager()
+        assert_false(manager.processors)
+
+        # adding the results processors to the result manager
+        manager.install(processor_generic_exception)
+        manager.install(processor_wa_error)
+        manager.install(processor)
+
+        # The first two processors raise, but the third must still run.
+        assert_equal(len(manager.processors), 3)
+        manager.add_result(None, None)
+
+        assert_true(processor.is_invoked)
+
+    def test_process_results(self):
+        processor_generic_exception = _instantiate(MockResultProcessor1)
+        processor_wa_error = _instantiate(MockResultProcessor2)
+        processor = _instantiate(MockResultProcessor4)
+
+        # create an empty result manager
+        manager = ResultManager()
+        assert_false(manager.processors)
+
+        # adding the results processors to the result manager
+        manager.install(processor_generic_exception)
+        manager.install(processor_wa_error)
+        manager.install(processor)
+
+        # The first two processors raise, but the third must still run.
+        assert_equal(len(manager.processors), 3)
+        manager.process_run_result(None, None)
+
+        assert_true(processor.is_invoked)
+
+
+def _instantiate(cls):
+    # Instantiate directly, bypassing the extension loader; needed to get
+    # around Extension's __init__ checks.
+    return cls()
diff --git a/wlauto/tests/test_utils.py b/wlauto/tests/test_utils.py
new file mode 100644
index 00000000..f201589f
--- /dev/null
+++ b/wlauto/tests/test_utils.py
@@ -0,0 +1,63 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=R0201
+from unittest import TestCase
+
+from nose.tools import raises, assert_equal # pylint: disable=E0611
+
+from wlauto.utils.android import check_output
+from wlauto.utils.misc import merge_dicts, TimeoutError
+
+
+class TestCheckOutput(TestCase):
+    """Tests the timeout behaviour of wlauto.utils.android.check_output."""
+
+    def test_ok(self):
+        # Completes within the timeout: no error.
+        check_output("python -c 'import time; time.sleep(0.1)'", timeout=0.5, shell=True)
+
+    @raises(TimeoutError)
+    def test_bad(self):
+        # Sleeps past the timeout: TimeoutError expected.
+        check_output("python -c 'import time; time.sleep(1)'", timeout=0.5, shell=True)
+
+
+class TestMerge(TestCase):
+    """Tests for wlauto.utils.misc.merge_dicts: recursive dict merging,
+    list concatenation/deduplication and type checking."""
+
+    def test_dict_merge(self):
+        # Nested dicts merge recursively; 'other' wins on key conflicts,
+        # keys unique to either side are preserved.
+        base = {'a': 1, 'b': {'x': 9, 'z': 10}}
+        other = {'b': {'x': 7, 'y': 8}, 'c': [1, 2, 3]}
+        result = merge_dicts(base, other)
+        assert_equal(result['a'], 1)
+        assert_equal(result['b']['x'], 7)
+        assert_equal(result['b']['y'], 8)
+        assert_equal(result['b']['z'], 10)
+        assert_equal(result['c'], [1, 2, 3])
+
+    def test_merge_dict_lists(self):
+        # Default: lists concatenate, duplicates kept.
+        base = {'a': [1, 3, 2]}
+        other = {'a': [3, 4, 5]}
+        result = merge_dicts(base, other)
+        assert_equal(result['a'], [1, 3, 2, 3, 4, 5])
+        # 'first': keep the first occurrence of each duplicate.
+        result = merge_dicts(base, other, list_duplicates='first')
+        assert_equal(result['a'], [1, 3, 2, 4, 5])
+        # 'last': keep the last occurrence of each duplicate.
+        result = merge_dicts(base, other, list_duplicates='last')
+        assert_equal(result['a'], [1, 2, 3, 4, 5])
+
+    @raises(ValueError)
+    def test_type_mismatch(self):
+        # match_types=True rejects merging values of differing types.
+        base = {'a': [1, 2, 3]}
+        other = {'a': 'test'}
+        merge_dicts(base, other, match_types=True)
+
diff --git a/wlauto/tools/__init__.py b/wlauto/tools/__init__.py
new file mode 100644
index 00000000..cd5d64d6
--- /dev/null
+++ b/wlauto/tools/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
diff --git a/wlauto/tools/extdoc.py b/wlauto/tools/extdoc.py
new file mode 100644
index 00000000..8c6592ec
--- /dev/null
+++ b/wlauto/tools/extdoc.py
@@ -0,0 +1,134 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+"""
+This module contains utilities for generating user documentation for Workload
+Automation Extensions.
+
+"""
+import re
+import inspect
+
+
PARAGRAPH_SEP = re.compile(r'\n\n+')
LINE_START = re.compile(r'\n\s*')


def get_paragraphs(text):
    """Split ``text`` on blank lines and return the resulting paragraphs,
    each collapsed onto a single line."""
    paragraphs = []
    for chunk in PARAGRAPH_SEP.split(text):
        paragraphs.append(LINE_START.sub(' ', chunk))
    return paragraphs
+
+
class ExtensionDocumenter(object):
    """Wraps an Extension instance and exposes its documentation
    (name, summary, description and parameters) as simple properties."""

    def __init__(self, ext):
        self.ext = ext

    @property
    def name(self):
        return self.ext.name

    @property
    def summary(self):
        """The first paragraph of the description which, by convention,
        serves as the short summary for this Extension."""
        return get_paragraphs(self.description)[0]

    @property
    def description(self):
        """
        Full description of the extension, taken from its ``description``
        attribute if present, falling back (legacy behaviour) to the class
        docstring; an empty string if neither is set.

        The text is assumed to be reStructuredText; leading and trailing
        whitespace is stripped.

        """
        ext = self.ext
        if hasattr(ext, 'description'):
            return ext.description.strip()
        doc = ext.__class__.__doc__
        if doc:
            return doc.strip()
        return ''

    @property
    def parameters(self):
        return [ExtensionParameterDocumenter(p) for p in self.ext.parameters]
+
+
class ExtensionParameterDocumenter(object):
    """Exposes the documentation-relevant attributes of a single Extension
    parameter (name, type, default, description and constraints)."""

    def __init__(self, param):
        self.param = param

    @property
    def name(self):
        return self.param.name

    @property
    def kind(self):
        return self.param.get_type_name()

    @property
    def default(self):
        return self.param.default

    @property
    def description(self):
        return self.param.description

    @property
    def constraint(self):
        """Human-readable rendering of the parameter's constraints:
        the allowed values and/or the constraint callable (lambda
        constraints are parsed into a readable expression)."""
        parts = []
        if self.param.allowed_values:
            parts.append('value must be in {}'.format(self.param.allowed_values))
        if self.param.constraint:
            constraint_text = self.param.constraint.__name__
            if constraint_text == '<lambda>':
                constraint_text = _parse_lambda(inspect.getsource(self.param.constraint))
            parts.append(constraint_text)
        return ' and '.join(parts)
+
+
+# Utility functions
+
+
+def _parse_lambda(text):
+ """Parse the definition of a lambda function in to a readable string."""
+ text = text.split('lambda')[1]
+ param, rest = text.split(':')
+ param = param.strip()
+ # There are three things that could terminate a lambda: an (unparenthesized)
+ # comma, a new line and an (unmatched) close paren.
+ term_chars = [',', '\n', ')']
+ func_text = ''
+ inside_paren = 0 # an int rather than a bool to keep track of nesting
+ for c in rest:
+ if c in term_chars and not inside_paren:
+ break
+ elif c == ')': # must be inside paren
+ inside_paren -= 1
+ elif c == '(':
+ inside_paren += 1
+ func_text += c
+
+ # Rename the lambda parameter to 'value' so that the resulting
+ # "description" makes more sense.
+ func_text = re.sub(r'\b{}\b'.format(param), 'value', func_text)
+
+ return func_text
+
diff --git a/wlauto/utils/__init__.py b/wlauto/utils/__init__.py
new file mode 100644
index 00000000..cd5d64d6
--- /dev/null
+++ b/wlauto/utils/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
diff --git a/wlauto/utils/android.py b/wlauto/utils/android.py
new file mode 100644
index 00000000..4bbb0c0c
--- /dev/null
+++ b/wlauto/utils/android.py
@@ -0,0 +1,368 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+"""
+Utility functions for working with Android devices through adb.
+
+"""
+# pylint: disable=E1103
+import os
+import time
+import subprocess
+import logging
+import re
+
+from wlauto.exceptions import DeviceError, ConfigError, HostError
+from wlauto.utils.misc import check_output, escape_single_quotes, escape_double_quotes, get_null
+
+
+MAX_TRIES = 5
+
+logger = logging.getLogger('android')
+
+# See:
+# http://developer.android.com/guide/topics/manifest/uses-sdk-element.html#ApiLevels
+ANDROID_VERSION_MAP = {
+ 19: 'KITKAT',
+ 18: 'JELLY_BEAN_MR2',
+ 17: 'JELLY_BEAN_MR1',
+ 16: 'JELLY_BEAN',
+ 15: 'ICE_CREAM_SANDWICH_MR1',
+ 14: 'ICE_CREAM_SANDWICH',
+ 13: 'HONEYCOMB_MR2',
+ 12: 'HONEYCOMB_MR1',
+ 11: 'HONEYCOMB',
+ 10: 'GINGERBREAD_MR1',
+ 9: 'GINGERBREAD',
+ 8: 'FROYO',
+ 7: 'ECLAIR_MR1',
+ 6: 'ECLAIR_0_1',
+ 5: 'ECLAIR',
+ 4: 'DONUT',
+ 3: 'CUPCAKE',
+ 2: 'BASE_1_1',
+ 1: 'BASE',
+}
+
# TODO: these are set to their actual values near the bottom of the file. There
# is some HACKery involved to ensure that ANDROID_HOME does not need to be set
# or adb added to path for root when installing as root, and the whole
# implementation is kinda clunky and messier than I'd like. The only file that
# rivals this one in levels of mess is bootstrap.py (for very much the same
# reasons). There must be a neater way to ensure that environmental dependencies
# are met when they are needed, and are not imposed when they are not.
+android_home = None
+platform_tools = None
+adb = None
+aapt = None
+fastboot = None
+
+
+class _AndroidEnvironment(object):
+
+ def __init__(self):
+ self.android_home = None
+ self.platform_tools = None
+ self.adb = None
+ self.aapt = None
+ self.fastboot = None
+
+
class AndroidProperties(object):
    """
    Parses ``getprop``-style output (lines of the form ``[key]: [value]``)
    and provides both dict-style and attribute access to the properties.
    Missing properties yield ``None``.
    """

    def __init__(self, text):
        self._properties = {}
        self.parse(text)

    def parse(self, text):
        """Re-parse ``text``, replacing any previously held properties."""
        pairs = re.findall(r'\[(.*?)\]:\s+\[(.*?)\]', text)
        self._properties = dict(pairs)

    def __iter__(self):
        return iter(self._properties)

    def __getattr__(self, name):
        # Unknown properties resolve to None rather than raising.
        return self._properties.get(name)

    __getitem__ = __getattr__
+
+
class ApkInfo(object):
    """
    Metadata for an APK file (package, launchable activity, label and
    version), extracted by parsing the output of ``aapt dump badging``.
    """

    version_regex = re.compile(r"name='(?P<name>[^']+)' versionCode='(?P<vcode>[^']+)' versionName='(?P<vname>[^']+)'")
    name_regex = re.compile(r"name='(?P<name>[^']+)'")

    def __init__(self, path=None):
        self.path = path
        self.package = None
        self.activity = None
        self.label = None
        self.version_name = None
        self.version_code = None
        # NOTE(review): parse() is invoked even when path is None, which
        # passes None through to aapt -- confirm this is intended.
        self.parse(path)

    def parse(self, apk_path):
        # Locate aapt (via the module's lazy environment setup) and scrape
        # the fields of interest from its badging output.
        _check_env()
        command = [aapt, 'dump', 'badging', apk_path]
        logger.debug(' '.join(command))
        output = subprocess.check_output(command)
        for line in output.split('\n'):
            if line.startswith('application-label:'):
                self.label = line.split(':')[1].strip().replace('\'', '')
            elif line.startswith('package:'):
                match = self.version_regex.search(line)
                if match:
                    self.package = match.group('name')
                    self.version_code = match.group('vcode')
                    self.version_name = match.group('vname')
            elif line.startswith('launchable-activity:'):
                match = self.name_regex.search(line)
                self.activity = match.group('name')
            else:
                pass  # not interested
+
+
def fastboot_command(command, timeout=None):
    """Run ``fastboot <command>`` on the host and return its output."""
    _check_env()
    full_command = "fastboot {}".format(command)
    logger.debug(full_command)
    output, _ = check_output(full_command, timeout, shell=True)
    return output
+
+
def fastboot_flash_partition(partition, path_to_image):
    """Flash the image at ``path_to_image`` onto the named fastboot partition."""
    command = 'flash {} {}'.format(partition, path_to_image)
    fastboot_command(command)
+
+
def adb_get_device():
    """
    Returns the serial number of a connected android device.

    If there are more than one device connected to the machine, or it could not
    find any device connected, :class:`wlauto.exceptions.ConfigError` is raised.
    """
    _check_env()
    # TODO this is a hacky way to issue a adb command to all listed devices

    # The output of "adb devices" consists of a heading line, one line per
    # attached device, and a trailing blank line; so with exactly one device
    # attached the output is three lines long.
    output = adb_command('0', "devices").splitlines()  # pylint: disable=E1103
    output_length = len(output)
    if output_length == 3:
        # output[1] is the single device line; splitting it on '\t' yields
        # [serial, state], of which we want the serial.
        return output[1].split('\t')[0]
    elif output_length > 3:
        raise ConfigError('Number of discovered devices is {}, it should be 1'.format(output_length - 2))
    else:
        raise ConfigError('No device is connected and available')
+
+
def adb_connect(device, timeout=None):
    """
    Connect adb to ``device``, retrying until the device responds or
    MAX_TRIES is exceeded.

    :param device: device serial, or IP[:5555] address for networked devices.
    :raises DeviceError: if the device cannot be reached after MAX_TRIES, or
        the last connect attempt did not report success.
    """
    _check_env()
    command = "adb connect " + device
    if ":5555" in device:
        # NOTE(review): for networked devices the command is only *logged*
        # here; the connect itself happens unconditionally below -- confirm
        # this conditional is intentional.
        logger.debug(command)

    output, _ = check_output(command, shell=True, timeout=timeout)
    logger.debug(output)
    # Due to a rare adb bug, an extra :5555 is sometimes appended to the IP
    # address; if detected, later retries use the de-duplicated address.
    if output.find('5555:5555') != -1:
        logger.debug('ADB BUG with extra 5555')
        command = "adb connect " + device.replace(':5555', '')

    tries = 0
    # Poll a file on the device to confirm the connection actually works,
    # re-issuing the connect command between attempts.
    while not poll_for_file(device, "/proc/cpuinfo"):
        logger.debug("adb connect failed, retrying now...")
        tries += 1
        if tries > MAX_TRIES:
            raise DeviceError('Cannot connect to adb server on the device.')
        logger.debug(command)
        output, _ = check_output(command, shell=True, timeout=timeout)
        time.sleep(10)

    if output.find('connected to') == -1:
        raise DeviceError('Could not connect to {}'.format(device))
+
+
def adb_disconnect(device):
    """Disconnect adb from a networked device (a no-op for USB devices).

    :raises DeviceError: if ``adb disconnect`` exits with a non-zero code.
    """
    _check_env()
    if ":5555" in device:  # only networked devices need an explicit disconnect
        command = "adb disconnect " + device
        logger.debug(command)
        retval = subprocess.call(command, stdout=open(os.devnull, 'wb'), shell=True)
        if retval:
            raise DeviceError('"{}" returned {}'.format(command, retval))
+
+
def poll_for_file(device, dfile):
    """Return ``True`` if ``dfile`` exists on ``device`` (checked with an
    adb shell file test), ``False`` otherwise."""
    _check_env()
    device_part = '-s {}'.format(device) if device else ''
    command = ("adb " + device_part +
               " shell \" if [ -f " + dfile + " ] ; then true ; else false ; fi\" ")
    logger.debug(command)
    # The shell test's exit status is the answer: zero means the file exists.
    return subprocess.call(command, stderr=subprocess.PIPE, shell=True) == 0
+
+
+am_start_error = re.compile(r"Error: Activity class {[\w|.|/]*} does not exist")
+
+
def adb_shell(device, command, timeout=None, check_exit_code=False, as_root=False):  # NOQA
    """
    Execute ``command`` in an adb shell on ``device`` and return its output.

    :param device: device serial; may be falsy when only one device is attached.
    :param timeout: maximum time to wait (passed through to ``check_output``).
    :param check_exit_code: when True, the command is run with ``echo $?``
        appended so its exit code can be recovered from the output, and a
        DeviceError is raised on failure.
    :param as_root: pipe the command through ``su``.
    :raises DeviceError: on non-zero exit code, on an "am start" activity
        error, or if adb returned without echoing an exit code.
    """
    _check_env()
    if as_root:
        command = 'echo "{}" | su'.format(escape_double_quotes(command))
    device_string = '-s {}'.format(device) if device else ''
    full_command = 'adb {} shell "{}"'.format(device_string, escape_double_quotes(command))
    logger.debug(full_command)
    if check_exit_code:
        actual_command = "adb {} shell '({}); echo $?'".format(device_string, escape_single_quotes(command))
        raw_output, error = check_output(actual_command, timeout, shell=True)
        if raw_output:
            try:
                # The last non-empty line of raw_output is the echoed exit code.
                output, exit_code, _ = raw_output.rsplit('\n', 2)
            except ValueError:
                # Only one line came back: it must be the exit code itself.
                exit_code, _ = raw_output.rsplit('\n', 1)
                output = ''
        else:  # raw_output is empty
            exit_code = '969696'  # just because
            output = ''

        exit_code = exit_code.strip()
        if exit_code.isdigit():
            if int(exit_code):
                message = 'Got exit code {}\nfrom: {}\nSTDOUT: {}\nSTDERR: {}'.format(exit_code, full_command,
                                                                                      output, error)
                raise DeviceError(message)
            elif am_start_error.findall(output):
                message = 'Could not start activity; got the following:'
                message += '\n{}'.format(am_start_error.findall(output)[0])
                raise DeviceError(message)
        else:  # not all digits
            if am_start_error.findall(output):
                message = 'Could not start activity; got the following:'
                message += '\n{}'.format(am_start_error.findall(output)[0])
                raise DeviceError(message)
            else:
                raise DeviceError('adb has returned early; did not get an exit code. Was kill-server invoked?')
    else:  # do not check exit code
        output, _ = check_output(full_command, timeout, shell=True)
    return output
+
+
def adb_background_shell(device, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
    """Runs the specified command in an adb shell subprocess, returning the
    Popen object (the caller is responsible for waiting on/terminating it)."""
    _check_env()
    if as_root:
        command = 'echo \'{}\' | su'.format(escape_single_quotes(command))
    device_string = '-s {}'.format(device) if device else ''
    full_command = 'adb {} shell "{}"'.format(device_string, escape_double_quotes(command))
    logger.debug(full_command)
    return subprocess.Popen(full_command, stdout=stdout, stderr=stderr, shell=True)
+
+
class AdbDevice(object):
    """One entry from ``adb devices`` output: the device serial (``name``)
    and its status string as reported by adb.

    Orderable by name against other AdbDevice instances or plain strings
    (uses the Python 2 ``__cmp__``/``cmp`` protocol).
    """

    def __init__(self, name, status):
        self.name = name
        self.status = status

    def __cmp__(self, other):
        if isinstance(other, AdbDevice):
            return cmp(self.name, other.name)
        else:
            return cmp(self.name, other)
+
+
def adb_list_devices():
    """Return an :class:`AdbDevice` for every device listed by ``adb devices``
    (lines that split into exactly two fields: serial and status)."""
    _check_env()
    output = adb_command(None, 'devices')
    split_lines = (line.split() for line in output.splitlines())
    return [AdbDevice(*(entry.strip() for entry in parts))
            for parts in split_lines if len(parts) == 2]
+
+
def adb_command(device, command, timeout=None):
    """Run an arbitrary adb command (against the given device serial, if
    any) and return its output."""
    _check_env()
    device_string = '-s {}'.format(device) if device else ''
    full_command = "adb {} {}".format(device_string, command)
    logger.debug(full_command)
    output, _ = check_output(full_command, timeout, shell=True)
    return output
+
+
+# Messy environment initialisation stuff...
+
+
def _initialize_with_android_home(env):
    """Populate ``env`` from the ``android_home`` module global and append
    its platform-tools directory to PATH."""
    logger.debug('Using ANDROID_HOME from the environment.')
    env.android_home = android_home
    env.platform_tools = os.path.join(android_home, 'platform-tools')
    os.environ['PATH'] += os.pathsep + env.platform_tools
    _init_common(env)
    return env
+
+
def _initialize_without_android_home(env):
    """Populate ``env`` by locating adb on PATH and deriving the SDK layout
    from its location (Unix only).

    :raises HostError: on Windows, or when adb is not on PATH.
    """
    if os.name == 'nt':
        raise HostError('Please set ANDROID_HOME to point to the location of the Android SDK.')
    # Assuming Unix in what follows.
    if subprocess.call('adb version >{}'.format(get_null()), shell=True):
        raise HostError('ANDROID_HOME is not set and adb is not in PATH. Have you installed Android SDK?')
    logger.debug('Discovering ANDROID_HOME from adb path.')
    # dirname also discards the trailing newline emitted by `which`.
    env.platform_tools = os.path.dirname(subprocess.check_output('which adb', shell=True))
    env.android_home = os.path.dirname(env.platform_tools)
    _init_common(env)
    return env
+
+
def _init_common(env):
    """
    Locate the ``aapt`` binary under ``$ANDROID_HOME/build-tools``, using
    the highest installed build-tools version that ships one.

    :param env: a partially initialised :class:`_AndroidEnvironment`;
                its ``aapt`` attribute is set on success.
    :raises HostError: if the build-tools directory is missing, or no
                       installed version contains ``aapt``.
    """
    logger.debug('ANDROID_HOME: {}'.format(env.android_home))
    build_tools_directory = os.path.join(env.android_home, 'build-tools')
    if not os.path.isdir(build_tools_directory):
        # Fixed typo in the user-facing message: "SKD" -> "SDK".
        msg = 'ANDROID_HOME ({}) does not appear to have valid Android SDK install (cannot find build-tools)'
        raise HostError(msg.format(env.android_home))
    versions = os.listdir(build_tools_directory)
    # Prefer the most recent build-tools version that provides aapt.
    for version in sorted(versions, reverse=True):
        aapt_path = os.path.join(build_tools_directory, version, 'aapt')
        if os.path.isfile(aapt_path):
            logger.debug('Using aapt for version {}'.format(version))
            env.aapt = aapt_path
            break
    else:
        raise HostError('aapt not found. Please make sure at least one Android platform is installed.')
+
+
def _check_env():
    """Lazily initialise the module-level android_home/platform_tools/adb/aapt
    globals on first use (see the TODO note above their definitions)."""
    global android_home, platform_tools, adb, aapt  # pylint: disable=W0603
    if not android_home:
        android_home = os.getenv('ANDROID_HOME')
        if android_home:
            _env = _initialize_with_android_home(_AndroidEnvironment())
        else:
            _env = _initialize_without_android_home(_AndroidEnvironment())
        android_home = _env.android_home
        platform_tools = _env.platform_tools
        # NOTE(review): neither initializer assigns env.adb, so the 'adb'
        # global remains None after initialisation -- confirm intended.
        adb = _env.adb
        aapt = _env.aapt
diff --git a/wlauto/utils/cli.py b/wlauto/utils/cli.py
new file mode 100644
index 00000000..1339201e
--- /dev/null
+++ b/wlauto/utils/cli.py
@@ -0,0 +1,27 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from wlauto.core.version import get_wa_version
+
+
def init_argument_parser(parser):
    """Add the command-line options common to all WA commands (--config,
    --verbose, --debug, --version) to ``parser`` and return it."""
    parser.add_argument('-c', '--config', help='specify an additional config.py')
    parser.add_argument('-v', '--verbose', action='count',
                        help='The scripts will produce verbose output.')
    parser.add_argument('--debug', action='store_true',
                        help='Enable debug mode. Note: this implies --verbose.')
    # Note: get_wa_version() is evaluated here, at parser-construction time.
    parser.add_argument('--version', action='version', version='%(prog)s {}'.format(get_wa_version()))
    return parser
+
diff --git a/wlauto/utils/cpuinfo.py b/wlauto/utils/cpuinfo.py
new file mode 100644
index 00000000..0bfc4863
--- /dev/null
+++ b/wlauto/utils/cpuinfo.py
@@ -0,0 +1,44 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
class Cpuinfo(object):
    """
    Parses ``/proc/cpuinfo``-style text into a list of sections, one dict
    of key/value strings per blank-line-separated block.
    """

    @property
    def architecture(self):
        """The 'CPU architecture' (or 'architecture') value from the first
        section that defines it; ``None`` when absent."""
        for section in self.sections:
            if 'CPU architecture' in section:
                return section['CPU architecture']
            if 'architecture' in section:
                return section['architecture']

    def __init__(self, text):
        self.sections = None
        self.text = None
        self.parse(text)

    def parse(self, text):
        """Re-parse ``text``, replacing any previously parsed sections."""
        self.sections = []
        self.text = text.strip()
        section = {}
        for raw_line in self.text.split('\n'):
            stripped = raw_line.strip()
            if not stripped:
                # A blank line closes off the current section.
                self.sections.append(section)
                section = {}
            else:
                key, value = stripped.split(':', 1)
                section[key.strip()] = value.strip()
        self.sections.append(section)
diff --git a/wlauto/utils/doc.py b/wlauto/utils/doc.py
new file mode 100644
index 00000000..b749dff7
--- /dev/null
+++ b/wlauto/utils/doc.py
@@ -0,0 +1,305 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+"""
+Utilities for working with and formatting documentation.
+
+"""
+import os
+import re
+import inspect
+from itertools import cycle
+
+USER_HOME = os.path.expanduser('~')
+
+BULLET_CHARS = '-*'
+
+
def get_summary(aclass):
    """
    Returns the summary description for an extension class. The summary is the
    first paragraph (separated by blank line) of the description taken either from
    the ``description`` attribute of the class, or if that is not present, from the
    class' docstring.

    """
    return get_description(aclass).split('\n\n')[0]
+
+
def get_description(aclass):
    """
    Return the description text for the specified extension class, taken
    from its ``description`` attribute when set, otherwise from its
    docstring. A placeholder message is returned when neither is available.
    """
    description = getattr(aclass, 'description', None)
    if description:
        return inspect.cleandoc(description)
    if aclass.__doc__:
        return inspect.getdoc(aclass)
    return 'no documentation found for {}'.format(aclass.__name__)
+
+
def get_type_name(obj):
    """Return a short name for the type, class or function ``obj``. Lambdas
    are rendered as their body expression with the parameter renamed to
    "value"; tuples are assumed to carry the name as their second element."""
    obj_str = str(obj)
    match = re.search(r"<(type|class|function) '?(.*?)'?>", obj_str)
    if isinstance(obj, tuple):
        name = obj[1]
    elif match.group(1) == 'function':
        name = obj_str.split()[1]
        if name == '<lambda>':
            source = inspect.getsource(obj).strip().replace('\n', ' ')
            lambda_match = re.search(r'lambda\s+(\w+)\s*:\s*(.*?)\s*[\n,]', source)
            if not lambda_match:
                raise ValueError('could not get name for {}'.format(obj))
            name = lambda_match.group(2).replace(lambda_match.group(1), 'value')
    else:
        name = match.group(2)
    # Strip any module/qualifier path, keeping only the final component.
    if '.' in name:
        name = name.rsplit('.', 1)[1]
    return name
+
+
def count_leading_spaces(text):
    """
    Return the number of space characters at the start of ``text``.

    Only literal spaces are counted; this is used on Python source, where
    tabs are not expected.
    """
    return len(text) - len(text.lstrip(' '))
+
+
def format_column(text, width):
    """
    Formats text into a column of specified width. If a line is too long,
    it will be broken on a word boundary. The new lines will have the same
    number of leading spaces as the original line.

    Note: this will not attempt to join up lines that are too short.

    """
    formatted = []
    for line in text.split('\n'):
        line_len = len(line)
        if line_len <= width:
            formatted.append(line)
        else:
            words = line.split(' ')
            new_line = words.pop(0)
            while words:
                next_word = words.pop(0)
                # Strictly '<' here, so a wrapped line ends up just short of width.
                if (len(new_line) + len(next_word) + 1) < width:
                    new_line += ' ' + next_word
                else:
                    formatted.append(new_line)
                    # Continuation lines inherit the broken line's indentation.
                    new_line = ' ' * count_leading_spaces(new_line) + next_word
            formatted.append(new_line)
    return '\n'.join(formatted)
+
+
def format_bullets(text, width, char='-', shift=3, outchar=None):
    """
    Formats text into bulleted list. Assumes each line of input that starts with
    ``char`` (possibly preceded with whitespace) is a new bullet point. Note: leading
    whitespace in the input will *not* be preserved. Instead, it will be determined by
    ``shift`` parameter.

    :text: the text to be formatted
    :width: format width (note: must be at least ``shift`` + 4).
    :char: character that indicates a new bullet point in the input text.
    :shift: How far bulleted entries will be indented. This indicates the indentation
            level of the bullet point. Text indentation level will be ``shift`` + 3.
    :outchar: character that will be used to mark bullet points in the output. If
              left as ``None``, ``char`` will be used.

    """
    bullet_lines = []
    output = ''

    def __process_bullet(bullet_lines):
        # Flush the accumulated lines of one bullet as a wrapped, indented
        # paragraph, then place the bullet marker inside the indent.
        # NOTE(review): the marker column is hard-coded to 3 rather than
        # derived from ``shift`` -- confirm behaviour for shift != 3.
        if bullet_lines:
            bullet = format_paragraph(indent(' '.join(bullet_lines), shift + 2), width)
            bullet = bullet[:3] + outchar + bullet[4:]
            del bullet_lines[:]
            return bullet + '\n'
        else:
            return ''

    if outchar is None:
        outchar = char
    for line in text.split('\n'):
        line = line.strip()
        if line.startswith(char):  # new bullet
            output += __process_bullet(bullet_lines)
            line = line[1:].strip()
        bullet_lines.append(line)
    output += __process_bullet(bullet_lines)
    return output
+
+
def format_simple_table(rows, headers=None, align='>', show_borders=True, borderchar='='):  # pylint: disable=R0914
    """Formats a simple table.

    Note: relies on Python 2 constructs (list-returning ``map``/``zip``,
    ``iterator.next()`` and ``xrange``).
    """
    if not rows:
        return ''
    rows = [map(str, r) for r in rows]
    num_cols = len(rows[0])

    # cycle specified alignments until we have num_cols of them. This is
    # consistent with how such cases are handled in R, pandas, etc.
    it = cycle(align)
    align = [it.next() for _ in xrange(num_cols)]

    cols = zip(*rows)
    col_widths = [max(map(len, c)) for c in cols]
    row_format = ' '.join(['{:%s%s}' % (align[i], w) for i, w in enumerate(col_widths)])
    row_format += '\n'

    border = row_format.format(*[borderchar * cw for cw in col_widths])

    result = border if show_borders else ''
    if headers:
        result += row_format.format(*headers)
        result += border
    for row in rows:
        result += row_format.format(*row)
    if show_borders:
        result += border
    return result
+
+
def format_paragraph(text, width):
    """
    Format the specified text into a column of specified width. The text is
    assumed to be a single paragraph and existing line breaks will not be preserved.
    Leading spaces (of the initial line), on the other hand, will be preserved.

    """
    text = re.sub('\n\n*\\s*', ' ', text.strip('\n'))
    return format_column(text, width)
+
+
def format_body(text, width):
    """
    Format the specified text into a column of specified width. The text is
    assumed to be a "body" of one or more paragraphs separated by one or more
    blank lines. The initial indentation of the first line of each paragraph
    will be preserved, but any other formatting may be clobbered.

    """
    text = re.sub('\n\\s*\n', '\n\n', text.strip('\n'))  # get rid of all-whitespace lines
    paragraphs = re.split('\n\n+', text)
    formatted_paragraphs = []
    for p in paragraphs:
        # Paragraphs that open with a bullet character get bullet formatting.
        if p.strip() and p.strip()[0] in BULLET_CHARS:
            formatted_paragraphs.append(format_bullets(p, width))
        else:
            formatted_paragraphs.append(format_paragraph(p, width))
    return '\n\n'.join(formatted_paragraphs)
+
+
def strip_inlined_text(text):
    """
    This function processes multiline inlined text (e.g. from docstrings)
    to strip away leading spaces and leading and trailing new lines.

    """
    text = text.strip('\n')
    lines = [ln.rstrip() for ln in text.split('\n')]

    # first line is special as it may not have the indent that follows the
    # others, e.g. if it starts on the same line as the multiline quote (""").
    nspaces = count_leading_spaces(lines[0])

    if len([ln for ln in lines if ln]) > 1:
        # Strip the common indentation of the remaining non-empty lines.
        to_strip = min(count_leading_spaces(ln) for ln in lines[1:] if ln)
        if nspaces >= to_strip:
            stripped = [lines[0][to_strip:]]
        else:
            stripped = [lines[0][nspaces:]]
        stripped += [ln[to_strip:] for ln in lines[1:]]
    else:
        stripped = [lines[0][nspaces:]]
    return '\n'.join(stripped).strip('\n')
+
+
def indent(text, spaces=4):
    """Indent each non-empty line of ``text`` by ``spaces`` spaces; empty
    lines are left untouched."""
    pad = ' ' * spaces
    return '\n'.join(pad + line if line else line
                     for line in text.split('\n'))
+
+
def format_literal(lit):
    """Render a literal value for reST: strings are quoted, compiled regex
    objects are shown as r'' patterns, anything else is formatted directly.
    (Python 2: ``basestring`` covers both str and unicode.)"""
    if isinstance(lit, basestring):
        return '``\'{}\'``'.format(lit)
    elif hasattr(lit, 'pattern'):  # regex
        return '``r\'{}\'``'.format(lit.pattern)
    else:
        return '``{}``'.format(lit)
+
+
def get_params_rst(ext):
    """Generate a reST definition-list entry for each of the extension's
    parameters: type, description, constraints and default value."""
    text = ''
    for param in ext.parameters:
        text += '{} : {} {}\n'.format(param.name, get_type_name(param.kind),
                                      param.mandatory and '(mandatory)' or ' ')
        desc = strip_inlined_text(param.description or '')
        text += indent('{}\n'.format(desc))
        if param.allowed_values:
            text += indent('\nallowed values: {}\n'.format(', '.join(map(format_literal, param.allowed_values))))
        elif param.constraint:
            text += indent('\nconstraint: ``{}``\n'.format(get_type_name(param.constraint)))
        # NOTE(review): falsy defaults (0, False, '') are never displayed.
        if param.default:
            value = param.default
            if isinstance(value, basestring) and value.startswith(USER_HOME):
                # Keep generated docs host-agnostic.
                value = value.replace(USER_HOME, '~')
            text += indent('\ndefault: {}\n'.format(format_literal(value)))
        text += '\n'
    return text
+
+
def underline(text, symbol='='):
    """Return ``text`` followed by a same-length underline of ``symbol``
    characters and a blank line (reST-style heading)."""
    ruler = symbol * len(text)
    return '{}\n{}\n\n'.format(text, ruler)
+
+
def get_rst_from_extension(ext):
    """Generate a complete reST section for an extension: underlined name,
    description (from attribute or docstring) and a parameters listing."""
    text = underline(ext.name, '-')
    if hasattr(ext, 'description'):
        desc = strip_inlined_text(ext.description or '')
    elif ext.__doc__:
        desc = strip_inlined_text(ext.__doc__)
    else:
        desc = ''
    text += desc + '\n\n'
    params_rst = get_params_rst(ext)
    if params_rst:
        text += underline('parameters', '~') + params_rst
    return text + '\n'
+
diff --git a/wlauto/utils/formatter.py b/wlauto/utils/formatter.py
new file mode 100644
index 00000000..90e1105b
--- /dev/null
+++ b/wlauto/utils/formatter.py
@@ -0,0 +1,148 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+
+
INDENTATION_FROM_TITLE = 4


class TextFormatter(object):

    """
    Abstract base for text formatters. Subclasses must implement two
    methods: ``add_item`` (accumulate data) and ``format_data`` (render the
    accumulated data as text). The ``name`` attribute identifies the
    formatter.
    """

    name = None
    data = None

    def __init__(self):
        pass

    def add_item(self, new_data, item_title):
        """
        Add a new item to the text formatter.

        :param new_data: The data to be added
        :param item_title: A title for the added data
        """
        raise NotImplementedError()

    def format_data(self):
        """Render and return the accumulated data as formatted text."""
        raise NotImplementedError()


class DescriptionListFormatter(TextFormatter):
    """
    Formats (title, description) pairs as an aligned description list.
    Titles are right-aligned to the longest title seen; description text is
    re-wrapped to the terminal width (or to an explicitly set width).
    """

    name = 'description_list_formatter'
    data = None

    def get_text_width(self):
        # Lazily query the terminal size the first time the width is needed.
        if not self._text_width:
            _, width = os.popen('stty size', 'r').read().split()
            self._text_width = int(width)
        return self._text_width

    def set_text_width(self, value):
        self._text_width = value

    text_width = property(get_text_width, set_text_width)

    def __init__(self, title=None, width=None):
        super(DescriptionListFormatter, self).__init__()
        self.data_title = title
        self._text_width = width
        self.longest_word_length = 0
        self.data = []

    def add_item(self, new_data, item_title):
        """Add a (title, description) pair; the description is split into
        paragraphs with intra-paragraph newlines removed."""
        if len(item_title) > self.longest_word_length:
            self.longest_word_length = len(item_title)
        self.data.append((item_title, self._remove_newlines(new_data)))

    def format_data(self):
        """Return all added items rendered as an aligned description list."""
        parag_indentation = self.longest_word_length + INDENTATION_FROM_TITLE
        string_formatter = '{}:<{}{} {}'.format('{', parag_indentation, '}', '{}')

        formatted_data = ''
        if self.data_title:
            formatted_data += self.data_title

        line_width = self.text_width - parag_indentation
        for title, paragraph in self.data:
            if paragraph:
                formatted_data += '\n'
                # Right-align the title to the longest title seen.
                title_len = self.longest_word_length - len(title)
                title += ':'
                if title_len > 0:
                    title = (' ' * title_len) + title

                parag_lines = self._break_lines(paragraph, line_width).splitlines()
                if parag_lines:
                    formatted_data += string_formatter.format(title, parag_lines[0])
                    for line in parag_lines[1:]:
                        formatted_data += '\n' + string_formatter.format('', line)

        # Force re-querying the terminal width on the next format_data call.
        self.text_width = None
        return formatted_data

    # Return the text's paragraphs as a list, one paragraph per index, with
    # the newlines within each paragraph collapsed away.
    def _remove_newlines(self, new_data):  # pylint: disable=R0201
        parag_list = ['']
        parag_num = 0
        prv_parag = None
        # for each line separated by a new line
        for paragraph in new_data.splitlines():
            if paragraph:
                parag_list[parag_num] += ' ' + paragraph
            # if the previous line is NOT empty, a new paragraph starts
            elif prv_parag:
                # BUG FIX: this was ``parag_num = 1``, which merged every
                # paragraph after the second into the second one.
                parag_num += 1
                parag_list.append('')
            prv_parag = paragraph

        # sometimes we end up with an empty string as the last item, so remove it
        if not parag_list[-1]:
            return parag_list[:-1]
        return parag_list

    def _break_lines(self, parag_list, line_width):  # pylint: disable=R0201
        """Wrap each paragraph to ``line_width`` and join the paragraphs
        with blank lines."""
        formatted_paragraphs = []
        for para in parag_list:
            words = para.split()
            if words:
                formatted_text = words.pop(0)
                current_width = len(formatted_text)
                # for each word, the line width is an accumulation of the
                # word length + 1 (for the space after each word).
                for word in words:
                    word = word.strip()
                    if current_width + len(word) + 1 >= line_width:
                        formatted_text += '\n' + word
                        current_width = len(word)
                    else:
                        formatted_text += ' ' + word
                        current_width += len(word) + 1
                formatted_paragraphs.append(formatted_text)
        return '\n\n'.join(formatted_paragraphs)
diff --git a/wlauto/utils/hwmon.py b/wlauto/utils/hwmon.py
new file mode 100644
index 00000000..90998ab3
--- /dev/null
+++ b/wlauto/utils/hwmon.py
@@ -0,0 +1,77 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto.exceptions import DeviceError
+
+
+HWMON_ROOT = '/sys/class/hwmon'
+
+
class HwmonSensor(object):
    """A single HWMON sysfs sensor on a device.

    Readings are accumulated in ``self.readings`` each time
    :meth:`take_reading` is called.
    """

    def __init__(self, device, kind, label, filepath):
        self.device = device      # device the sysfs file is read from
        self.kind = kind          # sensor type, e.g. 'energy'
        self.label = label        # human-readable sensor name
        self.filepath = filepath  # path to the *_input sysfs file
        self.readings = []

    def take_reading(self):
        """Read the sensor's current (integer) value and append it to readings."""
        self.readings.append(self.device.get_sysfile_value(self.filepath, int))

    def clear_readings(self):
        """Discard all accumulated readings."""
        self.readings = []
+
+
def discover_sensors(device, sensor_kinds):
    """
    Discovers HWMON sensors available on the device.

    :device: Device on which to discover HWMON sensors. Must be an instance
             of :class:`AndroidDevice`.
    :sensor_kinds: A list of names of sensor types to be discovered. The names
                   must be as they appear prefixed to ``*_input`` files in
                   sysfs. E.g. ``'energy'``.

    :returns: A list of :class:`HwmonSensor` instances for each found sensor. If
              no sensors of the specified types were discovered, an empty list
              will be returned.

    """
    hwmon_devices = device.listdir(HWMON_ROOT)
    path = device.path
    sensors = []
    for hwmon_device in hwmon_devices:
        try:
            device_path = path.join(HWMON_ROOT, hwmon_device, 'device')
            name = device.get_sysfile_value(path.join(device_path, 'name'))
        except DeviceError:  # probably a virtual device
            device_path = path.join(HWMON_ROOT, hwmon_device)
            name = device.get_sysfile_value(path.join(device_path, 'name'))

        for sensor_kind in sensor_kinds:
            i = 1
            input_path = path.join(device_path, '{}{}_input'.format(sensor_kind, i))
            while device.file_exists(input_path):
                label_path = path.join(device_path, '{}{}_label'.format(sensor_kind, i))
                # Derive each sensor's label from a fresh copy of the device
                # name. The original code did ``name += ...``, so every later
                # sensor of the same device inherited all previous labels.
                label = name
                if device.file_exists(label_path):
                    label = name + ' ' + device.get_sysfile_value(label_path)
                sensors.append(HwmonSensor(device, sensor_kind, label, input_path))
                i += 1
                input_path = path.join(device_path, '{}{}_input'.format(sensor_kind, i))
    return sensors
+
diff --git a/wlauto/utils/log.py b/wlauto/utils/log.py
new file mode 100644
index 00000000..a0fa1374
--- /dev/null
+++ b/wlauto/utils/log.py
@@ -0,0 +1,223 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=E1101
+import logging
+import string
+import threading
+
+import colorama
+
+from wlauto.core.bootstrap import settings
+import wlauto.core.signal as signal
+
+
# Maps each logging level to the colorama escape sequence used to render it.
COLOR_MAP = {
    logging.DEBUG: colorama.Fore.BLUE,
    logging.INFO: colorama.Fore.GREEN,
    logging.WARNING: colorama.Fore.YELLOW,
    logging.ERROR: colorama.Fore.RED,
    logging.CRITICAL: colorama.Style.BRIGHT + colorama.Fore.RED,
}

# Escape sequence restoring the terminal's default text attributes.
RESET_COLOR = colorama.Style.RESET_ALL
+
+
def init_logging(verbosity):
    """Configure the root logger for WA.

    Installs an :class:`ErrorSignalHandler` (which emits signals, not output)
    plus a console handler whose level and format depend on ``verbosity``
    (1 => DEBUG/verbose format, otherwise INFO/regular format). Colour output
    is used unless explicitly disabled in settings.
    """
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)

    error_handler = ErrorSignalHandler(logging.DEBUG)
    root_logger.addHandler(error_handler)

    if verbosity == 1:
        console_level = logging.DEBUG
        fmt = settings.logging['verbose_format']
    else:
        console_level = logging.INFO
        fmt = settings.logging['regular_format']

    # Colour is on by default; only an explicit colour_enabled == False disables it.
    colour_disabled = 'colour_enabled' in settings.logging and not settings.logging['colour_enabled']
    formatter = LineFormatter(fmt) if colour_disabled else ColorFormatter(fmt)

    console_handler = logging.StreamHandler()
    console_handler.setLevel(console_level)
    console_handler.setFormatter(formatter)
    root_logger.addHandler(console_handler)

    # NOTE: the original trailing logging.basicConfig() call was removed -- it
    # is documented to do nothing once the root logger already has handlers,
    # which is always the case at this point.
+
+
def add_log_file(filepath, level=logging.DEBUG):
    """Attach a FileHandler for ``filepath`` (at ``level``, using the
    configured file format) to the root logger."""
    root_logger = logging.getLogger()
    file_handler = logging.FileHandler(filepath)
    file_handler.setLevel(level)
    file_handler.setFormatter(LineFormatter(settings.logging['file_format']))
    root_logger.addHandler(file_handler)
+
+
class ErrorSignalHandler(logging.Handler):
    """
    Emits signals for ERROR and WARNING level traces.

    Produces no output itself; it only broadcasts ERROR_LOGGED /
    WARNING_LOGGED signals so other components can react to logged problems.
    """

    def emit(self, record):
        if record.levelno == logging.ERROR:
            signal.send(signal.ERROR_LOGGED, self)
        elif record.levelno == logging.WARNING:
            signal.send(signal.WARNING_LOGGED, self)
+
+
class ColorFormatter(logging.Formatter):
    """
    Formats logging records with color and prepends record info
    to each line of the message.

    BLUE for DEBUG logging level
    GREEN for INFO logging level
    YELLOW for WARNING logging level
    RED for ERROR logging level
    BOLD RED for CRITICAL logging level

    """

    def __init__(self, fmt=None, datefmt=None):
        super(ColorFormatter, self).__init__(fmt, datefmt)
        # Reset colour around the message itself so the level colour applies
        # to the record-info prefix; ${color} is substituted per record.
        template_text = self._fmt.replace('%(message)s', RESET_COLOR + '%(message)s${color}')
        template_text = '${color}' + template_text + RESET_COLOR
        self.fmt_template = string.Template(template_text)

    def format(self, record):
        # NOTE(review): rebinds self._fmt on every call, so a shared instance
        # is not safe across threads emitting at different levels -- confirm
        # single-threaded use. Also relies on Formatter._fmt internals.
        self._set_color(COLOR_MAP[record.levelno])

        record.message = record.getMessage()
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)

        # Apply the (colourised) format string to each message line
        # individually, mirroring LineFormatter.
        d = record.__dict__
        parts = []
        for line in record.message.split('\n'):
            d.update({'message': line.strip('\r')})
            parts.append(self._fmt % d)

        return '\n'.join(parts)

    def _set_color(self, color):
        # Substitute the per-level colour into the cached template.
        self._fmt = self.fmt_template.substitute(color=color)
+
+
class LineFormatter(logging.Formatter):
    """Formatter that applies the format string to each line of a multi-line
    message individually, so every output line carries the record info."""

    def __init__(self, fmt=None, datefmt=None):
        super(LineFormatter, self).__init__(fmt, datefmt)

    def format(self, record):
        record.message = record.getMessage()
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)

        attrs = record.__dict__
        formatted_lines = []
        for message_line in record.message.split('\n'):
            attrs['message'] = message_line.strip('\r')
            formatted_lines.append(self._fmt % attrs)
        return '\n'.join(formatted_lines)
+
+
class BaseLogWriter(object):

    def __init__(self, name, level=logging.DEBUG):
        """
        File-like object class designed to be used for logging from streams.
        Each complete line (terminated by a new line character) gets logged
        at the specified level. Incomplete lines are buffered until the next
        new line arrives.

        :param name: The name of the logger that will be used.
        :param level: One of DEBUG/INFO/WARNING/ERROR; level at which
                      complete lines are logged.
        :raises ValueError: if ``level`` is not one of the four supported levels.

        """
        self.logger = logging.getLogger(name)
        self.buffer = ''
        # Map the requested level straight to the corresponding logger method.
        dispatch = {
            logging.DEBUG: self.logger.debug,
            logging.INFO: self.logger.info,
            logging.WARNING: self.logger.warning,
            logging.ERROR: self.logger.error,
        }
        try:
            self.do_write = dispatch[level]
        except KeyError:
            # ValueError (a subclass of Exception) keeps existing
            # ``except Exception`` callers working while being precise.
            raise ValueError('Unknown logging level: {}'.format(level))

    def flush(self):
        # Defined to match the interface expected by pexpect.
        return self

    def close(self):
        """Flush any buffered (incomplete) line to the logger."""
        if self.buffer:
            self.logger.debug(self.buffer)
            self.buffer = ''
        return self

    def __del__(self):
        # Ensure we don't lose buffered output.
        self.close()
+
+
class LogWriter(BaseLogWriter):
    """Log writer that buffers partial lines and logs each complete line."""

    def write(self, data):
        # Normalise all line endings to '\n' before splitting.
        data = data.replace('\r\n', '\n').replace('\r', '\n')
        if '\n' not in data:
            self.buffer += data
            return self
        pieces = data.split('\n')
        pieces[0] = self.buffer + pieces[0]
        # The final piece is the (possibly empty) unterminated remainder.
        self.buffer = pieces.pop()
        for line in pieces:
            self.do_write(line)
        return self
+
+
class LineLogWriter(BaseLogWriter):
    """Log writer that treats every ``write`` call as one complete line and
    logs it immediately (no buffering)."""

    def write(self, data):
        self.do_write(data)
+
+
class StreamLogger(threading.Thread):
    """
    Logs output from a stream in a thread.

    The thread is a daemon and must be started by the caller; it runs until
    the stream is exhausted, then closes its writer.
    """

    def __init__(self, name, stream, level=logging.DEBUG, klass=LogWriter):
        super(StreamLogger, self).__init__()
        self.writer = klass(name, level)
        self.stream = stream
        self.daemon = True

    def run(self):
        # readline() returns '' only at EOF (blank lines come back as '\n').
        for line in iter(self.stream.readline, ''):
            self.writer.write(line.rstrip('\n'))
        self.writer.close()
diff --git a/wlauto/utils/misc.py b/wlauto/utils/misc.py
new file mode 100644
index 00000000..279667ca
--- /dev/null
+++ b/wlauto/utils/misc.py
@@ -0,0 +1,703 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+"""
+Miscellaneous functions that don't fit anywhere else.
+
+"""
+from __future__ import division
+import os
+import sys
+import re
+import math
+import imp
+import string
+import threading
+import signal
+import subprocess
+import pkgutil
+import traceback
+import logging
+import random
+from datetime import datetime, timedelta
+from operator import mul
+from StringIO import StringIO
+from itertools import cycle
+from functools import partial
+from distutils.spawn import find_executable
+
+import yaml
+from dateutil import tz
+
+
def preexec_function():
    """Run in the child process between fork and exec (see the
    ``preexec_fn`` argument of ``subprocess.Popen``)."""
    # Ignore the SIGINT signal by setting the handler to the standard
    # signal handler SIG_IGN.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    # Change process group in case we have to kill the subprocess and all of
    # its children later.
    # TODO: this is Unix-specific; would be good to find an OS-agnostic way
    # to do this in case we wanna port WA to Windows.
    os.setpgrp()
+
+
+check_output_logger = logging.getLogger('check_output')
+
+
# Defined here rather than in wlauto.exceptions due to module load dependencies
class TimeoutError(Exception):
    """Raised when a subprocess command times out. This is basically a ``WAError``-derived version
    of ``subprocess.CalledProcessError``, the thinking being that while a timeout could be due to
    programming error (e.g. not setting long enough timers), it is often due to some failure in the
    environment, and therefore should be classed as a "user error"."""

    def __init__(self, command, output):
        super(TimeoutError, self).__init__('Timed out: {}'.format(command))
        self.command = command  # the command that timed out
        self.output = output    # whatever the process produced before it was killed

    def __str__(self):
        # NOTE(review): relies on Exception.message, which exists on Python 2
        # only; under Python 3 this would need self.args[0].
        return '\n'.join([self.message, 'OUTPUT:', self.output or ''])
+
+
def check_output(command, timeout=None, **kwargs):
    """This is a version of subprocess.check_output that adds a timeout parameter to kill
    the subprocess if it does not return within the specified time.

    Returns ``(stdout, stderr)``. Raises :class:`TimeoutError` if the process
    was killed by the timeout, or ``subprocess.CalledProcessError`` for any
    other non-zero exit code.
    """

    def callback(pid):
        try:
            check_output_logger.debug('{} timed out; sending SIGKILL'.format(pid))
            # The child was put in its own process group by preexec_function,
            # so killpg takes out the child and all of its descendants.
            os.killpg(pid, signal.SIGKILL)
        except OSError:
            pass  # process may have already terminated.

    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               preexec_fn=preexec_function, **kwargs)

    if timeout:
        timer = threading.Timer(timeout, callback, [process.pid, ])
        timer.start()

    try:
        output, error = process.communicate()
    finally:
        # Cancel the timer even if communicate() raised.
        if timeout:
            timer.cancel()

    retcode = process.poll()
    if retcode:
        if retcode == -9:  # killed, assume due to timeout callback
            raise TimeoutError(command, output='\n'.join([output, error]))
        else:
            raise subprocess.CalledProcessError(retcode, command, output='\n'.join([output, error]))
    return output, error
+
+
def walk_modules(path):
    """
    Given package name, return a list of all modules (including submodules, etc)
    in that package.

    """
    # A non-empty fromlist makes __import__ return the leaf module rather
    # than the top-level package.
    root_mod = __import__(path, {}, {}, [''])
    mods = [root_mod]
    for _, name, ispkg in pkgutil.iter_modules(root_mod.__path__):
        submod_path = '.'.join([path, name])
        if ispkg:
            # Recurse into sub-packages.
            mods.extend(walk_modules(submod_path))
        else:
            submod = __import__(submod_path, {}, {}, [''])
            mods.append(submod)
    return mods
+
+
def ensure_directory_exists(dirpath):
    """A filter for directory paths to ensure they exist.

    Creates the directory (and any missing parents) if needed and returns
    ``dirpath`` unchanged. Attempt-then-check avoids the check-then-create
    race of the original ``isdir``/``makedirs`` sequence when several
    processes create the same path concurrently.
    """
    try:
        os.makedirs(dirpath)
    except OSError:
        # Re-raise anything other than "it already exists as a directory".
        if not os.path.isdir(dirpath):
            raise
    return dirpath
+
+
def ensure_file_directory_exists(filepath):
    """
    A filter for file paths to ensure the directory of the
    file exists and the file can be created there. The file
    itself is *not* going to be created if it doesn't already
    exist.

    Returns ``filepath`` unchanged.
    """
    # Delegates directory creation to ensure_directory_exists (above).
    ensure_directory_exists(os.path.dirname(filepath))
    return filepath
+
+
def diff_tokens(before_token, after_token):
    """
    Creates a diff of two tokens.

    If the two tokens are the same it just returns the token (whitespace
    tokens are considered the same irrespective of type/number of whitespace
    characters in the token).

    If the tokens are numeric, the difference between the two values is
    returned.

    Otherwise, a string in the form [before -> after] is returned.

    """
    if before_token.isspace() and after_token.isspace():
        return after_token
    if before_token.isdigit() and after_token.isdigit():
        try:
            return str(int(after_token) - int(before_token))
        except ValueError:
            # isdigit() can be true for digit characters int() rejects.
            return "[%s -> %s]" % (before_token, after_token)
    if before_token == after_token:
        return after_token
    return "[%s -> %s]" % (before_token, after_token)
+
+
def prepare_table_rows(rows):
    """Given a list of lists, make sure they are prepared to be formatted into a table
    by making sure each row has the same number of columns and stringifying all values.

    Returns a list of lists of strings, all of equal length. Behaviour is
    unchanged, but the comprehension/`range` forms work on both Python 2 and 3
    (the original relied on Python-2-only list-returning ``map`` and ``xrange``).
    """
    rows = [[str(value) for value in row] for row in rows]
    max_cols = max(len(row) for row in rows)
    for row in rows:
        # Pad short rows with empty cells.
        row.extend([''] * (max_cols - len(row)))
    return rows
+
+
def write_table(rows, wfh, align='>', headers=None):  # pylint: disable=R0914
    """Write a column-aligned table to the specified file object.

    :param rows: list of rows (lists of cell values).
    :param wfh: writable file-like object.
    :param align: alignment character(s) ('<', '>', '^'), cycled across columns.
    :param headers: optional list of column headers (underlined with dashes).

    Uses ``next(it)``/``range`` instead of the Python-2-only ``it.next()`` /
    ``xrange``; behaviour is otherwise unchanged.
    """
    if not rows:
        return
    rows = prepare_table_rows(rows)
    num_cols = len(rows[0])

    # cycle specified alignments until we have num_cols of them. This is
    # consistent with how such cases are handled in R, pandas, etc.
    it = cycle(align)
    align = [next(it) for _ in range(num_cols)]

    cols = zip(*rows)
    col_widths = [max(map(len, c)) for c in cols]
    row_format = ' '.join(['{:%s%s}' % (align[i], w) for i, w in enumerate(col_widths)])
    row_format += '\n'

    if headers:
        wfh.write(row_format.format(*headers))
        underlines = ['-' * len(h) for h in headers]
        wfh.write(row_format.format(*underlines))

    for row in rows:
        wfh.write(row_format.format(*row))
+
+
def get_null():
    """Returns the correct null sink based on the OS."""
    if os.name == 'nt':
        return 'NUL'
    return '/dev/null'
+
+
def get_traceback(exc=None):
    """
    Returns the string with the traceback for the specified exc
    object, or for the current exception if exc is not specified.
    Returns ``None`` when there is no traceback to render.

    """
    # Local fallback keeps this usable on both Python 2 and 3.
    try:
        from StringIO import StringIO  # pylint: disable=redefined-outer-name
    except ImportError:
        from io import StringIO
    if exc is None:
        exc = sys.exc_info()
    # sys.exc_info() returns (None, None, None) when no exception is being
    # handled; that tuple is truthy, so the original ``if not exc`` check
    # never caught it and print_tb was handed a None traceback.
    tb = exc[2] if exc else None
    if tb is None:
        return None
    sio = StringIO()
    traceback.print_tb(tb, file=sio)
    del tb  # needs to be done explicitly see: http://docs.python.org/2/library/sys.html#sys.exc_info
    return sio.getvalue()
+
+
def merge_dicts(*args, **kwargs):
    """Merge two or more dicts left-to-right via ``_merge_two_dicts``;
    keyword arguments are forwarded to every pairwise merge."""
    if not len(args) >= 2:
        raise ValueError('Must specify at least two dicts to merge.')
    func = partial(_merge_two_dicts, **kwargs)
    # NOTE(review): ``reduce`` is a builtin on Python 2 only.
    return reduce(func, args)
+
+
def _merge_two_dicts(base, other, list_duplicates='all', match_types=False,  # pylint: disable=R0912,R0914
                     dict_type=dict, should_normalize=True, should_merge_lists=True):
    """Merge dicts normalizing their keys.

    Keys present in only one input are copied (normalized); keys present in
    both are merged recursively for dicts, merged per ``list_duplicates`` for
    lists, unioned for sets, and otherwise taken from ``other`` (``other``
    wins for scalars). ``match_types`` raises on mismatched value types.
    """
    merged = dict_type()
    base_keys = base.keys()
    other_keys = other.keys()
    # Pass-through with the same signature when normalization is disabled.
    norm = normalize if should_normalize else lambda x, y: x

    base_only = []
    other_only = []
    both = []
    union = []
    for k in base_keys:
        if k in other_keys:
            both.append(k)
        else:
            base_only.append(k)
        union.append(k)
    for k in other_keys:
        # Shared keys were already added to union above; the original also
        # appended them here, so every shared key was merged twice.
        if k not in base_keys:
            union.append(k)
            other_only.append(k)

    for k in union:
        if k in base_only:
            merged[k] = norm(base[k], dict_type)
        elif k in other_only:
            merged[k] = norm(other[k], dict_type)
        elif k in both:
            base_value = base[k]
            other_value = other[k]
            base_type = type(base_value)
            other_type = type(other_value)
            if (match_types and (base_type != other_type) and
                    (base_value is not None) and (other_value is not None)):
                raise ValueError('Type mismatch for {} got {} ({}) and {} ({})'.format(k, base_value, base_type,
                                                                                       other_value, other_type))
            if isinstance(base_value, dict):
                merged[k] = _merge_two_dicts(base_value, other_value, list_duplicates, match_types, dict_type)
            elif isinstance(base_value, list):
                if should_merge_lists:
                    merged[k] = _merge_two_lists(base_value, other_value, list_duplicates, dict_type)
                else:
                    merged[k] = _merge_two_lists([], other_value, list_duplicates, dict_type)
            elif isinstance(base_value, set):
                merged[k] = norm(base_value.union(other_value), dict_type)
            else:
                merged[k] = norm(other_value, dict_type)
        else:  # Should never get here
            raise AssertionError('Unexpected merge key: {}'.format(k))

    return merged
+
+
def merge_lists(*args, **kwargs):
    """Merge two or more lists left-to-right via ``_merge_two_lists``;
    keyword arguments are forwarded to every pairwise merge."""
    if not len(args) >= 2:
        raise ValueError('Must specify at least two lists to merge.')
    func = partial(_merge_two_lists, **kwargs)
    # NOTE(review): ``reduce`` is a builtin on Python 2 only.
    return reduce(func, args)
+
+
def _merge_two_lists(base, other, duplicates='all', dict_type=dict):  # pylint: disable=R0912
    """Merge lists, normalizing their entries.

    :param duplicates: how repeated values are handled --
        ``'all'`` keeps every occurrence, ``'first'`` keeps the first
        occurrence of each value, ``'last'`` keeps the last.
    Entries prefixed with ``'~'`` remove the matching entry instead
    (see ``_check_remove_item``).
    """
    if duplicates == 'all':
        merged_list = []
        for v in normalize(base, dict_type) + normalize(other, dict_type):
            if not _check_remove_item(merged_list, v):
                merged_list.append(v)
        return merged_list
    elif duplicates == 'first':
        merged_list = []
        for v in normalize(base + other, dict_type):
            if not _check_remove_item(merged_list, v):
                if v not in merged_list:
                    merged_list.append(v)
        return merged_list
    elif duplicates == 'last':
        merged_list = []
        for v in normalize(base + other, dict_type):
            if not _check_remove_item(merged_list, v):
                if v in merged_list:
                    del merged_list[merged_list.index(v)]
                merged_list.append(v)
        return merged_list
    else:
        # Error message typo fixed ("duplcates" -> "duplicates").
        raise ValueError('Unexpected value for list duplicates argument: {}. '.format(duplicates) +
                         'Must be in {"all", "first", "last"}.')
+
+
def _check_remove_item(the_list, item):
    """Helper function for merge_lists that implements checking whether an item
    should be removed from the list and doing so if needed. Returns ``True`` if
    the item has been removed and ``False`` otherwise."""
    # Only strings may carry the '~' removal marker.
    # NOTE(review): ``basestring`` is Python 2 only.
    if not isinstance(item, basestring):
        return False
    if not item.startswith('~'):
        return False
    actual_item = item[1:]
    if actual_item in the_list:
        del the_list[the_list.index(actual_item)]
    return True
+
+
def normalize(value, dict_type=dict):
    """Normalize values. Recursively normalizes dict keys to be lower case,
    no surrounding whitespace, underscore-delimited strings."""
    if isinstance(value, dict):
        normalized = dict_type()
        # NOTE(review): ``iteritems`` is Python 2 only.
        for k, v in value.iteritems():
            key = k.strip().lower().replace(' ', '_')
            normalized[key] = normalize(v, dict_type)
        return normalized
    elif isinstance(value, list):
        return [normalize(v, dict_type) for v in value]
    elif isinstance(value, tuple):
        return tuple([normalize(v, dict_type) for v in value])
    else:
        # Scalars (and any other type) pass through unchanged.
        return value
+
+
# Matches a number (int or decimal) optionally followed by a unit suffix.
VALUE_REGEX = re.compile(r'(\d+(?:\.\d+)?)\s*(\w*)')

# Expansion of common unit abbreviations to full unit names.
UNITS_MAP = {
    's': 'seconds',
    'ms': 'milliseconds',
    'us': 'microseconds',
    'ns': 'nanoseconds',
    'V': 'volts',
    'A': 'amps',
    'mA': 'milliamps',
    'J': 'joules',
}


def parse_value(value_string):
    """parses a string representing a numerical value and returns
    a tuple (value, units), where value will be either int or float,
    and units will be a string representing the units or None."""
    match = VALUE_REGEX.search(value_string)
    if not match:
        return (value_string, None)
    number_text = match.group(1)
    number = float(number_text) if '.' in number_text else int(number_text)
    unit_abbrev = match.group(2)
    return (number, UNITS_MAP.get(unit_abbrev, unit_abbrev))
+
+
def get_meansd(values):
    """Returns mean and (population) standard deviation of the specified
    values; (nan, nan) for an empty sequence."""
    if not values:
        return float('nan'), float('nan')
    n = len(values)
    mean = sum(values) / n
    # E[X^2] - (E[X])^2 form of the population variance.
    variance = sum(v * v for v in values) / n - mean * mean
    return mean, math.sqrt(variance)
+
+
def geomean(values):
    """Returns the geometric mean of the values."""
    # nth root of the product. Raises on an empty sequence (reduce with no
    # initializer); callers must supply at least one value.
    # NOTE(review): ``reduce`` is a builtin on Python 2 only.
    return reduce(mul, values) ** (1.0 / len(values))
+
+
def capitalize(text):
    """Capitalises the specified text: first letter upper case,
    all subsequent letters lower case."""
    if not text:
        return ''
    head, tail = text[0], text[1:]
    return head.upper() + tail.lower()
+
+
def convert_new_lines(text):
    """Normalise Windows (CRLF) and bare-CR line endings to Unix (LF)."""
    for ending in ('\r\n', '\r'):
        text = text.replace(ending, '\n')
    return text
+
+
def escape_quotes(text):
    """Escape quotes, and escaped quotes, in the specified text."""
    # First double-up existing backslash escapes of either quote, then
    # backslash-escape both single and double quotes.
    return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\'', '\\\'').replace('\"', '\\\"')
+
+
def escape_single_quotes(text):
    """Escape single quotes, and escaped single quotes, in the specified text."""
    # Double-up existing backslash escapes, then wrap each single quote in
    # the shell-safe '\'' sequence.
    return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\'', '\'\\\'\'')
+
+
def escape_double_quotes(text):
    """Escape double quotes, and escaped double quotes, in the specified text."""
    # Double-up existing backslash escapes, then backslash-escape double quotes.
    return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\"', '\\\"')
+
+
def getch(count=1):
    """Read ``count`` characters from standard input (raw/unbuffered --
    does not wait for a newline)."""
    if os.name == 'nt':
        import msvcrt  # pylint: disable=F0401
        return ''.join([msvcrt.getch() for _ in xrange(count)])
    else:  # assume Unix
        import tty  # NOQA
        import termios  # NOQA
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            # Raw mode: deliver keystrokes immediately, no echo/line editing.
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(count)
        finally:
            # Always restore the previous terminal state.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch
+
+
def isiterable(obj):
    """Returns ``True`` if the specified object is iterable and
    *is not a string type*, ``False`` otherwise."""
    # NOTE(review): ``basestring`` is Python 2 only.
    return hasattr(obj, '__iter__') and not isinstance(obj, basestring)
+
+
def utc_to_local(dt):
    """Convert naive datetime to local time zone, assuming UTC."""
    # Attach UTC tzinfo, then convert to the local zone (dateutil).
    return dt.replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal())
+
+
def local_to_utc(dt):
    """Convert naive datetime to UTC, assuming local time zone."""
    # Attach local tzinfo, then convert to UTC (dateutil).
    return dt.replace(tzinfo=tz.tzlocal()).astimezone(tz.tzutc())
+
+
def as_relative(path):
    """Convert path to relative by stripping away the leading '/' on UNIX or
    the equivalent (drive letter plus separator) on other platforms."""
    drive_relative = os.path.splitdrive(path)[1]
    return drive_relative.lstrip(os.sep)
+
+
def get_cpu_mask(cores):
    """Return a string with the hex for the cpu mask for the specified core numbers."""
    # OR-ing bits is equivalent to summing one bit per *distinct* core.
    mask = sum(1 << core for core in set(cores))
    return '0x{:x}'.format(mask)
+
+
def load_class(classpath):
    """Loads the specified Python class. ``classpath`` must be a fully-qualified
    class name (i.e. namespaced under module/package)."""
    modname, clsname = classpath.rsplit('.', 1)
    # __import__('a.b.c') returns the *top-level* package ('a'), so nested
    # classpaths failed to resolve; a non-empty fromlist makes it return the
    # leaf module instead.
    mod = __import__(modname, fromlist=[clsname])
    return getattr(mod, clsname)
+
+
def get_pager():
    """Returns the name of the system pager program.

    Prefers the PAGER environment variable, then ``less``, then ``more``;
    returns ``None`` if none of them is available.
    """
    return os.getenv('PAGER') or find_executable('less') or find_executable('more')
+
+
def enum_metaclass(enum_param, return_name=False, start=0):
    """
    Returns a ``type`` subclass that may be used as a metaclass for
    an enum.

    Parameters:

    :enum_param: the name of class attribute that defines enum values.
                 The metaclass will add a class attribute for each value in
                 ``enum_param``. The value of the attribute depends on the type
                 of ``enum_param`` and on the values of ``return_name``. If
                 ``return_name`` is ``True``, then the value of the new attribute is
                 the name of that attribute; otherwise, if ``enum_param`` is a ``list``
                 or a ``tuple``, the value will be the index of that param in
                 ``enum_param``, optionally offset by ``start``, otherwise, it will
                 be assumed that ``enum_param`` implements a dict-like interface and
                 the value will be ``enum_param[attr_name]``.
    :return_name: If ``True``, the enum values will be the names of enum attributes. If
                  ``False``, the default, the values will depend on the type of
                  ``enum_param`` (see above).
    :start: If ``enum_param`` is a list or a tuple, and ``return_name`` is ``False``,
            this specifies an "offset" that will be added to the index of the attribute
            within ``enum_param`` to form the value.


    """
    class __EnumMeta(type):
        def __new__(mcs, clsname, bases, attrs):
            # Create the class first so the enum_param attribute (possibly
            # inherited) can be looked up on it.
            cls = type.__new__(mcs, clsname, bases, attrs)
            values = getattr(cls, enum_param, [])
            if return_name:
                for name in values:
                    setattr(cls, name, name)
            else:
                if isinstance(values, list) or isinstance(values, tuple):
                    for i, name in enumerate(values):
                        setattr(cls, name, i + start)
                else:  # assume dict-like
                    for name in values:
                        setattr(cls, name, values[name])
            return cls
    return __EnumMeta
+
+
def which(name):
    """Platform-independent version of UNIX which utility.

    Returns the full path of the executable ``name`` or ``None`` if it could
    not be found.
    """
    if os.name == 'nt':
        paths = os.getenv('PATH').split(os.pathsep)
        exts = os.getenv('PATHEXT').split(os.pathsep)
        for path in paths:
            testpath = os.path.join(path, name)
            if os.path.isfile(testpath):
                return testpath
            # Also try each executable extension (.exe, .bat, ...).
            for ext in exts:
                testpathext = testpath + ext
                if os.path.isfile(testpathext):
                    return testpathext
        return None
    else:  # assume UNIX-like
        try:
            # Delegates to the system 'which' via this module's check_output.
            return check_output(['which', name])[0].strip()
        except subprocess.CalledProcessError:
            return None
+
+
# Matches ANSI SGR escape sequences (e.g. '\x1b[1;31m') as emitted by bash.
_bash_color_regex = re.compile('\x1b\[[0-9;]+m')


def strip_bash_colors(text):
    """Return ``text`` with any ANSI colour escape sequences removed."""
    return _bash_color_regex.sub('', text)
+
+
def format_duration(seconds, sep=' ', order=('day', 'hour', 'minute', 'second')):
    """
    Formats the specified number of seconds into human-readable duration.

    :param seconds: a number of seconds or a ``timedelta``.
    :param sep: separator between components.
    :param order: components to render, largest first.

    NOTE(review): implemented via datetime arithmetic from year 1, so
    durations of roughly a month or more silently roll over into
    months/years and will be reported incorrectly -- confirm inputs stay
    below ~28 days.
    """
    if isinstance(seconds, timedelta):
        td = seconds
    else:
        td = timedelta(seconds=seconds)
    dt = datetime(1, 1, 1) + td
    result = []
    for item in order:
        value = getattr(dt, item, None)
        # datetime days are 1-based; == replaces the fragile identity
        # comparison (``is``) on a string literal.
        if item == 'day':
            value -= 1
        if not value:
            continue
        suffix = '' if value == 1 else 's'
        result.append('{} {}{}'.format(value, item, suffix))
    return sep.join(result)
+
+
def get_article(word):
    """
    Returns the appropriate indefinite article for the word (ish).

    .. note:: Indefinite article assignment in English is based on
              sound rather than spelling, so this will not work correctly
              in all cases; e.g. this will return ``"a hour"``.

    """
    if word[0] in 'aoeiu':
        return 'an'
    return 'a'
+
+
def get_random_string(length):
    """Returns a random ASCII (alphanumeric) string of the specified length.

    Uses ``range`` instead of the Python-2-only ``xrange`` (identical
    behaviour on Python 2, portable to Python 3).
    """
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
+
+
class LoadSyntaxError(Exception):
    """Raised when a config structure file (.py/.yaml) fails to parse;
    carries the offending file path and line number."""

    def __init__(self, message, filepath, lineno):
        super(LoadSyntaxError, self).__init__(message)
        self.filepath = filepath
        self.lineno = lineno

    def __str__(self):
        message = 'Syntax Error in {}, line {}:\n\t{}'
        # NOTE(review): Exception.message exists on Python 2 only.
        return message.format(self.filepath, self.lineno, self.message)
+
+
# Length of the random module names generated by load_struct_from_python.
RAND_MOD_NAME_LEN = 30
# Characters that are invalid in Python identifiers.
BAD_CHARS = string.punctuation + string.whitespace
# Translation table mapping every bad character to '_'.
# NOTE(review): string.maketrans is Python 2 only (str.maketrans on Python 3).
TRANS_TABLE = string.maketrans(BAD_CHARS, '_' * len(BAD_CHARS))
+
+
def to_identifier(text):
    """Converts text to a valid Python identifier by replacing all
    whitespace and punctuation, collapsing runs of '_' to a single one."""
    return re.sub('_+', '_', text.translate(TRANS_TABLE))
+
+
def load_struct_from_python(filepath=None, text=None):
    """Parses a config structure from a .py file. The structure should be composed
    of basic Python types (strings, ints, lists, dicts, etc.).

    Exactly one of ``filepath`` or ``text`` must be specified.

    :raises ValueError: if neither or both of filepath/text are given.
    :raises LoadSyntaxError: if the source fails to parse.
    """
    if not (filepath or text) or (filepath and text):
        raise ValueError('Exactly one of filepath or text must be specified.')
    try:
        if filepath:
            modname = to_identifier(filepath)
            mod = imp.load_source(modname, filepath)
        else:
            modname = get_random_string(RAND_MOD_NAME_LEN)
            while modname in sys.modules:  # highly unlikely, but...
                modname = get_random_string(RAND_MOD_NAME_LEN)
            mod = imp.new_module(modname)
            exec(text, mod.__dict__)  # pylint: disable=exec-used
        # Everything the module defines, minus private/dunder names.
        return dict((k, v)
                    for k, v in mod.__dict__.iteritems()
                    if not k.startswith('_'))
    except SyntaxError as e:
        # SyntaxError carries .msg and .filename; the original accessed
        # .message/.filepath, which raised AttributeError while reporting.
        raise LoadSyntaxError(e.msg, e.filename, e.lineno)
+
+
def load_struct_from_yaml(filepath=None, text=None):
    """Parses a config structure from a .yaml file. The structure should be composed
    of basic Python types (strings, ints, lists, dicts, etc.).

    Exactly one of ``filepath`` or ``text`` must be specified.
    """
    if not (filepath or text) or (filepath and text):
        raise ValueError('Exactly one of filepath or text must be specified.')
    try:
        if filepath:
            with open(filepath) as fh:
                # WARNING: yaml.load without an explicit Loader can construct
                # arbitrary Python objects -- do not feed it untrusted input
                # (yaml.safe_load is the safe alternative).
                return yaml.load(fh)
        else:
            return yaml.load(text)
    except yaml.YAMLError as e:
        lineno = None
        if hasattr(e, 'problem_mark'):
            lineno = e.problem_mark.line  # 0-based line of the parse problem
        # NOTE(review): e.message is Python 2 only.
        raise LoadSyntaxError(e.message, filepath=filepath, lineno=lineno)
+
+
def load_struct_from_file(filepath):
    """
    Attempts to parse a Python structure consisting of basic types from the specified file.
    Raises a ``ValueError`` if the specified file is of unknown format; ``LoadSyntaxError`` if
    there is an issue parsing the file.

    """
    extension = os.path.splitext(filepath)[1].lower()
    if extension in ('.py', '.pyc', '.pyo'):
        return load_struct_from_python(filepath)
    if extension == '.yaml':
        return load_struct_from_yaml(filepath)
    raise ValueError('Unknown format "{}": {}'.format(extension, filepath))
+
+
def unique(alist):
    """
    Returns a list containing only unique elements from the input list (but preserves
    order, unlike sets).

    Works for unhashable elements too (membership test on a list), at the
    cost of O(n^2) behaviour.
    """
    seen = []
    for element in alist:
        if element not in seen:
            seen.append(element)
    return seen
diff --git a/wlauto/utils/netio.py b/wlauto/utils/netio.py
new file mode 100644
index 00000000..e6c2a7e0
--- /dev/null
+++ b/wlauto/utils/netio.py
@@ -0,0 +1,98 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+"""
+This module contains utilities for implementing device hard reset
+using Netio 230 series power switches. This utilizes the KSHELL connection.
+
+"""
+
+import telnetlib
+import socket
+import re
+import time
+import logging
+
+
+logger = logging.getLogger('NetIO')
+
+
+class NetioError(Exception):
+    """Raised on a failure communicating with a Netio power switch."""
+    pass
+
+
+class KshellConnection(object):
+    """Telnet (KSHELL) connection to a Netio 230 series power switch."""
+
+    response_regex = re.compile(r'^(\d+) (.*?)\r\n')  # "<code> <message>" reply line
+    delay = 0.5  # seconds to wait for the device to respond
+
+    def __init__(self, host='ippowerbar', port=1234, timeout=None):
+        """Parameters are passed into ``telnetlib.Telnet`` -- see Python docs."""
+        self.host = host
+        self.port = port
+        self.conn = telnetlib.Telnet(host, port, timeout)
+        time.sleep(self.delay)  # give time to respond
+        output = self.conn.read_very_eager()
+        if 'HELLO' not in output:
+            raise NetioError('Could not connect: did not see a HELLO. Got: {}'.format(output))
+
+    def login(self, user, password):
+        """Authenticate with the switch. Raises ``NetioError`` unless the
+        device replies with response code 250."""
+        code, out = self.send_command('login {} {}\r\n'.format(user, password))
+        if not code == 250:
+            raise NetioError('Login failed. Got: {} {}'.format(code, out))
+
+    def enable_port(self, port):
+        """Enable the power supply at the specified port."""
+        self.set_port(port, 1)
+
+    def disable_port(self, port):
+        """Disable the power supply at the specified port."""
+        self.set_port(port, 0)
+
+    def set_port(self, port, value):
+        """Set the specified port to ``value`` (1 = on, 0 = off); raises
+        ``NetioError`` unless the device replies with response code 250."""
+        code, out = self.send_command('port {} {}'.format(port, value))
+        if not code == 250:
+            raise NetioError('Could not set {} on port {}. Got: {} {}'.format(value, port, code, out))
+
+    def send_command(self, command):
+        """Send ``command`` over the telnet connection and return the parsed
+        ``(code, message)`` response tuple. Raises ``NetioError`` if the reply
+        does not match the expected "<code> <message>" format."""
+        try:
+            if command.startswith('login'):
+                # Mask the password so it does not end up in the debug log.
+                parts = command.split()
+                parts[2] = '*' * len(parts[2])
+                logger.debug(' '.join(parts))
+            else:
+                logger.debug(command)
+            self.conn.write('{}\n'.format(command))
+            time.sleep(self.delay)  # give time to respond
+            out = self.conn.read_very_eager()
+            match = self.response_regex.search(out)
+            if not match:
+                raise NetioError('Invalid response: {}'.format(out.strip()))
+            logger.debug('response: {} {}'.format(match.group(1), match.group(2)))
+            return int(match.group(1)), match.group(2)
+        except socket.error as err:
+            # Check whether the socket error was caused by the device timing
+            # out the connection; report that more specifically if so.
+            try:
+                time.sleep(self.delay)  # give time to respond
+                out = self.conn.read_very_eager()
+                if out.startswith('130 CONNECTION TIMEOUT'):
+                    raise NetioError('130 Timed out.')
+            except EOFError:
+                pass
+            raise err
+
+    def close(self):
+        """Close the underlying telnet connection."""
+        self.conn.close()
+
diff --git a/wlauto/utils/serial_port.py b/wlauto/utils/serial_port.py
new file mode 100644
index 00000000..51b55189
--- /dev/null
+++ b/wlauto/utils/serial_port.py
@@ -0,0 +1,111 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import time
+from contextlib import contextmanager
+
+import serial
+import fdpexpect
+# Adding pexpect exceptions into this module's namespace
+from pexpect import EOF, TIMEOUT # NOQA pylint: disable=W0611
+
+from wlauto.exceptions import HostError
+from wlauto.utils.log import LogWriter
+
+
+class PexpectLogger(LogWriter):
+
+    def __init__(self, kind):
+        """
+        File-like object class designed to be used for logging with pexpect or
+        fdpexpect. Each complete line (terminated by new line character) gets logged
+        at DEBUG level. Incomplete lines are buffered until the next new line.
+
+        :param kind: This specifies which of pexpect logfile attributes this logger
+                     will be set to. It should be "read" for logfile_read, "send" for
+                     logfile_send, and "" (empty string) for logfile.
+
+        """
+        if kind not in ('read', 'send', ''):
+            raise ValueError('kind must be "read", "send" or ""; got {}'.format(kind))
+        self.kind = kind
+        logger_name = 'serial_{}'.format(kind) if kind else 'serial'
+        super(PexpectLogger, self).__init__(logger_name)
+
+
+def pulse_dtr(conn, state=True, duration=0.1):
+    """Set the DTR line of the specified serial connection to the specified state
+    for the specified duration (note: the initial state of the line is *not* checked),
+    then flip it back to the opposite state."""
+    conn.setDTR(state)
+    time.sleep(duration)
+    conn.setDTR(not state)
+
+
+@contextmanager
+def open_serial_connection(timeout, get_conn=False, init_dtr=None, *args, **kwargs):
+    """
+    Opens a serial connection to a device.
+
+    :param timeout: timeout for the fdpexpect spawn object.
+    :param get_conn: ``bool`` that specifies whether the underlying connection
+                     object should be yielded as well.
+    :param init_dtr: specifies the initial DTR state that should be set.
+
+    All arguments are passed into the __init__ of serial.Serial. See
+    pyserial documentation for details:
+
+        http://pyserial.sourceforge.net/pyserial_api.html#serial.Serial
+
+    :returns: a pexpect spawn object connected to the device.
+              See: http://pexpect.sourceforge.net/pexpect.html
+
+    """
+    if init_dtr is not None:
+        # dsrdtr must be enabled for the DTR state to be controllable below.
+        kwargs['dsrdtr'] = True
+    try:
+        conn = serial.Serial(*args, **kwargs)
+    except serial.SerialException as e:
+        raise HostError(e.message)
+    if init_dtr is not None:
+        conn.setDTR(init_dtr)
+    conn.nonblocking()
+    conn.flushOutput()
+    target = fdpexpect.fdspawn(conn.fileno(), timeout=timeout)
+    target.logfile_read = PexpectLogger('read')
+    target.logfile_send = PexpectLogger('send')
+
+    # Monkey-patching sendline to introduce a short delay after
+    # characters are sent to the serial. If two sendlines are issued
+    # one after another the second one might start putting characters
+    # into the serial device before the first one has finished, causing
+    # corruption. The delay prevents that.
+    tsln = target.sendline
+
+    def sendline(x):
+        tsln(x)
+        time.sleep(0.1)
+
+    target.sendline = sendline
+
+    if get_conn:
+        yield target, conn
+    else:
+        yield target
+
+    # NOTE(review): there is no try/finally around the yield, so if the caller's
+    # with-block raises, the cleanup below is skipped -- confirm this is intended.
+    target.close()  # Closes the file descriptor used by the conn.
+    del conn
+
+
diff --git a/wlauto/utils/ssh.py b/wlauto/utils/ssh.py
new file mode 100644
index 00000000..d0e89f13
--- /dev/null
+++ b/wlauto/utils/ssh.py
@@ -0,0 +1,198 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import logging
+import subprocess
+import re
+
+import pxssh
+from pexpect import EOF, TIMEOUT, spawn
+
+from wlauto.exceptions import HostError, DeviceError, TimeoutError, ConfigError
+from wlauto.utils.misc import which, strip_bash_colors, escape_single_quotes, check_output
+
+
+ssh = None
+scp = None
+sshpass = None
+
+logger = logging.getLogger('ssh')
+
+
+def ssh_get_shell(host, username, password=None, keyfile=None, port=None, timeout=10, telnet=False):
+    """Open an interactive shell to ``host`` and return the pxssh connection.
+
+    If ``telnet`` is ``True`` a :class:`TelnetConnection` is used instead of
+    SSH (``keyfile`` is not supported in that case and raises ``ConfigError``).
+    A failure to connect is reported as ``DeviceError``.
+
+    """
+    _check_env()
+    if telnet:
+        if keyfile:
+            raise ConfigError('keyfile may not be used with a telnet connection.')
+        conn = TelnetConnection()
+    else:  # ssh
+        conn = pxssh.pxssh()
+    try:
+        if keyfile:
+            conn.SSH_OPTS += ' -i {}'.format(keyfile)
+            conn.login(host, username, port=port, login_timeout=timeout)
+        else:
+            conn.login(host, username, password, port=port, login_timeout=timeout)
+    except EOF:
+        raise DeviceError('Could not connect to {}; is the host name correct?'.format(host))
+    return conn
+
+
+class TelnetConnection(pxssh.pxssh):
+    """A pxssh-compatible connection that logs in over telnet instead of SSH,
+    so the rest of the shell-interaction machinery can be reused unchanged."""
+    # pylint: disable=arguments-differ
+
+    def login(self, server, username, password='', original_prompt=r'[#$]', login_timeout=10,
+              auto_prompt_reset=True, sync_multiplier=1):
+        cmd = 'telnet -l {} {}'.format(username, server)
+
+        spawn._spawn(self, cmd)  # pylint: disable=protected-access
+        i = self.expect('(?i)(?:password)', timeout=login_timeout)
+        if i == 0:
+            self.sendline(password)
+            i = self.expect([original_prompt, 'Login incorrect'], timeout=login_timeout)
+        else:
+            raise pxssh.ExceptionPxssh('could not log in: did not see a password prompt')
+
+        # expect() returned 1 => matched 'Login incorrect' rather than the prompt.
+        if i:
+            raise pxssh.ExceptionPxssh('could not log in: password was incorrect')
+
+        if not self.sync_original_prompt(sync_multiplier):
+            self.close()
+            raise pxssh.ExceptionPxssh('could not synchronize with original prompt')
+
+        if auto_prompt_reset:
+            if not self.set_unique_prompt():
+                self.close()
+                # NOTE(review): "recieved" is misspelled in this user-facing error
+                # message; fixing it would change runtime output.
+                message = 'could not set shell prompt (recieved: {}, expected: {}).'
+                raise pxssh.ExceptionPxssh(message.format(self.before, self.PROMPT))
+        return True
+
+
+class SshShell(object):
+    """Interactive shell on a remote device over SSH (or telnet), with helpers
+    for file transfer (scp) and background command execution."""
+
+    def __init__(self, timeout=10):
+        self.timeout = timeout  # default timeout (seconds) for commands/prompts
+        self.conn = None        # set by login()
+
+    def login(self, host, username, password=None, keyfile=None, port=None, timeout=None, telnet=False):
+        """Open the connection; credentials are stored for later scp/ssh calls."""
+        # pylint: disable=attribute-defined-outside-init
+        logger.debug('Logging in {}@{}'.format(username, host))
+        self.host = host
+        self.username = username
+        self.password = password
+        self.keyfile = keyfile
+        self.port = port
+        timeout = self.timeout if timeout is None else timeout
+        self.conn = ssh_get_shell(host, username, password, keyfile, port, timeout, telnet)
+
+    def push_file(self, source, dest, timeout=30):
+        """Copy a local file to ``dest`` on the device via scp."""
+        dest = '{}@{}:{}'.format(self.username, self.host, dest)
+        return self._scp(source, dest, timeout)
+
+    def pull_file(self, source, dest, timeout=30):
+        """Copy ``source`` from the device to a local ``dest`` via scp."""
+        source = '{}@{}:{}'.format(self.username, self.host, source)
+        return self._scp(source, dest, timeout)
+
+    def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
+        """Run ``command`` on the device via a separate one-shot ssh invocation;
+        returns the ``subprocess.Popen`` handle without waiting for completion."""
+        port_string = '-p {}'.format(self.port) if self.port else ''
+        keyfile_string = '-i {}'.format(self.keyfile) if self.keyfile else ''
+        command = '{} {} {} {}@{} {}'.format(ssh, keyfile_string, port_string, self.username, self.host, command)
+        logger.debug(command)
+        if self.password:
+            command = _give_password(self.password, command)
+        return subprocess.Popen(command, stdout=stdout, stderr=stderr, shell=True)
+
+    def execute(self, command, timeout=None, check_exit_code=True, as_root=False, strip_colors=True):
+        """Run ``command`` in the interactive shell and return its output.
+        If ``check_exit_code`` is set, ``echo $?`` is issued afterwards and a
+        non-zero code raises ``DeviceError``."""
+        output = self._execute_and_wait_for_prompt(command, timeout, as_root, strip_colors)
+        if check_exit_code:
+            exit_code = int(self._execute_and_wait_for_prompt('echo $?', strip_colors=strip_colors, log=False))
+            if exit_code:
+                message = 'Got exit code {}\nfrom: {}\nOUTPUT: {}'
+                raise DeviceError(message.format(exit_code, command, output))
+        return output
+
+    def logout(self):
+        """Terminate the interactive session."""
+        logger.debug('Logging out {}@{}'.format(self.username, self.host))
+        self.conn.logout()
+
+    def _execute_and_wait_for_prompt(self, command, timeout=None, as_root=False, strip_colors=True, log=True):
+        """Send ``command``, wait for the shell prompt, and return the text
+        produced between the echoed command and the prompt. Raises
+        ``TimeoutError`` if the prompt is not seen within ``timeout``."""
+        timeout = self.timeout if timeout is None else timeout
+        if as_root:
+            command = "sudo -- sh -c '{}'".format(escape_single_quotes(command))
+            if log:
+                logger.debug(command)
+            self.conn.sendline(command)
+            # NOTE(review): expect_exact with a short timeout is used to detect an
+            # optional sudo password prompt; confirm '[sudo] password' matches the
+            # target's sudo prompt format.
+            index = self.conn.expect_exact(['[sudo] password', TIMEOUT], timeout=0.5)
+            if index == 0:
+                self.conn.sendline(self.password)
+            timed_out = not self.conn.prompt(timeout)
+            # Strip everything up to and including the echoed command.
+            output = re.sub(r'.*?{}'.format(re.escape(command)), '', self.conn.before, 1).strip()
+        else:
+            if log:
+                logger.debug(command)
+            self.conn.sendline(command)
+            timed_out = not self.conn.prompt(timeout)
+            # the regex removes line breaks potentially introduced when writing
+            # command to shell.
+            command_index = re.sub(r' \r([^\n])', r'\1', self.conn.before).find(command)
+            while not timed_out and command_index == -1:
+                # In case of a "premature" timeout (i.e. timeout, but no hang,
+                # so command completes afterwards), there may be a prompt from
+                # the previous command completion in the serial output. This
+                # checks for this case by making sure that the original command
+                # is present in the serial output and waiting for the next
+                # prompt if it is not.
+                timed_out = not self.conn.prompt(timeout)
+                command_index = re.sub(r' \r([^\n])', r'\1', self.conn.before).find(command)
+            output = self.conn.before[command_index + len(command):].strip()
+        if timed_out:
+            raise TimeoutError(command, output)
+        if strip_colors:
+            output = strip_bash_colors(output)
+        return output
+
+    def _scp(self, source, dest, timeout=30):
+        """Invoke scp on the host to transfer ``source`` to ``dest``."""
+        port_string = '-P {}'.format(self.port) if self.port else ''
+        keyfile_string = '-i {}'.format(self.keyfile) if self.keyfile else ''
+        command = '{} -r {} {} {} {}'.format(scp, keyfile_string, port_string, source, dest)
+        # NOTE(review): pass_string stays empty, so the .replace() calls below do
+        # not actually redact the sshpass password from re-raised errors -- verify.
+        pass_string = ''
+        logger.debug(command)
+        if self.password:
+            command = _give_password(self.password, command)
+        try:
+            check_output(command, timeout=timeout, shell=True)
+        except subprocess.CalledProcessError as e:
+            raise subprocess.CalledProcessError(e.returncode, e.cmd.replace(pass_string, ''), e.output)
+        except TimeoutError as e:
+            raise TimeoutError(e.command.replace(pass_string, ''), e.output)
+
+
+def _give_password(password, command):
+    """Prefix ``command`` with an sshpass invocation supplying ``password``.
+    Raises ``HostError`` if sshpass is not installed on the host."""
+    if not sshpass:
+        raise HostError('Must have sshpass installed on the host in order to use password-based auth.')
+    pass_string = "sshpass -p '{}' ".format(password)
+    return pass_string + command
+
+
+def _check_env():
+    """Lazily resolve the ssh/scp/sshpass binaries on the host. ssh and scp
+    are required (``HostError`` otherwise); sshpass is optional and only
+    needed for password-based authentication."""
+    global ssh, scp, sshpass  # pylint: disable=global-statement
+    if not ssh:
+        ssh = which('ssh')
+        scp = which('scp')
+        sshpass = which('sshpass')
+    if not (ssh and scp):
+        raise HostError('OpenSSH must be installed on the host.')
+
diff --git a/wlauto/utils/types.py b/wlauto/utils/types.py
new file mode 100644
index 00000000..89396481
--- /dev/null
+++ b/wlauto/utils/types.py
@@ -0,0 +1,176 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+"""
+Routines for doing various type conversions. These usually embody some higher-level
+semantics than are present in standard Python types (e.g. ``boolean`` will convert the
+string ``"false"`` to ``False``, whereas non-empty strings are usually considered to be
+``True``).
+
+A lot of these are intended to specify type conversions declaratively in places like
+``Parameter``'s ``kind`` argument. These are basically "hacks" around the fact that Python
+is not the best language to use for configuration.
+
+"""
+import re
+import math
+from collections import defaultdict
+
+from wlauto.utils.misc import isiterable, to_identifier
+
+
+def identifier(text):
+    """Converts text to a valid Python identifier by replacing all
+    whitespace and punctuation. Thin wrapper around
+    ``wlauto.utils.misc.to_identifier`` for use as a ``kind`` callable."""
+    return to_identifier(text)
+
+
+def boolean(value):
+    """
+    Returns bool represented by the value. This is different from
+    calling the builtin bool() in that it will interpret string representations.
+    e.g. boolean('0') and boolean('false') will both yield False.
+
+    """
+    false_strings = ['', '0', 'n', 'no']
+    if isinstance(value, basestring):
+        value = value.lower()
+        # Any prefix of 'false' ('f', 'fa', ...) also counts as False, because
+        # of the startswith check below.
+        if value in false_strings or 'false'.startswith(value):
+            return False
+    return bool(value)
+
+
<parameter name="body">+def numeric(value):
+    """
+    Returns the value as number (int if possible, or float otherwise), or
+    raises ``ValueError`` if the specified ``value`` does not have a straight
+    forward numeric conversion.
+
+    """
+    if isinstance(value, int):
+        return value
+    try:
+        fvalue = float(value)
+    except ValueError:
+        raise ValueError('Not numeric: {}'.format(value))
+    # Collapse whole-valued floats (e.g. 3.0) to ints; NaN/inf stay floats.
+    if not math.isnan(fvalue) and not math.isinf(fvalue):
+        ivalue = int(fvalue)
+        if ivalue == fvalue:  # yeah, yeah, I know. Whatever. This is best-effort.
+            return ivalue
+    return fvalue
+
+
+def list_or_string(value):
+    """
+    If the value is a string, it will be kept as a string, otherwise it will be
+    interpreted as a list. If that is not possible, it will be interpreted as a string.
+
+    """
+    if isinstance(value, basestring):
+        return value
+    else:
+        try:
+            return list(value)
+        except ValueError:
+            return str(value)
+
+
+def list_of_strs(value):
+    """
+    Value must be iterable. All elements will be converted to strings.
+
+    :raises ValueError: if ``value`` is not iterable.
+
+    """
+    if not isiterable(value):
+        raise ValueError(value)
+    return map(str, value)
+
+list_of_strings = list_of_strs
+
+
+def list_of_ints(value):
+    """
+    Value must be iterable. All elements will be converted to ``int``\ s.
+
+    :raises ValueError: if ``value`` is not iterable.
+
+    """
+    if not isiterable(value):
+        raise ValueError(value)
+    return map(int, value)
+
+list_of_integers = list_of_ints
+
+
+def list_of_numbers(value):
+    """
+    Value must be iterable. All elements will be converted to numbers (either ``ints`` or
+    ``float``\ s depending on the elements), using :func:`numeric`.
+
+    :raises ValueError: if ``value`` is not iterable.
+
+    """
+    if not isiterable(value):
+        raise ValueError(value)
+    return map(numeric, value)
+
+
+def list_of_bools(value, interpret_strings=True):
+    """
+    Value must be iterable. All elements will be converted to ``bool``\ s.
+
+    .. note:: By default, ``boolean()`` conversion function will be used, which means that
+              strings like ``"0"`` or ``"false"`` will be interpreted as ``False``. If this
+              is undesirable, set ``interpret_strings`` to ``False``.
+
+    :raises ValueError: if ``value`` is not iterable.
+
+    """
+    if not isiterable(value):
+        raise ValueError(value)
+    if interpret_strings:
+        return map(boolean, value)
+    else:
+        return map(bool, value)
+
+
+regex_type = type(re.compile(''))
+
+
+def regex(value):
+    """
+    Regular expression. If value is a string, it will be compiled with no flags. If you
+    want to specify flags, value must be precompiled.
+
+    """
+    if isinstance(value, regex_type):
+        return value
+    else:
+        return re.compile(value)
+
+
+__counters = defaultdict(int)
+
+
+def reset_counter(name=None):
+    """Reset the named counter (see :func:`counter`) back to zero, so the
+    next :func:`counter` call for that name returns 1."""
+    __counters[name] = 0
+
+
+def counter(name=None):
+    """
+    An auto incrementing value (kind of like an AUTO INCREMENT field in SQL).
+    Optionally, the name of the counter to be used is specified (each counter
+    increments separately).
+
+    Counts start at 1, not 0.
+
+    """
+    __counters[name] += 1
+    value = __counters[name]
+    return value
diff --git a/wlauto/utils/uefi.py b/wlauto/utils/uefi.py
new file mode 100644
index 00000000..08928215
--- /dev/null
+++ b/wlauto/utils/uefi.py
@@ -0,0 +1,214 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import re
+import time
+import logging
+
+from wlauto.utils.serial_port import TIMEOUT
+
+
+logger = logging.getLogger('UEFI')
+
+
+class UefiMenu(object):
+    """
+    Allows navigating UEFI menu over serial (it relies on a pexpect connection).
+
+    """
+
+    option_regex = re.compile(r'^\[(\d+)\]\s+([^\r]+)\r\n', re.M)   # "[N] option text" lines
+    prompt_regex = re.compile(r'^([^\r\n]+):\s*', re.M)             # trailing input prompt
+    invalid_regex = re.compile(r'Invalid input \(max (\d+)\)', re.M)
+
+    load_delay = 1  # seconds
+    default_timeout = 60  # seconds
+
+    def __init__(self, conn, prompt='The default boot selection will start in'):
+        """
+        :param conn: A serial connection as returned by ``pexpect.spawn()``.
+        :param prompt: The starting prompt to wait for during ``open()``.
+
+        """
+        self.conn = conn
+        self.start_prompt = prompt
+        self.options = {}  # menu index (string) -> option text, filled by read_menu()
+        self.prompt = None  # the prompt following the menu, once parsed
+
+    def open(self, timeout=default_timeout):
+        """
+        "Open" the UEFI menu by sending an interrupt on STDIN after seeing the
+        starting prompt (configurable upon creation of the ``UefiMenu`` object).
+
+        """
+        self.conn.expect(self.start_prompt, timeout)
+        self.conn.sendline('')
+        time.sleep(self.load_delay)
+
+    def create_entry(self, name, image, args, fdt_support, initrd=None, fdt_path=None):
+        """Create a new UEFI entry using the parameters. The menu is assumed
+        to be at the top level. Upon return, the menu will be at the top level."""
+        logger.debug('Creating UEFI entry {}'.format(name))
+        self.nudge()
+        self.select('Boot Manager')
+        self.select('Add Boot Device Entry')
+        self.select('NOR Flash')
+        self.enter(image)
+        self.enter('y' if fdt_support else 'n')
+        if initrd:
+            self.enter('y')
+            self.enter(initrd)
+        else:
+            self.enter('n')
+        self.enter(args)
+        self.enter(name)
+
+        if fdt_path:
+            self.select('Update FDT path')
+            self.enter(fdt_path)
+
+        self.select('Return to main menu')
+
+    def delete_entry(self, name):
+        """Delete the specified UEFI entry. The menu is assumed
+        to be at the top level. Upon return, the menu will be at the top level."""
+        logger.debug('Removing UEFI entry {}'.format(name))
+        self.nudge()
+        self.select('Boot Manager')
+        self.select('Remove Boot Device Entry')
+        self.select(name)
+        self.select('Return to main menu')
+
+    def select(self, option, timeout=default_timeout):
+        """
+        Select the specified option from the current menu.
+
+        :param option: Could be an ``int`` index of the option, or a string/regex to
+                       match option text against.
+        :param timeout: If a non-``int`` option is specified, the option list may
+                        need to be parsed (if it hasn't been already), this may block
+                        and the timeout is used to cap that, resulting in a ``TIMEOUT``
+                        exception.
+
+        """
+        if isinstance(option, basestring):
+            option = self.get_option_index(option, timeout)
+        self.enter(option)
+
+    def enter(self, value, delay=load_delay):
+        """Like ``select()`` except no resolution is performed -- the value is sent directly
+        to the serial connection. ``delay`` caps how long to wait for the echo of the input."""
+        # Empty the buffer first, so that only response to the input about to
+        # be sent will be processed by subsequent commands.
+        value = str(value)
+        self._reset()
+        self.write_characters(value)
+        # TODO: in case the value is long and complicated, things may get
+        # screwed up (e.g. there may be line breaks injected), additionally,
+        # special chars might cause regex to fail. To avoid these issues I'm
+        # only matching against the first 5 chars of the value. This is
+        # entirely arbitrary and I'll probably have to find a better way of
+        # doing this at some point.
+        self.conn.expect(value[:5], timeout=delay)
+        time.sleep(self.load_delay)
+
+    def read_menu(self, timeout=default_timeout):
+        """Parse serial output to get the menu options and the following prompt.
+        Populates ``self.options`` and ``self.prompt``; retries once on a
+        timeout (by nudging) and once on an invalid-input error before raising
+        ``RuntimeError``."""
+        attempting_timeout_retry = False
+        attempting_invalid_retry = False
+        while True:
+            index = self.conn.expect([self.option_regex, self.prompt_regex, self.invalid_regex, TIMEOUT],
+                                     timeout=timeout)
+            match = self.conn.match
+            if index == 0:  # matched menu option
+                self.options[match.group(1)] = match.group(2)
+            elif index == 1:  # matched prompt
+                self.prompt = match.group(1)
+                break
+            elif index == 2:  # matched invalid selection
+                # We've sent an invalid input (which includes an empty line) at
+                # the top-level menu. To get back the menu options, it seems we
+                # need to enter what the error reports as the max + 1, so...
+                if not attempting_invalid_retry:
+                    attempting_invalid_retry = True
+                    val = int(match.group(1)) + 1
+                    self.empty_buffer()
+                    self.enter(val)
+                else:  # OK, that didn't work; panic!
+                    raise RuntimeError('Could not read menu entries stuck on "{}" prompt'.format(self.prompt))
+            elif index == 3:  # timed out
+                if not attempting_timeout_retry:
+                    attempting_timeout_retry = True
+                    self.nudge()
+                else:  # Didn't help. Run away!
+                    raise RuntimeError('Did not see a valid UEFI menu.')
+            else:
+                raise AssertionError('Unexpected response waiting for UEFI menu')  # should never get here
+
+    def get_option_index(self, text, timeout=default_timeout):
+        """Returns the menu index of the specified option text (uses regex matching). If the option
+        is not in the current menu, ``LookupError`` will be raised."""
+        if not self.prompt:
+            self.read_menu(timeout)
+        for k, v in self.options.iteritems():
+            if re.search(text, v):
+                return k
+        raise LookupError(text)
+
+    def has_option(self, text, timeout=default_timeout):
+        """Returns ``True`` if at least one of the options in the current menu has
+        matched (using regex) the specified text."""
+        try:
+            self.get_option_index(text, timeout)
+            return True
+        except LookupError:
+            return False
+
+    def nudge(self):
+        """Send a little nudge to ensure there is something to read. This is useful when you're not
+        sure if all output from the serial has been read already."""
+        self.enter('')
+
+    def empty_buffer(self):
+        """Read everything from the serial and clear the internal pexpect buffer. This ensures
+        that the next ``expect()`` call will time out (unless further input will be sent to the
+        serial beforehand). This is used to create a "known" state and avoid unexpected matches."""
+        try:
+            while True:
+                time.sleep(0.1)
+                self.conn.read_nonblocking(size=1024, timeout=0.1)
+        except TIMEOUT:
+            pass
+        self.conn.buffer = ''
+
+    def write_characters(self, line):
+        """Write a single line out to serial character-by-character. This will ensure that nothing will
+        be dropped for longer lines."""
+        line = line.rstrip('\r\n')
+        for c in line:
+            self.conn.send(c)
+            time.sleep(0.05)
+        self.conn.sendline('')
+
+    def _reset(self):
+        """Forget any previously-parsed menu state and drain the serial buffer."""
+        self.options = {}
+        self.prompt = None
+        self.empty_buffer()
+
+
diff --git a/wlauto/workloads/__init__.py b/wlauto/workloads/__init__.py
new file mode 100644
index 00000000..cd5d64d6
--- /dev/null
+++ b/wlauto/workloads/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
diff --git a/wlauto/workloads/andebench/__init__.py b/wlauto/workloads/andebench/__init__.py
new file mode 100644
index 00000000..9c62623e
--- /dev/null
+++ b/wlauto/workloads/andebench/__init__.py
@@ -0,0 +1,88 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import re
+
+from wlauto import AndroidUiAutoBenchmark, Parameter, Alias
+from wlauto.exceptions import ConfigError
+
+
+class Andebench(AndroidUiAutoBenchmark):
+
+    name = 'andebench'
+    description = """
+    AndEBench is an industry standard Android benchmark provided by The
+    Embedded Microprocessor Benchmark Consortium (EEMBC).
+
+    http://www.eembc.org/andebench/about.php
+
+    From the website:
+
+       - Initial focus on CPU and Dalvik interpreter performance
+       - Internal algorithms concentrate on integer operations
+       - Compares the difference between native and Java performance
+       - Implements flexible multicore performance analysis
+       - Results displayed in Iterations per second
+       - Detailed log file for comprehensive engineering analysis
+
+    """
+    package = 'com.eembc.coremark'
+    activity = 'com.eembc.coremark.splash'
+    summary_metrics = ['AndEMark Java', 'AndEMark Native']
+
+    parameters = [
+        Parameter('number_of_threads', kind=int,
+                  description='Number of threads that will be spawned by AndEBench.'),
+        Parameter('single_threaded', kind=bool,
+                  description="""
+                  If ``true``, AndEBench will run with a single thread. Note: this must
+                  not be specified if ``number_of_threads`` has been specified.
+                  """),
+    ]
+
+    aliases = [
+        Alias('andebenchst', number_of_threads=1),
+    ]
+
+    # Matches "AndEMark Native: <n>" / "AndEMark Java: <n>" lines in logcat.
+    regex = re.compile('\s*(?P<key>(AndEMark Native|AndEMark Java))\s*:'
+                       '\s*(?P<value>\d+)')
+
+    def validate(self):
+        # The two parameters are mutually exclusive ways of setting thread count.
+        if (self.number_of_threads is not None) and (self.single_threaded is not None):  # pylint: disable=E1101
+            raise ConfigError('Can\'t specify both number_of_threads and single_threaded parameters.')
+
+    def setup(self, context):
+        # Resolve the effective thread count: explicit parameter wins, then
+        # single_threaded forces 1, otherwise one thread per device core.
+        if self.number_of_threads is None:  # pylint: disable=access-member-before-definition
+            if self.single_threaded:  # pylint: disable=E1101
+                self.number_of_threads = 1  # pylint: disable=attribute-defined-outside-init
+            else:
+                self.number_of_threads = self.device.number_of_cores  # pylint: disable=W0201
+        self.logger.debug('Using {} threads'.format(self.number_of_threads))
+        self.uiauto_params['number_of_threads'] = self.number_of_threads
+        # The superclass setup must run after the above, as it consumes uiauto_params.
+        super(Andebench, self).setup(context)
+
+    def update_result(self, context):
+        """Extract the AndEMark scores from the logcat dump and add them as metrics."""
+        super(Andebench, self).update_result(context)
+        results = {}
+        with open(self.logcat_log) as fh:
+            for line in fh:
+                match = self.regex.search(line)
+                if match:
+                    data = match.groupdict()
+                    results[data['key']] = data['value']
+        # NOTE(review): values are added as strings as captured from logcat;
+        # confirm whether add_metric expects numeric values here.
+        for key, value in results.iteritems():
+            context.result.add_metric(key, value)
+
diff --git a/wlauto/workloads/andebench/com.arm.wlauto.uiauto.andebench.jar b/wlauto/workloads/andebench/com.arm.wlauto.uiauto.andebench.jar
new file mode 100644
index 00000000..cc1bb880
--- /dev/null
+++ b/wlauto/workloads/andebench/com.arm.wlauto.uiauto.andebench.jar
Binary files differ
diff --git a/wlauto/workloads/andebench/uiauto/build.sh b/wlauto/workloads/andebench/uiauto/build.sh
new file mode 100755
index 00000000..d36878cf
--- /dev/null
+++ b/wlauto/workloads/andebench/uiauto/build.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+# Stage the shared BaseUiAutomation class (shipped with wlauto) into the
+# expected package directory so the uiautomator build can link against it.
+class_dir=bin/classes/com/arm/wlauto/uiauto
+base_class=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')"`
+mkdir -p $class_dir
+cp $base_class $class_dir
+
+# Regenerate the Android project files and build with ant (requires ANDROID_HOME).
+${ANDROID_HOME}/tools/android update project -p .
+ant build
+
+# On success, publish the built jar next to the workload's __init__.py.
+if [[ -f bin/com.arm.wlauto.uiauto.andebench.jar ]]; then
+    cp bin/com.arm.wlauto.uiauto.andebench.jar ..
+fi
diff --git a/wlauto/workloads/andebench/uiauto/build.xml b/wlauto/workloads/andebench/uiauto/build.xml
new file mode 100644
index 00000000..8d0957f1
--- /dev/null
+++ b/wlauto/workloads/andebench/uiauto/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="com.arm.wlauto.uiauto.andebench" default="help">
+
+ <!-- The local.properties file is created and updated by the 'android' tool.
+ It contains the path to the SDK. It should *NOT* be checked into
+ Version Control Systems. -->
+ <property file="local.properties" />
+
+ <!-- The ant.properties file can be created by you. It is only edited by the
+ 'android' tool to add properties to it.
+ This is the place to change some Ant specific build properties.
+ Here are some properties you may want to change/update:
+
+ source.dir
+ The name of the source directory. Default is 'src'.
+ out.dir
+ The name of the output directory. Default is 'bin'.
+
+ For other overridable properties, look at the beginning of the rules
+ files in the SDK, at tools/ant/build.xml
+
+ Properties related to the SDK location or the project target should
+ be updated using the 'android' tool with the 'update' action.
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems.
+
+ -->
+ <property file="ant.properties" />
+
+ <!-- if sdk.dir was not set from one of the property file, then
+ get it from the ANDROID_HOME env var.
+ This must be done before we load project.properties since
+ the proguard config can use sdk.dir -->
+ <property environment="env" />
+ <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+ <isset property="env.ANDROID_HOME" />
+ </condition>
+
+ <!-- The project.properties file is created and updated by the 'android'
+ tool, as well as ADT.
+
+ This contains project specific properties such as project target, and library
+ dependencies. Lower level build properties are stored in ant.properties
+ (or in .classpath for Eclipse projects).
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems. -->
+ <loadproperties srcFile="project.properties" />
+
+ <!-- quick check on sdk.dir -->
+ <fail
+ message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+ unless="sdk.dir"
+ />
+
+ <!--
+ Import per project custom build rules if present at the root of the project.
+ This is the place to put custom intermediary targets such as:
+ -pre-build
+ -pre-compile
+ -post-compile (This is typically used for code obfuscation.
+ Compiled code location: ${out.classes.absolute.dir}
+ If this is not done in place, override ${out.dex.input.absolute.dir})
+ -post-package
+ -post-build
+ -pre-clean
+ -->
+ <import file="custom_rules.xml" optional="true" />
+
+ <!-- Import the actual build file.
+
+ To customize existing targets, there are two options:
+ - Customize only one target:
+ - copy/paste the target into this file, *before* the
+ <import> task.
+ - customize it to your needs.
+ - Customize the whole content of build.xml
+ - copy/paste the content of the rules files (minus the top node)
+ into this file, replacing the <import> task.
+ - customize to your needs.
+
+ ***********************
+ ****** IMPORTANT ******
+ ***********************
+ In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+ in order to avoid having your file be overridden by tools such as "android update project"
+ -->
+ <!-- version-tag: VERSION_TAG -->
+ <import file="${sdk.dir}/tools/ant/uibuild.xml" />
+
+</project>
diff --git a/wlauto/workloads/andebench/uiauto/project.properties b/wlauto/workloads/andebench/uiauto/project.properties
new file mode 100644
index 00000000..a3ee5ab6
--- /dev/null
+++ b/wlauto/workloads/andebench/uiauto/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=android-17
diff --git a/wlauto/workloads/andebench/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java b/wlauto/workloads/andebench/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
new file mode 100644
index 00000000..41e36800
--- /dev/null
+++ b/wlauto/workloads/andebench/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
@@ -0,0 +1,108 @@
+/* Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+package com.arm.wlauto.uiauto.andebench;
+
+import java.util.concurrent.TimeUnit;
+
+import android.app.Activity;
+import android.os.Bundle;
+import android.util.Log;
+
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+
+import com.arm.wlauto.uiauto.BaseUiAutomation;
+
+public class UiAutomation extends BaseUiAutomation {
+
+ public static String TAG = "andebench";
+
+ private static int initialTimeoutSeconds = 20;
+ private static int shortDelaySeconds = 3;
+
+ public void runUiAutomation() throws Exception{
+ Bundle status = new Bundle();
+ Bundle params = getParams();
+ String numThreads = params.getString("number_of_threads");
+ status.putString("product", getUiDevice().getProductName());
+
+ waitForStartButton();
+ setNumberOfThreads(numThreads);
+ hitStart();
+ waitForAndExtractResuts();
+
+ getAutomationSupport().sendStatus(Activity.RESULT_OK, status);
+ }
+
+ public void waitForStartButton() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject startButton = new UiObject(selector.className("android.widget.ImageButton")
+ .packageName("com.eembc.coremark"));
+ if (!startButton.waitForExists(TimeUnit.SECONDS.toMillis(initialTimeoutSeconds))) {
+ throw new UiObjectNotFoundException("Did not see start button.");
+ }
+ }
+
+ public void setNumberOfThreads(String numThreads) throws Exception {
+ UiSelector selector = new UiSelector();
+ getUiDevice().pressMenu();
+
+ UiObject settingsButton = new UiObject(selector.clickable(true));
+ settingsButton.click();
+ UiObject threadNumberField = new UiObject(selector.className("android.widget.EditText"));
+ threadNumberField.clearTextField();
+ threadNumberField.setText(numThreads);
+
+ getUiDevice().pressBack();
+ sleep(shortDelaySeconds);
+        // If the device does not have a physical keyboard, a virtual one might have
+        // popped up when setting the number of threads. If that happened, then the above
+        // back press would dismiss the vkb and another one will be necessary to return
+        // from the settings screen.
+ if(threadNumberField.exists())
+ {
+ getUiDevice().pressBack();
+ sleep(shortDelaySeconds);
+ }
+ }
+
+ public void hitStart() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject startButton = new UiObject(selector.className("android.widget.ImageButton")
+ .packageName("com.eembc.coremark"));
+ startButton.click();
+ sleep(shortDelaySeconds);
+ }
+
+ public void waitForAndExtractResuts() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject runningText = new UiObject(selector.textContains("Running...")
+ .className("android.widget.TextView")
+ .packageName("com.eembc.coremark"));
+ runningText.waitUntilGone(TimeUnit.SECONDS.toMillis(600));
+
+ UiObject resultText = new UiObject(selector.textContains("Results in Iterations/sec:")
+ .className("android.widget.TextView")
+ .packageName("com.eembc.coremark"));
+ resultText.waitForExists(TimeUnit.SECONDS.toMillis(shortDelaySeconds));
+ Log.v(TAG, resultText.getText());
+ sleep(shortDelaySeconds);
+ }
+}
diff --git a/wlauto/workloads/angrybirds/__init__.py b/wlauto/workloads/angrybirds/__init__.py
new file mode 100644
index 00000000..92ef6828
--- /dev/null
+++ b/wlauto/workloads/angrybirds/__init__.py
@@ -0,0 +1,30 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto import GameWorkload
+
+
+class AngryBirds(GameWorkload):
+
+ name = 'angrybirds'
+ description = """
+ Angry Birds game.
+
+ A very popular Android 2D game.
+ """
+ package = 'com.rovio.angrybirds'
+ activity = 'com.rovio.ka3d.App'
+
diff --git a/wlauto/workloads/angrybirds/angrybirds_classic.revent b/wlauto/workloads/angrybirds/angrybirds_classic.revent
new file mode 100644
index 00000000..74a46c70
--- /dev/null
+++ b/wlauto/workloads/angrybirds/angrybirds_classic.revent
Binary files differ
diff --git a/wlauto/workloads/angrybirds/revent_files/.empty b/wlauto/workloads/angrybirds/revent_files/.empty
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/wlauto/workloads/angrybirds/revent_files/.empty
diff --git a/wlauto/workloads/angrybirds_rio/__init__.py b/wlauto/workloads/angrybirds_rio/__init__.py
new file mode 100644
index 00000000..c413fd97
--- /dev/null
+++ b/wlauto/workloads/angrybirds_rio/__init__.py
@@ -0,0 +1,30 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto import GameWorkload
+
+
+class AngryBirdsRio(GameWorkload):
+
+ name = 'angrybirds_rio'
+ description = """
+ Angry Birds Rio game.
+
+ The sequel to the very popular Android 2D game.
+ """
+ package = 'com.rovio.angrybirdsrio'
+ activity = 'com.rovio.ka3d.App'
+
diff --git a/wlauto/workloads/angrybirds_rio/revent_files/.empty b/wlauto/workloads/angrybirds_rio/revent_files/.empty
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/wlauto/workloads/angrybirds_rio/revent_files/.empty
diff --git a/wlauto/workloads/angrybirds_rio/revent_files/Nexus10.run.revent b/wlauto/workloads/angrybirds_rio/revent_files/Nexus10.run.revent
new file mode 100644
index 00000000..bb0e7018
--- /dev/null
+++ b/wlauto/workloads/angrybirds_rio/revent_files/Nexus10.run.revent
Binary files differ
diff --git a/wlauto/workloads/angrybirds_rio/revent_files/Nexus10.setup.revent b/wlauto/workloads/angrybirds_rio/revent_files/Nexus10.setup.revent
new file mode 100644
index 00000000..5f2ae879
--- /dev/null
+++ b/wlauto/workloads/angrybirds_rio/revent_files/Nexus10.setup.revent
Binary files differ
diff --git a/wlauto/workloads/anomaly2/__init__.py b/wlauto/workloads/anomaly2/__init__.py
new file mode 100644
index 00000000..8060c34c
--- /dev/null
+++ b/wlauto/workloads/anomaly2/__init__.py
@@ -0,0 +1,63 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import json
+
+from wlauto.common.android.workload import GameWorkload
+from wlauto.exceptions import WorkloadError, DeviceError
+
+
+class Anomaly2(GameWorkload):
+
+ name = 'anomaly2'
+ description = """
+ Anomaly 2 game demo and benchmark.
+
+ Plays three scenes from the game, benchmarking each one. Scores reported are intended to
+ represent overall perceived quality of the game, based not only on raw FPS but also factors
+ like smoothness.
+
+ """
+ package = 'com.elevenbitstudios.anomaly2Benchmark'
+ activity = 'com.android.Game11Bits.MainActivity'
+ loading_time = 30
+ asset_file = 'obb:com.elevenbitstudios.anomaly2Benchmark.tar.gz'
+
+ def reset(self, context):
+ pass
+
+ def update_result(self, context):
+ super(Anomaly2, self).update_result(context)
+ sent_blobs = {'data': []}
+ with open(self.logcat_log) as fh:
+ for line in fh:
+ if 'sendHttpRequest: json = ' in line:
+ data = json.loads(line.split('json = ')[1])
+ sent_blobs['data'].append(data)
+ if 'scene' not in data['intValues']:
+ continue
+ scene = data['intValues']['scene']
+ score = data['intValues']['score']
+ fps = data['floatValues']['fps']
+ context.result.add_metric('scene_{}_score'.format(scene), score)
+ context.result.add_metric('scene_{}_fps'.format(scene), fps)
+ outfile = os.path.join(context.output_directory, 'anomaly2.json')
+ with open(outfile, 'wb') as wfh:
+ json.dump(sent_blobs, wfh, indent=4)
+
+ def teardown(self, context):
+ self.device.execute('am force-stop {}'.format(self.package))
+
diff --git a/wlauto/workloads/antutu/__init__.py b/wlauto/workloads/antutu/__init__.py
new file mode 100644
index 00000000..305baa7b
--- /dev/null
+++ b/wlauto/workloads/antutu/__init__.py
@@ -0,0 +1,136 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+from collections import defaultdict, OrderedDict
+
+from wlauto import AndroidUiAutoBenchmark, Parameter
+
+
+class Antutu(AndroidUiAutoBenchmark):
+
+ name = 'antutu'
+ description = """
+    AnTuTu Benchmark is a benchmarking tool for Android Mobile Phone/Pad. It
+    can run a full test of a key project, through the "Memory Performance","CPU
+    Integer Performance","CPU Floating point Performance","2D 3D Graphics
+    Performance","SD card reading/writing speed","Database IO" performance
+    testing, and gives accurate analysis for Android smart phones.
+
+ http://www.antutulabs.com/AnTuTu-Benchmark
+
+ From the website:
+
+ AnTuTu Benchmark can support the latest quad-core cpu. In reaching the
+ overall and individual scores of the hardware, AnTuTu Benchmark could judge
+ your phone by the scores of the performance of the hardware. By uploading
+ the scores, Benchmark can view your device in the world rankings, allowing
+ points to let you know the level of hardware performance equipment.
+
+ """
+ #pylint: disable=E1101
+
+ package = "com.antutu.ABenchMark"
+ activity = ".ABenchMarkStart"
+ summary_metrics = ['score', 'Overall_Score']
+
+ valid_versions = ['3.3.2', '4.0.3', '5.3.0']
+
+ device_prefs_directory = '/data/data/com.antutu.ABenchMark/shared_prefs'
+ device_prefs_file = '/'.join([device_prefs_directory, 'com.antutu.ABenchMark_preferences.xml'])
+ local_prefs_directory = os.path.join(os.path.dirname(__file__), 'shared_prefs')
+
+ parameters = [
+ Parameter('version', allowed_values=valid_versions, default=sorted(valid_versions, reverse=True)[0],
+ description=('Specify the version of AnTuTu to be run. If not specified, the latest available '
+ 'version will be used.')),
+ Parameter('times', kind=int, default=1,
+ description=('The number of times the benchmark will be executed in a row (i.e. '
+ 'without going through the full setup/teardown process). Note: this does '
+ 'not work with versions prior to 4.0.3.')),
+ Parameter('enable_sd_tests', kind=bool, default=False,
+ description=('If ``True`` enables SD card tests in pre version 4 AnTuTu. These tests '
+                               'were known to cause problems on platforms without an SD card. This parameter '
+ 'will be ignored on AnTuTu version 4 and higher.')),
+ ]
+
+ def __init__(self, device, **kwargs): # pylint: disable=W0613
+ super(Antutu, self).__init__(device, **kwargs)
+ self.run_timeout = 6 * 60 * self.times
+ self.uiauto_params['version'] = self.version
+ self.uiauto_params['times'] = self.times
+ self.uiauto_params['enable_sd_tests'] = self.enable_sd_tests
+
+ def update_result(self, context):
+ super(Antutu, self).update_result(context)
+ with open(self.logcat_log) as fh:
+ if self.version == '4.0.3':
+ metrics = extract_version4_metrics(fh)
+ else:
+ metrics = extract_older_version_metrics(fh)
+ for key, value in metrics.iteritems():
+ key = key.replace(' ', '_')
+ context.result.add_metric(key, value)
+
+
+# Utility functions
+
+def extract_version4_metrics(fh):
+ metrics = OrderedDict()
+ metric_counts = defaultdict(int)
+ for line in fh:
+ if 'ANTUTU RESULT:' in line:
+ parts = line.split(':')
+ metric = parts[2].strip()
+            # If times parameter > 1 the same metric will appear
+            # multiple times in logcat -- we want to collect all of
+ # them as they're from different iterations.
+ metric_counts[metric] += 1
+ if metric_counts[metric] > 1:
+ metric += '_' + str(metric_counts[metric])
+
+ value_string = parts[3].strip()
+            # Graphics results report resolution in square brackets
+ # as part of value string.
+ if ']' in value_string:
+ value = int(value_string.split(']')[1].strip())
+ else:
+ value = int(value_string)
+
+ metrics[metric] = value
+ return metrics
+
+
+def extract_older_version_metrics(fh):
+ metrics = {}
+ metric_counts = defaultdict(int)
+ for line in fh:
+ if 'i/antutu' in line.lower():
+ parts = line.split(':')
+ if not len(parts) == 3:
+ continue
+ metric = parts[1].strip()
+ value = int(parts[2].strip())
+
+            # If times parameter > 1 the same metric will appear
+            # multiple times in logcat -- we want to collect all of
+ # them as they're from different iterations.
+ metric_counts[metric] += 1
+ if metric_counts[metric] > 1:
+ metric += ' ' + str(metric_counts[metric])
+
+ metrics[metric] = value
+ return metrics
+
diff --git a/wlauto/workloads/antutu/com.arm.wlauto.uiauto.antutu.jar b/wlauto/workloads/antutu/com.arm.wlauto.uiauto.antutu.jar
new file mode 100644
index 00000000..35618ede
--- /dev/null
+++ b/wlauto/workloads/antutu/com.arm.wlauto.uiauto.antutu.jar
Binary files differ
diff --git a/wlauto/workloads/antutu/uiauto/build.sh b/wlauto/workloads/antutu/uiauto/build.sh
new file mode 100755
index 00000000..7cd2e7f0
--- /dev/null
+++ b/wlauto/workloads/antutu/uiauto/build.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+class_dir=bin/classes/com/arm/wlauto/uiauto
+base_class=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')"`
+mkdir -p $class_dir
+cp $base_class $class_dir
+
+ant build
+
+if [[ -f bin/com.arm.wlauto.uiauto.antutu.jar ]]; then
+ cp bin/com.arm.wlauto.uiauto.antutu.jar ..
+fi
diff --git a/wlauto/workloads/antutu/uiauto/build.xml b/wlauto/workloads/antutu/uiauto/build.xml
new file mode 100644
index 00000000..a649f2fd
--- /dev/null
+++ b/wlauto/workloads/antutu/uiauto/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="com.arm.wlauto.uiauto.antutu" default="help">
+
+ <!-- The local.properties file is created and updated by the 'android' tool.
+ It contains the path to the SDK. It should *NOT* be checked into
+ Version Control Systems. -->
+ <property file="local.properties" />
+
+ <!-- The ant.properties file can be created by you. It is only edited by the
+ 'android' tool to add properties to it.
+ This is the place to change some Ant specific build properties.
+ Here are some properties you may want to change/update:
+
+ source.dir
+ The name of the source directory. Default is 'src'.
+ out.dir
+ The name of the output directory. Default is 'bin'.
+
+ For other overridable properties, look at the beginning of the rules
+ files in the SDK, at tools/ant/build.xml
+
+ Properties related to the SDK location or the project target should
+ be updated using the 'android' tool with the 'update' action.
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems.
+
+ -->
+ <property file="ant.properties" />
+
+ <!-- if sdk.dir was not set from one of the property file, then
+ get it from the ANDROID_HOME env var.
+ This must be done before we load project.properties since
+ the proguard config can use sdk.dir -->
+ <property environment="env" />
+ <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+ <isset property="env.ANDROID_HOME" />
+ </condition>
+
+ <!-- The project.properties file is created and updated by the 'android'
+ tool, as well as ADT.
+
+ This contains project specific properties such as project target, and library
+ dependencies. Lower level build properties are stored in ant.properties
+ (or in .classpath for Eclipse projects).
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems. -->
+ <loadproperties srcFile="project.properties" />
+
+ <!-- quick check on sdk.dir -->
+ <fail
+ message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+ unless="sdk.dir"
+ />
+
+ <!--
+ Import per project custom build rules if present at the root of the project.
+ This is the place to put custom intermediary targets such as:
+ -pre-build
+ -pre-compile
+ -post-compile (This is typically used for code obfuscation.
+ Compiled code location: ${out.classes.absolute.dir}
+ If this is not done in place, override ${out.dex.input.absolute.dir})
+ -post-package
+ -post-build
+ -pre-clean
+ -->
+ <import file="custom_rules.xml" optional="true" />
+
+ <!-- Import the actual build file.
+
+ To customize existing targets, there are two options:
+ - Customize only one target:
+ - copy/paste the target into this file, *before* the
+ <import> task.
+ - customize it to your needs.
+ - Customize the whole content of build.xml
+ - copy/paste the content of the rules files (minus the top node)
+ into this file, replacing the <import> task.
+ - customize to your needs.
+
+ ***********************
+ ****** IMPORTANT ******
+ ***********************
+ In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+ in order to avoid having your file be overridden by tools such as "android update project"
+ -->
+ <!-- version-tag: VERSION_TAG -->
+ <import file="${sdk.dir}/tools/ant/uibuild.xml" />
+
+</project>
diff --git a/wlauto/workloads/antutu/uiauto/project.properties b/wlauto/workloads/antutu/uiauto/project.properties
new file mode 100644
index 00000000..4ab12569
--- /dev/null
+++ b/wlauto/workloads/antutu/uiauto/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=android-19
diff --git a/wlauto/workloads/antutu/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java b/wlauto/workloads/antutu/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
new file mode 100644
index 00000000..339f7d72
--- /dev/null
+++ b/wlauto/workloads/antutu/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
@@ -0,0 +1,295 @@
+/* Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+package com.arm.wlauto.uiauto.antutu;
+
+import java.util.Set;
+import java.util.HashSet;
+import java.util.concurrent.TimeUnit;
+
+import android.app.Activity;
+import android.os.Bundle;
+import android.util.Log;
+
+// Import the uiautomator libraries
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.core.UiCollection;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+
+import com.arm.wlauto.uiauto.BaseUiAutomation;
+
+public class UiAutomation extends BaseUiAutomation {
+
+ public static String TAG = "antutu";
+
+ private static int initialTimeoutSeconds = 20;
+
+ public void runUiAutomation() throws Exception{
+ Bundle parameters = getParams();
+
+ String version = parameters.getString("version");
+ boolean enableSdTests = Boolean.parseBoolean(parameters.getString("enable_sd_tests"));
+
+ int times = Integer.parseInt(parameters.getString("times"));
+ if (times < 1) {
+ times = 1;
+ }
+
+ if (version.equals("4.0.3") || version.equals("5.3.0")){
+ int iteration = 0;
+ dismissNewVersionNotificationIfNecessary();
+ hitTestButton();
+ while (true) {
+ if (version.equals("5.3.0"))
+ hitTestButtonVersion5();
+ else
+ hitTestButton();
+
+ waitForVersion4Results();
+ viewDetails();
+ extractResults();
+ iteration++;
+ if (iteration >= times) {
+ break;
+ }
+ returnToTestScreen();
+ }
+ } else { // version earlier than 4.0.3
+ dismissReleaseNotesDialogIfNecessary();
+ if(!enableSdTests){
+ disableSdCardTests();
+ }
+ hitStart();
+ waitForAndViewResults();
+ }
+
+ Bundle status = new Bundle();
+ getAutomationSupport().sendStatus(Activity.RESULT_OK, status);
+ }
+
+ public boolean dismissNewVersionNotificationIfNecessary() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject closeButton = new UiObject(selector.text("Cancel"));
+ if (closeButton.waitForExists(TimeUnit.SECONDS.toMillis(initialTimeoutSeconds))) {
+ closeButton.click();
+            sleep(1); // dialog dismissal
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ public boolean dismissReleaseNotesDialogIfNecessary() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject closeButton = new UiObject(selector.text("Close"));
+ if (closeButton.waitForExists(TimeUnit.SECONDS.toMillis(initialTimeoutSeconds))) {
+ closeButton.click();
+            sleep(1); // dialog dismissal
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ public void hitTestButton() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject test = new UiObject(selector.text("Test")
+ .className("android.widget.Button"));
+ test.waitForExists(initialTimeoutSeconds);
+ test.click();
+        sleep(1); // possible tab transition
+ }
+
+ /* In version 5 of antutu, the test has been changed from a button widget to a textview */
+
+ public void hitTestButtonVersion5() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject test = new UiObject(selector.resourceId("com.antutu.ABenchMark:id/start_test_region")
+ .className("android.widget.TextView"));
+ test.waitForExists(initialTimeoutSeconds);
+ test.click();
+        sleep(1); // possible tab transition
+ }
+
+
+
+ public void hitTest() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject test = new UiObject(selector.text("Test"));
+ test.click();
+        sleep(1); // possible tab transition
+ }
+
+ public void disableSdCardTests() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject custom = new UiObject(selector.textContains("Custom"));
+ custom.click();
+ sleep(1); // tab transition
+
+ UiObject sdCardButton = new UiObject(selector.text("SD card IO"));
+ sdCardButton.click();
+ }
+
+ public void hitStart() throws Exception {
+ UiSelector selector = new UiSelector();
+ Log.v(TAG, "Start the test");
+ UiObject startButton = new UiObject(selector.text("Start Test")
+ .className("android.widget.Button"));
+ startButton.click();
+ }
+
+ public void waitForVersion4Results() throws Exception {
+ // The observed behaviour seems to vary between devices. On some platforms,
+ // the benchmark terminates in the barchart screen; on others, it terminates in
+        // details screen. So we have to wait for either and then act appropriately (on the barchart
+        // screen a back button press is required to get to the details screen).
+ UiSelector selector = new UiSelector();
+ UiObject barChart = new UiObject(new UiSelector().className("android.widget.TextView")
+ .text("Bar Chart"));
+ UiObject detailsButton = new UiObject(new UiSelector().className("android.widget.Button")
+ .text("Details"));
+ for (int i = 0; i < 60; i++) {
+ if (detailsButton.exists() || barChart.exists()) {
+ break;
+ }
+ sleep(5);
+ }
+
+ if (barChart.exists()) {
+ getUiDevice().pressBack();
+ }
+ }
+
+ public void viewDetails() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject detailsButton = new UiObject(new UiSelector().className("android.widget.Button")
+ .text("Details"));
+ detailsButton.clickAndWaitForNewWindow();
+ }
+
+ public void extractResults() throws Exception {
+ extractOverallResult();
+ extractSectionResults();
+ }
+
+ public void extractOverallResult() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiSelector resultTextSelector = selector.className("android.widget.TextView").index(0);
+ UiSelector relativeLayoutSelector = selector.className("android.widget.RelativeLayout").index(1);
+ UiObject result = new UiObject(selector.className("android.widget.LinearLayout")
+ .childSelector(relativeLayoutSelector)
+ .childSelector(resultTextSelector));
+ if (result.exists()) {
+ Log.v(TAG, String.format("ANTUTU RESULT: Overall Score: %s", result.getText()));
+ }
+ }
+
+ public void extractSectionResults() throws Exception {
+ UiSelector selector = new UiSelector();
+ Set<String> processedMetrics = new HashSet<String>();
+
+ actuallyExtractSectionResults(processedMetrics);
+ UiScrollable resultsList = new UiScrollable(selector.className("android.widget.ScrollView"));
+ // Note: there is an assumption here that the entire results list fits on at most
+        // two screens on the device. Given the number of entries in the current
+        // antutu version and the devices we're dealing with, this is a reasonable
+ // assumption. But if this changes, this will need to be adapted to scroll more
+ // slowly.
+ resultsList.scrollToEnd(10);
+ actuallyExtractSectionResults(processedMetrics);
+ }
+
+ public void actuallyExtractSectionResults(Set<String> processedMetrics) throws Exception {
+ UiSelector selector = new UiSelector();
+
+ for (int i = 1; i < 8; i += 2) {
+ UiObject table = new UiObject(selector.className("android.widget.TableLayout").index(i));
+ for (int j = 0; j < 3; j += 2) {
+ UiObject row = table.getChild(selector.className("android.widget.TableRow").index(j));
+ UiObject metric = row.getChild(selector.className("android.widget.TextView").index(0));
+ UiObject value = row.getChild(selector.className("android.widget.TextView").index(1));
+
+ if (metric.exists() && value.exists()) {
+ String metricText = metric.getText();
+ if (!processedMetrics.contains(metricText)) {
+ Log.v(TAG, String.format("ANTUTU RESULT: %s %s", metric.getText(), value.getText()));
+ processedMetrics.add(metricText);
+ }
+ }
+ }
+ }
+ }
+
+ public void returnToTestScreen() throws Exception {
+ getUiDevice().pressBack();
+ UiSelector selector = new UiSelector();
+ UiObject retestButton = new UiObject(selector.text("Test Again")
+ .className("android.widget.Button"));
+ retestButton.clickAndWaitForNewWindow();
+ }
+
+ public void waitForAndViewResults() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject submitTextView = new UiObject(selector.text("Submit Scores")
+ .className("android.widget.TextView"));
+ UiObject detailTextView = new UiObject(selector.text("Detailed Scores")
+ .className("android.widget.TextView"));
+ UiObject commentTextView = new UiObject(selector.text("User comment")
+ .className("android.widget.TextView"));
+ boolean foundResults = false;
+ for (int i = 0; i < 60; i++) {
+ if (detailTextView.exists() || submitTextView.exists() || commentTextView.exists()) {
+ foundResults = true;
+ break;
+ }
+ sleep(5);
+ }
+
+ if (!foundResults) {
+ throw new UiObjectNotFoundException("Did not see AnTuTu results screen.");
+ }
+
+ if (commentTextView.exists()) {
+ getUiDevice().pressBack();
+ }
+ // Yes, sometimes, it needs to be done twice...
+ if (commentTextView.exists()) {
+ getUiDevice().pressBack();
+ }
+
+ if (detailTextView.exists()) {
+ detailTextView.click();
+ sleep(1); // tab transition
+
+ UiObject testTextView = new UiObject(selector.text("Test")
+ .className("android.widget.TextView"));
+ if (testTextView.exists()) {
+ testTextView.click();
+ sleep(1); // tab transition
+ }
+
+ UiObject scoresTextView = new UiObject(selector.text("Scores")
+ .className("android.widget.TextView"));
+ if (scoresTextView.exists()) {
+ scoresTextView.click();
+ sleep(1); // tab transition
+ }
+ }
+ }
+}
diff --git a/wlauto/workloads/applaunch/__init__.py b/wlauto/workloads/applaunch/__init__.py
new file mode 100644
index 00000000..4e2cac14
--- /dev/null
+++ b/wlauto/workloads/applaunch/__init__.py
@@ -0,0 +1,169 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=E1101
+
+from __future__ import division
+import os
+
+try:
+ import jinja2
+except ImportError:
+ jinja2 = None
+
+from wlauto import Workload, settings, Parameter
+from wlauto.exceptions import WorkloadError
+from wlauto.utils.hwmon import discover_sensors
+from wlauto.utils.misc import get_meansd
+from wlauto.utils.types import boolean, identifier, list_of_strs
+
+
+THIS_DIR = os.path.dirname(__file__)
+TEMPLATE_NAME = 'device_script.template'
+SCRIPT_TEMPLATE = os.path.join(THIS_DIR, TEMPLATE_NAME)
+
# Launch configuration for each supported app: the package/activity passed to
# `am start`, plus any extra intent options (e.g. the browser opens a blank
# page so load time is not dominated by content).
APP_CONFIG = {
    'browser': {
        'package': 'com.android.browser',
        'activity': '.BrowserActivity',
        'options': '-d about:blank',
    },
    'calculator': {
        'package': 'com.android.calculator2',
        'activity': '.Calculator',
        'options': '',
    },
    'calendar': {
        'package': 'com.android.calendar',
        'activity': '.LaunchActivity',
        'options': '',
    },
}
+
+
class ApplaunchWorkload(Workload):
    """Times (and optionally measures energy for) repeated app launches.

    A shell script is rendered from a jinja2 template on the host, pushed to
    the device and executed there; per-launch results are pulled back and
    parsed in ``update_result``.
    """

    name = 'applaunch'
    description = """
    Measures the time and energy used in launching an application.

    """

    parameters = [
        Parameter('app', default='browser', allowed_values=['calculator', 'browser', 'calendar'],
                  description='The name of the application to measure.'),
        Parameter('set_launcher_affinity', kind=bool, default=True,
                  description=('If ``True``, this will explicitly set the affinity of the launcher '
                               'process to the A15 cluster.')),
        Parameter('times', kind=int, default=8,
                  description='Number of app launches to do on the device.'),
        Parameter('measure_energy', kind=boolean, default=False,
                  description="""
                  Specifies whether energy measurements should be taken during the run.

                  .. note:: This depends on appropriate sensors to be exposed through HWMON.

                  """),
        Parameter('cleanup', kind=boolean, default=True,
                  description='Specifies whether to clean up temporary files on the device.'),
    ]

    def __init__(self, device, **kwargs):
        super(ApplaunchWorkload, self).__init__(device, **kwargs)
        if not jinja2:
            raise WorkloadError('Please install jinja2 Python package: "sudo pip install jinja2"')
        filename = '{}-{}.sh'.format(self.name, self.app)
        # Script is rendered on the host, then installed onto the device.
        self.host_script_file = os.path.join(settings.meta_directory, filename)
        self.device_script_file = os.path.join(self.device.working_directory, filename)
        self._launcher_pid = None
        self._old_launcher_affinity = None
        self.sensors = []

    def on_run_init(self, context):  # pylint: disable=W0613
        # Discover HWMON energy sensors up front so both the script template
        # and update_result() see the same set.
        if self.measure_energy:
            self.sensors = discover_sensors(self.device, ['energy'])
            for sensor in self.sensors:
                # Labels double as shell variable names in the generated script.
                sensor.label = identifier(sensor.label).upper()

    def setup(self, context):
        self.logger.debug('Creating script {}'.format(self.host_script_file))
        with open(self.host_script_file, 'w') as wfh:
            env = jinja2.Environment(loader=jinja2.FileSystemLoader(THIS_DIR))
            template = env.get_template(TEMPLATE_NAME)
            wfh.write(template.render(device=self.device,  # pylint: disable=maybe-no-member
                                      sensors=self.sensors,
                                      iterations=self.times,
                                      package=APP_CONFIG[self.app]['package'],
                                      activity=APP_CONFIG[self.app]['activity'],
                                      options=APP_CONFIG[self.app]['options'],
                                      # The template's {% if cleanup %} block was
                                      # previously always False because this
                                      # variable was never passed in.
                                      cleanup=self.cleanup,
                                      ))
        self.device_script_file = self.device.install(self.host_script_file)
        if self.set_launcher_affinity:
            self._set_launcher_affinity()
        self.device.clear_logcat()

    def run(self, context):
        self.device.execute('sh {}'.format(self.device_script_file), timeout=300)

    def update_result(self, context):
        # time.result holds per-launch times in milliseconds; each sensor file
        # holds paired before/after readings (micro-Joules assumed -- see the
        # 1e6 divisor below).
        result_files = ['time.result']
        result_files += ['{}.result'.format(sensor.label) for sensor in self.sensors]
        for filename in result_files:
            host_result_file = os.path.join(context.output_directory, filename)
            device_result_file = self.device.path.join(self.device.working_directory, filename)
            self.device.pull_file(device_result_file, host_result_file)

            with open(host_result_file) as fh:
                if filename == 'time.result':
                    values = [v / 1000 for v in map(int, fh.read().split())]  # ms -> seconds
                    _add_metric(context, 'time', values, 'Seconds')
                else:
                    metric = filename.replace('.result', '').lower()
                    # Consecutive (before, after) pairs -> energy per launch.
                    numbers = iter(map(int, fh.read().split()))
                    deltas = [(after - before) / 1000000 for before, after in zip(numbers, numbers)]
                    _add_metric(context, metric, deltas, 'Joules')

    def teardown(self, context):
        if self.set_launcher_affinity:
            self._reset_launcher_affinity()
        if self.cleanup:
            self.device.delete_file(self.device_script_file)

    def _set_launcher_affinity(self):
        # Pin the launcher process to the A15 cluster so its activity does not
        # perturb measurements on the other cluster.
        try:
            self._launcher_pid = self.device.get_pids_of('com.android.launcher')[0]
            result = self.device.execute('taskset -p {}'.format(self._launcher_pid), busybox=True, as_root=True)
            self._old_launcher_affinity = int(result.split(':')[1].strip(), 16)

            cpu_ids = [i for i, x in enumerate(self.device.core_names) if x == 'a15']
            if not cpu_ids or len(cpu_ids) == len(self.device.core_names):
                self.logger.debug('Cannot set affinity.')
                return

            # taskset expects a bitmask with one bit set per CPU; the old code
            # OR-ed the CPU *indices* together, which produces the wrong mask
            # for any index above 1.
            new_mask = 0
            for cpu_id in cpu_ids:
                new_mask |= 1 << cpu_id
            self.device.execute('taskset -p 0x{:X} {}'.format(new_mask, self._launcher_pid), busybox=True, as_root=True)
        except IndexError:
            raise WorkloadError('Could not set affinity of launcher: PID not found.')

    def _reset_launcher_affinity(self):
        # teardown() calls this even when _set_launcher_affinity() failed part
        # way through, so guard against unset state instead of crashing.
        if self._launcher_pid is None or self._old_launcher_affinity is None:
            return
        command = 'taskset -p 0x{:X} {}'.format(self._old_launcher_affinity, self._launcher_pid)
        self.device.execute(command, busybox=True, as_root=True)
+
+
def _add_metric(context, metric, values, units):
    """Record the mean of ``values`` plus its standard deviation (as ``<metric> sd``)."""
    mean_value, sd_value = get_meansd(values)
    result = context.result
    result.add_metric(metric, mean_value, units)
    result.add_metric('{} sd'.format(metric), sd_value, units, lower_is_better=True)
+
diff --git a/wlauto/workloads/applaunch/device_script.template b/wlauto/workloads/applaunch/device_script.template
new file mode 100644
index 00000000..d1313db9
--- /dev/null
+++ b/wlauto/workloads/applaunch/device_script.template
@@ -0,0 +1,69 @@
#!{{ device.binaries_directory.rstrip('/') }}/sh
{# Rendered by the applaunch workload. Expected context variables: device,
   sensors, iterations, package, activity, options, cleanup.
   NOTE(review): 'cleanup' must be supplied by the renderer for the
   {% if cleanup %} block below to ever fire -- confirm against __init__.py. #}

{% for sensor in sensors %}
GET_{{ sensor.label }}="cat {{ sensor.filepath }}"
{% endfor %}

LAUNCH_COMMAND="am start -W -n {{ package }}/{{ activity }} {{ options }}"
STOP_COMMAND="am force-stop {{ package }}"
TEMP_FILE=tmp.txt

TIME_RESULT=""
{% for sensor in sensors %}
{{ sensor.label }}=""
{% endfor %}

cd {{ device.working_directory }}

# esc esc down down down ENTER (this should bring up the apps menu)
input keyevent 111
sleep 1
input keyevent 111
sleep 1
input keyevent 20
sleep 1
input keyevent 20
sleep 1
input keyevent 20
sleep 1
input keyevent 66
sleep 1

# Warm up caches.
$LAUNCH_COMMAND
$STOP_COMMAND
$LAUNCH_COMMAND
$STOP_COMMAND
$LAUNCH_COMMAND
$STOP_COMMAND

for i in $(busybox seq 1 {{ iterations }})
do
    {# Sensor readings are taken before AND after each launch; the Python
       side pairs consecutive values to compute per-launch deltas. #}
    {% for sensor in sensors %}
    {{ sensor.label }}="${{ sensor.label }} `$GET_{{ sensor.label }}`"
    {% endfor %}

    $LAUNCH_COMMAND > $TEMP_FILE

    {% for sensor in sensors %}
    {{ sensor.label }}="${{ sensor.label }} `$GET_{{ sensor.label }}`"
    {% endfor %}

    # Extract the TotalTime value reported by `am start -W`.
    TIME=`busybox awk '{if($1~"TotalTime") print $2}' $TEMP_FILE`
    TIME_RESULT="$TIME_RESULT $TIME"
    {% if cleanup %}
    rm $TEMP_FILE
    {% endif %}

    $STOP_COMMAND
    sleep 2
done

{# One result file per sensor, plus the launch-time list. #}
{% for sensor in sensors %}
echo ${{ sensor.label }} > {{ sensor.label }}.result
{% endfor %}
echo $TIME_RESULT > time.result
# keyevent 111 == ESC; back out of the app (the menu-navigation comment that
# used to be here was a copy-paste from the block above).
input keyevent 111
sleep 1
diff --git a/wlauto/workloads/audio/__init__.py b/wlauto/workloads/audio/__init__.py
new file mode 100644
index 00000000..75244e45
--- /dev/null
+++ b/wlauto/workloads/audio/__init__.py
@@ -0,0 +1,102 @@
+# Copyright 2012-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# pylint: disable=E1101,W0201
+import os
+import time
+import urllib
+
+from wlauto import settings, Workload, Parameter
+from wlauto.exceptions import ConfigError
+from wlauto.utils.types import boolean
+
+
+DEFAULT_AUDIO_FILE_URL = "http://archive.org/download/PachelbelsCanoninD/Canon_in_D_Piano.mp3"
+
+
class Audio(Workload):
    """Plays an MP3 in the stock music player while the screen shows a blank
    browser page; there are no result metrics -- the workload simply runs for
    ``duration`` seconds."""

    name = 'audio'
    description = """
    Audio workload plays an MP3 file using the built-in music player. By default,
    it plays Canon_in_D_Piano.mp3 for 30 seconds.

    """

    parameters = [
        Parameter('duration', kind=int, default=30,
                  description='The duration the music will play for in seconds.'),
        Parameter('audio_file', default=os.path.join(settings.dependencies_directory, 'Canon_in_D_Piano.mp3'),
                  description='''The (on-host) path to the audio file to be played.

                                 .. note:: If the default file is not present locally, it will be downloaded.
                              '''),
        Parameter('perform_cleanup', kind=boolean, default=False,
                  description='If ``True``, workload files on the device will be deleted after execution.'),
        Parameter('clear_file_cache', kind=boolean, default=True,
                  description='Clear the file cache on the target device prior to running the workload.')
    ]

    def init_resources(self, context):
        # NOTE(review): if a custom audio_file path is given and missing, the
        # *default* track is downloaded to that name -- confirm intended.
        if not os.path.isfile(self.audio_file):
            self._download_audio_file()

    def setup(self, context):
        self.on_device_file = os.path.join(self.device.working_directory,
                                           os.path.basename(self.audio_file))

        self.device.push_file(self.audio_file, self.on_device_file, timeout=120)

        # Open the browser with default page
        self.device.execute('am start -n com.android.browser/.BrowserActivity about:blank')
        time.sleep(5)

        # Stop the browser if already running and wait for it to stop
        self.device.execute('am force-stop com.android.browser')
        time.sleep(5)

        # Clear the logs
        self.device.clear_logcat()

        # Clear browser cache
        self.device.execute('pm clear com.android.browser')

        if self.clear_file_cache:
            self.device.execute('sync')
            self.device.set_sysfile_value('/proc/sys/vm/drop_caches', 3)

        # Start the background music
        self.device.execute('am start -W -S -n com.android.music/.MediaPlaybackActivity -d {}'.format(self.on_device_file))

        # Launch the browser to blank the screen
        self.device.execute('am start -W -n com.android.browser/.BrowserActivity about:blank')
        time.sleep(5)  # Wait for browser to be properly launched

    def run(self, context):
        # Nothing to drive -- just let the track play for the requested time.
        time.sleep(self.duration)

    def update_result(self, context):
        # Stop the browser
        self.device.execute('am force-stop com.android.browser')
        # Stop the audio
        self.device.execute('am force-stop com.android.music')

    def teardown(self, context):
        if self.perform_cleanup:
            self.device.delete_file(self.on_device_file)

    def _download_audio_file(self):
        self.logger.debug('Downloading audio file from {}'.format(DEFAULT_AUDIO_FILE_URL))
        urllib.urlretrieve(DEFAULT_AUDIO_FILE_URL, self.audio_file)
+
diff --git a/wlauto/workloads/bbench/__init__.py b/wlauto/workloads/bbench/__init__.py
new file mode 100644
index 00000000..1843ad69
--- /dev/null
+++ b/wlauto/workloads/bbench/__init__.py
@@ -0,0 +1,231 @@
+# Copyright 2012-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# pylint: disable=E1101,W0201
+import os
+import time
+import urllib
+import tarfile
+import shutil
+import json
+import re
+
+from collections import defaultdict
+
+from wlauto import settings, Workload, Parameter, Alias, Executable
+from wlauto.exceptions import ConfigError
+from wlauto.utils.types import boolean
+
+DEFAULT_BBENCH_FILE = "http://bbench.eecs.umich.edu/bbench/bbench_2.0.tgz"
+DOWNLOADED_FILE_NAME = "bbench_2.0.tgz"
+BBENCH_SERVER_NAME = 'bbench_server'
+PATCH_FILES = os.path.join(os.path.dirname(__file__), "patches")
+DEFAULT_AUDIO_FILE = "http://archive.org/download/PachelbelsCanoninD/Canon_in_D_Piano.mp3"
+DEFAULT_AUDIO_FILE_NAME = 'Canon_in_D_Piano.mp3'
+
+
class BBench(Workload):

    name = 'bbench'
    description = """
    BBench workload opens the built-in browser and navigates to, and
    scrolls through, some preloaded web pages and ends the workload by trying to
    connect to a local server it runs after it starts. It can also play the
    workload while it plays an audio file in the background.

    """

    summary_metrics = ['Mean Latency']

    parameters = [
        Parameter('with_audio', kind=boolean, default=False,
                  description=('Specifies whether an MP3 should be played in the background during '
                               'workload execution.')),
        Parameter('server_timeout', kind=int, default=300,
                  description='Specifies the timeout (in seconds) before the server is stopped.'),
        Parameter('force_dependency_push', kind=boolean, default=False,
                  description=('Specifies whether to push dependency files to the device even '
                               'if they are already on it.')),
        Parameter('audio_file', default=os.path.join(settings.dependencies_directory, 'Canon_in_D_Piano.mp3'),
                  description=('The (on-host) path to the audio file to be played. This is only used if '
                               '``with_audio`` is ``True``.')),
        Parameter('perform_cleanup', kind=boolean, default=False,
                  description='If ``True``, workload files on the device will be deleted after execution.'),
        Parameter('clear_file_cache', kind=boolean, default=True,
                  description='Clear the file cache on the target device prior to running the workload.'),
        Parameter('browser_package', default='com.android.browser',
                  description='Specifies the package name of the device\'s browser app.'),
        Parameter('browser_activity', default='.BrowserActivity',
                  description='Specifies the startup activity name of the device\'s browser app.'),
    ]

    aliases = [
        Alias('bbench_with_audio', with_audio=True),
    ]

    def setup(self, context):  # NOQA
        # On-device locations for the site pages, the helper server binary
        # and (optionally) the background audio track.
        self.bbench_on_device = '/'.join([self.device.working_directory, 'bbench'])
        self.bbench_server_on_device = os.path.join(self.device.working_directory, BBENCH_SERVER_NAME)
        self.audio_on_device = os.path.join(self.device.working_directory, DEFAULT_AUDIO_FILE_NAME)
        self.index_noinput = 'file:///{}/index_noinput.html'.format(self.bbench_on_device)
        # Renamed from the misspelt 'luanch_server_command' (internal only).
        self.launch_server_command = '{} {}'.format(BBENCH_SERVER_NAME, self.server_timeout)

        # Fetch host-side dependencies on first use.
        if not os.path.isdir(os.path.join(self.dependencies_directory, "sites")):
            self._download_bbench_file()
        if self.with_audio and not os.path.isfile(self.audio_file):
            self._download_audio_file()

        if not os.path.isdir(self.dependencies_directory):
            raise ConfigError('Bbench directory does not exist: {}'.format(self.dependencies_directory))
        self._apply_patches()

        if self.with_audio:
            if self.force_dependency_push or not self.device.file_exists(self.audio_on_device):
                self.device.push_file(self.audio_file, self.audio_on_device, timeout=120)

        # Push the bbench site pages and http server to target device
        if self.force_dependency_push or not self.device.file_exists(self.bbench_on_device):
            self.logger.debug('Copying bbench sites to device.')
            self.device.push_file(self.dependencies_directory, self.bbench_on_device, timeout=300)

        # Push the bbench server
        host_binary = context.resolver.get(Executable(self, self.device.abi, 'bbench_server'))
        self.device.install(host_binary)

        # Open the browser with default page
        self.device.execute('am start -n {}/{} about:blank'.format(self.browser_package, self.browser_activity))
        time.sleep(5)

        # Stop the browser if already running and wait for it to stop
        self.device.execute('am force-stop {}'.format(self.browser_package))
        time.sleep(5)

        # Clear the logs
        self.device.clear_logcat()

        # clear browser cache
        self.device.execute('pm clear {}'.format(self.browser_package))
        if self.clear_file_cache:
            self.device.execute('sync')
            self.device.set_sysfile_value('/proc/sys/vm/drop_caches', 3)

        # Launch the background music
        if self.with_audio:
            self.device.execute('am start -W -S -n com.android.music/.MediaPlaybackActivity -d {}'.format(self.audio_on_device))

    def run(self, context):
        # Launch the bbench index page in the browser...
        self.device.execute('am start -n {}/{} {}'.format(self.browser_package, self.browser_activity, self.index_noinput))
        time.sleep(5)  # WA1 parity
        # ...then block on the on-device server until the run completes or
        # server_timeout expires.
        self.device.execute(self.launch_server_command, self.server_timeout)

    def update_result(self, context):
        # Stop the browser
        self.device.execute('am force-stop {}'.format(self.browser_package))

        # Stop the music
        if self.with_audio:
            self.device.execute('am force-stop com.android.music')

        # Get index_no_input.html (holds the run settings in its startTest call)
        indexfile = os.path.join(self.device.working_directory, 'bbench/index_noinput.html')
        self.device.pull_file(indexfile, context.output_directory)

        # Get the logs
        output_file = os.path.join(self.device.working_directory, 'browser_bbench_logcat.txt')
        self.device.execute('logcat -v time -d > {}'.format(output_file))
        self.device.pull_file(output_file, context.output_directory)

        metrics = _parse_metrics(os.path.join(context.output_directory, 'browser_bbench_logcat.txt'),
                                 os.path.join(context.output_directory, 'index_noinput.html'),
                                 context.output_directory)
        for key, values in metrics:
            for i, value in enumerate(values):
                metric = '{}_{}'.format(key, i) if i else key
                context.result.add_metric(metric, value, units='ms', lower_is_better=True)

    def teardown(self, context):
        if self.perform_cleanup:
            self.device.execute('rm -r {}'.format(self.bbench_on_device))
            if self.with_audio:
                # The audio file is only pushed when with_audio is set;
                # removing it unconditionally would fail on a file that was
                # never copied over.
                self.device.execute('rm {}'.format(self.audio_on_device))

    def _download_audio_file(self):
        self.logger.debug('Downloading audio file.')
        urllib.urlretrieve(DEFAULT_AUDIO_FILE, self.audio_file)

    def _download_bbench_file(self):
        # downloading the file to bbench_dir
        self.logger.debug('Downloading bbench dependencies.')
        full_file_path = os.path.join(self.dependencies_directory, DOWNLOADED_FILE_NAME)
        urllib.urlretrieve(DEFAULT_BBENCH_FILE, full_file_path)

        # Extracting Bbench to bbench_dir/
        # NOTE(review): extractall() on a downloaded archive trusts its member
        # paths; the source is a fixed upstream URL, but worth noting.
        self.logger.debug('Extracting bbench dependencies.')
        tar = tarfile.open(full_file_path)
        tar.extractall(os.path.dirname(self.dependencies_directory))

        # Removing not needed files and the compressed file
        os.remove(full_file_path)
        youtube_dir = os.path.join(self.dependencies_directory, 'sites', 'youtube')
        os.remove(os.path.join(youtube_dir, 'www.youtube.com', 'kp.flv'))
        os.remove(os.path.join(youtube_dir, 'kp.flv'))

    def _apply_patches(self):
        self.logger.debug('Applying patches.')
        shutil.copy(os.path.join(PATCH_FILES, "bbench.js"), self.dependencies_directory)
        shutil.copy(os.path.join(PATCH_FILES, "results.html"), self.dependencies_directory)
        shutil.copy(os.path.join(PATCH_FILES, "index_noinput.html"), self.dependencies_directory)
+
+
+def _parse_metrics(logfile, indexfile, output_directory): # pylint: disable=R0914
+ regex_bbscore = re.compile(r'(?P<head>\w+)=(?P<val>\w+)')
+ regex_bbmean = re.compile(r'Mean = (?P<mean>[0-9\.]+)')
+ regex_pagescore_head = re.compile(r'metrics:(\w+),(\d+)')
+ regex_pagescore_tail = re.compile(r',(\d+.\d+)')
+ regex_indexfile = re.compile(r'<body onload="startTest\((.*)\)">')
+ settings_dict = defaultdict()
+
+ with open(indexfile) as fh:
+ for line in fh:
+ match = regex_indexfile.search(line)
+ if match:
+ settings_dict['iterations'], settings_dict['scrollDelay'], settings_dict['scrollSize'] = match.group(1).split(',')
+ with open(logfile) as fh:
+ results_dict = defaultdict(list)
+ for line in fh:
+ if 'metrics:Mean' in line:
+ results_list = regex_bbscore.findall(line)
+ results_dict['Mean Latency'].append(regex_bbmean.search(line).group('mean'))
+ if results_list:
+ break
+ elif 'metrics:' in line:
+ page_results = [0]
+ match = regex_pagescore_head.search(line)
+ name, page_results[0] = match.groups()
+ page_results.extend(regex_pagescore_tail.findall(line[match.end():]))
+ for val in page_results[:-2]:
+ results_list.append((name, int(float(val))))
+
+ setting_names = ['siteIndex', 'CGTPreviousTime', 'scrollDelay', 'scrollSize', 'iterations']
+ for k, v in results_list:
+ if k not in setting_names:
+ results_dict[k].append(v)
+
+ sorted_results = sorted(results_dict.items())
+
+ with open(os.path.join(output_directory, 'settings.json'), 'w') as wfh:
+ json.dump(settings_dict, wfh)
+
+ return sorted_results
diff --git a/wlauto/workloads/bbench/bin/arm64/bbench_server b/wlauto/workloads/bbench/bin/arm64/bbench_server
new file mode 100755
index 00000000..c33f5cfd
--- /dev/null
+++ b/wlauto/workloads/bbench/bin/arm64/bbench_server
Binary files differ
diff --git a/wlauto/workloads/bbench/bin/armeabi/bbench_server b/wlauto/workloads/bbench/bin/armeabi/bbench_server
new file mode 100755
index 00000000..c33f5cfd
--- /dev/null
+++ b/wlauto/workloads/bbench/bin/armeabi/bbench_server
Binary files differ
diff --git a/wlauto/workloads/bbench/patches/bbench.js b/wlauto/workloads/bbench/patches/bbench.js
new file mode 100644
index 00000000..05e2900f
--- /dev/null
+++ b/wlauto/workloads/bbench/patches/bbench.js
@@ -0,0 +1,177 @@
+//Author: Anthony Gutierrez
+
var bb_site = [];          // ordered list of page URLs to visit (filled by generateSiteArray)
var bb_results = [];       // NOTE(review): appears unused in this file -- confirm before removing
var globalSiteIndex = 0;   // starting index passed to the first siteTest() call
var numWebsites = 9;       // sites per iteration (youtube and espn entries are commented out below)
var bb_path = document.location.pathname;
// Root of the bbench installation, derived from the current page's own path.
var bb_home = "file:///" + bb_path.substr(1, bb_path.lastIndexOf("bbench") + 5);
var num_iters = 0;         // scroll-step counter used by scrollToBottom()
var init = false;          // true only for the initial startTest() invocation
+
/*
 * Populates the global bb_site array with numTimesToExecute passes over the
 * nine benchmark site index pages, followed by the results page.
 */
function generateSiteArray(numTimesToExecute) {
    for (i = 0; i < numTimesToExecute * numWebsites; i += numWebsites) {
        bb_site[i+0] = bb_home + "/sites/amazon/www.amazon.com/index.html";
        bb_site[i+1] = bb_home + "/sites/bbc/www.bbc.co.uk/index.html";
        bb_site[i+2] = bb_home + "/sites/cnn/www.cnn.com/index.html";
        bb_site[i+3] = bb_home + "/sites/craigslist/newyork.craigslist.org/index.html";
        bb_site[i+4] = bb_home + "/sites/ebay/www.ebay.com/index.html";
        bb_site[i+5] = bb_home + "/sites/google/www.google.com/index.html";
//      bb_site[i+6] = bb_home + "/sites/youtube/www.youtube.com/index.html";
        bb_site[i+6] = bb_home + "/sites/msn/www.msn.com/index.html";
        bb_site[i+7] = bb_home + "/sites/slashdot/slashdot.org/index.html";
        bb_site[i+8] = bb_home + "/sites/twitter/twitter.com/index.html";
//      bb_site[i+10] = bb_home + "/sites/espn/espn.go.com/index.html";
    }

    // NOTE(review): relies on the loop variable i (an implicit global)
    // keeping its final value, numTimesToExecute * numWebsites, so the
    // results page lands immediately after the last site entry.
    bb_site[i] = bb_home + "/results.html";
}
+
+
+/* gets the URL parameters and removes from window href */
/* Extracts a numeric URL parameter and strips its "?param=value&" segment
   from windowURL.value. Returns the value as a string, or "" if absent. */
function getAndRemoveURLParams(windowURL, param) {
    var matcher = new RegExp("(.*)(\\?)" + param + "(=)([0-9]+)(&)(.*)");
    var match = matcher.exec(windowURL.value);

    if (match == null) {
        return "";
    }
    windowURL.value = match[1] + match[6];
    return match[4];
}
+
+/* gets the URL parameters */
+function getURLParams(param) {
+ var regex_string = "(.*)(\\?)" + param + "(=)([0-9]+)(&)(.*)";
+ var regex = new RegExp(regex_string);
+ var results = regex.exec(window.location.href);
+
+ if (results == null)
+ return "";
+ else
+ return results[4];
+}
+
+/* gets all the parameters */
+function getAllParams() {
+ var regex_string = "(\\?.*)(\\?siteIndex=)([0-9]+)(&)";
+ var regex = new RegExp(regex_string);
+ var results = regex.exec(window.location.href);
+ /*alert(" Result is 1: " + results[1] + " 2: " + results[2] + " 3: " + results[3]);*/
+
+ if (results == null)
+ return "";
+ else
+ return results[1];
+}
+
+/* sets a cookie */
+function setCookie(c_name, value) {
+ var c_value = escape(value) + ";";
+ document.cookie = c_name + "=" + c_value + " path=/";
+}
+
+/* gets a cookie */
+function getCookie(c_name) {
+ var cookies = document.cookie.split(";");
+ var i, x, y;
+
+ for (i = 0; i < cookies.length; ++i) {
+ x = cookies[i].substr(0, cookies[i].indexOf("="));
+ y = cookies[i].substr(cookies[i].indexOf("=") + 1);
+ x = x.replace(/^\s+|\s+$/g,"");
+
+ if (x == c_name)
+ return unescape(y);
+ }
+}
+
+/* start the test, simply go to site 1. */
+function startTest(n, del, y) {
+ //var start_time = (new Date()).getTime();
+ //setCookie("PreviousTime", start_time);
+
+ init = true;
+
+ generateSiteArray(n);
+ siteTest(bb_site[0], globalSiteIndex, new Date().getTime(), "scrollSize=" + y + "&?scrollDelay=" + del + "&?iterations=" + n + "&?" + "StartPage");
+ //siteTest(bb_site[0], globalSiteIndex, new Date().getTime(), "scrollDelay=" + del + "&?iterations=" + n + "&?" + "StartPage");
+ //goToSite(bb_site[0], new Date().getTime());
+}
+
+/* jump to the next site */
+function goToSite(site) {
+ curr_time = new Date().getTime();
+ setCookie("CGTPreviousTime", curr_time);
+ site+="?CGTPreviousTime="+curr_time+"&";
+ window.location.href = site;
+}
+
+/*
+ the test we want to run on the site.
+ for now, simply scroll to the bottom
+ and jump to the next site. in the
+ future we will want to do some more
+ realistic browsing tests.
+*/
/*
 * Per-page test driver: recovers run state from the URL (except on the very
 * first call from startTest, when `init` is true and defaults apply),
 * measures the load time of the page just entered via the CGTPreviousTime
 * parameter, scrolls to the bottom, then chains to the next site in bb_site
 * with the measured time appended to the URL.
 */
function siteTest(nextSite, siteIndex, startTime, siteName) {
    if (!init) {
        var iterations = getURLParams("iterations");
        var params = getAllParams();
        var delay = getURLParams("scrollDelay");
        var verticalScroll = getURLParams("scrollSize");
        generateSiteArray(iterations);
        nextSite = bb_site[siteIndex] + params;
    }
    else {
        // Defaults used only for the initial (start) page.
        var delay = 500;
        var verticalScroll = 500;
    }
    var cgtPreviousTime = getURLParams("CGTPreviousTime");
    var load_time = 0;
    siteIndex++;
    // siteIndex > 1 means a real site was just loaded (not the start page).
    if (siteIndex > 1) {
        cur_time = new Date().getTime();
//      alert("previous " + cgtPreviousTime + " foo " + getCookie("CGTPreviousTime"));
        load_time = (cur_time - cgtPreviousTime);
        setCookie("CGTLoadTime", load_time);
//      diff = cur_time-startTime;
//      alert("starttime "+startTime+" currtime "+ cur_time + " diff " + diff + "load_time " + load_time );
    }
    // Scroll through the page, then navigate onwards; the 1s delay lets the
    // page settle before scrolling begins (skipped for the start page).
    setTimeout(function() {
        scrollToBottom(0, verticalScroll, delay,load_time,
            function(load_time_param){
                cur_time = new Date().getTime();
                load_time = (cur_time - startTime);
                //load_time = (cur_time - getCookie("PreviousTime"));
                // alert("Done with this site! " + window.cur_time + " " + startTime + " " + window.load_time);
                //alert("Done with this site! " + window.cur_time + " " + getCookie("PreviousTime") + " " + window.load_time);
                //goToSite(nextSite + "?iterations=" + iterations + "&?" + siteName + "=" + load_time + "&" + "?siteIndex=" + siteIndex + "&" );
//              alert("loadtime in cookie="+ getCookie("CGTLoadTime")+" loadtime in var="+load_time_param);
                goToSite(nextSite + "?" + siteName + "=" + load_time_param + "&" + "?siteIndex=" + siteIndex + "&" );
            }
        );},(siteIndex > 1) ? 1000 : 0);
}
+
+/*
+ scroll to the bottom of the page in
+ num_y pixel increments. may want to
+ do some horizontal scrolling in the
+ future as well.
+*/
/*
 * Scrolls the page down in num_y-pixel steps every `del` ms; when the bottom
 * is reached, invokes continuation k with load_time.
 * NOTE(review): num_iters is a global that is never reset between pages in
 * this file -- confirm whether that is intentional.
 */
function scrollToBottom(num_x, num_y, del, load_time, k) {
    ++num_iters;
    var diff = document.body.scrollHeight - num_y * num_iters;
    //var num_scrolls = 0;

    if (diff > num_y) {
        //self.scrollBy(num_x, num_y);
        //setTimeout(function(){self.scrollBy(num_x, num_y); /*diff -= 100;*/ scrollToBottom(num_x, num_y, k);}, 2);
        setTimeout(function(){self.scrollBy(num_x, num_y); /*diff -= 100;*/ scrollToBottom(num_x, num_y, del, load_time,k);}, del);
    }
    else{
        k(load_time);
    }
}
diff --git a/wlauto/workloads/bbench/patches/index_noinput.html b/wlauto/workloads/bbench/patches/index_noinput.html
new file mode 100644
index 00000000..072c9ad8
--- /dev/null
+++ b/wlauto/workloads/bbench/patches/index_noinput.html
@@ -0,0 +1,56 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1.dtd">
+<!--
+ Author: Anthony Gutierrez
+-->
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+ <title>University of Michigan - BBench 2.0</title>
+ <script type="text/javascript" src="bbench.js"></script>
+ <script type="text/javascript" src="forms.js"></script>
+</head>
+
+<body onload="startTest(2,2000,500)">
+<!--
+<body>
+ <img src="mich_engin.png" width="35%"/>
+ <h2>University of Michigan BBench version 2.0</h2>
+
+ <form name="config_form">
+ <b>Number of iterations:</b> <input type="text" name="numIterations" value="5" size="4" onchange="setIters();">
+ <input type="button" value="-" name="iterPlusButton" onClick="document.config_form.numIterations.value=numItersDec(); return true;">
+ <input type="button" value="+" name="iterMinusButton" onClick="document.config_form.numIterations.value=numItersInc(); return true;">
+ (Number of times the page set is iterated through.)
+ <br/><br/>
+
+ <b>Scroll Delay (ms):</b> <input type="text" name="scrollDelay" value="0" size="8" onchange="setScrollDelay();">
+ <input type="button" value="-" name="scrollDelayPlusButton" onClick="document.config_form.scrollDelay.value=scrollDelayDec(); return true;">
+ <input type="button" value="+" name="scrollDelayMinusButton" onClick="document.config_form.scrollDelay.value=scrollDelayInc(); return true;">
+ (Number of milliseconds to pause before scrolling.)
+ <br/><br/>
+
+ <b>Scroll Size:</b> <input type="text" name="scrollSize" value="500" size="8" onchange="setScrollSize();">
+ <input type="button" value="-" name="scrollSizePlusButton" onClick="document.config_form.scrollSize.value=scrollSizeDec(); return true;">
+ <input type="button" value="+" name="scrollSizeMinusButton" onClick="document.config_form.scrollSize.value=scrollSizeInc(); return true;">
+ (Number of pixel to scroll.)
+ <br/><br/>
+ </form>
+
+ <p>
+ <b>Click on the start button to begin the benchmark.</b>
+ </p>
+ <button onclick="startTest(numIters, scrollDelay, scrollSize)">start</button>
+
+ <p>
+ If you use BBench in your work please cite our <a href="http://www.eecs.umich.edu/~atgutier/iiswc_2011.pdf">2011 IISWC paper</a>:<br/><br/>
+
+ A. Gutierrez, R.G. Dreslinksi, T.F. Wenisch, T. Mudge, A. Saidi, C. Emmons, and N. Paver. Full-System Analysis and Characterization
+ of Interactive Smartphone Applications. <i>IEEE International Symposium on Workload Characterization</i>, 2011.
+ </p>
-->
+</body>
+
+</html>
+
diff --git a/wlauto/workloads/bbench/patches/results.html b/wlauto/workloads/bbench/patches/results.html
new file mode 100644
index 00000000..a7eb2e33
--- /dev/null
+++ b/wlauto/workloads/bbench/patches/results.html
@@ -0,0 +1,158 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1.dtd">
+<!--
+ Author: Anthony Gutierrez
+-->
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+ <title>University of Michigan - BBench 2.0</title>
+ <script type="text/javascript" src="bbench.js"></script>
+
+ <script type="text/javascript">
+ var numTimesToExecute = getURLParams("iterations");
+
+ function closeWindow() {
+ window.open('','_self','');
+ window.close();
+ }
+
+ function averageWarm(siteTimes) {
+ var sum = 0;
+
+ if (numTimesToExecute == 1)
+ return siteTimes[0];
+
+ for (i = 0; i < numTimesToExecute - 1; ++i)
+ sum = eval(sum + siteTimes[i]);
+
+ return (sum / (numTimesToExecute - 1));
+ }
+
+ function stdDevWarm(siteTimes) {
+ var avg = averageWarm(siteTimes)
+ var tmpArray = [];
+
+ if (numTimesToExecute == 1)
+ return 0;
+
+ for (i = 0; i < numTimesToExecute - 1; ++i)
+ tmpArray[i] = Math.pow((siteTimes[i] - avg), 2);
+
+ avg = averageWarm(tmpArray);
+
+ return Math.sqrt(avg);
+ }
+
+ function geoMean(avgTimes) {
+ var prod = 1;
+
+ for (i = 0; i < numWebsites; ++i)
+ prod = eval(prod * avgTimes[i]);
+
+ return Math.pow(prod, (1/numWebsites));
+ }
+ </script>
+</head>
+
+<body>
+ <img src="mich_engin.png" width="35%"/>
+ <h2>University of Michigan BBench version 2.0</h2>
+ <h3>Results</h3>
+
+ <script type="text/javascript">
+ var bbSiteColdTimes = [];
+ var bbSiteTimes = [];
+ var bbSiteAvgRunTime = [];
+ var bbSiteStdDev = [];
+ var bbSiteCoeffVar = [];
+ var bbSiteNames = ["amazon",
+ "bbc",
+ "cnn",
+ "craigslist",
+ "ebay",
+// "espn",
+ "google",
+ "msn",
+ "slashdot",
+ "twitter"];
+// "youtube"];
+
+ var windowURL = new Object();
+ var windowURL2 = new Object();
+ windowURL.value = window.location.href;
+ windowURL2.value = window.location.href;
+
+ for (j = 0; j < numWebsites; ++j) {
+
+ for (i = 0; i < numTimesToExecute; ++i) {
+ var site_time = getAndRemoveURLParams(windowURL, bbSiteNames[j]) - 0;
+ bbSiteTimes[i] = site_time;
+ }
+
+ bbSiteColdTimes[j] = bbSiteTimes[i - 1];
+ bbSiteAvgRunTime[j] = averageWarm(bbSiteTimes);
+ bbSiteStdDev[j] = stdDevWarm(bbSiteTimes);
+ bbSiteCoeffVar[j] = (bbSiteStdDev[j] / bbSiteAvgRunTime[j]) * 100;
+ }
+
+ var bbSiteAvgGeoMean = geoMean(bbSiteAvgRunTime);
+ </script>
+
+ <table border="1">
+ <script type="text/javascript">
+ document.write("<tr align=\"right\"><td>Site Name</td><td>Cold Start Time</td><td>Avg Warm Page Rendering Time (ms)</td><td>Std Dev of Warm Runs</td><td>%Coeff Var of Warm Runs</td>");
+ for (i = 0; i < numWebsites; ++i) {
+ document.write("<tr align=\"right\">");
+ document.write("<td>" + bbSiteNames[i] + "</td>");
+ document.write("<td>" + bbSiteColdTimes[i] + "</td>");
+ document.write("<td>" + bbSiteAvgRunTime[i].toFixed(2) + "</td>");
+ document.write("<td>" + bbSiteStdDev[i].toFixed(2) + "</td>");
+ document.write("<td>" + bbSiteCoeffVar[i].toFixed(2) + "</td>");
+ document.write("</tr>");
+ }
+ </script>
+ </table>
+
+ <br />
+
+ <table border="1">
+ <script type="text/javascript">
+ document.write("<tr><td>Geometric Mean of Average Warm Runs</td><td>" + bbSiteAvgGeoMean.toFixed(2) + "</td></tr>");
+ console.log("metrics:" + "Mean = " + bbSiteAvgGeoMean.toFixed(2) + ":")
+ </script>
+ </table>
+
+ <h3>CSV version of the table:</h3>
+
+ <script type="text/javascript">
+ document.write("Site Name,Cold Start Time, Avg Warm Page Rendering Time (ms),Std Dev of Warm Runs,%Coeff Var of Warm Runs<br />");
+ for (i = 0; i < numWebsites; ++i) {
+ document.write(bbSiteNames[i] + ",");
+ document.write(bbSiteColdTimes[i] + ",");
+ document.write(bbSiteAvgRunTime[i].toFixed(2) + ",");
+ document.write(bbSiteStdDev[i].toFixed(2) + ",");
+ document.write(bbSiteCoeffVar[i].toFixed(2) + "<br />");
+ console.log("metrics:" + bbSiteNames[i] + "," + bbSiteColdTimes[i] + "," + bbSiteAvgRunTime[i].toFixed(2) + "," + bbSiteStdDev[i].toFixed(2) + "," + bbSiteCoeffVar[i].toFixed(2) + ":");
+ }
+
+ document.write("<h3>Individual Site Times:</h3>");
+ for (j = 0; j < numWebsites; ++j) {
+ for (i = 0; i < numTimesToExecute; ++i) {
+ var site_time = getAndRemoveURLParams(windowURL2, bbSiteNames[j]) - 0;
+ bbSiteTimes[i] = site_time;
+ document.write(bbSiteNames[j] + " load time: " + site_time + "<br />");
+ }
+ document.write("<br />");
+ }
+ setTimeout("window.location.href='http://localhost:3030/'", 1);
+ </script>
+
+ <p>
+ <b>Click the return button to go to the start page.</b>
+ </p>
+ <button onclick="window.location.href='index.html'">return</button>
+</body>
+
+</html>
diff --git a/wlauto/workloads/benchmarkpi/__init__.py b/wlauto/workloads/benchmarkpi/__init__.py
new file mode 100644
index 00000000..c49f6d05
--- /dev/null
+++ b/wlauto/workloads/benchmarkpi/__init__.py
@@ -0,0 +1,63 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import re
+
+from wlauto import AndroidUiAutoBenchmark
+
+
+class BenchmarkPi(AndroidUiAutoBenchmark):
+
+ name = 'benchmarkpi'
+ description = """
+ Measures the time the target device takes to run and complete the Pi
+ calculation algorithm.
+
+ http://androidbenchmark.com/howitworks.php
+
+ from the website:
+
+ The whole idea behind this application is to use the same Pi calculation
+    algorithm on every Android Device and check how fast that process is.
+ Better calculation times, conclude to faster Android devices. This way you
+ can also check how lightweight your custom made Android build is. Or not.
+
+ As Pi is an irrational number, Benchmark Pi does not calculate the actual Pi
+ number, but an approximation near the first digits of Pi over the same
+ calculation circles the algorithms needs.
+
+    So, the number you are getting in milliseconds is the time your mobile device
+    takes to run and complete the Pi calculation algorithm resulting in an
+ approximation of the first Pi digits.
+ """
+ package = 'gr.androiddev.BenchmarkPi'
+ activity = '.BenchmarkPi'
+ summary_metrics = ['pi calculation']
+
+ regex = re.compile('You calculated Pi in ([0-9]+)')
+
+ def update_result(self, context):
+ super(BenchmarkPi, self).update_result(context)
+ result = None
+ with open(self.logcat_log) as fh:
+ for line in fh:
+ match = self.regex.search(line)
+ if match:
+ result = int(match.group(1))
+
+ if result is not None:
+ context.result.add_metric('pi calculation', result,
+ 'Milliseconds', lower_is_better=True)
diff --git a/wlauto/workloads/benchmarkpi/com.arm.wlauto.uiauto.benchmarkpi.jar b/wlauto/workloads/benchmarkpi/com.arm.wlauto.uiauto.benchmarkpi.jar
new file mode 100644
index 00000000..433334d2
--- /dev/null
+++ b/wlauto/workloads/benchmarkpi/com.arm.wlauto.uiauto.benchmarkpi.jar
Binary files differ
diff --git a/wlauto/workloads/benchmarkpi/uiauto/build.sh b/wlauto/workloads/benchmarkpi/uiauto/build.sh
new file mode 100755
index 00000000..be7ca104
--- /dev/null
+++ b/wlauto/workloads/benchmarkpi/uiauto/build.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+class_dir=bin/classes/com/arm/wlauto/uiauto
+base_class=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')"`
+mkdir -p $class_dir
+cp $base_class $class_dir
+
+ant build
+
+if [[ -f bin/com.arm.wlauto.uiauto.benchmarkpi.jar ]]; then
+ cp bin/com.arm.wlauto.uiauto.benchmarkpi.jar ..
+fi
diff --git a/wlauto/workloads/benchmarkpi/uiauto/build.xml b/wlauto/workloads/benchmarkpi/uiauto/build.xml
new file mode 100644
index 00000000..67603ca8
--- /dev/null
+++ b/wlauto/workloads/benchmarkpi/uiauto/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="com.arm.wlauto.uiauto.benchmarkpi" default="help">
+
+ <!-- The local.properties file is created and updated by the 'android' tool.
+ It contains the path to the SDK. It should *NOT* be checked into
+ Version Control Systems. -->
+ <property file="local.properties" />
+
+ <!-- The ant.properties file can be created by you. It is only edited by the
+ 'android' tool to add properties to it.
+ This is the place to change some Ant specific build properties.
+ Here are some properties you may want to change/update:
+
+ source.dir
+ The name of the source directory. Default is 'src'.
+ out.dir
+ The name of the output directory. Default is 'bin'.
+
+ For other overridable properties, look at the beginning of the rules
+ files in the SDK, at tools/ant/build.xml
+
+ Properties related to the SDK location or the project target should
+ be updated using the 'android' tool with the 'update' action.
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems.
+
+ -->
+ <property file="ant.properties" />
+
+ <!-- if sdk.dir was not set from one of the property file, then
+ get it from the ANDROID_HOME env var.
+ This must be done before we load project.properties since
+ the proguard config can use sdk.dir -->
+ <property environment="env" />
+ <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+ <isset property="env.ANDROID_HOME" />
+ </condition>
+
+ <!-- The project.properties file is created and updated by the 'android'
+ tool, as well as ADT.
+
+ This contains project specific properties such as project target, and library
+ dependencies. Lower level build properties are stored in ant.properties
+ (or in .classpath for Eclipse projects).
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems. -->
+ <loadproperties srcFile="project.properties" />
+
+ <!-- quick check on sdk.dir -->
+ <fail
+ message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+ unless="sdk.dir"
+ />
+
+ <!--
+ Import per project custom build rules if present at the root of the project.
+ This is the place to put custom intermediary targets such as:
+ -pre-build
+ -pre-compile
+ -post-compile (This is typically used for code obfuscation.
+ Compiled code location: ${out.classes.absolute.dir}
+ If this is not done in place, override ${out.dex.input.absolute.dir})
+ -post-package
+ -post-build
+ -pre-clean
+ -->
+ <import file="custom_rules.xml" optional="true" />
+
+ <!-- Import the actual build file.
+
+ To customize existing targets, there are two options:
+ - Customize only one target:
+ - copy/paste the target into this file, *before* the
+ <import> task.
+ - customize it to your needs.
+ - Customize the whole content of build.xml
+ - copy/paste the content of the rules files (minus the top node)
+ into this file, replacing the <import> task.
+ - customize to your needs.
+
+ ***********************
+ ****** IMPORTANT ******
+ ***********************
+ In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+ in order to avoid having your file be overridden by tools such as "android update project"
+ -->
+ <!-- version-tag: VERSION_TAG -->
+ <import file="${sdk.dir}/tools/ant/uibuild.xml" />
+
+</project>
diff --git a/wlauto/workloads/benchmarkpi/uiauto/project.properties b/wlauto/workloads/benchmarkpi/uiauto/project.properties
new file mode 100644
index 00000000..a3ee5ab6
--- /dev/null
+++ b/wlauto/workloads/benchmarkpi/uiauto/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=android-17
diff --git a/wlauto/workloads/benchmarkpi/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java b/wlauto/workloads/benchmarkpi/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
new file mode 100644
index 00000000..e4e8b7ad
--- /dev/null
+++ b/wlauto/workloads/benchmarkpi/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
@@ -0,0 +1,62 @@
+/* Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+package com.arm.wlauto.uiauto.benchmarkpi;
+
+import android.app.Activity;
+import android.os.Bundle;
+import android.util.Log;
+
+// Import the uiautomator libraries
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+
+import com.arm.wlauto.uiauto.BaseUiAutomation;
+
+public class UiAutomation extends BaseUiAutomation {
+
+ public static String TAG = "benchmarkpi";
+
+ public void runUiAutomation() throws Exception{
+ Bundle status = new Bundle();
+
+ startTest();
+ waitForAndExtractResults();
+
+ getAutomationSupport().sendStatus(Activity.RESULT_OK, status);
+ }
+
+ public void startTest() throws Exception{
+ UiSelector selector = new UiSelector();
+ UiObject benchButton = new UiObject(selector.text("Benchmark my Android!")
+ .className("android.widget.Button"));
+ benchButton.click();
+ }
+
+ public void waitForAndExtractResults() throws Exception{
+ UiSelector selector = new UiSelector();
+ UiObject submitButton = new UiObject(selector.text("Submit")
+ .className("android.widget.Button"));
+ submitButton.waitForExists(10 * 1000);
+
+ UiObject resultsText = new UiObject(selector.textContains("You calculated Pi in")
+ .className("android.widget.TextView"));
+ Log.v(TAG, resultsText.getText());
+ }
+}
diff --git a/wlauto/workloads/caffeinemark/__init__.py b/wlauto/workloads/caffeinemark/__init__.py
new file mode 100644
index 00000000..6b2ae0f9
--- /dev/null
+++ b/wlauto/workloads/caffeinemark/__init__.py
@@ -0,0 +1,68 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import re
+
+from wlauto import AndroidUiAutoBenchmark
+
+
+class Caffeinemark(AndroidUiAutoBenchmark):
+
+ name = 'caffeinemark'
+ description = """
+ CaffeineMark is a series of tests that measure the speed of Java
+ programs running in various hardware and software configurations.
+
+ http://www.benchmarkhq.ru/cm30/info.html
+
+ From the website:
+
+ CaffeineMark scores roughly correlate with the number of Java instructions
+ executed per second, and do not depend significantly on the the amount of
+ memory in the system or on the speed of a computers disk drives or internet
+ connection.
+
+ The following is a brief description of what each test does:
+
+    - Sieve: The classic sieve of Eratosthenes finds prime numbers.
+ - Loop: The loop test uses sorting and sequence generation as to measure
+ compiler optimization of loops.
+ - Logic: Tests the speed with which the virtual machine executes
+ decision-making instructions.
+ - Method: The Method test executes recursive function calls to see how
+ well the VM handles method calls.
+ - Float: Simulates a 3D rotation of objects around a point.
+ - Graphics: Draws random rectangles and lines.
+ - Image: Draws a sequence of three graphics repeatedly.
+ - Dialog: Writes a set of values into labels and editboxes on a form.
+
+ The overall CaffeineMark score is the geometric mean of the individual
+ scores, i.e., it is the 9th root of the product of all the scores.
+ """
+ package = "com.flexycore.caffeinemark"
+ activity = ".Application"
+ summary_metrics = ['OverallScore']
+
+ regex = re.compile(r'CAFFEINEMARK RESULT: (?P<type>\w+) (?P<value>\S+)')
+
+ def update_result(self, context):
+ super(Caffeinemark, self).update_result(context)
+ with open(self.logcat_log) as fh:
+ for line in fh:
+ match = self.regex.search(line)
+ if match:
+ metric = match.group('type')
+ value = float(match.group('value'))
+ context.result.add_metric(metric, value)
diff --git a/wlauto/workloads/caffeinemark/com.arm.wlauto.uiauto.caffeinemark.jar b/wlauto/workloads/caffeinemark/com.arm.wlauto.uiauto.caffeinemark.jar
new file mode 100644
index 00000000..2a75e9d2
--- /dev/null
+++ b/wlauto/workloads/caffeinemark/com.arm.wlauto.uiauto.caffeinemark.jar
Binary files differ
diff --git a/wlauto/workloads/caffeinemark/uiauto/build.sh b/wlauto/workloads/caffeinemark/uiauto/build.sh
new file mode 100755
index 00000000..148e101e
--- /dev/null
+++ b/wlauto/workloads/caffeinemark/uiauto/build.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+class_dir=bin/classes/com/arm/wlauto/uiauto
+base_class=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')"`
+mkdir -p $class_dir
+cp $base_class $class_dir
+
+ant build
+
+if [[ -f bin/com.arm.wlauto.uiauto.caffeinemark.jar ]]; then
+ cp bin/com.arm.wlauto.uiauto.caffeinemark.jar ..
+fi
diff --git a/wlauto/workloads/caffeinemark/uiauto/build.xml b/wlauto/workloads/caffeinemark/uiauto/build.xml
new file mode 100644
index 00000000..0b50bbf9
--- /dev/null
+++ b/wlauto/workloads/caffeinemark/uiauto/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="com.arm.wlauto.uiauto.caffeinemark" default="help">
+
+ <!-- The local.properties file is created and updated by the 'android' tool.
+ It contains the path to the SDK. It should *NOT* be checked into
+ Version Control Systems. -->
+ <property file="local.properties" />
+
+ <!-- The ant.properties file can be created by you. It is only edited by the
+ 'android' tool to add properties to it.
+ This is the place to change some Ant specific build properties.
+ Here are some properties you may want to change/update:
+
+ source.dir
+ The name of the source directory. Default is 'src'.
+ out.dir
+ The name of the output directory. Default is 'bin'.
+
+ For other overridable properties, look at the beginning of the rules
+ files in the SDK, at tools/ant/build.xml
+
+ Properties related to the SDK location or the project target should
+ be updated using the 'android' tool with the 'update' action.
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems.
+
+ -->
+ <property file="ant.properties" />
+
+ <!-- if sdk.dir was not set from one of the property file, then
+ get it from the ANDROID_HOME env var.
+ This must be done before we load project.properties since
+ the proguard config can use sdk.dir -->
+ <property environment="env" />
+ <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+ <isset property="env.ANDROID_HOME" />
+ </condition>
+
+ <!-- The project.properties file is created and updated by the 'android'
+ tool, as well as ADT.
+
+ This contains project specific properties such as project target, and library
+ dependencies. Lower level build properties are stored in ant.properties
+ (or in .classpath for Eclipse projects).
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems. -->
+ <loadproperties srcFile="project.properties" />
+
+ <!-- quick check on sdk.dir -->
+ <fail
+ message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+ unless="sdk.dir"
+ />
+
+ <!--
+ Import per project custom build rules if present at the root of the project.
+ This is the place to put custom intermediary targets such as:
+ -pre-build
+ -pre-compile
+ -post-compile (This is typically used for code obfuscation.
+ Compiled code location: ${out.classes.absolute.dir}
+ If this is not done in place, override ${out.dex.input.absolute.dir})
+ -post-package
+ -post-build
+ -pre-clean
+ -->
+ <import file="custom_rules.xml" optional="true" />
+
+ <!-- Import the actual build file.
+
+ To customize existing targets, there are two options:
+ - Customize only one target:
+ - copy/paste the target into this file, *before* the
+ <import> task.
+ - customize it to your needs.
+ - Customize the whole content of build.xml
+ - copy/paste the content of the rules files (minus the top node)
+ into this file, replacing the <import> task.
+ - customize to your needs.
+
+ ***********************
+ ****** IMPORTANT ******
+ ***********************
+ In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+ in order to avoid having your file be overridden by tools such as "android update project"
+ -->
+ <!-- version-tag: VERSION_TAG -->
+ <import file="${sdk.dir}/tools/ant/uibuild.xml" />
+
+</project>
diff --git a/wlauto/workloads/caffeinemark/uiauto/project.properties b/wlauto/workloads/caffeinemark/uiauto/project.properties
new file mode 100644
index 00000000..a3ee5ab6
--- /dev/null
+++ b/wlauto/workloads/caffeinemark/uiauto/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=android-17
diff --git a/wlauto/workloads/caffeinemark/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java b/wlauto/workloads/caffeinemark/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
new file mode 100644
index 00000000..3979b675
--- /dev/null
+++ b/wlauto/workloads/caffeinemark/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
@@ -0,0 +1,85 @@
+/* Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+package com.arm.wlauto.uiauto.caffeinemark;
+
+import android.app.Activity;
+import android.os.Bundle;
+import android.util.Log;
+
+// Import the uiautomator libraries
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+
+import com.arm.wlauto.uiauto.BaseUiAutomation;
+
+public class UiAutomation extends BaseUiAutomation {
+
+ public static String TAG = "caffeinemark";
+ public String[] categories = {"Sieve", "Loop", "Logic", "String", "Float", "Method"};
+
+ public void runUiAutomation() throws Exception {
+ Bundle status = new Bundle();
+ status.putString("product", getUiDevice().getProductName());
+
+ UiSelector selector = new UiSelector();
+ UiObject runButton = new UiObject(selector.text("Run benchmark")
+ .className("android.widget.Button"));
+ runButton.click();
+
+ try {
+ waitText("CaffeineMark results");
+ extractOverallScore();
+ extractDetailedScores();
+
+
+ } catch(UiObjectNotFoundException e) {
+ takeScreenshot("caffeine-error");
+ }
+
+ getAutomationSupport().sendStatus(Activity.RESULT_OK, status);
+ }
+
+ public void extractOverallScore() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject linearLayoutOverallScore = new UiObject(selector.className("android.widget.LinearLayout")
+ .instance(1));
+ UiObject overallScore = linearLayoutOverallScore.getChild(selector.className("android.widget.TextView")
+ .instance(2));
+ Log.v(TAG, "CAFFEINEMARK RESULT: OverallScore " + overallScore.getText());
+ }
+
+ public void extractDetailedScores() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject detailsButton = new UiObject(selector.text("Details")
+ .className("android.widget.Button"));
+ detailsButton.click();
+ sleep(2);
+
+ UiObject linearObject;
+ UiObject detailedScore;
+ for (int i = 1; i <= 6; i++) {
+ linearObject = new UiObject(selector.className("android.widget.LinearLayout")
+ .instance(i));
+ detailedScore = linearObject.getChild(selector.className("android.widget.TextView")
+ .instance(1));
+ Log.v(TAG,"CAFFEINEMARK RESULT: " + categories[i-1] + " " + detailedScore.getText());
+ }
+ }
+}
diff --git a/wlauto/workloads/cameracapture/__init__.py b/wlauto/workloads/cameracapture/__init__.py
new file mode 100644
index 00000000..de72acea
--- /dev/null
+++ b/wlauto/workloads/cameracapture/__init__.py
@@ -0,0 +1,51 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=E1101
+
+from wlauto import UiAutomatorWorkload, Parameter
+
+
+class Cameracapture(UiAutomatorWorkload):
+
+ name = 'cameracapture'
+ description = """
+ Uses in-built Android camera app to take photos.
+
+ """
+ package = 'com.google.android.gallery3d'
+ activity = 'com.android.camera.CameraActivity'
+
+ parameters = [
+ Parameter('no_of_captures', kind=int, default=5,
+ description='Number of photos to be taken.'),
+ Parameter('time_between_captures', kind=int, default=5,
+ description='Time, in seconds, between two consecutive camera clicks.'),
+ ]
+
+ def __init__(self, device, **kwargs):
+ super(Cameracapture, self).__init__(device, **kwargs)
+ self.uiauto_params['no_of_captures'] = self.no_of_captures
+ self.uiauto_params['time_between_captures'] = self.time_between_captures
+
+ def setup(self, context):
+ super(Cameracapture, self).setup(context)
+ self.device.execute('am start -n {}/{}'.format(self.package, self.activity))
+
+ def update_result(self, context):
+ pass
+
+ def teardown(self, context):
+ super(Cameracapture, self).teardown(context)
diff --git a/wlauto/workloads/cameracapture/com.arm.wlauto.uiauto.cameracapture.jar b/wlauto/workloads/cameracapture/com.arm.wlauto.uiauto.cameracapture.jar
new file mode 100644
index 00000000..0d37d0b2
--- /dev/null
+++ b/wlauto/workloads/cameracapture/com.arm.wlauto.uiauto.cameracapture.jar
Binary files differ
diff --git a/wlauto/workloads/cameracapture/uiauto/build.sh b/wlauto/workloads/cameracapture/uiauto/build.sh
new file mode 100755
index 00000000..6b54f4f6
--- /dev/null
+++ b/wlauto/workloads/cameracapture/uiauto/build.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+class_dir=bin/classes/com/arm/wlauto/uiauto
+base_class=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')"`
+mkdir -p $class_dir
+cp $base_class $class_dir
+
+ant build
+
+if [[ -f bin/com.arm.wlauto.uiauto.cameracapture.jar ]]; then
+ cp bin/com.arm.wlauto.uiauto.cameracapture.jar ..
+fi
diff --git a/wlauto/workloads/cameracapture/uiauto/build.xml b/wlauto/workloads/cameracapture/uiauto/build.xml
new file mode 100644
index 00000000..bcd7ef9d
--- /dev/null
+++ b/wlauto/workloads/cameracapture/uiauto/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="com.arm.wlauto.uiauto.cameracapture" default="help">
+
+ <!-- The local.properties file is created and updated by the 'android' tool.
+ It contains the path to the SDK. It should *NOT* be checked into
+ Version Control Systems. -->
+ <property file="local.properties" />
+
+ <!-- The ant.properties file can be created by you. It is only edited by the
+ 'android' tool to add properties to it.
+ This is the place to change some Ant specific build properties.
+ Here are some properties you may want to change/update:
+
+ source.dir
+ The name of the source directory. Default is 'src'.
+ out.dir
+ The name of the output directory. Default is 'bin'.
+
+ For other overridable properties, look at the beginning of the rules
+ files in the SDK, at tools/ant/build.xml
+
+ Properties related to the SDK location or the project target should
+ be updated using the 'android' tool with the 'update' action.
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems.
+
+ -->
+ <property file="ant.properties" />
+
+ <!-- if sdk.dir was not set from one of the property file, then
+ get it from the ANDROID_HOME env var.
+ This must be done before we load project.properties since
+ the proguard config can use sdk.dir -->
+ <property environment="env" />
+ <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+ <isset property="env.ANDROID_HOME" />
+ </condition>
+
+ <!-- The project.properties file is created and updated by the 'android'
+ tool, as well as ADT.
+
+ This contains project specific properties such as project target, and library
+ dependencies. Lower level build properties are stored in ant.properties
+ (or in .classpath for Eclipse projects).
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems. -->
+ <loadproperties srcFile="project.properties" />
+
+ <!-- quick check on sdk.dir -->
+ <fail
+ message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+ unless="sdk.dir"
+ />
+
+ <!--
+ Import per project custom build rules if present at the root of the project.
+ This is the place to put custom intermediary targets such as:
+ -pre-build
+ -pre-compile
+ -post-compile (This is typically used for code obfuscation.
+ Compiled code location: ${out.classes.absolute.dir}
+ If this is not done in place, override ${out.dex.input.absolute.dir})
+ -post-package
+ -post-build
+ -pre-clean
+ -->
+ <import file="custom_rules.xml" optional="true" />
+
+ <!-- Import the actual build file.
+
+ To customize existing targets, there are two options:
+ - Customize only one target:
+ - copy/paste the target into this file, *before* the
+ <import> task.
+ - customize it to your needs.
+ - Customize the whole content of build.xml
+ - copy/paste the content of the rules files (minus the top node)
+ into this file, replacing the <import> task.
+ - customize to your needs.
+
+ ***********************
+ ****** IMPORTANT ******
+ ***********************
+ In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+ in order to avoid having your file be overridden by tools such as "android update project"
+ -->
+ <!-- version-tag: VERSION_TAG -->
+ <import file="${sdk.dir}/tools/ant/uibuild.xml" />
+
+</project>
diff --git a/wlauto/workloads/cameracapture/uiauto/project.properties b/wlauto/workloads/cameracapture/uiauto/project.properties
new file mode 100644
index 00000000..a3ee5ab6
--- /dev/null
+++ b/wlauto/workloads/cameracapture/uiauto/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=android-17
diff --git a/wlauto/workloads/cameracapture/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java b/wlauto/workloads/cameracapture/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
new file mode 100644
index 00000000..a5497468
--- /dev/null
+++ b/wlauto/workloads/cameracapture/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
@@ -0,0 +1,68 @@
+/* Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+package com.arm.wlauto.uiauto.cameracapture;
+
+import android.app.Activity;
+import android.os.Bundle;
+import android.util.Log;
+import android.view.KeyEvent;
+
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+
+import com.arm.wlauto.uiauto.BaseUiAutomation;
+
+public class UiAutomation extends BaseUiAutomation {
+
+    public static String TAG = "cameracapture";
+
+    public void runUiAutomation() throws Exception {
+        int timeDurationBetweenEachCapture = 0;
+        int sleepTime = 2;
+        Bundle parameters = getParams();
+        // capture count and inter-capture delay are supplied via WA agenda parameters
+        int iterations = 0;
+
+        if (parameters.size() > 0) {
+            iterations = Integer.parseInt(parameters
+                    .getString("no_of_captures"));
+            timeDurationBetweenEachCapture = Integer.parseInt(parameters
+                    .getString("time_between_captures"));
+        }
+        // switch the app into still-photo capture mode
+        UiObject clickModes = new UiObject(new UiSelector().descriptionMatches("Camera, video or panorama selector"));
+        clickModes.click();
+        sleep(sleepTime);
+
+        UiObject changeModeToCapture = new UiObject(new UiSelector().descriptionMatches("Switch to photo"));
+
+        changeModeToCapture.click();
+        sleep(sleepTime);
+
+        // capture the requested number of photos, pausing between shots
+        UiObject clickCaptureButton = new UiObject(new UiSelector().descriptionMatches("Shutter button"));
+
+        for (int i = 0; i < iterations; i++) {
+            clickCaptureButton.longClick();
+            sleep(timeDurationBetweenEachCapture);
+        }
+        getUiDevice().pressBack();
+    }
+}
diff --git a/wlauto/workloads/camerarecord/__init__.py b/wlauto/workloads/camerarecord/__init__.py
new file mode 100644
index 00000000..7f237f2f
--- /dev/null
+++ b/wlauto/workloads/camerarecord/__init__.py
@@ -0,0 +1,47 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from wlauto import UiAutomatorWorkload, Parameter
+
+
+class Camerarecord(UiAutomatorWorkload):
+
+ name = 'camerarecord'
+ description = """
+ Uses in-built Android camera app to record the video for given interval
+ of time.
+
+ """
+ package = 'com.google.android.gallery3d'
+ activity = 'com.android.camera.CameraActivity'
+ run_timeout = 0
+
+ parameters = [
+ Parameter('recording_time', kind=int, default=60,
+ description='The video recording time in seconds.'),
+ ]
+
+ def __init__(self, device, **kwargs):
+ super(Camerarecord, self).__init__(device)
+ self.uiauto_params['recording_time'] = self.recording_time # pylint: disable=E1101
+ self.run_timeout = 3 * self.uiauto_params['recording_time']
+
+ def setup(self, context):
+ super(Camerarecord, self).setup(context)
+ self.device.execute('am start -n {}/{}'.format(self.package, self.activity))
+
+ def teardown(self, context):
+ self.device.execute('am force-stop {}'.format(self.package))
+ super(Camerarecord, self).teardown(context)
diff --git a/wlauto/workloads/camerarecord/com.arm.wlauto.uiauto.camerarecord.jar b/wlauto/workloads/camerarecord/com.arm.wlauto.uiauto.camerarecord.jar
new file mode 100644
index 00000000..7de7cfcf
--- /dev/null
+++ b/wlauto/workloads/camerarecord/com.arm.wlauto.uiauto.camerarecord.jar
Binary files differ
diff --git a/wlauto/workloads/camerarecord/uiauto/build.sh b/wlauto/workloads/camerarecord/uiauto/build.sh
new file mode 100755
index 00000000..eff5293f
--- /dev/null
+++ b/wlauto/workloads/camerarecord/uiauto/build.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+class_dir=bin/classes/com/arm/wlauto/uiauto
+base_class=$(python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')")
+mkdir -p "$class_dir"  # quote expansions to guard against word splitting in paths
+cp "$base_class" "$class_dir"
+
+ant build
+
+if [[ -f bin/com.arm.wlauto.uiauto.camerarecord.jar ]]; then
+    cp bin/com.arm.wlauto.uiauto.camerarecord.jar ..
+fi
diff --git a/wlauto/workloads/camerarecord/uiauto/build.xml b/wlauto/workloads/camerarecord/uiauto/build.xml
new file mode 100644
index 00000000..31a4132e
--- /dev/null
+++ b/wlauto/workloads/camerarecord/uiauto/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="com.arm.wlauto.uiauto.camerarecord" default="help">
+
+ <!-- The local.properties file is created and updated by the 'android' tool.
+ It contains the path to the SDK. It should *NOT* be checked into
+ Version Control Systems. -->
+ <property file="local.properties" />
+
+ <!-- The ant.properties file can be created by you. It is only edited by the
+ 'android' tool to add properties to it.
+ This is the place to change some Ant specific build properties.
+ Here are some properties you may want to change/update:
+
+ source.dir
+ The name of the source directory. Default is 'src'.
+ out.dir
+ The name of the output directory. Default is 'bin'.
+
+ For other overridable properties, look at the beginning of the rules
+ files in the SDK, at tools/ant/build.xml
+
+ Properties related to the SDK location or the project target should
+ be updated using the 'android' tool with the 'update' action.
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems.
+
+ -->
+ <property file="ant.properties" />
+
+ <!-- if sdk.dir was not set from one of the property file, then
+ get it from the ANDROID_HOME env var.
+ This must be done before we load project.properties since
+ the proguard config can use sdk.dir -->
+ <property environment="env" />
+ <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+ <isset property="env.ANDROID_HOME" />
+ </condition>
+
+ <!-- The project.properties file is created and updated by the 'android'
+ tool, as well as ADT.
+
+ This contains project specific properties such as project target, and library
+ dependencies. Lower level build properties are stored in ant.properties
+ (or in .classpath for Eclipse projects).
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems. -->
+ <loadproperties srcFile="project.properties" />
+
+ <!-- quick check on sdk.dir -->
+ <fail
+ message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+ unless="sdk.dir"
+ />
+
+ <!--
+ Import per project custom build rules if present at the root of the project.
+ This is the place to put custom intermediary targets such as:
+ -pre-build
+ -pre-compile
+ -post-compile (This is typically used for code obfuscation.
+ Compiled code location: ${out.classes.absolute.dir}
+ If this is not done in place, override ${out.dex.input.absolute.dir})
+ -post-package
+ -post-build
+ -pre-clean
+ -->
+ <import file="custom_rules.xml" optional="true" />
+
+ <!-- Import the actual build file.
+
+ To customize existing targets, there are two options:
+ - Customize only one target:
+ - copy/paste the target into this file, *before* the
+ <import> task.
+ - customize it to your needs.
+ - Customize the whole content of build.xml
+ - copy/paste the content of the rules files (minus the top node)
+ into this file, replacing the <import> task.
+ - customize to your needs.
+
+ ***********************
+ ****** IMPORTANT ******
+ ***********************
+ In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+ in order to avoid having your file be overridden by tools such as "android update project"
+ -->
+ <!-- version-tag: VERSION_TAG -->
+ <import file="${sdk.dir}/tools/ant/uibuild.xml" />
+
+</project>
diff --git a/wlauto/workloads/camerarecord/uiauto/project.properties b/wlauto/workloads/camerarecord/uiauto/project.properties
new file mode 100644
index 00000000..a3ee5ab6
--- /dev/null
+++ b/wlauto/workloads/camerarecord/uiauto/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=android-17
diff --git a/wlauto/workloads/camerarecord/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java b/wlauto/workloads/camerarecord/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
new file mode 100644
index 00000000..c030a078
--- /dev/null
+++ b/wlauto/workloads/camerarecord/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
@@ -0,0 +1,65 @@
+/* Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+package com.arm.wlauto.uiauto.camerarecord;
+
+import android.app.Activity;
+import android.os.Bundle;
+import android.util.Log;
+import android.view.KeyEvent;
+
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+
+import com.arm.wlauto.uiauto.BaseUiAutomation;
+
+public class UiAutomation extends BaseUiAutomation {
+
+    public static String TAG = "camerarecord";
+
+    public void runUiAutomation() throws Exception {
+        Bundle parameters = getParams();
+        // recording duration is supplied via the WA "recording_time" parameter
+        int sleepTime = 2;
+        int recordingTime = 0;
+        if (parameters.size() > 0) {
+            recordingTime = Integer.parseInt(parameters
+                    .getString("recording_time"));
+        }
+
+        // switch the app into video recording mode
+        UiObject clickModes = new UiObject(new UiSelector().descriptionMatches("Camera, video or panorama selector"));
+        clickModes.click();
+        sleep(sleepTime);
+
+        UiObject changeModeToCapture = new UiObject(new UiSelector().descriptionMatches("Switch to video"));
+        changeModeToCapture.click();
+        sleep(sleepTime);
+
+        // start recording and wait for the requested duration
+        UiObject clickRecordingButton = new UiObject(new UiSelector().descriptionMatches("Shutter button"));
+        clickRecordingButton.longClick();
+        sleep(recordingTime);
+
+        // stop video recording and leave the app
+        clickRecordingButton.longClick();
+        getUiDevice().pressBack();
+    }
+
+}
diff --git a/wlauto/workloads/castlebuilder/__init__.py b/wlauto/workloads/castlebuilder/__init__.py
new file mode 100644
index 00000000..5a527330
--- /dev/null
+++ b/wlauto/workloads/castlebuilder/__init__.py
@@ -0,0 +1,28 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto import GameWorkload
+
+
+class Castlebuilder(GameWorkload):
+
+    name = 'castlebuilder'
+    description = """
+    Castle Builder game.
+
+    """
+    package = 'com.ettinentertainment.castlebuilder'
+    activity = 'com.unity3d.player.UnityPlayerProxyActivity'  # Unity-based game: launched via the Unity proxy activity
diff --git a/wlauto/workloads/castlebuilder/revent_files/.empty b/wlauto/workloads/castlebuilder/revent_files/.empty
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/wlauto/workloads/castlebuilder/revent_files/.empty
diff --git a/wlauto/workloads/castlebuilder/revent_files/Nexus10.run.revent b/wlauto/workloads/castlebuilder/revent_files/Nexus10.run.revent
new file mode 100644
index 00000000..704231f5
--- /dev/null
+++ b/wlauto/workloads/castlebuilder/revent_files/Nexus10.run.revent
Binary files differ
diff --git a/wlauto/workloads/castlebuilder/revent_files/Nexus10.setup.revent b/wlauto/workloads/castlebuilder/revent_files/Nexus10.setup.revent
new file mode 100644
index 00000000..25370392
--- /dev/null
+++ b/wlauto/workloads/castlebuilder/revent_files/Nexus10.setup.revent
Binary files differ
diff --git a/wlauto/workloads/castlemaster/__init__.py b/wlauto/workloads/castlemaster/__init__.py
new file mode 100644
index 00000000..fa104b81
--- /dev/null
+++ b/wlauto/workloads/castlemaster/__init__.py
@@ -0,0 +1,30 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto import GameWorkload
+
+
+class CastleMaster(GameWorkload):
+
+    name = 'castlemaster'
+    description = """
+    Castle Master v1.09 game.
+
+    """
+    package = 'com.alphacloud.castlemaster'
+    activity = 'com.unity3d.player.UnityPlayerActivity'  # Unity-based game
+    install_timeout = 500  # seconds; allow extra time for installation
+
diff --git a/wlauto/workloads/castlemaster/revent_files/.empty b/wlauto/workloads/castlemaster/revent_files/.empty
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/wlauto/workloads/castlemaster/revent_files/.empty
diff --git a/wlauto/workloads/castlemaster/revent_files/Nexus10.run.revent b/wlauto/workloads/castlemaster/revent_files/Nexus10.run.revent
new file mode 100644
index 00000000..404f7c71
--- /dev/null
+++ b/wlauto/workloads/castlemaster/revent_files/Nexus10.run.revent
Binary files differ
diff --git a/wlauto/workloads/castlemaster/revent_files/Nexus10.setup.revent b/wlauto/workloads/castlemaster/revent_files/Nexus10.setup.revent
new file mode 100644
index 00000000..680d2e34
--- /dev/null
+++ b/wlauto/workloads/castlemaster/revent_files/Nexus10.setup.revent
Binary files differ
diff --git a/wlauto/workloads/cfbench/__init__.py b/wlauto/workloads/cfbench/__init__.py
new file mode 100644
index 00000000..a9fab988
--- /dev/null
+++ b/wlauto/workloads/cfbench/__init__.py
@@ -0,0 +1,72 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import xml.etree.ElementTree as ET
+
+from wlauto import AndroidUiAutoBenchmark
+
+
+class Cfbench(AndroidUiAutoBenchmark):
+
+ name = 'cfbench'
+ description = """
+ CF-Bench is (mainly) CPU and memory benchmark tool specifically designed to
+ be able to handle multi-core devices, produce a fairly stable score, and
+ test both native as well managed code performance.
+
+ https://play.google.com/store/apps/details?id=eu.chainfire.cfbench&hl=en
+
+ From the website:
+
+ It tests specific device properties you do not regularly see tested by other
+ benchmarks, and runs in a set timeframe.
+
+ It does produce some "final" scores, but as with every benchmark, you should
+ take those with a grain of salt. It is simply not theoretically possible to
+ produce a single number that accurately describes a device's performance.
+
+ .. note:: This workload relies on the device being rooted
+
+ """
+ package = 'eu.chainfire.cfbench'
+ activity = '.MainActivity'
+ run_timeout = 5 * 60 # seconds
+ summary_metrics = ['overall_score']
+
+ cfbench_params = ['java_mdflops', 'native_memory_read', 'java_msflops', 'native_disk_read', 'native_score', 'java_efficiency_memory_read',
+ 'native_mips', 'native_mdflops', 'java_score', 'native_memory_write', 'java_memory_write', 'native_mallocs', 'native_msflops',
+ 'java_mips', 'java_efficiency_mdflops', 'overall_score', 'java_memory_read', 'java_efficiency_memory_write', 'java_efficiency_mips',
+ 'java_efficiency_msflops', 'native_disk_write']
+
+ def update_result(self, context):
+ super(Cfbench, self).update_result(context)
+ device_results_file = os.path.join(self.device.package_data_directory,
+ self.package,
+ 'shared_prefs', 'eu.chainfire.cfbench_preferences.xml ')
+ self.device.execute('cp {} {}'.format(device_results_file, self.device.working_directory), as_root=True)
+ self.device.pull_file(os.path.join(self.device.working_directory, 'eu.chainfire.cfbench_preferences.xml'), context.output_directory)
+ result_file = os.path.join(context.output_directory, 'eu.chainfire.cfbench_preferences.xml')
+ tree = ET.parse(result_file)
+ root = tree.getroot()
+ for child in root:
+ if child.attrib['name'] in self.cfbench_params:
+ if '%' in child.text:
+ value = float(child.text.split('%')[0]) / 100
+ else:
+ value = int(child.text)
+ context.result.add_metric(child.attrib['name'], value)
+
+
diff --git a/wlauto/workloads/cfbench/com.arm.wlauto.uiauto.cfbench.jar b/wlauto/workloads/cfbench/com.arm.wlauto.uiauto.cfbench.jar
new file mode 100644
index 00000000..1b4ae753
--- /dev/null
+++ b/wlauto/workloads/cfbench/com.arm.wlauto.uiauto.cfbench.jar
Binary files differ
diff --git a/wlauto/workloads/cfbench/uiauto/build.sh b/wlauto/workloads/cfbench/uiauto/build.sh
new file mode 100755
index 00000000..d72e4d38
--- /dev/null
+++ b/wlauto/workloads/cfbench/uiauto/build.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+class_dir=bin/classes/com/arm/wlauto/uiauto
+base_class=$(python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')")
+mkdir -p "$class_dir"  # quote expansions to guard against word splitting in paths
+cp "$base_class" "$class_dir"
+
+ant build
+
+if [[ -f bin/com.arm.wlauto.uiauto.cfbench.jar ]]; then
+    cp bin/com.arm.wlauto.uiauto.cfbench.jar ..
+fi
diff --git a/wlauto/workloads/cfbench/uiauto/build.xml b/wlauto/workloads/cfbench/uiauto/build.xml
new file mode 100644
index 00000000..994c34e7
--- /dev/null
+++ b/wlauto/workloads/cfbench/uiauto/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="com.arm.wlauto.uiauto.cfbench" default="help">
+
+ <!-- The local.properties file is created and updated by the 'android' tool.
+ It contains the path to the SDK. It should *NOT* be checked into
+ Version Control Systems. -->
+ <property file="local.properties" />
+
+ <!-- The ant.properties file can be created by you. It is only edited by the
+ 'android' tool to add properties to it.
+ This is the place to change some Ant specific build properties.
+ Here are some properties you may want to change/update:
+
+ source.dir
+ The name of the source directory. Default is 'src'.
+ out.dir
+ The name of the output directory. Default is 'bin'.
+
+ For other overridable properties, look at the beginning of the rules
+ files in the SDK, at tools/ant/build.xml
+
+ Properties related to the SDK location or the project target should
+ be updated using the 'android' tool with the 'update' action.
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems.
+
+ -->
+ <property file="ant.properties" />
+
+ <!-- if sdk.dir was not set from one of the property file, then
+ get it from the ANDROID_HOME env var.
+ This must be done before we load project.properties since
+ the proguard config can use sdk.dir -->
+ <property environment="env" />
+ <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+ <isset property="env.ANDROID_HOME" />
+ </condition>
+
+ <!-- The project.properties file is created and updated by the 'android'
+ tool, as well as ADT.
+
+ This contains project specific properties such as project target, and library
+ dependencies. Lower level build properties are stored in ant.properties
+ (or in .classpath for Eclipse projects).
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems. -->
+ <loadproperties srcFile="project.properties" />
+
+ <!-- quick check on sdk.dir -->
+ <fail
+ message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+ unless="sdk.dir"
+ />
+
+ <!--
+ Import per project custom build rules if present at the root of the project.
+ This is the place to put custom intermediary targets such as:
+ -pre-build
+ -pre-compile
+ -post-compile (This is typically used for code obfuscation.
+ Compiled code location: ${out.classes.absolute.dir}
+ If this is not done in place, override ${out.dex.input.absolute.dir})
+ -post-package
+ -post-build
+ -pre-clean
+ -->
+ <import file="custom_rules.xml" optional="true" />
+
+ <!-- Import the actual build file.
+
+ To customize existing targets, there are two options:
+ - Customize only one target:
+ - copy/paste the target into this file, *before* the
+ <import> task.
+ - customize it to your needs.
+ - Customize the whole content of build.xml
+ - copy/paste the content of the rules files (minus the top node)
+ into this file, replacing the <import> task.
+ - customize to your needs.
+
+ ***********************
+ ****** IMPORTANT ******
+ ***********************
+ In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+ in order to avoid having your file be overridden by tools such as "android update project"
+ -->
+ <!-- version-tag: VERSION_TAG -->
+ <import file="${sdk.dir}/tools/ant/uibuild.xml" />
+
+</project>
diff --git a/wlauto/workloads/cfbench/uiauto/project.properties b/wlauto/workloads/cfbench/uiauto/project.properties
new file mode 100644
index 00000000..a3ee5ab6
--- /dev/null
+++ b/wlauto/workloads/cfbench/uiauto/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=android-17
diff --git a/wlauto/workloads/cfbench/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java b/wlauto/workloads/cfbench/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
new file mode 100644
index 00000000..0e61d92d
--- /dev/null
+++ b/wlauto/workloads/cfbench/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
@@ -0,0 +1,63 @@
+/* Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+package com.arm.wlauto.uiauto.cfbench;
+
+import android.app.Activity;
+import android.os.Bundle;
+import android.util.Log;
+
+// Import the uiautomator libraries
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+
+import com.arm.wlauto.uiauto.BaseUiAutomation;
+
+public class UiAutomation extends BaseUiAutomation {
+
+    public static String TAG = "cfbench";
+
+    public void runUiAutomation() throws Exception {
+        Bundle status = new Bundle();
+        status.putString("product", getUiDevice().getProductName());
+        UiSelector selector = new UiSelector();
+        UiObject benchmarkButton = new UiObject(selector.text("Full Benchmark")
+                                                        .className("android.widget.TextView"));
+
+        benchmarkButton.click();
+        sleep(2);
+
+        try {
+            // wait (up to 10 minutes) for the progress text to disappear
+            UiObject progressText = new UiObject(selector.textContains("Benchmarking ...")
+                                                         .className("android.widget.TextView"));
+            waitUntilNoObject(progressText, 600);
+            sleep(2);
+        } finally {
+            takeScreenshot("cf-bench");
+        }
+
+        // fling to the bottom of the results list
+        UiScrollable resultsView = new UiScrollable(new UiSelector());
+        resultsView.flingToEnd(10);
+        sleep(2);
+
+        getAutomationSupport().sendStatus(Activity.RESULT_OK, status);
+    }
+}
diff --git a/wlauto/workloads/citadel/__init__.py b/wlauto/workloads/citadel/__init__.py
new file mode 100644
index 00000000..71b433b9
--- /dev/null
+++ b/wlauto/workloads/citadel/__init__.py
@@ -0,0 +1,44 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=E1101
+import time
+
+from wlauto import GameWorkload, Parameter
+
+
+class EpicCitadel(GameWorkload):
+
+ name = 'citadel'
+ description = """
+ Epic Citadel demo showcasing Unreal Engine 3.
+
+ The game has very rich graphics details. The workload only moves around its
+ environment for the specified time.
+
+ """
+ package = 'com.epicgames.EpicCitadel'
+ activity = '.UE3JavaApp'
+ install_timeout = 120
+
+ parameters = [
+ Parameter('duration', kind=int, default=60,
+ description=('Duration, in seconds, of the run (may need to be adjusted for '
+                               'different devices).')),
+ ]
+
+ def run(self, context):
+ super(EpicCitadel, self).run(context)
+ time.sleep(self.duration)
diff --git a/wlauto/workloads/citadel/revent_files/.empty b/wlauto/workloads/citadel/revent_files/.empty
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/wlauto/workloads/citadel/revent_files/.empty
diff --git a/wlauto/workloads/citadel/revent_files/Nexus10.run.revent b/wlauto/workloads/citadel/revent_files/Nexus10.run.revent
new file mode 100644
index 00000000..ac580434
--- /dev/null
+++ b/wlauto/workloads/citadel/revent_files/Nexus10.run.revent
Binary files differ
diff --git a/wlauto/workloads/citadel/revent_files/Nexus10.setup.revent b/wlauto/workloads/citadel/revent_files/Nexus10.setup.revent
new file mode 100644
index 00000000..df45cf44
--- /dev/null
+++ b/wlauto/workloads/citadel/revent_files/Nexus10.setup.revent
Binary files differ
diff --git a/wlauto/workloads/cyclictest/LICENSE b/wlauto/workloads/cyclictest/LICENSE
new file mode 100644
index 00000000..58b35d03
--- /dev/null
+++ b/wlauto/workloads/cyclictest/LICENSE
@@ -0,0 +1,8 @@
+cyclictest binaries included here are part of the Linux kernel and are distributed
+under GPL version 2; The full text of the license may be viewed here:
+
+http://www.gnu.org/licenses/gpl-2.0.html
+
+Source for these binaries can be obtained here:
+
+http://git.kernel.org/cgit/linux/kernel/git/clrkwllms/rt-tests.git
diff --git a/wlauto/workloads/cyclictest/__init__.py b/wlauto/workloads/cyclictest/__init__.py
new file mode 100644
index 00000000..700bd993
--- /dev/null
+++ b/wlauto/workloads/cyclictest/__init__.py
@@ -0,0 +1,141 @@
+# Copyright 2012-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# pylint: disable=no-member
+# pylint: disable=attribute-defined-outside-init
+
+import os
+import time
+
+from wlauto import settings, Workload, Executable, Parameter
+from wlauto.exceptions import ConfigError, WorkloadError
+from wlauto.utils.types import boolean
+
+TXT_RESULT_NAME = 'cyclictest_result.txt'
+RESULT_INTERPRETATION = {
+ 'T': 'Thread',
+ 'P': 'Priority',
+ 'C': 'Clock',
+}
+
+
+class Cyclictest(Workload):
+
+ name = 'cyclictest'
+ description = """
+ Measures the amount of time that passes between when a timer expires and
+ when the thread which set the timer actually runs.
+
+ Cyclic test works by taking a time snapshot just prior to waiting for a specific
+ time interval (t1), then taking another time snapshot after the timer
+ finishes (t2), then comparing the theoretical wakeup time with the actual
+ wakeup time (t2 -(t1 + sleep_time)). This value is the latency for that
+    timer's wakeup.
+
+ """
+
+ parameters = [
+ Parameter('clock', allowed_values=['monotonic', 'realtime'], default='realtime',
+                  description=('Specify the clock to be used during the test.')),
+ Parameter('duration', kind=int, default=30,
+ description=('Specify the length for the test to run in seconds.')),
+ Parameter('quiet', kind=boolean, default=True,
+ description=('Run the tests quiet and print only a summary on exit.')),
+ Parameter('thread', kind=int, default=8,
+ description=('Set the number of test threads')),
+ Parameter('latency', kind=int, default=1000000,
+ description=('Write the value to /dev/cpu_dma_latency')),
+ Parameter('extra_parameters', kind=str, default="",
+ description=('Any additional command line parameters to append to the '
+ 'existing parameters above. A list can be found at '
+ 'https://rt.wiki.kernel.org/index.php/Cyclictest or '
+ 'in the help page ``cyclictest -h``')),
+ Parameter('clear_file_cache', kind=boolean, default=True,
+ description=('Clear file caches before starting test')),
+ Parameter('screen_off', kind=boolean, default=True,
+ description=('If true it will turn the screen off so that onscreen '
+                               'graphics do not affect the score. This is predominantly '
+ 'for devices without a GPU')),
+
+ ]
+
+ def setup(self, context):
+ self.cyclictest_on_device = 'cyclictest'
+ self.cyclictest_result = os.path.join(self.device.working_directory, TXT_RESULT_NAME)
+ self.cyclictest_command = '{} --clock={} --duration={}s --thread={} --latency={} {} {} > {}'
+ self.device_binary = None
+
+ if not self.device.is_rooted:
+            raise WorkloadError("This workload requires a device with root permissions to run")
+
+ if not self.device.is_installed('cyclictest'):
+ host_binary = context.resolver.get(Executable(self, self.device.abi, 'cyclictest'))
+ self.device_binary = self.device.install(host_binary)
+ else:
+ self.device_binary = 'cyclictest'
+
+ self.cyclictest_command = self.cyclictest_command.format(self.device_binary,
+ 0 if self.clock == 'monotonic' else 1,
+ self.duration,
+ self.thread,
+ self.latency,
+ "--quiet" if self.quiet else "",
+ self.extra_parameters,
+ self.cyclictest_result)
+
+ if self.clear_file_cache:
+ self.device.execute('sync')
+ self.device.set_sysfile_value('/proc/sys/vm/drop_caches', 3)
+
+ if self.device.platform == 'android':
+ if self.screen_off and self.device.is_screen_on:
+ self.device.execute('input keyevent 26')
+
+ def run(self, context):
+ self.device.execute(self.cyclictest_command, self.duration * 2, as_root=True)
+
+ def update_result(self, context):
+ self.device.pull_file(self.cyclictest_result, context.output_directory)
+
+ # Parsing the output
+ # Standard Cyclictest Output:
+ # T: 0 (31974) P:95 I:1000 C:4990 Min:9 Act:37 Avg:31 Max:59
+ with open(os.path.join(context.output_directory, TXT_RESULT_NAME)) as f:
+ for line in f:
+ if line.find('C:') is not -1:
+ # Key = T: 0 (31974) P:95 I:1000
+ # Remaing = 49990 Min:9 Act:37 Avg:31 Max:59
+ # sperator = C:
+ (key, sperator, remaing) = line.partition('C:')
+
+ index = key.find('T')
+ key = key.replace(key[index], RESULT_INTERPRETATION['T'])
+ index = key.find('P')
+ key = key.replace(key[index], RESULT_INTERPRETATION['P'])
+
+ index = sperator.find('C')
+ sperator = sperator.replace(sperator[index], RESULT_INTERPRETATION['C'])
+
+ metrics = (sperator + remaing).split()
+                # metrics is now in the form of ['Min:', '9', 'Act:', '37', 'Avg:', '31' , 'Max', '59']
+ for i in range(0, len(metrics), 2):
+ full_key = key + ' ' + metrics[i][:-1]
+ value = int(metrics[i + 1])
+ context.result.add_metric(full_key, value, 'microseconds')
+
+ def teardown(self, context):
+ if self.device.platform == 'android':
+ if self.screen_off:
+ self.device.ensure_screen_is_on()
+ self.device.execute('rm -f {}'.format(self.cyclictest_result))
diff --git a/wlauto/workloads/cyclictest/bin/arm64/cyclictest b/wlauto/workloads/cyclictest/bin/arm64/cyclictest
new file mode 100755
index 00000000..9d682da1
--- /dev/null
+++ b/wlauto/workloads/cyclictest/bin/arm64/cyclictest
Binary files differ
diff --git a/wlauto/workloads/cyclictest/bin/armeabi/cyclictest b/wlauto/workloads/cyclictest/bin/armeabi/cyclictest
new file mode 100755
index 00000000..e61f2076
--- /dev/null
+++ b/wlauto/workloads/cyclictest/bin/armeabi/cyclictest
Binary files differ
diff --git a/wlauto/workloads/dex2oat/__init__.py b/wlauto/workloads/dex2oat/__init__.py
new file mode 100644
index 00000000..440ed5b3
--- /dev/null
+++ b/wlauto/workloads/dex2oat/__init__.py
@@ -0,0 +1,121 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=no-member,attribute-defined-outside-init
+import re
+import os
+import sys
+
+from wlauto import Workload, Parameter, ExtensionLoader
+from wlauto.exceptions import WorkloadError
+from wlauto.utils.android import ApkInfo
+import wlauto.common.android.resources
+
+
+class Dex2oatBenchmark(Workload):
+
+ name = 'dex2oat'
+ description = """
+ Benchmarks the execution time of dex2oat (a key part of APK installation process).
+
+ ART is a new Android runtime in KitKat, which replaces Dalvik VM. ART uses Ahead-Of-Time
+ compilation. It pre-compiles ODEX files used by Dalvik using dex2oat tool as part of APK
+ installation process.
+
+    This workload benchmarks the time it takes to compile an APK using dex2oat, which has a
+ significant impact on the total APK installation time, and therefore user experience.
+
+ """
+
+ command_template = 'dex2oat --dex-file={} --oat-file={} --instruction-set={} --dump-timing'
+ run_timeout = 5 * 60
+
+ parameters = [
+ Parameter('instruction_set', default='arm64',
+ allowed_values=['arm', 'arm64', 'x86', 'x86_64', 'mips'],
+ description="""Specifies the instruction set to compile for. Only options supported by
+ the target device can be used."""),
+ ]
+
+ def init_resources(self, context):
+ # TODO: find a better APK to use for this.
+ peacekeeper = ExtensionLoader().get_workload('peacekeeper', self.device)
+ self.apk_file = context.resolver.get(wlauto.common.android.resources.ApkFile(peacekeeper), version='chrome')
+ self.package = ApkInfo(self.apk_file).package
+
+ def setup(self, context):
+ if self.device.getprop('persist.sys.dalvik.vm.lib.2') != 'libart.so':
+ raise WorkloadError('Android system must be using ART (rather than Dalvik) in order for dex2oat to work.')
+ supported = [eabi == 'armeabi' and 'arm' or eabi.split('-')[0]
+ for eabi in self.device.supported_eabi]
+ if self.instruction_set not in supported:
+ message = 'Instruction set "{}" is not supported by the device; (supported: {})'
+ raise WorkloadError(message.format(self.instruction_set, supported))
+
+ on_device_apk = self.device.path.join(self.device.working_directory,
+ os.path.basename(self.apk_file))
+ self.on_device_oat = on_device_apk.replace('.apk', '-{}.oat'.format(self.instruction_set))
+ self.command = self.command_template.format(on_device_apk, self.on_device_oat, self.instruction_set)
+
+ if not self.device.file_exists(on_device_apk):
+ self.device.push_file(self.apk_file, on_device_apk)
+
+ def run(self, context):
+ self.device.execute(self.command, self.run_timeout)
+
+ def update_result(self, context):
+ """
+ Retrieve the last dex2oat time from the logs. That will correspond with the run() method.
+ The compilation time does not.
+
+ Pulls out the compilation time and dex2oat execution time:
+ I/dex2oat ( 2522): 1.8s Compile Dex File
+ I/dex2oat ( 2522): dex2oat took 2.366s (threads: 6)
+
+
+ """
+ logcat_log = os.path.join(context.output_directory, 'logcat.log')
+ self.device.dump_logcat(logcat_log)
+
+ regex_time = re.compile("^I\/dex2oat \( *[0-9]+\): dex2oat took (?P<time>[0-9]+\.?[0-9]*)(?P<unit>m?s)")
+ regex_comp_time = re.compile("^I\/dex2oat \( *[0-9]+\): +(?P<time>[0-9]*\.?[0-9]*)(?P<unit>m?s) Compile Dex File")
+ time_data, comp_time_data = None, None
+ with open(logcat_log) as fh:
+ for line in fh:
+ match = regex_time.search(line)
+
+ if match:
+ time_data = match.groupdict()
+
+ match = regex_comp_time.search(line)
+
+ if match:
+ comp_time_data = match.groupdict()
+ # Last dex2oat time wins.
+ if time_data is not None:
+ time = time_data['time']
+ if time_data['unit'] == "s":
+ time = float(time) * 1000.0
+ context.result.add_metric('dex2oat_time', time, "ms", lower_is_better=True)
+
+ if comp_time_data is not None:
+ time = comp_time_data['time']
+ if comp_time_data['unit'] == "s":
+ time = float(time) * 1000.0
+ context.result.add_metric('dex2oat_comp_time', time, "ms", lower_is_better=True)
+
+ def teardown(self, context):
+ self.device.delete_file(self.on_device_oat)
+
diff --git a/wlauto/workloads/dhrystone/__init__.py b/wlauto/workloads/dhrystone/__init__.py
new file mode 100644
index 00000000..b87ff99f
--- /dev/null
+++ b/wlauto/workloads/dhrystone/__init__.py
@@ -0,0 +1,109 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#pylint: disable=E1101,W0201
+
+import os
+import re
+
+from wlauto import Workload, Parameter
+from wlauto.exceptions import ConfigError
+
+
+this_dir = os.path.dirname(__file__)
+
+
+class Dhrystone(Workload):
+
+ name = 'dhrystone'
+ description = """
+ Runs the Dhrystone benchmark.
+
+ Original source from::
+
+ http://classes.soe.ucsc.edu/cmpe202/benchmarks/standard/dhrystone.c
+
+ This version has been modified to configure duration and the number of
+ threads used.
+
+ """
+
+ bm_regex = re.compile(r'This machine benchmarks at (?P<score>\d+)')
+ dmips_regex = re.compile(r'(?P<score>\d+) DMIPS')
+ time_regex = re.compile(r'Total dhrystone run time: (?P<time>[0-9.]+)')
+
+ default_mloops = 100
+
+ parameters = [
+ Parameter('duration', kind=int, default=0,
+ description='The duration, in seconds, for which dhrystone will be executed. '
+ 'Either this or ``mloops`` should be specified but not both.'),
+ Parameter('mloops', kind=int, default=0,
+ description='Millions of loops to run. Either this or ``duration`` should be '
+ 'specified, but not both. If neither is specified, this will default '
+ 'to ``{}``'.format(default_mloops)),
+ Parameter('threads', kind=int, default=4,
+ description='The number of separate dhrystone "threads" that will be forked.'),
+ Parameter('delay', kind=int, default=0,
+ description=('The delay, in seconds, between kicking off of dhrystone '
+ 'threads (if ``threads`` > 1).')),
+ ]
+
+ def setup(self, context):
+ host_exe = os.path.join(this_dir, 'dhrystone')
+ self.device_exe = self.device.install(host_exe)
+ execution_mode = '-l {}'.format(self.mloops) if self.mloops else '-r {}'.format(self.duration)
+ self.command = '{} {} -t {} -d {}'.format(self.device_exe,
+ execution_mode,
+ self.threads, self.delay)
+ self.timeout = self.duration and self.duration + self.delay * self.threads + 10 or 300
+
+ def run(self, context):
+ self.output = self.device.execute(self.command, timeout=self.timeout, check_exit_code=False)
+
+ def update_result(self, context):
+ outfile = os.path.join(context.output_directory, 'dhrystone.output')
+ with open(outfile, 'w') as wfh:
+ wfh.write(self.output)
+ score_count = 0
+ dmips_count = 0
+ for line in self.output.split('\n'):
+ match = self.time_regex.search(line)
+ if match:
+ context.result.add_metric('time', float(match.group('time')), 'seconds', lower_is_better=True)
+ else:
+ match = self.bm_regex.search(line)
+ if match:
+ metric = 'thread {} score'.format(score_count)
+ value = int(match.group('score'))
+ context.result.add_metric(metric, value)
+ score_count += 1
+ else:
+ match = self.dmips_regex.search(line)
+ if match:
+ metric = 'thread {} DMIPS'.format(dmips_count)
+ value = int(match.group('score'))
+ context.result.add_metric(metric, value)
+ dmips_count += 1
+
+ def teardown(self, context):
+ self.device.uninstall_executable('dhrystone')
+
+ def validate(self):
+ if self.mloops and self.duration: # pylint: disable=E0203
+ raise ConfigError('mloops and duration cannot be both specified at the same time for dhrystone.')
+ if not self.mloops and not self.duration: # pylint: disable=E0203
+ self.mloops = self.default_mloops
+
diff --git a/wlauto/workloads/dhrystone/dhrystone b/wlauto/workloads/dhrystone/dhrystone
new file mode 100755
index 00000000..68cd9b71
--- /dev/null
+++ b/wlauto/workloads/dhrystone/dhrystone
Binary files differ
diff --git a/wlauto/workloads/dhrystone/src/build.sh b/wlauto/workloads/dhrystone/src/build.sh
new file mode 100755
index 00000000..61fcce5d
--- /dev/null
+++ b/wlauto/workloads/dhrystone/src/build.sh
@@ -0,0 +1,23 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+ndk-build
+if [[ -f libs/armeabi/dhrystone ]]; then
+ echo "Dhrystone binary updated."
+ cp libs/armeabi/dhrystone ..
+ rm -rf libs
+ rm -rf obj
+fi
diff --git a/wlauto/workloads/dhrystone/src/jni/Android.mk b/wlauto/workloads/dhrystone/src/jni/Android.mk
new file mode 100644
index 00000000..2f974319
--- /dev/null
+++ b/wlauto/workloads/dhrystone/src/jni/Android.mk
@@ -0,0 +1,11 @@
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES:= dhrystone.c
+LOCAL_MODULE := dhrystone
+LOCAL_MODULE_TAGS := optional
+LOCAL_STATIC_LIBRARIES := libc
+LOCAL_SHARED_LIBRARIES := liblog
+LOCAL_LDLIBS := -llog
+LOCAL_CFLAGS := -O2
+include $(BUILD_EXECUTABLE)
diff --git a/wlauto/workloads/dhrystone/src/jni/dhrystone.c b/wlauto/workloads/dhrystone/src/jni/dhrystone.c
new file mode 100644
index 00000000..9f16003e
--- /dev/null
+++ b/wlauto/workloads/dhrystone/src/jni/dhrystone.c
@@ -0,0 +1,959 @@
+/* ARM modifications to the original Dhrystone are */
+/* Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+/***** hpda:net.sources / homxb!gemini / 1:58 am Apr 1, 1986*/
+/* EVERBODY: Please read "APOLOGY" below. -rick 01/06/85
+ * See introduction in net.arch, or net.micro
+ *
+ * "DHRYSTONE" Benchmark Program
+ *
+ * Version: C/1.1, 12/01/84
+ *
+ * Date: PROGRAM updated 01/06/86, RESULTS updated 03/31/86
+ *
+ * Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013
+ * Translated from ADA by Rick Richardson
+ * Every method to preserve ADA-likeness has been used,
+ * at the expense of C-ness.
+ *
+ * Compile: cc -O dry.c -o drynr : No registers
+ * cc -O -DREG=register dry.c -o dryr : Registers
+ *
+ * Defines: Defines are provided for old C compiler's
+ * which don't have enums, and can't assign structures.
+ * The time(2) function is library dependant; Most
+ * return the time in seconds, but beware of some, like
+ * Aztec C, which return other units.
+ * The LOOPS define is initially set for 50000 loops.
+ * If you have a machine with large integers and is
+ * very fast, please change this number to 500000 to
+ * get better accuracy. Please select the way to
+ * measure the execution time using the TIME define.
+ * For single user machines, time(2) is adequate. For
+ * multi-user machines where you cannot get single-user
+ * access, use the times(2) function. If you have
+ * neither, use a stopwatch in the dead of night.
+ * Use a "printf" at the point marked "start timer"
+ * to begin your timings. DO NOT use the UNIX "time(1)"
+ * command, as this will measure the total time to
+ * run this program, which will (erroneously) include
+ * the time to malloc(3) storage and to compute the
+ * time it takes to do nothing.
+ *
+ * Run: drynr; dryr
+ *
+ * Results: If you get any new machine/OS results, please send to:
+ *
+ * ihnp4!castor!pcrat!rick
+ *
+ * and thanks to all that do. Space prevents listing
+ * the names of those who have provided some of these
+ * results. I'll be forwarding these results to
+ * Rheinhold Weicker.
+ *
+ * Note: I order the list in increasing performance of the
+ * "with registers" benchmark. If the compiler doesn't
+ * provide register variables, then the benchmark
+ * is the same for both REG and NOREG.
+ *
+ * PLEASE: Send complete information about the machine type,
+ * clock speed, OS and C manufacturer/version. If
+ * the machine is modified, tell me what was done.
+ * On UNIX, execute uname -a and cc -V to get this info.
+ *
+ * 80x8x NOTE: 80x8x benchers: please try to do all memory models
+ * for a particular compiler.
+ *
+ * APOLOGY (1/30/86):
+ * Well, I goofed things up! As pointed out by Haakon Bugge,
+ * the line of code marked "GOOF" below was missing from the
+ * Dhrystone distribution for the last several months. It
+ * *WAS* in a backup copy I made last winter, so no doubt it
+ * was victimized by sleepy fingers operating vi!
+ *
+ * The effect of the line missing is that the reported benchmarks
+ * are 15% too fast (at least on a 80286). Now, this creates
+ * a dilema - do I throw out ALL the data so far collected
+ * and use only results from this (corrected) version, or
+ * do I just keep collecting data for the old version?
+ *
+ * Since the data collected so far *is* valid as long as it
+ * is compared with like data, I have decided to keep
+ * TWO lists- one for the old benchmark, and one for the
+ * new. This also gives me an opportunity to correct one
+ * other error I made in the instructions for this benchmark.
+ * My experience with C compilers has been mostly with
+ * UNIX 'pcc' derived compilers, where the 'optimizer' simply
+ * fixes sloppy code generation (peephole optimization).
+ * But today, there exist C compiler optimizers that will actually
+ * perform optimization in the Computer Science sense of the word,
+ * by removing, for example, assignments to a variable whose
+ * value is never used. Dhrystone, unfortunately, provides
+ * lots of opportunities for this sort of optimization.
+ *
+ * I request that benchmarkers re-run this new, corrected
+ * version of Dhrystone, turning off or bypassing optimizers
+ * which perform more than peephole optimization. Please
+ * indicate the version of Dhrystone used when reporting the
+ * results to me.
+ *
+ * RESULTS BEGIN HERE
+ *
+ *----------------DHRYSTONE VERSION 1.1 RESULTS BEGIN--------------------------
+ *
+ * MACHINE MICROPROCESSOR OPERATING COMPILER DHRYSTONES/SEC.
+ * TYPE SYSTEM NO REG REGS
+ * -------------------------- ------------ ----------- ---------------
+ * Apple IIe 65C02-1.02Mhz DOS 3.3 Aztec CII v1.05i 37 37
+ * - Z80-2.5Mhz CPM-80 v2.2 Aztec CII v1.05g 91 91
+ * - 8086-8Mhz RMX86 V6 Intel C-86 V2.0 197 203LM??
+ * IBM PC/XT 8088-4.77Mhz COHERENT 2.3.43 Mark Wiiliams 259 275
+ * - 8086-8Mhz RMX86 V6 Intel C-86 V2.0 287 304 ??
+ * Fortune 32:16 68000-6Mhz V7+sys3+4.1BSD cc 360 346
+ * PDP-11/34A w/FP-11C UNIX V7m cc 406 449
+ * Macintosh512 68000-7.7Mhz Mac ROM O/S DeSmet(C ware) 625 625
+ * VAX-11/750 w/FPA UNIX 4.2BSD cc 831 852
+ * DataMedia 932 68000-10Mhz UNIX sysV cc 837 888
+ * Plexus P35 68000-12.5Mhz UNIX sysIII cc 835 894
+ * ATT PC7300 68010-10Mhz UNIX 5.0.3 cc 973 1034
+ * Compaq II 80286-8Mhz MSDOS 3.1 MS C 3.0 1086 1140 LM
+ * IBM PC/AT 80286-7.5Mhz Venix/286 SVR2 cc 1159 1254 *15
+ * Compaq II 80286-8Mhz MSDOS 3.1 MS C 3.0 1190 1282 MM
+ * MicroVAX II - Mach/4.3 cc 1361 1385
+ * DEC uVAX II - Ultrix-32m v1.1 cc 1385 1399
+ * Compaq II 80286-8Mhz MSDOS 3.1 MS C 3.0 1351 1428
+ * VAX 11/780 - UNIX 4.2BSD cc 1417 1441
+ * VAX-780/MA780 Mach/4.3 cc 1428 1470
+ * VAX 11/780 - UNIX 5.0.1 cc 4.1.1.31 1650 1640
+ * Ridge 32C V1 - ROS 3.3 Ridge C (older) 1628 1695
+ * Gould PN6005 - UTX 1.1c+ (4.2) cc 1732 1884
+ * Gould PN9080 custom ECL UTX-32 1.1C cc 4745 4992
+ * VAX-784 - Mach/4.3 cc 5263 5555 &4
+ * VAX 8600 - 4.3 BSD cc 6329 6423
+ * Amdahl 5860 - UTS sysV cc 1.22 28735 28846
+ * IBM3090/200 - ? ? 31250 31250
+ *
+ *
+ *----------------DHRYSTONE VERSION 1.0 RESULTS BEGIN--------------------------
+ *
+ * MACHINE MICROPROCESSOR OPERATING COMPILER DHRYSTONES/SEC.
+ * TYPE SYSTEM NO REG REGS
+ * -------------------------- ------------ ----------- ---------------
+ * Commodore 64 6510-1MHz C64 ROM C Power 2.8 36 36
+ * HP-110 8086-5.33Mhz MSDOS 2.11 Lattice 2.14 284 284
+ * IBM PC/XT 8088-4.77Mhz PC/IX cc 271 294
+ * CCC 3205 - Xelos(SVR2) cc 558 592
+ * Perq-II 2901 bitslice Accent S5c cc (CMU) 301 301
+ * IBM PC/XT 8088-4.77Mhz COHERENT 2.3.43 MarkWilliams cc 296 317
+ * Cosmos 68000-8Mhz UniSoft cc 305 322
+ * IBM PC/XT 8088-4.77Mhz Venix/86 2.0 cc 297 324
+ * DEC PRO 350 11/23 Venix/PRO SVR2 cc 299 325
+ * IBM PC 8088-4.77Mhz MSDOS 2.0 b16cc 2.0 310 340
+ * PDP11/23 11/23 Venix (V7) cc 320 358
+ * Commodore Amiga ? Lattice 3.02 368 371
+ * PC/XT 8088-4.77Mhz Venix/86 SYS V cc 339 377
+ * IBM PC 8088-4.77Mhz MSDOS 2.0 CI-C86 2.20M 390 390
+ * IBM PC/XT 8088-4.77Mhz PCDOS 2.1 Wizard 2.1 367 403
+ * IBM PC/XT 8088-4.77Mhz PCDOS 3.1 Lattice 2.15 403 403 @
+ * Colex DM-6 68010-8Mhz Unisoft SYSV cc 378 410
+ * IBM PC 8088-4.77Mhz PCDOS 3.1 Datalight 1.10 416 416
+ * IBM PC NEC V20-4.77Mhz MSDOS 3.1 MS 3.1 387 420
+ * IBM PC/XT 8088-4.77Mhz PCDOS 2.1 Microsoft 3.0 390 427
+ * IBM PC NEC V20-4.77Mhz MSDOS 3.1 MS 3.1 (186) 393 427
+ * PDP-11/34 - UNIX V7M cc 387 438
+ * IBM PC 8088, 4.77mhz PC-DOS 2.1 Aztec C v3.2d 423 454
+ * Tandy 1000 V20, 4.77mhz MS-DOS 2.11 Aztec C v3.2d 423 458
+ * Tandy TRS-16B 68000-6Mhz Xenix 1.3.5 cc 438 458
+ * PDP-11/34 - RSTS/E decus c 438 495
+ * Onyx C8002 Z8000-4Mhz IS/1 1.1 (V7) cc 476 511
+ * Tandy TRS-16B 68000-6Mhz Xenix 1.3.5 Green Hills 609 617
+ * DEC PRO 380 11/73 Venix/PRO SVR2 cc 577 628
+ * FHL QT+ 68000-10Mhz Os9/68000 version 1.3 603 649 FH
+ * Apollo DN550 68010-?Mhz AegisSR9/IX cc 3.12 666 666
+ * HP-110 8086-5.33Mhz MSDOS 2.11 Aztec-C 641 676
+ * ATT PC6300 8086-8Mhz MSDOS 2.11 b16cc 2.0 632 684
+ * IBM PC/AT 80286-6Mhz PCDOS 3.0 CI-C86 2.1 666 684
+ * Tandy 6000 68000-8Mhz Xenix 3.0 cc 694 694
+ * IBM PC/AT 80286-6Mhz Xenix 3.0 cc 684 704 MM
+ * Macintosh 68000-7.8Mhz 2M Mac Rom Mac C 32 bit int 694 704
+ * Macintosh 68000-7.7Mhz - MegaMax C 2.0 661 709
+ * Macintosh512 68000-7.7Mhz Mac ROM O/S DeSmet(C ware) 714 714
+ * IBM PC/AT 80286-6Mhz Xenix 3.0 cc 704 714 LM
+ * Codata 3300 68000-8Mhz UniPlus+ (v7) cc 678 725
+ * WICAT MB 68000-8Mhz System V WICAT C 4.1 585 731 ~
+ * Cadmus 9000 68010-10Mhz UNIX cc 714 735
+ * AT&T 6300 8086-8Mhz Venix/86 SVR2 cc 668 743
+ * Cadmus 9790 68010-10Mhz 1MB SVR0,Cadmus3.7 cc 720 747
+ * NEC PC9801F 8086-8Mhz PCDOS 2.11 Lattice 2.15 768 - @
+ * ATT PC6300 8086-8Mhz MSDOS 2.11 CI-C86 2.20M 769 769
+ * Burroughs XE550 68010-10Mhz Centix 2.10 cc 769 769 CT1
+ * EAGLE/TURBO 8086-8Mhz Venix/86 SVR2 cc 696 779
+ * ALTOS 586 8086-10Mhz Xenix 3.0b cc 724 793
+ * DEC 11/73 J-11 micro Ultrix-11 V3.0 cc 735 793
+ * ATT 3B2/300 WE32000-?Mhz UNIX 5.0.2 cc 735 806
+ * Apollo DN320 68010-?Mhz AegisSR9/IX cc 3.12 806 806
+ * IRIS-2400 68010-10Mhz UNIX System V cc 772 829
+ * Atari 520ST 68000-8Mhz TOS DigResearch 839 846
+ * IBM PC/AT 80286-6Mhz PCDOS 3.0 MS 3.0(large) 833 847 LM
+ * WICAT MB 68000-8Mhz System V WICAT C 4.1 675 853 S~
+ * VAX 11/750 - Ultrix 1.1 4.2BSD cc 781 862
+ * CCC 7350A 68000-8MHz UniSoft V.2 cc 821 875
+ * VAX 11/750 - UNIX 4.2bsd cc 862 877
+ * Fast Mac 68000-7.7Mhz - MegaMax C 2.0 839 904 +
+ * IBM PC/XT 8086-9.54Mhz PCDOS 3.1 Microsoft 3.0 833 909 C1
+ * DEC 11/44 Ultrix-11 V3.0 cc 862 909
+ * Macintosh 68000-7.8Mhz 2M Mac Rom Mac C 16 bit int 877 909 S
+ * CCC 3210 - Xelos R01(SVR2) cc 849 924
+ * CCC 3220 - Ed. 7 v2.3 cc 892 925
+ * IBM PC/AT 80286-6Mhz Xenix 3.0 cc -i 909 925
+ * AT&T 6300 8086, 8mhz MS-DOS 2.11 Aztec C v3.2d 862 943
+ * IBM PC/AT 80286-6Mhz Xenix 3.0 cc 892 961
+ * VAX 11/750 w/FPA Eunice 3.2 cc 914 976
+ * IBM PC/XT 8086-9.54Mhz PCDOS 3.1 Wizard 2.1 892 980 C1
+ * IBM PC/XT 8086-9.54Mhz PCDOS 3.1 Lattice 2.15 980 980 C1
+ * Plexus P35 68000-10Mhz UNIX System III cc 984 980
+ * PDP-11/73 KDJ11-AA 15Mhz UNIX V7M 2.1 cc 862 981
+ * VAX 11/750 w/FPA UNIX 4.3bsd cc 994 997
+ * IRIS-1400 68010-10Mhz UNIX System V cc 909 1000
+ * IBM PC/AT 80286-6Mhz Venix/86 2.1 cc 961 1000
+ * IBM PC/AT 80286-6Mhz PCDOS 3.0 b16cc 2.0 943 1063
+ * Zilog S8000/11 Z8001-5.5Mhz Zeus 3.2 cc 1011 1084
+ * NSC ICM-3216 NSC 32016-10Mhz UNIX SVR2 cc 1041 1084
+ * IBM PC/AT 80286-6Mhz PCDOS 3.0 MS 3.0(small) 1063 1086
+ * VAX 11/750 w/FPA VMS VAX-11 C 2.0 958 1091
+ * Stride 68000-10Mhz System-V/68 cc 1041 1111
+ * Plexus P/60 MC68000-12.5Mhz UNIX SYSIII Plexus 1111 1111
+ * ATT PC7300 68010-10Mhz UNIX 5.0.2 cc 1041 1111
+ * CCC 3230 - Xelos R01(SVR2) cc 1040 1126
+ * Stride 68000-12Mhz System-V/68 cc 1063 1136
+ * IBM PC/AT 80286-6Mhz Venix/286 SVR2 cc 1056 1149
+ * Plexus P/60 MC68000-12.5Mhz UNIX SYSIII Plexus 1111 1163 T
+ * IBM PC/AT 80286-6Mhz PCDOS 3.0 Datalight 1.10 1190 1190
+ * ATT PC6300+ 80286-6Mhz MSDOS 3.1 b16cc 2.0 1111 1219
+ * IBM PC/AT 80286-6Mhz PCDOS 3.1 Wizard 2.1 1136 1219
+ * Sun2/120 68010-10Mhz Sun 4.2BSD cc 1136 1219
+ * IBM PC/AT 80286-6Mhz PCDOS 3.0 CI-C86 2.20M 1219 1219
+ * WICAT PB 68000-8Mhz System V WICAT C 4.1 998 1226 ~
+ * MASSCOMP 500 68010-10MHz RTU V3.0 cc (V3.2) 1156 1238
+ * Alliant FX/8 IP (68012-12Mhz) Concentrix cc -ip;exec -i 1170 1243 FX
+ * Cyb DataMate 68010-12.5Mhz Uniplus 5.0 Unisoft cc 1162 1250
+ * PDP 11/70 - UNIX 5.2 cc 1162 1250
+ * IBM PC/AT 80286-6Mhz PCDOS 3.1 Lattice 2.15 1250 1250
+ * IBM PC/AT 80286-7.5Mhz Venix/86 2.1 cc 1190 1315 *15
+ * Sun2/120 68010-10Mhz Standalone cc 1219 1315
+ * Intel 380 80286-8Mhz Xenix R3.0up1 cc 1250 1315 *16
+ * Sequent Balance 8000 NS32032-10MHz Dynix 2.0 cc 1250 1315 N12
+ * IBM PC/DSI-32 32032-10Mhz MSDOS 3.1 GreenHills 2.14 1282 1315 C3
+ * ATT 3B2/400 WE32100-?Mhz UNIX 5.2 cc 1315 1315
+ * CCC 3250XP - Xelos R01(SVR2) cc 1215 1318
+ * IBM PC/RT 032 RISC(801?)?Mhz BSD 4.2 cc 1248 1333 RT
+ * DG MV4000 - AOS/VS 5.00 cc 1333 1333
+ * IBM PC/AT 80286-8Mhz Venix/86 2.1 cc 1275 1380 *16
+ * IBM PC/AT 80286-6Mhz MSDOS 3.0 Microsoft 3.0 1250 1388
+ * ATT PC6300+ 80286-6Mhz MSDOS 3.1 CI-C86 2.20M 1428 1428
+ * COMPAQ/286 80286-8Mhz Venix/286 SVR2 cc 1326 1443
+ * IBM PC/AT 80286-7.5Mhz Venix/286 SVR2 cc 1333 1449 *15
+ * WICAT PB 68000-8Mhz System V WICAT C 4.1 1169 1464 S~
+ * Tandy II/6000 68000-8Mhz Xenix 3.0 cc 1384 1477
+ * MicroVAX II - Mach/4.3 cc 1513 1536
+ * WICAT MB 68000-12.5Mhz System V WICAT C 4.1 1246 1537 ~
+ * IBM PC/AT 80286-9Mhz SCO Xenix V cc 1540 1556 *18
+ * Cyb DataMate 68010-12.5Mhz Uniplus 5.0 Unisoft cc 1470 1562 S
+ * VAX 11/780 - UNIX 5.2 cc 1515 1562
+ * MicroVAX-II - - - 1562 1612
+ * VAX-780/MA780 Mach/4.3 cc 1587 1612
+ * VAX 11/780 - UNIX 4.3bsd cc 1646 1662
+ * Apollo DN660 - AegisSR9/IX cc 3.12 1666 1666
+ * ATT 3B20 - UNIX 5.2 cc 1515 1724
+ * NEC PC-98XA 80286-8Mhz PCDOS 3.1 Lattice 2.15 1724 1724 @
+ * HP9000-500 B series CPU HP-UX 4.02 cc 1724 -
+ * Ridge 32C V1 - ROS 3.3 Ridge C (older) 1776 -
+ * IBM PC/STD 80286-8Mhz MSDOS 3.0 Microsoft 3.0 1724 1785 C2
+ * WICAT MB 68000-12.5Mhz System V WICAT C 4.1 1450 1814 S~
+ * WICAT PB 68000-12.5Mhz System V WICAT C 4.1 1530 1898 ~
+ * DEC-2065 KL10-Model B TOPS-20 6.1FT5 Port. C Comp. 1937 1946
+ * Gould PN6005 - UTX 1.1(4.2BSD) cc 1675 1964
+ * DEC2060 KL-10 TOPS-20 cc 2000 2000 NM
+ * Intel 310AP 80286-8Mhz Xenix 3.0 cc 1893 2009
+ * VAX 11/785 - UNIX 5.2 cc 2083 2083
+ * VAX 11/785 - VMS VAX-11 C 2.0 2083 2083
+ * VAX 11/785 - UNIX SVR2 cc 2123 2083
+ * VAX 11/785 - ULTRIX-32 1.1 cc 2083 2091
+ * VAX 11/785 - UNIX 4.3bsd cc 2135 2136
+ * WICAT PB 68000-12.5Mhz System V WICAT C 4.1 1780 2233 S~
+ * Pyramid 90x - OSx 2.3 cc 2272 2272
+ * Pyramid 90x FPA,cache,4Mb OSx 2.5 cc no -O 2777 2777
+ * Pyramid 90x w/cache OSx 2.5 cc w/-O 3333 3333
+ * IBM-4341-II - VM/SP3 Waterloo C 1.2 3333 3333
+ * IRIS-2400T 68020-16.67Mhz UNIX System V cc 3105 3401
+ * Celerity C-1200 ? UNIX 4.2BSD cc 3485 3468
+ * SUN 3/75 68020-16.67Mhz SUN 4.2 V3 cc 3333 3571
+ * IBM-4341 Model 12 UTS 5.0 ? 3685 3685
+ * SUN-3/160 68020-16.67Mhz Sun 4.2 V3.0A cc 3381 3764
+ * Sun 3/180 68020-16.67Mhz Sun 4.2 cc 3333 3846
+ * IBM-4341 Model 12 UTS 5.0 ? 3910 3910 MN
+ * MC 5400 68020-16.67MHz RTU V3.0 cc (V4.0) 3952 4054
+ * Intel 386/20 80386-12.5Mhz PMON debugger Intel C386v0.2 4149 4386
+ * NCR Tower32 68020-16.67Mhz SYS 5.0 Rel 2.0 cc 3846 4545
+ * MC 5600/5700 68020-16.67MHz RTU V3.0 cc (V4.0) 4504 4746 %
+ * Intel 386/20 80386-12.5Mhz PMON debugger Intel C386v0.2 4534 4794 i1
+ * Intel 386/20 80386-16Mhz PMON debugger Intel C386v0.2 5304 5607
+ * Gould PN9080 custom ECL UTX-32 1.1C cc 5369 5676
+ * Gould 1460-342 ECL proc UTX/32 1.1/c cc 5342 5677 G1
+ * VAX-784 - Mach/4.3 cc 5882 5882 &4
+ * Intel 386/20 80386-16Mhz PMON debugger Intel C386v0.2 5801 6133 i1
+ * VAX 8600 - UNIX 4.3bsd cc 7024 7088
+ * VAX 8600 - VMS VAX-11 C 2.0 7142 7142
+ * Alliant FX/8 CE Concentrix cc -ce;exec -c 6952 7655 FX
+ * CCI POWER 6/32 COS(SV+4.2) cc 7500 7800
+ * CCI POWER 6/32 POWER 6 UNIX/V cc 8236 8498
+ * CCI POWER 6/32 4.2 Rel. 1.2b cc 8963 9544
+ * Sperry (CCI Power 6) 4.2BSD cc 9345 10000
+ * CRAY-X-MP/12 105Mhz COS 1.14 Cray C 10204 10204
+ * IBM-3083 - UTS 5.0 Rel 1 cc 16666 12500
+ * CRAY-1A 80Mhz CTSS Cray C 2.0 12100 13888
+ * IBM-3083 - VM/CMS HPO 3.4 Waterloo C 1.2 13889 13889
+ * Amdahl 470 V/8 UTS/V 5.2 cc v1.23 15560 15560
+ * CRAY-X-MP/48 105Mhz CTSS Cray C 2.0 15625 17857
+ * Amdahl 580 - UTS 5.0 Rel 1.2 cc v1.5 23076 23076
+ * Amdahl 5860 UTS/V 5.2 cc v1.23 28970 28970
+ *
+ * NOTE
+ * * Crystal changed from 'stock' to listed value.
+ * + This Macintosh was upgraded from 128K to 512K in such a way that
+ * the new 384K of memory is not slowed down by video generator accesses.
+ * % Single processor; MC == MASSCOMP
+ * NM A version 7 C compiler written at New Mexico Tech.
+ * @ vanilla Lattice compiler used with MicroPro standard library
+ * S Shorts used instead of ints
+ * T with Chris Torek's patches (whatever they are).
+ * ~ For WICAT Systems: MB=MultiBus, PB=Proprietary Bus
+ * LM Large Memory Model. (Otherwise, all 80x8x results are small model)
+ * MM Medium Memory Model. (Otherwise, all 80x8x results are small model)
+ * C1 Univation PC TURBO Co-processor; 9.54Mhz 8086, 640K RAM
+ * C2 Seattle Telecom STD-286 board
+ * C3 Definicon DSI-32 coprocessor
+ * C? Unknown co-processor board?
+ * CT1 Convergent Technologies MegaFrame, 1 processor.
+ * MN Using Mike Newtons 'optimizer' (see net.sources).
+ * G1 This Gould machine has 2 processors and was able to run 2 dhrystone
+ * Benchmarks in parallel with no slowdown.
+ * FH FHC == Frank Hogg Labs (Hazelwood Uniquad 2 in an FHL box).
+ * FX The Alliant FX/8 is a system consisting of 1-8 CEs (computation
+ * engines) and 1-12 IPs (interactive processors). Note N8 applies.
+ * RT This is one of the RT's that CMU has been using for awhile. I'm
+ * not sure that this is identical to the machine that IBM is selling
+ * to the public.
+ * i1 Normally, the 386/20 starter kit has a 16k direct mapped cache
+ * which inserts 2 or 3 wait states on a write thru. These results
+ * were obtained by disabling the write-thru, or essentially turning
+ * the cache into 0 wait state memory.
+ * Nnn This machine has multiple processors, allowing "nn" copies of the
+ * benchmark to run in the same time as 1 copy.
+ * &nn This machine has "nn" processors, and the benchmark results were
+ * obtained by having all "nn" processors working on 1 copy of dhrystone.
+ * (Note, this is different than Nnn. Salesmen like this measure).
+ * ? I don't trust results marked with '?'. These were sent to me with
+ * either incomplete info, or with times that just don't make sense.
+ * ?? means I think the performance is too poor, ?! means too good.
+ * If anybody can confirm these figures, please respond.
+ *
+ * ABBREVIATIONS
+ * CCC Concurrent Computer Corp. (was Perkin-Elmer)
+ * MC Masscomp
+ *
+ *--------------------------------RESULTS END----------------------------------
+ *
+ * The following program contains statements of a high-level programming
+ * language (C) in a distribution considered representative:
+ *
+ * assignments 53%
+ * control statements 32%
+ * procedure, function calls 15%
+ *
+ * 100 statements are dynamically executed. The program is balanced with
+ * respect to the three aspects:
+ * - statement type
+ * - operand type (for simple data types)
+ * - operand access
+ * operand global, local, parameter, or constant.
+ *
+ * The combination of these three aspects is balanced only approximately.
+ *
+ * The program does not compute anything meaningful, but it is
+ * syntactically and semantically correct.
+ *
+ */
+
+/* Accuracy of timings and human fatigue controlled by next two lines */
+/*#define LOOPS 5000 /* Use this for slow or 16 bit machines */
+/*#define LOOPS 50000 /* Use this for slow or 16 bit machines */
+#define LOOPS 500000 /* Use this for faster machines */
+
+/* Compiler dependent options */
+#undef NOENUM /* Define if compiler has no enum's */
+#undef NOSTRUCTASSIGN /* Define if compiler can't assign structures */
+
+/* define only one of the next three defines */
+#define GETRUSAGE /* Use getrusage(2) time function */
+/*#define TIMES /* Use times(2) time function */
+/*#define TIME /* Use time(2) time function */
+
+/* define the granularity of your times(2) function (when used) */
+/*#define HZ 60 /* times(2) returns 1/60 second (most) */
+/*#define HZ 100 /* times(2) returns 1/100 second (WECo) */
+
+/* for compatibility with goofed up version */
+/*#define GOOF /* Define if you want the goofed up version */
+
+/* default number of threads that will be spawned */
+#define DEFAULT_THREADS 1
+
+/* Dhrystones per second obtained on VAX11/780 -- a notional 1MIPS machine. */
+/* Used in DMIPS calculation. */
+#define ONE_MIPS 1757
+
+#ifdef GOOF
+char Version[] = "1.0";
+#else
+char Version[] = "1.1";
+#endif
+
+#ifdef NOSTRUCTASSIGN
+#define structassign(d, s) memcpy(&(d), &(s), sizeof(d))
+#else
+#define structassign(d, s) d = s
+#endif
+
+#ifdef NOENUM
+#define Ident1 1
+#define Ident2 2
+#define Ident3 3
+#define Ident4 4
+#define Ident5 5
+typedef int Enumeration;
+#else
+typedef enum {Ident1, Ident2, Ident3, Ident4, Ident5} Enumeration;
+#endif
+
/* Bounded-range scalar types used by the benchmark; the ranges in the names
 * are documentation only -- the underlying C types are not range-checked. */
typedef int OneToThirty;
typedef int OneToFifty;
typedef char CapitalLetter;
typedef char String30[31]; /* 30 characters plus NUL terminator */
typedef int Array1Dim[51];
typedef int Array2Dim[51][51];

/* Record type exercised by the pointer/struct statements of the benchmark
 * (allocated in Proc0, manipulated in Proc1/Proc3). */
struct Record
{
    struct Record *PtrComp; /* link to the next record */
    Enumeration Discr;
    Enumeration EnumComp;
    OneToFifty IntComp;
    String30 StringComp;
};

typedef struct Record RecordType;
typedef RecordType * RecordPtr;
typedef int boolean; /* pre-C99 boolean; use TRUE/FALSE below */

//#define NULL 0
#define TRUE 1
#define FALSE 0

/* REG may be pre-defined by the build (e.g. to 'register'); defaults to
 * empty so the REG-qualified locals are plain automatics. */
#ifndef REG
#define REG
#endif
+
+extern Enumeration Func1();
+extern boolean Func2();
+
+#ifdef TIMES
+#include <sys/param.h>
+#include <sys/types.h>
+#endif
+#ifdef GETRUSAGE
+#include <sys/resource.h>
+#endif
+#include <time.h>
+#include <unistd.h>
+#include <sys/wait.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/time.h>
+
+
+main(int argc, char** argv)
+{
+ int num_threads = DEFAULT_THREADS;
+ int runtime = 0;
+ int delay = 0;
+ long mloops = 0;
+
+ int opt;
+ while ((opt = getopt(argc, argv, "ht:r:d:l:")) != -1) {
+ switch (opt) {
+ case 'h':
+ printhelp();
+ exit(0);
+ break;
+ case 't':
+ num_threads = atoi(optarg);
+ break;
+ case 'r':
+ runtime = atoi(optarg);
+ break;
+ case 'd':
+ delay = atoi(optarg);
+ break;
+ case 'l':
+ mloops = atoll(optarg);
+ break;
+ }
+ }
+
+ if (runtime && mloops) {
+ fprintf(stderr, "-r and -l options cannot be specified at the same time.\n");
+ exit(1);
+ } else if (!runtime && !mloops) {
+ fprintf(stderr, "Must specify either -r or -l option; use -h to see help.\n");
+ exit(1);
+ }
+
+ long num_loops = mloops ? mloops * 1000000L : LOOPS * num_threads;
+ run_dhrystone(runtime, num_threads, num_loops, delay);
+}
+
/*
 * Spawn (num_threads - 1) child processes plus the calling process, each
 * running dhrystone for its share of the work, then wait for all children
 * and report the total elapsed wall-clock time.
 *
 * duration    seconds each worker should run (0 in fixed-loop mode)
 * num_threads total number of workers, including this process
 * num_loops   total iterations, divided evenly between workers
 * delay       seconds to sleep between spawning successive workers; each
 *             later worker's duration is shortened accordingly so all
 *             workers finish at roughly the same time
 *
 * Never returns: exits the process with status 0.
 */
run_dhrystone(int duration, int num_threads, long num_loops, int delay) {
    printf("duration: %d seconds\n", duration);
    printf("number of threads: %d\n", num_threads);
    printf("number of loops: %ld\n", num_loops);
    printf("delay between starting threads: %d seconds\n", delay);
    printf("\n");

    /* Guard against -t 0 / negative, which previously divided by zero. */
    if (num_threads < 1)
        num_threads = 1;
    long loops_per_thread = num_loops / num_threads;

    /* Wall-clock timing: clock() only measures this process's CPU time,
     * and the parent spends most of its time sleeping/waiting, so the
     * previously reported total was wrong for multi-process runs. */
    struct timeval run_start, run_end;
    gettimeofday(&run_start, NULL);

    long i;
    for (i = 0; i < (num_threads - 1); i++) {
        pid_t c = fork();
        if (c < 0) {
            perror("fork");
            exit(1);
        }
        if (c == 0) {
            /* Child: run for the remaining duration; later children have
             * less time left because of the staggered start. */
            int actual_duration = duration - i * delay;
            if (actual_duration < 0)
                actual_duration = 0;
            run_for_duration(actual_duration, loops_per_thread);
            exit(0);
        }
        sleep(delay);
    }

    /* The parent is the final worker. */
    run_for_duration(duration - delay * (num_threads - 1), loops_per_thread);

    /* Reap exactly the (num_threads - 1) children; the old loop issued one
     * extra wait() that always failed with ECHILD. */
    for (i = 0; i < (num_threads - 1); i++) {
        int status, w;
        do {
            w = wait(&status);
        } while (w != -1 && (!WIFEXITED(status) && !WIFSIGNALED(status)));
    }

    gettimeofday(&run_end, NULL);
    printf("\nTotal dhrystone run time: %f seconds.\n",
           (double)(run_end.tv_sec - run_start.tv_sec)
           + (double)(run_end.tv_usec - run_start.tv_usec) * 1e-6);

    exit(0);
}
+
/*
 * Run the benchmark kernel repeatedly until `duration` seconds of this
 * process's CPU time have elapsed.  Proc0 always executes at least once;
 * in fixed-loop mode (duration == 0) that single pass prints its results.
 */
run_for_duration(int duration, long num_loops) {
    clock_t deadline = clock() + duration * CLOCKS_PER_SEC;
    int fixed_loop_mode = (duration == 0);
    for (;;) {
        Proc0(num_loops, fixed_loop_mode);
        if (clock() >= deadline)
            break;
    }
}
+
/*
 * Print command-line usage to stdout.  Fixes several user-visible typos
 * in the original text ("specfied", "dhhrystone", "Specified the number").
 */
printhelp() {
    printf("Usage: dhrystone (-h | -l MLOOPS | -r DURATION) [-t THREADS [-d DELAY]]\n");
    printf("\n");
    printf("Runs dhrystone benchmark either for a specified duration or for a specified\n");
    printf("number of iterations.\n");
    printf("\n");
    printf("Options:\n");
    printf("    -h           Print this message and exit.\n");
    printf("    -l MLOOPS    Run dhrystone for the specified number of millions\n");
    printf("                 of iterations (i.e. the actual number of iterations is\n");
    printf("                 MLOOPS * 1e6).\n");
    printf("    -r DURATION  Run dhrystone for the specified duration (in seconds).\n");
    printf("                 dhrystone will be run 500000 iterations at a time, looping\n");
    printf("                 until the specified time period has passed.\n");
    printf("\n");
    printf("                 Note: -r and -l options may not be specified at the same time.\n");
    printf("\n");
    printf("    -t THREADS   Specifies the number of concurrent threads (processes,\n");
    printf("                 actually) that will be spawned. Defaults to 1.\n");
    printf("    -d DELAY     if THREADS is > 1, this specifies the delay between\n");
    printf("                 spawning the threads.\n");
    printf("\n");
}
+
+
/*
 * Package 1
 *
 * Global benchmark state, read and written by the Proc and Func routines
 * below.  Keeping this state global is part of the benchmark's operand
 * access mix (global vs. local vs. parameter).
 */
int IntGlob;               /* integer scratch shared across procedures */
boolean BoolGlob;
char Char1Glob;            /* set to 'A' by Proc5 on each iteration */
char Char2Glob;            /* set to 'B' by Proc4 on each iteration */
Array1Dim Array1Glob;
Array2Dim Array2Glob;
RecordPtr PtrGlb;          /* head of the two-record list built in Proc0 */
RecordPtr PtrGlbNext;      /* second record of that list */
+
/*
 * Proc0 -- the dhrystone measurement loop.
 *
 * Executes `numloops` iterations of the benchmark statement mix, timing
 * them with whichever of TIME / TIMES / GETRUSAGE was selected at compile
 * time, and -- when `print_result` is true -- prints the elapsed time,
 * dhrystones/second and DMIPS.  The loop body must not be restructured:
 * the exact statement mix is what the benchmark measures.
 *
 * NOTE(review): the loop counter `i` is unsigned int while `numloops` is
 * long, so counts above UINT_MAX would misbehave -- presumably fine for
 * the values this tool passes, but worth confirming.
 */
Proc0(long numloops, boolean print_result)
{
    OneToFifty IntLoc1;
    REG OneToFifty IntLoc2;
    OneToFifty IntLoc3;
    REG char CharLoc;      /* unused here; kept for the declaration mix */
    REG char CharIndex;
    Enumeration EnumLoc;
    String30 String1Loc;
    String30 String2Loc;
    // extern char *malloc();

    register unsigned int i;
#ifdef TIME
    long time();
    long starttime;
    long benchtime;
    long nulltime;

    starttime = time( (long *) 0);
    for (i = 0; i < numloops; ++i);      /* empty loop, timed below */
    nulltime = time( (long *) 0) - starttime; /* Computes o'head of loop */
#endif
#ifdef TIMES
    time_t starttime;
    time_t benchtime;
    time_t nulltime;
    struct tms tms;

    times(&tms); starttime = tms.tms_utime;
    for (i = 0; i < numloops; ++i);      /* empty loop, timed below */
    times(&tms);
    nulltime = tms.tms_utime - starttime; /* Computes overhead of looping */
#endif
#ifdef GETRUSAGE
    struct rusage starttime;
    struct rusage endtime;
    struct timeval nulltime;

    /* Time an empty loop of the same trip count to subtract the bare
     * looping overhead from the measured benchmark time. */
    getrusage(RUSAGE_SELF, &starttime);
    for (i = 0; i < numloops; ++i);
    getrusage(RUSAGE_SELF, &endtime);
    nulltime.tv_sec = endtime.ru_utime.tv_sec - starttime.ru_utime.tv_sec;
    nulltime.tv_usec = endtime.ru_utime.tv_usec - starttime.ru_utime.tv_usec;
#endif

    /* Build the two-record global list manipulated by Proc1/Proc3.
     * NOTE(review): malloc results are not checked, and the records are
     * never freed -- the process exits shortly after the run. */
    PtrGlbNext = (RecordPtr) malloc(sizeof(RecordType));
    PtrGlb = (RecordPtr) malloc(sizeof(RecordType));
    PtrGlb->PtrComp = PtrGlbNext;
    PtrGlb->Discr = Ident1;
    PtrGlb->EnumComp = Ident3;
    PtrGlb->IntComp = 40;
    strcpy(PtrGlb->StringComp, "DHRYSTONE PROGRAM, SOME STRING");
#ifndef GOOF
    strcpy(String1Loc, "DHRYSTONE PROGRAM, 1'ST STRING"); /*GOOF*/
#endif
    Array2Glob[8][7] = 10; /* Was missing in published program */

/*****************
-- Start Timer --
*****************/
#ifdef TIME
    starttime = time( (long *) 0);
#endif
#ifdef TIMES
    times(&tms); starttime = tms.tms_utime;
#endif
#ifdef GETRUSAGE
    getrusage (RUSAGE_SELF, &starttime);
#endif
    /* The timed benchmark loop: the canonical dhrystone statement mix. */
    for (i = 0; i < numloops; ++i)
    {

        Proc5();
        Proc4();
        IntLoc1 = 2;
        IntLoc2 = 3;
        strcpy(String2Loc, "DHRYSTONE PROGRAM, 2'ND STRING");
        EnumLoc = Ident2;
        BoolGlob = ! Func2(String1Loc, String2Loc);
        while (IntLoc1 < IntLoc2)
        {
            IntLoc3 = 5 * IntLoc1 - IntLoc2;
            Proc7(IntLoc1, IntLoc2, &IntLoc3);
            ++IntLoc1;
        }
        Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3);
        Proc1(PtrGlb);
        for (CharIndex = 'A'; CharIndex <= Char2Glob; ++CharIndex)
            if (EnumLoc == Func1(CharIndex, 'C'))
                Proc6(Ident1, &EnumLoc);
        IntLoc3 = IntLoc2 * IntLoc1;
        IntLoc2 = IntLoc3 / IntLoc1;
        IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1;
        Proc2(&IntLoc1);
    }

/*****************
-- Stop Timer --
*****************/

    if (print_result) {
#ifdef TIME
        /* Whole-second resolution; loop overhead subtracted. */
        benchtime = time( (long *) 0) - starttime - nulltime;
        printf("Dhrystone(%s) time for %ld passes = %ld\n",
            Version,
            (long) numloops, benchtime);
        printf("This machine benchmarks at %ld dhrystones/second\n",
            ((long) numloops) / benchtime);
        printf(" %ld DMIPS\n",
            ((long) numloops) / benchtime / ONE_MIPS);
#endif
#ifdef TIMES
        /* times(2) ticks; HZ converts ticks to seconds. */
        times(&tms);
        benchtime = tms.tms_utime - starttime - nulltime;
        printf("Dhrystone(%s) time for %ld passes = %ld\n",
            Version,
            (long) numloops, benchtime/HZ);
        printf("This machine benchmarks at %ld dhrystones/second\n",
            ((long) numloops) * HZ / benchtime);
        printf(" %ld DMIPS\n",
            ((long) numloops) * HZ / benchtime / ONE_MIPS);
#endif
#ifdef GETRUSAGE
        /* Microsecond-resolution user CPU time, folded into a double. */
        getrusage(RUSAGE_SELF, &endtime);
        {
            double t = (double)(endtime.ru_utime.tv_sec
                        - starttime.ru_utime.tv_sec
                        - nulltime.tv_sec)
                    + (double)(endtime.ru_utime.tv_usec
                        - starttime.ru_utime.tv_usec
                        - nulltime.tv_usec) * 1e-6;
            printf("Dhrystone(%s) time for %ld passes = %.1f\n",
                Version,
                (long)numloops,
                t);
            printf("This machine benchmarks at %.0f dhrystones/second\n",
                (double)numloops / t);
            printf(" %.0f DMIPS\n",
                (double)numloops / t / ONE_MIPS);
        }
#endif
    }

}
+
/*
 * Proc1 -- record/pointer manipulation on the two-record list.
 *
 * Copies *PtrGlb into the record following PtrParIn, rewrites several of
 * its fields via Proc3/Proc6/Proc7, and -- on the (never-taken in this
 * program, since Proc0 sets Discr to Ident1) else branch -- copies the
 * next record back over *PtrParIn.  K&R-style parameter declaration.
 */
Proc1(PtrParIn)
REG RecordPtr PtrParIn;
{
/* Shorthand for the record PtrParIn points at next. */
#define NextRecord (*(PtrParIn->PtrComp))

    structassign(NextRecord, *PtrGlb);
    PtrParIn->IntComp = 5;
    NextRecord.IntComp = PtrParIn->IntComp;
    NextRecord.PtrComp = PtrParIn->PtrComp;
    Proc3(NextRecord.PtrComp);
    if (NextRecord.Discr == Ident1)
    {
        NextRecord.IntComp = 6;
        Proc6(PtrParIn->EnumComp, &NextRecord.EnumComp);
        NextRecord.PtrComp = PtrGlb->PtrComp;
        Proc7(NextRecord.IntComp, 10, &NextRecord.IntComp);
    }
    else
        structassign(*PtrParIn, NextRecord);

#undef NextRecord
}
+
/*
 * Proc2 -- sets *IntParIO = *IntParIO + 10 - 1 - IntGlob via a loop that
 * executes exactly once in this program.
 *
 * NOTE(review): if Char1Glob were not 'A', EnumLoc would be read before
 * being assigned (undefined behaviour).  In this benchmark Proc5 sets
 * Char1Glob = 'A' before Proc2 is reached, so the branch always runs.
 */
Proc2(IntParIO)
OneToFifty *IntParIO;
{
    REG OneToFifty IntLoc;
    REG Enumeration EnumLoc;

    IntLoc = *IntParIO + 10;
    for(;;)
    {
        if (Char1Glob == 'A')
        {
            --IntLoc;
            *IntParIO = IntLoc - IntGlob;
            EnumLoc = Ident1;
        }
        if (EnumLoc == Ident1)
            break;
    }
}
+
/*
 * Proc3 -- stores PtrGlb's successor through PtrParOut (or sets
 * IntGlob = 100 when PtrGlb is NULL), then bumps PtrGlb->IntComp via
 * Proc7.
 *
 * NOTE(review): &PtrGlb->IntComp is evaluated even on the NULL branch; in
 * this program PtrGlb is always allocated by Proc0 before Proc3 runs.
 */
Proc3(PtrParOut)
RecordPtr *PtrParOut;
{
    if (PtrGlb != NULL)
        *PtrParOut = PtrGlb->PtrComp;
    else
        IntGlob = 100;
    Proc7(10, IntGlob, &PtrGlb->IntComp);
}
+
/*
 * Proc4 -- sets Char2Glob to 'B'.  BoolLoc is computed and then discarded;
 * the dead work is part of the benchmark's statement mix and must not be
 * optimized away by hand.
 */
Proc4()
{
    REG boolean BoolLoc;

    BoolLoc = Char1Glob == 'A';
    BoolLoc |= BoolGlob;
    Char2Glob = 'B';
}
+
/* Proc5 -- resets the character and boolean globals at the top of each
 * benchmark iteration (Proc2 relies on Char1Glob being 'A'). */
Proc5()
{
    Char1Glob = 'A';
    BoolGlob = FALSE;
}
+
+extern boolean Func3();
+
/*
 * Proc6 -- maps EnumParIn to an output enumeration value through a
 * Func3 check followed by a switch; exercises enum parameter passing
 * and multi-way branching.
 */
Proc6(EnumParIn, EnumParOut)
REG Enumeration EnumParIn;
REG Enumeration *EnumParOut;
{
    *EnumParOut = EnumParIn;
    if (! Func3(EnumParIn) )
        *EnumParOut = Ident4;      /* anything other than Ident3 */
    switch (EnumParIn)
    {
    case Ident1: *EnumParOut = Ident1; break;
    case Ident2: if (IntGlob > 100) *EnumParOut = Ident1;
             else *EnumParOut = Ident4;
             break;
    case Ident3: *EnumParOut = Ident2; break;
    case Ident4: break;
    case Ident5: *EnumParOut = Ident3;
    }
}
+
/* Proc7 -- *IntParOut = IntParI1 + 2 + IntParI2; exercises integer
 * parameter passing by value and by pointer. */
Proc7(IntParI1, IntParI2, IntParOut)
OneToFifty IntParI1;
OneToFifty IntParI2;
OneToFifty *IntParOut;
{
    REG OneToFifty IntLoc;

    IntLoc = IntParI1 + 2;
    *IntParOut = IntParI2 + IntLoc;
}
+
/*
 * Proc8 -- 1-D and 2-D array indexing around IntLoc = IntParI1 + 5, then
 * sets IntGlob = 5.  Indices stay within the 51-element arrays for the
 * IntParI1 values used by Proc0 (IntParI1 == 3 gives a maximum index
 * of 38 via the +30 offset).
 */
Proc8(Array1Par, Array2Par, IntParI1, IntParI2)
Array1Dim Array1Par;
Array2Dim Array2Par;
OneToFifty IntParI1;
OneToFifty IntParI2;
{
    REG OneToFifty IntLoc;
    REG OneToFifty IntIndex;

    IntLoc = IntParI1 + 5;
    Array1Par[IntLoc] = IntParI2;
    Array1Par[IntLoc+1] = Array1Par[IntLoc];
    Array1Par[IntLoc+30] = IntLoc;
    for (IntIndex = IntLoc; IntIndex <= (IntLoc+1); ++IntIndex)
        Array2Par[IntLoc][IntIndex] = IntLoc;
    ++Array2Par[IntLoc][IntLoc-1];
    Array2Par[IntLoc+20][IntLoc] = Array1Par[IntLoc];
    IntGlob = 5;
}
+
/* Func1 -- returns Ident1 when the two characters differ, Ident2 when
 * they are equal (note the inverted-looking comparison is intentional). */
Enumeration Func1(CharPar1, CharPar2)
CapitalLetter CharPar1;
CapitalLetter CharPar2;
{
    REG CapitalLetter CharLoc1;
    REG CapitalLetter CharLoc2;

    CharLoc1 = CharPar1;
    CharLoc2 = CharLoc1;
    if (CharLoc2 != CharPar2)
        return (Ident1);
    else
        return (Ident2);
}
+
/*
 * Func2 -- string comparison kernel: returns TRUE when StrParI1 compares
 * greater than StrParI2, FALSE otherwise (for the fixed strings Proc0
 * passes, the characters at offsets 1 and 2 differ, so the while loop
 * body runs and terminates).
 *
 * NOTE(review): if the loop body never executed, CharLoc would be read
 * uninitialized below; the 'W'..'Z' / 'X' branches are dead for the
 * inputs used by this benchmark ('A' is assigned in the loop).
 */
boolean Func2(StrParI1, StrParI2)
String30 StrParI1;
String30 StrParI2;
{
    REG OneToThirty IntLoc;
    REG CapitalLetter CharLoc;

    IntLoc = 1;
    while (IntLoc <= 1)
        if (Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1)
        {
            CharLoc = 'A';
            ++IntLoc;
        }
    if (CharLoc >= 'W' && CharLoc <= 'Z')
        IntLoc = 7;
    if (CharLoc == 'X')
        return(TRUE);
    else
    {
        if (strcmp(StrParI1, StrParI2) > 0)
        {
            IntLoc += 7;
            return (TRUE);
        }
        else
            return (FALSE);
    }
}
+
/* Func3 -- TRUE iff the argument is Ident3 (used by Proc6). */
boolean Func3(EnumParIn)
REG Enumeration EnumParIn;
{
    REG Enumeration EnumLoc;

    EnumLoc = EnumParIn;
    if (EnumLoc == Ident3) return (TRUE);
    return (FALSE);
}
+
#ifdef NOSTRUCTASSIGN
/* Byte-copy fallback used by the structassign macro on compilers that
 * cannot assign structs.  NOTE(review): this shadows the C library's
 * memcpy and, unlike it, returns nothing. */
memcpy(d, s, l)
register char *d;
register char *s;
register int l;
{
    while (l--) *d++ = *s++;
}
#endif
+/* ---------- */
diff --git a/wlauto/workloads/dungeondefenders/__init__.py b/wlauto/workloads/dungeondefenders/__init__.py
new file mode 100644
index 00000000..da924202
--- /dev/null
+++ b/wlauto/workloads/dungeondefenders/__init__.py
@@ -0,0 +1,34 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=R0801
+import os
+import time
+
+from wlauto import GameWorkload
+from wlauto.exceptions import WorkloadError, DeviceError
+
+
class DungeonDefenders(GameWorkload):
    # Workload identifier used on the WA command line and in agendas.
    name = 'dungeondefenders'
    description = """
    Dungeon Defenders game.

    """
    # Android package and activity launched for the game.
    package = 'com.trendy.ddapp'
    activity = 'com.trendy.ddapp.ddapp'
    # Seconds to wait for the game to finish loading before the recorded
    # input (revent) replay starts -- see GameWorkload.
    loading_time = 20
    # Tarball of game assets that GameWorkload pushes to the device.
    asset_file = 'com.trendy.ddapp.tar.gz'
diff --git a/wlauto/workloads/dungeondefenders/revent_files/Nexus10.run.revent b/wlauto/workloads/dungeondefenders/revent_files/Nexus10.run.revent
new file mode 100644
index 00000000..42b13a84
--- /dev/null
+++ b/wlauto/workloads/dungeondefenders/revent_files/Nexus10.run.revent
Binary files differ
diff --git a/wlauto/workloads/dungeondefenders/revent_files/Nexus10.setup.revent b/wlauto/workloads/dungeondefenders/revent_files/Nexus10.setup.revent
new file mode 100644
index 00000000..d3575a75
--- /dev/null
+++ b/wlauto/workloads/dungeondefenders/revent_files/Nexus10.setup.revent
Binary files differ
diff --git a/wlauto/workloads/facebook/__init__.py b/wlauto/workloads/facebook/__init__.py
new file mode 100644
index 00000000..cbc9a7c8
--- /dev/null
+++ b/wlauto/workloads/facebook/__init__.py
@@ -0,0 +1,82 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+import time
+import sys
+
+from wlauto import AndroidUiAutoBenchmark
+from wlauto import UiAutomatorWorkload
+from wlauto import AndroidBenchmark
+
+
class Facebook(AndroidUiAutoBenchmark):
    """Facebook workload driven by UI automation.

    The ``setup`` phase first runs a helper UI-automation jar against the
    Play Store to disable automatic app updates (otherwise an update popup
    can break the recorded interaction), then defers to the standard
    AndroidBenchmark setup.
    """

    name = 'facebook'
    # Fixed description typos: the package is com.facebook.katana (not
    # "patana"), and "behavier" -> "behavior".
    description = """
    Uses com.facebook.katana apk for facebook workload.
    This workload does the following activities in facebook

       Login to facebook account.
       Send a message.
       Check latest notification.
       Search particular user account and visit his/her facebook account.
       Find friends.
       Update the facebook status

    [NOTE: This workload starts disableUpdate workload as a part of setup to
           disable online updates, which helps to tackle problem of uncertain
           behavior during facebook workload run.]

    """
    package = 'com.facebook.katana'
    activity = '.LoginActivity'

    # 'du' prefix means 'disable update': settings for the helper
    # UI automation that switches off Play Store auto-updates.
    du_activity = 'com.android.vending/.AssetBrowserActivity'
    du_method_string = 'com.arm.wlauto.uiauto.facebook.UiAutomation#disableUpdate'
    du_jar_file = '/data/local/wa_usecases/com.arm.wlauto.uiauto.facebook.jar'
    du_run_timeout = 4 * 60  # seconds
    du_working_dir = '/data/local/wa_usecases'
    du_apk_file = '/disableupdateapk/com.android.vending-4.3.10.apk'
    DELAY = 5  # seconds to let the Play Store settle after the automation

    def setup(self, context):
        """Disable Play Store auto-updates, then run the normal setup."""
        UiAutomatorWorkload.setup(self, context)

        # Start the play store activity.
        self.device.execute('am start {}'.format(self.du_activity))

        # Build the uiautomator invocation for the disableUpdate method.
        command = 'uiautomator runtest {} -e workdir {} -c {}'.format(self.du_jar_file,
                                                                      self.du_working_dir,
                                                                      self.du_method_string)

        # Run the disable-update automation on the device.
        self.device.execute(command, self.du_run_timeout)
        time.sleep(self.DELAY)

        # Stop the play store activity before the benchmark proper.
        self.device.execute('am force-stop com.android.vending')

        AndroidBenchmark.setup(self, context)

    def update_result(self, context):
        super(Facebook, self).update_result(context)

    def teardown(self, context):
        # Intentionally a no-op: skips the parent teardown so the app is
        # left in place between runs.
        pass
+
diff --git a/wlauto/workloads/facebook/com.arm.wlauto.uiauto.facebook.jar b/wlauto/workloads/facebook/com.arm.wlauto.uiauto.facebook.jar
new file mode 100644
index 00000000..098030b0
--- /dev/null
+++ b/wlauto/workloads/facebook/com.arm.wlauto.uiauto.facebook.jar
Binary files differ
diff --git a/wlauto/workloads/facebook/uiauto/build.sh b/wlauto/workloads/facebook/uiauto/build.sh
new file mode 100755
index 00000000..00535591
--- /dev/null
+++ b/wlauto/workloads/facebook/uiauto/build.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
# Location inside the ant output tree where compiled uiauto classes go.
class_dir=bin/classes/com/arm/wlauto/uiauto

# Resolve the prebuilt BaseUiAutomation.class shipped with the installed
# wlauto package (python 2 print syntax, matching the rest of the project).
base_class=$(python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')")

# Quote expansions so the copy also works when the wlauto install path
# contains spaces (the originals were unquoted).
mkdir -p "$class_dir"
cp "$base_class" "$class_dir"

ant build

# Publish the built jar next to the workload's __init__.py, if it built.
if [[ -f bin/com.arm.wlauto.uiauto.facebook.jar ]]; then
    cp bin/com.arm.wlauto.uiauto.facebook.jar ..
fi
diff --git a/wlauto/workloads/facebook/uiauto/build.xml b/wlauto/workloads/facebook/uiauto/build.xml
new file mode 100644
index 00000000..e39db0ff
--- /dev/null
+++ b/wlauto/workloads/facebook/uiauto/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="com.arm.wlauto.uiauto.facebook" default="help">
+
+ <!-- The local.properties file is created and updated by the 'android' tool.
+ It contains the path to the SDK. It should *NOT* be checked into
+ Version Control Systems. -->
+ <property file="local.properties" />
+
+ <!-- The ant.properties file can be created by you. It is only edited by the
+ 'android' tool to add properties to it.
+ This is the place to change some Ant specific build properties.
+ Here are some properties you may want to change/update:
+
+ source.dir
+ The name of the source directory. Default is 'src'.
+ out.dir
+ The name of the output directory. Default is 'bin'.
+
+ For other overridable properties, look at the beginning of the rules
+ files in the SDK, at tools/ant/build.xml
+
+ Properties related to the SDK location or the project target should
+ be updated using the 'android' tool with the 'update' action.
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems.
+
+ -->
+ <property file="ant.properties" />
+
+ <!-- if sdk.dir was not set from one of the property file, then
+ get it from the ANDROID_HOME env var.
+ This must be done before we load project.properties since
+ the proguard config can use sdk.dir -->
+ <property environment="env" />
+ <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+ <isset property="env.ANDROID_HOME" />
+ </condition>
+
+ <!-- The project.properties file is created and updated by the 'android'
+ tool, as well as ADT.
+
+ This contains project specific properties such as project target, and library
+ dependencies. Lower level build properties are stored in ant.properties
+ (or in .classpath for Eclipse projects).
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems. -->
+ <loadproperties srcFile="project.properties" />
+
+ <!-- quick check on sdk.dir -->
+ <fail
+ message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+ unless="sdk.dir"
+ />
+
+ <!--
+ Import per project custom build rules if present at the root of the project.
+ This is the place to put custom intermediary targets such as:
+ -pre-build
+ -pre-compile
+ -post-compile (This is typically used for code obfuscation.
+ Compiled code location: ${out.classes.absolute.dir}
+ If this is not done in place, override ${out.dex.input.absolute.dir})
+ -post-package
+ -post-build
+ -pre-clean
+ -->
+ <import file="custom_rules.xml" optional="true" />
+
+ <!-- Import the actual build file.
+
+ To customize existing targets, there are two options:
+ - Customize only one target:
+ - copy/paste the target into this file, *before* the
+ <import> task.
+ - customize it to your needs.
+ - Customize the whole content of build.xml
+ - copy/paste the content of the rules files (minus the top node)
+ into this file, replacing the <import> task.
+ - customize to your needs.
+
+ ***********************
+ ****** IMPORTANT ******
+ ***********************
+ In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+ in order to avoid having your file be overridden by tools such as "android update project"
+ -->
+ <!-- version-tag: VERSION_TAG -->
+ <import file="${sdk.dir}/tools/ant/uibuild.xml" />
+
+</project>
diff --git a/wlauto/workloads/facebook/uiauto/project.properties b/wlauto/workloads/facebook/uiauto/project.properties
new file mode 100644
index 00000000..a3ee5ab6
--- /dev/null
+++ b/wlauto/workloads/facebook/uiauto/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=android-17
diff --git a/wlauto/workloads/facebook/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java b/wlauto/workloads/facebook/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
new file mode 100644
index 00000000..3c9dbb2c
--- /dev/null
+++ b/wlauto/workloads/facebook/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
@@ -0,0 +1,257 @@
+/* Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+package com.arm.wlauto.uiauto.facebook;
+
+import android.app.Activity;
+import android.os.Bundle;
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+
+import com.arm.wlauto.uiauto.BaseUiAutomation;
+
+public class UiAutomation extends BaseUiAutomation {
+
+ public static String TAG = "facebook";
+
+ /*
+ * The 'runUiAutomation' method implements the following activities
+ * Login to facebook account.
+ * Send a message.
+ * Check latest notification.
+ * Search particular user account and visit his/her facebook account.
+ * Go to find friends.
+ * Update the facebook status
+ */
+ public void runUiAutomation() throws Exception {
+ final int timeout = 5;
+ UiSelector selector = new UiSelector();
+
+ UiObject logInButton = new UiObject(selector
+ .className("android.widget.Button").index(3).text("Log In"));
+
+ UiObject emailField = new UiObject(selector
+ .className("android.widget.EditText").index(1));
+ emailField.clearTextField();
+ emailField.setText("abkksathe@gmail.com");
+
+ UiObject passwordField = new UiObject(selector
+ .className("android.widget.EditText").index(2));
+ passwordField.clearTextField();
+ passwordField.setText("highelymotivated");
+
+ logInButton.clickAndWaitForNewWindow(timeout);
+
+ sleep(timeout);
+
+ //Click on message logo
+ UiObject messageLogo = new UiObject(new UiSelector()
+ .className("android.widget.RelativeLayout").index(0)
+ .childSelector(new UiSelector()
+ .className("android.widget.LinearLayout").index(3)
+ .childSelector(new UiSelector()
+ .className("android.widget.RelativeLayout").index(1)
+ .childSelector(new UiSelector()
+ .className("android.widget.ImageButton").index(0)))));
+ messageLogo.clickAndWaitForNewWindow(timeout);
+
+ //send message
+ UiObject clickMessage = new UiObject(new UiSelector()
+ .className("android.support.v4.view.ViewPager").index(0)
+ .childSelector(new UiSelector()
+ .className("android.widget.RelativeLayout").index(1)));
+ clickMessage.clickAndWaitForNewWindow(timeout);
+
+ sleep(timeout);
+
+ UiObject sendMessage = new UiObject(new UiSelector()
+ .className("android.widget.FrameLayout").index(4)
+ .childSelector(new UiSelector()
+ .className("android.widget.LinearLayout").index(2))
+ .childSelector(new UiSelector()
+ .className("android.widget.EditText").index(0)
+ .text("Write a message")));
+ sendMessage.click();
+
+ sleep(timeout);
+
+ UiObject editMessage = new UiObject(new UiSelector()
+ .className("android.widget.EditText").text("Write a message"));
+
+ editMessage.setText("Hi how are you?????");
+
+ UiObject sendButton = new UiObject(new UiSelector()
+ .className("android.widget.TextView").text("Send"));
+ sendButton.click();
+
+ getUiDevice().pressDPadDown();
+ sleep(timeout);
+ getUiDevice().pressBack();
+ sleep(timeout);
+ getUiDevice().pressBack();
+
+ //Check for notifications
+ UiObject clickNotificationsLogo = new UiObject(new UiSelector()
+ .className("android.widget.RelativeLayout").index(0)
+ .childSelector(new UiSelector()
+ .className("android.widget.LinearLayout").index(3)
+ .childSelector(new UiSelector()
+ .className("android.widget.RelativeLayout").index(2)
+ .childSelector(new UiSelector()
+ .className("android.widget.ImageButton").index(0)))));
+ clickNotificationsLogo.clickAndWaitForNewWindow(timeout);
+
+ //Click on latest notification
+ UiObject clickNotify = new UiObject(new UiSelector()
+ .className("android.support.v4.view.ViewPager").index(0)
+ .childSelector(new UiSelector()
+ .className("android.widget.LinearLayout").index(1)));
+ clickNotify.clickAndWaitForNewWindow(timeout);
+
+ sleep(timeout);
+ getUiDevice().pressBack();
+ sleep(timeout);
+ getUiDevice().pressBack();
+
+ //Search for the facebook account
+ UiObject clickBar = new UiObject(new UiSelector()
+ .className("android.view.View").index(0)
+ .childSelector(new UiSelector()
+ .className("android.widget.ImageButton").index(0)
+ .description("Main navigation menu")));
+ clickBar.clickAndWaitForNewWindow(timeout);
+
+ UiObject clickSearch = new UiObject(new UiSelector()
+ .className("android.widget.FrameLayout").index(0)
+ .childSelector(new UiSelector()
+ .className("android.widget.LinearLayout").index(0)
+ .childSelector(new UiSelector()
+ .className("android.widget.FrameLayout").index(0)
+ .childSelector(new UiSelector()
+ .className("android.widget.FrameLayout").index(1)
+ .childSelector(new UiSelector()
+ .className("android.widget.EditText").index(1)
+ .text("Search"))))));
+ clickSearch.clickAndWaitForNewWindow(timeout);
+
+ UiObject editSearch = new UiObject(new UiSelector()
+ .className("android.widget.EditText").index(0).text("Search"));
+
+ editSearch.clearTextField();
+ editSearch.setText("amol kamble");
+ sleep(timeout);
+
+ UiObject clickOnSearchResult = new UiObject(new UiSelector()
+ .className("android.webkit.WebView").index(0));
+ clickOnSearchResult.clickTopLeft();
+
+ sleep(2 * timeout);
+
+ getUiDevice().pressBack();
+ sleep(timeout);
+ getUiDevice().pressBack();
+
+ clickBar.click();
+
+ sleep(timeout);
+
+ //Click on find friends
+ UiObject clickFriends = new UiObject(new UiSelector()
+ .className("android.widget.FrameLayout").index(0)
+ .childSelector(new UiSelector()
+ .className("android.widget.LinearLayout").index(0)
+ .childSelector(new UiSelector()
+ .className("android.widget.FrameLayout").index(0)
+ .childSelector(new UiSelector()
+ .className("android.widget.FrameLayout").index(1)
+ .childSelector(new UiSelector()
+ .className("android.widget.RelativeLayout").index(0)
+ .childSelector(new UiSelector()
+ .className("android.widget.ListView").index(2)))))));
+
+ UiObject friends = clickFriends.getChild(new UiSelector()
+ .className("android.widget.RelativeLayout").index(3));
+ friends.click();
+ sleep(timeout);
+ getUiDevice().pressBack();
+
+ //Update the status
+ UiObject updateStatus = new UiObject(new UiSelector()
+ .className("android.widget.FrameLayout").index(1)
+ .childSelector(new UiSelector()
+ .className("android.widget.FrameLayout").index(1)
+ .childSelector(new UiSelector()
+ .className("android.widget.RelativeLayout").index(1)
+ .childSelector(new UiSelector()
+ .className("android.widget.LinearLayout").index(0)
+ .childSelector(new UiSelector()
+ .className("android.widget.LinearLayout").index(0)
+ .childSelector(new UiSelector()
+ .className("android.widget.LinearLayout").index(0)))))));
+
+ updateStatus.clickAndWaitForNewWindow(timeout);
+
+ UiObject editUpdateStatus = new UiObject(new UiSelector()
+ .className("android.widget.EditText")
+ .text("What's on your mind?"));
+ editUpdateStatus.clearTextField();
+ editUpdateStatus.setText("hellllooooooo its done!!");
+
+ UiObject clickPost = new UiObject(new UiSelector()
+ .className("android.widget.RelativeLayout").index(0)
+ .childSelector(new UiSelector()
+ .className("android.widget.LinearLayout").index(3)));
+ clickPost.clickAndWaitForNewWindow(timeout);
+ getUiDevice().pressHome();
+ }
+
+ //disable update using playstore
+ public void disableUpdate() throws UiObjectNotFoundException {
+
+ UiObject accountSelect = new UiObject(new UiSelector()
+ .className("android.widget.Button").text("Accept"));
+
+ if (accountSelect.exists())
+ accountSelect.click();
+
+ UiObject moreOptions = new UiObject(new UiSelector()
+ .className("android.widget.ImageButton")
+ .description("More options"));
+ moreOptions.click();
+
+ UiObject settings = new UiObject(new UiSelector()
+ .className("android.widget.TextView").text("Settings"));
+ settings.clickAndWaitForNewWindow();
+
+ UiObject autoUpdate = new UiObject(new UiSelector()
+ .className("android.widget.TextView")
+ .text("Auto-update apps"));
+
+ autoUpdate.clickAndWaitForNewWindow();
+
+ UiObject clickAutoUpdate = new UiObject(new UiSelector()
+ .className("android.widget.CheckedTextView")
+ .text("Do not auto-update apps"));
+
+ clickAutoUpdate.clickAndWaitForNewWindow();
+
+ getUiDevice().pressBack();
+ getUiDevice().pressHome();
+ }
+}
diff --git a/wlauto/workloads/geekbench/__init__.py b/wlauto/workloads/geekbench/__init__.py
new file mode 100644
index 00000000..84a048f6
--- /dev/null
+++ b/wlauto/workloads/geekbench/__init__.py
@@ -0,0 +1,351 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=E1101
+import os
+import re
+import tempfile
+import json
+from collections import defaultdict
+
+from wlauto import AndroidUiAutoBenchmark, Parameter, Artifact
+from wlauto.exceptions import ConfigError, WorkloadError
+from wlauto.utils.misc import capitalize
+import wlauto.common.android.resources
+
+
+class Geekbench(AndroidUiAutoBenchmark):
+
+ name = 'geekbench'
+ description = """
+ Geekbench provides a comprehensive set of benchmarks engineered to quickly
+ and accurately measure processor and memory performance.
+
+ http://www.primatelabs.com/geekbench/
+
+ From the website:
+
+ Designed to make benchmarks easy to run and easy to understand, Geekbench
+ takes the guesswork out of producing robust and reliable benchmark results.
+
+ Geekbench scores are calibrated against a baseline score of 1,000 (which is
+ the score of a single-processor Power Mac G5 @ 1.6GHz). Higher scores are
+ better, with double the score indicating double the performance.
+
+ The benchmarks fall into one of four categories:
+
+ - integer performance.
+ - floating point performance.
+ - memory performance.
+ - stream performance.
+
+ Geekbench benchmarks: http://www.primatelabs.com/geekbench/doc/benchmarks.html
+
+    Geekbench scoring methodology:
+ http://support.primatelabs.com/kb/geekbench/interpreting-geekbench-scores
+
+ """
+ summary_metrics = ['score', 'multicore_score']
+ versions = {
+ '3': {
+ 'package': 'com.primatelabs.geekbench3',
+ 'activity': '.HomeActivity',
+ },
+ '2': {
+ 'package': 'ca.primatelabs.geekbench2',
+ 'activity': '.HomeActivity',
+ },
+ }
+ begin_regex = re.compile(r'^\s*D/WebViewClassic.loadDataWithBaseURL\(\s*\d+\s*\)'
+ r'\s*:\s*(?P<content>\<.*)\s*$')
+ replace_regex = re.compile(r'<[^>]*>')
+
+ parameters = [
+ Parameter('version', default=sorted(versions.keys())[-1], allowed_values=sorted(versions.keys()),
+ description='Specifies which version of the workload should be run.'),
+ Parameter('times', kind=int, default=1,
+                  description=('Specifies the number of times the benchmark will be run in a "tight '
+                               'loop", i.e. without performing setup/teardown in between.')),
+ ]
+
+ @property
+ def activity(self):
+ return self.versions[self.version]['activity']
+
+ @property
+ def package(self):
+ return self.versions[self.version]['package']
+
+ def __init__(self, device, **kwargs):
+ super(Geekbench, self).__init__(device, **kwargs)
+ self.uiauto_params['version'] = self.version
+ self.uiauto_params['times'] = self.times
+ self.run_timeout = 3 * 60 * self.times
+
+ def init_resources(self, context):
+ self.apk_file = context.resolver.get(wlauto.common.android.resources.ApkFile(self), version=self.version)
+ self.uiauto_file = context.resolver.get(wlauto.common.android.resources.JarFile(self))
+ self.device_uiauto_file = self.device.path.join(self.device.working_directory,
+ os.path.basename(self.uiauto_file))
+ if not self.uiauto_package:
+ self.uiauto_package = os.path.splitext(os.path.basename(self.uiauto_file))[0]
+
+ def update_result(self, context):
+ super(Geekbench, self).update_result(context)
+ update_method = getattr(self, 'update_result_{}'.format(self.version))
+ update_method(context)
+
+ def validate(self):
+ if (self.times > 1) and (self.version == '2'):
+ raise ConfigError('times parameter is not supported for version 2 of Geekbench.')
+
+ def update_result_2(self, context):
+ score_calculator = GBScoreCalculator()
+ score_calculator.parse(self.logcat_log)
+ score_calculator.update_results(context)
+
+ def update_result_3(self, context):
+ outfile_glob = self.device.path.join(self.device.package_data_directory, self.package, 'files', '*gb3')
+ on_device_output_files = [f.strip() for f in
+ self.device.execute('ls {}'.format(outfile_glob), as_root=True).split('\n')]
+ for i, on_device_output_file in enumerate(on_device_output_files):
+ host_temp_file = tempfile.mktemp()
+ self.device.pull_file(on_device_output_file, host_temp_file)
+ host_output_file = os.path.join(context.output_directory, os.path.basename(on_device_output_file))
+ with open(host_temp_file) as fh:
+ data = json.load(fh)
+ os.remove(host_temp_file)
+ with open(host_output_file, 'w') as wfh:
+ json.dump(data, wfh, indent=4)
+ context.iteration_artifacts.append(Artifact('geekout', path=os.path.basename(on_device_output_file),
+ kind='data',
+ description='Geekbench 3 output from device.'))
+ context.result.add_metric(namemify('score', i), data['score'])
+ context.result.add_metric(namemify('multicore_score', i), data['multicore_score'])
+ for section in data['sections']:
+ context.result.add_metric(namemify(section['name'] + '_score', i), section['score'])
+ context.result.add_metric(namemify(section['name'] + '_multicore_score', i),
+ section['multicore_score'])
+
+
+class GBWorkload(object):
+ """
+ Geekbench workload (not to be confused with WA's workloads). This is a single test run by
+    geekbench, such as performing compression or generating Mandelbrot.
+
+ """
+
+ # Index maps onto the hundreds digit of the ID.
+ categories = [None, 'integer', 'float', 'memory', 'stream']
+
+ # 2003 entry-level Power Mac G5 is considered to have a baseline score of
+ # 1000 for every category.
+ pmac_g5_base_score = 1000
+
+ units_conversion_map = {
+ 'K': 1,
+ 'M': 1000,
+ 'G': 1000000,
+ }
+
+ def __init__(self, wlid, name, pmac_g5_st_score, pmac_g5_mt_score):
+ """
+ :param wlid: A three-digit workload ID. Uniquely identifies a workload and also
+ determines the category a workload belongs to.
+ :param name: The name of the workload.
+ :param pmac_g5_st_score: Score achieved for this workload on 2003 entry-level
+ Power Mac G5 running in a single thread.
+ :param pmac_g5_mt_score: Score achieved for this workload on 2003 entry-level
+ Power Mac G5 running in multiple threads.
+
+ """
+ self.wlid = wlid
+ self.name = name
+ self.pmac_g5_st_score = pmac_g5_st_score
+ self.pmac_g5_mt_score = pmac_g5_mt_score
+ self.category = self.categories[int(wlid) // 100]
+ self.collected_results = []
+
+ def add_result(self, value, units):
+ self.collected_results.append(self.convert_to_kilo(value, units))
+
+ def convert_to_kilo(self, value, units):
+ return value * self.units_conversion_map[units[0]]
+
+ def clear(self):
+ self.collected_results = []
+
+ def get_scores(self):
+ """
+        Returns a tuple (single-threaded score, multi-threaded score) for this workload.
+ Some workloads only have a single-threaded score, in which case multi-threaded
+ score will be ``None``.
+
+ Geekbench will perform four iterations of each workload in single-threaded and,
+ for some workloads, multi-threaded configurations. Thus there should always be
+ either four or eight scores collected for each workload. Single-threaded iterations
+ are always done before multi-threaded, so the ordering of the scores can be used
+ to determine which configuration they belong to.
+
+ This method should not be called before score collection has finished.
+
+ """
+ no_of_results = len(self.collected_results)
+ if no_of_results == 4:
+ return (self._calculate(self.collected_results[:4], self.pmac_g5_st_score), None)
+ if no_of_results == 8:
+ return (self._calculate(self.collected_results[:4], self.pmac_g5_st_score),
+ self._calculate(self.collected_results[4:], self.pmac_g5_mt_score))
+ else:
+ msg = 'Collected {} results for Geekbench {} workload;'.format(no_of_results, self.name)
+ msg += ' expecting either 4 or 8.'
+ raise WorkloadError(msg)
+
+ def _calculate(self, values, scale_factor):
+ return max(values) * self.pmac_g5_base_score / scale_factor
+
+ def __str__(self):
+ return self.name
+
+ __repr__ = __str__
+
+
+class GBScoreCalculator(object):
+ """
+ Parses logcat output to extract raw Geekbench workload values and converts them into
+ category and overall scores.
+
+ """
+
+ result_regex = re.compile(r'workload (?P<id>\d+) (?P<value>[0-9.]+) '
+ r'(?P<units>[a-zA-Z/]+) (?P<time>[0-9.]+)s')
+
+ # Indicates contribution to the overall score.
+ category_weights = {
+ 'integer': 0.3357231,
+ 'float': 0.3594,
+ 'memory': 0.1926489,
+ 'stream': 0.1054738,
+ }
+ #pylint: disable=C0326
+ workloads = [
+ # ID Name Power Mac ST Power Mac MT
+ GBWorkload(101, 'Blowfish', 43971, 40979),
+ GBWorkload(102, 'Text Compress', 3202, 3280),
+ GBWorkload(103, 'Text Decompress', 4112, 3986),
+ GBWorkload(104, 'Image Compress', 8272, 8412),
+ GBWorkload(105, 'Image Decompress', 16800, 16330),
+ GBWorkload(107, 'Lua', 385, 385),
+
+ GBWorkload(201, 'Mandelbrot', 665589, 653746),
+ GBWorkload(202, 'Dot Product', 481449, 455422),
+ GBWorkload(203, 'LU Decomposition', 889933, 877657),
+ GBWorkload(204, 'Primality Test', 149394, 185502),
+ GBWorkload(205, 'Sharpen Image', 2340, 2304),
+ GBWorkload(206, 'Blur Image', 791, 787),
+
+ GBWorkload(302, 'Read Sequential', 1226708, None),
+ GBWorkload(304, 'Write Sequential', 683782, None),
+ GBWorkload(306, 'Stdlib Allocate', 3739, None),
+ GBWorkload(307, 'Stdlib Write', 2070681, None),
+ GBWorkload(308, 'Stdlib Copy', 1030360, None),
+
+ GBWorkload(401, 'Stream Copy', 1367892, None),
+ GBWorkload(402, 'Stream Scale', 1296053, None),
+ GBWorkload(403, 'Stream Add', 1507115, None),
+ GBWorkload(404, 'Stream Triad', 1384526, None),
+ ]
+
+ def __init__(self):
+ self.workload_map = {wl.wlid: wl for wl in self.workloads}
+
+ def parse(self, filepath):
+ """
+ Extract results from the specified file. The file should contain a logcat log of Geekbench execution.
+ Iteration results in the log appear as 'I/geekbench' category entries in the following format::
+
+            | workload ID   value      units   timing
+ | \------------- | ----/ ---/
+ | | | | |
+ | I/geekbench(29026): [....] workload 101 132.9 MB/sec 0.0300939s
+ | | |
+ | | -----\
+ | label random crap we don't care about
+
+ """
+ for wl in self.workloads:
+ wl.clear()
+ with open(filepath) as fh:
+ for line in fh:
+ match = self.result_regex.search(line)
+ if match:
+ wkload = self.workload_map[int(match.group('id'))]
+ wkload.add_result(float(match.group('value')), match.group('units'))
+
+ def update_results(self, context):
+ """
+ http://support.primatelabs.com/kb/geekbench/interpreting-geekbench-2-scores
+
+ From the website:
+
+ Each workload's performance is compared against a baseline to determine a score. These
+ scores are averaged together to determine an overall, or Geekbench, score for the system.
+
+ Geekbench uses the 2003 entry-level Power Mac G5 as the baseline with a score of 1,000
+ points. Higher scores are better, with double the score indicating double the performance.
+
+ Geekbench provides three different kinds of scores:
+
+ :Workload Scores: Each time a workload is executed Geekbench calculates a score based
+ on the computer's performance compared to the baseline
+ performance. There can be multiple workload scores for the
+ same workload as Geekbench can execute each workload multiple
+ times with different settings. For example, the "Dot Product"
+ workload is executed four times (single-threaded scalar code,
+ multi-threaded scalar code, single-threaded vector code, and
+ multi-threaded vector code) producing four "Dot Product" scores.
+
+ :Section Scores: A section score is the average of all the workload scores for
+ workloads that are part of the section. These scores are useful
+ for determining the performance of the computer in a particular
+ area. See the section descriptions above for a summary on what
+ each section measures.
+
+ :Geekbench Score: The Geekbench score is the weighted average of the four section
+ scores. The Geekbench score provides a way to quickly compare
+ performance across different computers and different platforms
+ without getting bogged down in details.
+
+ """
+ scores_by_category = defaultdict(list)
+ for wkload in self.workloads:
+ st_score, mt_score = wkload.get_scores()
+ scores_by_category[wkload.category].append(st_score)
+ context.result.add_metric(wkload.name + ' (single-threaded)', int(st_score))
+ if mt_score is not None:
+ scores_by_category[wkload.category].append(mt_score)
+ context.result.add_metric(wkload.name + ' (multi-threaded)', int(mt_score))
+
+ overall_score = 0
+ for category in scores_by_category:
+ scores = scores_by_category[category]
+ category_score = sum(scores) / len(scores)
+ overall_score += category_score * self.category_weights[category]
+ context.result.add_metric(capitalize(category) + ' Score', int(category_score))
+ context.result.add_metric('Geekbench Score', int(overall_score))
+
+
+def namemify(basename, i):
+ return basename + (' {}'.format(i) if i else '')
diff --git a/wlauto/workloads/geekbench/com.arm.wlauto.uiauto.geekbench.jar b/wlauto/workloads/geekbench/com.arm.wlauto.uiauto.geekbench.jar
new file mode 100644
index 00000000..5359cc30
--- /dev/null
+++ b/wlauto/workloads/geekbench/com.arm.wlauto.uiauto.geekbench.jar
Binary files differ
diff --git a/wlauto/workloads/geekbench/uiauto/build.sh b/wlauto/workloads/geekbench/uiauto/build.sh
new file mode 100755
index 00000000..7da9f5fe
--- /dev/null
+++ b/wlauto/workloads/geekbench/uiauto/build.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+class_dir=bin/classes/com/arm/wlauto/uiauto
+base_class=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')"`
+mkdir -p $class_dir
+cp $base_class $class_dir
+
+ant build
+
+if [[ -f bin/com.arm.wlauto.uiauto.geekbench.jar ]]; then
+ cp bin/com.arm.wlauto.uiauto.geekbench.jar ..
+fi
diff --git a/wlauto/workloads/geekbench/uiauto/build.xml b/wlauto/workloads/geekbench/uiauto/build.xml
new file mode 100644
index 00000000..7fdf1685
--- /dev/null
+++ b/wlauto/workloads/geekbench/uiauto/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="com.arm.wlauto.uiauto.geekbench" default="help">
+
+ <!-- The local.properties file is created and updated by the 'android' tool.
+ It contains the path to the SDK. It should *NOT* be checked into
+ Version Control Systems. -->
+ <property file="local.properties" />
+
+ <!-- The ant.properties file can be created by you. It is only edited by the
+ 'android' tool to add properties to it.
+ This is the place to change some Ant specific build properties.
+ Here are some properties you may want to change/update:
+
+ source.dir
+ The name of the source directory. Default is 'src'.
+ out.dir
+ The name of the output directory. Default is 'bin'.
+
+ For other overridable properties, look at the beginning of the rules
+ files in the SDK, at tools/ant/build.xml
+
+ Properties related to the SDK location or the project target should
+ be updated using the 'android' tool with the 'update' action.
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems.
+
+ -->
+ <property file="ant.properties" />
+
+ <!-- if sdk.dir was not set from one of the property file, then
+ get it from the ANDROID_HOME env var.
+ This must be done before we load project.properties since
+ the proguard config can use sdk.dir -->
+ <property environment="env" />
+ <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+ <isset property="env.ANDROID_HOME" />
+ </condition>
+
+ <!-- The project.properties file is created and updated by the 'android'
+ tool, as well as ADT.
+
+ This contains project specific properties such as project target, and library
+ dependencies. Lower level build properties are stored in ant.properties
+ (or in .classpath for Eclipse projects).
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems. -->
+ <loadproperties srcFile="project.properties" />
+
+ <!-- quick check on sdk.dir -->
+ <fail
+ message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+ unless="sdk.dir"
+ />
+
+ <!--
+ Import per project custom build rules if present at the root of the project.
+ This is the place to put custom intermediary targets such as:
+ -pre-build
+ -pre-compile
+ -post-compile (This is typically used for code obfuscation.
+ Compiled code location: ${out.classes.absolute.dir}
+ If this is not done in place, override ${out.dex.input.absolute.dir})
+ -post-package
+ -post-build
+ -pre-clean
+ -->
+ <import file="custom_rules.xml" optional="true" />
+
+ <!-- Import the actual build file.
+
+ To customize existing targets, there are two options:
+ - Customize only one target:
+ - copy/paste the target into this file, *before* the
+ <import> task.
+ - customize it to your needs.
+ - Customize the whole content of build.xml
+ - copy/paste the content of the rules files (minus the top node)
+ into this file, replacing the <import> task.
+ - customize to your needs.
+
+ ***********************
+ ****** IMPORTANT ******
+ ***********************
+ In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+ in order to avoid having your file be overridden by tools such as "android update project"
+ -->
+ <!-- version-tag: VERSION_TAG -->
+ <import file="${sdk.dir}/tools/ant/uibuild.xml" />
+
+</project>
diff --git a/wlauto/workloads/geekbench/uiauto/project.properties b/wlauto/workloads/geekbench/uiauto/project.properties
new file mode 100644
index 00000000..a3ee5ab6
--- /dev/null
+++ b/wlauto/workloads/geekbench/uiauto/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=android-17
diff --git a/wlauto/workloads/geekbench/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java b/wlauto/workloads/geekbench/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
new file mode 100644
index 00000000..968d2abc
--- /dev/null
+++ b/wlauto/workloads/geekbench/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
@@ -0,0 +1,121 @@
+/* Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+package com.arm.wlauto.uiauto.geekbench;
+
+import java.util.concurrent.TimeUnit;
+
+import android.app.Activity;
+import android.os.Bundle;
+import android.util.Log;
+import android.view.KeyEvent;
+
+// Import the uiautomator libraries
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+
+import com.arm.wlauto.uiauto.BaseUiAutomation;
+
+public class UiAutomation extends BaseUiAutomation {
+
+ public static String TAG = "geekbench";
+
+ public void runUiAutomation() throws Exception {
+ Bundle params = getParams();
+ int version = Integer.parseInt(params.getString("version"));
+ int times = Integer.parseInt(params.getString("times"));
+
+ for (int i = 0; i < times; i++) {
+ runBenchmarks();
+ switch(version) {
+ case 2:
+ // In version 2, we scroll through the results WebView to make sure
+ // all results appear on the screen, which causes them to be dumped into
+ // logcat by the Linaro hacks.
+ waitForResultsv2();
+ scrollThroughResults();
+ break;
+ case 3:
+ // Attempting to share the results will generate the .gb3 file with
+ // results that can then be pulled from the device. This is not possible
+                    // in version 2 of Geekbench (Share option was added later).
+ waitForResultsv3();
+ shareResults();
+ break;
+ }
+
+ if (i < (times - 1)) {
+ getUiDevice().pressBack();
+ getUiDevice().pressBack(); // twice
+ }
+ }
+
+ Bundle status = new Bundle();
+ getAutomationSupport().sendStatus(Activity.RESULT_OK, status);
+ }
+
+ public void runBenchmarks() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject runButton = new UiObject(selector.text("Run Benchmarks")
+ .className("android.widget.Button"));
+ if (!runButton.exists()) {
+ getUiDevice().pressBack();
+ }
+ runButton.click();
+ }
+
+ public void waitForResultsv2() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject resultsWebview = new UiObject(selector.className("android.webkit.WebView"));
+ if (!resultsWebview.waitForExists(TimeUnit.SECONDS.toMillis(200))) {
+ throw new UiObjectNotFoundException("Did not see Geekbench results screen.");
+ }
+ }
+
+ public void waitForResultsv3() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject runningTextView = new UiObject(selector.text("Running Benchmarks...")
+ .className("android.widget.TextView"));
+ runningTextView.waitForExists(TimeUnit.SECONDS.toMillis(2));
+ if (!runningTextView.waitUntilGone(TimeUnit.SECONDS.toMillis(200))) {
+ throw new UiObjectNotFoundException("Did not get to Geekbench results screen.");
+ }
+ }
+
+ public void scrollThroughResults() throws Exception {
+ UiSelector selector = new UiSelector();
+ getUiDevice().pressKeyCode(KeyEvent.KEYCODE_PAGE_DOWN);
+ sleep(1);
+ getUiDevice().pressKeyCode(KeyEvent.KEYCODE_PAGE_DOWN);
+ sleep(1);
+ getUiDevice().pressKeyCode(KeyEvent.KEYCODE_PAGE_DOWN);
+ sleep(1);
+ getUiDevice().pressKeyCode(KeyEvent.KEYCODE_PAGE_DOWN);
+ }
+
+ public void shareResults() throws Exception {
+ sleep(2); // transition
+ UiSelector selector = new UiSelector();
+ getUiDevice().pressMenu();
+ UiObject runButton = new UiObject(selector.text("Share")
+ .className("android.widget.TextView"));
+ runButton.waitForExists(500);
+ runButton.click();
+ }
+}
diff --git a/wlauto/workloads/glbcorp/__init__.py b/wlauto/workloads/glbcorp/__init__.py
new file mode 100644
index 00000000..2ffcdb0c
--- /dev/null
+++ b/wlauto/workloads/glbcorp/__init__.py
@@ -0,0 +1,209 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=E1101,W0201,E0203
+
+from __future__ import division
+import os
+import re
+import time
+import select
+import json
+import threading
+import subprocess
+
+from wlauto import ApkWorkload, Parameter, Alias
+from wlauto.exceptions import WorkloadError
+
+
+DELAY = 2
+
+
+class GlbCorp(ApkWorkload):
+
+    name = 'glb_corporate'
+    description = """
+    GFXBench GL (a.k.a. GLBench) v3.0 Corporate version.
+
+    This is a version of GLBench available through a corporate license (distinct
+    from the version available in Google Play store).
+
+    """
+    package = 'net.kishonti.gfxbench'
+    activity = 'net.kishonti.benchui.TestActivity'
+
+    # Matches the logcat line that opens a JSON results blob ("... result: {").
+    result_start_regex = re.compile(r'I/TfwActivity\s*\(\s*\d+\):\s+\S+\s+result: {')
+    # Matches the logcat prefix that gets stripped from each line of the blob.
+    preamble_regex = re.compile(r'I/TfwActivity\s*\(\s*\d+\):\s+')
+
+    # Test IDs accepted by the benchmark's "-e test_ids" launch extra.
+    valid_test_ids = [
+        'gl_alu',
+        'gl_alu_off',
+        'gl_blending',
+        'gl_blending_off',
+        'gl_driver',
+        'gl_driver_off',
+        'gl_fill',
+        'gl_fill_off',
+        'gl_manhattan',
+        'gl_manhattan_off',
+        'gl_trex',
+        'gl_trex_battery',
+        'gl_trex_off',
+        'gl_trex_qmatch',
+        'gl_trex_qmatch_highp',
+    ]
+
+    # "am start" extras used to force a specific render resolution.
+    supported_resolutions = {
+        '720p': {
+            '-ei -w': 1280,
+            '-ei -h': 720,
+        },
+        '1080p': {
+            '-ei -w': 1920,
+            '-ei -h': 1080,
+        }
+    }
+
+    # Maximum time, in seconds, allowed for a single benchmark invocation.
+    run_timeout = 3 * 60
+
+    parameters = [
+        Parameter('times', kind=int, default=1, constraint=lambda x: x > 0,
+                  description=('Specifies the number of times the benchmark will be run in a "tight '
+                               'loop", i.e. without performaing setup/teardown inbetween.')),
+        Parameter('resolution', default=None, allowed_values=['720p', '1080p', '720', '1080'],
+                  description=('Explicitly specifies the resultion under which the benchmark will '
+                               'be run. If not specfied, device\'s native resoution will used.')),
+        Parameter('test_id', default='gl_manhattan_off', allowed_values=valid_test_ids,
+                  description='ID of the GFXBench test to be run.')
+    ]
+
+    aliases = [
+        Alias('manhattan', test_id='gl_manhattan'),
+        Alias('manhattan_off', test_id='gl_manhattan_off'),
+        Alias('manhattan_offscreen', test_id='gl_manhattan_off'),
+    ]
+
+    def setup(self, context):
+        """Build the launch command and start the logcat monitor thread."""
+        super(GlbCorp, self).setup(context)
+        self.command = self._build_command()
+        self.monitor = GlbRunMonitor(self.device)
+        self.monitor.start()
+
+    def start_activity(self):
+        # Unlike with most other APK workloads, we're invoking the use case
+        # directly by starting the activity with appropriate parameters on the
+        # command line during execution, so we don't need to start activity
+        # during setup.
+        pass
+
+    def run(self, context):
+        """Launch the benchmark ``times`` times, waiting for each run to end."""
+        for _ in xrange(self.times):
+            result = self.device.execute(self.command, timeout=self.run_timeout)
+            if 'FAILURE' in result:
+                raise WorkloadError(result)
+            else:
+                self.logger.debug(result)
+            time.sleep(DELAY)
+            # Block until the monitor sees the Runner's "finished" logcat line.
+            self.monitor.wait_for_run_end(self.run_timeout)
+
+    def update_result(self, context):  # NOQA
+        """Parse JSON result blobs out of the logcat dump and report metrics.
+
+        Each result blob spans several logcat lines; the logcat preamble is
+        stripped from each and the remainder re-assembled into a JSON
+        document. A score/fps metric pair is reported per sub-result, with a
+        ``_<iteration>`` suffix for iterations after the first. All parsed
+        blobs are also written to ``glb-results.json`` in the output directory.
+        """
+        super(GlbCorp, self).update_result(context)
+        self.monitor.stop()
+        iteration = 0
+        results = []
+        with open(self.logcat_log) as fh:
+            try:
+                line = fh.next()
+                result_lines = []
+                while True:
+                    if self.result_start_regex.search(line):
+                        result_lines.append('{')
+                        line = fh.next()
+                        while self.preamble_regex.search(line):
+                            result_lines.append(self.preamble_regex.sub('', line))
+                            line = fh.next()
+                        try:
+                            result = json.loads(''.join(result_lines))
+                            results.append(result)
+                            if iteration:
+                                suffix = '_{}'.format(iteration)
+                            else:
+                                suffix = ''
+                            for sub_result in result['results']:
+                                frames = sub_result['score']
+                                # elapsed_time is reported in milliseconds.
+                                elapsed_time = sub_result['elapsed_time'] / 1000
+                                fps = frames / elapsed_time
+                                context.result.add_metric('score' + suffix, frames, 'frames')
+                                context.result.add_metric('fps' + suffix, fps)
+                        except ValueError:
+                            self.logger.warning('Could not parse result for iteration {}'.format(iteration))
+                        result_lines = []
+                        iteration += 1
+                    line = fh.next()
+            except StopIteration:
+                pass  # EOF
+        if results:
+            outfile = os.path.join(context.output_directory, 'glb-results.json')
+            with open(outfile, 'wb') as wfh:
+                json.dump(results, wfh, indent=4)
+
+    def _build_command(self):
+        """Construct the ``am start`` command line that launches the test.
+
+        ``-e test_ids`` selects the test; the ``-ei -w``/``-ei -h`` extras
+        force the render resolution when one was requested; ``-W -S`` makes
+        ``am`` wait for the launch and stop any running instance first.
+        """
+        command_params = []
+        command_params.append('-e test_ids "{}"'.format(self.test_id))
+        if self.resolution:
+            # Allow '720'/'1080' as shorthand for '720p'/'1080p'.
+            if not self.resolution.endswith('p'):
+                self.resolution += 'p'
+            for k, v in self.supported_resolutions[self.resolution].iteritems():
+                command_params.append('{} {}'.format(k, v))
+        return 'am start -W -S -n {}/{} {}'.format(self.package,
+                                                   self.activity,
+                                                   ' '.join(command_params))
+
+
+class GlbRunMonitor(threading.Thread):
+
+ regex = re.compile(r'I/Runner\s+\(\s*\d+\): finished:')
+
+ def __init__(self, device):
+ super(GlbRunMonitor, self).__init__()
+ self.device = device
+ self.daemon = True
+ self.run_ended = threading.Event()
+ self.stop_event = threading.Event()
+ if self.device.adb_name:
+ self.command = ['adb', '-s', self.device.adb_name, 'logcat']
+ else:
+ self.command = ['adb', 'logcat']
+
+ def run(self):
+ proc = subprocess.Popen(self.command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ while not self.stop_event.is_set():
+ if self.run_ended.is_set():
+ time.sleep(DELAY)
+ else:
+ ready, _, _ = select.select([proc.stdout, proc.stderr], [], [], 2)
+ if ready:
+ line = ready[0].readline()
+ if self.regex.search(line):
+ self.run_ended.set()
+
+ def stop(self):
+ self.stop_event.set()
+ self.join()
+
+ def wait_for_run_end(self, timeout):
+ self.run_ended.wait(timeout)
+ self.run_ended.clear()
+
diff --git a/wlauto/workloads/glbenchmark/__init__.py b/wlauto/workloads/glbenchmark/__init__.py
new file mode 100644
index 00000000..9710c206
--- /dev/null
+++ b/wlauto/workloads/glbenchmark/__init__.py
@@ -0,0 +1,158 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=E1101,E0203
+import re
+import os
+
+from wlauto import AndroidUiAutoBenchmark, Parameter, Alias
+from wlauto.exceptions import ConfigError
+import wlauto.common.android.resources
+
+# These maps provide user-friendly aliases for the most common options.
+USE_CASE_MAP = {
+ 'egypt': 'GLBenchmark 2.5 Egypt HD',
+ 'egypt-classic': 'GLBenchmark 2.1 Egypt Classic',
+ 't-rex': 'GLBenchmark 2.7 T-Rex HD',
+}
+
+VARIANT_MAP = {
+ 'onscreen': 'C24Z16 Onscreen Auto',
+ 'offscreen': 'C24Z16 Offscreen Auto',
+}
+
+
+class Glb(AndroidUiAutoBenchmark):
+
+ name = 'glbenchmark'
+ description = """
+ Measures the graphics performance of Android devices by testing
+ the underlying OpenGL (ES) implementation.
+
+ http://gfxbench.com/about-gfxbench.jsp
+
+ From the website:
+
+ The benchmark includes console-quality high-level 3D animations
+ (T-Rex HD and Egypt HD) and low-level graphics measurements.
+
+ With high vertex count and complex effects such as motion blur, parallax
+ mapping and particle systems, the engine of GFXBench stresses GPUs in order
+ provide users a realistic feedback on their device.
+
+ """
+ activity = 'com.glbenchmark.activities.GLBenchmarkDownloaderActivity'
+ view = 'com.glbenchmark.glbenchmark27/com.glbenchmark.activities.GLBRender'
+
+ packages = {
+ '2.7.0': 'com.glbenchmark.glbenchmark27',
+ '2.5.1': 'com.glbenchmark.glbenchmark25',
+ }
+ # If usecase is not specified the default usecase is the first supported usecase alias
+ # for the specified version.
+ supported_usecase_aliases = {
+ '2.7.0': ['t-rex', 'egypt'],
+ '2.5.1': ['egypt-classic', 'egypt'],
+ }
+
+ default_iterations = 1
+ install_timeout = 500
+
+ regex = re.compile(r'GLBenchmark (metric|FPS): (.*)')
+
+ parameters = [
+ Parameter('version', default='2.7.0', allowed_values=['2.7.0', '2.5.1'],
+ description=('Specifies which version of the benchmark to run (different versions '
+ 'support different use cases).')),
+ Parameter('use_case', default=None,
+ description="""Specifies which usecase to run, as listed in the benchmark menu; e.g.
+ ``'GLBenchmark 2.5 Egypt HD'``. For convenience, two aliases are provided
+ for the most common use cases: ``'egypt'`` and ``'t-rex'``. These could
+ be use instead of the full use case title. For version ``'2.7.0'`` it defaults
+ to ``'t-rex'``, for version ``'2.5.1'`` it defaults to ``'egypt-classic'``.
+ """),
+ Parameter('variant', default='onscreen',
+ description="""Specifies which variant of the use case to run, as listed in the benchmarks
+ menu (small text underneath the use case name); e.g. ``'C24Z16 Onscreen Auto'``.
+ For convenience, two aliases are provided for the most common variants:
+ ``'onscreen'`` and ``'offscreen'``. These may be used instead of full variant
+ names.
+ """),
+ Parameter('times', kind=int, default=1,
+ description=('Specfies the number of times the benchmark will be run in a "tight '
+ 'loop", i.e. without performaing setup/teardown inbetween.')),
+ Parameter('timeout', kind=int, default=200,
+ description="""Specifies how long, in seconds, UI automation will wait for results screen to
+ appear before assuming something went wrong.
+ """),
+ ]
+
+ aliases = [
+ Alias('glbench'),
+ Alias('egypt', use_case='egypt'),
+ Alias('t-rex', use_case='t-rex'),
+ Alias('egypt_onscreen', use_case='egypt', variant='onscreen'),
+ Alias('t-rex_onscreen', use_case='t-rex', variant='onscreen'),
+ Alias('egypt_offscreen', use_case='egypt', variant='offscreen'),
+ Alias('t-rex_offscreen', use_case='t-rex', variant='offscreen'),
+ ]
+
+ def __init__(self, device, **kwargs):
+ super(Glb, self).__init__(device, **kwargs)
+ self.uiauto_params['version'] = self.version
+
+ if self.use_case is None:
+ self.use_case = self.supported_usecase_aliases[self.version][0]
+ if self.use_case.lower() in USE_CASE_MAP:
+ if self.use_case not in self.supported_usecase_aliases[self.version]:
+ raise ConfigError('usecases {} is not supported in version {}'.format(self.use_case, self.version))
+ self.use_case = USE_CASE_MAP[self.use_case.lower()]
+ self.uiauto_params['use_case'] = self.use_case.replace(' ', '_')
+
+ if self.variant.lower() in VARIANT_MAP:
+ self.variant = VARIANT_MAP[self.variant.lower()]
+ self.uiauto_params['variant'] = self.variant.replace(' ', '_')
+
+ self.uiauto_params['iterations'] = self.times
+ self.run_timeout = 4 * 60 * self.times
+
+ self.uiauto_params['timeout'] = self.timeout
+ self.package = self.packages[self.version]
+
+ def init_resources(self, context):
+ self.apk_file = context.resolver.get(wlauto.common.android.resources.ApkFile(self), version=self.version)
+ self.uiauto_file = context.resolver.get(wlauto.common.android.resources.JarFile(self))
+ self.device_uiauto_file = self.device.path.join(self.device.working_directory,
+ os.path.basename(self.uiauto_file))
+ if not self.uiauto_package:
+ self.uiauto_package = os.path.splitext(os.path.basename(self.uiauto_file))[0]
+
+ def update_result(self, context):
+ super(Glb, self).update_result(context)
+ match_count = 0
+ with open(self.logcat_log) as fh:
+ for line in fh:
+ match = self.regex.search(line)
+ if match:
+ metric = match.group(1)
+ value, units = match.group(2).split()
+ value = value.replace('*', '')
+ if metric == 'metric':
+ metric = 'Frames'
+ units = 'frames'
+ metric = metric + '_' + str(match_count // 2)
+ context.result.add_metric(metric, value, units)
+ match_count += 1
+
diff --git a/wlauto/workloads/glbenchmark/com.arm.wlauto.uiauto.glb.jar b/wlauto/workloads/glbenchmark/com.arm.wlauto.uiauto.glb.jar
new file mode 100644
index 00000000..57d0fb1e
--- /dev/null
+++ b/wlauto/workloads/glbenchmark/com.arm.wlauto.uiauto.glb.jar
Binary files differ
diff --git a/wlauto/workloads/glbenchmark/uiauto/build.sh b/wlauto/workloads/glbenchmark/uiauto/build.sh
new file mode 100755
index 00000000..820eae37
--- /dev/null
+++ b/wlauto/workloads/glbenchmark/uiauto/build.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+class_dir=bin/classes/com/arm/wlauto/uiauto
+# Locate the prebuilt BaseUiAutomation.class shipped with the installed
+# wlauto package and copy it into ant's class output directory, so the
+# uiautomator jar can link against it.
+base_class=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')"`
+mkdir -p $class_dir
+cp $base_class $class_dir
+
+ant build
+
+# On a successful build, install the jar next to the workload's __init__.py.
+if [[ -f bin/com.arm.wlauto.uiauto.glb.jar ]]; then
+    cp bin/com.arm.wlauto.uiauto.glb.jar ..
+fi
diff --git a/wlauto/workloads/glbenchmark/uiauto/build.xml b/wlauto/workloads/glbenchmark/uiauto/build.xml
new file mode 100644
index 00000000..54ccc98b
--- /dev/null
+++ b/wlauto/workloads/glbenchmark/uiauto/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="com.arm.wlauto.uiauto.glb" default="help">
+
+ <!-- The local.properties file is created and updated by the 'android' tool.
+ It contains the path to the SDK. It should *NOT* be checked into
+ Version Control Systems. -->
+ <property file="local.properties" />
+
+ <!-- The ant.properties file can be created by you. It is only edited by the
+ 'android' tool to add properties to it.
+ This is the place to change some Ant specific build properties.
+ Here are some properties you may want to change/update:
+
+ source.dir
+ The name of the source directory. Default is 'src'.
+ out.dir
+ The name of the output directory. Default is 'bin'.
+
+ For other overridable properties, look at the beginning of the rules
+ files in the SDK, at tools/ant/build.xml
+
+ Properties related to the SDK location or the project target should
+ be updated using the 'android' tool with the 'update' action.
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems.
+
+ -->
+ <property file="ant.properties" />
+
+ <!-- if sdk.dir was not set from one of the property file, then
+ get it from the ANDROID_HOME env var.
+ This must be done before we load project.properties since
+ the proguard config can use sdk.dir -->
+ <property environment="env" />
+ <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+ <isset property="env.ANDROID_HOME" />
+ </condition>
+
+ <!-- The project.properties file is created and updated by the 'android'
+ tool, as well as ADT.
+
+ This contains project specific properties such as project target, and library
+ dependencies. Lower level build properties are stored in ant.properties
+ (or in .classpath for Eclipse projects).
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems. -->
+ <loadproperties srcFile="project.properties" />
+
+ <!-- quick check on sdk.dir -->
+ <fail
+ message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+ unless="sdk.dir"
+ />
+
+ <!--
+ Import per project custom build rules if present at the root of the project.
+ This is the place to put custom intermediary targets such as:
+ -pre-build
+ -pre-compile
+ -post-compile (This is typically used for code obfuscation.
+ Compiled code location: ${out.classes.absolute.dir}
+ If this is not done in place, override ${out.dex.input.absolute.dir})
+ -post-package
+ -post-build
+ -pre-clean
+ -->
+ <import file="custom_rules.xml" optional="true" />
+
+ <!-- Import the actual build file.
+
+ To customize existing targets, there are two options:
+ - Customize only one target:
+ - copy/paste the target into this file, *before* the
+ <import> task.
+ - customize it to your needs.
+ - Customize the whole content of build.xml
+ - copy/paste the content of the rules files (minus the top node)
+ into this file, replacing the <import> task.
+ - customize to your needs.
+
+ ***********************
+ ****** IMPORTANT ******
+ ***********************
+ In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+ in order to avoid having your file be overridden by tools such as "android update project"
+ -->
+ <!-- version-tag: VERSION_TAG -->
+ <import file="${sdk.dir}/tools/ant/uibuild.xml" />
+
+</project>
diff --git a/wlauto/workloads/glbenchmark/uiauto/project.properties b/wlauto/workloads/glbenchmark/uiauto/project.properties
new file mode 100644
index 00000000..a3ee5ab6
--- /dev/null
+++ b/wlauto/workloads/glbenchmark/uiauto/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=android-17
diff --git a/wlauto/workloads/glbenchmark/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java b/wlauto/workloads/glbenchmark/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
new file mode 100644
index 00000000..2c244d64
--- /dev/null
+++ b/wlauto/workloads/glbenchmark/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
@@ -0,0 +1,164 @@
+/* Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+package com.arm.wlauto.uiauto.glb;
+
+import java.lang.Runtime;
+import java.lang.Process;
+import java.util.concurrent.TimeUnit;
+
+import android.app.Activity;
+import android.os.Bundle;
+import android.util.Log;
+import android.view.KeyEvent;
+
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+
+import com.arm.wlauto.uiauto.BaseUiAutomation;
+
+public class UiAutomation extends BaseUiAutomation {
+
+    public static String TAG = "glb";
+    // Upper bound on list scrolls when searching for a test entry, to avoid
+    // scrolling forever if the entry is missing.
+    public static int maxScrolls = 15;
+
+    /** Entry point: run the configured GLBenchmark use case the requested
+     *  number of times and log each result screen.
+     *
+     *  Parameters come from the Python side; spaces in use case and variant
+     *  names are encoded as underscores in transit.
+     */
+    public void runUiAutomation() throws Exception {
+        Bundle parameters = getParams();
+        String version = parameters.getString("version");
+        String useCase = parameters.getString("use_case").replace('_', ' ');
+        String variant = parameters.getString("variant").replace('_', ' ');
+        int iterations = Integer.parseInt(parameters.getString("iterations"));
+        int testTimeoutSeconds = Integer.parseInt(parameters.getString("timeout"));
+        if (iterations < 1)
+            iterations = 1;
+
+        // First iteration: navigate to the test, run it, log the results.
+        goToPreformanceTestsMenu();
+        selectUseCase(version, useCase, variant);
+        hitStart();
+        waitForResults(version, useCase, testTimeoutSeconds);
+        extractResults();
+        iterations -= 1;
+
+        // Subsequent iterations: the previous selection is remembered by the
+        // app, so only navigate back and start again.
+        while (iterations > 0) {
+            getUiDevice().pressBack();
+            goToPreformanceTestsMenu();
+            hitStart();
+            waitForResults(version, useCase, testTimeoutSeconds);
+            extractResults();
+            iterations -= 1;
+        }
+
+        Bundle status = new Bundle();
+        getAutomationSupport().sendStatus(Activity.RESULT_OK, status);
+    }
+
+    /** Open the "Performance Tests" menu from the app's main screen. */
+    public void goToPreformanceTestsMenu() throws Exception {
+        UiSelector selector = new UiSelector();
+        UiObject choosePerfTest = new UiObject(selector.text("Performance Tests")
+                                               .className("android.widget.TextView"));
+        choosePerfTest.clickAndWaitForNewWindow();
+    }
+
+    /** Scroll the test list until the requested entry is visible and select it.
+     *
+     *  In 2.7.0 each onscreen/offscreen variant is its own list entry under
+     *  the use case title; in 2.5.1 the use case is selected first and the
+     *  variant is then chosen by tapping the *other* mode's toggle button off.
+     */
+    public void selectUseCase(String version, String useCase, String variant) throws Exception {
+        UiSelector selector = new UiSelector();
+        UiScrollable testList = new UiScrollable(selector.className("android.widget.ListView"));
+        UiObject useCaseText = new UiObject(selector.className("android.widget.TextView")
+                                                    .text(useCase)
+                               );
+        if (version.equals("2.7.0")){
+            UiObject variantText = useCaseText.getFromParent(selector.className("android.widget.TextView")
+                                                                     .text(variant));
+            int scrolls = 0;
+            // Scroll until the variant entry appears (bounded by maxScrolls).
+            while(!variantText.exists()) {
+                testList.scrollForward();
+                scrolls += 1;
+                if (scrolls >= maxScrolls) {
+                    break;
+                }
+            }
+            variantText.click();
+        }
+        else if (version.equals("2.5.1")){
+            int scrolls = 0;
+            while(!useCaseText.exists()) {
+                testList.scrollForward();
+                scrolls += 1;
+                if (scrolls >= maxScrolls) {
+                    break;
+                }
+            }
+            useCaseText.click();
+            //UiSelector selector = new UiSelector();
+            // Disable the mode we do NOT want; the remaining one stays active.
+            UiObject modeDisableModeButton = null;
+            if (variant.contains("Onscreen"))
+                modeDisableModeButton = new UiObject(selector.text("Offscreen"));
+            else
+                modeDisableModeButton = new UiObject(selector.text("Onscreen"));
+            modeDisableModeButton.click();
+        }
+    }
+
+    /** Tap the "Start" button and wait for the benchmark window to open. */
+    public void hitStart() throws Exception {
+        UiSelector selector = new UiSelector();
+        UiObject startButton = new UiObject(selector.text("Start"));
+        startButton.clickAndWaitForNewWindow();
+    }
+
+    /** Wait (up to {@code timeout} seconds) for the results screen.
+     *
+     *  The screen is identified by a "Results" label in 2.7.0 and by the use
+     *  case title in 2.5.1.
+     */
+    public void waitForResults(String version, String useCase, int timeout) throws Exception {
+        UiSelector selector = new UiSelector();
+        UiObject results = null;
+        if (version.equals("2.7.0"))
+            results = new UiObject(selector.text("Results").className("android.widget.TextView"));
+        else
+            results = new UiObject(selector.text(useCase).className("android.widget.TextView"));
+        Log.v(TAG, "Waiting for results screen.");
+        // On some devices, the results screen sometimes gets "backgrounded" (or
+        // rather, doesn't seem to come to foreground to begin with). This code
+        // attemps to deal with that by explicitly bringing glbench to the
+        // foreground if results screen doesn't appear within testTimeoutSeconds seconds of
+        // starting GLB.
+        if (!results.waitForExists(TimeUnit.SECONDS.toMillis(timeout))) {
+            Log.v(TAG, "Results screen not found. Attempting to bring to foreground.");
+            String[] commandLine = {"am", "start",
+                                    "-a", "android.intent.action.MAIN",
+                                    "-c", "android.intent.category.LAUNCHER",
+                                    "-n", "com.glbenchmark.glbenchmark27/com.glbenchmark.activities.GLBenchmarkDownloaderActivity"};
+            Process proc = Runtime.getRuntime().exec(commandLine);
+            proc.waitFor();
+            Log.v(TAG, String.format("am start exit value: %d", proc.exitValue()));
+            if (!results.exists()) {
+                throw new UiObjectNotFoundException("Could not find results screen.");
+            }
+        }
+        Log.v(TAG, "Results screen found.");
+    }
+
+    /** Log the metric and FPS values shown on the results screen; these lines
+     *  are parsed out of logcat by the Python side (Glb.update_result). */
+    public void extractResults() throws Exception {
+        Log.v(TAG, "Extracting results.");
+        sleep(2); // wait for the results screen to fully load.
+        UiSelector selector = new UiSelector();
+        UiObject fpsText = new UiObject(selector.className("android.widget.TextView")
+                                                .textContains("fps")
+                           );
+        // The sibling TextView at index 0 holds the headline metric value.
+        UiObject otherText = fpsText.getFromParent(selector.className("android.widget.TextView").index(0));
+
+        Log.v(TAG, String.format("GLBenchmark metric: %s", otherText.getText().replace('\n', ' ')));
+        Log.v(TAG, String.format("GLBenchmark FPS: %s", fpsText.getText().replace('\n', ' ')));
+    }
+}
diff --git a/wlauto/workloads/gunbros2/__init__.py b/wlauto/workloads/gunbros2/__init__.py
new file mode 100644
index 00000000..be33dc54
--- /dev/null
+++ b/wlauto/workloads/gunbros2/__init__.py
@@ -0,0 +1,42 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=R0801
+import os
+import time
+import tarfile
+import shutil
+
+from wlauto import settings
+from wlauto.common.android.workload import GameWorkload
+from wlauto.exceptions import WorkloadError, DeviceError
+from wlauto.utils.misc import check_output
+from wlauto.common.resources import ExtensionAsset
+
+
+class GunBros(GameWorkload):
+
+    name = 'gunbros2'
+    description = """
+    Gun Bros. 2 game.
+
+    """
+    package = 'com.glu.gunbros2'
+    activity = 'com.google.android.vending.expansion.downloader_impl.DownloaderActivity'
+    # Game data archive; extracted under ondevice_asset_root on the device.
+    asset_file = 'com.glu.gunbros2.tar.gz'
+    ondevice_asset_root = '/data'
+    # Seconds to wait for the game to finish loading after launch.
+    loading_time = 20
+    install_timeout = 500
+
diff --git a/wlauto/workloads/homescreen/__init__.py b/wlauto/workloads/homescreen/__init__.py
new file mode 100644
index 00000000..729054be
--- /dev/null
+++ b/wlauto/workloads/homescreen/__init__.py
@@ -0,0 +1,42 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=E1101
+
+import time
+
+from wlauto import Workload, Parameter
+
+
+class HomeScreen(Workload):
+
+    name = 'homescreen'
+    description = """
+    A workload that goes to the home screen and idles for the the
+    specified duration.
+
+    """
+
+    parameters = [
+        Parameter('duration', kind=int, default=20,
+                  description='Specifies the duration, in seconds, of this workload.'),
+    ]
+
+    def setup(self, context):
+        """Clear logcat and navigate to the home screen."""
+        self.device.clear_logcat()
+        self.device.execute('input keyevent 3')  # press the home key
+
+    def run(self, context):
+        """Idle on the home screen for the configured duration."""
+        time.sleep(self.duration)
diff --git a/wlauto/workloads/idle/__init__.py b/wlauto/workloads/idle/__init__.py
new file mode 100644
index 00000000..03e72470
--- /dev/null
+++ b/wlauto/workloads/idle/__init__.py
@@ -0,0 +1,56 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=E1101
+
+import time
+
+from wlauto import Workload, Parameter
+from wlauto.exceptions import WorkloadError
+
+
+class IdleWorkload(Workload):
+
+    name = 'idle'
+    description = """
+    Stop Android and sleep for the specified duration before restarting it.
+
+    .. note:: This workload requires the device to be rooted.
+
+    """
+
+    parameters = [
+        Parameter('duration', kind=int, default=20,
+                  description='Specifies the duration, in seconds, of this workload.'),
+    ]
+
+    def setup(self, context):
+        # Stopping and starting the Android runtime requires root.
+        if not self.device.is_rooted:
+            raise WorkloadError('Idle workload requires the device to be rooted.')
+
+    def run(self, context):
+        # Stop Android, idle for the requested time, then restart it; issued
+        # as a single shell command so it survives the runtime being stopped.
+        self.device.execute('stop && sleep {} && start'.format(self.duration), as_root=True)
+
+    def update_result(self, context):
+        # This workload produces no metrics.
+        pass
+
+    def teardown(self, context):
+        self.logger.debug('Waiting for Android restart to complete...')
+        # Wait for the boot animation to start and then to finish.
+        while self.device.execute('getprop init.svc.bootanim').strip() == 'stopped':
+            time.sleep(0.2)
+        while self.device.execute('getprop init.svc.bootanim').strip() == 'running':
+            time.sleep(1)
+
diff --git a/wlauto/workloads/ironman/__init__.py b/wlauto/workloads/ironman/__init__.py
new file mode 100644
index 00000000..1bbef415
--- /dev/null
+++ b/wlauto/workloads/ironman/__init__.py
@@ -0,0 +1,35 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=R0801
+import os
+import time
+
+from wlauto import GameWorkload
+from wlauto.exceptions import WorkloadError, DeviceError
+from wlauto.utils.misc import check_output
+
+
+class IronMan(GameWorkload):
+
+    name = 'ironman3'
+    description = """
+    Iron Man 3 game.
+
+    """
+    package = 'com.gameloft.android.ANMP.GloftIMHM'
+    # Relative activity name; resolved against the package.
+    activity = '.GameActivity'
+
+    # 'obb:' prefix: archive is deployed to the APK expansion (obb) area.
+    asset_file = 'obb:com.gameloft.android.ANMP.GloftIMHM.tar.gz'
diff --git a/wlauto/workloads/ironman/revent_files/Nexus10.run.revent b/wlauto/workloads/ironman/revent_files/Nexus10.run.revent
new file mode 100644
index 00000000..96955bad
--- /dev/null
+++ b/wlauto/workloads/ironman/revent_files/Nexus10.run.revent
Binary files differ
diff --git a/wlauto/workloads/ironman/revent_files/Nexus10.setup.revent b/wlauto/workloads/ironman/revent_files/Nexus10.setup.revent
new file mode 100644
index 00000000..8cc49d3a
--- /dev/null
+++ b/wlauto/workloads/ironman/revent_files/Nexus10.setup.revent
Binary files differ
diff --git a/wlauto/workloads/krazykart/__init__.py b/wlauto/workloads/krazykart/__init__.py
new file mode 100644
index 00000000..055816a7
--- /dev/null
+++ b/wlauto/workloads/krazykart/__init__.py
@@ -0,0 +1,28 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto import GameWorkload
+
+
+class KrazyKartRacing(GameWorkload):
+
+    name = 'krazykart'
+    description = """
+    Krazy Kart Racing game.
+
+    """
+    package = 'com.polarbit.sg2.krazyracers'
+    # Relative activity name; resolved against the package.
+    activity = '.krazyracers'
diff --git a/wlauto/workloads/krazykart/revent_files/.empty b/wlauto/workloads/krazykart/revent_files/.empty
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/wlauto/workloads/krazykart/revent_files/.empty
diff --git a/wlauto/workloads/linpack/__init__.py b/wlauto/workloads/linpack/__init__.py
new file mode 100644
index 00000000..3f728ab9
--- /dev/null
+++ b/wlauto/workloads/linpack/__init__.py
@@ -0,0 +1,64 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=E1101,E0203
+
+import os
+import re
+
+from wlauto import AndroidUiAutoBenchmark, Parameter
+
+
+class Linpack(AndroidUiAutoBenchmark):
+
+ name = 'linpack'
+ description = """
+ The LINPACK Benchmarks are a measure of a system's floating point computing
+ power.
+
+ http://en.wikipedia.org/wiki/LINPACK_benchmarks
+
+ From the article:
+
+ Introduced by Jack Dongarra, they measure how fast a computer solves
+ a dense n by n system of linear equations Ax = b, which is a common task in
+ engineering.
+
+ """
+ package = 'com.greenecomputing.linpackpro'
+ activity = '.Linpack'
+ summary_metrics = ['Linpack ST', 'Linpack MT']
+ regex = re.compile(r'LINPACK RESULT: (?P<type>\w+) (?P<value>\S+)')
+
+ parameters = [
+ Parameter('output_file', default=None,
+ description='On-device output file path.'),
+ ]
+
+ def __init__(self, device, **kwargs):
+ super(Linpack, self).__init__(device, **kwargs)
+ if self.output_file is None:
+ self.output_file = os.path.join(self.device.working_directory, 'linpack.txt')
+ self.uiauto_params['output_file'] = self.output_file
+
+ def update_result(self, context):
+ super(Linpack, self).update_result(context)
+ with open(self.logcat_log) as fh:
+ for line in fh:
+ match = self.regex.search(line)
+ if match:
+ metric = 'Linpack ' + match.group('type')
+ value = float(match.group('value'))
+ context.result.add_metric(metric, value, 'MFLOPS')
diff --git a/wlauto/workloads/linpack/com.arm.wlauto.uiauto.linpack.jar b/wlauto/workloads/linpack/com.arm.wlauto.uiauto.linpack.jar
new file mode 100644
index 00000000..8835bdee
--- /dev/null
+++ b/wlauto/workloads/linpack/com.arm.wlauto.uiauto.linpack.jar
Binary files differ
diff --git a/wlauto/workloads/linpack/uiauto/build.sh b/wlauto/workloads/linpack/uiauto/build.sh
new file mode 100755
index 00000000..5ff5da2e
--- /dev/null
+++ b/wlauto/workloads/linpack/uiauto/build.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+class_dir=bin/classes/com/arm/wlauto/uiauto
+base_class=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')"`
+mkdir -p $class_dir
+cp $base_class $class_dir
+
+ant build
+
+if [[ -f bin/com.arm.wlauto.uiauto.linpack.jar ]]; then
+ cp bin/com.arm.wlauto.uiauto.linpack.jar ..
+fi
diff --git a/wlauto/workloads/linpack/uiauto/build.xml b/wlauto/workloads/linpack/uiauto/build.xml
new file mode 100644
index 00000000..a532fd35
--- /dev/null
+++ b/wlauto/workloads/linpack/uiauto/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="com.arm.wlauto.uiauto.linpack" default="help">
+
+ <!-- The local.properties file is created and updated by the 'android' tool.
+ It contains the path to the SDK. It should *NOT* be checked into
+ Version Control Systems. -->
+ <property file="local.properties" />
+
+ <!-- The ant.properties file can be created by you. It is only edited by the
+ 'android' tool to add properties to it.
+ This is the place to change some Ant specific build properties.
+ Here are some properties you may want to change/update:
+
+ source.dir
+ The name of the source directory. Default is 'src'.
+ out.dir
+ The name of the output directory. Default is 'bin'.
+
+ For other overridable properties, look at the beginning of the rules
+ files in the SDK, at tools/ant/build.xml
+
+ Properties related to the SDK location or the project target should
+ be updated using the 'android' tool with the 'update' action.
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems.
+
+ -->
+ <property file="ant.properties" />
+
+ <!-- if sdk.dir was not set from one of the property file, then
+ get it from the ANDROID_HOME env var.
+ This must be done before we load project.properties since
+ the proguard config can use sdk.dir -->
+ <property environment="env" />
+ <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+ <isset property="env.ANDROID_HOME" />
+ </condition>
+
+ <!-- The project.properties file is created and updated by the 'android'
+ tool, as well as ADT.
+
+ This contains project specific properties such as project target, and library
+ dependencies. Lower level build properties are stored in ant.properties
+ (or in .classpath for Eclipse projects).
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems. -->
+ <loadproperties srcFile="project.properties" />
+
+ <!-- quick check on sdk.dir -->
+ <fail
+ message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+ unless="sdk.dir"
+ />
+
+ <!--
+ Import per project custom build rules if present at the root of the project.
+ This is the place to put custom intermediary targets such as:
+ -pre-build
+ -pre-compile
+ -post-compile (This is typically used for code obfuscation.
+ Compiled code location: ${out.classes.absolute.dir}
+ If this is not done in place, override ${out.dex.input.absolute.dir})
+ -post-package
+ -post-build
+ -pre-clean
+ -->
+ <import file="custom_rules.xml" optional="true" />
+
+ <!-- Import the actual build file.
+
+ To customize existing targets, there are two options:
+ - Customize only one target:
+ - copy/paste the target into this file, *before* the
+ <import> task.
+ - customize it to your needs.
+ - Customize the whole content of build.xml
+ - copy/paste the content of the rules files (minus the top node)
+ into this file, replacing the <import> task.
+ - customize to your needs.
+
+ ***********************
+ ****** IMPORTANT ******
+ ***********************
+ In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+ in order to avoid having your file be overridden by tools such as "android update project"
+ -->
+ <!-- version-tag: VERSION_TAG -->
+ <import file="${sdk.dir}/tools/ant/uibuild.xml" />
+
+</project>
diff --git a/wlauto/workloads/linpack/uiauto/project.properties b/wlauto/workloads/linpack/uiauto/project.properties
new file mode 100644
index 00000000..a3ee5ab6
--- /dev/null
+++ b/wlauto/workloads/linpack/uiauto/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=android-17
diff --git a/wlauto/workloads/linpack/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java b/wlauto/workloads/linpack/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
new file mode 100644
index 00000000..de6c39ef
--- /dev/null
+++ b/wlauto/workloads/linpack/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
@@ -0,0 +1,59 @@
+/* Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+package com.arm.wlauto.uiauto.linpack;
+
+import java.util.concurrent.TimeUnit;
+
+import android.app.Activity;
+import android.os.Bundle;
+import android.util.Log;
+
+// Import the uiautomator libraries
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+import com.arm.wlauto.uiauto.BaseUiAutomation;
+
+
+public class UiAutomation extends BaseUiAutomation {
+
+ public static String TAG = "linpack";
+
+ public void runUiAutomation() throws Exception{
+ UiSelector selector = new UiSelector();
+ UiObject runSingleButton = new UiObject(selector.text("Run Single Thread"));
+ runSingleButton.click();
+ runSingleButton.waitUntilGone(500);
+ runSingleButton.waitForExists(TimeUnit.SECONDS.toMillis(30));
+
+ UiObject mflops = new UiObject(new UiSelector().className("android.widget.TextView").instance(2));
+ Log.v(TAG, String.format("LINPACK RESULT: ST %s", mflops.getText()));
+
+ UiObject runMultiButton = new UiObject(selector.text("Run Multi-Thread"));
+ runMultiButton.click();
+ runMultiButton.waitUntilGone(500);
+ runMultiButton.waitForExists(TimeUnit.SECONDS.toMillis(30));
+
+ Log.v(TAG, String.format("LINPACK RESULT: MT %s", mflops.getText()));
+
+ Bundle status = new Bundle();
+ getAutomationSupport().sendStatus(Activity.RESULT_OK, status);
+ }
+
+}
diff --git a/wlauto/workloads/manual/__init__.py b/wlauto/workloads/manual/__init__.py
new file mode 100644
index 00000000..344a71fb
--- /dev/null
+++ b/wlauto/workloads/manual/__init__.py
@@ -0,0 +1,105 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# pylint: disable=E1101,W0201,E0203
+import os
+import time
+
+from wlauto import Workload, Parameter
+from wlauto.exceptions import ConfigError
+from wlauto.utils.misc import getch
+from wlauto.utils.types import boolean
+
+
+class ManualWorkloadConfig(object):
+
+ default_duration = 30
+
+ def __init__(self,
+ duration=None, # Seconds
+ user_triggered=None,
+ view=None,
+ enable_logcat=True
+ ):
+ self.user_triggered = user_triggered if user_triggered is not None else (False if duration else True)
+ self.duration = duration or (None if self.user_triggered else self.default_duration)
+ self.view = view
+ self.enable_logcat = enable_logcat
+
+
+class ManualWorkload(Workload):
+
+ name = 'manual'
+ description = """
+ Yields control to the user, either for a fixed period or based on user input, to perform
+    custom operations on the device, which workload automation does not know about.
+
+ """
+
+ parameters = [
+ Parameter('duration', kind=int, default=None,
+ description=('Control of the devices is yielded for the duration (in seconds) specified. '
+ 'If not specified, ``user_triggered`` is assumed.')),
+ Parameter('user_triggered', kind=boolean, default=None,
+ description="""If ``True``, WA will wait for user input after starting the workload;
+ otherwise fixed duration is expected. Defaults to ``True`` if ``duration``
+ is not specified, and ``False`` otherwise.
+ """),
+ Parameter('view', default='SurfaceView',
+ description="""Specifies the View of the workload. This enables instruments that require a
+ View to be specified, such as the ``fps`` instrument."""),
+ Parameter('enable_logcat', kind=boolean, default=True,
+ description='If ``True``, ``manual`` workload will collect logcat as part of the results.'),
+ ]
+
+ def setup(self, context):
+ self.logger.info('Any setup required by your workload should be done now.')
+ self.logger.info('As soon as you are done hit any key and wait for the message')
+ self.logger.info('"START NOW!" to begin your manual workload.')
+ self.logger.info('')
+ self.logger.info('hit any key to finalize your setup...')
+ getch()
+
+ def run(self, context):
+ self.logger.info('START NOW!')
+ if self.duration:
+ time.sleep(self.duration)
+ elif self.user_triggered:
+ self.logger.info('')
+ self.logger.info('hit any key to end your workload execution...')
+ getch()
+ else:
+ raise ConfigError('Illegal parameters for manual workload')
+ self.logger.info('DONE! your results are now being collected!')
+
+ def update_result(self, context):
+ if self.enable_logcat:
+ logcat_dir = os.path.join(context.output_directory, 'logcat')
+ self.device.dump_logcat(logcat_dir)
+
+ def teardown(self, context):
+ pass
+
+ def validate(self):
+ if self.duration is None:
+ if self.user_triggered is None:
+ self.user_triggered = True
+ elif self.user_triggered is False:
+ self.duration = self.default_duration
+ if self.user_triggered and self.duration:
+ message = 'Manual Workload can either specify duration or be user triggered, but not both'
+ raise ConfigError(message)
+ if not self.user_triggered and not self.duration:
+ raise ConfigError('Either user_triggered must be ``True`` or duration must be > 0.')
+
diff --git a/wlauto/workloads/memcpy/__init__.py b/wlauto/workloads/memcpy/__init__.py
new file mode 100644
index 00000000..54508363
--- /dev/null
+++ b/wlauto/workloads/memcpy/__init__.py
@@ -0,0 +1,76 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=E1101,W0201
+
+import os
+import re
+
+from wlauto import Workload, Parameter
+
+
+THIS_DIR = os.path.dirname(__file__)
+
+
+RESULT_REGEX = re.compile('Total time: ([\d.]+) s.*Bandwidth: ([\d.]+) MB/s', re.S)
+
+
+class MemcpyTest(Workload):
+
+ name = 'memcpy'
+ description = """
+ Runs memcpy in a loop.
+
+ This will run memcpy in a loop for a specified number of times on a buffer
+ of a specified size. Additionally, the affinity of the test can be set to one
+ or more specific cores.
+
+    This workload is single-threaded. It generates no scores or metrics by itself.
+
+ """
+
+ parameters = [
+ Parameter('buffer_size', kind=int, default=1024 * 1024 * 5,
+ description='Specifies the size, in bytes, of the buffer to be copied.'),
+ Parameter('iterations', kind=int, default=1000,
+                  description='Specifies the number of iterations that will be performed.'),
+ Parameter('cpus', kind=list, default=[],
+ description="""A list of integers specifying ordinals of cores to which the affinity
+ of the test process should be set. If not specified, all avaiable cores
+ will be used.
+ """),
+ ]
+
+ def setup(self, context):
+ self.host_binary = os.path.join(THIS_DIR, 'memcpy')
+ if not self.device.is_installed('memcpy'):
+ self.device_binary = self.device.install(self.host_binary)
+ else:
+ self.device_binary = 'memcpy'
+ self.command = '{} -i {} -s {}'.format(self.device_binary, self.iterations, self.buffer_size)
+ if self.cpus:
+ for c in self.cpus:
+ self.command += ' -c {}'.format(c)
+
+ def run(self, context):
+ self.result = self.device.execute(self.command, timeout=300)
+
+ def update_result(self, context):
+ match = RESULT_REGEX.search(self.result)
+ context.result.add_metric('time', float(match.group(1)), 'seconds', lower_is_better=True)
+ context.result.add_metric('bandwidth', float(match.group(2)), 'MB/s')
+
+ def teardown(self, context):
+ pass
diff --git a/wlauto/workloads/memcpy/memcpy b/wlauto/workloads/memcpy/memcpy
new file mode 100755
index 00000000..4af3239a
--- /dev/null
+++ b/wlauto/workloads/memcpy/memcpy
Binary files differ
diff --git a/wlauto/workloads/memcpy/src/build.sh b/wlauto/workloads/memcpy/src/build.sh
new file mode 100755
index 00000000..3638949a
--- /dev/null
+++ b/wlauto/workloads/memcpy/src/build.sh
@@ -0,0 +1,21 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+ndk-build
+
+if [[ $? -eq 0 ]]; then
+ cp libs/armeabi/memcpy ..
+fi
diff --git a/wlauto/workloads/memcpy/src/jni/Android.mk b/wlauto/workloads/memcpy/src/jni/Android.mk
new file mode 100644
index 00000000..77d438e6
--- /dev/null
+++ b/wlauto/workloads/memcpy/src/jni/Android.mk
@@ -0,0 +1,11 @@
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := memcopy.c
+
+LOCAL_LD_LIBS := -lrt
+
+LOCAL_MODULE := memcpy
+
+
+include $(BUILD_EXECUTABLE)
diff --git a/wlauto/workloads/memcpy/src/jni/memcopy.c b/wlauto/workloads/memcpy/src/jni/memcopy.c
new file mode 100644
index 00000000..19f569d3
--- /dev/null
+++ b/wlauto/workloads/memcpy/src/jni/memcopy.c
@@ -0,0 +1,114 @@
+/* Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+#define _GNU_SOURCE
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <sched.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <pthread.h>
+#include <time.h>
+
+const int MAX_CPUS = 8;
+const int DEFAULT_ITERATIONS = 1000;
+const int DEFAULT_BUFFER_SIZE = 1024 * 1024 * 5;
+
+int set_affinity(size_t cpus_size, int* cpus)
+{
+ int i;
+ int mask = 0;
+
+ for(i = 0; i < cpus_size; ++i)
+ {
+ mask |= 1 << cpus[i];
+ }
+
+ return syscall(__NR_sched_setaffinity, 0, sizeof(mask), &mask);
+}
+
+int main(int argc, char** argv)
+{
+ int cpus[MAX_CPUS];
+ int next_cpu = 0;
+ int iterations = DEFAULT_ITERATIONS;
+ int buffer_size = DEFAULT_BUFFER_SIZE;
+
+ int c;
+ while ((c = getopt(argc, argv, "i:c:s:")) != -1)
+ switch (c)
+ {
+ case 'c':
+ cpus[next_cpu++] = atoi(optarg);
+ if (next_cpu == MAX_CPUS)
+ {
+ fprintf(stderr, "Max CPUs exceeded.");
+ abort();
+ }
+ break;
+ case 'i':
+ iterations = atoi(optarg);
+ break;
+ case 's':
+ buffer_size = atoi(optarg);
+ break;
+ default:
+ abort();
+ break;
+ }
+
+ int ret;
+ if (next_cpu != 0)
+ if (ret = set_affinity(next_cpu, cpus))
+ {
+            fprintf(stderr, "sched_setaffinity returned %i.", ret);
+ abort();
+ }
+
+ char* source = malloc(buffer_size);
+ char* dest = malloc(buffer_size);
+
+ struct timespec before, after;
+ if (clock_gettime(CLOCK_MONOTONIC, &before))
+ {
+ fprintf(stderr, "Could not get start time.");
+ abort();
+ }
+
+ int i;
+ for (i = 0; i < iterations; ++i)
+ {
+ memcpy(dest, source, buffer_size);
+ }
+
+ if (clock_gettime(CLOCK_MONOTONIC, &after))
+ {
+ fprintf(stderr, "Could not get end time.");
+ abort();
+ }
+
+ free(dest);
+ free(source);
+
+ long delta_sec = (long)(after.tv_sec - before.tv_sec);
+ long delta_nsec = after.tv_nsec - before.tv_nsec;
+ double delta = (double)delta_sec + delta_nsec / 1e9;
+ printf("Total time: %f s\n", delta);
+ printf("Bandwidth: %f MB/s\n", buffer_size / delta * iterations / 1e6);
+
+ return 0;
+}
diff --git a/wlauto/workloads/nenamark/__init__.py b/wlauto/workloads/nenamark/__init__.py
new file mode 100644
index 00000000..8a9f4a8d
--- /dev/null
+++ b/wlauto/workloads/nenamark/__init__.py
@@ -0,0 +1,58 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+import re
+import time
+
+from wlauto import AndroidBenchmark
+
+
+class Nenamark(AndroidBenchmark):
+
+ name = 'nenamark'
+ description = """
+ NenaMark is an OpenGL-ES 2.0 graphics performance benchmark for Android
+ devices.
+
+ http://nena.se/nenamark_story
+
+ From the website:
+
+ The NenaMark2 benchmark scene averages about 45k triangles, with a span
+ between 26k and 68k triangles. It averages 96 batches per frame and contains
+ about 15 Mb of texture data (non-packed).
+ """
+ package = 'se.nena.nenamark2'
+ activity = 'se.nena.nenamark2.NenaMark2'
+
+ regex = re.compile('.*NenaMark2.*Score.*?([0-9\.]*)fps')
+
+ def run(self, context):
+ time.sleep(5) # wait for nenamark menu to show up
+ self.device.execute('input keyevent 23')
+ time.sleep(120) # wait two minutes for nenamark to complete
+
+ def update_result(self, context):
+ super(Nenamark, self).update_result(context)
+ with open(self.logcat_log) as fh:
+ for line in fh:
+ match = self.regex.search(line)
+ if match:
+ score = match.group(1)
+ context.result.add_metric('nenamark score', score)
+ break
+
diff --git a/wlauto/workloads/peacekeeper/__init__.py b/wlauto/workloads/peacekeeper/__init__.py
new file mode 100644
index 00000000..61c65f7c
--- /dev/null
+++ b/wlauto/workloads/peacekeeper/__init__.py
@@ -0,0 +1,129 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=E1101,W0201,E0203
+import os
+import urllib2
+from HTMLParser import HTMLParser
+
+from wlauto import AndroidUiAutoBenchmark, Parameter
+from wlauto.exceptions import WorkloadError
+
+
+BROWSER_MAP = {
+ 'firefox': {
+ 'package': 'org.mozilla.firefox',
+ 'activity': '.App',
+ },
+ 'chrome': {
+ 'package': 'com.android.chrome',
+ 'activity': 'com.google.android.apps.chrome.Main',
+ },
+}
+
+
+class Peacekeeper(AndroidUiAutoBenchmark):
+
+ name = 'peacekeeper'
+ description = """
+ Peacekeeper is a free and fast browser test that measures a browser's speed.
+
+ .. note::
+
+ This workload requires a network connection as well as support for
+        one of the two currently-supported browsers. Moreover, TC2 has a
+        compatibility issue with Chrome
+
+ """
+ run_timeout = 15 * 60
+
+ parameters = [
+ Parameter('browser', default='firefox', allowed_values=['firefox', 'chrome'],
+ description='The browser to be benchmarked.'),
+ Parameter('output_file', default=None,
+ description="""The result URL of peacekeeper benchmark will be written
+ into this file on device after completion of peacekeeper benchmark.
+ Defaults to peacekeeper.txt in the device's ``working_directory``.
+ """),
+ Parameter('peacekeeper_url', default='http://peacekeeper.futuremark.com/run.action',
+ description='The URL to run the peacekeeper benchmark.'),
+ ]
+
+ def __init__(self, device, **kwargs):
+ super(Peacekeeper, self).__init__(device, **kwargs)
+ self.version = self.browser
+
+ def update_result(self, context):
+ super(Peacekeeper, self).update_result(context)
+ url = None
+
+ # Pull the result page url, which contains the results, from the
+ # peacekeeper.txt file and process it
+ self.device.pull_file(self.output_file, context.output_directory)
+ result_file = os.path.join(context.output_directory, 'peacekeeper.txt')
+ with open(result_file) as fh:
+ for line in fh:
+ url = line
+
+ # Fetch the html page containing the results
+ if not url:
+ raise WorkloadError('The url is empty, error while running peacekeeper benchmark')
+
+ req = urllib2.Request(url)
+ response = urllib2.urlopen(req)
+ result_page = response.read()
+
+ # Parse the HTML content using HTML parser
+ parser = PeacekeeperParser()
+ parser.feed(result_page)
+
+ # Add peacekeeper_score into results file
+ context.result.add_metric('peacekeeper_score', parser.peacekeeper_score)
+
+ def validate(self):
+ if self.output_file is None:
+ self.output_file = os.path.join(self.device.working_directory, 'peacekeeper.txt')
+ if self.browser == 'chrome' and self.device == 'TC2':
+ raise WorkloadError('Chrome not supported on TC2')
+
+ self.uiauto_params['output_file'] = self.output_file
+ self.uiauto_params['browser'] = self.browser
+ self.uiauto_params['peacekeeper_url'] = self.peacekeeper_url
+
+ self.package = BROWSER_MAP[self.browser]['package']
+ self.activity = BROWSER_MAP[self.browser]['activity']
+
+
+class PeacekeeperParser(HTMLParser):
+ def __init__(self):
+ HTMLParser.__init__(self)
+ self.flag = False
+ self.peacekeeper_score = ''
+
+ def handle_starttag(self, tag, attrs):
+ if tag == 'div':
+ for name, value in attrs:
+ if name == 'class' and value == 'resultBarContainer clearfix resultBarSelected':
+ self.flag = True
+ elif self.flag and name == 'class' and value == 'resultBarComment':
+ self.flag = False
+ self.peacekeeper_score = self.peacekeeper_score.split('details')[1]
+
+ def handle_endtag(self, tag):
+ pass
+
+ def handle_data(self, data):
+ if self.flag:
+ self.peacekeeper_score += data.strip()
diff --git a/wlauto/workloads/peacekeeper/com.arm.wlauto.uiauto.peacekeeper.jar b/wlauto/workloads/peacekeeper/com.arm.wlauto.uiauto.peacekeeper.jar
new file mode 100644
index 00000000..4dbd9465
--- /dev/null
+++ b/wlauto/workloads/peacekeeper/com.arm.wlauto.uiauto.peacekeeper.jar
Binary files differ
diff --git a/wlauto/workloads/peacekeeper/uiauto/build.sh b/wlauto/workloads/peacekeeper/uiauto/build.sh
new file mode 100755
index 00000000..96df2690
--- /dev/null
+++ b/wlauto/workloads/peacekeeper/uiauto/build.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+class_dir=bin/classes/com/arm/wlauto/uiauto
+base_class=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')"`
+mkdir -p $class_dir
+cp $base_class $class_dir
+
+ant build
+
+if [[ -f bin/com.arm.wlauto.uiauto.peacekeeper.jar ]]; then
+ cp bin/com.arm.wlauto.uiauto.peacekeeper.jar ..
+fi
diff --git a/wlauto/workloads/peacekeeper/uiauto/build.xml b/wlauto/workloads/peacekeeper/uiauto/build.xml
new file mode 100644
index 00000000..7d60a557
--- /dev/null
+++ b/wlauto/workloads/peacekeeper/uiauto/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="com.arm.wlauto.uiauto.peacekeeper" default="help">
+
+ <!-- The local.properties file is created and updated by the 'android' tool.
+ It contains the path to the SDK. It should *NOT* be checked into
+ Version Control Systems. -->
+ <property file="local.properties" />
+
+ <!-- The ant.properties file can be created by you. It is only edited by the
+ 'android' tool to add properties to it.
+ This is the place to change some Ant specific build properties.
+ Here are some properties you may want to change/update:
+
+ source.dir
+ The name of the source directory. Default is 'src'.
+ out.dir
+ The name of the output directory. Default is 'bin'.
+
+ For other overridable properties, look at the beginning of the rules
+ files in the SDK, at tools/ant/build.xml
+
+ Properties related to the SDK location or the project target should
+ be updated using the 'android' tool with the 'update' action.
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems.
+
+ -->
+ <property file="ant.properties" />
+
+ <!-- if sdk.dir was not set from one of the property file, then
+ get it from the ANDROID_HOME env var.
+ This must be done before we load project.properties since
+ the proguard config can use sdk.dir -->
+ <property environment="env" />
+ <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+ <isset property="env.ANDROID_HOME" />
+ </condition>
+
+ <!-- The project.properties file is created and updated by the 'android'
+ tool, as well as ADT.
+
+ This contains project specific properties such as project target, and library
+ dependencies. Lower level build properties are stored in ant.properties
+ (or in .classpath for Eclipse projects).
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems. -->
+ <loadproperties srcFile="project.properties" />
+
+ <!-- quick check on sdk.dir -->
+ <fail
+ message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+ unless="sdk.dir"
+ />
+
+ <!--
+ Import per project custom build rules if present at the root of the project.
+ This is the place to put custom intermediary targets such as:
+ -pre-build
+ -pre-compile
+ -post-compile (This is typically used for code obfuscation.
+ Compiled code location: ${out.classes.absolute.dir}
+ If this is not done in place, override ${out.dex.input.absolute.dir})
+ -post-package
+ -post-build
+ -pre-clean
+ -->
+ <import file="custom_rules.xml" optional="true" />
+
+ <!-- Import the actual build file.
+
+ To customize existing targets, there are two options:
+ - Customize only one target:
+ - copy/paste the target into this file, *before* the
+ <import> task.
+ - customize it to your needs.
+ - Customize the whole content of build.xml
+ - copy/paste the content of the rules files (minus the top node)
+ into this file, replacing the <import> task.
+ - customize to your needs.
+
+ ***********************
+ ****** IMPORTANT ******
+ ***********************
+ In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+ in order to avoid having your file be overridden by tools such as "android update project"
+ -->
+ <!-- version-tag: VERSION_TAG -->
+ <import file="${sdk.dir}/tools/ant/uibuild.xml" />
+
+</project>
diff --git a/wlauto/workloads/peacekeeper/uiauto/project.properties b/wlauto/workloads/peacekeeper/uiauto/project.properties
new file mode 100644
index 00000000..a3ee5ab6
--- /dev/null
+++ b/wlauto/workloads/peacekeeper/uiauto/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=android-17
diff --git a/wlauto/workloads/peacekeeper/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java b/wlauto/workloads/peacekeeper/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
new file mode 100644
index 00000000..2384b800
--- /dev/null
+++ b/wlauto/workloads/peacekeeper/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
@@ -0,0 +1,115 @@
+/* Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+package com.arm.wlauto.uiauto.peacekeeper;
+
+import java.io.File;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.net.URL;
+import java.net.URLConnection;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.io.PrintWriter;
+import android.app.Activity;
+import android.os.Bundle;
+import android.util.Log;
+import android.app.Activity;
+import android.os.Bundle;
+import android.util.Log;
+import android.view.KeyEvent;
+
+// Import the uiautomator libraries
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+
+import com.arm.wlauto.uiauto.BaseUiAutomation;
+
+public class UiAutomation extends BaseUiAutomation {
+
+ public static String TAG = "peacekeeper";
+
+ public void runUiAutomation() throws Exception {
+ // maximum time for running peacekeeper benchmark 80 * 10 sec
+ final int TIMEOUT = 80;
+
+ // reading the input parameter
+ Bundle parameters = getParams();
+ String browser = parameters.getString("browser");
+ String outputFile = parameters.getString("output_file");
+ String peacekeeperUrl = parameters.getString("peacekeeper_url");
+
+ String urlAddress = "";
+
+ PrintWriter writer = new PrintWriter(outputFile, "UTF-8");
+
+ // firefox browser uiautomator code
+ if (browser.equals("firefox")) {
+
+ UiObject addressBar = new UiObject(new UiSelector()
+ .className("android.widget.TextView")
+ .text("Enter Search or Address"));
+ addressBar.click();
+ UiObject setUrl = new UiObject(new UiSelector()
+ .className("android.widget.EditText"));
+ setUrl.clearTextField();
+ setUrl.setText(peacekeeperUrl);
+ getUiDevice().pressEnter();
+
+ UiObject currentUrl = new UiObject(new UiSelector()
+ .className("android.widget.TextView").index(1));
+ for (int i = 0; i < TIMEOUT; i++) {
+
+ if (currentUrl.getText()
+ .equals("Peacekeeper - free universal browser test for HTML5 from Futuremark")) {
+
+ // write url address to peacekeeper.txt file
+ currentUrl.click();
+ urlAddress = setUrl.getText();
+ writer.println(urlAddress);
+ break;
+ }
+ sleep(10);
+ }
+ } else if (browser.equals("chrome")) { // Code for Chrome browser
+ UiObject adressBar = new UiObject(new UiSelector()
+ .className("android.widget.EditText")
+ .description("Search or type url"));
+
+ adressBar.clearTextField();
+ adressBar.setText(peacekeeperUrl);
+ getUiDevice().pressEnter();
+ for (int i = 0; i < TIMEOUT; i++) {
+
+ if (!adressBar.getText().contains("run.action")) {
+
+ // write url address to peacekeeper.txt file
+ urlAddress = adressBar.getText();
+ if (!urlAddress.contains("http"))
+ urlAddress = "http://" + urlAddress;
+ writer.println(urlAddress);
+ break;
+ }
+ sleep(10);
+ }
+ }
+ writer.close();
+ getUiDevice().pressHome();
+ }
+}
diff --git a/wlauto/workloads/quadrant/__init__.py b/wlauto/workloads/quadrant/__init__.py
new file mode 100644
index 00000000..5670ceea
--- /dev/null
+++ b/wlauto/workloads/quadrant/__init__.py
@@ -0,0 +1,112 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import re
+from collections import defaultdict
+
+from wlauto import AndroidUiAutoBenchmark
+
+
+TEST_TYPES = {
+ 'benchmark_cpu_branching_logic': 'time',
+ 'benchmark_cpu_matrix_int': 'time',
+ 'benchmark_cpu_matrix_long': 'time',
+ 'benchmark_cpu_matrix_short': 'time',
+ 'benchmark_cpu_matrix_byte': 'time',
+ 'benchmark_cpu_matrix_float': 'time',
+ 'benchmark_cpu_matrix_double': 'time',
+ 'benchmark_cpu_checksum': 'time',
+ 'benchmark_cpu': 'aggregate',
+ 'benchmark_memory_transfer': 'time',
+ 'benchmark_memory': 'aggregate',
+ 'benchmark_io_fs_write': 'time',
+ 'benchmark_io_fs_read': 'time',
+ 'benchmark_io_db_write': 'time',
+ 'benchmark_io_db_read': 'time',
+ 'benchmark_io': 'aggregate',
+ 'benchmark_g2d_fractal': 'rate',
+ 'benchmark_g2d': 'aggregate',
+ 'benchmark_g3d_corridor': 'rate',
+ 'benchmark_g3d_planet': 'rate',
+ 'benchmark_g3d_dna': 'rate',
+ 'benchmark_g3d': 'aggregate',
+ 'benchmark': 'aggregate',
+}
+
+TYPE_TESTS = defaultdict(list)
+for k, v in TEST_TYPES.iteritems():
+ TYPE_TESTS[v].append(k)
+
+TYPE_UNITS = {
+ 'time': 'ms',
+ 'rate': 'Hz',
+}
+
+REGEX_TEMPLATES = {
+ 'aggregate': r'(?P<metric>{}) aggregate score is (?P<score>\d+)',
+ 'time': r'(?P<metric>{}) executed in (?P<time>\d+) ms, '
+ r'reference time: (?P<reference>\d+) ms, '
+ r'score: (?P<score>\d+)',
+ 'rate': r'(?P<metric>{}) executed with a rate of (?P<rate>[0-9.]+)/sec, '
+ r'reference rate: (?P<reference>[0-9.]+)/sec, '
+ r'score: (?P<score>\d+)',
+}
+
+TEST_REGEXES = {}
+for test_, type_ in TEST_TYPES.items():
+ TEST_REGEXES[test_] = re.compile(REGEX_TEMPLATES[type_].format(test_))
+
+
+class Quadrant(AndroidUiAutoBenchmark):
+
+ name = 'quadrant'
+ description = """
+ Quadrant is a benchmark for mobile devices, capable of measuring CPU, memory,
+ I/O and 3D graphics performance.
+
+ http://www.aurorasoftworks.com/products/quadrant
+
+ From the website:
+ Quadrant outputs a score for the following categories: 2D, 3D, Mem, I/O, CPU
+ , Total.
+ """
+ package = 'com.aurorasoftworks.quadrant.ui.professional'
+ activity = '.QuadrantProfessionalLauncherActivity'
+ summary_metrics = ['benchmark_score']
+
+ run_timeout = 10 * 60
+
+ def __init__(self, device, **kwargs):
+ super(Quadrant, self).__init__(device, **kwargs)
+ self.uiauto_params['has_gpu'] = self.device.has_gpu
+ self.regex = {}
+
+ def update_result(self, context):
+ super(Quadrant, self).update_result(context)
+ with open(self.logcat_log) as fh:
+ for line in fh:
+ for test, regex in TEST_REGEXES.items():
+ match = regex.search(line)
+ if match:
+ test_type = TEST_TYPES[test]
+ data = match.groupdict()
+ if test_type != 'aggregate':
+ context.result.add_metric(data['metric'] + '_' + test_type,
+ data[test_type],
+ TYPE_UNITS[test_type])
+ context.result.add_metric(data['metric'] + '_score', data['score'])
+ break
+
diff --git a/wlauto/workloads/quadrant/com.arm.wlauto.uiauto.quadrant.jar b/wlauto/workloads/quadrant/com.arm.wlauto.uiauto.quadrant.jar
new file mode 100644
index 00000000..2c5aac3d
--- /dev/null
+++ b/wlauto/workloads/quadrant/com.arm.wlauto.uiauto.quadrant.jar
Binary files differ
diff --git a/wlauto/workloads/quadrant/uiauto/build.sh b/wlauto/workloads/quadrant/uiauto/build.sh
new file mode 100755
index 00000000..eba2b1cc
--- /dev/null
+++ b/wlauto/workloads/quadrant/uiauto/build.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+class_dir=bin/classes/com/arm/wlauto/uiauto
+base_class=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')"`
+mkdir -p $class_dir
+cp $base_class $class_dir
+
+ant build
+
+if [[ -f bin/com.arm.wlauto.uiauto.quadrant.jar ]]; then
+ cp bin/com.arm.wlauto.uiauto.quadrant.jar ..
+fi
diff --git a/wlauto/workloads/quadrant/uiauto/build.xml b/wlauto/workloads/quadrant/uiauto/build.xml
new file mode 100644
index 00000000..113eccbe
--- /dev/null
+++ b/wlauto/workloads/quadrant/uiauto/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="com.arm.wlauto.uiauto.quadrant" default="help">
+
+ <!-- The local.properties file is created and updated by the 'android' tool.
+ It contains the path to the SDK. It should *NOT* be checked into
+ Version Control Systems. -->
+ <property file="local.properties" />
+
+ <!-- The ant.properties file can be created by you. It is only edited by the
+ 'android' tool to add properties to it.
+ This is the place to change some Ant specific build properties.
+ Here are some properties you may want to change/update:
+
+ source.dir
+ The name of the source directory. Default is 'src'.
+ out.dir
+ The name of the output directory. Default is 'bin'.
+
+ For other overridable properties, look at the beginning of the rules
+ files in the SDK, at tools/ant/build.xml
+
+ Properties related to the SDK location or the project target should
+ be updated using the 'android' tool with the 'update' action.
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems.
+
+ -->
+ <property file="ant.properties" />
+
+ <!-- if sdk.dir was not set from one of the property file, then
+ get it from the ANDROID_HOME env var.
+ This must be done before we load project.properties since
+ the proguard config can use sdk.dir -->
+ <property environment="env" />
+ <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+ <isset property="env.ANDROID_HOME" />
+ </condition>
+
+ <!-- The project.properties file is created and updated by the 'android'
+ tool, as well as ADT.
+
+ This contains project specific properties such as project target, and library
+ dependencies. Lower level build properties are stored in ant.properties
+ (or in .classpath for Eclipse projects).
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems. -->
+ <loadproperties srcFile="project.properties" />
+
+ <!-- quick check on sdk.dir -->
+ <fail
+ message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+ unless="sdk.dir"
+ />
+
+ <!--
+ Import per project custom build rules if present at the root of the project.
+ This is the place to put custom intermediary targets such as:
+ -pre-build
+ -pre-compile
+ -post-compile (This is typically used for code obfuscation.
+ Compiled code location: ${out.classes.absolute.dir}
+ If this is not done in place, override ${out.dex.input.absolute.dir})
+ -post-package
+ -post-build
+ -pre-clean
+ -->
+ <import file="custom_rules.xml" optional="true" />
+
+ <!-- Import the actual build file.
+
+ To customize existing targets, there are two options:
+ - Customize only one target:
+ - copy/paste the target into this file, *before* the
+ <import> task.
+ - customize it to your needs.
+ - Customize the whole content of build.xml
+ - copy/paste the content of the rules files (minus the top node)
+ into this file, replacing the <import> task.
+ - customize to your needs.
+
+ ***********************
+ ****** IMPORTANT ******
+ ***********************
+ In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+ in order to avoid having your file be overridden by tools such as "android update project"
+ -->
+ <!-- version-tag: VERSION_TAG -->
+ <import file="${sdk.dir}/tools/ant/uibuild.xml" />
+
+</project>
diff --git a/wlauto/workloads/quadrant/uiauto/project.properties b/wlauto/workloads/quadrant/uiauto/project.properties
new file mode 100644
index 00000000..a3ee5ab6
--- /dev/null
+++ b/wlauto/workloads/quadrant/uiauto/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=android-17
diff --git a/wlauto/workloads/quadrant/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java b/wlauto/workloads/quadrant/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
new file mode 100644
index 00000000..f8fe8749
--- /dev/null
+++ b/wlauto/workloads/quadrant/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
@@ -0,0 +1,120 @@
+/* Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+package com.arm.wlauto.uiauto.quadrant;
+
+import java.util.concurrent.TimeUnit;
+
+import android.app.Activity;
+import android.os.Bundle;
+import android.util.Log;
+
+// Import the uiautomator libraries
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+
+import com.arm.wlauto.uiauto.BaseUiAutomation;
+
+public class UiAutomation extends BaseUiAutomation {
+
+ public static String TAG = "quadrant";
+
+ public void runUiAutomation() throws Exception {
+ Bundle status = new Bundle();
+ Bundle params = getParams();
+ boolean hasGpu = Boolean.parseBoolean(params.getString("has_gpu").toLowerCase());
+
+ clearLogcat();
+ handleFtuInfoDialogIfNecessary();
+ goToRunCustomBenchmark();
+ selectTestsToRun(hasGpu);
+ hitStart();
+ handleWarningIfNecessary();
+ waitForResults();
+
+ getAutomationSupport().sendStatus(Activity.RESULT_OK, status);
+ }
+
+ public void handleFtuInfoDialogIfNecessary() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject infoText = new UiObject(selector.text("Information"));
+ if (infoText.waitForExists(TimeUnit.SECONDS.toMillis(10)))
+ {
+ UiObject okButton = new UiObject(selector.text("OK")
+ .className("android.widget.Button"));
+ okButton.click();
+ }
+ else
+ {
+ // FTU dialog didn't come up.
+ }
+ }
+
+ public void goToRunCustomBenchmark() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject runCustom = new UiObject(selector.text("Run custom benchmark")
+ .className("android.widget.TextView"));
+ runCustom.clickAndWaitForNewWindow();
+ }
+
+ // By default, all tests are selected. However, if our device does not have a GPU, then
+ // running graphics tests may cause a crash, so we disable those.
+ public void selectTestsToRun(boolean hasGpu) throws Exception {
+ if(!hasGpu) {
+ UiSelector selector = new UiSelector();
+ UiObject gfx2d = new UiObject(selector.text("2D graphics")
+ .className("android.widget.CheckBox"));
+ gfx2d.click();
+
+ UiObject gfx3d = new UiObject(selector.text("3D graphics")
+ .className("android.widget.CheckBox"));
+ gfx3d.click();
+ }
+ }
+
+ public void hitStart() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject startButton = new UiObject(selector.text("Start")
+ .className("android.widget.Button")
+ .packageName("com.aurorasoftworks.quadrant.ui.professional"));
+ startButton.click();
+ }
+
+ // Even if graphics tests aren't selected, Quadrant will still show a warning about running
+ // with software rendering.
+ public void handleWarningIfNecessary() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject warning = new UiObject(selector.text("Warning"));
+ if (warning.waitForExists(TimeUnit.SECONDS.toMillis(2))) {
+ UiObject closeButton = new UiObject(selector.text("Close")
+ .className("android.widget.Button"));
+ if (closeButton.exists()) {
+ closeButton.click();
+ }
+ }
+ else
+ {
+ // Warning dialog didn't come up.
+ }
+ }
+
+ public void waitForResults() throws Exception {
+ waitForLogcatText("benchmark aggregate score is", TimeUnit.SECONDS.toMillis(200));
+ }
+}
diff --git a/wlauto/workloads/real_linpack/__init__.py b/wlauto/workloads/real_linpack/__init__.py
new file mode 100644
index 00000000..2f8121ab
--- /dev/null
+++ b/wlauto/workloads/real_linpack/__init__.py
@@ -0,0 +1,66 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=E1101,W0201,E0203
+
+import re
+
+from wlauto import AndroidUiAutoBenchmark, Parameter
+
+
+class RealLinpack(AndroidUiAutoBenchmark):
+
+ name = 'real-linpack'
+ description = """
+ This version of `Linpack <http://en.wikipedia.org/wiki/LINPACK_benchmarks>`
+ was developed by Dave Butcher. RealLinpack tries to find the number of threads
+ that give you the maximum linpack score.
+
+ RealLinpack runs 20 runs of linpack for each number of threads and
+ calculates the mean and confidence. It stops when the
+ score's confidence interval drops below the current best score
+ interval. That is, when (current_score + confidence) < (best_score -
+ best_score_confidence)
+
+ """
+ package = 'com.arm.RealLinpack'
+ activity = '.RealLinpackActivity'
+
+ parameters = [
+ Parameter('max_threads', kind=int, default=16, constraint=lambda x: x > 0,
+ description='The maximum number of threads that real linpack will try.'),
+ ]
+
+ def __init__(self, device, **kwargs):
+ super(RealLinpack, self).__init__(device, **kwargs)
+ self.uiauto_params['max_threads'] = self.max_threads
+ self.run_timeout = 120 + 120 * self.max_threads # a base of 2 minutes plus 2 minutes for each thread
+
+ def update_result(self, context):
+ super(RealLinpack, self).update_result(context)
+ score_regex = re.compile(r'Optimum.*threads:\s*([0-9])+.*score:\s*([0-9]+\.[0-9]+).*MFLOPS')
+ match_found = False
+ with open(self.logcat_log) as logcat_file:
+ for line in logcat_file:
+ match = re.search(score_regex, line)
+ if match:
+ number_of_threads = match.group(1)
+ score = match.group(2)
+ context.result.add_metric('optimal number of threads', number_of_threads, None)
+ context.result.add_metric('score', score, 'MFLOPS')
+ match_found = True
+ break
+ if not match_found:
+ self.logger.warning('Failed To collect results for real linpack')
diff --git a/wlauto/workloads/real_linpack/com.arm.wlauto.uiauto.reallinpack.jar b/wlauto/workloads/real_linpack/com.arm.wlauto.uiauto.reallinpack.jar
new file mode 100644
index 00000000..133435d0
--- /dev/null
+++ b/wlauto/workloads/real_linpack/com.arm.wlauto.uiauto.reallinpack.jar
Binary files differ
diff --git a/wlauto/workloads/real_linpack/uiauto/build.sh b/wlauto/workloads/real_linpack/uiauto/build.sh
new file mode 100755
index 00000000..645f225a
--- /dev/null
+++ b/wlauto/workloads/real_linpack/uiauto/build.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+class_dir=bin/classes/com/arm/wlauto/uiauto
+base_class=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')"`
+mkdir -p $class_dir
+cp $base_class $class_dir
+
+ant build
+
+if [[ -f bin/com.arm.wlauto.uiauto.reallinpack.jar ]]; then
+ cp bin/com.arm.wlauto.uiauto.reallinpack.jar ..
+fi
diff --git a/wlauto/workloads/real_linpack/uiauto/build.xml b/wlauto/workloads/real_linpack/uiauto/build.xml
new file mode 100644
index 00000000..7771a5d3
--- /dev/null
+++ b/wlauto/workloads/real_linpack/uiauto/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="com.arm.wlauto.uiauto.reallinpack" default="help">
+
+ <!-- The local.properties file is created and updated by the 'android' tool.
+ It contains the path to the SDK. It should *NOT* be checked into
+ Version Control Systems. -->
+ <property file="local.properties" />
+
+ <!-- The ant.properties file can be created by you. It is only edited by the
+ 'android' tool to add properties to it.
+ This is the place to change some Ant specific build properties.
+ Here are some properties you may want to change/update:
+
+ source.dir
+ The name of the source directory. Default is 'src'.
+ out.dir
+ The name of the output directory. Default is 'bin'.
+
+ For other overridable properties, look at the beginning of the rules
+ files in the SDK, at tools/ant/build.xml
+
+ Properties related to the SDK location or the project target should
+ be updated using the 'android' tool with the 'update' action.
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems.
+
+ -->
+ <property file="ant.properties" />
+
+ <!-- if sdk.dir was not set from one of the property file, then
+ get it from the ANDROID_HOME env var.
+ This must be done before we load project.properties since
+ the proguard config can use sdk.dir -->
+ <property environment="env" />
+ <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+ <isset property="env.ANDROID_HOME" />
+ </condition>
+
+ <!-- The project.properties file is created and updated by the 'android'
+ tool, as well as ADT.
+
+ This contains project specific properties such as project target, and library
+ dependencies. Lower level build properties are stored in ant.properties
+ (or in .classpath for Eclipse projects).
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems. -->
+ <loadproperties srcFile="project.properties" />
+
+ <!-- quick check on sdk.dir -->
+ <fail
+ message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+ unless="sdk.dir"
+ />
+
+ <!--
+ Import per project custom build rules if present at the root of the project.
+ This is the place to put custom intermediary targets such as:
+ -pre-build
+ -pre-compile
+ -post-compile (This is typically used for code obfuscation.
+ Compiled code location: ${out.classes.absolute.dir}
+ If this is not done in place, override ${out.dex.input.absolute.dir})
+ -post-package
+ -post-build
+ -pre-clean
+ -->
+ <import file="custom_rules.xml" optional="true" />
+
+ <!-- Import the actual build file.
+
+ To customize existing targets, there are two options:
+ - Customize only one target:
+ - copy/paste the target into this file, *before* the
+ <import> task.
+ - customize it to your needs.
+ - Customize the whole content of build.xml
+ - copy/paste the content of the rules files (minus the top node)
+ into this file, replacing the <import> task.
+ - customize to your needs.
+
+ ***********************
+ ****** IMPORTANT ******
+ ***********************
+ In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+ in order to avoid having your file be overridden by tools such as "android update project"
+ -->
+ <!-- version-tag: VERSION_TAG -->
+ <import file="${sdk.dir}/tools/ant/uibuild.xml" />
+
+</project>
diff --git a/wlauto/workloads/real_linpack/uiauto/project.properties b/wlauto/workloads/real_linpack/uiauto/project.properties
new file mode 100644
index 00000000..a3ee5ab6
--- /dev/null
+++ b/wlauto/workloads/real_linpack/uiauto/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=android-17
diff --git a/wlauto/workloads/real_linpack/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java b/wlauto/workloads/real_linpack/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
new file mode 100644
index 00000000..a24d9783
--- /dev/null
+++ b/wlauto/workloads/real_linpack/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
@@ -0,0 +1,51 @@
+/* Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+package com.arm.wlauto.uiauto.reallinpack;
+
+import android.app.Activity;
+import android.os.Bundle;
+
+// Import the uiautomator libraries
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+
+import com.arm.wlauto.uiauto.BaseUiAutomation;
+
+public class UiAutomation extends BaseUiAutomation {
+
+ public void runUiAutomation() throws Exception{
+ Bundle status = new Bundle();
+ status.putString("product", getUiDevice().getProductName());
+ UiSelector selector = new UiSelector();
+ // set the maximum number of threads
+ String maxThreads = getParams().getString("max_threads");
+ UiObject maxThreadNumberField = new UiObject(selector.index(3));
+ maxThreadNumberField.clearTextField();
+ maxThreadNumberField.setText(maxThreads);
+        // start the benchmark
+ UiObject btn_st = new UiObject(selector.text("Run"));
+ btn_st.click();
+ btn_st.waitUntilGone(500);
+ // set timeout for the benchmark
+ btn_st.waitForExists(60 * 60 * 1000);
+ getAutomationSupport().sendStatus(Activity.RESULT_OK, status);
+ }
+
+}
diff --git a/wlauto/workloads/realracing3/__init__.py b/wlauto/workloads/realracing3/__init__.py
new file mode 100644
index 00000000..cfeaa416
--- /dev/null
+++ b/wlauto/workloads/realracing3/__init__.py
@@ -0,0 +1,35 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import time
+
+from wlauto.common.android.workload import GameWorkload
+from wlauto.exceptions import WorkloadError, DeviceError
+
+
+class RealRacing3(GameWorkload):
+
+ name = 'realracing3'
+ description = """
+ Real Racing 3 game.
+ """
+ package = 'com.ea.games.r3_row'
+ activity = 'com.firemint.realracing3.MainActivity'
+ loading_time = 90
+ asset_file = 'com.ea.games.r3_row.tar.gz'
+ saved_state_file = 'rr3-save.tar.gz'
+
+
diff --git a/wlauto/workloads/shellscript/__init__.py b/wlauto/workloads/shellscript/__init__.py
new file mode 100644
index 00000000..9662f288
--- /dev/null
+++ b/wlauto/workloads/shellscript/__init__.py
@@ -0,0 +1,65 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=E1101,W0201,E0203
+
+import os
+
+from wlauto import Workload, Parameter
+from wlauto.exceptions import ConfigError
+
+
+class ShellScript(Workload):
+
+ name = 'shellscript'
+ description = """
+ Runs an arbitrary shellscript on the device.
+
+ """
+
+ parameters = [
+ Parameter('script_file', mandatory=True,
+ description=('The path (on the host) to the shell script file. This must be '
+ 'an absolute path (though it may contain ~).')),
+ Parameter('argstring', default='',
+ description='A string that should contain arguments passed to the script.'),
+ Parameter('timeout', kind=int, default=60,
+ description='Timeout, in seconds, for the script run time.'),
+ ]
+
+ def __init__(self, device, **kwargs):
+ super(ShellScript, self).__init__(device, **kwargs)
+ self.script_file = os.path.expanduser(self.script_file)
+ if not os.path.isfile(self.script_file):
+ raise ConfigError('Can\'t access file (is the path correct?): {}'.format(self.script_file))
+ self.output = None
+ self.command = None
+ self.on_device_script_file = None
+
+ def setup(self, context):
+ self.on_device_script_file = self.device.path.join(self.device.working_directory,
+ os.path.basename(self.script_file))
+ self.device.push_file(self.script_file, self.on_device_script_file)
+ self.command = 'sh {} {}'.format(self.on_device_script_file, self.argstring)
+
+ def run(self, context):
+ self.output = self.device.execute(self.command, timeout=self.timeout)
+
+ def update_result(self, context):
+ with open(os.path.join(context.output_directory, 'output.txt'), 'w') as wfh:
+ wfh.write(self.output)
+
+ def teardown(self, context):
+ self.device.delete_file(self.on_device_script_file)
diff --git a/wlauto/workloads/skypevideo/__init__.py b/wlauto/workloads/skypevideo/__init__.py
new file mode 100644
index 00000000..58959e1f
--- /dev/null
+++ b/wlauto/workloads/skypevideo/__init__.py
@@ -0,0 +1,130 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=E1101,W0201,E0203
+
+import time
+
+from wlauto import UiAutomatorWorkload, Parameter
+from wlauto.utils.types import boolean
+
+
+class SkypeVideo(UiAutomatorWorkload):
+
+ name = 'skypevideo'
+ description = """
+ Initiates Skype video call to a specified contact for a pre-determined duration.
+ (Note: requires Skype to be set up appropriately).
+
+ This workload is intended for monitoring the behaviour of a device while a Skype
+ video call is in progress (a common use case). It does not produce any score or
+ metric and the intention is that some addition instrumentation is enabled while
+ running this workload.
+
+ This workload, obviously, requires a network connection (ideally, wifi).
+
+ This workload accepts the following parameters:
+
+
+ **Skype Setup**
+
+ - You should install Skype client from Google Play Store on the device
+ (this was tested with client version 4.5.0.39600; other recent versions
+ should also work).
+ - You must have an account set up and logged into Skype on the device.
+ - The contact to be called must be added (and has accepted) to the
+ account. It's possible to have multiple contacts in the list, however
+ the contact to be called *must* be visible on initial navigation to the
+ list.
+ - The contact must be able to received the call. This means that there
+ must be a Skype client running (somewhere) with the contact logged in
+ and that client must have been configured to auto-accept calls from the
+ account on the device (how to set this varies between different versions
+ of Skype and between platforms -- please search online for specific
+ instructions).
+ https://support.skype.com/en/faq/FA3751/can-i-automatically-answer-all-my-calls-with-video-in-skype-for-windows-desktop
+
+ """
+
+ package = 'com.skype.raider'
+
+ parameters = [
+ Parameter('duration', kind=int, default=300,
+ description='Duration of the video call in seconds.'),
+ Parameter('contact', mandatory=True,
+ description="""
+ The name of the Skype contact to call. The contact must be already
+ added (see below). *If use_gui is set*, then this must be the skype
+ ID of the contact, *otherwise*, this must be the name of the
+ contact as it appears in Skype client's contacts list. In the latter case
+ it *must not* contain underscore characters (``_``); it may, however, contain
+ spaces. There is no default, you **must specify the name of the contact**.
+
+ .. note:: You may alternatively specify the contact name as
+ ``skype_contact`` setting in your ``config.py``. If this is
+ specified, the ``contact`` parameter is optional, though
+ it may still be specified (in which case it will override
+ ``skype_contact`` setting).
+ """),
+ Parameter('use_gui', kind=boolean, default=False,
+ description="""
+ Specifies whether the call should be placed directly through a
+ Skype URI, or by navigating the GUI. The URI is the recommended way
+ to place Skype calls on a device, but that does not seem to work
+ correctly on some devices (the URI seems to just start Skype, but not
+ place the call), so an alternative exists that will start the Skype app
+ and will then navigate the UI to place the call (incidentally, this method
+ does not seem to work on all devices either, as sometimes Skype starts
+ backgrounded...). Please note that the meaning of ``contact`` prameter
+ is different depending on whether this is set. Defaults to ``False``.
+
+ .. note:: You may alternatively specify this as ``skype_use_gui`` setting
+ in your ``config.py``.
+ """),
+
+ ]
+
+ def __init__(self, device, **kwargs):
+ super(SkypeVideo, self).__init__(device, **kwargs)
+ if self.use_gui:
+ self.uiauto_params['name'] = self.contact.replace(' ', '_')
+ self.uiauto_params['duration'] = self.duration
+ self.run_timeout = self.duration + 30
+
+ def setup(self, context):
+ if self.use_gui:
+ super(SkypeVideo, self).setup(context)
+ self.device.execute('am force-stop {}'.format(self.package))
+ self.device.execute('am start -W -a android.intent.action.VIEW -d skype:')
+ else:
+ self.device.execute('am force-stop {}'.format(self.package))
+
+ def run(self, context):
+ if self.use_gui:
+ super(SkypeVideo, self).run(context)
+ else:
+ command = "am start -W -a android.intent.action.VIEW -d \"skype:{}?call&video=true\""
+ self.logger.debug(self.device.execute(command.format(self.contact)))
+ self.logger.debug('Call started; waiting for {} seconds...'.format(self.duration))
+ time.sleep(self.duration)
+ self.device.execute('am force-stop com.skype.raider')
+
+ def update_result(self, context):
+ pass
+
+ def teardown(self, context):
+ if self.use_gui:
+ super(SkypeVideo, self).teardown(context)
+ self.device.execute('am force-stop {}'.format(self.package))
diff --git a/wlauto/workloads/skypevideo/com.arm.wlauto.uiauto.skypevideo.jar b/wlauto/workloads/skypevideo/com.arm.wlauto.uiauto.skypevideo.jar
new file mode 100644
index 00000000..dff2302a
--- /dev/null
+++ b/wlauto/workloads/skypevideo/com.arm.wlauto.uiauto.skypevideo.jar
Binary files differ
diff --git a/wlauto/workloads/skypevideo/uiauto/build.sh b/wlauto/workloads/skypevideo/uiauto/build.sh
new file mode 100755
index 00000000..db6f8ff4
--- /dev/null
+++ b/wlauto/workloads/skypevideo/uiauto/build.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+class_dir=bin/classes/com/arm/wlauto/uiauto
+base_class=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')"`
+mkdir -p $class_dir
+cp $base_class $class_dir
+
+ant build
+
+if [[ -f bin/com.arm.wlauto.uiauto.skypevideo.jar ]]; then
+ cp bin/com.arm.wlauto.uiauto.skypevideo.jar ..
+fi
diff --git a/wlauto/workloads/skypevideo/uiauto/build.xml b/wlauto/workloads/skypevideo/uiauto/build.xml
new file mode 100644
index 00000000..c2fdeb90
--- /dev/null
+++ b/wlauto/workloads/skypevideo/uiauto/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="com.arm.wlauto.uiauto.skypevideo" default="help">
+
+ <!-- The local.properties file is created and updated by the 'android' tool.
+ It contains the path to the SDK. It should *NOT* be checked into
+ Version Control Systems. -->
+ <property file="local.properties" />
+
+ <!-- The ant.properties file can be created by you. It is only edited by the
+ 'android' tool to add properties to it.
+ This is the place to change some Ant specific build properties.
+ Here are some properties you may want to change/update:
+
+ source.dir
+ The name of the source directory. Default is 'src'.
+ out.dir
+ The name of the output directory. Default is 'bin'.
+
+ For other overridable properties, look at the beginning of the rules
+ files in the SDK, at tools/ant/build.xml
+
+ Properties related to the SDK location or the project target should
+ be updated using the 'android' tool with the 'update' action.
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems.
+
+ -->
+ <property file="ant.properties" />
+
+ <!-- if sdk.dir was not set from one of the property file, then
+ get it from the ANDROID_HOME env var.
+ This must be done before we load project.properties since
+ the proguard config can use sdk.dir -->
+ <property environment="env" />
+ <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+ <isset property="env.ANDROID_HOME" />
+ </condition>
+
+ <!-- The project.properties file is created and updated by the 'android'
+ tool, as well as ADT.
+
+ This contains project specific properties such as project target, and library
+ dependencies. Lower level build properties are stored in ant.properties
+ (or in .classpath for Eclipse projects).
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems. -->
+ <loadproperties srcFile="project.properties" />
+
+ <!-- quick check on sdk.dir -->
+ <fail
+ message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+ unless="sdk.dir"
+ />
+
+ <!--
+ Import per project custom build rules if present at the root of the project.
+ This is the place to put custom intermediary targets such as:
+ -pre-build
+ -pre-compile
+ -post-compile (This is typically used for code obfuscation.
+ Compiled code location: ${out.classes.absolute.dir}
+ If this is not done in place, override ${out.dex.input.absolute.dir})
+ -post-package
+ -post-build
+ -pre-clean
+ -->
+ <import file="custom_rules.xml" optional="true" />
+
+ <!-- Import the actual build file.
+
+ To customize existing targets, there are two options:
+ - Customize only one target:
+ - copy/paste the target into this file, *before* the
+ <import> task.
+ - customize it to your needs.
+ - Customize the whole content of build.xml
+ - copy/paste the content of the rules files (minus the top node)
+ into this file, replacing the <import> task.
+ - customize to your needs.
+
+ ***********************
+ ****** IMPORTANT ******
+ ***********************
+ In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+ in order to avoid having your file be overridden by tools such as "android update project"
+ -->
+ <!-- version-tag: VERSION_TAG -->
+ <import file="${sdk.dir}/tools/ant/uibuild.xml" />
+
+</project>
diff --git a/wlauto/workloads/skypevideo/uiauto/project.properties b/wlauto/workloads/skypevideo/uiauto/project.properties
new file mode 100644
index 00000000..ce39f2d0
--- /dev/null
+++ b/wlauto/workloads/skypevideo/uiauto/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=android-18
diff --git a/wlauto/workloads/skypevideo/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java b/wlauto/workloads/skypevideo/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
new file mode 100644
index 00000000..0743372e
--- /dev/null
+++ b/wlauto/workloads/skypevideo/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
@@ -0,0 +1,72 @@
+/* Copyright 2014-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+package com.arm.wlauto.uiauto.skypevideo;
+
+import android.app.Activity;
+import android.os.Bundle;
+import android.util.Log;
+import android.view.KeyEvent;
+
+// Import the uiautomator libraries
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+
+import com.arm.wlauto.uiauto.BaseUiAutomation;
+
+/**
+ * Drives the Skype client UI to place a video call to a named contact and
+ * holds the call for the requested duration.
+ */
+public class UiAutomation extends BaseUiAutomation {
+
+    public static final String TAG = "skypevideo";
+    // Resource id of the in-chat video-call button (Skype client 4.5.x).
+    public static final String videoCallButtonResourceId = "com.skype.raider:id/chat_menu_item_call_video";
+    public static final String noContactMessage = "Could not find contact \"%s\" in the contacts list.";
+
+    public void runUiAutomation() throws Exception {
+        Bundle parameters = getParams();
+        // The Python side encodes spaces in the contact name as underscores; undo that here.
+        // NOTE(review): assumes the "name" and "duration" parameters are always supplied.
+        String contactName = parameters.getString("name").replace('_', ' ');
+        int duration = Integer.parseInt(parameters.getString("duration"));
+
+        selectContact(contactName);
+        initiateCall(duration);
+    }
+
+    /** Navigates to the People tab and opens the chat window for the named contact. */
+    public void selectContact(String name) throws Exception {
+        UiSelector selector = new UiSelector();
+        UiObject peopleTab = new UiObject(selector.text("People"));
+        peopleTab.click();
+        sleep(1); // tab transition
+
+        // Note: this assumes that the contact is in view and does not attempt to scroll to find it.
+        // The expectation is that this automation will be used with a dedicated account that was set
+        // up for the purpose and so would only have the intended target plus one or two other contacts
+        // at most in the list. If that is not the case, then this needs to be re-written to scroll to
+        // find the contact if necessary.
+        UiObject contactCard = new UiObject(selector.text(name));
+        if (!contactCard.exists()) {
+            throw new UiObjectNotFoundException(String.format(noContactMessage, name));
+        }
+        contactCard.clickAndWaitForNewWindow();
+    }
+
+    /** Taps the video-call button and keeps the call up for {@code duration} seconds. */
+    public void initiateCall(int duration) throws Exception {
+        UiSelector selector = new UiSelector();
+        UiObject videoCallButton = new UiObject(selector.resourceId(videoCallButtonResourceId));
+        videoCallButton.click();
+        sleep(duration);
+    }
+}
diff --git a/wlauto/workloads/smartbench/__init__.py b/wlauto/workloads/smartbench/__init__.py
new file mode 100644
index 00000000..4b7cbe3e
--- /dev/null
+++ b/wlauto/workloads/smartbench/__init__.py
@@ -0,0 +1,59 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+import re
+import time
+
+from wlauto import AndroidUiAutoBenchmark
+
+
+class Smartbench(AndroidUiAutoBenchmark):
+
+ name = 'smartbench'
+ description = """
+ Smartbench is a multi-core friendly benchmark application that measures the
+ overall performance of an android device. It reports both Productivity and
+ Gaming Index.
+
+ https://play.google.com/store/apps/details?id=com.smartbench.twelve&hl=en
+
+ From the website:
+
+ It will be better prepared for the quad-core world. Unfortunately this also
+ means it will run slower on older devices. It will also run slower on
+ high-resolution tablet devices. All 3D tests are now rendered in full native
+ resolutions so naturally it will stress hardware harder on these devices.
+ This also applies to higher resolution hand-held devices.
+ """
+ package = 'com.smartbench.twelve'
+ activity = '.Smartbench2012'
+ summary_metrics = ['Smartbench: valueGame', 'Smartbench: valueProd']
+ run_timeout = 10 * 60
+
+ prod_regex = re.compile('valueProd=(\d+)')
+ game_regex = re.compile('valueGame=(\d+)')
+
+ def update_result(self, context):
+ super(Smartbench, self).update_result(context)
+ with open(self.logcat_log) as fh:
+ text = fh.read()
+ match = self.prod_regex.search(text)
+ prod = int(match.group(1))
+ match = self.game_regex.search(text)
+ game = int(match.group(1))
+ context.result.add_metric('Smartbench: valueProd', prod)
+ context.result.add_metric('Smartbench: valueGame', game)
diff --git a/wlauto/workloads/smartbench/com.arm.wlauto.uiauto.smartbench.jar b/wlauto/workloads/smartbench/com.arm.wlauto.uiauto.smartbench.jar
new file mode 100644
index 00000000..f388cbe0
--- /dev/null
+++ b/wlauto/workloads/smartbench/com.arm.wlauto.uiauto.smartbench.jar
Binary files differ
diff --git a/wlauto/workloads/smartbench/uiauto/build.sh b/wlauto/workloads/smartbench/uiauto/build.sh
new file mode 100755
index 00000000..bf76a67e
--- /dev/null
+++ b/wlauto/workloads/smartbench/uiauto/build.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+class_dir=bin/classes/com/arm/wlauto/uiauto
+base_class=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')"`
+mkdir -p $class_dir
+cp $base_class $class_dir
+
+ant build
+
+if [[ -f bin/com.arm.wlauto.uiauto.smartbench.jar ]]; then
+ cp bin/com.arm.wlauto.uiauto.smartbench.jar ..
+fi
diff --git a/wlauto/workloads/smartbench/uiauto/build.xml b/wlauto/workloads/smartbench/uiauto/build.xml
new file mode 100644
index 00000000..ee913c41
--- /dev/null
+++ b/wlauto/workloads/smartbench/uiauto/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="com.arm.wlauto.uiauto.smartbench" default="help">
+
+ <!-- The local.properties file is created and updated by the 'android' tool.
+ It contains the path to the SDK. It should *NOT* be checked into
+ Version Control Systems. -->
+ <property file="local.properties" />
+
+ <!-- The ant.properties file can be created by you. It is only edited by the
+ 'android' tool to add properties to it.
+ This is the place to change some Ant specific build properties.
+ Here are some properties you may want to change/update:
+
+ source.dir
+ The name of the source directory. Default is 'src'.
+ out.dir
+ The name of the output directory. Default is 'bin'.
+
+ For other overridable properties, look at the beginning of the rules
+ files in the SDK, at tools/ant/build.xml
+
+ Properties related to the SDK location or the project target should
+ be updated using the 'android' tool with the 'update' action.
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems.
+
+ -->
+ <property file="ant.properties" />
+
+ <!-- if sdk.dir was not set from one of the property file, then
+ get it from the ANDROID_HOME env var.
+ This must be done before we load project.properties since
+ the proguard config can use sdk.dir -->
+ <property environment="env" />
+ <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+ <isset property="env.ANDROID_HOME" />
+ </condition>
+
+ <!-- The project.properties file is created and updated by the 'android'
+ tool, as well as ADT.
+
+ This contains project specific properties such as project target, and library
+ dependencies. Lower level build properties are stored in ant.properties
+ (or in .classpath for Eclipse projects).
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems. -->
+ <loadproperties srcFile="project.properties" />
+
+ <!-- quick check on sdk.dir -->
+ <fail
+ message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+ unless="sdk.dir"
+ />
+
+ <!--
+ Import per project custom build rules if present at the root of the project.
+ This is the place to put custom intermediary targets such as:
+ -pre-build
+ -pre-compile
+ -post-compile (This is typically used for code obfuscation.
+ Compiled code location: ${out.classes.absolute.dir}
+ If this is not done in place, override ${out.dex.input.absolute.dir})
+ -post-package
+ -post-build
+ -pre-clean
+ -->
+ <import file="custom_rules.xml" optional="true" />
+
+ <!-- Import the actual build file.
+
+ To customize existing targets, there are two options:
+ - Customize only one target:
+ - copy/paste the target into this file, *before* the
+ <import> task.
+ - customize it to your needs.
+ - Customize the whole content of build.xml
+ - copy/paste the content of the rules files (minus the top node)
+ into this file, replacing the <import> task.
+ - customize to your needs.
+
+ ***********************
+ ****** IMPORTANT ******
+ ***********************
+ In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+ in order to avoid having your file be overridden by tools such as "android update project"
+ -->
+ <!-- version-tag: VERSION_TAG -->
+ <import file="${sdk.dir}/tools/ant/uibuild.xml" />
+
+</project>
diff --git a/wlauto/workloads/smartbench/uiauto/project.properties b/wlauto/workloads/smartbench/uiauto/project.properties
new file mode 100644
index 00000000..a3ee5ab6
--- /dev/null
+++ b/wlauto/workloads/smartbench/uiauto/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=android-17
diff --git a/wlauto/workloads/smartbench/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java b/wlauto/workloads/smartbench/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
new file mode 100644
index 00000000..e8c3aac1
--- /dev/null
+++ b/wlauto/workloads/smartbench/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
@@ -0,0 +1,62 @@
+/* Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+package com.arm.wlauto.uiauto.smartbench;
+
+import android.app.Activity;
+import android.os.Bundle;
+import android.util.Log;
+
+// Import the uiautomator libraries
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+
+import com.arm.wlauto.uiauto.BaseUiAutomation;
+
+/**
+ * Drives the SmartBench 2012 UI: starts the benchmark run, waits for it to
+ * complete, opens the index scores and captures a screenshot of the results.
+ */
+public class UiAutomation extends BaseUiAutomation {
+
+    public static final String TAG = "smartbench";
+
+    public void runUiAutomation() throws Exception {
+        Bundle status = new Bundle();
+        status.putString("product", getUiDevice().getProductName());
+        UiSelector selector = new UiSelector();
+        sleep(3);
+
+        // Kick off the benchmark run from the main menu.
+        UiObject runButton = new UiObject(selector.text("Run SmartBench")
+                                                  .className("android.widget.TextView"));
+        runButton.click();
+
+        // Block until the run finishes and the results entry appears, then open it.
+        // (The original wrapped this in a try with an empty finally, which had no
+        // effect and has been removed.)
+        UiObject scoresEntry = new UiObject(selector.textContains("Display Index Scores")
+                                                    .className("android.widget.TextView"));
+        waitObject(scoresEntry);
+        sleep(2);
+        scoresEntry.click();
+
+        sleep(5);
+        takeScreenshot("SmartBench");
+        getAutomationSupport().sendStatus(Activity.RESULT_OK, status);
+    }
+
+}
diff --git a/wlauto/workloads/spec2000/__init__.py b/wlauto/workloads/spec2000/__init__.py
new file mode 100644
index 00000000..87529c63
--- /dev/null
+++ b/wlauto/workloads/spec2000/__init__.py
@@ -0,0 +1,356 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+#pylint: disable=E1101,W0201
+import os
+import re
+import string
+import tarfile
+from collections import defaultdict
+
+from wlauto import Workload, Parameter, Alias
+from wlauto.exceptions import ConfigError, WorkloadError
+from wlauto.common.resources import ExtensionAsset
+from wlauto.utils.misc import get_cpu_mask
+from wlauto.utils.types import boolean, list_or_string
+
+
+class Spec2000(Workload):
+
+ name = 'spec2000'
+ description = """
+ SPEC2000 benchmarks measuring processor, memory and compiler.
+
+ http://www.spec.org/cpu2000/
+
+ From the web site:
+
+ SPEC CPU2000 is the next-generation industry-standardized CPU-intensive benchmark suite. SPEC
+ designed CPU2000 to provide a comparative measure of compute intensive performance across the
+ widest practical range of hardware. The implementation resulted in source code benchmarks
+ developed from real user applications. These benchmarks measure the performance of the
+ processor, memory and compiler on the tested system.
+
+ .. note:: At the moment, this workload relies on pre-built SPEC binaries (included in an
+ asset bundle). These binaries *must* be built according to rules outlined here::
+
+ http://www.spec.org/cpu2000/docs/runrules.html#toc_2.0
+
+ in order for the results to be valid SPEC2000 results.
+
+ .. note:: This workload does not attempt to generate results in an admissible SPEC format. No
+ metadata is provided (though some, but not all, of the required metdata is colleted
+ by WA elsewhere). It is upto the user to post-process results to generated
+ SPEC-admissible results file, if that is their intention.
+
+ *base vs peak*
+
+ SPEC2000 defines two build/test configuration: base and peak. Base is supposed to use basic
+ configuration (e.g. default compiler flags) with no tuning, and peak is specifically optimized for
+ a system. Since this workload uses externally-built binaries, there is no way for WA to be sure
+ what configuration is used -- the user is expected to keep track of that. Be aware that
+ base/peak also come with specfic requirements for the way workloads are run (e.g. how many instances
+ on multi-core systems)::
+
+ http://www.spec.org/cpu2000/docs/runrules.html#toc_3
+
+ These are not enforced by WA, so it is again up to the user to ensure that correct workload
+ parameters are specfied inthe agenda, if they intend to collect "official" SPEC results. (Those
+ interested in collecting official SPEC results should also note that setting runtime parameters
+ would violate SPEC runs rules that state that no configuration must be done to the platform
+ after boot).
+
+ *bundle structure*
+
+ This workload expects the actual benchmark binaries to be provided in a tarball "bundle" that has
+ a very specific structure. At the top level of the tarball, there should be two directories: "fp"
+ and "int" -- for each of the SPEC2000 categories. Under those, there is a sub-directory per benchmark.
+ Each benchmark sub-directory contains three sub-sub-directorie:
+
+ - "cpus" contains a subdirector for each supported cpu (e.g. a15) with a single executable binary
+ for that cpu, in addition to a "generic" subdirectory that has not been optimized for a specific
+ cpu and should run on any ARM system.
+ - "data" contains all additional files (input, configuration, etc) that the benchmark executable
+ relies on.
+ - "scripts" contains one or more one-liner shell scripts that invoke the benchmark binary with
+ appropriate command line parameters. The name of the script must be in the format
+ <benchmark name>[.<variant name>].sh, i.e. name of benchmark, optionally followed by variant
+ name, followed by ".sh" extension. If there is more than one script, then all of them must
+ have a variant; if there is only one script the it should not cotain a variant.
+
+ A typical bundle may look like this::
+
+ |- fp
+ | |-- ammp
+ | | |-- cpus
+ | | | |-- generic
+ | | | | |-- ammp
+ | | | |-- a15
+ | | | | |-- ammp
+ | | | |-- a7
+ | | | | |-- ammp
+ | | |-- data
+ | | | |-- ammp.in
+ | | |-- scripts
+ | | | |-- ammp.sh
+ | |-- applu
+ . . .
+ . . .
+ . . .
+ |- int
+ .
+
+ """
+
+ # TODO: This is a bit of a hack. Need to re-think summary metric indication
+ # (also more than just summary/non-summary classification?)
+ class _SPECSummaryMetrics(object):
+ def __contains__(self, item):
+ if item.endswith('_real'):
+ return True
+ else:
+ return False
+
+ asset_file = 'spec2000-assets.tar.gz'
+
+ aliases = [
+ Alias('spec2k'),
+ ]
+
+ summary_metrics = _SPECSummaryMetrics()
+
+ parameters = [
+ Parameter('benchmarks', kind=list_or_string,
+ description='Specfiles the SPEC benchmarks to run.'),
+ Parameter('mode', kind=str, allowed_values=['speed', 'rate'], default='speed',
+ description='SPEC benchmarks can report either speed to execute or throughput/rate. '
+ 'In the latter case, several "threads" will be spawned.'),
+ Parameter('number_of_threads', kind=int, default=None,
+ description='Specify the number of "threads" to be used in \'rate\' mode. (Note: '
+ 'on big.LITTLE systems this is the number of threads, for *each cluster*). '),
+
+ Parameter('force_extract_assets', kind=boolean, default=False,
+ description='if set to ``True``, will extract assets from the bundle, even if they are '
+ 'already extracted. Note: this option implies ``force_push_assets``.'),
+ Parameter('force_push_assets', kind=boolean, default=False,
+ description='If set to ``True``, assets will be pushed to device even if they\'re already '
+ 'present.'),
+ Parameter('timeout', kind=int, default=20 * 60,
+ description='Timemout, in seconds, for the execution of single spec test.'),
+ ]
+
+ speed_run_template = 'cd {datadir}; time ({launch_command})'
+ rate_run_template = 'cd {datadir}; time ({loop}; wait)'
+ loop_template = 'for i in $(busybox seq 1 {threads}); do {launch_command} 1>/dev/null 2>&1 & done'
+ launch_template = 'busybox taskset {cpumask} {command} 1>/dev/null 2>&1'
+
+ timing_regex = re.compile(r'(?P<minutes>\d+)m(?P<seconds>[\d.]+)s\s+(?P<category>\w+)')
+
+ def init_resources(self, context):
+ self._load_spec_benchmarks(context)
+
+ def setup(self, context):
+ cpus = self.device.core_names
+ if not cpus:
+ raise WorkloadError('Device has not specifed CPU cores configruation.')
+ cpumap = defaultdict(list)
+ for i, cpu in enumerate(cpus):
+ cpumap[cpu.lower()].append(i)
+ for benchspec in self.benchmarks:
+ commandspecs = self._verify_and_deploy_benchmark(benchspec, cpumap)
+ self._build_command(benchspec, commandspecs)
+
+ def run(self, context):
+ for name, command in self.commands:
+ self.timings[name] = self.device.execute(command, timeout=self.timeout)
+
+ def update_result(self, context):
+ for benchmark, output in self.timings.iteritems():
+ matches = self.timing_regex.finditer(output)
+ found = False
+ for match in matches:
+ category = match.group('category')
+ mins = float(match.group('minutes'))
+ secs = float(match.group('seconds'))
+ total = secs + 60 * mins
+ context.result.add_metric('_'.join([benchmark, category]),
+ total, 'seconds',
+ lower_is_better=True)
+ found = True
+ if not found:
+ self.logger.error('Could not get timings for {}'.format(benchmark))
+
+ def validate(self):
+ if self.force_extract_assets:
+ self.force_push_assets = True
+ if self.benchmarks is None: # pylint: disable=access-member-before-definition
+ self.benchmarks = 'all'
+ if isinstance(self.benchmarks, basestring):
+ if self.benchmarks == 'all':
+ self.benchmarks = self.loaded_benchmarks.keys()
+ else:
+ self.benchmarks = [self.benchmarks]
+ for benchname in self.benchmarks:
+ if benchname not in self.loaded_benchmarks:
+ raise ConfigError('Unknown SPEC benchmark: {}'.format(benchname))
+ if self.mode == 'speed':
+ if self.number_of_threads is not None:
+ raise ConfigError('number_of_threads cannot be specified in speed mode.')
+ else:
+ raise ValueError('Unexpected SPEC2000 mode: {}'.format(self.mode)) # Should never get here
+ self.commands = []
+ self.timings = {}
+
+ def _load_spec_benchmarks(self, context):
+ self.loaded_benchmarks = {}
+ self.categories = set()
+ if self.force_extract_assets or len(os.listdir(self.dependencies_directory)) < 2:
+ bundle = context.resolver.get(ExtensionAsset(self, self.asset_file))
+ with tarfile.open(bundle, 'r:gz') as tf:
+ tf.extractall(self.dependencies_directory)
+ for entry in os.listdir(self.dependencies_directory):
+ entrypath = os.path.join(self.dependencies_directory, entry)
+ if os.path.isdir(entrypath):
+ for bench in os.listdir(entrypath):
+ self.categories.add(entry)
+ benchpath = os.path.join(entrypath, bench)
+ self._load_benchmark(benchpath, entry)
+
+ def _load_benchmark(self, path, category):
+ datafiles = []
+ cpus = []
+ for df in os.listdir(os.path.join(path, 'data')):
+ datafiles.append(os.path.join(path, 'data', df))
+ for cpu in os.listdir(os.path.join(path, 'cpus')):
+ cpus.append(cpu)
+ commandsdir = os.path.join(path, 'commands')
+ for command in os.listdir(commandsdir):
+ bench = SpecBenchmark()
+ bench.name = os.path.splitext(command)[0]
+ bench.path = path
+ bench.category = category
+ bench.datafiles = datafiles
+ bench.cpus = cpus
+ with open(os.path.join(commandsdir, command)) as fh:
+ bench.command_template = string.Template(fh.read().strip())
+ self.loaded_benchmarks[bench.name] = bench
+
+ def _verify_and_deploy_benchmark(self, benchspec, cpumap): # pylint: disable=R0914
+ """Verifies that the supplied benchmark spec is valid and deploys the required assets
+ to the device (if necessary). Returns a list of command specs (one for each CPU cluster)
+ that can then be used to construct the final command."""
+ bench = self.loaded_benchmarks[benchspec]
+ basename = benchspec.split('.')[0]
+ datadir = self.device.path.join(self.device.working_directory, self.name, basename)
+ if self.force_push_assets or not self.device.file_exists(datadir):
+ self.device.execute('mkdir -p {}'.format(datadir))
+ for datafile in bench.datafiles:
+ self.device.push_file(datafile, self.device.path.join(datadir, os.path.basename(datafile)))
+
+ if self.mode == 'speed':
+ cpus = [self._get_fastest_cpu().lower()]
+ else:
+ cpus = cpumap.keys()
+
+ cmdspecs = []
+ for cpu in cpus:
+ try:
+ host_bin_file = bench.get_binary(cpu)
+ except ValueError, e:
+ try:
+ msg = e.message
+ msg += ' Attempting to use generic binary instead.'
+ self.logger.debug(msg)
+ host_bin_file = bench.get_binary('generic')
+ cpu = 'generic'
+ except ValueError, e:
+ raise ConfigError(e.message) # re-raising as user error
+ binname = os.path.basename(host_bin_file)
+ binary = self.device.install(host_bin_file, with_name='.'.join([binname, cpu]))
+ commandspec = CommandSpec()
+ commandspec.command = bench.command_template.substitute({'binary': binary})
+ commandspec.datadir = datadir
+ commandspec.cpumask = get_cpu_mask(cpumap[cpu])
+ cmdspecs.append(commandspec)
+ return cmdspecs
+
+ def _build_command(self, name, commandspecs):
+ if self.mode == 'speed':
+ if len(commandspecs) != 1:
+ raise AssertionError('Must be exactly one command spec specifed in speed mode.')
+ spec = commandspecs[0]
+ launch_command = self.launch_template.format(command=spec.command, cpumask=spec.cpumask)
+ self.commands.append((name,
+ self.speed_run_template.format(datadir=spec.datadir,
+ launch_command=launch_command)))
+ elif self.mode == 'rate':
+ loops = []
+ for spec in commandspecs:
+ launch_command = self.launch_template.format(command=spec.command, cpumask=spec.cpumask)
+ loops.append(self.loop_template.format(launch_command=launch_command, threads=spec.threads))
+ self.commands.append((name,
+ self.rate_run_template.format(datadir=spec.datadir,
+ loop='; '.join(loops))))
+ else:
+ raise ValueError('Unexpected SPEC2000 mode: {}'.format(self.mode)) # Should never get here
+
+ def _get_fastest_cpu(self):
+ cpu_types = set(self.device.core_names)
+ if len(cpu_types) == 1:
+ return cpu_types.pop()
+ fastest_cpu = None
+ fastest_freq = 0
+ for cpu_type in cpu_types:
+ try:
+ idx = self.device.get_core_cpu(cpu_type)
+ freq = self.device.get_cpu_max_frequency(idx)
+ if freq > fastest_freq:
+ fastest_freq = freq
+ fastest_cpu = cpu_type
+ except ValueError:
+ pass
+ if not fastest_cpu:
+ raise WorkloadError('No active CPUs found on device. Something is very wrong...')
+ return fastest_cpu
+
+
+class SpecBenchmark(object):
+
+ def __init__(self):
+ self.name = None
+ self.path = None
+ self.category = None
+ self.command_template = None
+ self.cpus = []
+ self.datafiles = []
+
+ def get_binary(self, cpu):
+ if cpu not in self.cpus:
+ raise ValueError('CPU {} is not supported by {}.'.format(cpu, self.name))
+ binpath = os.path.join(self.path, 'cpus', cpu, self.name.split('.')[0])
+ if not os.path.isfile(binpath):
+ raise ValueError('CPU {} is not supported by {}.'.format(cpu, self.name))
+ return binpath
+
+
+class CommandSpec(object):
+
+    # Plain data holder describing one per-cluster invocation of a benchmark;
+    # fields are filled in by Spec2000._verify_and_deploy_benchmark and
+    # consumed by Spec2000._build_command.
+    def __init__(self):
+        self.cpumask = None   # taskset affinity mask for the target cluster
+        self.datadir = None   # on-device directory containing the input data
+        self.command = None   # rendered benchmark launch command
+        self.threads = None   # number of parallel instances ('rate' mode only)
+
diff --git a/wlauto/workloads/sqlite/__init__.py b/wlauto/workloads/sqlite/__init__.py
new file mode 100644
index 00000000..60a01276
--- /dev/null
+++ b/wlauto/workloads/sqlite/__init__.py
@@ -0,0 +1,48 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import re
+
+from wlauto import AndroidUiAutoBenchmark
+
+
+class Sqlite(AndroidUiAutoBenchmark):
+
+ name = 'sqlitebm'
+ description = """
+ Measures the performance of the sqlite database. It determines within
+ what time the target device processes a number of SQL queries.
+
+ """
+ package = 'com.redlicense.benchmark.sqlite'
+ activity = '.Main'
+ summary_metrics = ['Overall']
+
+ score_regex = re.compile(r'V/sqlite.*:\s+([\w ]+) = ([\d\.]+) sec')
+
+ def update_result(self, context):
+ super(Sqlite, self).update_result(context)
+ with open(self.logcat_log) as fh:
+ text = fh.read()
+ for match in self.score_regex.finditer(text):
+ metric = match.group(1)
+ value = match.group(2)
+ try:
+ value = float(value)
+ except ValueError:
+ self.logger.warn("Reported results do not match expected format (seconds)")
+ context.result.add_metric(metric, value, 'Seconds', lower_is_better=True)
+
diff --git a/wlauto/workloads/sqlite/com.arm.wlauto.uiauto.sqlite.jar b/wlauto/workloads/sqlite/com.arm.wlauto.uiauto.sqlite.jar
new file mode 100644
index 00000000..e8b77514
--- /dev/null
+++ b/wlauto/workloads/sqlite/com.arm.wlauto.uiauto.sqlite.jar
Binary files differ
diff --git a/wlauto/workloads/sqlite/uiauto/build.sh b/wlauto/workloads/sqlite/uiauto/build.sh
new file mode 100755
index 00000000..b8bcdf89
--- /dev/null
+++ b/wlauto/workloads/sqlite/uiauto/build.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+class_dir=bin/classes/com/arm/wlauto/uiauto
+base_class=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')"`
+mkdir -p $class_dir
+cp $base_class $class_dir
+
+ant build
+
+if [[ -f bin/com.arm.wlauto.uiauto.sqlite.jar ]]; then
+ cp bin/com.arm.wlauto.uiauto.sqlite.jar ..
+fi
diff --git a/wlauto/workloads/sqlite/uiauto/build.xml b/wlauto/workloads/sqlite/uiauto/build.xml
new file mode 100644
index 00000000..aa324270
--- /dev/null
+++ b/wlauto/workloads/sqlite/uiauto/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="com.arm.wlauto.uiauto.sqlite" default="help">
+
+ <!-- The local.properties file is created and updated by the 'android' tool.
+ It contains the path to the SDK. It should *NOT* be checked into
+ Version Control Systems. -->
+ <property file="local.properties" />
+
+ <!-- The ant.properties file can be created by you. It is only edited by the
+ 'android' tool to add properties to it.
+ This is the place to change some Ant specific build properties.
+ Here are some properties you may want to change/update:
+
+ source.dir
+ The name of the source directory. Default is 'src'.
+ out.dir
+ The name of the output directory. Default is 'bin'.
+
+ For other overridable properties, look at the beginning of the rules
+ files in the SDK, at tools/ant/build.xml
+
+ Properties related to the SDK location or the project target should
+ be updated using the 'android' tool with the 'update' action.
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems.
+
+ -->
+ <property file="ant.properties" />
+
+ <!-- if sdk.dir was not set from one of the property file, then
+ get it from the ANDROID_HOME env var.
+ This must be done before we load project.properties since
+ the proguard config can use sdk.dir -->
+ <property environment="env" />
+ <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+ <isset property="env.ANDROID_HOME" />
+ </condition>
+
+ <!-- The project.properties file is created and updated by the 'android'
+ tool, as well as ADT.
+
+ This contains project specific properties such as project target, and library
+ dependencies. Lower level build properties are stored in ant.properties
+ (or in .classpath for Eclipse projects).
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems. -->
+ <loadproperties srcFile="project.properties" />
+
+ <!-- quick check on sdk.dir -->
+ <fail
+ message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+ unless="sdk.dir"
+ />
+
+ <!--
+ Import per project custom build rules if present at the root of the project.
+ This is the place to put custom intermediary targets such as:
+ -pre-build
+ -pre-compile
+ -post-compile (This is typically used for code obfuscation.
+ Compiled code location: ${out.classes.absolute.dir}
+ If this is not done in place, override ${out.dex.input.absolute.dir})
+ -post-package
+ -post-build
+ -pre-clean
+ -->
+ <import file="custom_rules.xml" optional="true" />
+
+ <!-- Import the actual build file.
+
+ To customize existing targets, there are two options:
+ - Customize only one target:
+ - copy/paste the target into this file, *before* the
+ <import> task.
+ - customize it to your needs.
+ - Customize the whole content of build.xml
+ - copy/paste the content of the rules files (minus the top node)
+ into this file, replacing the <import> task.
+ - customize to your needs.
+
+ ***********************
+ ****** IMPORTANT ******
+ ***********************
+ In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+ in order to avoid having your file be overridden by tools such as "android update project"
+ -->
+ <!-- version-tag: VERSION_TAG -->
+ <import file="${sdk.dir}/tools/ant/uibuild.xml" />
+
+</project>
diff --git a/wlauto/workloads/sqlite/uiauto/project.properties b/wlauto/workloads/sqlite/uiauto/project.properties
new file mode 100644
index 00000000..ce39f2d0
--- /dev/null
+++ b/wlauto/workloads/sqlite/uiauto/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=android-18
diff --git a/wlauto/workloads/sqlite/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java b/wlauto/workloads/sqlite/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
new file mode 100644
index 00000000..c11725e3
--- /dev/null
+++ b/wlauto/workloads/sqlite/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
@@ -0,0 +1,103 @@
+/* Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+package com.arm.wlauto.uiauto.sqlite;
+
+import android.app.Activity;
+import android.os.Bundle;
+import android.util.Log;
+import android.view.KeyEvent;
+
+// Import the uiautomator libraries
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+
+import com.arm.wlauto.uiauto.BaseUiAutomation;
+
+public class UiAutomation extends BaseUiAutomation {
+
+    public static String TAG = "sqlite";
+
+    // Starts the benchmark via its "Start" button, waits (up to 600 seconds)
+    // for the "Stop" button to disappear -- which signals completion -- then
+    // scrapes the on-screen results into logcat for the Python side to parse.
+    public void runUiAutomation() throws Exception {
+        Bundle status = new Bundle();
+        status.putString("product", getUiDevice().getProductName());
+        UiSelector selector = new UiSelector();
+
+        UiObject text_start = new UiObject(selector.text("Start")
+                                                   .className("android.widget.Button"));
+        text_start.click();
+
+        try {
+            UiObject stop_text = new UiObject(selector.textContains("Stop")
+                                                      .className("android.widget.Button"));
+            waitUntilNoObject(stop_text, 600);
+
+            sleep(2);
+            this.extractResults();
+        } finally {
+        }
+    }
+
+    // Walks the results ScrollView row by row, logging "<label> = <value>"
+    // lines (parsed from logcat by Sqlite.update_result). The loop ends when
+    // it reaches the final summary row, identified by its nested
+    // LinearLayout shape and "test_result" resource id.
+    public void extractResults() throws UiObjectNotFoundException{
+        UiSelector selector = new UiSelector();
+        UiScrollable resultList = new UiScrollable(selector.className("android.widget.ScrollView"));
+        resultList.scrollToBeginning(5);
+        selector = resultList.getSelector();
+        int index = 0;
+        while (true){
+            // A child LinearLayout at this index marks the summary row.
+            UiObject lastEntry = new UiObject(selector.childSelector(new UiSelector()
+                                                      .className("android.widget.LinearLayout")
+                                                      .childSelector(new UiSelector()
+                                                      .index(index)
+                                                      .childSelector(new UiSelector()
+                                                      .className("android.widget.LinearLayout")))));
+            if (lastEntry.exists()){
+                UiObject value = new UiObject(selector.childSelector(new UiSelector()
+                                                      .className("android.widget.LinearLayout")
+                                                      .childSelector(new UiSelector()
+                                                      .index(index)
+                                                      .childSelector(new UiSelector()
+                                                      .resourceIdMatches(".*test_result.*")))));
+                Log.v("sqlite", "Overall = " + value.getText().replace("\n", " "));
+                break;
+            }
+
+            // Ordinary rows: child 0 is the label, child 1 is the timing value.
+            UiObject label = new UiObject(selector.childSelector(new UiSelector()
+                                                  .className("android.widget.LinearLayout")
+                                                  .childSelector(new UiSelector()
+                                                  .index(index)
+                                                  .childSelector(new UiSelector()
+                                                  .index(0)))));
+            UiObject value = new UiObject(selector.childSelector(new UiSelector()
+                                                  .className("android.widget.LinearLayout")
+                                                  .childSelector(new UiSelector()
+                                                  .index(index)
+                                                  .childSelector(new UiSelector()
+                                                  .index(1)))));
+            index++;
+            // If the row is not visible yet, scroll and retry the same index
+            // (index is decremented to undo the increment above).
+            if (!label.exists()){
+                resultList.scrollForward();
+                index--;
+                sleep(1);
+                continue;
+            }
+            Log.v("sqlite", label.getText() + " = " + value.getText().replace("\n", " "));
+        }
+    }
+}
diff --git a/wlauto/workloads/sysbench/__init__.py b/wlauto/workloads/sysbench/__init__.py
new file mode 100644
index 00000000..b97a8512
--- /dev/null
+++ b/wlauto/workloads/sysbench/__init__.py
@@ -0,0 +1,111 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=E1101,W0201,E0203
+
+import os
+
+from wlauto import Workload, Parameter, File
+from wlauto.exceptions import WorkloadError
+from wlauto.utils.misc import parse_value
+
+
+class Sysbench(Workload):
+
+ name = 'sysbench'
+ description = """
+ SysBench is a modular, cross-platform and multi-threaded benchmark tool
+ for evaluating OS parameters that are important for a system running a
+ database under intensive load.
+
+ The idea of this benchmark suite is to quickly get an impression about
+ system performance without setting up complex database benchmarks or
+ even without installing a database at all.
+
+ **Features of SysBench**
+
+ * file I/O performance
+ * scheduler performance
+ * memory allocation and transfer speed
+ * POSIX threads implementation performance
+ * database server performance
+
+
+ See: http://sysbench.sourceforge.net/docs/
+
+ """
+
+ parameters = [
+ Parameter('timeout', kind=int, default=300),
+ Parameter('test', kind=str, default='cpu'),
+ Parameter('num_threads', kind=int, default=8),
+ Parameter('max_requests', kind=int, default=2000),
+ ]
+
+ def __init__(self, device, **kwargs):
+ super(Sysbench, self).__init__(device)
+ self.command = self._build_command(test=self.test,
+ num_threads=self.num_threads,
+ max_requests=self.max_requests)
+ self.results_file = self.device.path.join(self.device.working_directory, 'sysbench_result.txt')
+
+ def setup(self, context):
+ self._check_executable(context)
+ self.device.execute('am start -n com.android.browser/.BrowserActivity about:blank ')
+
+ def run(self, context):
+ self.device.execute(self.command, timeout=self.timeout)
+
+ def update_result(self, context):
+ host_results_file = os.path.join(context.output_directory, 'sysbench_result.txt')
+ self.device.pull_file(self.results_file, host_results_file)
+
+ with open(host_results_file) as fh:
+ in_summary = False
+ metric_prefix = ''
+ for line in fh:
+ if line.startswith('Test execution summary:'):
+ in_summary = True
+ elif in_summary:
+ if not line.strip():
+ break # end of summary section
+ parts = [p.strip() for p in line.split(':') if p.strip()]
+ if len(parts) == 2:
+ metric = metric_prefix + parts[0]
+ value, units = parse_value(parts[1])
+ context.result.add_metric(metric, value, units)
+ elif len(parts) == 1:
+ metric_prefix = line.strip() + ' '
+ else:
+ self.logger.warn('Could not parse line: "{}"'.format(line.rstrip('\n')))
+ context.add_iteration_artifact('sysbench_output', kind='raw', path='sysbench_result.txt')
+
+ def teardown(self, context):
+ self.device.execute('am force-stop com.android.browser')
+ self.device.delete_file(self.results_file)
+
+ def _check_executable(self, context):
+ if self.device.is_installed('sysbench'):
+ return
+ path = context.resolver.get(File(owner=self, path='sysbench'))
+ if not path:
+ raise WorkloadError('sysbench binary is not installed on the device, and it does not found in dependencies on the host.')
+ self.device.install(path)
+
+ def _build_command(self, **parameters):
+ param_strings = ['--{}={}'.format(k.replace('_', '-'), v)
+ for k, v in parameters.iteritems()]
+ sysbench_command = 'sysbench {} run'.format(' '.join(param_strings))
+ return 'cd {} && {} > sysbench_result.txt'.format(self.device.working_directory, sysbench_command)
diff --git a/wlauto/workloads/sysbench/sysbench b/wlauto/workloads/sysbench/sysbench
new file mode 100644
index 00000000..094cad31
--- /dev/null
+++ b/wlauto/workloads/sysbench/sysbench
Binary files differ
diff --git a/wlauto/workloads/templerun/__init__.py b/wlauto/workloads/templerun/__init__.py
new file mode 100644
index 00000000..f967a49d
--- /dev/null
+++ b/wlauto/workloads/templerun/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto import GameWorkload
+
+
+class Templerun(GameWorkload):
+
+    name = 'templerun'
+    description = """
+    Templerun game.
+
+    """
+    # Gameplay is replayed from pre-recorded revent files (revent_files/);
+    # only the package/activity and install timeout are defined here.
+    package = 'com.imangi.templerun'
+    activity = 'com.unity3d.player.UnityPlayerProxyActivity'
+    install_timeout = 500  # large APK; allow extra time for installation
diff --git a/wlauto/workloads/templerun/revent_files/.empty b/wlauto/workloads/templerun/revent_files/.empty
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/wlauto/workloads/templerun/revent_files/.empty
diff --git a/wlauto/workloads/templerun/revent_files/Nexus10.run.revent b/wlauto/workloads/templerun/revent_files/Nexus10.run.revent
new file mode 100644
index 00000000..c1019cf7
--- /dev/null
+++ b/wlauto/workloads/templerun/revent_files/Nexus10.run.revent
Binary files differ
diff --git a/wlauto/workloads/templerun/revent_files/Nexus10.setup.revent b/wlauto/workloads/templerun/revent_files/Nexus10.setup.revent
new file mode 100644
index 00000000..90b1cb43
--- /dev/null
+++ b/wlauto/workloads/templerun/revent_files/Nexus10.setup.revent
Binary files differ
diff --git a/wlauto/workloads/thechase/__init__.py b/wlauto/workloads/thechase/__init__.py
new file mode 100755
index 00000000..48c94efa
--- /dev/null
+++ b/wlauto/workloads/thechase/__init__.py
@@ -0,0 +1,46 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=E1101
+import time
+
+from wlauto import ApkWorkload, Parameter
+
+
+class TheChase(ApkWorkload):
+
+ name = 'thechase'
+ description = """
+ The Chase demo showcasing the capabilities of Unity game engine.
+
+ This demo, is a static video-like game demo, that demonstrates advanced features
+ of the unity game engine. It loops continuously until terminated.
+
+ """
+
+ package = 'com.unity3d.TheChase'
+ activity = 'com.unity3d.player.UnityPlayerNativeActivity'
+ install_timeout = 200
+ view = 'SurfaceView'
+
+ parameters = [
+ Parameter('duration', kind=int, default=70,
+ description=('Duration, in seconds, note that the demo loops the same (roughly) 60 '
+ 'second sceene until stopped.')),
+ ]
+
+ def run(self, context):
+ time.sleep(self.duration)
+
diff --git a/wlauto/workloads/truckerparking3d/__init__.py b/wlauto/workloads/truckerparking3d/__init__.py
new file mode 100644
index 00000000..8180d4fd
--- /dev/null
+++ b/wlauto/workloads/truckerparking3d/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto import GameWorkload
+
+
+class TruckerParking3D(GameWorkload):
+
+ name = 'truckerparking3d'
+ description = """
+ Trucker Parking 3D game.
+
+ (yes, apparently that's a thing...)
+ """
+ package = 'com.tapinator.truck.parking.bus3d'
+ activity = 'com.tapinator.truck.parking.bus3d.GCMNotificationActivity'
diff --git a/wlauto/workloads/vellamo/__init__.py b/wlauto/workloads/vellamo/__init__.py
new file mode 100644
index 00000000..1c70a958
--- /dev/null
+++ b/wlauto/workloads/vellamo/__init__.py
@@ -0,0 +1,215 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import logging
+from HTMLParser import HTMLParser
+from collections import defaultdict, OrderedDict
+
+from wlauto import AndroidUiAutoBenchmark, Parameter
+from wlauto.utils.types import list_of_strs, numeric
+from wlauto.exceptions import WorkloadError
+
+
+#pylint: disable=no-member
+class Vellamo(AndroidUiAutoBenchmark):
+
+ name = 'vellamo'
+ description = """
+ Android benchmark designed by Qualcomm.
+
+ Vellamo began as a mobile web benchmarking tool that today has expanded
+ to include three primary chapters. The Browser Chapter evaluates mobile
+ web browser performance, the Multicore chapter measures the synergy of
+ multiple CPU cores, and the Metal Chapter measures the CPU subsystem
+ performance of mobile processors. Through click-and-go test suites,
+ organized by chapter, Vellamo is designed to evaluate: UX, 3D graphics,
+ and memory read/write and peak bandwidth performance, and much more!
+
+ Note: Vellamo v3.0 fails to run on Juno
+
+ """
+ package = 'com.quicinc.vellamo'
+ run_timeout = 15 * 60
+ benchmark_types = {
+ '2.0.3': ['html5', 'metal'],
+ '3.0': ['Browser', 'Metal', 'Multi'],
+ }
+ valid_versions = benchmark_types.keys()
+ summary_metrics = None
+
+ parameters = [
+ Parameter('version', kind=str, allowed_values=valid_versions, default=sorted(benchmark_types, reverse=True)[0],
+ description=('Specify the version of Vellamo to be run. '
+ 'If not specified, the latest available version will be used.')),
+ Parameter('benchmarks', kind=list_of_strs, allowed_values=benchmark_types['3.0'], default=benchmark_types['3.0'],
+ description=('Specify which benchmark sections of Vellamo to be run. Only valid on version 3.0 and newer.'
+                               '\nNOTE: Browser benchmark can be problematic and seem to hang, '
+                               'just wait and it will progress after ~5 minutes')),
+ Parameter('browser', kind=int, default=1,
+ description=('Specify which of the installed browsers will be used for the tests. The number refers to '
+ 'the order in which browsers are listed by Vellamo. E.g. ``1`` will select the first browser '
+ 'listed, ``2`` -- the second, etc. Only valid for version ``3.0``.'))
+ ]
+
+ def __init__(self, device, **kwargs):
+ super(Vellamo, self).__init__(device, **kwargs)
+ if self.version == '2.0.3':
+ self.activity = 'com.quicinc.vellamo.VellamoActivity'
+ if self.version == '3.0':
+ self.activity = 'com.quicinc.vellamo.main.MainActivity'
+ self.summary_metrics = self.benchmark_types[self.version]
+
+ def setup(self, context):
+ self.uiauto_params['version'] = self.version
+ self.uiauto_params['browserToUse'] = self.browser
+ self.uiauto_params['metal'] = 'Metal' in self.benchmarks
+ self.uiauto_params['browser'] = 'Browser' in self.benchmarks
+ self.uiauto_params['multicore'] = 'Multi' in self.benchmarks
+ super(Vellamo, self).setup(context)
+
+ def validate(self):
+ super(Vellamo, self).validate()
+ if self.version == '2.0.3' or not self.benchmarks or self.benchmarks == []: # pylint: disable=access-member-before-definition
+ self.benchmarks = self.benchmark_types[self.version] # pylint: disable=attribute-defined-outside-init
+ else:
+ for benchmark in self.benchmarks:
+ if benchmark not in self.benchmark_types[self.version]:
+ raise WorkloadError('Version {} does not support {} benchmarks'.format(self.version, benchmark))
+
+ def update_result(self, context):
+ super(Vellamo, self).update_result(context)
+
+ # Get total scores from logcat
+ self.non_root_update_result(context)
+
+ if not self.device.is_rooted:
+ return
+
+ for test in self.benchmarks: # Get all scores from HTML files
+ filename = None
+ if test == "Browser":
+ result_folder = self.device.path.join(self.device.package_data_directory, self.package, 'files')
+ for result_file in self.device.listdir(result_folder, as_root=True):
+ if result_file.startswith("Browser"):
+ filename = result_file
+ else:
+ filename = '{}_results.html'.format(test)
+
+ device_file = self.device.path.join(self.device.package_data_directory, self.package, 'files', filename)
+ host_file = os.path.join(context.output_directory, filename)
+ self.device.pull_file(device_file, host_file, as_root=True)
+ with open(host_file) as fh:
+ parser = VellamoResultParser()
+ parser.feed(fh.read())
+ for benchmark in parser.benchmarks:
+ benchmark.name = benchmark.name.replace(' ', '_')
+ context.result.add_metric('{}_Total'.format(benchmark.name), benchmark.score)
+ for name, score in benchmark.metrics.items():
+ name = name.replace(' ', '_')
+ context.result.add_metric('{}_{}'.format(benchmark.name, name), score)
+ context.add_iteration_artifact('vellamo_output', kind='raw', path=filename)
+
+ def non_root_update_result(self, context):
+ failed = []
+ with open(self.logcat_log) as logcat:
+ metrics = OrderedDict()
+ for line in logcat:
+ if 'VELLAMO RESULT:' in line:
+ info = line.split(':')
+ parts = info[2].split(" ")
+ metric = parts[1].strip()
+ value = int(parts[2].strip())
+ metrics[metric] = value
+ if 'VELLAMO ERROR:' in line:
+ self.logger.warning("Browser crashed during benchmark, results may not be accurate")
+ for key, value in metrics.iteritems():
+ key = key.replace(' ', '_')
+ context.result.add_metric(key, value)
+ if value == 0:
+ failed.append(key)
+ if failed:
+ raise WorkloadError("The following benchmark groups failed: {}".format(", ".join(failed)))
+
+
+class VellamoResult(object):
+
+ def __init__(self, name):
+ self.name = name
+ self.score = None
+ self.metrics = {}
+
+ def add_metric(self, data):
+ split_data = data.split(":")
+ name = split_data[0].strip()
+ score = split_data[1].strip()
+
+ if name in self.metrics:
+ raise KeyError("A metric of that name is already present")
+ self.metrics[name] = float(score)
+
+
+class VellamoResultParser(HTMLParser):
+
+ class StopParsingException(Exception):
+ pass
+
+ def __init__(self):
+ HTMLParser.__init__(self)
+ self.inside_div = False
+ self.inside_span = 0
+ self.inside_li = False
+ self.got_data = False
+ self.failed = False
+ self.benchmarks = []
+
+ def feed(self, text):
+ try:
+ HTMLParser.feed(self, text)
+ except self.StopParsingException:
+ pass
+
+ def handle_starttag(self, tag, attrs):
+ if tag == 'div':
+ self.inside_div = True
+ if tag == 'span':
+ self.inside_span += 1
+ if tag == 'li':
+ self.inside_li = True
+
+ def handle_endtag(self, tag):
+ if tag == 'div':
+ self.inside_div = False
+ self.inside_span = 0
+ self.got_data = False
+ self.failed = False
+ if tag == 'li':
+ self.inside_li = False
+
+ def handle_data(self, data):
+ if self.inside_div and not self.failed:
+ if "Problem" in data:
+ self.failed = True
+ elif self.inside_span == 1:
+ self.benchmarks.append(VellamoResult(data))
+ elif self.inside_span == 3 and not self.got_data:
+ self.benchmarks[-1].score = int(data)
+ self.got_data = True
+ elif self.inside_li and self.got_data:
+ if 'failed' not in data:
+ self.benchmarks[-1].add_metric(data)
+ else:
+ self.failed = True
+
diff --git a/wlauto/workloads/vellamo/com.arm.wlauto.uiauto.vellamo.jar b/wlauto/workloads/vellamo/com.arm.wlauto.uiauto.vellamo.jar
new file mode 100644
index 00000000..9e71e7eb
--- /dev/null
+++ b/wlauto/workloads/vellamo/com.arm.wlauto.uiauto.vellamo.jar
Binary files differ
diff --git a/wlauto/workloads/vellamo/uiauto/build.sh b/wlauto/workloads/vellamo/uiauto/build.sh
new file mode 100755
index 00000000..4c3ad807
--- /dev/null
+++ b/wlauto/workloads/vellamo/uiauto/build.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+class_dir=bin/classes/com/arm/wlauto/uiauto
+base_class=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')"`
+mkdir -p $class_dir
+cp $base_class $class_dir
+
+ant build
+
+if [[ -f bin/com.arm.wlauto.uiauto.vellamo.jar ]]; then
+ cp bin/com.arm.wlauto.uiauto.vellamo.jar ..
+fi
diff --git a/wlauto/workloads/vellamo/uiauto/build.xml b/wlauto/workloads/vellamo/uiauto/build.xml
new file mode 100644
index 00000000..c137d62d
--- /dev/null
+++ b/wlauto/workloads/vellamo/uiauto/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="com.arm.wlauto.uiauto.vellamo" default="help">
+
+ <!-- The local.properties file is created and updated by the 'android' tool.
+ It contains the path to the SDK. It should *NOT* be checked into
+ Version Control Systems. -->
+ <property file="local.properties" />
+
+ <!-- The ant.properties file can be created by you. It is only edited by the
+ 'android' tool to add properties to it.
+ This is the place to change some Ant specific build properties.
+ Here are some properties you may want to change/update:
+
+ source.dir
+ The name of the source directory. Default is 'src'.
+ out.dir
+ The name of the output directory. Default is 'bin'.
+
+ For other overridable properties, look at the beginning of the rules
+ files in the SDK, at tools/ant/build.xml
+
+ Properties related to the SDK location or the project target should
+ be updated using the 'android' tool with the 'update' action.
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems.
+
+ -->
+ <property file="ant.properties" />
+
+ <!-- if sdk.dir was not set from one of the property file, then
+ get it from the ANDROID_HOME env var.
+ This must be done before we load project.properties since
+ the proguard config can use sdk.dir -->
+ <property environment="env" />
+ <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+ <isset property="env.ANDROID_HOME" />
+ </condition>
+
+ <!-- The project.properties file is created and updated by the 'android'
+ tool, as well as ADT.
+
+ This contains project specific properties such as project target, and library
+ dependencies. Lower level build properties are stored in ant.properties
+ (or in .classpath for Eclipse projects).
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems. -->
+ <loadproperties srcFile="project.properties" />
+
+ <!-- quick check on sdk.dir -->
+ <fail
+ message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+ unless="sdk.dir"
+ />
+
+ <!--
+ Import per project custom build rules if present at the root of the project.
+ This is the place to put custom intermediary targets such as:
+ -pre-build
+ -pre-compile
+ -post-compile (This is typically used for code obfuscation.
+ Compiled code location: ${out.classes.absolute.dir}
+ If this is not done in place, override ${out.dex.input.absolute.dir})
+ -post-package
+ -post-build
+ -pre-clean
+ -->
+ <import file="custom_rules.xml" optional="true" />
+
+ <!-- Import the actual build file.
+
+ To customize existing targets, there are two options:
+ - Customize only one target:
+ - copy/paste the target into this file, *before* the
+ <import> task.
+ - customize it to your needs.
+ - Customize the whole content of build.xml
+ - copy/paste the content of the rules files (minus the top node)
+ into this file, replacing the <import> task.
+ - customize to your needs.
+
+ ***********************
+ ****** IMPORTANT ******
+ ***********************
+ In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+ in order to avoid having your file be overridden by tools such as "android update project"
+ -->
+ <!-- version-tag: VERSION_TAG -->
+ <import file="${sdk.dir}/tools/ant/uibuild.xml" />
+
+</project>
diff --git a/wlauto/workloads/vellamo/uiauto/project.properties b/wlauto/workloads/vellamo/uiauto/project.properties
new file mode 100644
index 00000000..ce39f2d0
--- /dev/null
+++ b/wlauto/workloads/vellamo/uiauto/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=android-18
diff --git a/wlauto/workloads/vellamo/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java b/wlauto/workloads/vellamo/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
new file mode 100644
index 00000000..a49a18ee
--- /dev/null
+++ b/wlauto/workloads/vellamo/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
@@ -0,0 +1,260 @@
+/* Copyright 2014-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+package com.arm.wlauto.uiauto.vellamo;
+
+import android.app.Activity;
+import android.os.Bundle;
+import android.util.Log;
+import android.view.KeyEvent;
+import java.util.concurrent.TimeUnit;
+import java.util.ArrayList;
+
+// Import the uiautomator libraries
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.core.UiDevice;
+import com.android.uiautomator.core.UiWatcher;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+
+import com.arm.wlauto.uiauto.BaseUiAutomation;
+
+public class UiAutomation extends BaseUiAutomation {
+
+ public static String TAG = "vellamo";
+ public static ArrayList<String> scores = new ArrayList();
+ public static Boolean wasError = false;
+
+ public void runUiAutomation() throws Exception {
+ Bundle parameters = getParams();
+ String version = parameters.getString("version");
+ Boolean browser = Boolean.parseBoolean(parameters.getString("browser"));
+ Boolean metal = Boolean.parseBoolean(parameters.getString("metal"));
+ Boolean multicore = Boolean.parseBoolean(parameters.getString("multicore"));
+ Integer browserToUse = Integer.parseInt(parameters.getString("browserToUse")) - 1;
+
+ dismissEULA();
+
+ if (version.equals("2.0.3")) {
+ dissmissWelcomebanner();
+ startTest();
+ dismissNetworkConnectionDialogIfNecessary();
+ dismissExplanationDialogIfNecessary();
+ waitForTestCompletion(15 * 60, "com.quicinc.vellamo:id/act_ba_results_btn_no");
+ getScore("html5", "com.quicinc.vellamo:id/act_ba_results_img_0");
+ getScore("metal", "com.quicinc.vellamo:id/act_ba_results_img_1");
+ }
+
+ else {
+ dismissLetsRoll();
+ if (browser) {
+ startBrowserTest(browserToUse);
+ proccessTest("Browser");
+ }
+ if (multicore) {
+ startTestV3(1);
+ proccessTest("Multicore");
+
+ }
+ if (metal) {
+ startTestV3(2);
+ proccessTest("Metal");
+ }
+ }
+ for(String result : scores){
+ Log.v(TAG, String.format("VELLAMO RESULT: %s", result));
+ }
+ if (wasError) Log.v("vellamoWatcher", "VELLAMO ERROR: Something crashed while running browser benchmark");
+ }
+
+ public void startTest() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject runButton = new UiObject(selector.textContains("Run All Chapters"));
+
+ if (!runButton.waitForExists(TimeUnit.SECONDS.toMillis(5))) {
+ UiObject pager = new UiObject(selector.className("android.support.v4.view.ViewPager"));
+ pager.swipeLeft(2);
+ if (!runButton.exists()) {
+ throw new UiObjectNotFoundException("Could not find \"Run All Chapters\" button.");
+ }
+ }
+ runButton.click();
+ }
+
+ public void startBrowserTest(int browserToUse) throws Exception {
+ //Ensure chrome is selected as "browser" fails to run the benchmark
+ UiSelector selector = new UiSelector();
+ UiObject browserToUseButton = new UiObject(selector.className("android.widget.ImageButton")
+ .longClickable(true).instance(browserToUse));
+ UiObject browserButton = new UiObject(selector.className("android.widget.ImageButton")
+ .longClickable(true).selected(true));
+ //Disable browsers
+ while(browserButton.exists()) browserButton.click();
+ if (browserToUseButton.waitForExists(TimeUnit.SECONDS.toMillis(5))) {
+ if (browserToUseButton.exists()) {
+ browserToUseButton.click();
+ }
+ }
+
+ //enable a watcher to dismiss browser dialogs
+ UiWatcher stoppedWorkingDialogWatcher = new UiWatcher() {
+ @Override
+ public boolean checkForCondition() {
+ UiObject stoppedWorkingDialog = new UiObject(new UiSelector().textStartsWith("Unfortunately"));
+ if(stoppedWorkingDialog.exists()){
+ wasError = true;
+ UiObject okButton = new UiObject(new UiSelector().className("android.widget.Button").text("OK"));
+ try {
+ okButton.click();
+ } catch (UiObjectNotFoundException e) {
+                    // OK button vanished before we could click it; the crash was already flagged via wasError
+ e.printStackTrace();
+ }
+ return (stoppedWorkingDialog.waitUntilGone(25000));
+ }
+ return false;
+ }
+ };
+ // Register watcher
+ UiDevice.getInstance().registerWatcher("stoppedWorkingDialogWatcher", stoppedWorkingDialogWatcher);
+
+ // Run watcher
+ UiDevice.getInstance().runWatchers();
+
+ startTestV3(0);
+ }
+
+ public void startTestV3(int run) throws Exception {
+ UiSelector selector = new UiSelector();
+
+ UiObject thirdRunButton = new UiObject(selector.resourceId("com.quicinc.vellamo:id/card_launcher_run_button").instance(run));
+ if (!thirdRunButton.waitForExists(TimeUnit.SECONDS.toMillis(5))) {
+ if (!thirdRunButton.exists()) {
+ throw new UiObjectNotFoundException("Could not find three \"Run\" buttons.");
+ }
+ }
+
+ //Run benchmarks
+ UiObject runButton = new UiObject(selector.resourceId("com.quicinc.vellamo:id/card_launcher_run_button").instance(run));
+ if (!runButton.waitForExists(TimeUnit.SECONDS.toMillis(5))) {
+ if (!runButton.exists()) {
+ throw new UiObjectNotFoundException("Could not find correct \"Run\" button.");
+ }
+ }
+ runButton.click();
+
+ //Skip tutorial screens
+ UiObject swipeScreen = new UiObject(selector.textContains("Swipe left to continue"));
+ if (!swipeScreen.waitForExists(TimeUnit.SECONDS.toMillis(5))) {
+ if (!swipeScreen.exists()) {
+ throw new UiObjectNotFoundException("Could not find \"Swipe screen\".");
+ }
+ }
+ sleep(1);
+ swipeScreen.swipeLeft(2);
+ sleep(1);
+ swipeScreen.swipeLeft(2);
+
+ }
+
+ public void proccessTest(String metric) throws Exception{
+ waitForTestCompletion(15 * 60, "com.quicinc.vellamo:id/button_no");
+
+ //Remove watcher
+ UiDevice.getInstance().removeWatcher("stoppedWorkingDialogWatcher");
+
+ getScore(metric, "com.quicinc.vellamo:id/card_score_score");
+ getUiDevice().pressBack();
+ getUiDevice().pressBack();
+ getUiDevice().pressBack();
+ }
+
+ public void getScore(String metric, String resourceID) throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject score = new UiObject(selector.resourceId(resourceID));
+ if (!score.waitForExists(TimeUnit.SECONDS.toMillis(5))) {
+ if (!score.exists()) {
+ throw new UiObjectNotFoundException("Could not find score on screen.");
+ }
+ }
+ scores.add(metric + " " + score.getText().trim());
+ }
+
+ public void waitForTestCompletion(int timeout, String resourceID) throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject resultsNoButton = new UiObject(selector.resourceId(resourceID));
+ if (!resultsNoButton.waitForExists(TimeUnit.SECONDS.toMillis(timeout))) {
+ throw new UiObjectNotFoundException("Did not see results screen.");
+ }
+
+ }
+
+ public void dismissEULA() throws Exception {
+ UiSelector selector = new UiSelector();
+ waitText("Vellamo EULA");
+ UiObject acceptButton = new UiObject(selector.text("Accept")
+ .className("android.widget.Button"));
+ if (acceptButton.exists()) {
+ acceptButton.click();
+ }
+ }
+
+ public void dissmissWelcomebanner() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject welcomeBanner = new UiObject(selector.textContains("WELCOME"));
+ if (welcomeBanner.waitForExists(TimeUnit.SECONDS.toMillis(5))) {
+ UiObject pager = new UiObject(selector.className("android.support.v4.view.ViewPager"));
+ pager.swipeLeft(2);
+ pager.swipeLeft(2);
+ }
+ }
+
+ public void dismissLetsRoll() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject letsRollButton = new UiObject(selector.className("android.widget.Button")
+ .textContains("Let's Roll"));
+ if (!letsRollButton.waitForExists(TimeUnit.SECONDS.toMillis(5))) {
+ if (!letsRollButton.exists()) {
+ throw new UiObjectNotFoundException("Could not find \"Let's Roll\" button.");
+ }
+ }
+ letsRollButton.click();
+ }
+
+ public void dismissNetworkConnectionDialogIfNecessary() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject dialog = new UiObject(selector.className("android.widget.TextView")
+ .textContains("No Network Connection"));
+ if (dialog.exists()) {
+ UiObject yesButton = new UiObject(selector.className("android.widget.Button")
+ .text("Yes"));
+ yesButton.click();
+ }
+ }
+
+ public void dismissExplanationDialogIfNecessary() throws Exception {
+ UiSelector selector = new UiSelector();
+ UiObject dialog = new UiObject(selector.className("android.widget.TextView")
+ .textContains("Benchmarks Explanation"));
+ if (dialog.exists()) {
+ UiObject noButton = new UiObject(selector.className("android.widget.Button")
+ .text("No"));
+ noButton.click();
+ }
+ }
+}
diff --git a/wlauto/workloads/video/__init__.py b/wlauto/workloads/video/__init__.py
new file mode 100644
index 00000000..711a7b82
--- /dev/null
+++ b/wlauto/workloads/video/__init__.py
@@ -0,0 +1,137 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=E1101,E0203,W0201
+
+import os
+import time
+import urllib
+from collections import defaultdict
+
+from wlauto import Workload, settings, Parameter, Alias
+from wlauto.exceptions import ConfigError, WorkloadError
+from wlauto.utils.misc import ensure_directory_exists as _d
+from wlauto.utils.types import boolean
+
+DOWNLOAD_URLS = {
+ '1080p': 'http://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_1080p_surround.avi',
+ '720p': 'http://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_720p_surround.avi',
+ '480p': 'http://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_480p_surround-fix.avi'
+}
+
+
+class VideoWorkload(Workload):
+ name = 'video'
+ description = """
+ Plays a video file using the standard android video player for a predetermined duration.
+
+ The video can be specified either using ``resolution`` workload parameter, in which case
+ `Big Buck Bunny`_ MP4 video of that resolution will be downloaded and used, or using
+ ``filename`` parameter, in which case the video file specified will be used.
+
+
+ .. _Big Buck Bunny: http://www.bigbuckbunny.org/
+
+ """
+
+ parameters = [
+ Parameter('play_duration', kind=int, default=20,
+                  description='Playback duration of the video file. This becomes the duration of the workload.'),
+ Parameter('resolution', default='720p', allowed_values=['480p', '720p', '1080p'],
+ description='Specifies which resolution video file to play.'),
+ Parameter('filename',
+ description="""
+ The name of the video file to play. This can be either a path
+ to the file anywhere on your file system, or it could be just a
+ name, in which case, the workload will look for it in
+ ``~/.workloads_automation/dependency/video``
+ *Note*: either resolution or filename should be specified, but not both!
+ """),
+ Parameter('force_dependency_push', kind=boolean, default=False,
+ description="""
+ If true, video will always be pushed to device, regardless
+ of whether the file is already on the device. Default is ``False``.
+ """),
+ ]
+
+ aliases = [
+ Alias('video_720p', resolution='720p'),
+ Alias('video_1080p', resolution='1080p'),
+ ]
+
+ @property
+ def host_video_file(self):
+ if not self._selected_file:
+ if self.filename:
+ if self.filename[0] in './' or len(self.filename) > 1 and self.filename[1] == ':':
+ filepath = os.path.abspath(self.filename)
+ else:
+ filepath = os.path.join(self.video_directory, self.filename)
+ if not os.path.isfile(filepath):
+ raise WorkloadError('{} does not exist.'.format(filepath))
+ self._selected_file = filepath
+ else:
+ files = self.video_files[self.resolution]
+ if not files:
+ url = DOWNLOAD_URLS[self.resolution]
+ filepath = os.path.join(self.video_directory, os.path.basename(url))
+ self.logger.debug('Downloading {}...'.format(filepath))
+ urllib.urlretrieve(url, filepath)
+ self._selected_file = filepath
+ else:
+ self._selected_file = files[0]
+ if len(files) > 1:
+ self.logger.warn('Multiple files for 720p found. Using {}.'.format(self._selected_file))
+                        self.logger.warn('Use \'filename\' parameter instead of \'resolution\' to specify a different file.')
+ return self._selected_file
+
+ def init_resources(self, context):
+ self.video_directory = _d(os.path.join(settings.dependencies_directory, 'video'))
+ self.video_files = defaultdict(list)
+ self.enum_video_files()
+ self._selected_file = None
+
+ def setup(self, context):
+ on_device_video_file = os.path.join(self.device.working_directory, os.path.basename(self.host_video_file))
+ if self.force_dependency_push or not self.device.file_exists(on_device_video_file):
+ self.logger.debug('Copying {} to device.'.format(self.host_video_file))
+ self.device.push_file(self.host_video_file, on_device_video_file, timeout=120)
+ self.device.execute('am start -n com.android.browser/.BrowserActivity about:blank')
+ time.sleep(5)
+ self.device.execute('am force-stop com.android.browser')
+ time.sleep(5)
+ self.device.clear_logcat()
+ command = 'am start -W -S -n com.android.gallery3d/.app.MovieActivity -d {}'.format(on_device_video_file)
+ self.device.execute(command)
+
+ def run(self, context):
+ time.sleep(self.play_duration)
+
+ def update_result(self, context):
+ self.device.execute('am force-stop com.android.gallery3d')
+
+ def teardown(self, context):
+ pass
+
+ def validate(self):
+ if (self.resolution and self.filename) and (self.resolution != self.parameters['resolution'].default):
+            raise ConfigError('Either resolution *or* filename must be specified, but not both.')
+
+ def enum_video_files(self):
+ for filename in os.listdir(self.video_directory):
+ for resolution in self.parameters['resolution'].allowed_values:
+ if resolution in filename:
+ self.video_files[resolution].append(os.path.join(self.video_directory, filename))
+
diff --git a/wlauto/workloads/videostreaming/__init__.py b/wlauto/workloads/videostreaming/__init__.py
new file mode 100644
index 00000000..1374e67b
--- /dev/null
+++ b/wlauto/workloads/videostreaming/__init__.py
@@ -0,0 +1,73 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=E1101,E0203,W0201
+import os
+
+from wlauto import AndroidUiAutoBenchmark, Parameter
+import wlauto.common.android.resources
+
+
+class Videostreaming(AndroidUiAutoBenchmark):
+ name = 'videostreaming'
+ description = """
+ Uses the FREEdi video player to search, stream and play the specified
+ video content from YouTube.
+
+ """
+ name = 'videostreaming'
+ package = 'tw.com.freedi.youtube.player'
+ activity = '.MainActivity'
+
+ parameters = [
+ Parameter('video_name', kind=str,
+ description='Name of the video to be played.'),
+ Parameter('resolution', kind=str, default='320p', allowed_values=['320p', '720p', '1080p'],
+                  description='Resolution of the video to be played. If video_name is set, '
+                              'this setting will be ignored.'),
+ Parameter('sampling_interval', kind=int, default=20,
+ description="""
+                      Time interval, in seconds, at which the status of the video playback is
+                      to be monitored. The elapsed time of the video playback is
+                      checked after every ``sampling_interval`` seconds and
+ compared against the actual time elapsed and the previous
+                      sampling point. If the video elapsed time is less than
+                      (sampling time - ``tolerance``), then the playback is aborted as
+ the video has not been playing continuously.
+ """),
+ Parameter('tolerance', kind=int, default=3,
+ description="""
+ Specifies the amount, in seconds, by which sampling time is
+ allowed to deviate from elapsed video playback time. If the delta
+ is greater than this value (which could happen due to poor network
+ connection), workload result will be invalidated.
+ """),
+ Parameter('run_timeout', kind=int, default=200,
+                  description='The duration in seconds for which to play the video.'),
+ ]
+
+ def init_resources(self, context):
+ self.uiauto_params['tolerance'] = self.tolerance
+ self.uiauto_params['sampling_interval'] = self.sampling_interval
+ if self.video_name and self.video_name != "":
+ self.uiauto_params['video_name'] = self.video_name.replace(" ", "0space0") # hack to get around uiautomator limitation
+ else:
+ self.uiauto_params['video_name'] = "abkk sathe {}".format(self.resolution).replace(" ", "0space0")
+ self.apk_file = context.resolver.get(wlauto.common.android.resources.ApkFile(self))
+ self.uiauto_file = context.resolver.get(wlauto.common.android.resources.JarFile(self))
+ self.device_uiauto_file = self.device.path.join(self.device.working_directory,
+ os.path.basename(self.uiauto_file))
+ if not self.uiauto_package:
+ self.uiauto_package = os.path.splitext(os.path.basename(self.uiauto_file))[0]
diff --git a/wlauto/workloads/videostreaming/com.arm.wlauto.uiauto.videostreaming.jar b/wlauto/workloads/videostreaming/com.arm.wlauto.uiauto.videostreaming.jar
new file mode 100644
index 00000000..beb6790b
--- /dev/null
+++ b/wlauto/workloads/videostreaming/com.arm.wlauto.uiauto.videostreaming.jar
Binary files differ
diff --git a/wlauto/workloads/videostreaming/uiauto/build.sh b/wlauto/workloads/videostreaming/uiauto/build.sh
new file mode 100755
index 00000000..07e2131e
--- /dev/null
+++ b/wlauto/workloads/videostreaming/uiauto/build.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Builds the uiautomator JAR for the videostreaming workload and copies it
+# into the workload directory.
+
+# Locate the shared BaseUiAutomation.class shipped with the installed
+# wlauto package and stage it where ant expects compiled classes.
+# Note: paths are quoted -- the Python installation may live under a
+# directory containing spaces.
+class_dir=bin/classes/com/arm/wlauto/uiauto
+base_class=$(python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')")
+mkdir -p "$class_dir"
+cp "$base_class" "$class_dir"
+
+ant build
+
+# Only publish the JAR if the ant build actually produced it.
+if [[ -f bin/com.arm.wlauto.uiauto.videostreaming.jar ]]; then
+ cp bin/com.arm.wlauto.uiauto.videostreaming.jar ..
+fi
diff --git a/wlauto/workloads/videostreaming/uiauto/build.xml b/wlauto/workloads/videostreaming/uiauto/build.xml
new file mode 100644
index 00000000..e897fec2
--- /dev/null
+++ b/wlauto/workloads/videostreaming/uiauto/build.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="com.arm.wlauto.uiauto.videostreaming" default="help">
+
+ <!-- The local.properties file is created and updated by the 'android' tool.
+ It contains the path to the SDK. It should *NOT* be checked into
+ Version Control Systems. -->
+ <property file="local.properties" />
+
+ <!-- The ant.properties file can be created by you. It is only edited by the
+ 'android' tool to add properties to it.
+ This is the place to change some Ant specific build properties.
+ Here are some properties you may want to change/update:
+
+ source.dir
+ The name of the source directory. Default is 'src'.
+ out.dir
+ The name of the output directory. Default is 'bin'.
+
+ For other overridable properties, look at the beginning of the rules
+ files in the SDK, at tools/ant/build.xml
+
+ Properties related to the SDK location or the project target should
+ be updated using the 'android' tool with the 'update' action.
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems.
+
+ -->
+ <property file="ant.properties" />
+
+ <!-- if sdk.dir was not set from one of the property file, then
+ get it from the ANDROID_HOME env var.
+ This must be done before we load project.properties since
+ the proguard config can use sdk.dir -->
+ <property environment="env" />
+ <condition property="sdk.dir" value="${env.ANDROID_HOME}">
+ <isset property="env.ANDROID_HOME" />
+ </condition>
+
+ <!-- The project.properties file is created and updated by the 'android'
+ tool, as well as ADT.
+
+ This contains project specific properties such as project target, and library
+ dependencies. Lower level build properties are stored in ant.properties
+ (or in .classpath for Eclipse projects).
+
+ This file is an integral part of the build system for your
+ application and should be checked into Version Control Systems. -->
+ <loadproperties srcFile="project.properties" />
+
+ <!-- quick check on sdk.dir -->
+ <fail
+ message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
+ unless="sdk.dir"
+ />
+
+ <!--
+ Import per project custom build rules if present at the root of the project.
+ This is the place to put custom intermediary targets such as:
+ -pre-build
+ -pre-compile
+ -post-compile (This is typically used for code obfuscation.
+ Compiled code location: ${out.classes.absolute.dir}
+ If this is not done in place, override ${out.dex.input.absolute.dir})
+ -post-package
+ -post-build
+ -pre-clean
+ -->
+ <import file="custom_rules.xml" optional="true" />
+
+ <!-- Import the actual build file.
+
+ To customize existing targets, there are two options:
+ - Customize only one target:
+ - copy/paste the target into this file, *before* the
+ <import> task.
+ - customize it to your needs.
+ - Customize the whole content of build.xml
+ - copy/paste the content of the rules files (minus the top node)
+ into this file, replacing the <import> task.
+ - customize to your needs.
+
+ ***********************
+ ****** IMPORTANT ******
+ ***********************
+ In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
+ in order to avoid having your file be overridden by tools such as "android update project"
+ -->
+ <!-- version-tag: VERSION_TAG -->
+ <import file="${sdk.dir}/tools/ant/uibuild.xml" />
+
+</project>
diff --git a/wlauto/workloads/videostreaming/uiauto/project.properties b/wlauto/workloads/videostreaming/uiauto/project.properties
new file mode 100644
index 00000000..ce39f2d0
--- /dev/null
+++ b/wlauto/workloads/videostreaming/uiauto/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=android-18
diff --git a/wlauto/workloads/videostreaming/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java b/wlauto/workloads/videostreaming/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
new file mode 100644
index 00000000..e532b499
--- /dev/null
+++ b/wlauto/workloads/videostreaming/uiauto/src/com/arm/wlauto/uiauto/UiAutomation.java
@@ -0,0 +1,155 @@
+/* Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+package com.arm.wlauto.uiauto.videostreaming;
+
+import android.app.Activity;
+import java.util.Date;
+import android.os.Bundle;
+import java.util.concurrent.TimeUnit;
+
+// Import the uiautomator libraries
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+
+import com.arm.wlauto.uiauto.BaseUiAutomation;
+
+// uiautomator script for the videostreaming workload: drives a YouTube
+// player app (the resource id below suggests "tw.com.freedi.youtube.player"),
+// starts the requested video, and periodically samples the player's
+// elapsed-time label to verify playback keeps pace with wall-clock time.
+public class UiAutomation extends BaseUiAutomation {
+
+ // Log tag identifying this workload's automation.
+ public static String TAG = "videostreaming";
+
+ /*function to convert time in string to sec*/
+ // Parses a "[hh:]mm:ss"-style timestamp into total seconds. Returns -1
+ // when the string contains no ':' separator; the caller uses that as a
+ // "label is not a timestamp" signal.
+ public int computeTimeInSec(String time) {
+ final int seconds = 60;
+ if (!time.contains(":"))
+ return -1;
+
+ int totalTime = 0, mulfactor = 1;
+ String [] strArr = time.split(":");
+
+ // Walk the fields right-to-left, weighting each by successive
+ // powers of 60 (seconds, then minutes, then hours, ...).
+ for (int j = strArr.length - 1; j >= 0; j--) {
+ totalTime += Integer.parseInt(strArr[j]) * (mulfactor);
+ mulfactor = mulfactor * seconds;
+ }
+ return totalTime;
+ }
+
+ // Entry point invoked by the WA uiautomator harness. Reads the
+ // parameters marshalled by the Python workload (tolerance,
+ // sampling_interval, video_name), searches for and plays the video,
+ // and reports RESULT_CANCELED if playback falls behind real time by
+ // more than the allowed tolerance.
+ public void runUiAutomation() throws Exception {
+ final int timeout = 5;
+ int currentTime = 0, timeAfter20Sec = 0, videoTime = 0;
+ long timeBeforeGetText = 0, timeAfterGetText = 0, timeForGetText = 0;
+ Bundle status = new Bundle();
+
+ // Parameters come from the Python side; nothing to do without them.
+ Bundle parameters = getParams();
+ if (parameters.size() <= 0)
+ return;
+
+ int tolerance = Integer.parseInt(parameters.getString("tolerance"));
+ int samplingInterval = Integer.parseInt(parameters
+ .getString("sampling_interval"));
+ String videoName = parameters.getString("video_name").replace("0space0", " "); //Hack to get around uiautomator limitation
+
+ // Open the search UI if the search button is present.
+ UiObject search = new UiObject(new UiSelector()
+ .className("android.widget.ImageButton").index(0));
+ if (search.exists()) {
+ search.clickAndWaitForNewWindow(timeout);
+ }
+
+ // Restrict the search to video results.
+ UiObject clickVideoTab = new UiObject(new UiSelector()
+ .className("android.widget.Button").text("Video"));
+ clickVideoTab.click();
+
+ // Type the (decoded) video name into the search box.
+ UiObject enterKeyword = new UiObject(new UiSelector()
+ .className("android.widget.EditText")
+ .text("Please input the keywords"));
+ enterKeyword.clearTextField();
+ enterKeyword.setText(videoName);
+
+ UiSelector selector = new UiSelector();
+ UiObject clickSearch = new UiObject(selector.resourceId("tw.com.freedi.youtube.player:id/startSearchBtn"));
+ clickSearch.clickAndWaitForNewWindow(timeout);
+
+ // Wait up to 10 seconds for a matching result, then start playback.
+ // (The inner exists() re-check is redundant after a failed
+ // waitForExists, so the exception is effectively always thrown
+ // when the wait times out.)
+ UiObject clickVideo = new UiObject(new UiSelector().className("android.widget.TextView").textContains(videoName));
+ if (!clickVideo.waitForExists(TimeUnit.SECONDS.toMillis(10))) {
+ if (!clickVideo.exists()) {
+ throw new UiObjectNotFoundException("Could not find video.");
+ }
+ }
+
+ clickVideo.clickAndWaitForNewWindow(timeout);
+
+ // Player widgets are addressed by index; index 2 is taken to be the
+ // total-duration label. NOTE(review): layout-dependent -- confirm
+ // against the target app version.
+ UiObject totalVideoTime = new UiObject(new UiSelector()
+ .className("android.widget.TextView").index(2));
+
+ // Rewind button, located through a nested layout hierarchy; used to
+ // restart playback from the beginning before sampling starts.
+ UiObject rewind = new UiObject(new UiSelector()
+ .className("android.widget.RelativeLayout")
+ .index(0).childSelector(new UiSelector()
+ .className("android.widget.LinearLayout")
+ .index(1).childSelector(new UiSelector()
+ .className("android.widget.LinearLayout")
+ .index(1).childSelector(new UiSelector()
+ .className("android.widget.ImageButton")
+ .enabled(true).index(2)))));
+ rewind.click();
+
+ videoTime = computeTimeInSec(totalVideoTime.getText());
+
+ /**
+ * Measure the video elapsed time between sampling intervals and
+ * compare it against the actual time elapsed minus tolerance.If the
+ * video elapsed time is less than the (actual time elapsed -
+ * tolerance), raise the message.
+ */
+ if (videoTime > samplingInterval) {
+ for (int i = 0; i < (videoTime / samplingInterval); i++) {
+ // Elapsed-time label; index 0 assumed -- see NOTE above.
+ UiObject videoCurrentTime = new UiObject(new UiSelector()
+ .className("android.widget.TextView").index(0));
+
+ // NOTE(review): sleep() here presumably takes seconds (the
+ // value is compared against second-denominated timestamps);
+ // confirm against BaseUiAutomation, since raw uiautomator
+ // sleep takes milliseconds.
+ sleep(samplingInterval);
+
+ // Handle the time taken by the getText function
+ timeBeforeGetText = new Date().getTime() / 1000;
+ timeAfter20Sec = computeTimeInSec(videoCurrentTime.getText());
+ timeAfterGetText = new Date().getTime() / 1000;
+ timeForGetText = timeAfterGetText - timeBeforeGetText;
+
+ // -1 means the label no longer parses as a timestamp (e.g.
+ // playback ended or the UI changed); give up quietly.
+ if (timeAfter20Sec == -1) {
+ getUiDevice().pressHome();
+ return;
+ }
+
+ // Playback fell behind wall-clock time by more than the
+ // tolerance: report the run as cancelled (invalid result).
+ if ((timeAfter20Sec - (currentTime + timeForGetText)) <
+ (samplingInterval - tolerance)) {
+ getUiDevice().pressHome();
+
+ getAutomationSupport().sendStatus(Activity.RESULT_CANCELED,
+ status);
+ return;
+ }
+ currentTime = timeAfter20Sec;
+
+ }
+ } else {
+ // Video shorter than one sampling interval: let it play out.
+ sleep(videoTime);
+ }
+ getUiDevice().pressBack();
+ getUiDevice().pressHome();
+ getAutomationSupport().sendStatus(Activity.RESULT_OK, status);
+ }
+}