aboutsummaryrefslogtreecommitdiff
path: root/automated
diff options
context:
space:
mode:
authorArthur She <arthur.she@linaro.org>2020-08-04 11:43:43 -0700
committerArthur She <arthur.she@linaro.org>2020-08-04 11:43:43 -0700
commit37ddbe9d0db1ec8e57bf2107382dd73e9277c5dd (patch)
tree4fc2440b3954fd785ea0af375f91ab354a174154 /automated
parent04ccc87abd9d92a6f3adb77497623702acbc254f (diff)
parent55c8097e9a0c0b93ae70b0ea21da9cd6bbf8cbec (diff)
Merge remote-tracking branch 'arthur-github/igt-check-chamelium-ip'
Diffstat (limited to 'automated')
-rw-r--r--automated/android/adb-join-wifi/adb-join-wifi.yaml2
-rwxr-xr-xautomated/android/apk-automation/andebenchpro2015.py12
-rwxr-xr-xautomated/android/apk-automation/antutu6.py23
-rwxr-xr-xautomated/android/apk-automation/apk-automation.sh12
-rw-r--r--automated/android/apk-automation/apk-automation.yaml6
-rwxr-xr-xautomated/android/apk-automation/benchmarkpi.py3
-rwxr-xr-xautomated/android/apk-automation/caffeinemark.py3
-rwxr-xr-xautomated/android/apk-automation/cf-bench.py3
-rwxr-xr-xautomated/android/apk-automation/common/__init__.py12
-rwxr-xr-xautomated/android/apk-automation/glbenchmark25.py12
-rwxr-xr-xautomated/android/apk-automation/jbench.py22
-rwxr-xr-xautomated/android/apk-automation/linpack.py3
-rwxr-xr-xautomated/android/apk-automation/main.py2
-rwxr-xr-xautomated/android/apk-automation/quadrantpro.py10
-rwxr-xr-xautomated/android/apk-automation/scimark.py19
-rwxr-xr-xautomated/android/apk-automation/vellamo3.py8
-rw-r--r--automated/android/bionic-benchmarks/bionic-benchmarks.yaml2
-rw-r--r--automated/android/bionic-libc-tests/bionic-libc-tests.yaml2
-rw-r--r--automated/android/bootchart/bootchart.yaml2
-rw-r--r--automated/android/bootstat/bootstat_v1.yaml2
-rwxr-xr-xautomated/android/boottime/boottime.sh12
-rw-r--r--automated/android/boottime/boottime.yaml6
-rwxr-xr-xautomated/android/boottime/device-script-master.sh695
-rw-r--r--automated/android/dd-wr-speed/dd-wr-speed.yaml2
-rw-r--r--automated/android/ion-monitor-tool/ion-monitor-tool.yaml2
-rw-r--r--automated/android/libc-bench/libc-bench.yaml2
-rw-r--r--automated/android/linaro-android-kernel-tests/linaro-android-kernel-tests.yaml2
-rw-r--r--automated/android/media-codecs-functional-tests/media-codecs-functional-tests.yaml2
-rw-r--r--automated/android/meminfo/meminfo.yaml2
-rw-r--r--automated/android/microbenchmarks/manifest.xml3
-rwxr-xr-xautomated/android/microbenchmarks/microbenchmarks.sh7
-rw-r--r--automated/android/microbenchmarks/microbenchmarks.yaml2
-rwxr-xr-xautomated/android/monkey/monkey.yaml2
-rw-r--r--automated/android/multinode/connect-to-remote-adb-tcpip-devices.yaml2
-rw-r--r--automated/android/multinode/release-remote-adb-tcpip-devices.yaml2
-rw-r--r--automated/android/multinode/remote-adb-devices-smoke-test.yaml2
-rw-r--r--automated/android/multinode/share-local-device-over-adb-tcpip.yaml2
-rw-r--r--automated/android/multinode/tradefed/tradefed-multinode.yaml2
-rw-r--r--automated/android/multinode/wait-and-keep-local-device-accessible.yaml2
-rw-r--r--automated/android/multinode/wait-for-release-and-reset.yaml2
-rwxr-xr-xautomated/android/noninteractive-tradefed/tradefed-runner.py7
-rw-r--r--automated/android/noninteractive-tradefed/tradefed.yaml4
-rwxr-xr-xautomated/android/optee/optee-gtest-gatekeeper-keymaster.yaml2
-rwxr-xr-xautomated/android/optee/optee-gtest-kmgk.yaml2
-rwxr-xr-xautomated/android/optee/optee-xtest.yaml2
-rw-r--r--automated/android/piglit-gles2/piglit-gles2.yaml2
-rw-r--r--automated/android/piglit-gles3/piglit-gles3.yaml2
-rw-r--r--automated/android/piglit-glslparser/piglit-glslparser.yaml2
-rw-r--r--automated/android/piglit-shader-runner/piglit-shader-runner.yaml2
-rw-r--r--automated/android/pm-qa/pm-qa.yaml2
-rw-r--r--automated/android/stringbench/stringbench.yaml2
-rw-r--r--automated/android/tradefed/result_parser.py7
-rw-r--r--automated/android/tradefed/tradefed.yaml2
-rw-r--r--automated/android/wait-single-boot-completed.yaml2
-rw-r--r--automated/android/wait-single-network-connected.yaml2
-rw-r--r--automated/android/workload-automation/workload-automation.yaml2
-rw-r--r--automated/android/workload-automation3/workload-automation.yaml2
-rwxr-xr-xautomated/lib/sh-test-lib89
-rwxr-xr-xautomated/linux/24h-stress-test/24h-stress-test.sh2
-rw-r--r--automated/linux/24h-stress-test/24h-stress-test.yaml2
-rwxr-xr-xautomated/linux/android-platform-tools/install.sh20
-rw-r--r--automated/linux/android-platform-tools/install.yaml2
-rw-r--r--automated/linux/armnn/armnn-unit-tests.sh43
-rw-r--r--automated/linux/armnn/armnn-unit-tests.yaml25
-rw-r--r--automated/linux/badblocks/badblocks.yaml2
-rw-r--r--automated/linux/blogbench/blogbench.yaml2
-rw-r--r--automated/linux/chroot/kselftest_chroot.yaml2
-rw-r--r--automated/linux/chroot/ltp_chroot.yaml2
-rw-r--r--automated/linux/cyclicdeadline/cyclicdeadline.yaml2
-rw-r--r--automated/linux/cyclictest/cyclictest.yaml2
-rw-r--r--automated/linux/dd-wr-speed/dd-wr-speed.yaml2
-rw-r--r--automated/linux/device-tree/device-tree.yaml2
-rw-r--r--automated/linux/disk-partitioning/disk-partitioning.yaml2
-rw-r--r--automated/linux/docker-integration-test/local-daemon.yaml2
-rw-r--r--automated/linux/docker/docker.yaml2
-rw-r--r--automated/linux/dockerized-tests/local-run.yaml2
-rw-r--r--automated/linux/dockerized-tests/over-ssh.yaml2
-rw-r--r--automated/linux/dsdbench/dsdbench.yaml2
-rw-r--r--automated/linux/ethernet/ethernet.yaml2
-rwxr-xr-xautomated/linux/fdisk/fdisk.sh64
-rw-r--r--automated/linux/fdisk/fdisk.yaml38
-rw-r--r--automated/linux/fio-test/fio-test.yaml2
-rw-r--r--automated/linux/gpiod/gpiod.yaml2
-rw-r--r--automated/linux/httperf-client/httperf-client.yaml2
-rw-r--r--automated/linux/igt/igt-test.yaml5
-rw-r--r--automated/linux/iozone/iozone.yaml2
-rw-r--r--automated/linux/iperf/iperf-client.yaml2
-rw-r--r--automated/linux/iperf/iperf-server.yaml2
-rw-r--r--automated/linux/isolate-task/isolate-task.yaml2
-rw-r--r--automated/linux/kernel-compilation/kernel-compilation.yaml2
-rw-r--r--automated/linux/kernel-config-checker/kernel-config-checker.yaml2
-rwxr-xr-xautomated/linux/kselftest/kselftest.sh6
-rw-r--r--automated/linux/kselftest/kselftest.yaml2
-rw-r--r--automated/linux/kselftest/skipfile-lkft.yaml34
-rwxr-xr-xautomated/linux/kvm-unit-tests/kvm-unit-tests.sh2
-rw-r--r--automated/linux/kvm-unit-tests/kvm-unit-tests.yaml2
-rw-r--r--automated/linux/kvm/start-kvm.yaml2
-rw-r--r--automated/linux/kvm/stop-guest.yaml2
-rw-r--r--automated/linux/lapack/lapack.yaml2
-rw-r--r--automated/linux/libhugetlbfs/libhugetlbfs.yaml2
-rw-r--r--automated/linux/lshw/lshw.yaml2
-rw-r--r--automated/linux/ltp-open-posix/ltp-open-posix.yaml2
-rw-r--r--automated/linux/ltp-realtime/ltp-realtime.yaml2
-rw-r--r--automated/linux/ltp/ltp.yaml2
-rw-r--r--automated/linux/ltp/skipfile-lkft.yaml25
-rw-r--r--automated/linux/meminfo/meminfo.yaml2
-rw-r--r--automated/linux/nginx-server/nginx-linux.yaml2
-rw-r--r--automated/linux/nginx-server/nginx-odp-dpdk-git.yaml2
-rw-r--r--automated/linux/nginx-server/nginx-odp-dpdk.yaml2
-rw-r--r--automated/linux/openssh/openssh-debian.yaml2
-rw-r--r--automated/linux/optee/optee-xtest-qemu.yaml2
-rw-r--r--automated/linux/optee/optee-xtest.yaml2
-rw-r--r--automated/linux/ota-update/ota-update.yaml2
-rw-r--r--automated/linux/overlayfs/overlayfs.yaml2
-rw-r--r--automated/linux/perf/perf.yaml2
-rw-r--r--automated/linux/pm-qa/pm-qa.yaml2
-rw-r--r--automated/linux/pmqtest/pmqtest.yaml2
-rw-r--r--automated/linux/pointer-tagging/pointer-tagging-tests.yaml2
-rw-r--r--automated/linux/pritee_test_utility/pritee_test_utility.yaml2
-rw-r--r--automated/linux/ptsematest/ptsematest.yaml2
-rw-r--r--automated/linux/rcutorture/rcutorture.yaml2
-rw-r--r--automated/linux/rt-migrate-test/rt-migrate-test.yaml2
-rw-r--r--automated/linux/signaltest/signaltest.yaml2
-rw-r--r--automated/linux/sigwaittest/sigwaittest.yaml2
-rw-r--r--automated/linux/spectre-meltdown-checker-test/spectre-meltdown-checker-test.yaml2
-rwxr-xr-xautomated/linux/ssuite/run-bench.sh11
-rw-r--r--automated/linux/ssuite/ssuite-bench.yaml5
-rw-r--r--automated/linux/svsematest/svsematest.yaml2
-rw-r--r--automated/linux/sysbench/sysbench.yaml2
-rw-r--r--automated/linux/sysfs-bus-iio-smoke/sysfs-bus-iio-smoke.yaml2
-rw-r--r--automated/linux/toolchain-smoke/toolchain-smoke.yaml2
-rw-r--r--automated/linux/ui-browser-test/ui-browser-test.yaml2
-rw-r--r--automated/linux/unixbench/unixbench.yaml2
-rw-r--r--automated/linux/v4l2/v4l2-compliance.yaml2
-rw-r--r--automated/linux/widevine/widevine_unittest.yaml5
-rw-r--r--automated/linux/workload-automation/workload-automation.yaml2
-rw-r--r--automated/linux/workload-automation3/workload-automation.yaml2
-rw-r--r--automated/utils/requirements.txt1
-rwxr-xr-xautomated/utils/test-runner.py163
139 files changed, 1451 insertions, 176 deletions
diff --git a/automated/android/adb-join-wifi/adb-join-wifi.yaml b/automated/android/adb-join-wifi/adb-join-wifi.yaml
index 85df455..2bde260 100644
--- a/automated/android/adb-join-wifi/adb-join-wifi.yaml
+++ b/automated/android/adb-join-wifi/adb-join-wifi.yaml
@@ -1,6 +1,6 @@
metadata:
name: adb-join-wifi
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Run tradefed based tests in LAVA."
maintainer:
- milosz.wasilewski@linaro.org
diff --git a/automated/android/apk-automation/andebenchpro2015.py b/automated/android/apk-automation/andebenchpro2015.py
index 909f279..43b7210 100755
--- a/automated/android/apk-automation/andebenchpro2015.py
+++ b/automated/android/apk-automation/andebenchpro2015.py
@@ -81,6 +81,18 @@ class ApkRunnerImpl(ApkTestRunner):
time.sleep(10)
self.dump_always()
+ continue_btn = self.vc.findViewWithText(u'CONTINUE')
+ if continue_btn:
+ continue_btn.touch()
+
+ self.dump_always()
+ warn_msg = self.vc.findViewWithText(u'This app was built for an older version of Android and may not work properly. Try checking for updates, or contact the developer.')
+ if warn_msg:
+ self.logger.info("Older version warning popped up")
+ warning_ok_btn = self.vc.findViewWithTextOrRaise(u'OK')
+ warning_ok_btn.touch()
+
+ self.dump_always()
btn_license = self.vc.findViewWithText(u'I Agree')
if btn_license:
btn_license.touch()
diff --git a/automated/android/apk-automation/antutu6.py b/automated/android/apk-automation/antutu6.py
index 381bc4a..2ff4f53 100755
--- a/automated/android/apk-automation/antutu6.py
+++ b/automated/android/apk-automation/antutu6.py
@@ -123,19 +123,22 @@ class ApkRunnerImpl(ApkTestRunner):
btn_ok.touch()
# cancel the update
- update_msg = "New update available"
- update_window = self.vc.findViewWithText(update_msg)
+ update_window = self.vc.findViewWithText("New update available")
+ need_permission_msg = self.vc.findViewWithText("Please allow the permissions we need for test")
+ allow_permission_btn = self.vc.findViewWithText('ALLOW')
+ warn_msg = self.vc.findViewWithText(u'This app was built for an older version of Android and may not work properly. Try checking for updates, or contact the developer.')
+ continue_btn = self.vc.findViewWithText(u'CONTINUE')
if update_window:
btn_cancel = self.vc.findViewWithTextOrRaise(u'Cancel')
btn_cancel.touch()
-
- msg = "Please allow the permissions we need for test"
- need_permission_msg = self.vc.findViewWithText(msg)
- if need_permission_msg:
+ elif need_permission_msg:
btn_ok = self.vc.findViewWithTextOrRaise(u'OK')
btn_ok.touch()
-
- allow_permission_btn = self.vc.findViewById('com.android.packageinstaller'
- ':id/permission_allow_button')
- if allow_permission_btn:
+ elif allow_permission_btn:
allow_permission_btn.touch()
+ elif warn_msg:
+ self.logger.info("Older version warning popped up")
+ warning_ok_btn = self.vc.findViewWithTextOrRaise(u'OK')
+ warning_ok_btn.touch()
+ elif continue_btn:
+ continue_btn.touch()
diff --git a/automated/android/apk-automation/apk-automation.sh b/automated/android/apk-automation/apk-automation.sh
index 937b956..f5bda12 100755
--- a/automated/android/apk-automation/apk-automation.sh
+++ b/automated/android/apk-automation/apk-automation.sh
@@ -5,6 +5,7 @@
. ./../../lib/android-test-lib
SKIP_INSTALL="true"
+SET_GOVERNOR_POLICY=true
ANDROID_SERIAL=""
BOOT_TIMEOUT="300"
LOOPS="1"
@@ -13,11 +14,11 @@ APK_DIR="./apks"
BASE_URL="http://testdata.validation.linaro.org/apks/"
usage() {
- echo "Usage: $0 [-S <true|false>] [-s <serialno>] [-t <timeout>] [-l <loops>] [-n <test_name>] [-d <apk_dir>] ['-u <base_url>']" 1>&2
+ echo "Usage: $0 [-S <true|false>] [-s <serialno>] [-t <timeout>] [-l <loops>] [-n <test_name>] [-d <apk_dir>] ['-u <base_url>'] [ -g <true|false>]" 1>&2
exit 1
}
-while getopts ":S:s:t:l:n:d:u:" opt; do
+while getopts ":S:s:t:l:n:d:u:g:" opt; do
case "${opt}" in
S) SKIP_INSTALL="${OPTARG}" ;;
s) ANDROID_SERIAL="${OPTARG}" ;;
@@ -26,6 +27,7 @@ while getopts ":S:s:t:l:n:d:u:" opt; do
n) TEST_NAME="${OPTARG}" ;;
d) APK_DIR="${OPTARG}" ;;
u) BASE_URL="${OPTARG}" ;;
+ g) SET_GOVERNOR_POLICY="${OPTARG}" ;;
*) usage ;;
esac
done
@@ -53,4 +55,8 @@ wait_boot_completed "${BOOT_TIMEOUT}"
disable_suspend
info_msg "device-${ANDROID_SERIAL}: About to run ${TEST_NAME}..."
-python main.py -l "${LOOPS}" -n "${TEST_NAME}" -d "${APK_DIR}" -u "${BASE_URL}"
+option_g="-g"
+if [ -n "${SET_GOVERNOR_POLICY}" ] && [ "X${SET_GOVERNOR_POLICY}" = "Xfalse" ]; then
+ option_g=""
+fi
+python main.py -l "${LOOPS}" -n "${TEST_NAME}" -d "${APK_DIR}" -u "${BASE_URL}" ${option_g}
diff --git a/automated/android/apk-automation/apk-automation.yaml b/automated/android/apk-automation/apk-automation.yaml
index 6fcafe7..805e8eb 100644
--- a/automated/android/apk-automation/apk-automation.yaml
+++ b/automated/android/apk-automation/apk-automation.yaml
@@ -1,6 +1,6 @@
metadata:
name: apk-automation
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Automate testing with Android APK."
maintainer:
- milosz.wasilewski@linaro.org
@@ -16,6 +16,8 @@ metadata:
params:
SKIP_INSTALL: "false"
+ # specify if to set the governor policy
+ SET_GOVERNOR_POLICY: "true"
# Timeout for wait_boot_completed in seconds.
BOOT_TIMEOUT: "300"
# Specify adb device SN if more then one device connected.
@@ -34,7 +36,7 @@ params:
run:
steps:
- cd ./automated/android/apk-automation
- - ./apk-automation.sh -S "${SKIP_INSTALL}" -t "${BOOT_TIMEOUT}" -s "${ANDROID_SERIAL}" -n "${TEST_NAME}" -l "${LOOPS}" -d "${APK_DIR}" -u "${BASE_URL}"
+ - ./apk-automation.sh -S "${SKIP_INSTALL}" -t "${BOOT_TIMEOUT}" -s "${ANDROID_SERIAL}" -n "${TEST_NAME}" -l "${LOOPS}" -d "${APK_DIR}" -u "${BASE_URL}" -g "${SET_GOVERNOR_POLICY}"
# Upload test output to artifactorial.
- cp "./output/${TEST_NAME}/result.txt" "./output/result.txt"
- tar caf "output-${TEST_NAME}.tar.xz" "./output"
diff --git a/automated/android/apk-automation/benchmarkpi.py b/automated/android/apk-automation/benchmarkpi.py
index 41a70ec..e1d444b 100755
--- a/automated/android/apk-automation/benchmarkpi.py
+++ b/automated/android/apk-automation/benchmarkpi.py
@@ -18,10 +18,13 @@ class ApkRunnerImpl(ApkTestRunner):
time.sleep(2)
self.dump_always()
warn_msg = self.vc.findViewWithText(u'This app was built for an older version of Android and may not work properly. Try checking for updates, or contact the developer.')
+ continue_btn = self.vc.findViewWithText(u'CONTINUE')
if warn_msg:
self.logger.info("Older version warning popped up")
warning_ok_btn = self.vc.findViewWithTextOrRaise(u'OK')
warning_ok_btn.touch()
+ elif continue_btn:
+ continue_btn.touch()
else:
start_button = self.vc.findViewByIdOrRaise("gr.androiddev.BenchmarkPi:id/Button01")
start_button.touch()
diff --git a/automated/android/apk-automation/caffeinemark.py b/automated/android/apk-automation/caffeinemark.py
index 22ce5e2..cf0f5a4 100755
--- a/automated/android/apk-automation/caffeinemark.py
+++ b/automated/android/apk-automation/caffeinemark.py
@@ -25,10 +25,13 @@ class ApkRunnerImpl(ApkTestRunner):
time.sleep(2)
self.dump_always()
warn_msg = self.vc.findViewWithText(u'This app was built for an older version of Android and may not work properly. Try checking for updates, or contact the developer.')
+ continue_btn = self.vc.findViewWithText(u'CONTINUE')
if warn_msg:
self.logger.info("Older version warning popped up")
warning_ok_btn = self.vc.findViewWithTextOrRaise(u'OK')
warning_ok_btn.touch()
+ elif continue_btn:
+ continue_btn.touch()
else:
start_button = self.vc.findViewByIdOrRaise("com.flexycore.caffeinemark:id/startButton")
start_button.touch()
diff --git a/automated/android/apk-automation/cf-bench.py b/automated/android/apk-automation/cf-bench.py
index c777157..f6646dd 100755
--- a/automated/android/apk-automation/cf-bench.py
+++ b/automated/android/apk-automation/cf-bench.py
@@ -19,10 +19,13 @@ class ApkRunnerImpl(ApkTestRunner):
time.sleep(2)
self.dump_always()
warn_msg = self.vc.findViewWithText(u'This app was built for an older version of Android and may not work properly. Try checking for updates, or contact the developer.')
+ continue_btn = self.vc.findViewWithText(u'CONTINUE')
if warn_msg:
self.logger.info("Older version warning popped up")
warning_ok_btn = self.vc.findViewWithTextOrRaise(u'OK')
warning_ok_btn.touch()
+ elif continue_btn:
+ continue_btn.touch()
else:
# Start test button
start_button = self.vc.findViewWithTextOrRaise("Full Benchmark")
diff --git a/automated/android/apk-automation/common/__init__.py b/automated/android/apk-automation/common/__init__.py
index f000ac8..db68292 100755
--- a/automated/android/apk-automation/common/__init__.py
+++ b/automated/android/apk-automation/common/__init__.py
@@ -12,7 +12,7 @@ import time
try:
import urlparse
except ImportError:
- from urllib.parse import urlparse
+ from urllib import parse as urlparse
from com.dtmilano.android.viewclient import ViewClient
@@ -240,7 +240,7 @@ class ApkTestRunner(object):
def uninstall_apk(self, package):
install_packages = subprocess.check_output(['adb', 'shell', 'pm', 'list', 'packages'])
- if package in install_packages:
+ if package in str(install_packages):
self.logger.info('Stopping %s' % package)
self.call_adb("shell am force-stop %s" % package)
@@ -260,6 +260,10 @@ class ApkTestRunner(object):
self.call_adb('shell dmesg > %s/dmesg.log' % self.config['output'])
def set_performance_governor(self, target_governor="performance"):
+ if self.config.get('set_governor_policy') is not None \
+ and self.config.get('set_governor_policy') is False:
+ return
+
f_scaling_governor = ('/sys/devices/system/cpu/'
'cpu0/cpufreq/scaling_governor')
f_governor_backup = '/data/local/tmp/scaling_governor'
@@ -280,6 +284,10 @@ class ApkTestRunner(object):
cpu.strip()))
def set_back_governor(self):
+ if self.config.get('set_governor_policy') is not None \
+ and self.config.get('set_governor_policy') is False:
+ return
+
dir_sys_cpu = '/sys/devices/system/cpu/'
f_governor_backup = '/data/local/tmp/scaling_governor'
f_governor_local = os.path.join(os.path.abspath(self.config['output']),
diff --git a/automated/android/apk-automation/glbenchmark25.py b/automated/android/apk-automation/glbenchmark25.py
index 9853a84..22e4410 100755
--- a/automated/android/apk-automation/glbenchmark25.py
+++ b/automated/android/apk-automation/glbenchmark25.py
@@ -1,3 +1,4 @@
+import sys
import time
import xml.dom.minidom
from common import ApkTestRunner
@@ -13,7 +14,7 @@ class ApkRunnerImpl(ApkTestRunner):
def setUp(self):
# set to peformance governor policay
- self.set_performance_governor()
+ # self.set_performance_governor()
# download apk related files
self.download_apk('main.1.com.glbenchmark.glbenchmark25.obb')
self.download_apk(self.config['apk_file_name'])
@@ -42,6 +43,9 @@ class ApkRunnerImpl(ApkTestRunner):
select_all_btn = self.vc.findViewWithText("All")
display_tests_menu = self.vc.findViewWithText("Performance Tests")
warn_msg = self.vc.findViewWithText(u'This app was built for an older version of Android and may not work properly. Try checking for updates, or contact the developer.')
+ continue_btn = self.vc.findViewWithText(u'CONTINUE')
+ attention_msg = self.vc.findViewWithText(u'''Network connection not found!
+Do you want to setup network connection? (If you can not upload the results you will not see it)''')
if select_all_btn:
select_all_btn.touch()
self.logger.info("All selected!")
@@ -53,6 +57,12 @@ class ApkRunnerImpl(ApkTestRunner):
self.logger.info("Older version warning popped up")
warning_ok_btn = self.vc.findViewWithTextOrRaise(u'OK')
warning_ok_btn.touch()
+ elif continue_btn:
+ continue_btn.touch()
+ elif attention_msg:
+ self.report_result('glbenchmark25-run', 'fail')
+ self.logger.info("Network connection is required")
+ sys.exit(1)
else:
# continue
pass
diff --git a/automated/android/apk-automation/jbench.py b/automated/android/apk-automation/jbench.py
index 5ba24c5..33b8eb6 100755
--- a/automated/android/apk-automation/jbench.py
+++ b/automated/android/apk-automation/jbench.py
@@ -13,10 +13,24 @@ class ApkRunnerImpl(ApkTestRunner):
super(ApkRunnerImpl, self).__init__(self.config)
def execute(self):
- self.dump_always()
- btn_jbench = self.vc.findViewByIdOrRaise("it.JBench.bench:id/button1")
- btn_jbench.touch()
- time.sleep(2)
+ find_start_btn = False
+ while not find_start_btn:
+ time.sleep(2)
+ self.dump_always()
+ btn_jbench = self.vc.findViewById("it.JBench.bench:id/button1")
+ warn_msg = self.vc.findViewWithText(u'This app was built for an older version of Android and may not work properly. Try checking for updates, or contact the developer.')
+ continue_btn = self.vc.findViewWithText(u'CONTINUE')
+ if warn_msg:
+ self.logger.info("Older version warning popped up")
+ warning_ok_btn = self.vc.findViewWithTextOrRaise(u'OK')
+ warning_ok_btn.touch()
+ elif continue_btn:
+ continue_btn.touch()
+ elif btn_jbench:
+ btn_jbench.touch()
+ find_start_btn = True
+ else:
+ self.logger.info("Nothing found, need to check manually")
finished = False
while (not finished):
diff --git a/automated/android/apk-automation/linpack.py b/automated/android/apk-automation/linpack.py
index 517e8c9..9fba9bf 100755
--- a/automated/android/apk-automation/linpack.py
+++ b/automated/android/apk-automation/linpack.py
@@ -17,10 +17,13 @@ class ApkRunnerImpl(ApkTestRunner):
time.sleep(2)
self.dump_always()
warn_msg = self.vc.findViewWithText(u'This app was built for an older version of Android and may not work properly. Try checking for updates, or contact the developer.')
+ continue_btn = self.vc.findViewWithText(u'CONTINUE')
if warn_msg:
self.logger.info("Older version warning popped up")
warning_ok_btn = self.vc.findViewWithTextOrRaise(u'OK')
warning_ok_btn.touch()
+ elif continue_btn:
+ continue_btn.touch()
else:
start_single_button = self.vc.findViewByIdOrRaise("com.greenecomputing.linpack:id/btnsingle")
start_single_button.touch()
diff --git a/automated/android/apk-automation/main.py b/automated/android/apk-automation/main.py
index 1b06c80..5320fad 100755
--- a/automated/android/apk-automation/main.py
+++ b/automated/android/apk-automation/main.py
@@ -10,6 +10,8 @@ parser.add_argument('-n', '--name', dest='name', default='linpack',
help='Specify test name.')
parser.add_argument('-l', '--loops', type=int, dest='loops', default=1,
help='Set the number of test loops.')
+parser.add_argument('-g', '--governor', action='store_true', dest='set_governor_policy', default=False,
+ help='Specify if to set the governor policy to performance')
parser.add_argument('-v', '--verbose', action='store_true', dest='verbose',
default=False, help='Set the number of test loops.')
args = parser.parse_args()
diff --git a/automated/android/apk-automation/quadrantpro.py b/automated/android/apk-automation/quadrantpro.py
index 3360a2b..d6fd963 100755
--- a/automated/android/apk-automation/quadrantpro.py
+++ b/automated/android/apk-automation/quadrantpro.py
@@ -24,7 +24,15 @@ class ApkRunnerImpl(ApkTestRunner):
self.dump_always()
view_license_btn = self.vc.findViewWithText("View license")
run_full_item = self.vc.findViewWithText(u'Run full benchmark')
- if view_license_btn:
+ warn_msg = self.vc.findViewWithText(u'This app was built for an older version of Android and may not work properly. Try checking for updates, or contact the developer.')
+ continue_btn = self.vc.findViewWithText(u'CONTINUE')
+ if warn_msg:
+ self.logger.info("Older version warning popped up")
+ warning_ok_btn = self.vc.findViewWithTextOrRaise(u'OK')
+ warning_ok_btn.touch()
+ elif continue_btn:
+ continue_btn.touch()
+ elif view_license_btn:
ok_button = self.vc.findViewWithTextOrRaise("OK")
ok_button.touch()
elif run_full_item:
diff --git a/automated/android/apk-automation/scimark.py b/automated/android/apk-automation/scimark.py
index bcc30d5..9fcc96f 100755
--- a/automated/android/apk-automation/scimark.py
+++ b/automated/android/apk-automation/scimark.py
@@ -12,10 +12,21 @@ class ApkRunnerImpl(ApkTestRunner):
super(ApkRunnerImpl, self).__init__(self.config)
def execute(self):
- time.sleep(5)
- self.dump_always()
- btn_java_bench = self.vc.findViewWithTextOrRaise(u'Java bench')
- btn_java_bench.touch()
+ find_start_btn = False
+ while not find_start_btn:
+ self.dump_always()
+ warn_msg = self.vc.findViewWithText(u'This app was built for an older version of Android and may not work properly. Try checking for updates, or contact the developer.')
+ continue_btn = self.vc.findViewWithText(u'CONTINUE')
+ if warn_msg:
+ self.logger.info("Older version warning popped up")
+ warning_ok_btn = self.vc.findViewWithTextOrRaise(u'OK')
+ warning_ok_btn.touch()
+ elif continue_btn:
+ continue_btn.touch()
+ else:
+ btn_java_bench = self.vc.findViewWithTextOrRaise(u'Java bench')
+ btn_java_bench.touch()
+ find_start_btn = True
finished = False
while not finished:
diff --git a/automated/android/apk-automation/vellamo3.py b/automated/android/apk-automation/vellamo3.py
index 413ee61..3d763aa 100755
--- a/automated/android/apk-automation/vellamo3.py
+++ b/automated/android/apk-automation/vellamo3.py
@@ -48,9 +48,17 @@ class ApkRunnerImpl(ApkTestRunner):
btn_setup_1 = self.vc.findViewById("android:id/button1")
btn_settings = self.vc.findViewById('com.quicinc.vellamo:id/main_toolbar_wheel')
btn_animations = self.vc.findViewWithText(u'Make Vellamo even more beautiful')
+ warn_msg = self.vc.findViewWithText(u'This app was built for an older version of Android and may not work properly. Try checking for updates, or contact the developer.')
+ continue_btn = self.vc.findViewWithText(u'CONTINUE')
if btn_setup_1:
# Accept Vellamo EULA
btn_setup_1.touch()
+ elif warn_msg:
+ self.logger.info("Older version warning popped up")
+ warning_ok_btn = self.vc.findViewWithTextOrRaise(u'OK')
+ warning_ok_btn.touch()
+ elif continue_btn:
+ continue_btn.touch()
elif btn_settings:
# Open settings
btn_settings.touch()
diff --git a/automated/android/bionic-benchmarks/bionic-benchmarks.yaml b/automated/android/bionic-benchmarks/bionic-benchmarks.yaml
index 94bf56a..eb64cf7 100644
--- a/automated/android/bionic-benchmarks/bionic-benchmarks.yaml
+++ b/automated/android/bionic-benchmarks/bionic-benchmarks.yaml
@@ -1,6 +1,6 @@
metadata:
name: bionic-benchmarks-with-units
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Collect the bionic-benchmarks data and try to analyse it."
maintainer:
- daniel.diaz@linaro.org
diff --git a/automated/android/bionic-libc-tests/bionic-libc-tests.yaml b/automated/android/bionic-libc-tests/bionic-libc-tests.yaml
index 335076a..5e5fb23 100644
--- a/automated/android/bionic-libc-tests/bionic-libc-tests.yaml
+++ b/automated/android/bionic-libc-tests/bionic-libc-tests.yaml
@@ -1,6 +1,6 @@
metadata:
name: bionic-libc-tests
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Execute all available bionic-libc-tests and fetch data."
maintainer:
- daniel.diaz@linaro.org
diff --git a/automated/android/bootchart/bootchart.yaml b/automated/android/bootchart/bootchart.yaml
index a303be5..1b1f6d7 100644
--- a/automated/android/bootchart/bootchart.yaml
+++ b/automated/android/bootchart/bootchart.yaml
@@ -1,6 +1,6 @@
metadata:
name: bootchart
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Collect the bootchart data and try to analyse."
maintainer:
- yongqin.liu@linaro.org
diff --git a/automated/android/bootstat/bootstat_v1.yaml b/automated/android/bootstat/bootstat_v1.yaml
index 4eb6596..857b581 100644
--- a/automated/android/bootstat/bootstat_v1.yaml
+++ b/automated/android/bootstat/bootstat_v1.yaml
@@ -1,6 +1,6 @@
metadata:
name: bootstat
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "collect the bootstat data and try to analyse
Please Note:
This yaml test definition is only for LAVA V1"
diff --git a/automated/android/boottime/boottime.sh b/automated/android/boottime/boottime.sh
index 50bb4bb..7cbb6c9 100755
--- a/automated/android/boottime/boottime.sh
+++ b/automated/android/boottime/boottime.sh
@@ -16,13 +16,14 @@ usage() {
exit 1
}
-while getopts ":S:s:t:o:n:" o; do
+while getopts ":S:s:t:o:n:v:" o; do
case "$o" in
S) SKIP_INSTALL="${OPTARG}" ;;
s) ANDROID_SERIAL="${OPTARG}" ;;
t) BOOT_TIMEOUT="${OPTARG}" ;;
o) OPERATION="${OPTARG}" ;;
n) COLLECT_NO="${OPTARG}" ;;
+ v) ANDROID_VERSION="${OPTARG}" ;;
*) usage ;;
esac
done
@@ -43,9 +44,14 @@ wait_boot_completed "${BOOT_TIMEOUT}"
echo "BOOT_TO_UI pass" >> boot_result.txt
mv boot_result.txt output/
-adb_push "./device-script.sh" "/data/local/tmp/"
+f_device_script_name="device-script.sh"
+if [ -n "${ANDROID_VERSION}" ] && [ "X${ANDROID_VERSION}" = "Xmaster" ]; then
+ f_device_script_name="device-script-master.sh"
+fi
+adb_push "./${f_device_script_name}" "/data/local/tmp/"
+
info_msg "device-${ANDROID_SERIAL}: About to run boottime ${OPERATION} ${COLLECT_NO}..."
-adb shell "/data/local/tmp/device-script.sh ${OPERATION} ${COLLECT_NO}" \
+adb shell "/data/local/tmp/${f_device_script_name} ${OPERATION} ${COLLECT_NO}" \
| tee "${OUTPUT}/device-stdout.log"
adb_pull "/data/local/tmp/boottime/" "${OUTPUT}/device-boottime"
diff --git a/automated/android/boottime/boottime.yaml b/automated/android/boottime/boottime.yaml
index 791aeed..8c48b0a 100644
--- a/automated/android/boottime/boottime.yaml
+++ b/automated/android/boottime/boottime.yaml
@@ -1,6 +1,6 @@
metadata:
name: boottime
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "collect the boottime data many times and try to analyse,
when run more than 4 times(including), the average will be
calculated without the maximum and the minmun, if run less
@@ -18,6 +18,8 @@ metadata:
- hi6220-hikey
params:
+ # specify the android version
+ ANDROID_VERSION: ""
# specify true or false to skip or not the installation of lxc packages
SKIP_INSTALL: "false"
# Specify device serial no. when more than one device connected.
@@ -34,7 +36,7 @@ params:
run:
steps:
- cd ./automated/android/boottime
- - ./boottime.sh -S "${SKIP_INSTALL}" -s "${ANDROID_SERIAL}" -t "${BOOT_TIMEOUT}" -o "${OPERATION}" -n "${COLLECT_NO}"
+ - ./boottime.sh -S "${SKIP_INSTALL}" -s "${ANDROID_SERIAL}" -t "${BOOT_TIMEOUT}" -o "${OPERATION}" -n "${COLLECT_NO}" -v "${ANDROID_VERSION}"
- if [ "${OPERATION}" = "ANALYZE" ]; then ../../utils/upload-to-artifactorial.sh -a "output/boottime.tgz" -u "${URL}" -t "${TOKEN}"; fi
- ../../utils/send-to-lava.sh ./output/boot_result.txt
- ../../utils/send-to-lava.sh ./output/result.txt
diff --git a/automated/android/boottime/device-script-master.sh b/automated/android/boottime/device-script-master.sh
new file mode 100755
index 0000000..afa45db
--- /dev/null
+++ b/automated/android/boottime/device-script-master.sh
@@ -0,0 +1,695 @@
+#!/system/bin/sh
+##############################################################################
+## Description about this boot time measuring script ####
+##############################################################################
+## This script will be run on the device, it has following 2 functions:
+## 1. collecting the dmesg log and logcat information, and save them under
+## /data/local/tmp/boottime
+## directory in the name for following format:
+## logcat_all_${COLLECT_NO}.log :
+## collected via command "logcat -d -v time *:V"
+## logcat_events_${COLLECT_NO}.log:
+## collected via command "logcat -d -b events -v time"
+## dmesg_${COLLECT_NO}.log:
+## collected via command "dmesg"
+## prop_${COLLECT_NO}.log:
+##                          collected via command "getprop"
+## when this script is run as following:
+## ./android/scripts/boottime2.sh COLLECT ${COLLECT_NO}
+##
+## 2. analyzing boottime information from the collected log information
+## when this script is run as following:
+## ./android/scripts/boottime2.sh ANALYZE ${COLLECT_NO}
+##
+## it will get the average of multiple iterations for the boot time,
+##    so as to get more stable and accurate boot time information:
+##
+## iterations < 4: the average will be calculated with all data
+## iterations >= 4: the average will be calculated with maximum
+## and minimum will be removed
+## For each iteration, it will get following boot time information:
+## (assuming kernel started at 0 timestamp in this script)
+##
+## TOTAL_BOOT_TIME:
+## the sum of KERNEL_BOOT_TIME and ANDROID_BOOT_TIME
+##
+## KERNEL_BOOT_TIME:
+## from kernel started to line "Freeing unused kernel memory" printed,
+## it does not include kernel loading and uncompression part done
+## by bootloader or kernel itself
+##
+## ANDROID_BOOT_TIME:
+## the sum of INIT_TO_SURFACEFLINGER_START_TIME and SURFACEFLINGER_BOOT_TIME
+##
+## SURFACEFLINGER_BOOT_TIME: the time information is gotten from the line
+## contains "Boot is finished" like following in logcat:
+## 1-01 00:00:27.158 I/SurfaceFlinger( 1835): Boot is finished (13795 ms)
+## the time here means the time from surfaceflinger service started
+## to the time boot animation finished.
+## it does not include the time from init start to the time
+## surfaceflinger service started
+##
+## Also following time values are gotten from dmesg log information,
+## they are not as accurate as what we expect, but can be used for
+## reference and used for checking our boot time improvements
+##
+## INIT_TO_SURFACEFLINGER_START_TIME:
+## from the time "Freeing unused kernel memory" printed in dmesg
+## to the time "init: Starting service 'surfaceflinger'..." is printed
+##
+## FS_MOUNT_TIME:
+## from the time "Freeing unused kernel memory:" printed
+## to the time "init: Starting service 'logd'..." printed.
+##
+## FS_MOUNT_DURATION:
+## from the line "init: /dev/hw_random not found" printed
+## to the time "init: Starting service 'logd'..." printed
+##
+## BOOTANIM_TIME:
+## from the time "init: Starting service 'bootanim'..." printed
+## to the time "init: Service 'bootanim'.* exited with status" printed
+##
+## ANDROID_SERVICE_START_TIME:
+## from the time kernel started to the time healthd service started
+##
+## ANDROID_UI_SHOWN:
+## time from freeing unused kernel memory to the time
+## when UI is shown on display
+##############################################################################
+
+local_file_path="$0"
+local_file_parent=$(dirname "${local_file_path}")
+local_file_parent=$(cd "${local_file_parent}"||exit; pwd)
+
+local_tmp="/data/local/tmp/"
+dir_boottime_data="${local_tmp}/boottime"
+F_RAW_DATA_CSV="${dir_boottime_data}/boot_time_raw_data.csv"
+F_STATISTIC_DATA_CSV="${dir_boottime_data}/boot_time_statistic_data.csv"
+RESULT_FILE="${dir_boottime_data}/result.txt"
+
+## Copied from android/scripts/common.sh.
+G_RECORD_LOCAL_CSV=TRUE
+G_VERBOSE_OUTPUT=FALSE
+G_RESULT_NOT_RECORD=FALSE
+
+## Description:
+## output the max value of the passed 2 parameters
+## Usage:
+## f_max "${val1}" "${val2}"
+## Example:
+## max=$(f_max "1.5" "2.0")
+f_max(){
+ val1=$1
+ val2=$2
+ [ -z "$val1" ] && echo "$val2"
+ [ -z "$val2" ] && echo "$val1"
+
+ echo "$val1,$val2"|awk -F, '{if($1<$2) print $2; else print $1}'
+}
+
+## Description:
+## output the min value of the passed 2 parameters
+## Usage:
+## f_min "${val1}" "${val2}"
+## Example:
+## min=$(f_min "1.5" "2.0")
+f_min(){
+ val1=$1
+ val2=$2
+ [ -z "$val1" ] && echo "$val1"
+ [ -z "$val2" ] && echo "$val2"
+
+ echo "$val1,$val2"|awk -F, '{if($1>$2) print $2; else print $1}'
+}
+
+## Description:
+## calculate the average value for specified csv file.
+## The first field of that csv file should be the key/name of that line,
+##  Lines with the same key should be grouped together.
+## Usage:
+## statistic "${csv_file_path}" "${file_number}"
+## Example:
+## statistic "$f_res_starttime" 2
+## Note:
+## if less than 4 samples for that key/item there, average will be calculated as total/count
+## if 4 or more samples for that key/item there, average will be calculated with max and min excluded
+statistic(){
+ f_data=$1
+ if ! [ -f "$f_data" ]; then
+ return
+ fi
+ field_no=$2
+ if [ -z "$field_no" ]; then
+ field_no=2
+ fi
+ total=0
+ max=0
+ min=0
+ old_key=""
+ new_key=""
+ count=0
+ units=""
+ sort "${f_data}" >"${f_data}.sort"
+ while read -r line; do
+ line=$(echo "$line"|tr ' ' '~')
+ if ! echo "$line"|grep -q ,; then
+ continue
+ fi
+ new_key=$(echo "$line"|cut -d, -f1)
+ measurement_units=$(echo "$line"|cut -d, -f${field_no})
+ if echo "${measurement_units}"|grep -q '~'; then
+ value=$(echo "${measurement_units}"|cut -d~ -f1)
+ else
+ value=${measurement_units}
+ fi
+
+ if [ "X${new_key}" = "X${old_key}" ]; then
+ # for the second record and later
+ total=$(echo "${total},${value}"|awk -F, '{printf "%.2f",$1+$2;}')
+ count=$(echo "${count},1"|awk -F, '{printf $1+$2;}')
+ max=$(f_max "$max" "$value")
+ min=$(f_min "$min" "$value")
+ else
+ # for the first record of the same key
+ if [ "X${old_key}" != "X" ]; then
+ # next key started
+ if [ "${count}" -ge 4 ]; then
+ average=$(echo "${total},${max},${min},$count"|awk -F, '{printf "%.2f",($1-$2-$3)/($4-2);}')
+ else
+ average=$(echo "${total},$count"|awk -F, '{printf "%.2f",$1/$2;}')
+ fi
+ if [ -z "${units}" ]; then
+ echo "${old_key}=${average}"
+ else
+ echo "${old_key}=${average},${units}"
+ fi
+ fi
+ total="${value}"
+ max="${value}"
+ min="${value}"
+ old_key="${new_key}"
+ count=1
+ if echo "${measurement_units}"|grep -q '~'; then
+ units=$(echo "${measurement_units}"|cut -d~ -f2)
+ else
+ units=""
+ fi
+ fi
+ done < "${f_data}.sort"
+ if [ "X${new_key}" != "X" ]; then
+ if [ $count -ge 4 ]; then
+ average=$(echo "${total},${max},${min},$count"|awk -F, '{printf "%.2f",($1-$2-$3)/($4-2);}')
+ else
+ average=$(echo "${total},$count"|awk -F, '{printf "%.2f",$1/$2;}')
+ fi
+ if [ -z "${units}" ]; then
+ echo "${new_key}=${average}"
+ else
+ echo "${new_key}=${average},${units}"
+ fi
+ fi
+ rm "${f_data}.sort"
+}
+
+## Description:
+## output the test result to console and add for lava-test-shell,
+## also write into one csv file for comparing manually
+## Usage:
+## output_test_result $test_name $result [ $measurement [ $units ] ]
+## Note:
+## G_RECORD_LOCAL_CSV: when this environment variant is set to "TRUE",
+## the result will be recorded in a csv file in the following path:
+## rawdata/final_result.csv
+## G_VERBOSE_OUTPUT: when this environment variant is set to "TRUE", and only it is TRUE,
+##          the verbose information about the result will be output
+output_test_result(){
+ test_name=$1
+ result=$2
+ measurement=$3
+ units=$4
+
+ if [ -z "${test_name}" ] || [ -z "$result" ]; then
+ return
+ fi
+ output=""
+ lava_paras=""
+ output_csv=""
+ if [ -z "$units" ]; then
+ units="points"
+ fi
+ if [ -z "${measurement}" ]; then
+ output="${test_name}=${result}"
+ lava_paras="${test_name} ${result}"
+ else
+ output="${test_name}=${measurement} ${units}"
+ lava_paras="${test_name} ${result} ${measurement} ${units}"
+ output_csv="${test_name},${measurement} ${units}"
+ fi
+
+ echo "${lava_paras}" | tee -a "${RESULT_FILE}"
+
+ if [ "X${G_VERBOSE_OUTPUT}" = "XTRUE" ];then
+ echo "${output}"
+ fi
+
+ if [ "X${G_RECORD_LOCAL_CSV}" = "XTRUE" ]; then
+ if [ -n "${output_csv}" ]; then
+ echo "${output_csv}">>${F_RAW_DATA_CSV}
+ fi
+ fi
+}
+
+# dmesg line example
+# [ 7.410422] init: Starting service 'logd'...
+getTime(){
+ key=$1
+ if [ -z "${key}" ]; then
+ return
+ fi
+
+ key_line=$(grep -i "${key}" "${LOG_DMESG}")
+ if [ -n "${key_line}" ]; then
+ timestamp=$(echo "${key_line}"|awk '{print $2}' | awk -F "]" '{print $1}')
+ echo "${timestamp}"
+ fi
+}
+
+# logcat_all line example
+# 01-01 00:00:26.313 I/SurfaceFlinger( 1850): Boot is finished (11570 ms)
+calculate_logcat_timestamp(){
+ key_line=$1
+ if [ -z "${key_line}" ]; then
+ return
+ fi
+
+ year=$(date +%G)
+ mmdd=$(echo "${key_line}" |awk '{printf "%s\n", $1}')
+ hhmmss_ms=$(echo "${key_line}" |awk '{printf "%s\n", $2}')
+ ms=$(echo "${hhmmss_ms}"|cut -d. -f2)
+ hhmmss=$(echo "${hhmmss_ms}"|cut -d. -f1)
+ hhmm=$(echo "${hhmmss}"|cut -d: -f1,2)
+ ss=$(echo "${hhmmss}"|cut -d: -f3)
+ mmddhhmm_ss=$(echo "${mmdd}${hhmm}${year}.${ss}"|tr -d ':-')
+ sec=$(date -d "${mmddhhmm_ss}" +%s)
+ echo "${sec}.${ms}"
+}
+
+getTimeStampFromLogcat(){
+ key=$1
+ if [ -z "${key}" ]; then
+ return
+ fi
+
+ key_line=$(grep -i "${key}" "${LOG_LOGCAT_ALL}")
+ calculate_logcat_timestamp "${key_line}"
+}
+
+getTimeStampFromLogcatDmesg(){
+ key=$1
+ if [ -z "${key}" ]; then
+ return
+ fi
+
+ key_line=$(grep -i "${key}" "${LOG_LOGCAT_DMESG}")
+ calculate_logcat_timestamp "${key_line}"
+}
+
+getBootTimeInfoFromProperty(){
+ #"ro.runtime.firstboot"
+ #"ro.boottime."
+    # Time after boot in ns (via the CLOCK_BOOTTIME clock) that the service was first started.
+ while read -r line; do
+ if ! echo "${line}"|grep -q "ro.boottime."; then
+ continue
+ fi
+
+ line=$(echo "$line"|tr -d "[]:")
+ key=$(echo "$line"|awk '{printf $1;}')
+ value=$(echo "$line"|awk '{printf $2;}')
+ output_test_result "${key}" "pass" "${value}" "ns"
+ done < "${LOG_PROPERTY}"
+
+ line_runtime_firstboot=$(grep "firstboot" "${LOG_PROPERTY}")
+ if [ -n "${line_runtime_firstboot}" ]; then
+ line_runtime_firstboot=$(echo "$line_runtime_firstboot"|tr -d "[]:")
+ key=$(echo "$line_runtime_firstboot"|awk '{printf $1;}')
+ value=$(echo "$line_runtime_firstboot"|awk '{printf $2;}')
+ output_test_result "${key}" "pass" "${value}" "ns"
+ fi
+}
+
+getTimeFromPropertyWithKey(){
+ key="${1}"
+ line=$(grep "\[${key}\]:" "${LOG_PROPERTY}")
+ if [ -n "${line}" ]; then
+ line=$(echo "$line"|tr -d "[]:")
+ value=$(echo "$line"|awk '{printf $2;}')
+ echo "${value}"
+ fi
+}
+
+getTimestampWithMMDDAndHHMMSSMS(){
+ mmdd="${1}"
+ hhmmss_ms="${2}"
+
+ year=$(date +%G)
+ hhmmss=$(echo "${hhmmss_ms}"|cut -d. -f1)
+ hhmm=$(echo "${hhmmss}"|cut -d: -f1,2)
+ ss=$(echo "${hhmmss}"|cut -d: -f3)
+ mmddhhmm_ss=$(echo "${mmdd}${hhmm}${year}.${ss}"|tr -d ':-')
+
+ sec=$(date -d "${mmddhhmm_ss}" +%s)
+ echo "${sec}.${ms}"
+}
+
+getBootTimeInfoFromLogs(){
+ COLLECT_NO=$1
+ LOG_LOGCAT_ALL="${dir_boottime_data}/logcat_all_${COLLECT_NO}.log"
+ LOG_LOGCAT_DMESG="${dir_boottime_data}/logcat_dmesg_${COLLECT_NO}.log"
+ LOG_PROPERTY="${dir_boottime_data}/prop_${COLLECT_NO}.log"
+
+ # dmesg starts before all timers are initialized, so kernel reports time as 0.0.
+ # we can't work around this without external time metering.
+ # here we presume kernel message starts from 0
+ KERNEL_BOOT_TIME_NS=$(getTimeFromPropertyWithKey "ro.boottime.init")
+ if [ -n "${KERNEL_BOOT_TIME_NS}" ]; then
+ KERNEL_BOOT_TIME=$(echo "${KERNEL_BOOT_TIME_NS}"| awk '{printf "%.3f",$1/1000/1000/1000;}')
+ output_test_result "KERNEL_BOOT_TIME" "pass" "${KERNEL_BOOT_TIME}" "s"
+ else
+ CONSOLE_SECONDS_START=$(getTimeStampFromLogcatDmesg "Booting Linux on")
+ CONSOLE_SECONDS_END=$(getTimeStampFromLogcatDmesg "Freeing unused kernel memory")
+ if [ -n "${CONSOLE_SECONDS_END}" ] && [ -n "${CONSOLE_SECONDS_START}" ]; then
+ KERNEL_BOOT_TIME=$(echo "${CONSOLE_SECONDS_END} ${CONSOLE_SECONDS_START}" | awk '{printf "%.3f",$1-$2;}')
+ output_test_result "KERNEL_BOOT_TIME" "pass" "${KERNEL_BOOT_TIME}" "s"
+ fi
+ fi
+
+ INIT_FIRST_STAGE_TIME_NS=$(getTimeFromPropertyWithKey "ro.boottime.init.first_stage")
+ if [ -n "${INIT_FIRST_STAGE_TIME_NS}" ]; then
+ INIT_FIRST_STAGE_TIME=$(echo "${INIT_FIRST_STAGE_TIME_NS}"| awk '{printf "%.3f",$1/1000/1000/1000;}')
+ output_test_result "INIT_FIRST_STAGE_TIME" "pass" "${INIT_FIRST_STAGE_TIME}" "s"
+ else
+ POINT_FIRST_STAGE_START=$(getTimeStampFromLogcatDmesg "init .* init first stage started"|tail -n1)
+ POINT_SECOND_STAGE_START=$(getTimeStampFromLogcatDmesg "init .* init second stage started"|tail -n1)
+ if [ -n "${POINT_FIRST_STAGE_START}" ] && [ -n "${POINT_SECOND_STAGE_START}" ]; then
+ INIT_FIRST_STAGE_TIME=$(echo "${POINT_SECOND_STAGE_START} ${POINT_FIRST_STAGE_START}" | awk '{printf "%.3f",$1-$2;}')
+ output_test_result "INIT_FIRST_STAGE_TIME" "pass" "${INIT_FIRST_STAGE_TIME}" "s"
+ fi
+ fi
+
+ POINT_SERVICE_BOOTANIM_START=$(getTimeStampFromLogcatDmesg "init .* Starting service 'bootanim'..."|tail -n1)
+ POINT_SERVICE_BOOTANIM_END=$(getTimeStampFromLogcatDmesg "init .* Service 'bootanim'.* exited with status"|tail -n1)
+ if [ -n "${POINT_SERVICE_BOOTANIM_END}" ] && [ -n "${POINT_SERVICE_BOOTANIM_START}" ]; then
+ BOOTANIM_TIME=$(echo "${POINT_SERVICE_BOOTANIM_END} ${POINT_SERVICE_BOOTANIM_START}" | awk '{printf "%.3f",$1-$2;}')
+ output_test_result "BOOTANIM_TIME" "pass" "${BOOTANIM_TIME}" "s"
+ fi
+
+ # use ro.boottime.init as the start of the init
+ POINT_INIT_START_FROM_PROPERTY_NS=$(getTimeFromPropertyWithKey "ro.boottime.init")
+ POINT_SERVICE_SURFACEFLINGER_START_FROM_PROPERTY_NS=$(getTimeFromPropertyWithKey "ro.boottime.surfaceflinger")
+ if [ -n "${POINT_INIT_START_FROM_PROPERTY_NS}" ] && [ -n "${POINT_SERVICE_SURFACEFLINGER_START_FROM_PROPERTY_NS}" ]; then
+ POINT_INIT_START_FROM_PROPERTY=$(echo "${POINT_INIT_START_FROM_PROPERTY_NS}"| awk '{printf "%.3f",$1/1000/1000/1000;}')
+ POINT_SERVICE_SURFACEFLINGER_START_FROM_PROPERTY=$(echo "${POINT_SERVICE_SURFACEFLINGER_START_FROM_PROPERTY_NS}"| awk '{printf "%.3f",$1/1000/1000/1000;}')
+ INIT_TO_SURFACEFLINGER_START_TIME=$(echo "${POINT_SERVICE_SURFACEFLINGER_START_FROM_PROPERTY} ${POINT_INIT_START_FROM_PROPERTY}" | awk '{printf "%.3f",$1-$2;}')
+ output_test_result "INIT_TO_SURFACEFLINGER_START_TIME" "pass" "${INIT_TO_SURFACEFLINGER_START_TIME}" "s"
+ else
+ POINT_INIT_START=$(getTimeStampFromLogcatDmesg "Freeing unused kernel memory")
+ POINT_SERVICE_SURFACEFLINGER_START=$(getTimeStampFromLogcatDmesg "init .* Starting service 'surfaceflinger'..."|tail -n1)
+ if [ -n "${POINT_SERVICE_SURFACEFLINGER_START}" ] && [ -n "${POINT_INIT_START}" ]; then
+ INIT_TO_SURFACEFLINGER_START_TIME=$(echo "${POINT_SERVICE_SURFACEFLINGER_START} ${POINT_INIT_START}" | awk '{printf "%.3f",$1-$2;}')
+ output_test_result "INIT_TO_SURFACEFLINGER_START_TIME" "pass" "${INIT_TO_SURFACEFLINGER_START_TIME}" "s"
+ fi
+ fi
+
+ ## When there are 2 lines of "Boot is finished",
+ ## it mostly means that the surfaceflinger service restarted by some reason
+ ## but here when there are multiple lines of "Boot is finished",
+    ## use the last line, and report the case later after checking all the logs
+ SURFACEFLINGER_BOOT_TIME_INFO=$(grep "Boot is finished" "${LOG_LOGCAT_ALL}"|tail -n1)
+ if [ -n "${SURFACEFLINGER_BOOT_TIME_INFO}" ]; then
+ while echo "${SURFACEFLINGER_BOOT_TIME_INFO}"|grep -q -F "("; do
+ SURFACEFLINGER_BOOT_TIME_INFO=$(echo "${SURFACEFLINGER_BOOT_TIME_INFO}"|cut -d\( -f2-)
+ done
+ SURFACEFLINGER_BOOT_TIME_MS=$(echo "${SURFACEFLINGER_BOOT_TIME_INFO}"|cut -d\ -f1)
+ SURFACEFLINGER_BOOT_TIME=$(echo "${SURFACEFLINGER_BOOT_TIME_MS}" | awk '{printf "%.3f",$1/1000;}')
+ output_test_result "SURFACEFLINGER_BOOT_TIME" "pass" "${SURFACEFLINGER_BOOT_TIME}" "s"
+ fi
+
+
+ # 01-01 00:00:51.269 I/AlarmManager( 536): Current time only 51269, advancing to build time 1593449504000 # Unit is MS
+ # 06-29 16:51:44.003 D/SystemServerTiming( 536): StartAlarmManagerService took to complete: 6ms
+ # 06-29 16:51:50.018 D/AlarmManagerService( 536): Setting time of day to sec=1595426411 #UNIT is SEC
+ # 07-22 14:00:11.659 I/LaunchParamsPersister( 536): Didn't find launch param folder for user 0
+ # 07-22 14:00:11.659 W/AlarmManagerService( 536): Unable to set rtc to 1595426411: Permission denied
+ POINT_SURFACEFLINGER_BOOTED=$(getTimeStampFromLogcat "Boot is finished")
+ POINT_LAUNCHER_DISPLAYED=$(getTimeStampFromLogcat "Displayed com.android.launcher")
+
+ if [ -n "${POINT_SURFACEFLINGER_BOOTED}" ] && [ -n "${POINT_LAUNCHER_DISPLAYED}" ] && [ -n "${INIT_TO_SURFACEFLINGER_START_TIME}" ]; then
+ min=$(echo "${POINT_LAUNCHER_DISPLAYED} ${POINT_SURFACEFLINGER_BOOTED}" | awk '{if ($1 < $2) printf $1; else print $2}')
+ if [ "${min}" = "${POINT_SURFACEFLINGER_BOOTED}" ]; then
+ ## In case timestamp of "Boot is finished" is smaller than timestamp of "Displayed com.android.launcher",
+ ## we calculate TIME_FROM_SURFACEFLINER_BOOTED_TO_LAUNCHER_DISPLAYED as the difference between
+ ## "Boot is finished" and "Displayed com.android.launcher"
+
+ # find the "Setting time of day to sec=" line between "Boot is finished" and "Displayed com.android.launcher"
+ # Not sure if there is the case that "Setting time of day to" is called twice between "Boot is finished" and "Displayed com.android.launcher"
+ # 01-01 00:00:24.024 D/AlarmManagerService( 397): Setting time of day to sec=1595059530
+ # 07-18 08:05:30.003 D/SystemServerTiming( 397): StartAlarmManagerService took to complete: 11ms
+ # --
+ # 07-18 08:05:39.722 I/SurfaceFlinger( 283): Boot is finished (23423 ms)
+ # 07-18 08:05:39.725 I/ActivityManager( 397): About to commit checkpoint
+ # --
+ # 07-18 08:05:44.837 I/ActivityTaskManager( 397): Displayed com.android.launcher3/.Launcher: +1s220ms
+ # 07-18 08:05:44.972 D/Zygote ( 249): Forked child process 1258
+ # --
+ # 07-18 08:05:45.654 D/AlarmManagerService( 397): Setting time of day to sec=1595481066
+ # 07-23 05:11:06.127 D/DevicePolicyManager( 397): updateSystemUpdateFreezePeriodsRecord
+
+ found_boot_is_finished=false
+ SETTING_TIME_OF_DAY_SECS=0
+ LAST_POINT_SETTING_TIME_OF_DAY="${POINT_SURFACEFLINGER_BOOTED}" # in case not found in between
+ DURATION_FROM_SURFACEFLINGER_BOOTED=0
+ grep -e 'Setting time of day to sec' -e 'Boot is finished' -e 'Displayed com.android.launcher' "${LOG_LOGCAT_ALL}" > "${LOG_LOGCAT_ALL}.tmp"
+ while read -r line; do
+ if echo "${line}"|grep -iq 'Displayed com.android.launcher'; then
+ POINT_LAUNCHER_DISPLAYED=$(calculate_logcat_timestamp "$line")
+ DURATION_FROM_SURFACEFLINGER_BOOTED=$(echo "${DURATION_FROM_SURFACEFLINGER_BOOTED}" "${POINT_LAUNCHER_DISPLAYED}" "${LAST_POINT_SETTING_TIME_OF_DAY}" | awk '{printf "%.3f", $1 + $2 - $3}')
+ break
+ fi
+
+ if ${found_boot_is_finished}; then
+ if echo "${line}"|grep -iq 'Setting time of day to sec='; then
+ POINT_SETTING_TIME_OF_DAY=$(calculate_logcat_timestamp "$line")
+ DURATION_FROM_SURFACEFLINGER_BOOTED=$(echo "${DURATION_FROM_SURFACEFLINGER_BOOTED}" "${POINT_SETTING_TIME_OF_DAY}" "${LAST_POINT_SETTING_TIME_OF_DAY}" | awk '{printf "%.3f", $1 + $2 - $3}')
+ SETTING_TIME_OF_DAY_SECS=$(echo "${line}" | cut -d= -f2)
+ LAST_POINT_SETTING_TIME_OF_DAY="${SETTING_TIME_OF_DAY_SECS}"
+ fi
+ elif echo "${line}"|grep -iq 'Boot is finished'; then
+ found_boot_is_finished=true
+ fi
+ done <"${LOG_LOGCAT_ALL}.tmp"
+ rm -f "${LOG_LOGCAT_ALL}.tmp"
+
+ TIME_FROM_SURFACEFLINER_BOOTED_TO_LAUNCHER_DISPLAYED="${DURATION_FROM_SURFACEFLINGER_BOOTED}"
+ else
+ ## In case timestamp of "Boot is finished" is greater than timestamp of "Displayed com.android.launcher",
+ ## we set TIME_FROM_SURFACEFLINER_BOOTED_TO_LAUNCHER_DISPLAYED as 0 since it is already included in the "Boot is finished" time
+ TIME_FROM_SURFACEFLINER_BOOTED_TO_LAUNCHER_DISPLAYED=0
+ fi
+
+ output_test_result "TIME_FROM_SURFACEFLINER_BOOTED_TO_LAUNCHER_DISPLAYED" "pass" "${TIME_FROM_SURFACEFLINER_BOOTED_TO_LAUNCHER_DISPLAYED}" "s"
+
+ ANDROID_UI_SHOWN=$(echo "${INIT_TO_SURFACEFLINGER_START_TIME} ${SURFACEFLINGER_BOOT_TIME} ${TIME_FROM_SURFACEFLINER_BOOTED_TO_LAUNCHER_DISPLAYED}" | awk '{printf "%.3f",$1 + $2 + $3;}')
+ output_test_result "ANDROID_UI_SHOWN" "pass" "${ANDROID_UI_SHOWN}" "s"
+ fi
+
+ if [ -n "${INIT_TO_SURFACEFLINGER_START_TIME}" ] && [ -n "${SURFACEFLINGER_BOOT_TIME}" ] ; then
+ ANDROID_BOOT_TIME=$(echo "${INIT_TO_SURFACEFLINGER_START_TIME} ${SURFACEFLINGER_BOOT_TIME}" | awk '{printf "%.3f",$1 + $2;}')
+ output_test_result "ANDROID_BOOT_TIME" "pass" "${ANDROID_BOOT_TIME}" "s"
+ fi
+
+ ## Special case about the timestamp:
+ ## 12-31 23:59:59.989 E/ueventd ( 0): LoadWithAliases was unable to load of:NmpuT<NULL>Cti,omap5-mpu
+ ## 01-01 00:00:00.005 E/ueventd ( 0): LoadWithAliases was unable to load of:NledsT<NULL>Cgpio-leds
+
+ line_first_service_start=$(grep "init .* starting service '" "${LOG_LOGCAT_DMESG}"|head -n1)
+ line_last_service_exit=$(grep "init .* Service .* exited with status" "${LOG_LOGCAT_DMESG}"|tail -n1)
+
+ POINT_FIRST_SERVICE_START=$(getTimeStampFromLogcatDmesg "${line_first_service_start}")
+ POINT_LAST_SERVICE_END=$(getTimeStampFromLogcatDmesg "${line_last_service_exit}")
+ if [ -n "${POINT_LAST_SERVICE_END}" ] && [ -n "${POINT_FIRST_SERVICE_START}" ]; then
+ ANDROID_SERVICES_TIME=$(echo "${POINT_LAST_SERVICE_END} ${POINT_FIRST_SERVICE_START}" | awk '{printf "%.3f",$1-$2;}')
+
+ IS_ANDROID_SERVICES_TIME_LT_ZERO=$(echo "$ANDROID_SERVICES_TIME" |awk '{ if ($1 < 0) printf "true"; else print "false";}')
+ if ${IS_ANDROID_SERVICES_TIME_LT_ZERO}; then
+ # for case that the timestamps crosses years
+ # move the first service start point to one day before,
+ # and use the day of the first service start point as the day for the last service exit point
+ mmdd_first_service_start=$(echo "${line_first_service_start}" |awk '{printf "%s\n", $1}')
+ hhmmss_ms_first_service_start=$(echo "${line_first_service_start}" |awk '{printf "%s\n", $2}')
+ hhmmss_ms_last_service_exit=$(echo "${line_last_service_exit}" |awk '{printf "%s\n", $2}')
+
+ dd_first_service_start=$(echo "${mmdd_first_service_start}"|cut -d- -f2)
+ dd_first_service_start=$((dd_first_service_start - 1))
+ mm_first_service_start=$(echo "${mmdd_first_service_start}"|cut -d- -f1)
+ mmdd_first_service_start_new="${mm_first_service_start}-${dd_first_service_start}"
+
+ POINT_FIRST_SERVICE_START=$(getTimestampWithMMDDAndHHMMSSMS "${mmdd_first_service_start_new}" "${hhmmss_ms_first_service_start}")
+ POINT_LAST_SERVICE_END=$(getTimestampWithMMDDAndHHMMSSMS "${mmdd_first_service_start}" "${hhmmss_ms_last_service_exit}")
+ ANDROID_SERVICES_TIME=$(echo "${POINT_LAST_SERVICE_END} ${POINT_FIRST_SERVICE_START}" | awk '{printf "%.3f",$1-$2;}')
+ fi
+ output_test_result "ANDROID_SERVICES_TIME" "pass" "${ANDROID_SERVICES_TIME}" "s"
+ fi
+
+ if [ -n "${KERNEL_BOOT_TIME}" ] && [ -n "${ANDROID_BOOT_TIME}" ] ; then
+ TOTAL_SECONDS=$(echo "${KERNEL_BOOT_TIME} ${ANDROID_BOOT_TIME}" | awk '{printf "%.3f",$1 + $2;}')
+ output_test_result "TOTAL_BOOT_TIME" "pass" "${TOTAL_SECONDS}" "s"
+ fi
+}
+
+OPERATION=$1
+rm -rf "${RESULT_FILE}"
+if [ "X${OPERATION}" = "XCOLLECT" ]; then
+ G_VERBOSE_OUTPUT=FALSE
+ G_RECORD_LOCAL_CSV=FALSE
+ COLLECT_NO=$2
+ mkdir -p ${dir_boottime_data}
+
+ # shellcheck disable=SC2035
+ logcat -d -v time *:V > "${dir_boottime_data}/logcat_all_${COLLECT_NO}.log"
+ output_test_result "BOOTTIME_LOGCAT_ALL_COLLECT" "pass"
+ logcat -d -b events -v time > "${dir_boottime_data}/logcat_events_${COLLECT_NO}.log"
+ logcat -d -b kernel -v time > "${dir_boottime_data}/logcat_dmesg_${COLLECT_NO}.log"
+ output_test_result "BOOTTIME_LOGCAT_EVENTS_COLLECT" "pass"
+ su 0 dmesg > "${dir_boottime_data}/dmesg_${COLLECT_NO}.log"
+ output_test_result "BOOTTIME_DMESG_COLLECT" "pass"
+ su 0 getprop > "${dir_boottime_data}/prop_${COLLECT_NO}.log"
+ output_test_result "BOOTTIME_PROP_COLLECT" "pass"
+
+ # make sure to write all files to disk
+ sync
+
+ echo "==============list of files under ${dir_boottime_data}/ starts from here:"
+ ls -l ${dir_boottime_data}/*
+ echo "==============list of files under ${dir_boottime_data}/ ends from here:"
+elif [ "X${OPERATION}" = "XANALYZE" ]; then
+ count=$2
+
+ ## Check if there is any case that the surfaceflinger service
+ ## was started several times
+ if [ "${count}" -eq 0 ]; then
+ i=0
+ else
+ i=1
+ fi
+ service_started_once=true
+ no_boot_timeout_force_display=true
+ while ${service_started_once}; do
+ if [ $i -gt "$count" ]; then
+ break
+ fi
+ ## check the existence of "Boot is finished"
+ LOG_LOGCAT_ALL="${dir_boottime_data}/logcat_all_${i}.log"
+ android_boottime_lines=$(grep -c "Boot is finished" "${LOG_LOGCAT_ALL}")
+ if [ "${android_boottime_lines}" -ne 1 ]; then
+ echo "There are ${android_boottime_lines} existences of 'Boot is finished' in file: ${LOG_LOGCAT_ALL}"
+ echo "Please check the status first"
+ echo "==============content of the file ${LOG_LOGCAT_ALL} start from here:"
+ cat ${LOG_LOGCAT_ALL}
+ echo "==============content of the file ${LOG_LOGCAT_ALL} end from here:"
+
+ service_started_once=false
+ fi
+
+ if grep -q "BOOT TIMEOUT: forcing display enabled" "${LOG_LOGCAT_ALL}"; then
+ no_boot_timeout_force_display=false
+ echo "There are boot timeout problem in file: ${LOG_LOGCAT_ALL}"
+ echo "Please check the status first"
+ break
+ fi
+
+ LOG_DMESG="${dir_boottime_data}/dmesg_${i}.log"
+ ## check the service of bootanim
+ # [ 45.180397] init: Service 'bootanim' (pid 513) exited with status 0 oneshot service took 5.083000 seconds in background
+ # [ 45.191340] init: Sending signal 9 to service 'bootanim' (pid 513) process group...
+ bootanim_lines=$(grep -c "init: Service 'bootanim'.* exited with status" "${LOG_DMESG}")
+ if [ "${bootanim_lines}" -ne 1 ]; then
+ echo "bootanim service seems to be started ${bootanim_lines} times in file: ${LOG_DMESG}"
+ echo "Please check the status first"
+ echo "==============content of the file ${LOG_DMESG} start from here:"
+ cat ${LOG_DMESG}
+ echo "==============content of the file ${LOG_DMESG} end from here:"
+ service_started_once=false
+ fi
+ i=$((i+1))
+ done
+
+ if ! ${no_boot_timeout_force_display}; then
+ output_test_result "NO_BOOT_TIMEOUT_FORCE_DISPLAY" "fail"
+ fi
+ if ! ${service_started_once}; then
+ output_test_result "SERVICE_STARTED_ONCE" "fail"
+ fi
+
+ if ${no_boot_timeout_force_display} && ${service_started_once}; then
+ no_checking_problem=true
+ else
+ no_checking_problem=false
+ fi
+
+ if ${no_checking_problem}; then
+ if [ "${count}" -eq 0 ]; then
+ i=0
+ else
+ i=1
+ fi
+ G_RESULT_NOT_RECORD=TRUE
+ G_RECORD_LOCAL_CSV=TRUE
+ export G_RECORD_LOCAL_CSV G_RESULT_NOT_RECORD
+ while true; do
+ if [ $i -gt "$count" ]; then
+ break
+ fi
+ echo "=======Start to collect infomation for $i/$count iteration"
+ getBootTimeInfoFromLogs ${i}
+ getBootTimeInfoFromProperty ${i}
+ echo "=======Finished collecting infomation for $i/$count iteration"
+ i=$((i+1))
+ done
+
+ G_RESULT_NOT_RECORD=FALSE
+ export G_RESULT_NOT_RECORD
+ if [ "X${G_RECORD_LOCAL_CSV}" = "XTRUE" ]; then
+ echo "=======Start to statistic infomation"
+ statistic ${F_RAW_DATA_CSV} 2 |tee ${F_STATISTIC_DATA_CSV}
+ sed -i 's/=/,/' "${F_STATISTIC_DATA_CSV}"
+
+ G_RECORD_LOCAL_CSV=FALSE
+ export G_RECORD_LOCAL_CSV
+ while read -r line; do
+ if ! echo "$line"|grep -q ,; then
+ continue
+ fi
+ key=$(echo "$line"|cut -d, -f1)
+ measurement=$(echo "$line"|cut -d, -f2)
+ units=$(echo "$line"|cut -d, -f3)
+ output_test_result "${key}_avg" "pass" "${measurement}" "${units}"
+ done < "${F_STATISTIC_DATA_CSV}"
+ echo "=======Finished collecting statistic infomation"
+ fi
+
+ output_test_result "SERVICE_STARTED_ONCE" "pass"
+ fi
+
+ # set again for following output_test_result
+ G_RECORD_LOCAL_CSV=FALSE
+ cd ${local_tmp}|| exit 1
+ tar -czvf boottime.tgz boottime
+ output_test_result "BOOTTIME_ANALYZE" "pass"
+else
+ G_VERBOSE_OUTPUT=FALSE
+ G_RECORD_LOCAL_CSV=FALSE
+ export G_VERBOSE_OUTPUT G_RECORD_LOCAL_CSV
+ echo "Not recognised operation"
+ output_test_result "BOOTTIME" "fail"
+fi
diff --git a/automated/android/dd-wr-speed/dd-wr-speed.yaml b/automated/android/dd-wr-speed/dd-wr-speed.yaml
index fa4b5ee..b2ecf49 100644
--- a/automated/android/dd-wr-speed/dd-wr-speed.yaml
+++ b/automated/android/dd-wr-speed/dd-wr-speed.yaml
@@ -1,6 +1,6 @@
metadata:
name: dd-speed-test
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "dd write/read speed test."
maintainer:
- chase.qi@linaro.org
diff --git a/automated/android/ion-monitor-tool/ion-monitor-tool.yaml b/automated/android/ion-monitor-tool/ion-monitor-tool.yaml
index 198661f..2c18f26 100644
--- a/automated/android/ion-monitor-tool/ion-monitor-tool.yaml
+++ b/automated/android/ion-monitor-tool/ion-monitor-tool.yaml
@@ -1,6 +1,6 @@
metadata:
name: ion-monitor-tool
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Run ion monitor tool to check memory leaks."
maintainer:
- axel.lebourhis@linaro.org
diff --git a/automated/android/libc-bench/libc-bench.yaml b/automated/android/libc-bench/libc-bench.yaml
index ddc9237..4696074 100644
--- a/automated/android/libc-bench/libc-bench.yaml
+++ b/automated/android/libc-bench/libc-bench.yaml
@@ -1,6 +1,6 @@
metadata:
name: libc-bench
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Run libc-bench to benchmark the performance of bionic.
The source can be viewed here:
https://android-git.linaro.org/platform/external/libc-bench"
diff --git a/automated/android/linaro-android-kernel-tests/linaro-android-kernel-tests.yaml b/automated/android/linaro-android-kernel-tests/linaro-android-kernel-tests.yaml
index 0f0d9da..fa60fdb 100644
--- a/automated/android/linaro-android-kernel-tests/linaro-android-kernel-tests.yaml
+++ b/automated/android/linaro-android-kernel-tests/linaro-android-kernel-tests.yaml
@@ -1,6 +1,6 @@
metadata:
name: linaro-android-kernel-tests
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Linaro Android kernel test suite comprises of kernel feature
tests which are unique to android. The test suite runs
following kernel tests: ashmem, ashmem-expanded, alarmdev,
diff --git a/automated/android/media-codecs-functional-tests/media-codecs-functional-tests.yaml b/automated/android/media-codecs-functional-tests/media-codecs-functional-tests.yaml
index 2c3ed07..15a5a1e 100644
--- a/automated/android/media-codecs-functional-tests/media-codecs-functional-tests.yaml
+++ b/automated/android/media-codecs-functional-tests/media-codecs-functional-tests.yaml
@@ -1,6 +1,6 @@
metadata:
name: media-codecs-functional-tests
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Media functional codecs test suite for linaro android
verifies arm based audio/video software codecs available in
Android AOSP release. These codecs are used in implementation
diff --git a/automated/android/meminfo/meminfo.yaml b/automated/android/meminfo/meminfo.yaml
index 69fd484..6c80061 100644
--- a/automated/android/meminfo/meminfo.yaml
+++ b/automated/android/meminfo/meminfo.yaml
@@ -1,6 +1,6 @@
metadata:
name: meminfo
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Collect the information in /proc/meminfo after boot."
maintainer:
- yongqin.liu@linaro.org
diff --git a/automated/android/microbenchmarks/manifest.xml b/automated/android/microbenchmarks/manifest.xml
index 60848b4..7060c04 100644
--- a/automated/android/microbenchmarks/manifest.xml
+++ b/automated/android/microbenchmarks/manifest.xml
@@ -19,6 +19,9 @@
<!-- ART target for running art target tests" -->
<project path="device/generic/art" name="device/generic/art" />
+ <!-- Add prebuilt jdk to build benchmarks -->
+ <project clone-depth="1" dest-branch="master" groups="pdk" name="platform/prebuilts/jdk/jdk11" path="prebuilts/jdk/jdk11" upstream="master"/>
+
<project path="build/make" name="platform/build" groups="pdk">
<copyfile src="core/root.mk" dest="Makefile" />
<linkfile src="CleanSpec.mk" dest="build/CleanSpec.mk" />
diff --git a/automated/android/microbenchmarks/microbenchmarks.sh b/automated/android/microbenchmarks/microbenchmarks.sh
index b4e2bbf..917d9b6 100755
--- a/automated/android/microbenchmarks/microbenchmarks.sh
+++ b/automated/android/microbenchmarks/microbenchmarks.sh
@@ -13,10 +13,9 @@ export QA_REPORTS_URL
set +x
lava_test_dir="$(find /lava-* -maxdepth 0 -type d -regex '/lava-[0-9]+' 2>/dev/null | sort | tail -1)"
-if test -f "${lava_test_dir}/secrets" && grep -q "ART_TOKEN" "${lava_test_dir}/secrets"; then
+if test -f "${lava_test_dir}/secrets"; then
# shellcheck disable=SC1090
. "${lava_test_dir}/secrets"
- export ART_TOKEN
export ARTIFACTORIAL_TOKEN
export QA_REPORTS_TOKEN
fi
@@ -27,7 +26,7 @@ set -x
# shellcheck disable=SC1091
. ../../lib/android-test-lib
-PKG_DEPS="git wget binutils curl bc xz-utils python python3 python3-scipy openjdk-8-jdk"
+PKG_DEPS="git wget binutils curl bc xz-utils python python3 python3-scipy"
SKIP_INSTALL="false"
@@ -77,7 +76,7 @@ export OUT=${PWD}/out/target/product/${LUNCH_TARGET}/
./scripts/benchmarks/benchmarks_run_target.sh --skip-build true --iterations "${ITERATIONS}" \
--mode "${MODE}" --target-device "${LUNCH_TARGET}"
-if [ -n "${ART_TOKEN}" ]; then
+if [ -n "${QA_REPORTS_TOKEN}" ]; then
git clone https://git.linaro.org/qa/post-build-report.git pbr; mkdir -p pbr/artifacts/
cp ./*.json pbr/artifacts/
wget "${SNAPSHOTS_URL}"/pinned-manifest.xml -O pbr/artifacts/pinned-manifest.xml
diff --git a/automated/android/microbenchmarks/microbenchmarks.yaml b/automated/android/microbenchmarks/microbenchmarks.yaml
index 0208cc3..19690e7 100644
--- a/automated/android/microbenchmarks/microbenchmarks.yaml
+++ b/automated/android/microbenchmarks/microbenchmarks.yaml
@@ -1,6 +1,6 @@
metadata:
name: microbenchmarks
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Run ART microbenchmarks."
maintainer:
- vishal.bhoj@linaro.org
diff --git a/automated/android/monkey/monkey.yaml b/automated/android/monkey/monkey.yaml
index 42bd0c1..3b88318 100755
--- a/automated/android/monkey/monkey.yaml
+++ b/automated/android/monkey/monkey.yaml
@@ -1,6 +1,6 @@
metadata:
name: monkey
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Monkey performs stress testing for stability using monkey
command which generates pesudo-random streams of user events
such as clicks, touches, or gestures as well as some system
diff --git a/automated/android/multinode/connect-to-remote-adb-tcpip-devices.yaml b/automated/android/multinode/connect-to-remote-adb-tcpip-devices.yaml
index 3dc2e42..8a13503 100644
--- a/automated/android/multinode/connect-to-remote-adb-tcpip-devices.yaml
+++ b/automated/android/multinode/connect-to-remote-adb-tcpip-devices.yaml
@@ -1,6 +1,6 @@
metadata:
name: connect-to-remote-adb-tcpip-devices
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "adb MultiNode setup: connect to remote devices made accessible via adb TCP/IP."
maintainer:
- karsten@fairphone.com
diff --git a/automated/android/multinode/release-remote-adb-tcpip-devices.yaml b/automated/android/multinode/release-remote-adb-tcpip-devices.yaml
index c28c22e..0e239cf 100644
--- a/automated/android/multinode/release-remote-adb-tcpip-devices.yaml
+++ b/automated/android/multinode/release-remote-adb-tcpip-devices.yaml
@@ -1,6 +1,6 @@
metadata:
name: release-remote-adb-tcpip-devices
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Disconnect from remote adb devices and cleanup."
maintainer:
- karsten@fairphone.com
diff --git a/automated/android/multinode/remote-adb-devices-smoke-test.yaml b/automated/android/multinode/remote-adb-devices-smoke-test.yaml
index 838b866..c7b65c8 100644
--- a/automated/android/multinode/remote-adb-devices-smoke-test.yaml
+++ b/automated/android/multinode/remote-adb-devices-smoke-test.yaml
@@ -1,6 +1,6 @@
metadata:
name: remote-adb-devices-smoke-test
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Smoke test demonstrating access to adb devices over TCP/IP."
maintainer:
- karsten@fairphone.com
diff --git a/automated/android/multinode/share-local-device-over-adb-tcpip.yaml b/automated/android/multinode/share-local-device-over-adb-tcpip.yaml
index 9ede1df..a8ea637 100644
--- a/automated/android/multinode/share-local-device-over-adb-tcpip.yaml
+++ b/automated/android/multinode/share-local-device-over-adb-tcpip.yaml
@@ -1,6 +1,6 @@
metadata:
name: share-local-device-over-adb-tcpip
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "adb MultiNode setup: make local device remotely accessible via adb TCP/IP.
Handles the device over to a role that responds to the following synchronization steps:
- lava-sync start_handover
diff --git a/automated/android/multinode/tradefed/tradefed-multinode.yaml b/automated/android/multinode/tradefed/tradefed-multinode.yaml
index d6728df..eff5502 100644
--- a/automated/android/multinode/tradefed/tradefed-multinode.yaml
+++ b/automated/android/multinode/tradefed/tradefed-multinode.yaml
@@ -1,6 +1,6 @@
metadata:
name: cts
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Run tradefed based tests in LAVA."
maintainer:
- milosz.wasilewski@linaro.org
diff --git a/automated/android/multinode/wait-and-keep-local-device-accessible.yaml b/automated/android/multinode/wait-and-keep-local-device-accessible.yaml
index 720fedf..bd63862 100644
--- a/automated/android/multinode/wait-and-keep-local-device-accessible.yaml
+++ b/automated/android/multinode/wait-and-keep-local-device-accessible.yaml
@@ -1,6 +1,6 @@
metadata:
name: wait-and-keep-local-device-accessible
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Continuously wait for MultiNode messages from a remote role (master) and make the
locally connected device accessible again when it is lost for the remote role."
maintainer:
diff --git a/automated/android/multinode/wait-for-release-and-reset.yaml b/automated/android/multinode/wait-for-release-and-reset.yaml
index 78a52b4..291609e 100644
--- a/automated/android/multinode/wait-for-release-and-reset.yaml
+++ b/automated/android/multinode/wait-for-release-and-reset.yaml
@@ -1,6 +1,6 @@
metadata:
name: wait-for-release-and-reset
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Wait until a remote MultiNode role (master) requests to release the device.
Then, bring the device back into adb USB state."
maintainer:
diff --git a/automated/android/noninteractive-tradefed/tradefed-runner.py b/automated/android/noninteractive-tradefed/tradefed-runner.py
index 7a3aac8..c2bce5c 100755
--- a/automated/android/noninteractive-tradefed/tradefed-runner.py
+++ b/automated/android/noninteractive-tradefed/tradefed-runner.py
@@ -49,7 +49,12 @@ def result_parser(xml_file, result_format):
or 0x10000 <= num <= 0x10FFFF):
etree_content = etree_content[:mstart] + etree_content[mend:]
endpos = len(etree_content)
- pos = mend
+ # next time search again from the same position as this time
+ # as the detected pattern was removed here
+ pos = mstart
+ else:
+ # continue from the end of this match
+ pos = mend
try:
root = ET.fromstring(etree_content)
diff --git a/automated/android/noninteractive-tradefed/tradefed.yaml b/automated/android/noninteractive-tradefed/tradefed.yaml
index a9c1802..ed8ffde 100644
--- a/automated/android/noninteractive-tradefed/tradefed.yaml
+++ b/automated/android/noninteractive-tradefed/tradefed.yaml
@@ -1,6 +1,6 @@
metadata:
name: cts
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Run tradefed based tests in LAVA."
maintainer:
- milosz.wasilewski@linaro.org
@@ -53,7 +53,7 @@ run:
- chown testuser:testuser .
- if echo "${TEST_REBOOT_EXPECTED}" |grep -i "true" ; then ./monitor_fastboot.sh & fi
- ./monitor_adb.sh &
- - sudo -u testuser ./tradefed.sh -o "${TIMEOUT}" -c "${TEST_URL}" -t "${TEST_PARAMS}" -p "${TEST_PATH}" -r "${RESULTS_FORMAT}" -n "${ANDROID_SERIAL}" -f "${FAILURES_PRINTED}" -a "${AP_SSID}" -k "${AP_KEY}"
+ - sudo -u testuser ./tradefed.sh -o "${TIMEOUT}" -c "${TEST_URL}" -t "${TEST_PARAMS}" -p "${TEST_PATH}" -r "${RESULTS_FORMAT}" -n "${ANDROID_SERIAL}" -f "${FAILURES_PRINTED}" -a "${AP_SSID}" -k "${AP_KEY}" || true
# Upload test log and result files to artifactorial.
- cp -r ./${TEST_PATH}/results ./output/ || true
- cp -r ./${TEST_PATH}/logs ./output/ || true
diff --git a/automated/android/optee/optee-gtest-gatekeeper-keymaster.yaml b/automated/android/optee/optee-gtest-gatekeeper-keymaster.yaml
index 1b9443d..5483e30 100755
--- a/automated/android/optee/optee-gtest-gatekeeper-keymaster.yaml
+++ b/automated/android/optee/optee-gtest-gatekeeper-keymaster.yaml
@@ -1,6 +1,6 @@
metadata:
name: optee-gtest-gatekeeper-keymaster
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "gatekeeper and keymaster gtests"
maintainer:
- victor.chong@linaro.org
diff --git a/automated/android/optee/optee-gtest-kmgk.yaml b/automated/android/optee/optee-gtest-kmgk.yaml
index 65e0e50..8356884 100755
--- a/automated/android/optee/optee-gtest-kmgk.yaml
+++ b/automated/android/optee/optee-gtest-kmgk.yaml
@@ -1,6 +1,6 @@
metadata:
name: optee-gtest-kmgk
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "OP-TEE KMGK gtest"
maintainer:
- victor.chong@linaro.org
diff --git a/automated/android/optee/optee-xtest.yaml b/automated/android/optee/optee-xtest.yaml
index 23ce8a4..2b49a42 100755
--- a/automated/android/optee/optee-xtest.yaml
+++ b/automated/android/optee/optee-xtest.yaml
@@ -1,6 +1,6 @@
metadata:
name: optee-xtest
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "OP-TEE sanity test suite"
maintainer:
- chase.qi@linaro.org
diff --git a/automated/android/piglit-gles2/piglit-gles2.yaml b/automated/android/piglit-gles2/piglit-gles2.yaml
index 83ffa2a..b773479 100644
--- a/automated/android/piglit-gles2/piglit-gles2.yaml
+++ b/automated/android/piglit-gles2/piglit-gles2.yaml
@@ -1,6 +1,6 @@
metadata:
name: piglit-gles2
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Piglit is a collection of automated tests for OpenGL
implementations. piglit_gles2.0 runs OpenGL ES 2.0 tests."
maintainer:
diff --git a/automated/android/piglit-gles3/piglit-gles3.yaml b/automated/android/piglit-gles3/piglit-gles3.yaml
index 43cc437..ae15373 100644
--- a/automated/android/piglit-gles3/piglit-gles3.yaml
+++ b/automated/android/piglit-gles3/piglit-gles3.yaml
@@ -1,6 +1,6 @@
metadata:
name: piglit-gles3
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Piglit is a collection of automated tests for OpenGL
implementations. piglit_gles3.0 runs OpenGL ES 3.0 tests."
maintainer:
diff --git a/automated/android/piglit-glslparser/piglit-glslparser.yaml b/automated/android/piglit-glslparser/piglit-glslparser.yaml
index c5bad20..1988766 100644
--- a/automated/android/piglit-glslparser/piglit-glslparser.yaml
+++ b/automated/android/piglit-glslparser/piglit-glslparser.yaml
@@ -1,6 +1,6 @@
metadata:
name: piglit-glslparser
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "piglit glslparser test for android. The GLSL Parser provides
a front end for parsing and operating on OpenGL Shading
Language code. "
diff --git a/automated/android/piglit-shader-runner/piglit-shader-runner.yaml b/automated/android/piglit-shader-runner/piglit-shader-runner.yaml
index 42193ca..f13300c 100644
--- a/automated/android/piglit-shader-runner/piglit-shader-runner.yaml
+++ b/automated/android/piglit-shader-runner/piglit-shader-runner.yaml
@@ -1,6 +1,6 @@
metadata:
name: piglit-shader-runner
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Piglit shader_runner test suite for android. The test suite
runs shader tests using OpenGL shaders."
maintainer:
diff --git a/automated/android/pm-qa/pm-qa.yaml b/automated/android/pm-qa/pm-qa.yaml
index 90b5a82..6890576 100644
--- a/automated/android/pm-qa/pm-qa.yaml
+++ b/automated/android/pm-qa/pm-qa.yaml
@@ -1,6 +1,6 @@
metadata:
name: pm-qa
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Test power management (PM-QA). Currently, the test runs
cpufreq, cpuidle, cpuhotplug, thermal and cputopology by
default and individual test can be run by setting TESTS
diff --git a/automated/android/stringbench/stringbench.yaml b/automated/android/stringbench/stringbench.yaml
index ec58bce..ceea092 100644
--- a/automated/android/stringbench/stringbench.yaml
+++ b/automated/android/stringbench/stringbench.yaml
@@ -1,6 +1,6 @@
metadata:
name: stringbench
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Run stringbench command to benchmark the performance of
string relevant feature of bionic. In the feature this test
should be upstream to bionic-benchmarks.
diff --git a/automated/android/tradefed/result_parser.py b/automated/android/tradefed/result_parser.py
index 3dd90a9..477b068 100644
--- a/automated/android/tradefed/result_parser.py
+++ b/automated/android/tradefed/result_parser.py
@@ -57,7 +57,12 @@ class TradefedResultParser:
):
etree_content = etree_content[:mstart] + etree_content[mend:]
endpos = len(etree_content)
- pos = mend
+ # next time search again from the same position as this time
+ # as the detected pattern was removed here
+ pos = mstart
+ else:
+ # continue from the end of this match
+ pos = mend
try:
root = ET.fromstring(etree_content)
diff --git a/automated/android/tradefed/tradefed.yaml b/automated/android/tradefed/tradefed.yaml
index 0d5822d..ae5ae00 100644
--- a/automated/android/tradefed/tradefed.yaml
+++ b/automated/android/tradefed/tradefed.yaml
@@ -1,6 +1,6 @@
metadata:
name: cts
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Run tradefed based tests in LAVA."
maintainer:
- milosz.wasilewski@linaro.org
diff --git a/automated/android/wait-single-boot-completed.yaml b/automated/android/wait-single-boot-completed.yaml
index ea8f062..f35c02b 100644
--- a/automated/android/wait-single-boot-completed.yaml
+++ b/automated/android/wait-single-boot-completed.yaml
@@ -1,6 +1,6 @@
metadata:
name: wait-single-boot-completed
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Wait until one locally attached Android device is completely booted.
Usable only if at most one device is visible to adb, but has the advantage that the device's
serial number does not need to be known in advance."
diff --git a/automated/android/wait-single-network-connected.yaml b/automated/android/wait-single-network-connected.yaml
index 3ed6db7..6f235a8 100644
--- a/automated/android/wait-single-network-connected.yaml
+++ b/automated/android/wait-single-network-connected.yaml
@@ -1,6 +1,6 @@
metadata:
name: wait-single-network-connected
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Wait until one locally attached Android device has a network IP address.
Usable only if at most one device is visible to adb, but has the advantage that the device's
serial number does not need to be known in advance."
diff --git a/automated/android/workload-automation/workload-automation.yaml b/automated/android/workload-automation/workload-automation.yaml
index f706299..b64e908 100644
--- a/automated/android/workload-automation/workload-automation.yaml
+++ b/automated/android/workload-automation/workload-automation.yaml
@@ -1,6 +1,6 @@
metadata:
name: workload-automation
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Workload Automation on Android"
maintainer:
- milosz.wasilewski@linaro.org
diff --git a/automated/android/workload-automation3/workload-automation.yaml b/automated/android/workload-automation3/workload-automation.yaml
index 9bbde23..2dbe0f2 100644
--- a/automated/android/workload-automation3/workload-automation.yaml
+++ b/automated/android/workload-automation3/workload-automation.yaml
@@ -1,6 +1,6 @@
metadata:
name: workload-automation
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Workload Automation on Android"
maintainer:
- milosz.wasilewski@linaro.org
diff --git a/automated/lib/sh-test-lib b/automated/lib/sh-test-lib
index b910354..43a54bd 100755
--- a/automated/lib/sh-test-lib
+++ b/automated/lib/sh-test-lib
@@ -476,3 +476,92 @@ generate_skipfile() {
cat "${SKIPFILE_PATH}"
info_msg "EOF"
}
+
+# Description: parted is not working on SSD (Solid-State Drives) this is the
+# reason to use fdisk instead.
+# Dependency packages for fdisk and mkfs are "fdisk e2fsprogs dosfstools"
+# fdisk and mkfs commands might be in $PATH /sbin or /usr/sbin or
+# /usr/local/sbin export PATH for better usage.
+export PATH=$PATH:/sbin:/usr/sbin:/usr/local/sbin
+
+
+# Usage:
+# partition_disk "/dev/sdb" "+5G +10G"
+# shellcheck disable=SC2039
+partition_disk() {
+ # when sizes is empty it creates single partitions with total disk size
+ [ "$#" -lt 1 ] && error_msg "Usage: <block-device> <sizes>"
+ local device="$1"
+ local sizes="$2"
+
+ command -v fdisk
+ exit_on_fail "fdisk not in ${PATH} or not installed"
+
+ # Create a new empty DOS partition table
+ (
+ echo o
+ echo w) | fdisk "${device}"
+
+ if [ -n "${sizes}" ]; then
+ # Create partitions as per sizes
+ for size in ${sizes}; do
+ # Create partitions with ${size}
+ (
+ echo n
+ echo p
+ echo
+ echo
+ echo "${size}"
+ echo w) | fdisk "${device}"
+ check_return "fdisk-${device}-${size}-partition"
+ # sync and wait 5 sec
+ sync
+ sleep 5
+ done
+ fi
+
+ # Create a partition at the end.
+ # Use the rest of the disk.
+ (
+ echo n
+ echo p
+ echo
+ echo
+ echo
+ echo w) | fdisk "${device}"
+ check_return "fdisk-${device}-end-partition"
+ # sync and wait 5 sec
+ sync
+ sleep 5
+}
+# Usage:
+# format_partitions "/dev/sdb" "ext4"
+# shellcheck disable=SC2039
+format_partitions() {
+ [ "$#" -ne 2 ] && error_msg "Usage: <block-device> <filesystem-type>"
+ local device="$1"
+ local filesystem="$2"
+ local partition=""
+ local total_partitions=""
+
+ command -v mkfs
+ exit_on_fail "mkfs not in ${PATH} or not installed"
+
+ # Total partitions in a block device
+ total_partitions=$(find "${device}"* | grep "[0-9]" | tr '\n' ' ' )
+
+ # Format each partition in a given block device
+ for partition in ${total_partitions} ; do
+ echo "Formatting ${partition} to ${filesystem}"
+ if [ "${filesystem}" = "fat32" ]; then
+ echo "y" | mkfs -t vfat -F 32 "${partition}"
+ check_return "mkfs-t-vfat-F-32-${partition}"
+ else
+ echo "y" | mkfs -t "${filesystem}" "${partition}"
+ check_return "mkfs-${filesystem}-${partition}"
+ fi
+ # sync and wait 5 sec
+ sync
+ sleep 5
+ done
+}
diff --git a/automated/linux/24h-stress-test/24h-stress-test.sh b/automated/linux/24h-stress-test/24h-stress-test.sh
index 00ef290..9d411a0 100755
--- a/automated/linux/24h-stress-test/24h-stress-test.sh
+++ b/automated/linux/24h-stress-test/24h-stress-test.sh
@@ -230,7 +230,7 @@ if ! which stress-ng; then
# shellcheck disable=SC2154
case "${dist}" in
debian|ubuntu)
- install_deps "git build-essential libaio-dev libapparmor-dev libattr1-dev libbsd-dev libcap-dev libgcrypt11-dev libkeyutils-dev libsctp-dev zlib1g-dev"
+ install_deps "git build-essential libaio-dev libapparmor-dev libattr1-dev libbsd-dev libcap-dev libkeyutils-dev libsctp-dev zlib1g-dev"
git clone git://kernel.ubuntu.com/cking/stress-ng.git
(
cd stress-ng || exit
diff --git a/automated/linux/24h-stress-test/24h-stress-test.yaml b/automated/linux/24h-stress-test/24h-stress-test.yaml
index feb819e..086d955 100644
--- a/automated/linux/24h-stress-test/24h-stress-test.yaml
+++ b/automated/linux/24h-stress-test/24h-stress-test.yaml
@@ -1,6 +1,6 @@
metadata:
name: 24h-stress-test
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Run stress_ng, stress_oom and stress_network tests"
maintainer:
- chase.qi@linaro.org
diff --git a/automated/linux/android-platform-tools/install.sh b/automated/linux/android-platform-tools/install.sh
index 39f1f8a..472edea 100755
--- a/automated/linux/android-platform-tools/install.sh
+++ b/automated/linux/android-platform-tools/install.sh
@@ -22,6 +22,26 @@ done
. "${TEST_DIR}/../../lib/sh-test-lib"
create_out_dir "${OUTPUT}"
+! check_root && \
+ error_msg "Please run this script as root."
+
+# Add amd64 architecture on debian/ubuntu
+arch=$(uname -m)
+if [ "${arch}" != "x86_64" ]; then
+ info_msg "Running on ${arch}. Installing multiarch libraries"
+ dist_name
+ # shellcheck disable=SC2154
+ case "${dist}" in
+ debian|ubuntu)
+ dpkg --add-architecture amd64
+ install_deps "qemu-user-static libc6:amd64"
+ ;;
+ *)
+ warn_msg "Unsupported distro: ${dist}! Package installation skipped."
+ ;;
+ esac
+fi
+
install_deps "wget unzip"
cd /opt/
rm -rf platform-tools*
diff --git a/automated/linux/android-platform-tools/install.yaml b/automated/linux/android-platform-tools/install.yaml
index 11cfd6d..b68ad78 100644
--- a/automated/linux/android-platform-tools/install.yaml
+++ b/automated/linux/android-platform-tools/install.yaml
@@ -1,6 +1,6 @@
metadata:
name: install-android-platform-tools
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Install android platform tools provided by Google from
https://developer.android.com/studio/releases/platform-tools"
maintainer:
diff --git a/automated/linux/armnn/armnn-unit-tests.sh b/automated/linux/armnn/armnn-unit-tests.sh
new file mode 100644
index 0000000..77dfde0
--- /dev/null
+++ b/automated/linux/armnn/armnn-unit-tests.sh
@@ -0,0 +1,43 @@
+#!/bin/sh
+
+# shellcheck disable=SC1091
+. ../../lib/sh-test-lib
+OUTPUT="$(pwd)/output"
+
+usage() {
+ echo "Usage: $0 [-s <true|false>]" 1>&2
+ exit 1
+}
+
+while getopts "s:a:d:" o; do
+ case "$o" in
+ s) SKIP_INSTALL="${OPTARG}" ;;
+ a) ARMNN_TARBALL="${OPTARG}" ;;
+ d) TEST_DIR="${OPTARG}" ;;
+ *) usage ;;
+ esac
+done
+
+! check_root && error_msg "You need to be root to run this script."
+create_out_dir "${OUTPUT}"
+
+pkgs="ntp wget gcc g++ python3 python3-pip"
+dhclient
+install_deps "${pkgs}" "${SKIP_INSTALL}"
+
+if [ -n "${ARMNN_TARBALL}" ]; then
+ mkdir -p "${TEST_DIR}"
+ cd "${TEST_DIR}" || exit
+ wget -O armnn.tar.xz "${ARMNN_TARBALL}"
+ tar xf armnn.tar.xz
+ export BASEDIR="/home/buildslave/workspace/armnn-ci-build"
+fi
+
+if [ "${SKIP_INSTALL}" = false ]; then
+ cd "$BASEDIR/build" || exit
+ ln -s libprotobuf.so.15.0.0 ./libprotobuf.so.15
+ LD_LIBRARY_PATH=$(pwd)
+ export LD_LIBRARY_PATH
+ chmod a+x UnitTests
+fi
+lava-test-case ArmNN-Unit-Tests --shell ./UnitTests
diff --git a/automated/linux/armnn/armnn-unit-tests.yaml b/automated/linux/armnn/armnn-unit-tests.yaml
new file mode 100644
index 0000000..cb46c58
--- /dev/null
+++ b/automated/linux/armnn/armnn-unit-tests.yaml
@@ -0,0 +1,25 @@
+metadata:
+ format: Lava-Test Test Definition 1.0
+ name: armnn-unit-tests
+ description: "Carry out armnn unit tests."
+ maintainer:
+ - theodore.grey@linaro.org
+ os:
+ - debian
+ scope:
+ - functional
+ devices:
+ - synquacer
+ - dragonboard-845c
+ - hi960-hikey
+ - stm32mp1
+params:
+ # if you need to run for 32 bit devices use:
+ # 'https://snapshots.linaro.org/componenets/armnn-32bit/latest/armnn-32.tar.xz'
+ ARMNN_TARBALL: 'https://snapshots.linaro.org/componenets/armnn/latest/armnn.tar.xz'
+ TEST_DIR: '/usr/local/bin'
+ SKIP_INSTALL: false
+run:
+ steps:
+ - cd ./automated/linux/armnn/
+ - ./armnn-unit-tests.sh -s "${SKIP_INSTALL}" -a "${ARMNN_TARBALL}" -d "${TEST_DIR}"
diff --git a/automated/linux/badblocks/badblocks.yaml b/automated/linux/badblocks/badblocks.yaml
index 99eec2f..bdd3896 100644
--- a/automated/linux/badblocks/badblocks.yaml
+++ b/automated/linux/badblocks/badblocks.yaml
@@ -1,6 +1,6 @@
metadata:
name: badblocks
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Run a read-only test for bad blocks"
maintainer:
- ryan.harkin@linaro.org
diff --git a/automated/linux/blogbench/blogbench.yaml b/automated/linux/blogbench/blogbench.yaml
index bc10b6d..54c679f 100644
--- a/automated/linux/blogbench/blogbench.yaml
+++ b/automated/linux/blogbench/blogbench.yaml
@@ -1,6 +1,6 @@
metadata:
name: blogbench
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Blogbench is a portable filesystem benchmark that tries to
reproduce the load of a real-world busy file server."
maintainer:
diff --git a/automated/linux/chroot/kselftest_chroot.yaml b/automated/linux/chroot/kselftest_chroot.yaml
index f6874f0..dbdaf74 100644
--- a/automated/linux/chroot/kselftest_chroot.yaml
+++ b/automated/linux/chroot/kselftest_chroot.yaml
@@ -1,6 +1,6 @@
metadata:
name: kselftest-chroot
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Run a chroot"
maintainer:
- anders.roxell@linaro.org
diff --git a/automated/linux/chroot/ltp_chroot.yaml b/automated/linux/chroot/ltp_chroot.yaml
index add9705..f3c0420 100644
--- a/automated/linux/chroot/ltp_chroot.yaml
+++ b/automated/linux/chroot/ltp_chroot.yaml
@@ -1,6 +1,6 @@
metadata:
name: ltp-chroot
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Run a chroot"
maintainer:
- anders.roxell@linaro.org
diff --git a/automated/linux/cyclicdeadline/cyclicdeadline.yaml b/automated/linux/cyclicdeadline/cyclicdeadline.yaml
index 9f3c16b..7259c4f 100644
--- a/automated/linux/cyclicdeadline/cyclicdeadline.yaml
+++ b/automated/linux/cyclicdeadline/cyclicdeadline.yaml
@@ -1,6 +1,6 @@
metadata:
name: cyclicdeadline
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "cyclicdeadline is a test that is similar to cyclictest
but instead of using SCHED_FIFO and nanosleep() to
measure jitter, it uses SCHED_DEADLINE and has the
diff --git a/automated/linux/cyclictest/cyclictest.yaml b/automated/linux/cyclictest/cyclictest.yaml
index 81e3dea..613a963 100644
--- a/automated/linux/cyclictest/cyclictest.yaml
+++ b/automated/linux/cyclictest/cyclictest.yaml
@@ -1,6 +1,6 @@
metadata:
name: cyclictest
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "cyclictest measures event latency in Linux kernel by
measuring the amount of time that passes between when a timer
expires and when the thread which set the timer actually
diff --git a/automated/linux/dd-wr-speed/dd-wr-speed.yaml b/automated/linux/dd-wr-speed/dd-wr-speed.yaml
index 71132f7..1fc0d28 100644
--- a/automated/linux/dd-wr-speed/dd-wr-speed.yaml
+++ b/automated/linux/dd-wr-speed/dd-wr-speed.yaml
@@ -1,6 +1,6 @@
metadata:
name: dd-speed-test
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "dd write/read speed test."
maintainer:
- chase.qi@linaro.org
diff --git a/automated/linux/device-tree/device-tree.yaml b/automated/linux/device-tree/device-tree.yaml
index 7c0a8ee..c8637d0 100644
--- a/automated/linux/device-tree/device-tree.yaml
+++ b/automated/linux/device-tree/device-tree.yaml
@@ -1,6 +1,6 @@
metadata:
name: device-tree
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Device tree test to check the folder structure. The test verifies that device-tree
is available and contains model name of the board."
maintainer:
diff --git a/automated/linux/disk-partitioning/disk-partitioning.yaml b/automated/linux/disk-partitioning/disk-partitioning.yaml
index 9e72d95..df559d3 100644
--- a/automated/linux/disk-partitioning/disk-partitioning.yaml
+++ b/automated/linux/disk-partitioning/disk-partitioning.yaml
@@ -1,6 +1,6 @@
metadata:
name: disk-partitioning-test
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Disk partitioning test."
maintainer:
- chase.qi@linaro.org
diff --git a/automated/linux/docker-integration-test/local-daemon.yaml b/automated/linux/docker-integration-test/local-daemon.yaml
index 08e5fb1..a8e39b4 100644
--- a/automated/linux/docker-integration-test/local-daemon.yaml
+++ b/automated/linux/docker-integration-test/local-daemon.yaml
@@ -1,6 +1,6 @@
metadata:
name: local-daemon
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "docker-ce engine integration test for local daemon. Docker's tests are
designed to run inside docker container. This test case clones
docker-ce repo and checks out the released tag to test and then lunchs
diff --git a/automated/linux/docker/docker.yaml b/automated/linux/docker/docker.yaml
index 45c4fc5..96edfaa 100644
--- a/automated/linux/docker/docker.yaml
+++ b/automated/linux/docker/docker.yaml
@@ -1,6 +1,6 @@
metadata:
name: docker
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Docker smoke test. When docker not pre-installed,
docker-ce will be installed from upstream on supported
distributions."
diff --git a/automated/linux/dockerized-tests/local-run.yaml b/automated/linux/dockerized-tests/local-run.yaml
index 37153b9..d91ada5 100644
--- a/automated/linux/dockerized-tests/local-run.yaml
+++ b/automated/linux/dockerized-tests/local-run.yaml
@@ -1,6 +1,6 @@
metadata:
name: local-run
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Run tests from https://git.linaro.org/qa/test-definitions.git with
docker. Test runs on test target."
maintainer:
diff --git a/automated/linux/dockerized-tests/over-ssh.yaml b/automated/linux/dockerized-tests/over-ssh.yaml
index 729148c..143bd6b 100644
--- a/automated/linux/dockerized-tests/over-ssh.yaml
+++ b/automated/linux/dockerized-tests/over-ssh.yaml
@@ -1,6 +1,6 @@
metadata:
name: over-ssh
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Run tests from https://git.linaro.org/qa/test-definitions.git with
docker. Test run will be triggered over ssh"
maintainer:
diff --git a/automated/linux/dsdbench/dsdbench.yaml b/automated/linux/dsdbench/dsdbench.yaml
index 5787481..a1c0326 100644
--- a/automated/linux/dsdbench/dsdbench.yaml
+++ b/automated/linux/dsdbench/dsdbench.yaml
@@ -1,6 +1,6 @@
metadata:
name: dsdbench
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "dsdbench runs benchmarks and tests for storage driver
configurations to help figure out how the configuration will
perform and which known issues the daemon may be affected by
diff --git a/automated/linux/ethernet/ethernet.yaml b/automated/linux/ethernet/ethernet.yaml
index d2999a2..f161a8c 100644
--- a/automated/linux/ethernet/ethernet.yaml
+++ b/automated/linux/ethernet/ethernet.yaml
@@ -1,6 +1,6 @@
metadata:
name: ethernet
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "This test checks if Ethernet is up and prints IP address."
maintainer:
- naresh.kamboju@linaro.org
diff --git a/automated/linux/fdisk/fdisk.sh b/automated/linux/fdisk/fdisk.sh
new file mode 100755
index 0000000..3694cc8
--- /dev/null
+++ b/automated/linux/fdisk/fdisk.sh
@@ -0,0 +1,64 @@
+#!/bin/sh
+
+# shellcheck disable=SC1091
+. ../../lib/sh-test-lib
+
+OUTPUT="$(pwd)/output"
+RESULT_FILE="${OUTPUT}/result.txt"
+export RESULT_FILE
+SKIP_INSTALL="false"
+
+DEVICE=""
+FILESYSTEM="ext4"
+SIZES=""
+
+usage() {
+ echo "Usage: $0 [-d </dev/sdb>]
+ [-f <ext4>]
+ [-s <true|false>]
+ [-z <+10G +5G>]" 1>&2
+ exit 1
+}
+
+# create_disk_partitions "/dev/sdb" "+5G +5G"
+create_disk_partitions() {
+ echo
+ echo "Creating partition: ${DEVICE}"
+ skip_list="format-disk-partitions"
+ partition_disk "${DEVICE}" "${SIZES}"
+ exit_on_fail "create-disk-partitions" "${skip_list}"
+}
+
+# format_disk_partitions "/dev/sdb" "ext4"
+format_disk_partitions() {
+ echo
+ echo "Format partitions of: ${DEVICE}"
+ format_partitions "${DEVICE}" "${FILESYSTEM}"
+ exit_on_fail "format-disk-partitions"
+}
+
+while getopts "d:f:s:z:" arg; do
+ case "$arg" in
+ d) DEVICE="${OPTARG}";;
+ f) FILESYSTEM="${OPTARG}" ;;
+ # SKIP_INSTALL is true in case of Open Embedded builds
+ # SKIP_INSTALL is false in case of Debian builds
+ s) SKIP_INSTALL="${OPTARG}";;
+ z) SIZES="${OPTARG}";;
+ *) usage ;;
+ esac
+done
+
+# Test run.
+[ -b "${DEVICE}" ] || error_msg "Please specify a block device with '-d'"
+! check_root && error_msg "You need to be root to run this script."
+create_out_dir "${OUTPUT}"
+
+info_msg "About to run fdisk tests ..."
+info_msg "Output directory: ${OUTPUT}"
+
+pkgs="fdisk e2fsprogs dosfstools"
+install_deps "${pkgs}" "${SKIP_INSTALL}"
+
+create_disk_partitions
+format_disk_partitions
diff --git a/automated/linux/fdisk/fdisk.yaml b/automated/linux/fdisk/fdisk.yaml
new file mode 100644
index 0000000..c0bd204
--- /dev/null
+++ b/automated/linux/fdisk/fdisk.yaml
@@ -0,0 +1,38 @@
+metadata:
+ name: fdisk
+ format: "Lava-Test Test Definition 1.0"
+ description: "Create partitions with fdisk command and format with
+ file system type"
+ maintainer:
+ - anders.roxell@linaro.org
+ - naresh.kamboju@linaro.org
+ os:
+ - debian
+ - ubuntu
+ - fedora
+ - centos
+ - openembedded
+ scope:
+ - functional
+ devices:
+ - juno
+ - x15
+ - x86
+ - i386
+ environment:
+ - lava-test-shell
+
+params:
+ # example: DEVICE=/dev/sdb
+ DEVICE: ""
+ FILESYSTEM: "ext4"
+ SIZES: "+10G +5G"
+ # SKIP_INSTALL is true in case of Open Embedded builds
+ # SKIP_INSTALL is false in case of Debian builds
+ SKIP_INSTALL: "true"
+
+run:
+ steps:
+ - cd ./automated/linux/fdisk/
+ - ./fdisk.sh -d "${DEVICE}" -f "${FILESYSTEM}" -s "${SKIP_INSTALL}" -z "${SIZES}"
+ - ../../utils/send-to-lava.sh ./output/result.txt
diff --git a/automated/linux/fio-test/fio-test.yaml b/automated/linux/fio-test/fio-test.yaml
index 950a329..9a3cde6 100644
--- a/automated/linux/fio-test/fio-test.yaml
+++ b/automated/linux/fio-test/fio-test.yaml
@@ -1,6 +1,6 @@
metadata:
name: fio-test
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "FIO or Flexible IO is a versatile IO workload generator test
on Linux."
maintainer:
diff --git a/automated/linux/gpiod/gpiod.yaml b/automated/linux/gpiod/gpiod.yaml
index 1b8fb5b..be8766a 100644
--- a/automated/linux/gpiod/gpiod.yaml
+++ b/automated/linux/gpiod/gpiod.yaml
@@ -1,6 +1,6 @@
metadata:
name: gpiod
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Run a gpiod suite"
maintainer:
- anders.roxell@linaro.org
diff --git a/automated/linux/httperf-client/httperf-client.yaml b/automated/linux/httperf-client/httperf-client.yaml
index f8519ec..19e2d85 100644
--- a/automated/linux/httperf-client/httperf-client.yaml
+++ b/automated/linux/httperf-client/httperf-client.yaml
@@ -1,6 +1,6 @@
metadata:
name: httperf-runner
- format: Lava-Test-Shell Test Definition 1.0
+ format: Lava-Test Test Definition 1.0
description: Run httperf against a web webserver to measure performance
environment:
- lava-test-shell
diff --git a/automated/linux/igt/igt-test.yaml b/automated/linux/igt/igt-test.yaml
index c84c083..95deaf8 100644
--- a/automated/linux/igt/igt-test.yaml
+++ b/automated/linux/igt/igt-test.yaml
@@ -28,14 +28,13 @@ run:
- cd ./automated/linux/igt
- OPT="-d ${IGT_DIR} -t ${TEST_LIST}"
- if [ "${TEST_LIST}" = "CHAMELIUM" ]; then
- # Check if Chamelium is available
+ # Check if Chamelium is available. ${CHAMELIUM_IP} is from LAVA device dictionary
- while [ ${CHAMELIUM_PING_RETRY} -gt 0 ]; do PC=`ping -c 2 ${CHAMELIUM_IP}|grep '100% packet loss'`||true; if [ -n "${PC}" ]; then ./control_chamelium.sh ${CHAMELIUM_REBOOT_ARG}; sleep 30; (( CHAMELIUM_PING_RETRY-- )); else break; fi; done
- - test ${CHAMELIUM_PING_RETRY} -gt 0 && lava-test-case "Ping-Chamelium" --result pass || lava-test-raise "Ping-Chamelium"
+ - test -n "${CHAMELIUM_IP}" && test ${CHAMELIUM_PING_RETRY} -gt 0 && lava-test-case "Ping-Chamelium" --result pass || lava-test-raise "Ping-Chamelium"
# Check Chamelium uboot console status
- ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@${CHAMELIUM_IP} /usr/bin/lock_u_boot_console
# Showing the uptime of Chamelium
- ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@${CHAMELIUM_IP} /usr/bin/uptime
- # ${CHAMELIUM_IP} is from LAVA device dictionary
- if [ -n "${CHAMELIUM_IP}" ]; then OPT="${OPT} -c ${CHAMELIUM_IP}"; fi
- if [ -n "${HDMI_DEV_NAME}" ]; then OPT="${OPT} -h ${HDMI_DEV_NAME}"; fi
- fi
diff --git a/automated/linux/iozone/iozone.yaml b/automated/linux/iozone/iozone.yaml
index d03561f..cb5f8bc 100644
--- a/automated/linux/iozone/iozone.yaml
+++ b/automated/linux/iozone/iozone.yaml
@@ -1,6 +1,6 @@
metadata:
name: iozone
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "IOzone is a filesystem benchmark tool. The benchmark generates and
measures a variety of file operations."
maintainer:
diff --git a/automated/linux/iperf/iperf-client.yaml b/automated/linux/iperf/iperf-client.yaml
index 09b82db..dfa866f 100644
--- a/automated/linux/iperf/iperf-client.yaml
+++ b/automated/linux/iperf/iperf-client.yaml
@@ -1,6 +1,6 @@
metadata:
name: iperf
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "iperf is a tool for active measurements of the maximum
achievable bandwidth on IP networks."
maintainer:
diff --git a/automated/linux/iperf/iperf-server.yaml b/automated/linux/iperf/iperf-server.yaml
index 61869b2..f257066 100644
--- a/automated/linux/iperf/iperf-server.yaml
+++ b/automated/linux/iperf/iperf-server.yaml
@@ -1,6 +1,6 @@
metadata:
name: iperf
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "iperf is a tool for active measurements of the maximum
achievable bandwidth on IP networks."
maintainer:
diff --git a/automated/linux/isolate-task/isolate-task.yaml b/automated/linux/isolate-task/isolate-task.yaml
index 2b428e0..41ed4c0 100644
--- a/automated/linux/isolate-task/isolate-task.yaml
+++ b/automated/linux/isolate-task/isolate-task.yaml
@@ -1,7 +1,7 @@
metadata:
name: verify_isolation
version: "1.0"
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Verify kernel configuration. Isolate application and verify
that kernel tick/resch interrups do not occur more than 1/sec."
maintainer:
diff --git a/automated/linux/kernel-compilation/kernel-compilation.yaml b/automated/linux/kernel-compilation/kernel-compilation.yaml
index 72da918..f2c8656 100644
--- a/automated/linux/kernel-compilation/kernel-compilation.yaml
+++ b/automated/linux/kernel-compilation/kernel-compilation.yaml
@@ -1,6 +1,6 @@
metadata:
name: kernel-compilation
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Compile kernel with defconfig on ARM64/ARM platform and
measure how long it takes."
maintainer:
diff --git a/automated/linux/kernel-config-checker/kernel-config-checker.yaml b/automated/linux/kernel-config-checker/kernel-config-checker.yaml
index 41cfefc..7f76e61 100644
--- a/automated/linux/kernel-config-checker/kernel-config-checker.yaml
+++ b/automated/linux/kernel-config-checker/kernel-config-checker.yaml
@@ -1,6 +1,6 @@
metadata:
name: kernel-config-checker
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Check the kernel config file for the presence of required
CONFIG_* values."
maintainer:
diff --git a/automated/linux/kselftest/kselftest.sh b/automated/linux/kselftest/kselftest.sh
index 5354204..6ab56ad 100755
--- a/automated/linux/kselftest/kselftest.sh
+++ b/automated/linux/kselftest/kselftest.sh
@@ -117,8 +117,8 @@ install() {
dist_name
# shellcheck disable=SC2154
case "${dist}" in
- debian|ubuntu) install_deps "sed wget xz-utils" "${SKIP_INSTALL}" ;;
- centos|fedora) install_deps "sed wget xz" "${SKIP_INSTALL}" ;;
+ debian|ubuntu) install_deps "sed wget xz-utils iproute2" "${SKIP_INSTALL}" ;;
+ centos|fedora) install_deps "sed wget xz iproute" "${SKIP_INSTALL}" ;;
unknown) warn_msg "Unsupported distro: package install skipped" ;;
esac
}
@@ -144,7 +144,7 @@ else
fi
tar zxf "kselftest.tar.gz"
# shellcheck disable=SC2164
- cd "kselftest"
+ if [ ! -e "run_kselftest.sh" ]; then cd "kselftest"; fi
fi
if [ -n "${SKIPLIST}" ]; then
diff --git a/automated/linux/kselftest/kselftest.yaml b/automated/linux/kselftest/kselftest.yaml
index b8f172a..e3a7189 100644
--- a/automated/linux/kselftest/kselftest.yaml
+++ b/automated/linux/kselftest/kselftest.yaml
@@ -1,6 +1,6 @@
metadata:
name: kselftest
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Linux kernel self test"
maintainer:
- naresh.kamboju@linaro.org
diff --git a/automated/linux/kselftest/skipfile-lkft.yaml b/automated/linux/kselftest/skipfile-lkft.yaml
index eb9471c..2cc0167 100644
--- a/automated/linux/kselftest/skipfile-lkft.yaml
+++ b/automated/linux/kselftest/skipfile-lkft.yaml
@@ -29,13 +29,26 @@ skiplist:
boards: all
branches:
- mainline
- - 5.3
- - 5.2
- - 5.1
+ - 5.7
+ - 5.6
+ - 5.4
- 4.19
- 4.14
- 4.9
- 4.4
+ - master
+ - linux-5.7.y
+ - linux-5.6.y
+ - linux-5.4.y
+ - linux-4.19.y
+ - linux-4.14.y
+ - linux-4.9.y
+ - linux-4.4.y
+ - v5.4-rt
+ - v4.19-rt
+ - v4.14-rt
+ - v4.9-rt
+ - v4.4-rt
tests:
- ftracetest
@@ -68,15 +81,6 @@ skiplist:
branches: all
tests:
- breakpoint_test
- - reason:
- url:
- environments: production
- boards: all
- branches:
- - 4.9
- - mainline
- tests:
- - breakpoint_test
- reason:
url:
@@ -187,6 +191,8 @@ skiplist:
- nxp-ls2088
branches:
- 4.9
+ - linux-4.9.y
+ - v4.9-rt
tests:
- sync_test
@@ -218,6 +224,8 @@ skiplist:
branches:
- next
- mainline
+ - default
+ - master
tests:
- test_btf
@@ -233,6 +241,8 @@ skiplist:
- nxp-ls2088
branches:
- 4.19
+ - linux-4.19.y
+ - v4.19-rt
tests:
- test_progs
diff --git a/automated/linux/kvm-unit-tests/kvm-unit-tests.sh b/automated/linux/kvm-unit-tests/kvm-unit-tests.sh
index 34d9828..73210f6 100755
--- a/automated/linux/kvm-unit-tests/kvm-unit-tests.sh
+++ b/automated/linux/kvm-unit-tests/kvm-unit-tests.sh
@@ -52,7 +52,7 @@ kvm_unit_tests_run_test() {
kvm_unit_tests_build_test() {
info_msg "git clone kvm unit tests ..."
- git clone https://git.kernel.org/pub/scm/virt/kvm/kvm-unit-tests.git
+ git clone https://gitlab.com/kvm-unit-tests/kvm-unit-tests.git
cd kvm-unit-tests || error_msg "Wasn't able to clone repo kvm-unit-tests!"
info_msg "Checkout on a given git reference ${GIT_REF}"
git checkout "${GIT_REF}"
diff --git a/automated/linux/kvm-unit-tests/kvm-unit-tests.yaml b/automated/linux/kvm-unit-tests/kvm-unit-tests.yaml
index d184295..ccc0a53 100644
--- a/automated/linux/kvm-unit-tests/kvm-unit-tests.yaml
+++ b/automated/linux/kvm-unit-tests/kvm-unit-tests.yaml
@@ -1,6 +1,6 @@
metadata:
name: kvm-unit-tests
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Build and run kvm-unit-tests natively"
maintainer:
diff --git a/automated/linux/kvm/start-kvm.yaml b/automated/linux/kvm/start-kvm.yaml
index 567c57c..74f212a 100644
--- a/automated/linux/kvm/start-kvm.yaml
+++ b/automated/linux/kvm/start-kvm.yaml
@@ -1,6 +1,6 @@
metadata:
name: kvm
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Start KVM for secondary testing"
maintainer:
- riku.voipio@linaro.org
diff --git a/automated/linux/kvm/stop-guest.yaml b/automated/linux/kvm/stop-guest.yaml
index f04bf42..75e56ef 100644
--- a/automated/linux/kvm/stop-guest.yaml
+++ b/automated/linux/kvm/stop-guest.yaml
@@ -1,6 +1,6 @@
metadata:
name: guest-complete
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Signal tests inside guest complete and power off"
maintainer:
- riku.voipio@linaro.org
diff --git a/automated/linux/lapack/lapack.yaml b/automated/linux/lapack/lapack.yaml
index 70743d1..f9e943f 100644
--- a/automated/linux/lapack/lapack.yaml
+++ b/automated/linux/lapack/lapack.yaml
@@ -1,6 +1,6 @@
metadata:
name: lapack
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Build and run lapack (Linear Algebra PACKage) tests natively"
maintainer:
diff --git a/automated/linux/libhugetlbfs/libhugetlbfs.yaml b/automated/linux/libhugetlbfs/libhugetlbfs.yaml
index 03779d0..9565286 100644
--- a/automated/linux/libhugetlbfs/libhugetlbfs.yaml
+++ b/automated/linux/libhugetlbfs/libhugetlbfs.yaml
@@ -1,6 +1,6 @@
metadata:
name: libhugetlbfs
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Build and run libhugetlbfs tests.
Runs through $WORD_SIZE bit libhugetlbfs on target system.
The default WORD_SIZE is 64 bit to accomodate 64 bit systems
diff --git a/automated/linux/lshw/lshw.yaml b/automated/linux/lshw/lshw.yaml
index f63fc90..cbcc4b3 100644
--- a/automated/linux/lshw/lshw.yaml
+++ b/automated/linux/lshw/lshw.yaml
@@ -1,6 +1,6 @@
metadata:
name: lshw
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "lshw is a small tool to extract detailed information
on the hardware configuration of the machine."
maintainer:
diff --git a/automated/linux/ltp-open-posix/ltp-open-posix.yaml b/automated/linux/ltp-open-posix/ltp-open-posix.yaml
index da6f98d..141122a 100644
--- a/automated/linux/ltp-open-posix/ltp-open-posix.yaml
+++ b/automated/linux/ltp-open-posix/ltp-open-posix.yaml
@@ -1,6 +1,6 @@
metadata:
name: ltp-open-posix
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Run LTP open posix test suite"
maintainer:
- naresh.kamboju@linaro.org
diff --git a/automated/linux/ltp-realtime/ltp-realtime.yaml b/automated/linux/ltp-realtime/ltp-realtime.yaml
index f987b01..b7815a4 100644
--- a/automated/linux/ltp-realtime/ltp-realtime.yaml
+++ b/automated/linux/ltp-realtime/ltp-realtime.yaml
@@ -1,6 +1,6 @@
metadata:
name: ltp-realtime
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Run LTP realtime test suite on Ubuntu/CentOS/Openembedded.
This testsuite contains some functional tests and a few
performance and latency measurement tests"
diff --git a/automated/linux/ltp/ltp.yaml b/automated/linux/ltp/ltp.yaml
index e2aa40e..ee6b0c6 100644
--- a/automated/linux/ltp/ltp.yaml
+++ b/automated/linux/ltp/ltp.yaml
@@ -1,6 +1,6 @@
metadata:
name: ltp
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Run LTP test suite on Ubuntu"
maintainer:
- milosz.wasilewski@linaro.org
diff --git a/automated/linux/ltp/skipfile-lkft.yaml b/automated/linux/ltp/skipfile-lkft.yaml
index 95bc516..d5f9947 100644
--- a/automated/linux/ltp/skipfile-lkft.yaml
+++ b/automated/linux/ltp/skipfile-lkft.yaml
@@ -79,6 +79,10 @@ skiplist:
branches:
- 4.4
- 4.9
+ - linux-4.4.y
+ - linux-4.9.y
+ - v4.4-rt
+ - v4.9-rt
tests:
- fanotify07
- fanotify08
@@ -93,6 +97,10 @@ skiplist:
branches:
- 4.4
- 4.9
+ - linux-4.4.y
+ - linux-4.9.y
+ - v4.4-rt
+ - v4.9-rt
tests:
- inotify07
@@ -106,6 +114,12 @@ skiplist:
- 4.4
- 4.9
- 4.14
+ - linux-4.4.y
+ - linux-4.9.y
+ - linux-4.14.y
+ - v4.4-rt
+ - v4.9-rt
+ - v4.14-rt
tests:
- inotify08
@@ -283,3 +297,14 @@ skiplist:
- dio22
- dio23
- dio26
+
+ - reason: >
+ skip long running LTP memcg_stress_test on all devices
+ url: https://bugs.linaro.org/show_bug.cgi?id=5657
+ environments: production
+ boards:
+ - all
+ branches:
+ - all
+ tests:
+ - memcg_stress
diff --git a/automated/linux/meminfo/meminfo.yaml b/automated/linux/meminfo/meminfo.yaml
index 1b86678..539819f 100644
--- a/automated/linux/meminfo/meminfo.yaml
+++ b/automated/linux/meminfo/meminfo.yaml
@@ -1,6 +1,6 @@
metadata:
name: meminfo
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Collect the information in /proc/meminfo after boot."
maintainer:
- ryan.harkin@linaro.org
diff --git a/automated/linux/nginx-server/nginx-linux.yaml b/automated/linux/nginx-server/nginx-linux.yaml
index c438c1c..af63c8e 100644
--- a/automated/linux/nginx-server/nginx-linux.yaml
+++ b/automated/linux/nginx-server/nginx-linux.yaml
@@ -1,6 +1,6 @@
metadata:
name: httperf-nginx-linux-ip
- format: Lava-Test-Shell Test Definition 1.0
+ format: Lava-Test Test Definition 1.0
description: Server running plain NGiNX, no acceleration
environment:
- lava-test-shell
diff --git a/automated/linux/nginx-server/nginx-odp-dpdk-git.yaml b/automated/linux/nginx-server/nginx-odp-dpdk-git.yaml
index acf3e37..3d4c49b 100644
--- a/automated/linux/nginx-server/nginx-odp-dpdk-git.yaml
+++ b/automated/linux/nginx-server/nginx-odp-dpdk-git.yaml
@@ -1,7 +1,7 @@
# NOTE: when calling this test, kernel-headers must have been installed
metadata:
name: httperf-nginx-ofp-odp-dpdk-git
- format: Lava-Test-Shell Test Definition 1.0
+ format: Lava-Test Test Definition 1.0
description: Server running OFP+ODP+DPDK NGiNX from git
environment:
- lava-test-shell
diff --git a/automated/linux/nginx-server/nginx-odp-dpdk.yaml b/automated/linux/nginx-server/nginx-odp-dpdk.yaml
index 749bde5..e299dfb 100644
--- a/automated/linux/nginx-server/nginx-odp-dpdk.yaml
+++ b/automated/linux/nginx-server/nginx-odp-dpdk.yaml
@@ -1,7 +1,7 @@
# NOTE: when calling this test, kernel-headers must have been installed
metadata:
name: httperf-nginx-ofp-odp-dpdk
- format: Lava-Test-Shell Test Definition 1.0
+ format: Lava-Test Test Definition 1.0
description: Server running OFP+ODP+DPDK NGiNX
environment:
- lava-test-shell
diff --git a/automated/linux/openssh/openssh-debian.yaml b/automated/linux/openssh/openssh-debian.yaml
index ea77da9..0670318 100644
--- a/automated/linux/openssh/openssh-debian.yaml
+++ b/automated/linux/openssh/openssh-debian.yaml
@@ -1,6 +1,6 @@
metadata:
name: openssh-debian
- format: "Manual Test Definition 1.0"
+ format: Lava-Test Test Definition 1.0
description: "Normal OpenSSH development produces a very small, secure,
and easy to maintain version for the OpenBSD project.
The OpenSSH Portability Team takes that pure version and adds
diff --git a/automated/linux/optee/optee-xtest-qemu.yaml b/automated/linux/optee/optee-xtest-qemu.yaml
index 32dfa9b..3f3fd71 100644
--- a/automated/linux/optee/optee-xtest-qemu.yaml
+++ b/automated/linux/optee/optee-xtest-qemu.yaml
@@ -1,6 +1,6 @@
metadata:
name: optee-xtest-qemu
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "OP-TEE sanity test suite using QEMU"
maintainer:
- fathi.boudra@linaro.org
diff --git a/automated/linux/optee/optee-xtest.yaml b/automated/linux/optee/optee-xtest.yaml
index 06415b3..f461b32 100644
--- a/automated/linux/optee/optee-xtest.yaml
+++ b/automated/linux/optee/optee-xtest.yaml
@@ -1,6 +1,6 @@
metadata:
name: optee-xtest
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "OP-TEE sanity test suite"
maintainer:
- chase.qi@linaro.org
diff --git a/automated/linux/ota-update/ota-update.yaml b/automated/linux/ota-update/ota-update.yaml
index d5b8b22..1502fe4 100644
--- a/automated/linux/ota-update/ota-update.yaml
+++ b/automated/linux/ota-update/ota-update.yaml
@@ -1,6 +1,6 @@
metadata:
name: ota-update
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Tests OTA update on device"
maintainer:
- vishal.bhoj@linaro.org
diff --git a/automated/linux/overlayfs/overlayfs.yaml b/automated/linux/overlayfs/overlayfs.yaml
index f3d8918..1d728fa 100644
--- a/automated/linux/overlayfs/overlayfs.yaml
+++ b/automated/linux/overlayfs/overlayfs.yaml
@@ -1,6 +1,6 @@
metadata:
name: overlayfs
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Run overlay filesystem test from unionmount testsuite. Kernel config
CONFIG_OVERLAY_FS=y is required for this test."
maintainer:
diff --git a/automated/linux/perf/perf.yaml b/automated/linux/perf/perf.yaml
index f578a8f..c812242 100644
--- a/automated/linux/perf/perf.yaml
+++ b/automated/linux/perf/perf.yaml
@@ -1,6 +1,6 @@
metadata:
name: perf
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Perf is a profiler tool for Linux and is used to collect and analyze performance and trace data.
This test runs following perf commands: record(record events for later reporting), report(break
down events by process, function, etc), stat(obtain event counts) and test(Run sanity tests)."
diff --git a/automated/linux/pm-qa/pm-qa.yaml b/automated/linux/pm-qa/pm-qa.yaml
index 1c7847c..70e6e14 100644
--- a/automated/linux/pm-qa/pm-qa.yaml
+++ b/automated/linux/pm-qa/pm-qa.yaml
@@ -1,6 +1,6 @@
metadata:
name: pm-qa
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Test power management (PM-QA). Currently, the test runs
cpufreq, cpuidle, cpuhotplug, thermal and cputopology by
default and individual test can be run by setting TESTS
diff --git a/automated/linux/pmqtest/pmqtest.yaml b/automated/linux/pmqtest/pmqtest.yaml
index 6498713..d75cf13 100644
--- a/automated/linux/pmqtest/pmqtest.yaml
+++ b/automated/linux/pmqtest/pmqtest.yaml
@@ -1,6 +1,6 @@
metadata:
name: pmqtest
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Pmqtest measures the latency of interprocess communication
with POSIX messages queues. The test starts pairs of threads
that are synchronized via mq_send/mw_receive() and measures
diff --git a/automated/linux/pointer-tagging/pointer-tagging-tests.yaml b/automated/linux/pointer-tagging/pointer-tagging-tests.yaml
index 62435a7..07641c1 100644
--- a/automated/linux/pointer-tagging/pointer-tagging-tests.yaml
+++ b/automated/linux/pointer-tagging/pointer-tagging-tests.yaml
@@ -1,6 +1,6 @@
metadata:
name: pointer-tagging-tests
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "This test suite is designed to verify that the AArch64
tagged-addressing feature is properly supported. Tagged addressing
is guaranteed to be available in the ARMv8 architecture"
diff --git a/automated/linux/pritee_test_utility/pritee_test_utility.yaml b/automated/linux/pritee_test_utility/pritee_test_utility.yaml
index 2c7cfae..239a05d 100644
--- a/automated/linux/pritee_test_utility/pritee_test_utility.yaml
+++ b/automated/linux/pritee_test_utility/pritee_test_utility.yaml
@@ -1,6 +1,6 @@
metadata:
name: pritee_test_utility
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "PlayReady Interface for Trusted Execution Environment Test Utility"
maintainer:
- arthur.she@linaro.org
diff --git a/automated/linux/ptsematest/ptsematest.yaml b/automated/linux/ptsematest/ptsematest.yaml
index dfa8a2c..c22f70b 100644
--- a/automated/linux/ptsematest/ptsematest.yaml
+++ b/automated/linux/ptsematest/ptsematest.yaml
@@ -1,6 +1,6 @@
metadata:
name: pmsematest
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "ptsematest starts threads that are synchronized via
pthread_mutex_unlock()/pthread_mutex_lock() and measures the latency
between releasing and getting the lock."
diff --git a/automated/linux/rcutorture/rcutorture.yaml b/automated/linux/rcutorture/rcutorture.yaml
index a472981..51ab0ac 100644
--- a/automated/linux/rcutorture/rcutorture.yaml
+++ b/automated/linux/rcutorture/rcutorture.yaml
@@ -1,6 +1,6 @@
metadata:
name: rcutorture
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "RCU or Read-Copy Update Torture test for Linux Kernel. Kernel
config required for this test CONFIG_RCU_TORTURE_TEST=m"
maintainer:
diff --git a/automated/linux/rt-migrate-test/rt-migrate-test.yaml b/automated/linux/rt-migrate-test/rt-migrate-test.yaml
index f6713f2..715ac6c 100644
--- a/automated/linux/rt-migrate-test/rt-migrate-test.yaml
+++ b/automated/linux/rt-migrate-test/rt-migrate-test.yaml
@@ -1,6 +1,6 @@
metadata:
name: rt-migrate-test
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "rt-migrate-test verifies the RT threads scheduler balancing."
maintainer:
- chase.qi@linaro.org
diff --git a/automated/linux/signaltest/signaltest.yaml b/automated/linux/signaltest/signaltest.yaml
index 29aa7a3..06b1205 100644
--- a/automated/linux/signaltest/signaltest.yaml
+++ b/automated/linux/signaltest/signaltest.yaml
@@ -1,6 +1,6 @@
metadata:
name: signaltest
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "signaltest is a RT signal roundtrip test software."
maintainer:
diff --git a/automated/linux/sigwaittest/sigwaittest.yaml b/automated/linux/sigwaittest/sigwaittest.yaml
index 62edc3d..4074f63 100644
--- a/automated/linux/sigwaittest/sigwaittest.yaml
+++ b/automated/linux/sigwaittest/sigwaittest.yaml
@@ -1,6 +1,6 @@
metadata:
name: sigwaittest
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "sigwaittest starts two threads or, optionally, forks
two processes that are synchronized via signals and
measures the latency between sending a signal and
diff --git a/automated/linux/spectre-meltdown-checker-test/spectre-meltdown-checker-test.yaml b/automated/linux/spectre-meltdown-checker-test/spectre-meltdown-checker-test.yaml
index c211873..77b81fc 100644
--- a/automated/linux/spectre-meltdown-checker-test/spectre-meltdown-checker-test.yaml
+++ b/automated/linux/spectre-meltdown-checker-test/spectre-meltdown-checker-test.yaml
@@ -1,6 +1,6 @@
metadata:
name: spectre-meltdown-checker
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Run spectre meltdown checker"
maintainer:
- naresh.kamboju@linaro.org
diff --git a/automated/linux/ssuite/run-bench.sh b/automated/linux/ssuite/run-bench.sh
index cb86911..7ebe627 100755
--- a/automated/linux/ssuite/run-bench.sh
+++ b/automated/linux/ssuite/run-bench.sh
@@ -5,7 +5,7 @@
OUTPUT="$(pwd)/output"
RESULT_FILE="${OUTPUT}/result.txt"
-TESTS="throughput replayed-startup"
+TESTS="throughput replayed-gnome-term-startup"
TEST_DEV=sda
FORMAT=no
S_VERSION=
@@ -29,7 +29,10 @@ usage() {
from meeting all non-trivial dependencies of these applications
(such as having an X session running). Results are
indistinguishable w.r.t. to actually starting these applications.
- Default value: \"throughput replayed-startup\"
+ A special case for replayed-startup is replayed-gnome-term-startup:
+ it benchmarks the startup of only gnome-terminal (a medium-size
+ application).
+ Default value: \"throughput replayed-gnome-term-startup\"
<TEST_DEV>:
Target device/partition: device/partition on which to
@@ -201,8 +204,8 @@ run_test() {
# form. To get a compliant file, we pick each statistic and
# put it in a separate line in the destination result file
# (${RESULT_FILE}, which is then parsed by LAVA).
- awk '{ print $1 "-max"" " $2 " " $3 " " $7 }' "${OUTPUT}"/result_list.txt 2>&1 | tee -a "${RESULT_FILE}"
- awk '{ print $1 "-min"" " $2 " " $4 " " $7 }' "${OUTPUT}"/result_list.txt 2>&1 | tee -a "${RESULT_FILE}"
+ awk '{ print $1 "-min"" " $2 " " $3 " " $7 }' "${OUTPUT}"/result_list.txt 2>&1 | tee -a "${RESULT_FILE}"
+ awk '{ print $1 "-max"" " $2 " " $4 " " $7 }' "${OUTPUT}"/result_list.txt 2>&1 | tee -a "${RESULT_FILE}"
awk '{ print $1 "-avg"" " $2 " " $5 " " $7 }' "${OUTPUT}"/result_list.txt 2>&1 | tee -a "${RESULT_FILE}"
awk '{ print $1 "-std"" " $2 " " $6 " " $7 }' "${OUTPUT}"/result_list.txt 2>&1 | tee -a "${RESULT_FILE}"
}
diff --git a/automated/linux/ssuite/ssuite-bench.yaml b/automated/linux/ssuite/ssuite-bench.yaml
index d746b1f..4831f1d 100644
--- a/automated/linux/ssuite/ssuite-bench.yaml
+++ b/automated/linux/ssuite/ssuite-bench.yaml
@@ -35,7 +35,10 @@ params:
# from meeting all non-trivial dependencies of these applications
# (such as having an X session running). Results are
# indistinguishable w.r.t. to actually starting these applications.
- TESTS: "throughput replayed-startup"
+ # A special case for replayed-startup is replayed-gnome-term-startup:
+ # it benchmarks the startup of only gnome-terminal (a medium-size
+ # application).
+ TESTS: "throughput replayed-gnome-term-startup"
# Target device/partition: device/partition on which to
# execute the benchmarks. If a partition is specified, then
diff --git a/automated/linux/svsematest/svsematest.yaml b/automated/linux/svsematest/svsematest.yaml
index 7724656..10b17c6 100644
--- a/automated/linux/svsematest/svsematest.yaml
+++ b/automated/linux/svsematest/svsematest.yaml
@@ -1,6 +1,6 @@
metadata:
name: svsematest
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "svsematest starts two threads or, optionally, forks
two processes that are synchronized via SYSV semaphores
and measures the latency between releasing a semaphore
diff --git a/automated/linux/sysbench/sysbench.yaml b/automated/linux/sysbench/sysbench.yaml
index b156615..1edd0a4 100644
--- a/automated/linux/sysbench/sysbench.yaml
+++ b/automated/linux/sysbench/sysbench.yaml
@@ -1,6 +1,6 @@
metadata:
name: sysbench
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "SysBench is a modular, cross-platform and multi-threaded
benchmark tool for evaluating OS parameters that are
important for a system running a database under intensive
diff --git a/automated/linux/sysfs-bus-iio-smoke/sysfs-bus-iio-smoke.yaml b/automated/linux/sysfs-bus-iio-smoke/sysfs-bus-iio-smoke.yaml
index 527d45e..d9ed972 100644
--- a/automated/linux/sysfs-bus-iio-smoke/sysfs-bus-iio-smoke.yaml
+++ b/automated/linux/sysfs-bus-iio-smoke/sysfs-bus-iio-smoke.yaml
@@ -1,6 +1,6 @@
metadata:
name: sysfs-bus-iio-smoke
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Range checks for particular properties in the sysfs bus iio subsystem."
maintainer:
- luis.machado@linaro.org
diff --git a/automated/linux/toolchain-smoke/toolchain-smoke.yaml b/automated/linux/toolchain-smoke/toolchain-smoke.yaml
index a9d6481..aadcc9e 100644
--- a/automated/linux/toolchain-smoke/toolchain-smoke.yaml
+++ b/automated/linux/toolchain-smoke/toolchain-smoke.yaml
@@ -1,6 +1,6 @@
metadata:
name: toolchain-smoke
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Simple test that checks whether gcc compilation works"
maintainer:
- milosz.wasilewski@linaro.org
diff --git a/automated/linux/ui-browser-test/ui-browser-test.yaml b/automated/linux/ui-browser-test/ui-browser-test.yaml
index 980ba3d..a940324 100644
--- a/automated/linux/ui-browser-test/ui-browser-test.yaml
+++ b/automated/linux/ui-browser-test/ui-browser-test.yaml
@@ -1,6 +1,6 @@
metadata:
name: ui-browser-test
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "ui browser tests by using robot frame work
Test runs chromium, chrome and firefox browser test cases."
maintainer:
diff --git a/automated/linux/unixbench/unixbench.yaml b/automated/linux/unixbench/unixbench.yaml
index a5acdd4..6575e16 100644
--- a/automated/linux/unixbench/unixbench.yaml
+++ b/automated/linux/unixbench/unixbench.yaml
@@ -1,6 +1,6 @@
metadata:
name: unixbench
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "The purpose of UnixBench is to provide a basic
indicator of the performance of a Unix-like system."
maintainer:
diff --git a/automated/linux/v4l2/v4l2-compliance.yaml b/automated/linux/v4l2/v4l2-compliance.yaml
index cf01334..303d524 100644
--- a/automated/linux/v4l2/v4l2-compliance.yaml
+++ b/automated/linux/v4l2/v4l2-compliance.yaml
@@ -1,6 +1,6 @@
metadata:
name: v4l2-compliance
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "v4l2 Compliance test suite"
maintainer:
- ryan.harkin@linaro.org
diff --git a/automated/linux/widevine/widevine_unittest.yaml b/automated/linux/widevine/widevine_unittest.yaml
index bff4841..519cffd 100644
--- a/automated/linux/widevine/widevine_unittest.yaml
+++ b/automated/linux/widevine/widevine_unittest.yaml
@@ -1,6 +1,6 @@
metadata:
name: widevine_unittest
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Widevine Content Decryption Modules (CDMs) are how streaming services
protect content using HTML5 video to web browsers without relying on
an NPAPI plugin like Flash or Silverlight. This test runs widevine_ce_cdm_unittest"
@@ -30,6 +30,5 @@ run:
# Upload log file to artifactirial
- if [ -f "${LOG_FILE}" -a -n "${UPLOAD_URL}" ]; then
- gzip ${LOG_FILE} && LOG_FILE=${LOG_FILE}.gz || true
- - test -n "${TOKEN}" && TOKEN_OPT="-t \"${TOKEN}\""
- - ${UPLOAD_TOOL} -a "${LOG_FILE}" -u "${UPLOAD_URL}" "${TOKEN_OPT}"; fi
+ - ${UPLOAD_TOOL} -a "${LOG_FILE}" -u "${UPLOAD_URL}" -t "${TOKEN}"; fi
diff --git a/automated/linux/workload-automation/workload-automation.yaml b/automated/linux/workload-automation/workload-automation.yaml
index 9bc34a5..f4e4f8f 100644
--- a/automated/linux/workload-automation/workload-automation.yaml
+++ b/automated/linux/workload-automation/workload-automation.yaml
@@ -1,6 +1,6 @@
metadata:
name: workload-automation-linux
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Workload Automation on Linux"
maintainer:
- milosz.wasilewski@linaro.org
diff --git a/automated/linux/workload-automation3/workload-automation.yaml b/automated/linux/workload-automation3/workload-automation.yaml
index a82a378..fc6872d 100644
--- a/automated/linux/workload-automation3/workload-automation.yaml
+++ b/automated/linux/workload-automation3/workload-automation.yaml
@@ -1,6 +1,6 @@
metadata:
name: workload-automation-linux
- format: "Lava-Test-Shell Test Definition 1.0"
+ format: "Lava-Test Test Definition 1.0"
description: "Workload Automation on Linux"
maintainer:
- milosz.wasilewski@linaro.org
diff --git a/automated/utils/requirements.txt b/automated/utils/requirements.txt
index ee1a841..0bf76b6 100644
--- a/automated/utils/requirements.txt
+++ b/automated/utils/requirements.txt
@@ -2,3 +2,4 @@ pexpect
pyyaml
requests
pycodestyle
+squad_client>0.5
diff --git a/automated/utils/test-runner.py b/automated/utils/test-runner.py
index 33552b5..1cc9d81 100755
--- a/automated/utils/test-runner.py
+++ b/automated/utils/test-runner.py
@@ -1,10 +1,11 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
import argparse
import csv
import cmd
import copy
import json
import logging
+import netrc
import os
import re
import shlex
@@ -18,13 +19,22 @@ from distutils.spawn import find_executable
try:
+ from squad_client.core.api import SquadApi
+ from squad_client.shortcuts import submit_results
+ from squad_client.core.models import Squad
+ from urllib.parse import urlparse
+except ImportError as e:
+ logger = logging.getLogger('RUNNER')
+ logger.warning('squad_client is needed if you want to upload to qa-reports')
+
+
+try:
import pexpect
import yaml
except ImportError as e:
print(e)
print('Please run the below command to install modules required')
- print('Python2: pip install -r ${REPO_PATH}/automated/utils/requirements.txt')
- print('Python3: pip3 install -r ${REPO_PATH}/automated/utils/requirements.txt')
+ print('pip3 install -r ${REPO_PATH}/automated/utils/requirements.txt')
sys.exit(1)
@@ -364,7 +374,7 @@ class AutomatedTestRun(TestRun):
def run(self):
self.logger.info('Executing %s/run.sh' % self.test['test_path'])
shell_cmd = '%s/run.sh 2>&1 | tee %s/stdout.log' % (self.test['test_path'], self.test['test_path'])
- self.child = pexpect.spawn('/bin/sh', ['-c', shell_cmd])
+ self.child = pexpect.spawnu('/bin/sh', ['-c', shell_cmd])
self.check_result()
def check_result(self):
@@ -379,10 +389,7 @@ class AutomatedTestRun(TestRun):
break
try:
self.child.expect('\r\n')
- if sys.version_info[0] < 3:
- print(self.child.before)
- else:
- print(self.child.before.decode('utf-8'))
+ print(self.child.before)
except pexpect.TIMEOUT:
continue
except pexpect.EOF:
@@ -421,7 +428,7 @@ class RemoteTestRun(AutomatedTestRun):
shell_cmd = 'ssh %s %s "%s/run.sh 2>&1"' % (SSH_PARAMS, self.args.target, self.test['target_test_path'])
self.logger.debug('shell_cmd: %s' % shell_cmd)
output = open("%s/stdout.log" % self.test['test_path'], "w")
- self.child = pexpect.spawn(shell_cmd)
+ self.child = pexpect.spawnu(shell_cmd)
self.child.logfile = output
self.check_result()
@@ -661,6 +668,19 @@ class ResultParser(object):
self.results['params'] = {}
self.pattern = None
self.fixup = None
+ self.qa_reports_server = args.qa_reports_server
+ if args.qa_reports_token is not None:
+ self.qa_reports_token = args.qa_reports_token
+ else:
+ self.qa_reports_token = os.environ.get("QA_REPORTS_TOKEN", get_token_from_netrc(self.qa_reports_server))
+ self.qa_reports_project = args.qa_reports_project
+ self.qa_reports_group = args.qa_reports_group
+ self.qa_reports_env = args.qa_reports_env
+ self.qa_reports_build_version = args.qa_reports_build_version
+ self.qa_reports_disable_metadata = args.qa_reports_disable_metadata
+ self.qa_reports_metadata = args.qa_reports_metadata
+ self.qa_reports_metadata_file = args.qa_reports_metadata_file
+
with open(os.path.join(self.test['test_path'], "testdef.yaml"), "r") as f:
self.testdef = yaml.safe_load(f)
self.results['name'] = ""
@@ -708,6 +728,7 @@ class ResultParser(object):
self.results['metrics'] = self.metrics
self.dict_to_json()
self.dict_to_csv()
+ self.send_to_qa_reports()
self.logger.info('Result files saved to: %s' % self.test['test_path'])
print('--- Printing result.csv ---')
with open('%s/result.csv' % self.test['test_path']) as f:
@@ -772,6 +793,58 @@ class ResultParser(object):
self.logger.debug('lava-run: cmd: {}'.format(cmd))
subprocess.call(shlex.split(cmd))
+ def send_to_qa_reports(self):
+ if None in (self.qa_reports_server, self.qa_reports_token, self.qa_reports_group, self.qa_reports_project, self.qa_reports_build_version, self.qa_reports_env):
+ self.logger.warning("All parameters for qa reports are not set, results will not be pushed to qa reports")
+ return
+
+ SquadApi.configure(
+ url=self.qa_reports_server, token=self.qa_reports_token
+ )
+ tests = {}
+ metrics = {}
+ for metric in self.metrics:
+ if metric['measurement'] != "":
+ metrics["{}/{}".format(self.test['test_name'], metric['test_case_id'])] = metric['measurement']
+ else:
+ tests["{}/{}".format(self.test['test_name'], metric['test_case_id'])] = metric['result']
+
+ with open("{}/stdout.log".format(self.test['test_path']), "r") as logfile:
+ log = logfile.read()
+
+ metadata = {}
+ if not self.qa_reports_disable_metadata:
+ if self.qa_reports_metadata:
+ metadata.update(self.qa_reports_metadata)
+ if self.qa_reports_metadata_file:
+ try:
+ with open(self.qa_reports_metadata_file, "r") as metadata_file:
+ loaded_metadata = yaml.load(metadata_file, Loader=yaml.SafeLoader)
+ # check if loaded metadata is key=value and both are strings
+ for key, value in loaded_metadata.items():
+ if type(key) == str and type(value) == str:
+ # only update metadata with simple keys
+ # ignore all other items in the dictionary
+ metadata.update({key: value})
+ else:
+ self.logger.warning("Ignoring key: %s" % key)
+ except FileNotFoundError:
+ self.logger.warning("Metadata file not found")
+ except PermissionError:
+ self.logger.warning("Insufficient permissions to open metadata file")
+ if submit_results(
+ group_project_slug="{}/{}".format(self.qa_reports_group, self.qa_reports_project),
+ build_version=self.qa_reports_build_version,
+ env_slug=self.qa_reports_env,
+ tests=tests,
+ metrics=metrics,
+ log=log,
+ metadata=metadata,
+ attachments=None):
+ self.logger.info("Results pushed to QA Reports")
+ else:
+ self.logger.warning("Results upload to QA Reports failed!")
+
def dict_to_json(self):
# Save test results to output/test_id/result.json
with open('%s/result.json' % self.test['test_path'], 'w') as f:
@@ -818,6 +891,19 @@ class ResultParser(object):
writer.writerow(metric)
+def get_token_from_netrc(qa_reports_server):
+ if qa_reports_server is None:
+ return
+ parse = urlparse(qa_reports_server)
+ netrc_local = netrc.netrc()
+ authTokens = netrc_local.authenticators("{}".format(parse.netloc))
+ if authTokens is not None:
+ hostname, username, authToken = authTokens
+ return authToken
+ # Unable to find Token hence returning None
+ return
+
+
def get_args():
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-o', '--output', default=os.getenv("HOME", "") + '/output', dest='output',
@@ -876,6 +962,65 @@ def get_args():
'''))
parser.add_argument('-v', '--verbose', action='store_true', dest='verbose',
default=False, help='Set log level to DEBUG.')
+ parser.add_argument(
+ "--qa-reports-server",
+ dest="qa_reports_server",
+ default=None,
+ help="qa reports server where the results have to be sent",
+ )
+ parser.add_argument(
+ "--qa-reports-token",
+ dest="qa_reports_token",
+ default=None,
+ help="qa reports token to upload the results to qa_reports_server",
+ )
+ parser.add_argument(
+ "--qa-reports-project",
+ dest="qa_reports_project",
+ default=None,
+ help="qa reports projects to which the results have to be uploaded",
+ )
+ parser.add_argument(
+ "--qa-reports-group",
+ dest="qa_reports_group",
+ default=None,
+ help="qa reports group in which the results have to be stored",
+ )
+ parser.add_argument(
+ "--qa-reports-env",
+ dest="qa_reports_env",
+ default=None,
+ help="qa reports environment for the results that have to be stored",
+ )
+ parser.add_argument(
+ "--qa-reports-build-version",
+ dest="qa_reports_build_version",
+ default=None,
+ help="qa reports build id for the result set",
+ )
+ parser.add_argument(
+ "--qa-reports-disable-metadata",
+ dest="qa_reports_disable_metadata",
+ default=False,
+ action='store_true',
+ help="Disable sending metadata to SQUAD. Default: false",
+ )
+ parser.add_argument(
+ "--qa-reports-metadata",
+ dest="qa_reports_metadata",
+ default={},
+ action=StoreDictKeyPair,
+ nargs="+",
+ metavar="KEY=VALUE",
+ help="List of metadata key=value pairs to be sent to SQUAD",
+ )
+ parser.add_argument(
+ "--qa-reports-metadata-file",
+ dest="qa_reports_metadata_file",
+ default=None,
+ help="YAML file that defines metadata to be reported to SQUAD",
+ )
+
args = parser.parse_args()
return args