* [v2][PATCH 0/2] refactor ptest.py
From: Armin Kuster @ 2019-05-16 16:06 UTC
To: openembedded-core
Sending these out as a set.
They ran fine on the autobuilder (AB) for both fast and full ptest.
Armin Kuster (2):
runtime ptest: refactor for future work
runtime ptest: run each ptest separately
meta/lib/oeqa/runtime/cases/ptest.py | 122 ++++++++++++++++++---------
1 file changed, 84 insertions(+), 38 deletions(-)
--
2.17.1
* [PATCH 1/2] runtime ptest: refactor for future work
From: Armin Kuster @ 2019-05-16 16:06 UTC
To: openembedded-core
This is in preparation for being able to run and capture each
ptest separately. It moves shared work into setup and finish
functions and breaks a few checks out into their own tests
(sketched below). No change in resulttool output:
Recipe  | Passed | Failed | Skipped | Time(s)
---------------------------------------------
openssh |      0 |      0 |       1 |       1
openssl |    146 |      1 |       8 |      68
python  |      0 |      0 |       0 |      22
[v2]
Remove an unneeded check
Move the @unittest.expectedFailure decorator to test_ptestrunner
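For reference, the new base class uses the stock unittest class-fixture
pattern (setUpClass/tearDownClass). A minimal standalone sketch, with an
illustrative class name and stubbed-out bodies rather than the oeqa code:

    import unittest

    class PTestBaseSketch(unittest.TestCase):

        @classmethod
        def setUpClass(cls):
            # Runs once before any test in the class: set up shared
            # state such as log dirs, extraresults and the failure message.
            cls.ptest_startup()

        @classmethod
        def tearDownClass(cls):
            # Runs once after all tests: update the log symlink and
            # surface any accumulated failure message.
            cls.ptest_finishup()

        @classmethod
        def ptest_startup(cls):
            cls.failmsg = ""

        @classmethod
        def ptest_finishup(cls):
            if cls.failmsg:
                raise AssertionError(cls.failmsg)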
Signed-off-by: Armin Kuster <akuster808@gmail.com>
---
meta/lib/oeqa/runtime/cases/ptest.py | 104 +++++++++++++++++----------
1 file changed, 66 insertions(+), 38 deletions(-)
diff --git a/meta/lib/oeqa/runtime/cases/ptest.py b/meta/lib/oeqa/runtime/cases/ptest.py
index d8d1e1b344f..fa95fefe011 100644
--- a/meta/lib/oeqa/runtime/cases/ptest.py
+++ b/meta/lib/oeqa/runtime/cases/ptest.py
@@ -12,59 +12,90 @@ from oeqa.core.decorator.data import skipIfNotFeature
from oeqa.runtime.decorator.package import OEHasPackage
from oeqa.utils.logparser import PtestParser
-class PtestRunnerTest(OERuntimeTestCase):
- @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES')
- @OETestDepends(['ssh.SSHTest.test_ssh'])
- @OEHasPackage(['ptest-runner'])
- @unittest.expectedFailure
- def test_ptestrunner(self):
- status, output = self.target.run('which ptest-runner', 0)
- if status != 0:
- self.skipTest("No -ptest packages are installed in the image")
+class PTestBase(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.ptest_startup()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.ptest_finishup()
- test_log_dir = self.td.get('TEST_LOG_DIR', '')
+ @classmethod
+ def ptest_startup(cls):
+ cls.failmsg = ""
+
+ cls.test_log_dir = cls.td.get('TEST_LOG_DIR', '')
# The TEST_LOG_DIR may be NULL when testimage is added after
# testdata.json is generated.
- if not test_log_dir:
- test_log_dir = os.path.join(self.td.get('WORKDIR', ''), 'testimage')
+ if not cls.test_log_dir:
+ cls.test_log_dir = os.path.join(cls.td.get('WORKDIR', ''), 'testimage')
# Don't use self.td.get('DATETIME'), it's from testdata.json, not
# up-to-date, and may cause "File exists" when re-run.
+
timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
- ptest_log_dir_link = os.path.join(test_log_dir, 'ptest_log')
- ptest_log_dir = '%s.%s' % (ptest_log_dir_link, timestamp)
- ptest_runner_log = os.path.join(ptest_log_dir, 'ptest-runner.log')
+ cls.ptest_log_dir_link = os.path.join(cls.test_log_dir, 'ptest_log')
+ cls.ptest_log_dir = '%s.%s' % (cls.ptest_log_dir_link, timestamp)
+ cls.ptest_runner_log = os.path.join(cls.ptest_log_dir, 'ptest-runner.log')
- status, output = self.target.run('ptest-runner', 0)
- os.makedirs(ptest_log_dir)
- with open(ptest_runner_log, 'w') as f:
- f.write(output)
+ os.makedirs(cls.ptest_log_dir)
- # status != 0 is OK since some ptest tests may fail
- self.assertTrue(status != 127, msg="Cannot execute ptest-runner!")
+ if not hasattr(cls.tc, "extraresults"):
+ cls.tc.extraresults = {}
- if not hasattr(self.tc, "extraresults"):
- self.tc.extraresults = {}
- extras = self.tc.extraresults
- extras['ptestresult.rawlogs'] = {'log': output}
+ cls.extras = cls.tc.extraresults
+
+ @classmethod
+ def ptest_finishup(cls):
+
+ # update symlink to ptest_log
+ if os.path.exists(cls.ptest_log_dir_link):
+ # Remove the old link to create a new one
+ os.remove(cls.ptest_log_dir_link)
+ os.symlink(os.path.basename(cls.ptest_log_dir), cls.ptest_log_dir_link)
+
+ if cls.failmsg:
+ cls.fail(cls.failmsg)
+
+class PtestRunnerTest(PTestBase):
+
+ @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['ptest-runner'])
+ def test_ptestrunner_check(self):
+ status, output = self.target.run('which ptest-runner')
+ msg = 'ptest-runner not installed: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestDepends(['ptest.PtestRunnerTest.test_ptestrunner_check'])
+ def test_ptests_installed(self):
+ status, output = self.target.run('ptest-runner -l')
+ msg = 'No ptests found. %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestDepends(['ptest.PtestRunnerTest.test_ptests_installed'])
+ @unittest.expectedFailure
+ def test_ptestrunner(self):
+ status, output = self.target.run('ptest-runner', 0)
+ with open(self.ptest_runner_log, 'w') as f:
+ f.write(output)
# Parse and save results
parser = PtestParser()
- results, sections = parser.parse(ptest_runner_log)
- parser.results_as_files(ptest_log_dir)
- if os.path.exists(ptest_log_dir_link):
- # Remove the old link to create a new one
- os.remove(ptest_log_dir_link)
- os.symlink(os.path.basename(ptest_log_dir), ptest_log_dir_link)
+ results, sections = parser.parse(self.ptest_runner_log)
+ parser.results_as_files(self.ptest_log_dir)
- extras['ptestresult.sections'] = sections
+ self.extras['ptestresult.rawlogs'] = {'log': output}
+ self.extras['ptestresult.sections'] = sections
trans = str.maketrans("()", "__")
for section in results:
for test in results[section]:
result = results[section][test]
testname = "ptestresult." + (section or "No-section") + "." + "_".join(test.translate(trans).split())
- extras[testname] = {'status': result}
+ self.extras[testname] = {'status': result}
failed_tests = {}
for section in results:
@@ -72,13 +103,10 @@ class PtestRunnerTest(OERuntimeTestCase):
if failed_testcases:
failed_tests[section] = failed_testcases
- failmsg = ""
status, output = self.target.run('dmesg | grep "Killed process"', 0)
if output:
- failmsg = "ERROR: Processes were killed by the OOM Killer:\n%s\n" % output
+ self.failmsg = "ERROR: Processes were killed by the OOM Killer:\n%s\n" % output
if failed_tests:
- failmsg = failmsg + "Failed ptests:\n%s" % pprint.pformat(failed_tests)
+ self.failmsg = self.failmsg + "Failed ptests:\n%s" % pprint.pformat(failed_tests)
- if failmsg:
- self.fail(failmsg)
--
2.17.1
* [PATCH 2/2] runtime ptest: run each ptest separately
From: Armin Kuster @ 2019-05-16 16:07 UTC
To: openembedded-core
resulttool still works:
Recipe  | Passed | Failed | Skipped | Time(s)
---------------------------------------------
openssh |      0 |      0 |       1 |       1
openssl |    146 |      1 |       8 |      80
python  |      0 |      0 |       0 |      21
[v2]
Pass the ptest name to ptest-runner in the loop
Add a check for tests that time out
Save raw info on a per-test basis to {ptest}-raw.log (see the sketch below)
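Roughly, the per-test flow this gives is sketched below; 'target.run' is a
stand-in for oeqa's remote-command helper and is assumed, not defined here:

    import os

    def run_each_ptest(target, log_dir):
        # List installed ptests; skip the header line (starts with
        # "Available") and any blank lines.
        status, output = target.run('ptest-runner -l')
        ptests = []
        for line in output.split("\n"):
            if line.startswith("Available") or not line.strip():
                continue
            ptests.append(line.split()[0])  # first column is the ptest name

        # Run each ptest on its own and keep a per-test raw log.
        for ptest in ptests:
            status, output = target.run('ptest-runner %s' % ptest)
            raw_log = os.path.join(log_dir, '%s-raw.log' % ptest)
            with open(raw_log, 'w') as f:
                f.write(output)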
Signed-off-by: Armin Kuster <akuster808@gmail.com>
---
meta/lib/oeqa/runtime/cases/ptest.py | 64 ++++++++++++++++++----------
1 file changed, 41 insertions(+), 23 deletions(-)
diff --git a/meta/lib/oeqa/runtime/cases/ptest.py b/meta/lib/oeqa/runtime/cases/ptest.py
index fa95fefe011..de24a3586d4 100644
--- a/meta/lib/oeqa/runtime/cases/ptest.py
+++ b/meta/lib/oeqa/runtime/cases/ptest.py
@@ -5,6 +5,7 @@
import unittest
import pprint
import datetime
+import bb
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.depends import OETestDepends
@@ -26,6 +27,7 @@ class PTestBase(OERuntimeTestCase):
@classmethod
def ptest_startup(cls):
cls.failmsg = ""
+ cls.ptests = []
cls.test_log_dir = cls.td.get('TEST_LOG_DIR', '')
# The TEST_LOG_DIR may be NULL when testimage is added after
@@ -46,6 +48,8 @@ class PTestBase(OERuntimeTestCase):
cls.tc.extraresults = {}
cls.extras = cls.tc.extraresults
+ cls.extras['ptestresult.rawlogs'] = {'log': ""}
+ cls.extras['ptestresult.sections'] = {}
@classmethod
def ptest_finishup(cls):
@@ -60,35 +64,23 @@ class PTestBase(OERuntimeTestCase):
cls.fail(cls.failmsg)
class PtestRunnerTest(PTestBase):
-
- @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES')
- @OETestDepends(['ssh.SSHTest.test_ssh'])
- @OEHasPackage(['ptest-runner'])
- def test_ptestrunner_check(self):
- status, output = self.target.run('which ptest-runner')
- msg = 'ptest-runner not installed: %s' % output
- self.assertEqual(status, 0, msg=msg)
-
- @OETestDepends(['ptest.PtestRunnerTest.test_ptestrunner_check'])
- def test_ptests_installed(self):
- status, output = self.target.run('ptest-runner -l')
- msg = 'No ptests found. %s' % output
- self.assertEqual(status, 0, msg=msg)
-
- @OETestDepends(['ptest.PtestRunnerTest.test_ptests_installed'])
- @unittest.expectedFailure
- def test_ptestrunner(self):
- status, output = self.target.run('ptest-runner', 0)
- with open(self.ptest_runner_log, 'w') as f:
+ def run_ptest(self, ptest):
+ status, output = self.target.run('ptest-runner %s' % ptest, 0)
+ ptest_raw_log = os.path.join(self.ptest_log_dir, "%s-raw.log" % ptest)
+ with open(ptest_raw_log, 'w') as f:
f.write(output)
# Parse and save results
parser = PtestParser()
- results, sections = parser.parse(self.ptest_runner_log)
+ results, sections = parser.parse(ptest_raw_log)
parser.results_as_files(self.ptest_log_dir)
- self.extras['ptestresult.rawlogs'] = {'log': output}
- self.extras['ptestresult.sections'] = sections
+ self.extras['ptestresult.rawlogs']['log'] = self.extras['ptestresult.rawlogs']['log'] + output
+ try:
+ self.extras['ptestresult.sections'][ptest] = sections[ptest]
+ except KeyError:
+ bb.warn("ptest %s timedout or crashed for some reason. Check the log: %s" % (ptest, self.ptest_log_dir))
+ return
trans = str.maketrans("()", "__")
for section in results:
@@ -110,3 +102,29 @@ class PtestRunnerTest(PTestBase):
if failed_tests:
self.failmsg = self.failmsg + "Failed ptests:\n%s" % pprint.pformat(failed_tests)
+
+ @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['ptest-runner'])
+ def test_ptestrunner_check(self):
+ status, output = self.target.run('which ptest-runner')
+ msg = 'ptest-runner not installed: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestDepends(['ptest.PtestRunnerTest.test_ptestrunner_check'])
+ def test_ptests_installed(self):
+ status, output = self.target.run('ptest-runner -l')
+ msg = 'No ptests found. %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ # build the ptest list
+ for ptest in output.split("\n"):
+ if ptest.startswith("Available"):
+ continue
+ self.ptests.append(ptest.split()[0])
+
+ @OETestDepends(['ptest.PtestRunnerTest.test_ptests_installed'])
+ @unittest.expectedFailure
+ def test_ptestrunner(self):
+ for ptest in self.ptests:
+ self.run_ptest(ptest)
--
2.17.1