All of lore.kernel.org
 help / color / mirror / Atom feed
From: Markus Lehtonen <markus.lehtonen@linux.intel.com>
To: openembedded-core@lists.openembedded.org
Subject: [PATCH 2/9] oeqa.buildperf: derive BuildPerfTestCase class from unittest.TestCase
Date: Fri, 12 Aug 2016 12:11:19 +0300	[thread overview]
Message-ID: <1470993086-23718-3-git-send-email-markus.lehtonen@linux.intel.com> (raw)
In-Reply-To: <1470993086-23718-1-git-send-email-markus.lehtonen@linux.intel.com>

Rename BuildPerfTest to BuildPerfTestCase and convert it to be derived
from TestCase class from the unittest framework of the Python standard
library. This doesn't work with our existing test cases or test runner
class, so these need to be modified, too.

Signed-off-by: Markus Lehtonen <markus.lehtonen@linux.intel.com>
---
 meta/lib/oeqa/buildperf/__init__.py |  4 ++-
 meta/lib/oeqa/buildperf/base.py     | 67 +++++++++++++++++--------------------
 2 files changed, 33 insertions(+), 38 deletions(-)

diff --git a/meta/lib/oeqa/buildperf/__init__.py b/meta/lib/oeqa/buildperf/__init__.py
index c816bd2..add3be2 100644
--- a/meta/lib/oeqa/buildperf/__init__.py
+++ b/meta/lib/oeqa/buildperf/__init__.py
@@ -10,6 +10,8 @@
 # more details.
 #
 """Build performance tests"""
-from .base import (perf_test_case, BuildPerfTest, BuildPerfTestRunner,
+from .base import (perf_test_case,
+                   BuildPerfTestCase,
+                   BuildPerfTestRunner,
                    KernelDropCaches)
 from .test_basic import *
diff --git a/meta/lib/oeqa/buildperf/base.py b/meta/lib/oeqa/buildperf/base.py
index 527563b..5b4c37c 100644
--- a/meta/lib/oeqa/buildperf/base.py
+++ b/meta/lib/oeqa/buildperf/base.py
@@ -19,6 +19,7 @@ import socket
 import tempfile
 import time
 import traceback
+import unittest
 from datetime import datetime, timedelta
 
 from oeqa.utils.commands import runCmd, get_bb_vars
@@ -191,50 +192,34 @@ def perf_test_case(obj):
     return obj
 
 
-class BuildPerfTest(object):
+class BuildPerfTestCase(unittest.TestCase):
     """Base class for build performance tests"""
     SYSRES = 'sysres'
     DISKUSAGE = 'diskusage'
 
-    name = None
-    description = None
-
-    def __init__(self, out_dir):
-        self.out_dir = out_dir
-        self.results = {'name':self.name,
-                        'description': self.description,
-                        'status': 'NOTRUN',
-                        'start_time': None,
-                        'elapsed_time': None,
-                        'measurements': []}
-        if not os.path.exists(self.out_dir):
-            os.makedirs(self.out_dir)
-        if not self.name:
-            self.name = self.__class__.__name__
+    def __init__(self, *args, **kwargs):
+        super(BuildPerfTestCase, self).__init__(*args, **kwargs)
+        self.name = self._testMethodName
+        self.out_dir = None
+        self.start_time = None
+        self.elapsed_time = None
+        self.measurements = []
         self.bb_vars = get_bb_vars()
-        # TODO: remove the _failed flag when globalres.log is ditched as all
-        # failures should raise an exception
-        self._failed = False
-        self.cmd_log = os.path.join(self.out_dir, 'commands.log')
+        # TODO: remove 'times' and 'sizes' arrays when globalres support is
+        # removed
+        self.times = []
+        self.sizes = []
 
-    def run(self):
+    def run(self, *args, **kwargs):
         """Run test"""
-        self.results['status'] = 'FAILED'
-        self.results['start_time'] = datetime.now()
-        self._run()
-        self.results['elapsed_time'] = (datetime.now() -
-                                        self.results['start_time'])
-        # Test is regarded as completed if it doesn't raise an exception
-        if not self._failed:
-            self.results['status'] = 'COMPLETED'
-
-    def _run(self):
-        """Actual test payload"""
-        raise NotImplementedError
+        self.start_time = datetime.now()
+        super(BuildPerfTestCase, self).run(*args, **kwargs)
+        self.elapsed_time = datetime.now() - self.start_time
 
     def log_cmd_output(self, cmd):
         """Run a command and log it's output"""
-        with open(self.cmd_log, 'a') as fobj:
+        cmd_log = os.path.join(self.out_dir, 'commands.log')
+        with open(cmd_log, 'a') as fobj:
             runCmd(cmd, stdout=fobj)
 
     def measure_cmd_resources(self, cmd, name, legend):
@@ -251,7 +236,8 @@ class BuildPerfTest(object):
 
         cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
         log.info("Timing command: %s", cmd_str)
-        with open(self.cmd_log, 'a') as fobj:
+        cmd_log = os.path.join(self.out_dir, 'commands.log')
+        with open(cmd_log, 'a') as fobj:
             ret, timedata = time_cmd(cmd, stdout=fobj)
         if ret.status:
             log.error("Time will be reported as 0. Command failed: %s",
@@ -266,12 +252,17 @@ class BuildPerfTest(object):
                        'name': name,
                        'legend': legend}
         measurement['values'] = {'elapsed_time': etime}
-        self.results['measurements'].append(measurement)
+        self.measurements.append(measurement)
+        e_sec = etime.total_seconds()
         nlogs = len(glob.glob(self.out_dir + '/results.log*'))
         results_log = os.path.join(self.out_dir,
                                    'results.log.{}'.format(nlogs + 1))
         with open(results_log, 'w') as fobj:
             fobj.write(timedata)
+        # Append to 'times' array for globalres log
+        self.times.append('{:d}:{:02d}:{:.2f}'.format(int(e_sec / 3600),
+                                                      int((e_sec % 3600) / 60),
+                                                       e_sec % 60))
 
     def measure_disk_usage(self, path, name, legend):
         """Estimate disk usage of a file or directory"""
@@ -289,7 +280,9 @@ class BuildPerfTest(object):
                        'name': name,
                        'legend': legend}
         measurement['values'] = {'size': size}
-        self.results['measurements'].append(measurement)
+        self.measurements.append(measurement)
+        # Append to 'sizes' array for globalres log
+        self.sizes.append(str(size))
 
     def save_buildstats(self):
         """Save buildstats"""
-- 
2.6.6



  parent reply	other threads:[~2016-08-12  9:11 UTC|newest]

Thread overview: 10+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2016-08-12  9:11 [PATCH 0/9] oe-build-perf-test: use Python unittest framework Markus Lehtonen
2016-08-12  9:11 ` [PATCH 1/9] oeqa.buildperf: rename module containing basic tests Markus Lehtonen
2016-08-12  9:11 ` Markus Lehtonen [this message]
2016-08-12  9:11 ` [PATCH 3/9] oeqa.buildperf: add BuildPerfTestLoader class Markus Lehtonen
2016-08-12  9:11 ` [PATCH 4/9] oeqa.buildperf: add BuildPerfTestResult class Markus Lehtonen
2016-08-12  9:11 ` [PATCH 5/9] oeqa.buildperf: convert test cases to unittest Markus Lehtonen
2016-08-12  9:11 ` [PATCH 6/9] oe-build-perf-test: use new unittest based framework Markus Lehtonen
2016-08-12  9:11 ` [PATCH 7/9] oeqa.buildperf: introduce runCmd2() Markus Lehtonen
2016-08-12  9:11 ` [PATCH 8/9] oe-build-perf-test: write logger output into file only Markus Lehtonen
2016-08-12  9:11 ` [PATCH 9/9] oeqa.buildperf: be more verbose about failed commands Markus Lehtonen

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1470993086-23718-3-git-send-email-markus.lehtonen@linux.intel.com \
    --to=markus.lehtonen@linux.intel.com \
    --cc=openembedded-core@lists.openembedded.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.