All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/3] oeqa: Added package installer to oetest to aid in future automatic install of packages
@ 2014-08-29 13:51 Lucian Musat
  2014-08-29 13:51 ` [PATCH 2/3] oeqa/utils/logparser.py: results based log parser utility Lucian Musat
  2014-08-29 13:51 ` [PATCH 3/3] oeqa/runtime: Automatic test for ptest Lucian Musat
  0 siblings, 2 replies; 5+ messages in thread
From: Lucian Musat @ 2014-08-29 13:51 UTC (permalink / raw)
  To: openembedded-core

Signed-off-by: Lucian Musat <georgex.l.musat@intel.com>
---
 meta/lib/oeqa/oetest.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/meta/lib/oeqa/oetest.py b/meta/lib/oeqa/oetest.py
index ed8b3b2..0b7e7dc 100644
--- a/meta/lib/oeqa/oetest.py
+++ b/meta/lib/oeqa/oetest.py
@@ -66,6 +66,13 @@ class oeRuntimeTest(oeTest):
         self.target = oeRuntimeTest.tc.target
         super(oeRuntimeTest, self).__init__(methodName)
 
+    #TODO: use package_manager.py to install packages on any type of image
+    def install_packages(self, packagelist):
+        for package in packagelist:
+            (status, result) = self.target.run("smart install -y "+package)
+            if status != 0:
+                return status
+
 class oeSDKTest(oeTest):
     def __init__(self, methodName='runTest'):
         self.sdktestdir = oeSDKTest.tc.sdktestdir
-- 
1.9.1



^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH 2/3] oeqa/utils/logparser.py: results based log parser utility
  2014-08-29 13:51 [PATCH 1/3] oeqa: Added package installer to oetest to aid in future automatic install of packages Lucian Musat
@ 2014-08-29 13:51 ` Lucian Musat
  2014-08-29 13:51 ` [PATCH 3/3] oeqa/runtime: Automatic test for ptest Lucian Musat
  1 sibling, 0 replies; 5+ messages in thread
From: Lucian Musat @ 2014-08-29 13:51 UTC (permalink / raw)
  To: openembedded-core

A module for parsing results based logs like ptest, compliance and performance.
Supports breaking the logs into multiple sections and also provides a result object to use the parser with.
The parser is initialized with the regex required to identify results and section statements in the target log file.

Signed-off-by: Corneliu Stoicescu <corneliux.stoicescu@intel.com>
Signed-off-by: Lucian Musat <georgex.l.musat@intel.com>
---
 meta/lib/oeqa/utils/logparser.py | 126 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 126 insertions(+)
 create mode 100644 meta/lib/oeqa/utils/logparser.py

diff --git a/meta/lib/oeqa/utils/logparser.py b/meta/lib/oeqa/utils/logparser.py
new file mode 100644
index 0000000..9dc7827
--- /dev/null
+++ b/meta/lib/oeqa/utils/logparser.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+
+import sys
+import os
+import re
+
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'meta/lib')))
+import oeqa.utils.ftools as ftools
+
+
+# A parser that can be used to identify whether a line is a test result or a section statement.
+class Lparser(object):
+
+    def __init__(self, test_0_pass_regex, test_0_fail_regex, section_0_begin_regex=None, section_0_end_regex=None, **kwargs):
+        # Initialize the arguments dictionary
+        if kwargs:
+            self.args = kwargs
+        else:
+            self.args = {}
+
+        # Add the default args to the dictionary
+        self.args['test_0_pass_regex'] = test_0_pass_regex
+        self.args['test_0_fail_regex'] = test_0_fail_regex
+        if section_0_begin_regex:
+            self.args['section_0_begin_regex'] = section_0_begin_regex
+        if section_0_end_regex:
+            self.args['section_0_end_regex'] = section_0_end_regex
+
+        self.test_possible_status = ['pass', 'fail', 'error']
+        self.section_possible_status = ['begin', 'end']
+
+        self.initialized = False
+
+
+    # Initialize the parser with the current configuration
+    def init(self):
+
+        # extra arguments can be added by the user to define new test and section categories. They must follow a pre-defined pattern: <type>_<category_name>_<status>_regex
+        self.test_argument_pattern = "^test_(.+?)_(%s)_regex" % '|'.join(map(str, self.test_possible_status))
+        self.section_argument_pattern = "^section_(.+?)_(%s)_regex" % '|'.join(map(str, self.section_possible_status))
+
+        # Initialize the test and section regex dictionaries
+        self.test_regex = {}
+        self.section_regex ={}
+
+        for arg, value in self.args.items():
+            if not value:
+                raise Exception('The value of provided argument %s is %s. Should have a valid value.' % (key, value))
+            is_test =  re.search(self.test_argument_pattern, arg)
+            is_section = re.search(self.section_argument_pattern, arg)
+            if is_test:
+                if not is_test.group(1) in self.test_regex:
+                    self.test_regex[is_test.group(1)] = {}
+                self.test_regex[is_test.group(1)][is_test.group(2)] = re.compile(value)
+            elif is_section:
+                if not is_section.group(1) in self.section_regex:
+                    self.section_regex[is_section.group(1)] = {}
+                self.section_regex[is_section.group(1)][is_section.group(2)] = re.compile(value)
+            else:
+                # TODO: Make these call a traceback instead of a simple exception..
+                raise Exception("The provided argument name does not correspond to any valid type. Please give one of the following types:\nfor tests: %s\nfor sections: %s" % (self.test_argument_pattern, self.section_argument_pattern))
+
+        self.initialized = True
+
+    # Parse a line and return a tuple containing the type of result (test/section) and its category, status and name
+    def parse_line(self, line):
+        if not self.initialized:
+            raise Exception("The parser is not initialized..")
+
+        for test_category, test_status_list in self.test_regex.items():
+            for test_status, status_regex in test_status_list.items():
+                test_name = status_regex.search(line)
+                if test_name:
+                    return ['test', test_category, test_status, test_name.group(1)]
+
+        for section_category, section_status_list in self.section_regex.items():
+            for section_status, status_regex in section_status_list.items():
+                section_name = status_regex.search(line)
+                if section_name:
+                    return ['section', section_category, section_status, section_name.group(1)]
+        return None
+
+
+class Result(object):
+
+    def __init__(self):
+        self.result_dict = {}
+
+    def store(self, section, test, status):
+        if not section in self.result_dict:
+            self.result_dict[section] = []
+
+        self.result_dict[section].append((test, status))
+
+    # sort tests by the test name(the first element of the tuple), for each section. This can be helpful when using git to diff for changes by making sure they are always in the same order.
+    def sort_tests(self):
+        for package in self.result_dict:
+            sorted_results = sorted(self.result_dict[package], key=lambda tup: tup[0])
+            self.result_dict[package] = sorted_results
+
+    # Log the results as files. The file name is the section name and the contents are the tests in that section.
+    def log_as_files(self, target_dir, test_status):
+        status_regex = re.compile('|'.join(map(str, test_status)))
+        if not type(test_status) == type([]):
+            raise Exception("test_status should be a list. Got " + str(test_status) + " instead.")
+        if not os.path.exists(target_dir):
+            raise Exception("Target directory does not exist: %s" % target_dir)
+
+        for section, test_results in self.result_dict.items():
+            prefix = ''
+            for x in test_status:
+                prefix +=x+'.'
+            prefix += section
+            section_file = os.path.join(target_dir, prefix)
+            # purge the file contents if it exists
+            open(section_file, 'w').close()
+            for test_result in test_results:
+                (test_name, status) = test_result
+                # we log only the tests with status in the test_status list
+                match_status = status_regex.search(status)
+                if match_status:
+                    ftools.append_file(section_file, status + ": " + test_name)
+
+    # Not yet implemented!
+    def log_to_lava(self):
+        pass
\ No newline at end of file
-- 
1.9.1



^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH 3/3] oeqa/runtime: Automatic test for ptest
  2014-08-29 13:51 [PATCH 1/3] oeqa: Added package installer to oetest to aid in future automatic install of packages Lucian Musat
  2014-08-29 13:51 ` [PATCH 2/3] oeqa/utils/logparser.py: results based log parser utility Lucian Musat
@ 2014-08-29 13:51 ` Lucian Musat
  2014-08-29 15:37   ` Richard Purdie
  1 sibling, 1 reply; 5+ messages in thread
From: Lucian Musat @ 2014-08-29 13:51 UTC (permalink / raw)
  To: openembedded-core; +Cc: Stefan Stanacar

For images without ptest the packages are automatically installed alongside ptest-runner. Log results are saved in ./results folder.
No cleanup is done for packages after the test is finished.

Signed-off-by: Stefan Stanacar <stefanx.stanacar@intel.com>
Signed-off-by: Lucian Musat <georgex.l.musat@intel.com>
---
 meta/lib/oeqa/runtime/_ptest.py | 140 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 140 insertions(+)
 create mode 100644 meta/lib/oeqa/runtime/_ptest.py

diff --git a/meta/lib/oeqa/runtime/_ptest.py b/meta/lib/oeqa/runtime/_ptest.py
new file mode 100644
index 0000000..cd1e4ad
--- /dev/null
+++ b/meta/lib/oeqa/runtime/_ptest.py
@@ -0,0 +1,140 @@
+import unittest, os, shutil
+from oeqa.oetest import oeRuntimeTest, skipModule
+from oeqa.utils.decorators import *
+from oeqa.utils.logparser import *
+from oeqa.utils.httpserver import HTTPService
+from difflib import SequenceMatcher
+
+def setUpModule():
+    if not oeRuntimeTest.hasFeature("package-management"):
+        skipModule("Image doesn't have package management feature")
+    if not oeRuntimeTest.hasPackage("smart"):
+        skipModule("Image doesn't have smart installed")
+    if "package_rpm" != oeRuntimeTest.tc.d.getVar("PACKAGE_CLASSES", True).split()[0]:
+        skipModule("Rpm is not the primary package manager")
+
+class PtestRunnerTest(oeRuntimeTest):
+
+    # a ptest log parser
+    def parse_ptest(logfile):
+        parser = Lparser(test_0_pass_regex="^PASS:(.+)", test_0_fail_regex="^FAIL:(.+)", section_0_begin_regex="^BEGIN: .*/(.+)/ptest", section_0_end_regex="^END: .*/(.+)/ptest")
+        parser.init()
+        result = Result()
+
+        with open(logfile) as f:
+            for line in f:
+                result_tuple = parser.parse_line(line)
+                if not result_tuple:
+                    continue
+                result_tuple = line_type, category, status, name = parser.parse_line(line)
+
+                if line_type == 'section' and status == 'begin':
+                    current_section = name
+                    continue
+
+                if line_type == 'section' and status == 'end':
+                    current_section = None
+                    continue
+
+                if line_type == 'test' and status == 'pass':
+                    result.store(current_section, name, status)
+                    continue
+
+                if line_type == 'test' and status == 'fail':
+                    result.store(current_section, name, status)
+                    continue
+
+        result.sort_tests()
+        return result
+
+    @classmethod
+    def setUpClass(self):
+        #note the existing channels that are on the board before creating new ones
+        self.existingchannels = set()
+        (status, result) = oeRuntimeTest.tc.target.run('smart channel --show | grep "\["', 0)
+        for x in result.split("\n"):
+            self.existingchannels.add(x)
+        self.repo_server = HTTPService(oeRuntimeTest.tc.d.getVar('DEPLOY_DIR', True), oeRuntimeTest.tc.target.server_ip)
+        self.repo_server.start()
+        (status, result) = oeRuntimeTest.tc.target.run('smart query', 0)
+        self.packagelist = result
+        if self.packagelist == "":
+            raise AssertionError("Cannot get package list!")
+        self.pkglist = self.packagelist.split("\n")
+
+    @classmethod
+    def tearDownClass(self):
+        self.repo_server.stop()
+        #remove created channels to be able to repeat the tests on same image
+        (status, result) = oeRuntimeTest.tc.target.run('smart channel --show | grep "\["', 0)
+        for x in result.split("\n"):
+            if x not in self.existingchannels:
+                oeRuntimeTest.tc.target.run('smart channel --remove '+x[1:-1]+' -y', 0)
+
+    def add_smart_channel(self):
+        image_pkgtype = self.tc.d.getVar('IMAGE_PKGTYPE', True)
+        deploy_url = 'http://%s:%s/%s' %(self.target.server_ip, self.repo_server.port, image_pkgtype)
+        pkgarchs = self.tc.d.getVar('PACKAGE_ARCHS', True).replace("-","_").split()
+        for arch in os.listdir('%s/%s' % (self.repo_server.root_dir, image_pkgtype)):
+            if arch in pkgarchs:
+                self.target.run('smart channel -y --add {a} type=rpm-md baseurl={u}/{a}'.format(a=arch, u=deploy_url), 0)
+        self.target.run('smart update', 0)
+
+    def get_ptest_packages(self):
+        pkgs = set()
+        image_pkgtype = self.tc.d.getVar('IMAGE_PKGTYPE', True)
+        pkgarchs = self.tc.d.getVar('PACKAGE_ARCHS', True).replace("-","_").split()
+        for arch in pkgarchs:
+            folder = self.repo_server.root_dir+"/"+image_pkgtype+'/'+arch
+            if (os.path.isdir(folder)):
+                for fil in os.listdir(folder):
+                    if ("ptest" in str(fil) and "ptest-runner" not in str(fil)):
+                        #get all the packages with -ptest in the name and remove ptest from them for future comparisons 
+                        filez = "".join(str(fil).split("-ptest"))
+                        rootfilez = ".".join(filez.split(".")[:-2])
+                        filex = str(fil).split("-ptest")[0]
+                        #compare with all the packages installed on the board and get a list of potential matches
+                        if filex in self.packagelist:
+                            i = 0
+                            matches = []
+                            while i<len(self.pkglist):
+                                if filex in self.pkglist[i]:
+                                    matches.append(self.pkglist[i])
+                                i +=1
+                            for i in matches:
+                                #sometimes package names differ from corresponding ptest package names (ex. libacl1 != acl-ptest) so we use the Source field from smart info to compare
+                                (status, result) = self.target.run("smart info "+i,0)
+                                rootpkg = re.search("(.*Source:.*)", result).group(1).split(":")[1][1:]
+                                #even source package names may differ a little so we do a fuzzy string match (ex. libz-1.2.8-r0 -> zlib-1.2.8-r0)
+                                m = SequenceMatcher(None, rootpkg, rootfilez)
+                                if (m.ratio > 0.9):
+                                    filey = "".join(str(fil).split("-ptest")[0])+"-ptest"
+                                    pkgs.add(filey)
+        if str(pkgs) == "set([])":
+            raise AssertionError("Cannot get ptest packages to install!")
+        pkgs.add("ptest-runner")
+        return pkgs
+
+    def setUp(self):
+        self.buildhist_dir = oeRuntimeTest.tc.d.getVar("BUILDHISTORY_DIR_IMAGE", True)
+        self.assertTrue(os.path.exists(self.buildhist_dir))
+        self.ptest_log = os.path.join(oeRuntimeTest.tc.d.getVar("TEST_LOG_DIR",True), "ptest-%s.log" % oeRuntimeTest.tc.d.getVar('DATETIME', True))
+
+    @skipUnlessPassed('test_ssh')
+    def test_ptestrunner(self):
+        self.add_smart_channel()
+        self.install_packages(list(self.get_ptest_packages()))
+
+        self.target.run('/usr/bin/ptest-runner > /tmp/ptest.log 2>&1', 0)
+        self.target.copy_from('/tmp/ptest.log', self.ptest_log)
+        shutil.copyfile(self.ptest_log, os.path.join(self.buildhist_dir, "ptest.log"))
+
+        result = self.parse_ptest(os.path.join(self.buildhist_dir, "ptest.log"))
+        log_results_to_location = os.path.join('./results')
+        if not os.path.exists(log_results_to_location):
+            os.makedirs(log_results_to_location)
+
+        # clear the results directory each time
+        for path in os.listdir(log_results_to_location):
+            os.remove(os.path.join(log_results_to_location, path))
+        result.log_as_files(log_results_to_location, test_status = ['fail'])
-- 
1.9.1



^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH 3/3] oeqa/runtime: Automatic test for ptest
  2014-08-29 13:51 ` [PATCH 3/3] oeqa/runtime: Automatic test for ptest Lucian Musat
@ 2014-08-29 15:37   ` Richard Purdie
  0 siblings, 0 replies; 5+ messages in thread
From: Richard Purdie @ 2014-08-29 15:37 UTC (permalink / raw)
  To: Lucian Musat; +Cc: Stefan Stanacar, openembedded-core

On Fri, 2014-08-29 at 16:51 +0300, Lucian Musat wrote:
> For images without ptest the packages are automatically installed alongside ptest-runner. Log results are saved in ./results folder.
> No cleanup is done for packages after the test is finished.
> 
> Signed-off-by: Stefan Stanacar <stefanx.stanacar@intel.com>
> Signed-off-by: Lucian Musat <georgex.l.musat@intel.com>
> ---

[...]

> +    def get_ptest_packages(self):
> +        pkgs = set()
> +        image_pkgtype = self.tc.d.getVar('IMAGE_PKGTYPE', True)
> +        pkgarchs = self.tc.d.getVar('PACKAGE_ARCHS', True).replace("-","_").split()
> +        for arch in pkgarchs:
> +            folder = self.repo_server.root_dir+"/"+image_pkgtype+'/'+arch
> +            if (os.path.isdir(folder)):
> +                for fil in os.listdir(folder):
> +                    if ("ptest" in str(fil) and "ptest-runner" not in str(fil)):
> +                        #get all the packages with -ptest in the name and remove ptest from them for future comparisons 
> +                        filez = "".join(str(fil).split("-ptest"))
> +                        rootfilez = ".".join(filez.split(".")[:-2])
> +                        filex = str(fil).split("-ptest")[0]
> +                        #compare with all the packages installed on the board and get a list of potential matches
> +                        if filex in self.packagelist:
> +                            i = 0
> +                            matches = []
> +                            while i<len(self.pkglist):
> +                                if filex in self.pkglist[i]:
> +                                    matches.append(self.pkglist[i])
> +                                i +=1
> +                            for i in matches:
> +                                #sometimes package names differ from corresponding ptest package names (ex. libacl1 != acl-ptest) so we use the Source field from smart info to compare
> +                                (status, result) = self.target.run("smart info "+i,0)
> +                                rootpkg = re.search("(.*Source:.*)", result).group(1).split(":")[1][1:]
> +                                #even source package names mai differ a little so we do a fuzzy string match (ex. libz-1.2.8-r0 -> zlib-1.2.8-r0)
> +                                m = SequenceMatcher(None, rootpkg, rootfilez)
> +                                if (m.ratio > 0.9):
> +                                    filey = "".join(str(fil).split("-ptest")[0])+"-ptest"
> +                                    pkgs.add(filey)
> +        if str(pkgs) == "set([])":
> +            raise AssertionError("Cannot get ptest packages to install!")
> +        pkgs.add("ptest-runner")
> +        return pkgs

At a quick glance, I wondered if it would be possible to use oe-pkg-util
here so we have one code path for finding the list of ptest packages
available? You can see example usage at  lib/oe/package_manager.py, the
install_complementary function.

I did hear a comment that there may be some issues with that function,
if there are we need to figure out what they are and fix them.

I'm pleased to see progress on this btw!

Cheers,

Richard



^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH 2/3] oeqa/utils/logparser.py: results based log parser utility
  2014-09-04 11:27 [PATCH 1/3] oeqa: Added package installer to oetest to aid in future automatic install of packages Lucian Musat
@ 2014-09-04 11:27 ` Lucian Musat
  0 siblings, 0 replies; 5+ messages in thread
From: Lucian Musat @ 2014-09-04 11:27 UTC (permalink / raw)
  To: openembedded-core

A module for parsing results based logs like ptest, compliance and performance.
Supports breaking the logs into multiple sections and also provides a result object to use the parser with.
The parser is initialized with the regex required to identify results and section statements in the target log file.

Signed-off-by: Corneliu Stoicescu <corneliux.stoicescu@intel.com>
Signed-off-by: Lucian Musat <georgex.l.musat@intel.com>
---
 meta/lib/oeqa/utils/logparser.py | 125 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 125 insertions(+)
 create mode 100644 meta/lib/oeqa/utils/logparser.py

diff --git a/meta/lib/oeqa/utils/logparser.py b/meta/lib/oeqa/utils/logparser.py
new file mode 100644
index 0000000..0ec1808
--- /dev/null
+++ b/meta/lib/oeqa/utils/logparser.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python
+
+import sys
+import os
+import re
+import ftools
+
+
+# A parser that can be used to identify whether a line is a test result or a section statement.
+class Lparser(object):
+
+    def __init__(self, test_0_pass_regex, test_0_fail_regex, section_0_begin_regex=None, section_0_end_regex=None, **kwargs):
+        # Initialize the arguments dictionary
+        if kwargs:
+            self.args = kwargs
+        else:
+            self.args = {}
+
+        # Add the default args to the dictionary
+        self.args['test_0_pass_regex'] = test_0_pass_regex
+        self.args['test_0_fail_regex'] = test_0_fail_regex
+        if section_0_begin_regex:
+            self.args['section_0_begin_regex'] = section_0_begin_regex
+        if section_0_end_regex:
+            self.args['section_0_end_regex'] = section_0_end_regex
+
+        self.test_possible_status = ['pass', 'fail', 'error']
+        self.section_possible_status = ['begin', 'end']
+
+        self.initialized = False
+
+
+    # Initialize the parser with the current configuration
+    def init(self):
+
+        # extra arguments can be added by the user to define new test and section categories. They must follow a pre-defined pattern: <type>_<category_name>_<status>_regex
+        self.test_argument_pattern = "^test_(.+?)_(%s)_regex" % '|'.join(map(str, self.test_possible_status))
+        self.section_argument_pattern = "^section_(.+?)_(%s)_regex" % '|'.join(map(str, self.section_possible_status))
+
+        # Initialize the test and section regex dictionaries
+        self.test_regex = {}
+        self.section_regex ={}
+
+        for arg, value in self.args.items():
+            if not value:
+                raise Exception('The value of provided argument %s is %s. Should have a valid value.' % (key, value))
+            is_test =  re.search(self.test_argument_pattern, arg)
+            is_section = re.search(self.section_argument_pattern, arg)
+            if is_test:
+                if not is_test.group(1) in self.test_regex:
+                    self.test_regex[is_test.group(1)] = {}
+                self.test_regex[is_test.group(1)][is_test.group(2)] = re.compile(value)
+            elif is_section:
+                if not is_section.group(1) in self.section_regex:
+                    self.section_regex[is_section.group(1)] = {}
+                self.section_regex[is_section.group(1)][is_section.group(2)] = re.compile(value)
+            else:
+                # TODO: Make these call a traceback instead of a simple exception..
+                raise Exception("The provided argument name does not correspond to any valid type. Please give one of the following types:\nfor tests: %s\nfor sections: %s" % (self.test_argument_pattern, self.section_argument_pattern))
+
+        self.initialized = True
+
+    # Parse a line and return a tuple containing the type of result (test/section) and its category, status and name
+    def parse_line(self, line):
+        if not self.initialized:
+            raise Exception("The parser is not initialized..")
+
+        for test_category, test_status_list in self.test_regex.items():
+            for test_status, status_regex in test_status_list.items():
+                test_name = status_regex.search(line)
+                if test_name:
+                    return ['test', test_category, test_status, test_name.group(1)]
+
+        for section_category, section_status_list in self.section_regex.items():
+            for section_status, status_regex in section_status_list.items():
+                section_name = status_regex.search(line)
+                if section_name:
+                    return ['section', section_category, section_status, section_name.group(1)]
+        return None
+
+
+class Result(object):
+
+    def __init__(self):
+        self.result_dict = {}
+
+    def store(self, section, test, status):
+        if not section in self.result_dict:
+            self.result_dict[section] = []
+
+        self.result_dict[section].append((test, status))
+
+    # sort tests by the test name(the first element of the tuple), for each section. This can be helpful when using git to diff for changes by making sure they are always in the same order.
+    def sort_tests(self):
+        for package in self.result_dict:
+            sorted_results = sorted(self.result_dict[package], key=lambda tup: tup[0])
+            self.result_dict[package] = sorted_results
+
+    # Log the results as files. The file name is the section name and the contents are the tests in that section.
+    def log_as_files(self, target_dir, test_status):
+        status_regex = re.compile('|'.join(map(str, test_status)))
+        if not type(test_status) == type([]):
+            raise Exception("test_status should be a list. Got " + str(test_status) + " instead.")
+        if not os.path.exists(target_dir):
+            raise Exception("Target directory does not exist: %s" % target_dir)
+
+        for section, test_results in self.result_dict.items():
+            prefix = ''
+            for x in test_status:
+                prefix +=x+'.'
+            if (section != ''):
+                prefix += section
+            section_file = os.path.join(target_dir, prefix)
+            # purge the file contents if it exists
+            open(section_file, 'w').close()
+            for test_result in test_results:
+                (test_name, status) = test_result
+                # we log only the tests with status in the test_status list
+                match_status = status_regex.search(status)
+                if match_status:
+                    ftools.append_file(section_file, status + ": " + test_name)
+
+    # Not yet implemented!
+    def log_to_lava(self):
+        pass
\ No newline at end of file
-- 
1.9.1



^ permalink raw reply related	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2014-09-04 11:32 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-08-29 13:51 [PATCH 1/3] oeqa: Added package installer to oetest to aid in future automatic install of packages Lucian Musat
2014-08-29 13:51 ` [PATCH 2/3] oeqa/utils/logparser.py: results based log parser utility Lucian Musat
2014-08-29 13:51 ` [PATCH 3/3] oeqa/runtime: Automatic test for ptest Lucian Musat
2014-08-29 15:37   ` Richard Purdie
2014-09-04 11:27 [PATCH 1/3] oeqa: Added package installer to oetest to aid in future automatic install of packages Lucian Musat
2014-09-04 11:27 ` [PATCH 2/3] oeqa/utils/logparser.py: results based log parser utility Lucian Musat

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.