* [PATCH][Autotest] Autotest: Add subtest interface to client utils.
@ 2011-12-09 12:50 Jiří Župka
  2011-12-20 20:40 ` [PATCH] " Lucas Meneghel Rodrigues
  0 siblings, 1 reply; 2+ messages in thread
From: Jiří Župka @ 2011-12-09 12:50 UTC (permalink / raw)
  To: autotest, kvm, kvm-autotest, lmr, ldoktor, jzupka

This class and its decorators provide an easy way to run a function as a
subtest. Subtest results are collected and can be reviewed at the end of
the test. The Subtest class and decorators should be placed in
autotest_lib.client.utils.

    It is also possible to change the result format.

    Example:
        @staticmethod
        def result_to_string(result):
            """
            @param result: Result of test.
            """
            print result
            return ("[%(result)]%(name): %(output)") % (result)

      1) Override the default formatter:
        Subtest.result_to_string = result_to_string
        Subtest.get_text_result()

      2) Pass the formatter explicitly:
        Subtest.get_text_result(result_to_string)
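
    A minimal usage sketch (the subclass name and argument are made up
    for illustration; it assumes the patch is applied and
    autotest_lib.client.common_lib.base_utils is importable):

        from autotest_lib.client.common_lib import base_utils

        class disk_check(base_utils.Subtest):
            # @subtest_fatal would abort the whole test on failure;
            # @subtest_nocleanup skips clean(), which this sketch
            # does not implement.
            @base_utils.subtest_nocleanup
            def test(self, path):
                return "checked %s" % path

        # Instantiating the subclass runs the subtest and records
        # its result.
        disk_check("/tmp")
        print base_utils.Subtest.get_text_result()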

Pull-request: https://github.com/autotest/autotest/pull/111

Signed-off-by: Jiří Župka <jzupka@redhat.com>
---
 client/common_lib/base_utils.py          |  214 ++++++++++++++++++++++++++++++
 client/common_lib/base_utils_unittest.py |  117 ++++++++++++++++
 2 files changed, 331 insertions(+), 0 deletions(-)

diff --git a/client/common_lib/base_utils.py b/client/common_lib/base_utils.py
index 005e3b0..fc6578d 100644
--- a/client/common_lib/base_utils.py
+++ b/client/common_lib/base_utils.py
@@ -119,6 +119,220 @@ class BgJob(object):
         signal.signal(signal.SIGPIPE, signal.SIG_DFL)
 
 
+def subtest_fatal(function):
+    """
+    Decorator which marks the test as critical.
+    If the subtest fails, the whole test ends.
+    """
+    def wrapped(self, *args, **kwds):
+        self._fatal = True
+        self.decored()
+        result = function(self, *args, **kwds)
+        return result
+    wrapped.func_name = function.func_name
+    return wrapped
+
+
+def subtest_nocleanup(function):
+    """
+    Decorator which disables the cleanup function.
+    """
+    def wrapped(self, *args, **kwds):
+        self._cleanup = False
+        self.decored()
+        result = function(self, *args, **kwds)
+        return result
+    wrapped.func_name = function.func_name
+    return wrapped
+
+
+class Subtest(object):
+    """
+    Collect result of subtest of main test.
+    """
+    result = []
+    passed = 0
+    failed = 0
+    def __new__(cls, *args, **kargs):
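+        # Note: __new__ creates the instance and immediately runs the
+        # whole subtest, returning the test method's return value
+        # instead of the instance, so instantiating a subclass runs it.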
+        self = super(Subtest, cls).__new__(cls)
+
+        self._fatal = False
+        self._cleanup = True
+        self._num_decored = 0
+
+        ret = None
+
+        res = {
+               'result' : None,
+               'name'   : self.__class__.__name__,
+               'args'   : args,
+               'kargs'  : kargs,
+               'output' : None,
+              }
+        try:
+            logging.info("Starting test %s" % self.__class__.__name__)
+            ret = self.test(*args, **kargs)
+            res['result'] = 'PASS'
+            res['output'] = ret
+            try:
+                logging.info(Subtest.result_to_string(res))
+            except:
+                self._num_decored = 0
+                raise
+            Subtest.result.append(res)
+            Subtest.passed += 1
+        except NotImplementedError:
+            raise
+        except Exception:
+            exc_type, exc_value, exc_traceback = sys.exc_info()
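+            # Skip one traceback frame per decorator wrapper so the
+            # logged exception points at the subtest body rather than
+            # the wrappers.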
+            for _ in range(self._num_decored):
+                exc_traceback = exc_traceback.tb_next
+            logging.error("In function (" + self.__class__.__name__ + "):")
+            logging.error("Call from:\n" +
+                          traceback.format_stack()[-2][:-1])
+            logging.error("Exception from:\n" +
+                          "".join(traceback.format_exception(
+                                                  exc_type, exc_value,
+                                                  exc_traceback.tb_next)))
+            # Clean up environment after subTest crash
+            res['result'] = 'FAIL'
+            logging.info(self.result_to_string(res))
+            Subtest.result.append(res)
+            Subtest.failed += 1
+            if self._fatal:
+                raise
+        finally:
+            if self._cleanup:
+                self.clean()
+
+        return ret
+
+
+    def test(self):
+        """
+        Check if the test is defined.
+
+        To make the test fatal, add the @subtest_fatal decorator above
+        the test method implementation.
+        raise NotImplementedError("Method test is not implemented.")
+
+
+    def clean(self):
+        """
+        Check if cleanup is defined.
+
+        To skip cleanup, add the @subtest_nocleanup decorator above
+        the test method implementation.
+        raise NotImplementedError("Method cleanup is not implemented.")
+
+
+    def decored(self):
+        self._num_decored += 1
+
+
+    @classmethod
+    def has_failed(cls):
+        """
+        @return: True if any subtest failed, False otherwise.
+        """
+        if cls.failed > 0:
+            return True
+        else:
+            return False
+
+
+    @classmethod
+    def get_result(cls):
+        """
+        @return: List of result dicts of all subtests (see
+           result_to_string for the dict keys).
+        """
+        return cls.result
+
+
+    @staticmethod
+    def result_to_string_debug(result):
+        """
+        @param result: Result dict of a subtest.
+        """
+        sargs = ", ".join(str(arg) for arg in result['args'])
+        return "Subtest (%s(%s)): --> %s" % (result['name'],
+                                             sargs,
+                                             result['result'])
+
+
+    @staticmethod
+    def result_to_string(result):
+        """
+        Format of result dict.
+
+        result = {
+               'result' : "PASS" / "FAIL",
+               'name'   : class name,
+               'args'   : test's args,
+               'kargs'  : test's kargs,
+               'output' : return of test function,
+              }
+
+        @param result: Result of test.
+        """
+        return ("Subtest (%(name)s): --> %(result)s") % (result)
+
+
+    @classmethod
+    def log_append(cls, msg):
+        """
+        Append a log message to the result output.
+
+        @param msg: Text of the log message.
+        """
+        cls.result.append([msg])
+
+
+    @classmethod
+    def _gen_res(cls, format_func):
+        """
+        Format the collected results with a formatting function.
+
+        @param format_func: Function used to format each result.
+        """
+        result = ""
+        for res in cls.result:
+            if isinstance(res, dict):
+                result += format_func(res) + "\n"
+            else:
+                result += str(res[0]) + "\n"
+        return result
+
+
+    @classmethod
+    def get_full_text_result(cls, format_func=None):
+        """
+        @return: String with the debug text form of the results.
+        """
+        if format_func is None:
+            format_func = cls.result_to_string_debug
+        return cls._gen_res(format_func)
+
+
+    @classmethod
+    def get_text_result(cls, format_func=None):
+        """
+        @return: String with the text form of the results.
+        """
+        if format_func is None:
+            format_func = cls.result_to_string
+        return cls._gen_res(format_func)
+
+
 def ip_to_long(ip):
     # !L is a long in network byte order
     return struct.unpack('!L', socket.inet_aton(ip))[0]
diff --git a/client/common_lib/base_utils_unittest.py b/client/common_lib/base_utils_unittest.py
index 39acab2..e697ff1 100755
--- a/client/common_lib/base_utils_unittest.py
+++ b/client/common_lib/base_utils_unittest.py
@@ -625,6 +625,123 @@ class test_sh_escape(unittest.TestCase):
         self._test_in_shell('\\000')
 
 
+class test_subtest(unittest.TestCase):
+    """
+    Test the Subtest class.
+    """
+    def setUp(self):
+        self.god = mock.mock_god(ut=self)
+        self.god.stub_function(base_utils.logging, 'error')
+        self.god.stub_function(base_utils.logging, 'info')
+
+    def tearDown(self):
+        self.god.unstub_all()
+
+    def test_test_not_implemented_raise(self):
+        base_utils.logging.info.expect_any_call()
+        base_utils.logging.error.expect_any_call()
+        base_utils.logging.error.expect_any_call()
+        base_utils.logging.error.expect_any_call()
+        base_utils.logging.info.expect_call("Subtest (test_not_implement):"
+                                            " --> FAIL")
+
+        class test_not_implement(base_utils.Subtest):
+            pass
+
+        self.assertRaises(NotImplementedError, test_not_implement)
+
+    def test_clean_not_implemented_raise(self):
+        base_utils.logging.info.expect_any_call()
+        base_utils.logging.info.expect_any_call()
+
+        class test_test_not_cleanup_implement(base_utils.Subtest):
+            def test(self):
+                pass
+
+        self.assertRaises(NotImplementedError, test_test_not_cleanup_implement)
+
+    def test_fail_in_nofatal_test(self):
+        base_utils.logging.info.expect_any_call()
+        base_utils.logging.error.expect_any_call()
+        base_utils.logging.error.expect_any_call()
+        base_utils.logging.error.expect_any_call()
+        base_utils.logging.info.expect_call("Subtest (test_raise_in_nofatal"
+                                            "_test): --> FAIL")
+
+        class test_raise_in_nofatal_test(base_utils.Subtest):
+            @base_utils.subtest_nocleanup
+            def test(self):
+                raise Exception("No fatal test.")
+
+        test_raise_in_nofatal_test()
+
+    def test_fail_in_fatal_test(self):
+        base_utils.logging.info.expect_any_call()
+        base_utils.logging.error.expect_any_call()
+        base_utils.logging.error.expect_any_call()
+        base_utils.logging.error.expect_any_call()
+        base_utils.logging.info.expect_call("Subtest (test_raise_in_fatal"
+                                            "_test): --> FAIL")
+
+        class test_raise_in_fatal_test(base_utils.Subtest):
+            @base_utils.subtest_nocleanup
+            @base_utils.subtest_fatal
+            def test(self):
+                raise Exception("Fatal test.")
+
+        self.assertRaises(Exception, test_raise_in_fatal_test)
+
+    def test_pass_with_cleanup_test(self):
+        base_utils.logging.info.expect_any_call()
+        base_utils.logging.info.expect_call("Subtest (test_pass_test):"
+                                            " --> PASS")
+
+        class test_pass_test(base_utils.Subtest):
+            @base_utils.subtest_fatal
+            def test(self):
+                pass
+
+            def clean(self):
+                pass
+
+        test_pass_test()
+
+
+    def test_results(self):
+        base_utils.logging.info.expect_any_call()
+        base_utils.logging.info.expect_call("Subtest (test_pass_test):"
+                                            " --> PASS")
+        base_utils.logging.info.expect_any_call()
+        base_utils.logging.error.expect_any_call()
+        base_utils.logging.error.expect_any_call()
+        base_utils.logging.error.expect_any_call()
+        base_utils.logging.info.expect_call("Subtest (test_raise_in_nofatal"
+                                            "_test): --> FAIL")
+
+        # Reset the test fail count.
+        base_utils.Subtest.failed = 0
+
+        class test_pass_test(base_utils.Subtest):
+            @base_utils.subtest_fatal
+            def test(self):
+                pass
+
+            def clean(self):
+                pass
+
+        class test_raise_in_nofatal_test(base_utils.Subtest):
+            @base_utils.subtest_nocleanup
+            def test(self):
+                raise Exception("No fatal test.")
+
+        test_pass_test()
+        test_raise_in_nofatal_test()
+        self.assertEqual(base_utils.Subtest.has_failed(), True,
+                         "Subtest did not catch the subtest failure.")
+        self.assertEqual(base_utils.Subtest.failed, 1,
+                         "Failed test count is wrong.")
+
+
 class test_run(unittest.TestCase):
     """
     Test the base_utils.run() function.
-- 
1.7.7.3


* Re: [PATCH] Autotest: Add subtest interface to client utils.
  2011-12-09 12:50 [PATCH][Autotest] Autotest: Add subtest interface to client utils Jiří Župka
@ 2011-12-20 20:40 ` Lucas Meneghel Rodrigues
  0 siblings, 0 replies; 2+ messages in thread
From: Lucas Meneghel Rodrigues @ 2011-12-20 20:40 UTC (permalink / raw)
  To: Jiří Župka; +Cc: autotest, kvm-autotest, kvm

On 12/09/2011 10:50 AM, Jiří Župka wrote:
> This class and its decorators provide an easy way to run a function as a
> subtest. Subtest results are collected and can be reviewed at the end of
> the test. The Subtest class and decorators should be placed in
> autotest_lib.client.utils.
>
>      It is also possible to change the result format.
>
>      Example:
>          @staticmethod
>          def result_to_string(result):
>              """
>              @param result: Result of test.
>              """
>              print result
>              return ("[%(result)]%(name): %(output)") % (result)
>
>        1) Override the default formatter:
>          Subtest.result_to_string = result_to_string
>          Subtest.get_text_result()
>
>        2) Pass the formatter explicitly:
>          Subtest.get_text_result(result_to_string)
>
> Pull-request: https://github.com/autotest/autotest/pull/111

^ I made a few remarks to the pull request, and now I wait on an updated 
version of the patchset. Thanks Jiri!

> Signed-off-by: Jiří Župka<jzupka@redhat.com>
> ---
>   client/common_lib/base_utils.py          |  214 ++++++++++++++++++++++++++++++
>   client/common_lib/base_utils_unittest.py |  117 ++++++++++++++++
>   2 files changed, 331 insertions(+), 0 deletions(-)
>
> diff --git a/client/common_lib/base_utils.py b/client/common_lib/base_utils.py
> index 005e3b0..fc6578d 100644
> --- a/client/common_lib/base_utils.py
> +++ b/client/common_lib/base_utils.py
> @@ -119,6 +119,220 @@ class BgJob(object):
>           signal.signal(signal.SIGPIPE, signal.SIG_DFL)
>
>
> +def subtest_fatal(function):
> +    """
> +    Decorator which mark test critical.
> +    If subtest failed whole test ends.
> +    """
> +    def wrapped(self, *args, **kwds):
> +        self._fatal = True
> +        self.decored()
> +        result = function(self, *args, **kwds)
> +        return result
> +    wrapped.func_name = function.func_name
> +    return wrapped
> +
> +
> +def subtest_nocleanup(function):
> +    """
> +    Decorator disable cleanup function.
> +    """
> +    def wrapped(self, *args, **kwds):
> +        self._cleanup = False
> +        self.decored()
> +        result = function(self, *args, **kwds)
> +        return result
> +    wrapped.func_name = function.func_name
> +    return wrapped
> +
> +
> +class Subtest(object):
> +    """
> +    Collect result of subtest of main test.
> +    """
> +    result = []
> +    passed = 0
> +    failed = 0
> +    def __new__(cls, *args, **kargs):
> +        self = super(Subtest, cls).__new__(cls)
> +
> +        self._fatal = False
> +        self._cleanup = True
> +        self._num_decored = 0
> +
> +        ret = None
> +        if args is None:
> +            args = []
> +
> +        res = {
> +               'result' : None,
> +               'name'   : self.__class__.__name__,
> +               'args'   : args,
> +               'kargs'  : kargs,
> +               'output' : None,
> +              }
> +        try:
> +            logging.info("Starting test %s" % self.__class__.__name__)
> +            ret = self.test(*args, **kargs)
> +            res['result'] = 'PASS'
> +            res['output'] = ret
> +            try:
> +                logging.info(Subtest.result_to_string(res))
> +            except:
> +                self._num_decored = 0
> +                raise
> +            Subtest.result.append(res)
> +            Subtest.passed += 1
> +        except NotImplementedError:
> +            raise
> +        except Exception:
> +            exc_type, exc_value, exc_traceback = sys.exc_info()
> +            for _ in range(self._num_decored):
> +                exc_traceback = exc_traceback.tb_next
> +            logging.error("In function (" + self.__class__.__name__ + "):")
> +            logging.error("Call from:\n" +
> +                          traceback.format_stack()[-2][:-1])
> +            logging.error("Exception from:\n" +
> +                          "".join(traceback.format_exception(
> +                                                  exc_type, exc_value,
> +                                                  exc_traceback.tb_next)))
> +            # Clean up environment after subTest crash
> +            res['result'] = 'FAIL'
> +            logging.info(self.result_to_string(res))
> +            Subtest.result.append(res)
> +            Subtest.failed += 1
> +            if self._fatal:
> +                raise
> +        finally:
> +            if self._cleanup:
> +                self.clean()
> +
> +        return ret
> +
> +
> +    def test(self):
> +        """
> +        Check if test is defined.
> +
> +        For makes test fatal add before implementation of test method
> +        decorator @subtest_fatal
> +        """
> +        raise NotImplementedError("Method test is not implemented.")
> +
> +
> +    def clean(self):
> +        """
> +        Check if cleanup is defined.
> +
> +        For makes test fatal add before implementation of test method
> +        decorator @subtest_nocleanup
> +        """
> +        raise NotImplementedError("Method cleanup is not implemented.")
> +
> +
> +    def decored(self):
> +        self._num_decored += 1
> +
> +
> +    @classmethod
> +    def has_failed(cls):
> +        """
> +        @return: If any of subtest not pass return True.
> +        """
> +        if cls.failed>  0:
> +            return True
> +        else:
> +            return False
> +
> +
> +    @classmethod
> +    def get_result(cls):
> +        """
> +        @return: Result of subtests.
> +           Format:
> +             tuple(pass/fail,function_name,call_arguments)
> +        """
> +        return cls.result
> +
> +
> +    @staticmethod
> +    def result_to_string_debug(result):
> +        """
> +        @param result: Result of test.
> +        """
> +        sargs = ""
> +        for arg in result['args']:
> +            sargs += str(arg) + ","
> +        sargs = sargs[:-1]
> +        return ("Subtest (%s(%s)): -->  %s") % (result['name'],
> +                                               sargs,
> +                                               result['status'])
> +
> +
> +    @staticmethod
> +    def result_to_string(result):
> +        """
> +        Format of result dict.
> +
> +        result = {
> +               'result' : "PASS" / "FAIL",
> +               'name'   : class name,
> +               'args'   : test's args,
> +               'kargs'  : test's kargs,
> +               'output' : return of test function,
> +              }
> +
> +        @param result: Result of test.
> +        """
> +        return ("Subtest (%(name)s): -->  %(result)s") % (result)
> +
> +
> +    @classmethod
> +    def log_append(cls, msg):
> +        """
> +        Add log_append to result output.
> +
> +        @param msg: Test of log_append
> +        """
> +        cls.result.append([msg])
> +
> +
> +    @classmethod
> +    def _gen_res(cls, format_func):
> +        """
> +        Format result with formatting function
> +
> +        @param format_func: Func for formating result.
> +        """
> +        result = ""
> +        for res in cls.result:
> +            if (isinstance(res,dict)):
> +                result += format_func(res) + "\n"
> +            else:
> +                result += str(res[0]) + "\n"
> +        return result
> +
> +
> +    @classmethod
> +    def get_full_text_result(cls, format_func=None):
> +        """
> +        @return string with text form of result
> +        """
> +        if format_func is None:
> +            format_func = cls.result_to_string_debug
> +        return cls._gen_res(lambda s: format_func(s))
> +
> +
> +    @classmethod
> +    def get_text_result(cls, format_func=None):
> +        """
> +        @return string with text form of result
> +        """
> +        if format_func is None:
> +            format_func = cls.result_to_string
> +        return cls._gen_res(lambda s: format_func(s))
> +
> +
>   def ip_to_long(ip):
>       # !L is a long in network byte order
>       return struct.unpack('!L', socket.inet_aton(ip))[0]
> diff --git a/client/common_lib/base_utils_unittest.py b/client/common_lib/base_utils_unittest.py
> index 39acab2..e697ff1 100755
> --- a/client/common_lib/base_utils_unittest.py
> +++ b/client/common_lib/base_utils_unittest.py
> @@ -625,6 +625,123 @@ class test_sh_escape(unittest.TestCase):
>           self._test_in_shell('\\000')
>
>
> +class test_subtest(unittest.TestCase):
> +    """
> +    Test subtest class.
> +    """
> +    def setUp(self):
> +        self.god = mock.mock_god(ut=self)
> +        self.god.stub_function(base_utils.logging, 'error')
> +        self.god.stub_function(base_utils.logging, 'info')
> +
> +    def tearDown(self):
> +        self.god.unstub_all()
> +
> +    def test_test_not_implemented_raise(self):
> +        base_utils.logging.info.expect_any_call()
> +        base_utils.logging.error.expect_any_call()
> +        base_utils.logging.error.expect_any_call()
> +        base_utils.logging.error.expect_any_call()
> +        base_utils.logging.info.expect_call("Subtest (test_not_implement):"
> +                                            " -->  FAIL")
> +
> +        class test_not_implement(base_utils.Subtest):
> +            pass
> +
> +        self.assertRaises(NotImplementedError, test_not_implement)
> +
> +    def test_clean_not_implemented_raise(self):
> +        base_utils.logging.info.expect_any_call()
> +        base_utils.logging.info.expect_any_call()
> +
> +        class test_test_not_cleanup_implement(base_utils.Subtest):
> +            def test(self):
> +                pass
> +
> +        self.assertRaises(NotImplementedError, test_test_not_cleanup_implement)
> +
> +    def test_fail_in_nofatal_test(self):
> +        base_utils.logging.info.expect_any_call()
> +        base_utils.logging.error.expect_any_call()
> +        base_utils.logging.error.expect_any_call()
> +        base_utils.logging.error.expect_any_call()
> +        base_utils.logging.info.expect_call("Subtest (test_raise_in_nofatal"
> +                                            "_test): -->  FAIL")
> +
> +        class test_raise_in_nofatal_test(base_utils.Subtest):
> +            @base_utils.subtest_nocleanup
> +            def test(self):
> +                raise Exception("No fatal test.")
> +
> +        test_raise_in_nofatal_test()
> +
> +    def test_fail_in_fatal_test(self):
> +        base_utils.logging.info.expect_any_call()
> +        base_utils.logging.error.expect_any_call()
> +        base_utils.logging.error.expect_any_call()
> +        base_utils.logging.error.expect_any_call()
> +        base_utils.logging.info.expect_call("Subtest (test_raise_in_fatal"
> +                                            "_test): -->  FAIL")
> +
> +        class test_raise_in_fatal_test(base_utils.Subtest):
> +            @base_utils.subtest_nocleanup
> +            @base_utils.subtest_fatal
> +            def test(self):
> +                raise Exception("Fatal test.")
> +
> +        self.assertRaises(Exception, test_raise_in_fatal_test)
> +
> +    def test_pass_with_cleanup_test(self):
> +        base_utils.logging.info.expect_any_call()
> +        base_utils.logging.info.expect_call("Subtest (test_pass_test):"
> +                                            " -->  PASS")
> +
> +        class test_pass_test(base_utils.Subtest):
> +            @base_utils.subtest_fatal
> +            def test(self):
> +                pass
> +
> +            def clean(self):
> +                pass
> +
> +        test_pass_test()
> +
> +
> +    def test_results(self):
> +        base_utils.logging.info.expect_any_call()
> +        base_utils.logging.info.expect_call("Subtest (test_pass_test):"
> +                                            " -->  PASS")
> +        base_utils.logging.info.expect_any_call()
> +        base_utils.logging.error.expect_any_call()
> +        base_utils.logging.error.expect_any_call()
> +        base_utils.logging.error.expect_any_call()
> +        base_utils.logging.info.expect_call("Subtest (test_raise_in_nofatal"
> +                                            "_test): -->  FAIL")
> +
> +        #Reset test fail count.
> +        base_utils.Subtest.failed = 0
> +
> +        class test_pass_test(base_utils.Subtest):
> +            @base_utils.subtest_fatal
> +            def test(self):
> +                pass
> +
> +            def clean(self):
> +                pass
> +
> +        class test_raise_in_nofatal_test(base_utils.Subtest):
> +            @base_utils.subtest_nocleanup
> +            def test(self):
> +                raise Exception("No fatal test.")
> +
> +        test_pass_test()
> +        test_raise_in_nofatal_test()
> +        self.assertEqual(base_utils.Subtest.has_failed(), True,
> +                         "Subtest not catch subtest fail.")
> +        self.assertEqual(base_utils.Subtest.failed, 1,
> +                         "Count of test failing is wrong")
> +
> +
>   class test_run(unittest.TestCase):
>       """
>       Test the base_utils.run() function.

