From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=profusion-mobi.20150623.gappssmtp.com; s=20150623; h=mime-version:in-reply-to:references:from:date:message-id:subject:to :cc; bh=MV365pp6zu1B4PI+aI6g6NIaWp5wkzF62gViEJPtA/0=; b=ePcO+doiAS0EalFtLWFM66qx1vIyHI6MXD+SYgy8s6ugrYAo0RnhSY4QDe7XQwWl4p 4lJXq/e1z6L3RyvYnMFoC8BFgUVf27WNkQxVxdu5CghLw3hZn0inxmVtGlanssiRloCN VD6My+nqNk7q91MfCV7pUP4y2ev+D9QK6hJgyJJVRg9dEGLWK587ueVREnJnsbWU1cTJ 6cnIaHa148X6YyHaURf1Lj4iezLd6pitDOuJrosj5+bvYEnCAxnSIdsJh9FmXt9hl2cJ OIE4R/ONy0n7/+Ek8SQ+lECfrJcosSDBasfJJHpmK1cDCJ08jKyiW9KRkmsPh7Hldq9n YKAw== MIME-Version: 1.0 In-Reply-To: References: <20180502142052.29544-1-guicc@profusion.mobi> <20180502142052.29544-9-guicc@profusion.mobi> From: Guilherme Camargo Date: Mon, 7 May 2018 11:55:20 -0300 Message-ID: Content-Type: multipart/alternative; boundary="000000000000d544c9056b9edb55" Subject: Re: [Fuego] [PATCH 8/8] Log test results with the REPORT log level and print descriptions List-Id: Mailing list for the Fuego test framework List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , To: "Bird, Timothy" Cc: fuego@lists.linuxfoundation.org --000000000000d544c9056b9edb55 Content-Type: text/plain; charset="UTF-8" Content-Transfer-Encoding: quoted-printable =E2=80=8BHello, Tim. =E2=80=8B I wanted to avoid setting the number of expected OKs to avoid mismatches between that number and the real number of tests that are present on COMMANDS_TO_TEST - what may change frequently in the near future. Maybe the best would be simply to keep only the log_compare for the `not ok`? Catching in case any one of the tests fails? An I agree we could use `test_run:REPORT: ok` or `test_run:REPORT: not ok` to give more context. 
Thanks On Fri, May 4, 2018 at 5:24 PM, wrote: > > > > -----Original Message----- > > From: fuego-bounces@lists.linuxfoundation.org [mailto:fuego- > > bounces@lists.linuxfoundation.org] On Behalf Of Guilherme Campos > > Camargo > > Sent: Wednesday, May 02, 2018 7:21 AM > > To: fuego@lists.linuxfoundation.org > > Subject: [Fuego] [PATCH 8/8] Log test results with the REPORT log level > and > > print descriptions > > > > Log test results with the REPORT log level and print descriptions. In > > this patch we also update fuego_test.sh to allow parsing the new test > > results format. > > > > Signed-off-by: Guilherme Campos Camargo > > --- > > .../tests/Functional.fuego_release_test/fuego_test.sh | 10 +++------- > > engine/tests/Functional.fuego_release_test/test_run.py | 7 ++++--- > > 2 files changed, 7 insertions(+), 10 deletions(-) > > > > diff --git a/engine/tests/Functional.fuego_release_test/fuego_test.sh > > b/engine/tests/Functional.fuego_release_test/fuego_test.sh > > index 05b4b7b..242fa70 100755 > > --- a/engine/tests/Functional.fuego_release_test/fuego_test.sh > > +++ b/engine/tests/Functional.fuego_release_test/fuego_test.sh > > @@ -21,14 +21,10 @@ function test_build { > > } > > > > function test_run { > > - sudo -n "${TEST_HOME}/test_run.py" "${fuego_release_dir}/fuego" -d > > ${TEST_HOME} -o . > > - if [ "${?}" =3D 0 ]; then > > - report "echo ok 1 fuego release test" > > - else > > - report "echo not ok 1 fuego release test" > > - fi > > + report "sudo -n ${TEST_HOME}/test_run.py ${fuego_release_dir}/fueg= o > - > > d ${TEST_HOME} -o ." > > } > > > > function test_processing { > > - log_compare "$TESTDIR" "1" "^ok" "p" > > + log_compare "$TESTDIR" "1" "ok" "p" > > I suspect this should be more than 1. How many reports are generated wit= h > "ok" > > Also, I think more context would be good, to avoid false matches. 
> > Maybe log_compare "$TESTDIR" "19" "test_run:REPORT: ok" > > > + log_compare "$TESTDIR" "0" "not ok" "n" > I'm not sure this is needed, but it does future-proof the test a bit. > > > } > > diff --git a/engine/tests/Functional.fuego_release_test/test_run.py > > b/engine/tests/Functional.fuego_release_test/test_run.py > > index 52fbae3..d3c65f6 100755 > > --- a/engine/tests/Functional.fuego_release_test/test_run.py > > +++ b/engine/tests/Functional.fuego_release_test/test_run.py > > @@ -761,14 +761,15 @@ def main(): > > } > > > > tests_ok =3D True > > - for cmd in COMMANDS_TO_TEST: > > + for cmd_id, cmd in enumerate(COMMANDS_TO_TEST, 1): > > for base_class, ctx in ctx_mapper.items(): > > if isinstance(cmd, base_class): > > + LOGGER.report("run %s %s", cmd_id, cmd.description= ) > > if not cmd.exec(ctx): > > tests_ok =3D False > > - LOGGER.error(" FAIL") > > + LOGGER.report("not ok %s", cmd_id) > > break > > - LOGGER.info(" PASS") > > + LOGGER.report("ok %s", cmd_id) > > > > if tests_ok: > > LOGGER.info("All tests finished with SUCCESS") > > -- > > 2.17.0 > > Thanks, > -- Tim > > --000000000000d544c9056b9edb55 Content-Type: text/html; charset="UTF-8" Content-Transfer-Encoding: quoted-printable
=E2=80=8BHello, Tim.

=E2=80=8B
I wanted to avoid set= ting the number of expected OKs to avoid
mismatches between that number an= d the real number of tests
that are present on COMMANDS_TO_TEST - what may= change frequently
in the near future.

Maybe the best would be simp= ly to keep only the log_compare for
the `not ok`? Catching in case any one= of the tests fails?

And I agree we could use `test_run:REPORT: ok` or
`test_run:REPORT: not ok` to give more context.

Thanks


On Fri, May 4, 2018 at 5:24 PM, <Tim.Bird@sony.com> wrot= e:


> -----Original Message-----
> From: fuego= -bounces@lists.linuxfoundation.org [mailto:fuego-
> bounces@lists.lin= uxfoundation.org] On Behalf Of Guilherme Campos
> Camargo
> Sent: Wednesday, May 02, 2018 7:21 AM
> To: fuego@lists.lin= uxfoundation.org
> Subject: [Fuego] [PATCH 8/8] Log test results with the REPORT log leve= l and
> print descriptions
>
> Log test results with the REPORT log level and print descriptions. In<= br> > this patch we also update fuego_test.sh to allow parsing the new test<= br> > results format.
>
> Signed-off-by: Guilherme Campos Camargo <guicc@profusion.mobi>
> ---
>=C2=A0 .../tests/Functional.fuego_release_test/fuego_test.sh=C2=A0= | 10 +++-------
>=C2=A0 engine/tests/Functional.fuego_release_test/test_run.py |=C2= =A0 7 ++++---
>=C2=A0 2 files changed, 7 insertions(+), 10 deletions(-)
>
> diff --git a/engine/tests/Functional.fuego_release_test/fuego_tes= t.sh
> b/engine/tests/Functional.fuego_release_test/fuego_test.sh > index 05b4b7b..242fa70 100755
> --- a/engine/tests/Functional.fuego_release_test/fuego_test.= sh
> +++ b/engine/tests/Functional.fuego_release_test/fuego_test.= sh
> @@ -21,14 +21,10 @@ function test_build {
>=C2=A0 }
>
>=C2=A0 function test_run {
> -=C2=A0 =C2=A0 sudo -n "${TEST_HOME}/test_run.py" "${fu= ego_release_dir}/fuego" -d
> ${TEST_HOME} -o .
> -=C2=A0 =C2=A0 if [ "${?}" =3D 0 ]; then
> -=C2=A0 =C2=A0 =C2=A0 =C2=A0 report "echo ok 1 fuego release test= "
> -=C2=A0 =C2=A0 else
> -=C2=A0 =C2=A0 =C2=A0 =C2=A0 report "echo not ok 1 fuego release = test"
> -=C2=A0 =C2=A0 fi
> +=C2=A0 =C2=A0 report "sudo -n ${TEST_HOME}/test_run.py ${fuego_r= elease_dir}/fuego -
> d ${TEST_HOME} -o ."
>=C2=A0 }
>
>=C2=A0 function test_processing {
> -=C2=A0 =C2=A0 log_compare "$TESTDIR" "1" "^o= k" "p"
> +=C2=A0 =C2=A0 log_compare "$TESTDIR" "1" "ok= " "p"

I suspect this should be more than 1.=C2=A0 How many reports ar= e generated with "ok"

Also, I think more context would be good, to avoid false matches.

Maybe log_compare "$TESTDIR" "19" "test_run:REPORT= : ok"

> +=C2=A0 =C2=A0 log_compare "$TESTDIR" "0" "no= t ok" "n"
I'm not sure this is needed, but it does future-proof the test a= bit.

>=C2=A0 }
> diff --git a/engine/tests/Functional.fuego_release_test/test_run.= py
> b/engine/tests/Functional.fuego_release_test/test_run.py
> index 52fbae3..d3c65f6 100755
> --- a/engine/tests/Functional.fuego_release_test/test_run.py
> +++ b/engine/tests/Functional.fuego_release_test/test_run.py
> @@ -761,14 +761,15 @@ def main():
>=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 }
>
>=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 tests_ok =3D True
> -=C2=A0 =C2=A0 =C2=A0 =C2=A0 for cmd in COMMANDS_TO_TEST:
> +=C2=A0 =C2=A0 =C2=A0 =C2=A0 for cmd_id, cmd in enumerate(COMMANDS_TO_= TEST, 1):
>=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 for base_class, ctx in= ctx_mapper.items():
>=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 if isins= tance(cmd, base_class):
> +=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0= LOGGER.report("run %s %s", cmd_id, cmd.description)
>=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 = =C2=A0 if not cmd.exec(ctx):
>=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 = =C2=A0 =C2=A0 =C2=A0 tests_ok =3D False
> -=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0= =C2=A0 =C2=A0 LOGGER.error("=C2=A0 FAIL")
> +=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0= =C2=A0 =C2=A0 LOGGER.report("not ok %s", cmd_id)
>=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 = =C2=A0 =C2=A0 =C2=A0 break
> -=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0= LOGGER.info("=C2=A0 PASS")
> +=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0= LOGGER.report("ok %s", cmd_id)
>
>=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 if tests_ok:
>=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 LOGGER.info("All = tests finished with SUCCESS")
> --
> 2.17.0

Thanks,
=C2=A0 -- Tim


--000000000000d544c9056b9edb55--