Commit ff1857af authored by Lukasz Mrugala, committed by Mahesh Mahadevan

scripts: twister: Enhance TestCase/Instance info and presentation



ExecutionCounter has been expanded and now holds, among other things,
more information on the statuses of TestCases.
This information is now incorporated into the relevant summaries in
runner.py and reports.py, whose layout was changed to present the new
and existing information in a clear and concise way.
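
As an illustration only (this is not the actual twisterlib.runner code),
the shape implied by the updated unit tests further down looks roughly
like the sketch below; the plain Lock is an assumption standing in for
whatever synchronization the real counter uses:

from threading import Lock

class ExecutionCounterSketch:
    def __init__(self):
        self._lock = Lock()
        # Test-suite (configuration) level counters
        self.total = 0
        self.done = 0
        self.passed = 0
        self.failed = 0
        self.error = 0
        self.skipped_filter = 0
        self.skipped_runtime = 0
        # TestCase level counters -- the newly tracked statuses
        self.cases = 0
        self.filtered_cases = 0
        self.skipped_cases = 0
        self.passed_cases = 0
        self.notrun_cases = 0
        self.blocked_cases = 0
        self.failed_cases = 0
        self.error_cases = 0
        self.none_cases = 0  # cases that ended without a valid final status

    def cases_increment(self, value=1, decrement=False):
        # Same calling convention as the increment helpers exercised in the tests.
        with self._lock:
            self.cases += value * (-1 if decrement else 1)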

The TestInstance execution counter is now more intuitive:
instances filtered out before running are no longer included there,
and retries properly reset the counter.
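
Purely hypothetical sketch of the retry behaviour described above; the
function name and attribute handling are assumptions, and only the
*_increment(decrement=True) calling convention comes from the counter
helpers:

def requeue_for_retry(instance, results):
    # Undo this instance's contribution to the run counters so the retry
    # attempt is counted fresh rather than on top of the failed attempt.
    results.done_increment(decrement=True)
    results.failed_increment(decrement=True)
    instance.status = None  # cleared before the instance is re-run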

TestCases with None and other incorrect final statuses
are logged as errors, but do not cause Twister to exit
with a nonzero exit code.
This is because None statuses, although incorrect,
are currently common.
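
Sketch of that handling with assumed names (valid_statuses,
report_case_status); only the behaviour itself -- log an error, leave
the exit code alone -- is taken from this change:

import logging

logger = logging.getLogger("twister")

def report_case_status(case_name, status, valid_statuses):
    if status not in valid_statuses:
        # Visible as an error in the log ...
        logger.error("Unknown status = %s in test case %s.", status, case_name)
        # ... but deliberately not propagated to Twister's exit code, since
        # None statuses, although incorrect, are still common.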

Inconsistent spacing in the ERROR and FAILED statuses has been fixed.

Signed-off-by: Lukasz Mrugala <lukaszx.mrugala@intel.com>

scripts: Dmitri fix

Fix for a problem noticed by Dmitri.

Removed unnecessary additional spaces when printing the
FAILED and ERROR statuses. TwisterStatus.get_color is now
used in more places.
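
The colouring pattern in the new summary lines (see the reports.py hunk
in the diff below) boils down to: colour a count only when it is
non-zero, and always reset afterwards. TwisterStatus.get_color and
Fore.RESET are the real names used there; the helper itself is only an
illustration:

from colorama import Fore

def colored_count(count, status, get_color):
    # e.g. colored_count(results.failed, TwisterStatus.FAIL, TwisterStatus.get_color)
    return f"{get_color(status)}{count}{Fore.RESET}" if count else f"{count}"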

Signed-off-by: Lukasz Mrugala <lukaszx.mrugala@intel.com>
parent 502c9ffb
+30 −26
@@ -584,39 +584,43 @@ class Reporting:
            pass_rate = 0

        logger.info(
            "{}{} of {}{} test configurations passed ({:.2%}), {} built (not run), {}{}{} failed, {}{}{} errored, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
                Fore.RED if failed else Fore.GREEN,
                results.passed,
                results.total,
                Fore.RESET,
                pass_rate,
                results.notrun,
                Fore.RED if results.failed else Fore.RESET,
                results.failed,
                Fore.RESET,
                Fore.RED if results.error else Fore.RESET,
                results.error,
                Fore.RESET,
                results.skipped_configs,
                Fore.YELLOW if self.plan.warnings else Fore.RESET,
                self.plan.warnings,
                Fore.RESET,
                duration))
            f"{TwisterStatus.get_color(TwisterStatus.FAIL) if failed else TwisterStatus.get_color(TwisterStatus.PASS)}{results.passed}"
            f" of {results.total - results.skipped_configs}{Fore.RESET}"
            f" executed test configurations passed ({pass_rate:.2%}),"
            f" {f'{TwisterStatus.get_color(TwisterStatus.NOTRUN)}{results.notrun}{Fore.RESET}' if results.notrun else f'{results.notrun}'} built (not run),"
            f" {f'{TwisterStatus.get_color(TwisterStatus.FAIL)}{results.failed}{Fore.RESET}' if results.failed else f'{results.failed}'} failed,"
            f" {f'{TwisterStatus.get_color(TwisterStatus.ERROR)}{results.error}{Fore.RESET}' if results.error else f'{results.error}'} errored,"
            f" with {f'{Fore.YELLOW}{self.plan.warnings}{Fore.RESET}' if self.plan.warnings else 'no'} warnings"
            f" in {duration:.2f} seconds."
        )

        total_platforms = len(self.platforms)
        # if we are only building, do not report about tests being executed.
        if self.platforms and not self.env.options.build_only:
            logger.info("In total {} test cases were executed, {} skipped on {} out of total {} platforms ({:02.2f}%)".format(
                results.cases - results.skipped_cases - results.notrun,
                results.skipped_cases,
                len(self.filtered_platforms),
                total_platforms,
                (100 * len(self.filtered_platforms) / len(self.platforms))
            ))
            executed_cases = results.cases - results.filtered_cases - results.skipped_cases - results.notrun_cases
            pass_rate = 100 * (float(results.passed_cases) / float(executed_cases)) \
                if executed_cases != 0 else 0
            platform_rate = (100 * len(self.filtered_platforms) / len(self.platforms))
            logger.info(
                f'{results.passed_cases} of {executed_cases} executed test cases passed ({pass_rate:02.2f}%)'
                f'{", " + str(results.blocked_cases) + " blocked" if results.blocked_cases else ""}'
                f'{", " + str(results.failed_cases) + " failed" if results.failed_cases else ""}'
                f'{", " + str(results.error_cases) + " errored" if results.error_cases else ""}'
                f'{", " + str(results.none_cases) + " without a status" if results.none_cases else ""}'
                f' on {len(self.filtered_platforms)} out of total {total_platforms} platforms ({platform_rate:02.2f}%).'
            )
            if results.skipped_cases or results.filtered_cases or results.notrun_cases:
                logger.info(
                    f'{results.skipped_cases + results.filtered_cases} selected test cases not executed:' \
                    f'{" " + str(results.skipped_cases) + " skipped" if results.skipped_cases else ""}' \
                    f'{(", " if results.skipped_cases else " ") + str(results.filtered_cases) + " filtered" if results.filtered_cases else ""}' \
                    f'{(", " if results.skipped_cases or results.filtered_cases else " ") + str(results.notrun_cases) + " not run (built only)" if results.notrun_cases else ""}' \
                    f'.'
                )

        built_only = results.total - run - results.skipped_configs
        logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \
{Fore.RED}{built_only}{Fore.RESET} test configurations were only built.")
{TwisterStatus.get_color(TwisterStatus.NOTRUN)}{built_only}{Fore.RESET} test configurations were only built.")

    def save_reports(self, name, suffix, report_dir, no_update, platform_reports):
        if not self.instances:
+326 −63

Preview size limit exceeded, changes collapsed.

+107 −36
@@ -206,20 +206,31 @@ def test_executioncounter(capfd):
    sys.stderr.write(err)

    assert (
        f'--------------------------------\n'
        f'Total test suites: 12\n'
        f'Total test cases: 25\n'
        f'Executed test cases: 19\n'
        f'Skipped test cases: 6\n'
        f'Completed test suites: 9\n'
        f'Passing test suites: 6\n'
        f'Built only test suites: 0\n'
        f'Failing test suites: 1\n'
        f'Skipped test suites: 3\n'
        f'Skipped test suites (runtime): 1\n'
        f'Skipped test suites (filter): 2\n'
        f'Errors: 2\n'
        f'--------------------------------'
        '--------------------------------------------------\n'
        'Total test suites:     12\n'
        'Processed test suites:  9\n'
        '├─ Filtered test suites (static):       2\n'
        '└─ Completed test suites:               7\n'
        '   ├─ Filtered test suites (at runtime):   1\n'
        '   ├─ Passed test suites:                  6\n'
        '   ├─ Built only test suites:              0\n'
        '   ├─ Failed test suites:                  1\n'
        '   └─ Errors in test suites:               2\n'
        '\n'
        'Filtered test suites: 3\n'
        '├─ Filtered test suites (static):       2\n'
        '└─ Filtered test suites (at runtime):   1\n'
        '----------------------      ----------------------\n'
        'Total test cases: 25\n'
        '├─ Filtered test cases:  0\n'
        '├─ Skipped test cases:   6\n'
        '└─ Executed test cases: 19\n'
        '   ├─ Passed test cases:        0\n'
        '   ├─ Built only test cases:    0\n'
        '   ├─ Blocked test cases:       0\n'
        '   ├─ Failed test cases:        0\n'
        '   └─ Errors in test cases:     0\n'
        '--------------------------------------------------\n'
    ) in out

    assert ec.cases == 25
@@ -1547,7 +1558,7 @@ def test_projectbuilder_process(

    assert pb.instance.status == expected_status
    assert pb.instance.reason == expected_reason
    assert results_mock.skipped_runtime == expected_skipped
    assert results_mock.skipped_runtime_increment.call_args_list == [mock.call()] * expected_skipped

    if expected_missing:
        pb.instance.add_missing_case_status.assert_called_with(*expected_missing)
@@ -1945,7 +1956,7 @@ TESTDATA_13 = [
         '            dummy.testsuite.name' \
         '                               FAILED: dummy reason'],
        'INFO    - Total complete:   20/  25  80%' \
        '  built (not run):    0, skipped:    3, failed:    3, error:    1'
        '  built (not run):    0, filtered:    3, failed:    3, error:    1'
    ),
    (
        TwisterStatus.SKIP, True, False, False,
@@ -1958,7 +1969,7 @@ TESTDATA_13 = [
        TwisterStatus.FILTER, False, False, False,
        [],
        'INFO    - Total complete:   20/  25  80%' \
        '  built (not run):    0, skipped:    4, failed:    2, error:    1'
        '  built (not run):    0, filtered:    4, failed:    2, error:    1'
    ),
    (
        TwisterStatus.PASS, True, False, True,
@@ -1979,7 +1990,7 @@ TESTDATA_13 = [
        'unknown status', False, False, False,
        ['Unknown status = unknown status'],
        'INFO    - Total complete:   20/  25  80%'
        '  built (not run):    0, skipped:    3, failed:    2, error:    1\r'
        '  built (not run):    0, filtered:    3, failed:    2, error:    1\r'
    )
]

@@ -2026,21 +2037,49 @@ def test_projectbuilder_report_out(
    pb.options.seed = 123
    pb.log_info_file = mock.Mock()

    results_mock = mock.Mock()
    results_mock = mock.Mock(
        total = 25,
        done = 19,
        passed = 17,
        notrun = 0,
        failed = 2,
        skipped_configs = 3,
        skipped_runtime = 0,
        skipped_filter = 0,
        error = 1,
        cases = 0,
        filtered_cases = 0,
        skipped_cases = 4,
        failed_cases = 0,
        error_cases = 0,
        blocked_cases = 0,
        passed_cases = 0,
        none_cases = 0,
        started_cases = 0
    )
    results_mock.iteration = 1
    results_mock.total = 25
    results_mock.done = 19
    results_mock.passed = 17
    results_mock.notrun = 0
    results_mock.skipped_configs = 3
    results_mock.skipped_cases = 4
    results_mock.failed = 2
    results_mock.error = 1
    results_mock.cases = 0
    def results_done_increment(value=1, decrement=False):
        results_mock.done += value * (-1 if decrement else 1)
    results_mock.done_increment = results_done_increment
    def skipped_configs_increment(value=1, decrement=False):
        results_mock.skipped_configs += value * (-1 if decrement else 1)
    results_mock.skipped_configs_increment = skipped_configs_increment
    def skipped_filter_increment(value=1, decrement=False):
        results_mock.skipped_filter += value * (-1 if decrement else 1)
    results_mock.skipped_filter_increment = skipped_filter_increment
    def skipped_runtime_increment(value=1, decrement=False):
        results_mock.skipped_runtime += value * (-1 if decrement else 1)
    results_mock.skipped_runtime_increment = skipped_runtime_increment
    def failed_increment(value=1, decrement=False):
        results_mock.failed += value * (-1 if decrement else 1)
    results_mock.failed_increment = failed_increment
    def notrun_increment(value=1, decrement=False):
        results_mock.notrun += value * (-1 if decrement else 1)
    results_mock.notrun_increment = notrun_increment

    pb.report_out(results_mock)

    assert results_mock.cases == 25
    assert results_mock.cases_increment.call_args_list == [mock.call(25)]

    trim_actual_log = re.sub(
        r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])',
@@ -2449,6 +2488,10 @@ def test_twisterrunner_run(
    results_mock().failed = 2
    results_mock().total = 9

    def iteration_increment(value=1, decrement=False):
        results_mock().iteration += value * (-1 if decrement else 1)
    results_mock().iteration_increment = iteration_increment

    with mock.patch('twisterlib.runner.ExecutionCounter', results_mock), \
         mock.patch('twisterlib.runner.BaseManager', manager_mock), \
         mock.patch('twisterlib.runner.GNUMakeJobClient.from_environ',
@@ -2519,18 +2562,45 @@ def test_twisterrunner_update_counting_before_pipeline():

    tr = TwisterRunner(instances, suites, env=env_mock)
    tr.results = mock.Mock(
        skipped_filter = 0,
        total = 0,
        done = 0,
        passed = 0,
        failed = 0,
        skipped_configs = 0,
        skipped_cases = 0,
        skipped_runtime = 0,
        skipped_filter = 0,
        error = 0,
        cases = 0,
        error = 0
        filtered_cases = 0,
        skipped_cases = 0,
        failed_cases = 0,
        error_cases = 0,
        blocked_cases = 0,
        passed_cases = 0,
        none_cases = 0,
        started_cases = 0
    )
    def skipped_configs_increment(value=1, decrement=False):
        tr.results.skipped_configs += value * (-1 if decrement else 1)
    tr.results.skipped_configs_increment = skipped_configs_increment
    def skipped_filter_increment(value=1, decrement=False):
        tr.results.skipped_filter += value * (-1 if decrement else 1)
    tr.results.skipped_filter_increment = skipped_filter_increment
    def error_increment(value=1, decrement=False):
        tr.results.error += value * (-1 if decrement else 1)
    tr.results.error_increment = error_increment
    def cases_increment(value=1, decrement=False):
        tr.results.cases += value * (-1 if decrement else 1)
    tr.results.cases_increment = cases_increment
    def filtered_cases_increment(value=1, decrement=False):
        tr.results.filtered_cases += value * (-1 if decrement else 1)
    tr.results.filtered_cases_increment = filtered_cases_increment

    tr.update_counting_before_pipeline()

    assert tr.results.skipped_filter == 1
    assert tr.results.skipped_configs == 1
    assert tr.results.skipped_cases == 4
    assert tr.results.filtered_cases == 4
    assert tr.results.cases == 4
    assert tr.results.error == 1

@@ -2558,7 +2628,7 @@ def test_twisterrunner_show_brief(caplog):
    tr.show_brief()

    log = '2 test scenarios (5 test instances) selected,' \
          ' 4 configurations skipped (3 by static filter, 1 at runtime).'
          ' 4 configurations filtered (3 by static filter, 1 at runtime).'

    assert log in caplog.text

@@ -2609,6 +2679,7 @@ def test_twisterrunner_add_tasks_to_queue(
    tr.get_cmake_filter_stages = mock.Mock(
        side_effect=mock_get_cmake_filter_stages
    )
    tr.results = mock.Mock(iteration=0)

    pipeline_mock = mock.Mock()

+2 −1
@@ -13,6 +13,7 @@ import pytest
import sys
import re

# pylint: disable=no-name-in-module
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock
from twisterlib.testplan import TestPlan

+2 −1
@@ -13,6 +13,7 @@ import pytest
import sys
import re

# pylint: disable=no-name-in-module
from conftest import ZEPHYR_BASE, TEST_DATA, testsuite_filename_mock
from twisterlib.testplan import TestPlan
from twisterlib.error import TwisterRuntimeError