WebKit Bugzilla
Attachment 348894 Details for Bug 189219: run-webkit-tests prints confusing messages when test expectations list results that are not compatible with the run options
Description: Patch
Filename: bug-189219-20180904215733.patch
MIME Type: text/plain
Creator: Simon Fraser (smfr)
Created: 2018-09-04 21:57:34 PDT
Size: 23.37 KB
Flags: patch, obsolete
>Subversion Revision: 235656 >diff --git a/Tools/ChangeLog b/Tools/ChangeLog >index 9fc788dc43041c88c21708dfc8fa2d5240597723..58a7dfe765a5748add95f1b17ecc1b5e1a4b620f 100644 >--- a/Tools/ChangeLog >+++ b/Tools/ChangeLog >@@ -1,3 +1,49 @@ >+2018-09-04 Simon Fraser <simon.fraser@apple.com> >+ >+ run-webkit-tests prints confusing messages when test expectations list results that are not compatible with the run options >+ https://bugs.webkit.org/show_bug.cgi?id=189219 >+ >+ Reviewed by NOBODY (OOPS!). >+ >+ If you call run-webkit-tests without --pixel-tests, and a non-ref test is marked as ImageOnlyFailure, >+ it will be reported as unexpectedly passing. This is more confusing if you run without --world-leaks, yet >+ tests are marked as [ Leak ] in TestExpectations. >+ >+ Fix by filtering out expectations that don't apply given the run options. So without --pixel-tests, >+ a non-ref ImageOnlyFailure becomes a Pass, and without --world-leaks, a Leak becomes a Pass. >+ >+ Add various unit tests to test the various combinations. >+ >+ * Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py: >+ (LayoutTestRunner._update_summary_with_result): >+ (LayoutTestRunner._annotate_results_with_additional_failures): >+ * Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py: >+ (LayoutTestRunnerTests.test_update_summary_with_result): >+ * Scripts/webkitpy/layout_tests/models/test_expectations.py: >+ (TestExpectationsModel.get_expectations_or_pass): >+ (TestExpectationsModel): >+ (TestExpectationsModel.expectations_to_string): >+ (TestExpectationsModel.get_expectations_string): >+ (TestExpectations.filtered_expectations_for_test): >+ (TestExpectations): >+ (TestExpectations.matches_an_expected_result): >+ * Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py: >+ * Scripts/webkitpy/layout_tests/models/test_run_results.py: >+ (summarize_results): >+ * Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py: >+ (summarized_results): >+ (SummarizedResultsTest.setUp): >+ (SummarizedResultsTest.test_summarized_results_include_passes): >+ (SummarizedResultsTest): >+ (SummarizedResultsTest.test_summarized_results_world_leaks_disabled): >+ * Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py: >+ * Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py: >+ (BuildBotPrinterTests.test_print_unexpected_results): >+ (BuildBotPrinterTests.test_print_unexpected_results_with_options): >+ (BuildBotPrinterTests.test_print_results): >+ * Scripts/webkitpy/port/test.py: >+ (unit_test_list): >+ > 2018-09-04 Don Olmstead <don.olmstead@sony.com> > > Add generic entrypoint and run loop in TestWebKitAPI >diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py >index 4b19974d9874dd826d3ab5d6db2b80eed99d1412..506bd06890725b4b07dc547d157952e6e290bf03 100644 >--- a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py >+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py >@@ -182,8 +182,9 @@ class LayoutTestRunner(object): > exp_str = got_str = 'SKIP' > expected = True > else: >- expected = self._expectations.matches_an_expected_result(result.test_name, result.type, self._options.pixel_tests or result.reftest_type, self._options.world_leaks) >- exp_str = self._expectations.model().get_expectations_string(result.test_name) >+ expectations = self._expectations.filtered_expectations_for_test(result.test_name, 
self._options.pixel_tests or bool(result.reftest_type), self._options.world_leaks) >+ expected = self._expectations.matches_an_expected_result(result.test_name, result.type, expectations) >+ exp_str = self._expectations.model().expectations_to_string(expectations) > got_str = self._expectations.model().expectation_to_string(result.type) > > run_results.add(result, expected, self._test_is_slow(result.test_name)) >@@ -198,8 +199,9 @@ class LayoutTestRunner(object): > # When running a chunk (--run-chunk), results_by_name contains all the tests, but (confusingly) all_tests only contains those in the chunk that was run, > # and we don't want to modify the results of a test that didn't run. existing_result.test_number is only non-None for tests that ran. > if existing_result and existing_result.test_number is not None: >- was_expected = self._expectations.matches_an_expected_result(new_result.test_name, existing_result.type, self._options.pixel_tests or existing_result.reftest_type, self._options.world_leaks) >- now_expected = self._expectations.matches_an_expected_result(new_result.test_name, new_result.type, self._options.pixel_tests or new_result.reftest_type, self._options.world_leaks) >+ expectations = self._expectations.filtered_expectations_for_test(new_result.test_name, self._options.pixel_tests or bool(new_result.reftest_type), self._options.world_leaks) >+ was_expected = self._expectations.matches_an_expected_result(new_result.test_name, existing_result.type, expectations) >+ now_expected = self._expectations.matches_an_expected_result(new_result.test_name, new_result.type, expectations) > run_results.change_result_to_failure(existing_result, new_result, was_expected, now_expected) > > def start_servers(self): >diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py >index fff96dfe4de88561123e9691804948ecb0d835c9..b80ca45485a3294d02e2455e5ef3dfc1743d189f 100644 >--- a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py >+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py >@@ -122,8 +122,10 @@ class LayoutTestRunnerTests(unittest.TestCase): > # Reftests expected to be image mismatch should be respected when pixel_tests=False. 
> runner = self._runner() > runner._options.pixel_tests = False >+ runner._options.world_leaks = False > test = 'failures/expected/reftest.html' >- expectations = TestExpectations(runner._port, tests=[test]) >+ leak_test = 'failures/expected/leak.html' >+ expectations = TestExpectations(runner._port, tests=[test, leak_test]) > expectations.parse_all_expectations() > runner._expectations = expectations > >@@ -139,6 +141,12 @@ class LayoutTestRunnerTests(unittest.TestCase): > self.assertEqual(0, run_results.expected) > self.assertEqual(1, run_results.unexpected) > >+ run_results = TestRunResults(expectations, 1) >+ result = TestResult(test_name=leak_test, failures=[]) >+ runner._update_summary_with_result(run_results, result) >+ self.assertEqual(1, run_results.expected) >+ self.assertEqual(0, run_results.unexpected) >+ > def test_servers_started(self): > > def start_http_server(): >diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py >index b3acc6725a74e4a3f8b938a8d0df48f87d699b20..e70c65cda1093c08d611c847bc96bfc395a726fc 100644 >--- a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py >+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py >@@ -598,8 +598,22 @@ class TestExpectationsModel(object): > def get_expectations(self, test): > return self._test_to_expectations[test] > >+ def get_expectations_or_pass(self, test): >+ try: >+ return self.get_expectations(test) >+ except: >+ return set([PASS]) >+ >+ def expectations_to_string(self, expectations): >+ retval = [] >+ >+ for expectation in expectations: >+ retval.append(self.expectation_to_string(expectation)) >+ >+ return " ".join(retval) >+ > def get_expectations_string(self, test): >- """Returns the expectatons for the given test as an uppercase string. >+ """Returns the expectations for the given test as an uppercase string. 
> If there are no expectations for the test, then "PASS" is returned.""" > try: > expectations = self.get_expectations(test) >@@ -965,14 +979,16 @@ class TestExpectations(object): > def get_rebaselining_failures(self): > return self._model.get_test_set(REBASELINE) > >- def matches_an_expected_result(self, test, result, pixel_tests_are_enabled, world_leaks_are_enabled): >- expected_results = self._model.get_expectations(test) >+ def filtered_expectations_for_test(self, test, pixel_tests_are_enabled, world_leaks_are_enabled): >+ expected_results = self._model.get_expectations_or_pass(test) > if not pixel_tests_are_enabled: > expected_results = self.remove_pixel_failures(expected_results) >- > if not world_leaks_are_enabled: > expected_results = self.remove_leak_failures(expected_results) > >+ return expected_results >+ >+ def matches_an_expected_result(self, test, result, expected_results): > return self.result_was_expected(result, > expected_results, > self.is_rebaselining(test), >diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py >index 4d3f0b6676ba30336d2f33dd2c310d8987e4dcee..30adabcc2319747f10681340d4b9edf0b1e7a23b 100644 >--- a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py >+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py >@@ -234,7 +234,8 @@ class MiscTests(Base): > > def test_pixel_tests_flag(self): > def match(test, result, pixel_tests_enabled): >- return self._exp.matches_an_expected_result(test, result, pixel_tests_enabled, False) >+ expectations = self._exp.filtered_expectations_for_test(test, pixel_tests_enabled, False) >+ return self._exp.matches_an_expected_result(test, result, expectations) > > self.parse_exp(self.get_basic_expectations()) > pixel_tests_enabled = True >@@ -250,7 +251,8 @@ class MiscTests(Base): > > def test_world_leaks_flag(self): > def match(test, result, pixel_tests_enabled, world_leaks_enabled): >- return self._exp.matches_an_expected_result(test, result, pixel_tests_enabled, world_leaks_enabled) >+ expectations = self._exp.filtered_expectations_for_test(test, pixel_tests_enabled, world_leaks_enabled) >+ return self._exp.matches_an_expected_result(test, result, expectations) > > pixel_tests_enabled = True > pixel_tests_disabled = False >diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py >index 4370fddc6709c9cf14be97ba18792e6681b1d00e..56830038bc1b22dbf3de035890aa3b0e9d5854c5 100644 >--- a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py >+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py >@@ -233,7 +233,10 @@ def summarize_results(port_obj, expectations, initial_results, retry_results, en > # Note that if a test crashed in the original run, we ignore > # whether or not it crashed when we retried it (if we retried it), > # and always consider the result not flaky. 
>- expected = expectations.model().get_expectations_string(test_name) >+ pixel_tests_enabled = enabled_pixel_tests_in_retry or port_obj._options.pixel_tests or bool(result.reftest_type) >+ test_expectation = expectations.filtered_expectations_for_test(test_name, pixel_tests_enabled, port_obj._options.world_leaks) >+ expected = expectations.model().expectations_to_string(test_expectation) >+ > result_type = result.type > actual = [keywords[result_type]] > >@@ -267,10 +270,6 @@ def summarize_results(port_obj, expectations, initial_results, retry_results, en > if test_name in initial_results.unexpected_results_by_name: > num_missing += 1 > test_dict['report'] = 'MISSING' >- elif result_type == test_expectations.LEAK: >- if test_name in initial_results.unexpected_results_by_name: >- num_regressions += 1 >- test_dict['report'] = 'REGRESSION' > elif test_name in initial_results.unexpected_results_by_name: > if retry_results and test_name not in retry_results.unexpected_results_by_name: > actual.extend(expectations.model().get_expectations_string(test_name).split(" ")) >diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py >index cbda1998065a3c7b7c976890c033da41e856c2b9..2838e29de5ad328cd4749b5b0263057f116cce95 100644 >--- a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py >+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py >@@ -33,6 +33,7 @@ from webkitpy.layout_tests.models import test_expectations > from webkitpy.layout_tests.models import test_failures > from webkitpy.layout_tests.models import test_results > from webkitpy.layout_tests.models import test_run_results >+from webkitpy.tool.mocktool import MockOptions > > > def get_result(test_name, result_type=test_expectations.PASS, run_time=0): >@@ -65,18 +66,38 @@ def summarized_results(port, expected, passing, flaky, include_passes=False): > initial_results.add(get_result('failures/expected/audio.html', test_expectations.AUDIO), expected, test_is_slow) > initial_results.add(get_result('failures/expected/timeout.html', test_expectations.TIMEOUT), expected, test_is_slow) > initial_results.add(get_result('failures/expected/crash.html', test_expectations.CRASH), expected, test_is_slow) >- initial_results.add(get_result('failures/expected/leak.html', test_expectations.LEAK), expected, test_is_slow) >+ >+ if port._options.pixel_tests: >+ initial_results.add(get_result('failures/expected/pixel-fail.html', test_expectations.IMAGE), expected, test_is_slow) >+ else: >+ initial_results.add(get_result('failures/expected/pixel-fail.html', test_expectations.PASS), expected, test_is_slow) >+ >+ if port._options.world_leaks: >+ initial_results.add(get_result('failures/expected/leak.html', test_expectations.LEAK), expected, test_is_slow) >+ else: >+ initial_results.add(get_result('failures/expected/leak.html', test_expectations.PASS), expected, test_is_slow) >+ > elif passing: > initial_results.add(get_result('passes/text.html'), expected, test_is_slow) > initial_results.add(get_result('failures/expected/audio.html'), expected, test_is_slow) > initial_results.add(get_result('failures/expected/timeout.html'), expected, test_is_slow) > initial_results.add(get_result('failures/expected/crash.html'), expected, test_is_slow) >- initial_results.add(get_result('failures/expected/leak.html'), expected, test_is_slow) >+ >+ if port._options.pixel_tests: >+ 
initial_results.add(get_result('failures/expected/pixel-fail.html'), expected, test_is_slow) >+ else: >+ initial_results.add(get_result('failures/expected/pixel-fail.html', test_expectations.IMAGE), expected, test_is_slow) >+ >+ if port._options.world_leaks: >+ initial_results.add(get_result('failures/expected/leak.html'), expected, test_is_slow) >+ else: >+ initial_results.add(get_result('failures/expected/leak.html', test_expectations.PASS), expected, test_is_slow) > else: > initial_results.add(get_result('passes/text.html', test_expectations.TIMEOUT), expected, test_is_slow) > initial_results.add(get_result('failures/expected/audio.html', test_expectations.AUDIO), expected, test_is_slow) > initial_results.add(get_result('failures/expected/timeout.html', test_expectations.CRASH), expected, test_is_slow) > initial_results.add(get_result('failures/expected/crash.html', test_expectations.TIMEOUT), expected, test_is_slow) >+ initial_results.add(get_result('failures/expected/pixel-fail.html', test_expectations.TIMEOUT), expected, test_is_slow) > initial_results.add(get_result('failures/expected/leak.html', test_expectations.CRASH), expected, test_is_slow) > > # we only list hang.html here, since normally this is WontFix >@@ -87,6 +108,7 @@ def summarized_results(port, expected, passing, flaky, include_passes=False): > retry_results.add(get_result('passes/text.html'), True, test_is_slow) > retry_results.add(get_result('failures/expected/timeout.html'), True, test_is_slow) > retry_results.add(get_result('failures/expected/crash.html'), True, test_is_slow) >+ retry_results.add(get_result('failures/expected/pixel-fail.html'), True, test_is_slow) > retry_results.add(get_result('failures/expected/leak.html'), True, test_is_slow) > else: > retry_results = None >@@ -126,7 +148,7 @@ class InterpretTestFailuresTest(unittest.TestCase): > class SummarizedResultsTest(unittest.TestCase): > def setUp(self): > host = MockHost(initialize_scm_by_default=False) >- self.port = host.port_factory.get(port_name='test') >+ self.port = host.port_factory.get(port_name='test', options=MockOptions(http=True, pixel_tests=False, world_leaks=False)) > > def test_no_svn_revision(self): > summary = summarized_results(self.port, expected=False, passing=False, flaky=False) >@@ -146,3 +168,8 @@ class SummarizedResultsTest(unittest.TestCase): > self.port._options.builder_name = 'dummy builder' > summary = summarized_results(self.port, expected=False, passing=True, flaky=False, include_passes=True) > self.assertEqual(summary['tests']['passes']['text.html']['expected'], 'PASS') >+ >+ def test_summarized_results_world_leaks_disabled(self): >+ self.port._options.builder_name = 'dummy builder' >+ summary = summarized_results(self.port, expected=False, passing=True, flaky=False, include_passes=True) >+ self.assertEqual(summary['tests']['failures']['expected']['leak.html']['expected'], 'PASS') >diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py >index f04cb58cd680ae13e806fe3ed2b68e4506d4eed5..bb57a56caf136add8b76bf4e87ee595ccda06ea2 100644 >--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py >+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py >@@ -54,7 +54,6 @@ from webkitpy.layout_tests.models.test_run_results import INTERRUPTED_EXIT_STATU > from webkitpy.port import Port > from webkitpy.port import test > from webkitpy.test.skip import skip_if >-from webkitpy.tool.mocktool 
import MockOptions > > > def parse_args(extra_args=None, tests_included=False, new_results=False, print_nothing=True): >diff --git a/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py b/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py >index 8e536a3275de39bc47b18ca65d9044ddbcaa8def..558b0bd007bc530fd587e50a014aba6884079efb 100644 >--- a/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py >+++ b/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py >@@ -30,6 +30,7 @@ import StringIO > import unittest > > from webkitpy.common.host_mock import MockHost >+from webkitpy.tool.mocktool import MockOptions > > from webkitpy.layout_tests.models import test_expectations > from webkitpy.layout_tests.models import test_failures >@@ -52,7 +53,7 @@ class BuildBotPrinterTests(unittest.TestCase): > return printer, stream > > def test_print_unexpected_results(self): >- port = MockHost().port_factory.get('test') >+ port = MockHost().port_factory.get('test', options=MockOptions(pixel_tests=False, world_leaks=False)) > printer, out = self.get_printer() > > # test everything running as expected >@@ -88,8 +89,34 @@ class BuildBotPrinterTests(unittest.TestCase): > printer.print_unexpected_results(summary) > self.assertNotEmpty(out) > >+ def test_print_unexpected_results_with_options(self): >+ port = MockHost().port_factory.get('test', options=MockOptions(pixel_tests=False, world_leaks=False)) >+ >+ DASHED_LINE = "-" * 78 + "\n" >+ >+ port._options.pixel_tests = True >+ port._options.world_leaks = False >+ printer, out = self.get_printer() >+ summary = test_run_results_unittest.summarized_results(port, expected=True, passing=False, flaky=False) >+ printer.print_unexpected_results(summary) >+ self.assertEqual(out.getvalue(), DASHED_LINE) >+ >+ port._options.pixel_tests = False >+ port._options.world_leaks = True >+ printer, out = self.get_printer() >+ summary = test_run_results_unittest.summarized_results(port, expected=True, passing=False, flaky=False) >+ printer.print_unexpected_results(summary) >+ self.assertEqual(out.getvalue(), DASHED_LINE) >+ >+ port._options.pixel_tests = True >+ port._options.world_leaks = True >+ printer, out = self.get_printer() >+ summary = test_run_results_unittest.summarized_results(port, expected=True, passing=False, flaky=False) >+ printer.print_unexpected_results(summary) >+ self.assertEqual(out.getvalue(), DASHED_LINE) >+ > def test_print_results(self): >- port = MockHost().port_factory.get('test') >+ port = MockHost().port_factory.get('test', options=MockOptions(pixel_tests=False, world_leaks=False)) > printer, out = self.get_printer() > initial_results = test_run_results_unittest.run_results(port) > summary = test_run_results_unittest.summarized_results(port, expected=False, passing=True, flaky=False) >diff --git a/Tools/Scripts/webkitpy/port/test.py b/Tools/Scripts/webkitpy/port/test.py >index 96d72ddf47ccb479f8dd5bde7610621abd954fbc..31f38a9e9dfe18ed34aadac13ae5fee1de2eb723 100644 >--- a/Tools/Scripts/webkitpy/port/test.py >+++ b/Tools/Scripts/webkitpy/port/test.py >@@ -102,7 +102,7 @@ class TestList(object): > # > # These numbers may need to be updated whenever we add or delete tests. 
> # >-TOTAL_TESTS = 76 >+TOTAL_TESTS = 77 > TOTAL_SKIPS = 9 > TOTAL_RETRIES = 15 > >@@ -125,6 +125,9 @@ def unit_test_list(): > tests.add('failures/expected/image.html', > actual_image='image_fail-pngtEXtchecksum\x00checksum_fail', > expected_image='image-pngtEXtchecksum\x00checksum-png') >+ tests.add('failures/expected/pixel-fail.html', >+ actual_image='image_fail-pngtEXtchecksum\x00checksum_fail', >+ expected_image='image-pngtEXtchecksum\x00checksum-png') > tests.add('failures/expected/image_checksum.html', > actual_checksum='image_checksum_fail-checksum', > actual_image='image_checksum_fail-png') >@@ -293,6 +296,7 @@ Bug(test) failures/expected/leak.html [ Leak ] > Bug(test) failures/expected/flaky-leak.html [ Failure Leak ] > Bug(test) failures/expected/leaky-reftest.html [ ImageOnlyFailure Leak ] > Bug(test) failures/expected/image.html [ ImageOnlyFailure ] >+Bug(test) failures/expected/pixel-fail.html [ ImageOnlyFailure ] > Bug(test) failures/expected/audio.html [ Failure ] > Bug(test) failures/expected/image_checksum.html [ ImageOnlyFailure ] > Bug(test) failures/expected/mismatch.html [ ImageOnlyFailure ]
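
For context (not part of the patch): a minimal, self-contained Python sketch of the filtering behaviour the ChangeLog describes, namely that expectations which cannot be observed under the current run options collapse to PASS. The constant and function names below are illustrative stand-ins, not the real webkitpy API; the patch's actual entry point is TestExpectations.filtered_expectations_for_test, shown in the diff above.

    # Illustrative sketch only; constants mirror webkitpy's test_expectations
    # keywords, but this helper is hypothetical and standalone.
    PASS, IMAGE, LEAK = 'PASS', 'IMAGE', 'LEAK'

    def filtered_expectations(expected_results, pixel_tests_enabled, world_leaks_enabled):
        """Drop expectations that cannot occur under the current run options.

        Without --pixel-tests an ImageOnlyFailure cannot be observed, and without
        --world-leaks a Leak cannot be observed, so each collapses to PASS.
        """
        results = set(expected_results) or {PASS}
        if not pixel_tests_enabled:
            results.discard(IMAGE)
        if not world_leaks_enabled:
            results.discard(LEAK)
        return results or {PASS}

    # A test marked [ Leak ] is simply expected to pass when --world-leaks is off:
    assert filtered_expectations({LEAK}, pixel_tests_enabled=True, world_leaks_enabled=False) == {PASS}
    # With --world-leaks enabled, the Leak expectation is kept:
    assert filtered_expectations({LEAK}, pixel_tests_enabled=True, world_leaks_enabled=True) == {LEAK}

This is why, after the patch, a non-ref test expected to be ImageOnlyFailure no longer shows up as "unexpectedly passing" in a run without --pixel-tests, and likewise for [ Leak ] tests in a run without --world-leaks.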