WebKit Bugzilla
Attachment 348694 Details for Bug 189219: run-webkit-tests prints confusing messages when test expectations list results that are not compatible with the run options
Description: Patch
Filename: bug-189219-20180831182320.patch
MIME Type: text/plain
Creator: Simon Fraser (smfr)
Created: 2018-08-31 18:23:21 PDT
Size: 16.14 KB
Flags: patch, obsolete
Subversion Revision: 235571
diff --git a/Tools/ChangeLog b/Tools/ChangeLog
index 7a499fd5f42edb18d476bbb3b429090750209c11..cafc350fecc1bad2289f347e84609b5dfc18208d 100644
--- a/Tools/ChangeLog
+++ b/Tools/ChangeLog
@@ -1,3 +1,34 @@
+2018-08-31  Simon Fraser  <simon.fraser@apple.com>
+
+        run-webkit-tests prints confusing messages when test expectations list results that are not compatible with the run options
+        https://bugs.webkit.org/show_bug.cgi?id=189219
+
+        Reviewed by NOBODY (OOPS!).
+
+        * Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py:
+        (LayoutTestRunner._update_summary_with_result):
+        (LayoutTestRunner._annotate_results_with_additional_failures):
+        * Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py:
+        (LayoutTestRunnerTests.test_update_summary_with_result):
+        * Scripts/webkitpy/layout_tests/models/test_expectations.py:
+        (TestExpectationsModel.get_expectations_or_pass):
+        (TestExpectationsModel):
+        (TestExpectationsModel.expectations_to_string):
+        (TestExpectationsModel.get_expectations_string):
+        (TestExpectations.filtered_expectations_for_test):
+        (TestExpectations.matches_an_expected_result):
+        * Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py:
+        * Scripts/webkitpy/layout_tests/models/test_run_results.py:
+        (summarize_results):
+        * Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py:
+        (SummarizedResultsTest.test_svn_revision):
+        (SummarizedResultsTest.test_summarized_results_wontfix):
+        (SummarizedResultsTest.test_summarized_results_include_passes):
+        (SummarizedResultsTest):
+        (SummarizedResultsTest.test_summarized_results_world_leaks_disabled):
+        * Scripts/webkitpy/layout_tests/views/printing.py:
+        (Printer.print_test_result_changed):
+
 2018-08-31  Myles C. Maxfield  <mmaxfield@apple.com>
 
         [WHLSL] Remove useless code in NameResolver
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
index 4b19974d9874dd826d3ab5d6db2b80eed99d1412..4bb48fd46444010435cc5e561cebbd4df9edbbc6 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
@@ -182,8 +182,9 @@ class LayoutTestRunner(object):
             exp_str = got_str = 'SKIP'
             expected = True
         else:
-            expected = self._expectations.matches_an_expected_result(result.test_name, result.type, self._options.pixel_tests or result.reftest_type, self._options.world_leaks)
-            exp_str = self._expectations.model().get_expectations_string(result.test_name)
+            expectations = self._expectations.filtered_expectations_for_test(result.test_name, self._options.pixel_tests or bool(result.reftest_type), self._options.world_leaks)
+            expected = self._expectations.matches_an_expected_result(result.test_name, result.type, expectations)
+            exp_str = self._expectations.model().expectations_to_string(expectations);
             got_str = self._expectations.model().expectation_to_string(result.type)
 
         run_results.add(result, expected, self._test_is_slow(result.test_name))
@@ -198,9 +199,16 @@
             # When running a chunk (--run-chunk), results_by_name contains all the tests, but (confusingly) all_tests only contains those in the chunk that was run,
            # and we don't want to modify the results of a test that didn't run. existing_result.test_number is only non-None for tests that ran.
             if existing_result and existing_result.test_number is not None:
-                was_expected = self._expectations.matches_an_expected_result(new_result.test_name, existing_result.type, self._options.pixel_tests or existing_result.reftest_type, self._options.world_leaks)
-                now_expected = self._expectations.matches_an_expected_result(new_result.test_name, new_result.type, self._options.pixel_tests or new_result.reftest_type, self._options.world_leaks)
+                expectations = self._expectations.filtered_expectations_for_test(new_result.test_name, self._options.pixel_tests or bool(new_result.reftest_type), self._options.world_leaks)
+                was_expected = self._expectations.matches_an_expected_result(new_result.test_name, existing_result.type, expectations)
+                now_expected = self._expectations.matches_an_expected_result(new_result.test_name, new_result.type, expectations)
                 run_results.change_result_to_failure(existing_result, new_result, was_expected, now_expected)
+
+                if was_expected is not now_expected:
+                    exp_str = self._expectations.model().expectations_to_string(expectations);
+                    got_str = self._expectations.model().expectation_to_string(new_result.type)
+                    self._printer.print_test_result_changed(new_result, now_expected, exp_str, got_str)
+
 
     def start_servers(self):
         if self._needs_http and not self._did_start_http_server and not self._port.is_http_server_running():
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py
index fff96dfe4de88561123e9691804948ecb0d835c9..b80ca45485a3294d02e2455e5ef3dfc1743d189f 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py
@@ -122,8 +122,10 @@ class LayoutTestRunnerTests(unittest.TestCase):
         # Reftests expected to be image mismatch should be respected when pixel_tests=False.
         runner = self._runner()
         runner._options.pixel_tests = False
+        runner._options.world_leaks = False
         test = 'failures/expected/reftest.html'
-        expectations = TestExpectations(runner._port, tests=[test])
+        leak_test = 'failures/expected/leak.html'
+        expectations = TestExpectations(runner._port, tests=[test, leak_test])
         expectations.parse_all_expectations()
         runner._expectations = expectations
 
@@ -139,6 +141,12 @@
         self.assertEqual(0, run_results.expected)
         self.assertEqual(1, run_results.unexpected)
 
+        run_results = TestRunResults(expectations, 1)
+        result = TestResult(test_name=leak_test, failures=[])
+        runner._update_summary_with_result(run_results, result)
+        self.assertEqual(1, run_results.expected)
+        self.assertEqual(0, run_results.unexpected)
+
     def test_servers_started(self):
 
         def start_http_server():
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
index b3acc6725a74e4a3f8b938a8d0df48f87d699b20..9a9059e1c59fbdde7d27a15dfa0f5a921d59f76e 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
@@ -598,8 +598,22 @@ class TestExpectationsModel(object):
     def get_expectations(self, test):
         return self._test_to_expectations[test]
 
-    def get_expectations_string(self, test):
-        """Returns the expectatons for the given test as an uppercase string.
+    def get_expectations_or_pass(self, test):
+        try:
+            return self.get_expectations(test)
+        except:
+            return "PASS"
+
+    def expectations_to_string(self, expectations):
+        retval = []
+
+        for expectation in expectations:
+            retval.append(self.expectation_to_string(expectation))
+
+        return " ".join(retval)
+
+    def get_expectations_string(self, test):  # FIXME: remove.
+        """Returns the expectations for the given test as an uppercase string.
         If there are no expectations for the test, then "PASS" is returned."""
         try:
             expectations = self.get_expectations(test)
@@ -965,14 +979,16 @@
     def get_rebaselining_failures(self):
         return self._model.get_test_set(REBASELINE)
 
-    def matches_an_expected_result(self, test, result, pixel_tests_are_enabled, world_leaks_are_enabled):
-        expected_results = self._model.get_expectations(test)
+    def filtered_expectations_for_test(self, test, pixel_tests_are_enabled, world_leaks_are_enabled):  # FIXME: unit test.
+        expected_results = self._model.get_expectations_or_pass(test)
         if not pixel_tests_are_enabled:
             expected_results = self.remove_pixel_failures(expected_results)
-
         if not world_leaks_are_enabled:
             expected_results = self.remove_leak_failures(expected_results)
+
+        return expected_results
 
+    def matches_an_expected_result(self, test, result, expected_results):
         return self.result_was_expected(result,
                                         expected_results,
                                         self.is_rebaselining(test),
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
index 4d3f0b6676ba30336d2f33dd2c310d8987e4dcee..30adabcc2319747f10681340d4b9edf0b1e7a23b 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
@@ -234,7 +234,8 @@ class MiscTests(Base):
 
     def test_pixel_tests_flag(self):
         def match(test, result, pixel_tests_enabled):
-            return self._exp.matches_an_expected_result(test, result, pixel_tests_enabled, False)
+            expectations = self._exp.filtered_expectations_for_test(test, pixel_tests_enabled, False)
+            return self._exp.matches_an_expected_result(test, result, expectations)
 
         self.parse_exp(self.get_basic_expectations())
         pixel_tests_enabled = True
@@ -250,7 +251,8 @@
 
     def test_world_leaks_flag(self):
         def match(test, result, pixel_tests_enabled, world_leaks_enabled):
-            return self._exp.matches_an_expected_result(test, result, pixel_tests_enabled, world_leaks_enabled)
+            expectations = self._exp.filtered_expectations_for_test(test, pixel_tests_enabled, world_leaks_enabled)
+            return self._exp.matches_an_expected_result(test, result, expectations)
 
         pixel_tests_enabled = True
         pixel_tests_disabled = False
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
index 4370fddc6709c9cf14be97ba18792e6681b1d00e..0e2b8d2c6e1064b4a73b8b5775c5230e9d0aa27e 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
@@ -233,7 +233,11 @@ def summarize_results(port_obj, expectations, initial_results, retry_results, en
         # Note that if a test crashed in the original run, we ignore
         # whether or not it crashed when we retried it (if we retried it),
         # and always consider the result not flaky.
-        expected = expectations.model().get_expectations_string(test_name)
+        pixel_tests_enabled = enabled_pixel_tests_in_retry or port_obj._options.pixel_tests or bool(result.reftest_type)
+        test_expectation = expectations.filtered_expectations_for_test(test_name, pixel_tests_enabled, port_obj._options.world_leaks)
+        expected = expectations.model().expectations_to_string(test_expectation)
+
+        # expected = expectations.model().get_expectations_string(test_name)
         result_type = result.type
         actual = [keywords[result_type]]
 
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py
index cbda1998065a3c7b7c976890c033da41e856c2b9..8198ebd1092ffb65f798869e493757b4dd8e392d 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py
@@ -134,15 +134,28 @@ class SummarizedResultsTest(unittest.TestCase):
 
     def test_svn_revision(self):
         self.port._options.builder_name = 'dummy builder'
+        self.port._options.pixel_tests = False
+        self.port._options.world_leaks = False
         summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
         self.assertNotEquals(summary['revision'], '')
 
     def test_summarized_results_wontfix(self):
         self.port._options.builder_name = 'dummy builder'
+        self.port._options.pixel_tests = False
+        self.port._options.world_leaks = False
         summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
         self.assertTrue(summary['tests']['failures']['expected']['hang.html']['wontfix'])
 
     def test_summarized_results_include_passes(self):
         self.port._options.builder_name = 'dummy builder'
+        self.port._options.pixel_tests = False
+        self.port._options.world_leaks = False
         summary = summarized_results(self.port, expected=False, passing=True, flaky=False, include_passes=True)
         self.assertEqual(summary['tests']['passes']['text.html']['expected'], 'PASS')
+
+    def test_summarized_results_world_leaks_disabled(self):
+        self.port._options.builder_name = 'dummy builder'
+        self.port._options.pixel_tests = False
+        self.port._options.world_leaks = False
+        summary = summarized_results(self.port, expected=False, passing=True, flaky=False, include_passes=True)
+        self.assertEqual(summary['tests']['failures']['expected']['leak.html']['expected'], 'PASS')
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/printing.py b/Tools/Scripts/webkitpy/layout_tests/views/printing.py
index 0dec9c81ac4d4785d10a767303f7319873a39a2e..919a21d072a7292a0b5f19e166e2ae6d1153f2c7 100644
--- a/Tools/Scripts/webkitpy/layout_tests/views/printing.py
+++ b/Tools/Scripts/webkitpy/layout_tests/views/printing.py
@@ -318,6 +318,16 @@ class Printer(object):
         write = self._meter.write_throttled_update
         write(self._test_status_line(test_name, suffix))
 
+    def print_test_result_changed(self, result, expected, exp_str, got_str):
+        result_message = self._result_message(result.type, result.failures, expected, self._options.verbose)
+
+        if self._options.details:
+            self._print_test_trace(result, exp_str, got_str)
+        elif (self._options.verbose and not self._options.debug_rwt_logging) or not expected:
+            self.writeln(self._test_status_line(result.test_name, result_message, truncate=False))
+        else:
+            self._meter.write_throttled_update(self._test_status_line(result.test_name, result_message, truncate=False))
+
     def print_finished_test(self, result, expected, exp_str, got_str):
         test_name = result.test_name
 
diff --git a/LayoutTests/ChangeLog b/LayoutTests/ChangeLog
index 4517b1182ce3a155b5e8137bb87f0859bcef04c2..34d74b2681d4eb173a98eeccec669c1fd29caaae 100644
--- a/LayoutTests/ChangeLog
+++ b/LayoutTests/ChangeLog
@@ -1,3 +1,12 @@
+2018-08-31  Simon Fraser  <simon.fraser@apple.com>
+
+        run-webkit-tests prints confusing messages when test expectations list results that are not compatible with the run options
+        https://bugs.webkit.org/show_bug.cgi?id=189219
+
+        Reviewed by NOBODY (OOPS!).
+
+        * TestExpectations:
+
 2018-08-31  Simon Fraser  <simon.fraser@apple.com>
 
         Clean up TestExpectations so that ./Tools/Scripts/lint-test-expectations
diff --git a/LayoutTests/TestExpectations b/LayoutTests/TestExpectations
index 3193ba318f7c2e78767641b59922e8c4c6805e2e..6dc08b9e5024c4964c3c5c0861ebe2038b3be5c6 100644
--- a/LayoutTests/TestExpectations
+++ b/LayoutTests/TestExpectations
@@ -2241,3 +2241,5 @@ webkit.org/b/185308 legacy-animation-engine/animations/combo-transform-translate
 
 # This test is currently only relevant on mac-wk2
 fast/canvas/webgl/context-update-on-display-configuration.html [ Skip ]
+
+fast/css/user-drag-none.html [ Leak ]
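For context, the core idea of the patch is to split the old single-call expectation check into two steps: first filter the expectation set down to outcomes the current run options can actually produce (dropping image-failure expectations when pixel tests are off and leak expectations when --world-leaks is off), then ask whether the observed result is in that filtered set, and print the expected string from that same filtered set. The standalone Python sketch below illustrates that filter-then-match pattern; it is a simplified illustration under assumed names, not the webkitpy code itself.

# Simplified, self-contained sketch of the filter-then-match idea; the helper
# names and constants here are illustrative only and are not part of webkitpy.

IMAGE, LEAK, TEXT, PASS, FAIL = 'IMAGE', 'LEAK', 'TEXT', 'PASS', 'FAIL'


def filtered_expectations_for_test(expectations, pixel_tests_enabled, world_leaks_enabled):
    """Drop expectations this run cannot produce, falling back to PASS."""
    filtered = set(expectations)
    if not pixel_tests_enabled:
        filtered.discard(IMAGE)       # image-only failures cannot happen without pixel tests
    if not world_leaks_enabled:
        filtered.discard(LEAK)        # leak failures cannot happen without --world-leaks
    return filtered or {PASS}         # nothing left means a plain pass is expected


def matches_an_expected_result(actual_result, filtered_expectations):
    return actual_result in filtered_expectations


if __name__ == '__main__':
    # A test marked [ Leak ] in TestExpectations, run without --world-leaks:
    expectations = {LEAK}
    filtered = filtered_expectations_for_test(expectations, pixel_tests_enabled=False, world_leaks_enabled=False)
    print(filtered)                                       # {'PASS'}
    print(matches_an_expected_result(PASS, filtered))     # True: a pass is now expected
    print(matches_an_expected_result(FAIL, filtered))     # False: reported as unexpected

With this split, the "expected" string shown to the user is derived from the same filtered set used for the match, so a test marked [ Leak ] no longer prints a confusing LEAK expectation when leak checking is disabled.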