WebKit Bugzilla
Attachment 356444 Details for
Bug 192030
: Layout test should generate performance metrics
Home
|
New
|
Browse
|
Search
|
[?]
|
Reports
|
Requests
|
Help
|
New Account
|
Log In
Remember
[x]
|
Forgot Password
Login:
[x]
[patch]
Patch
bug-192030-20181203182846.patch (text/plain), 10.16 KB, created by
Zhifei Fang
on 2018-12-03 18:28:46 PST
(
hide
)
Description:
Patch
Filename:
MIME Type:
Creator:
Zhifei Fang
Created:
2018-12-03 18:28:46 PST
Size:
10.16 KB
patch
obsolete
>Index: Tools/ChangeLog >=================================================================== >--- Tools/ChangeLog (revision 238578) >+++ Tools/ChangeLog (working copy) >@@ -1,3 +1,27 @@ >+2018-11-27 Zhifei Fang <zhifei_fang@apple.com> >+ >+ Layout test will generate a perf metric file to results dir. >+ https://bugs.webkit.org/show_bug.cgi?id=192030 >+ <rdar://problem/32779516> >+ >+ Reviewed by NOBODY (OOPS!). >+ >+ * Scripts/webkitpy/layout_tests/controllers/manager.py: >+ (Manager._end_test_run): >+ (Manager._output_perf_metrics): >+ (Manager._print_expectation_line_for_test): >+ * Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py: >+ (add_test_perf_metric): >+ (test_perf_metrics): >+ * Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py: >+ (JSONGeneratorTest.test_test_timings_trie): >+ (JSONGeneratorTest): >+ (JSONGeneratorTest.test_test_perf_metrics): >+ * Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py: >+ (RebaselineTest.test_reset_results): >+ (RebaselineTest.test_missing_results): >+ (RebaselineTest.test_new_baseline): >+ > 2018-11-27 Chris Dumez <cdumez@apple.com> > > Regression(PSON) crash under WebPageProxy::didReceiveServerRedirectForProvisionalLoadForFrame() >Index: Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py >=================================================================== >--- Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py (revision 238523) >+++ Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py (working copy) >@@ -879,7 +879,7 @@ class RebaselineTest(unittest.TestCase, > tests_included=True, host=host, new_results=True) > file_list = host.filesystem.written_files.keys() > self.assertEqual(details.exit_code, 0) >- self.assertEqual(len(file_list), 8) >+ self.assertEqual(len(file_list), 9) > self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err) > self.assertBaselines(file_list, 
"failures/expected/missing_image", [".txt", ".png"], err) > >@@ -895,7 +895,7 @@ class RebaselineTest(unittest.TestCase, > tests_included=True, host=host, new_results=True) > file_list = host.filesystem.written_files.keys() > self.assertEqual(details.exit_code, 0) >- self.assertEqual(len(file_list), 10) >+ self.assertEqual(len(file_list), 11) > self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err) > self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err) > self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err) >@@ -909,7 +909,7 @@ class RebaselineTest(unittest.TestCase, > tests_included=True, host=host, new_results=True) > file_list = host.filesystem.written_files.keys() > self.assertEqual(details.exit_code, 0) >- self.assertEqual(len(file_list), 8) >+ self.assertEqual(len(file_list), 9) > self.assertBaselines(file_list, > "platform/test-mac-leopard/passes/image", [".txt", ".png"], err) > self.assertBaselines(file_list, >Index: Tools/Scripts/webkitpy/layout_tests/controllers/manager.py >=================================================================== >--- Tools/Scripts/webkitpy/layout_tests/controllers/manager.py (revision 238523) >+++ Tools/Scripts/webkitpy/layout_tests/controllers/manager.py (working copy) >@@ -313,6 +313,7 @@ class Manager(object): > exit_code = -1 > if not self._options.dry_run: > self._port.print_leaks_summary() >+ self._output_perf_metrics(end_time - start_time, initial_results) > self._upload_json_files(summarized_results, initial_results, results_including_passes, start_time, end_time) > > results_path = self._filesystem.join(self._results_directory, "results.html") >@@ -405,6 +406,11 @@ class Manager(object): > (result.type != test_expectations.MISSING) and > (result.type != test_expectations.CRASH or include_crashes))] > >+ def _output_perf_metrics(self, run_time, initial_results): >+ perf_metrics_json = 
json_results_generator.test_perf_metrics(run_time, initial_results.results_by_name.values()) >+ perf_metrics_path = self._filesystem.join(self._results_directory, "layout_test_perf_metrics.json") >+ self._filesystem.write_text_file(perf_metrics_path, json.dumps(perf_metrics_json)) >+ > def _upload_json_files(self, summarized_results, initial_results, results_including_passes=None, start_time=None, end_time=None): > """Writes the results of the test run as JSON files into the results > dir and upload the files to the appengine server. >@@ -543,7 +549,7 @@ class Manager(object): > def _print_expectation_line_for_test(self, format_string, test): > line = self._expectations.model().get_expectation_line(test) > print(format_string.format(test, line.expected_behavior, self._expectations.readable_filename_and_line_number(line), line.original_string or '')) >- >+ > def _print_expectations_for_subset(self, device_class, test_col_width, tests_to_run, tests_to_skip={}): > format_string = '{{:{width}}} {{}} {{}} {{}}'.format(width=test_col_width) > if tests_to_skip: >Index: Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py >=================================================================== >--- Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py (revision 238523) >+++ Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py (working copy) >@@ -114,12 +114,73 @@ def test_timings_trie(port, individual_t > trie = {} > for test_result in individual_test_timings: > test = test_result.test_name >- > add_path_to_trie(test, int(1000 * test_result.test_run_time), trie) > > return trie > > >+def add_test_perf_metric(path, time, tests, depth, target_depth): >+ if not "/" in path: >+ tests["tests"][path] = { >+ "metrics": { >+ "Time": { >+ "current": [time], >+ } >+ } >+ } >+ return >+ >+ directory, slash, rest = path.partition("/") >+ if depth == target_depth: >+ if directory not in tests["tests"]: >+ 
tests["tests"][directory] = { >+ "metrics": { >+ "Time": { >+ "current": [time], >+ }}} >+ else: >+ tests["tests"][directory]["metrics"]["Time"]["current"][0] += time >+ return >+ else: >+ if directory not in tests["tests"]: >+ tests["tests"][directory] = { >+ "metrics": { >+ "Time": ["Total", "Arithmetic"], >+ }, >+ "tests": {} >+ } >+ add_test_perf_metric(rest, time, tests["tests"][directory], depth + 1, target_depth) >+ >+ >+def test_perf_metrics(run_time, individual_test_timings): >+ """ >+ Output two performance metrics >+ 1. run time, which is how much time is consumed by the layout tests script >+ 2. run time of first-level and second-level of test categories >+ """ >+ total_run_time = 0 >+ >+ for test_result in individual_test_timings: >+ total_run_time += int(1000 * test_result.test_run_time) >+ >+ perf_metric = { >+ "layout_tests": { >+ "metrics": { >+ "Time": ["Total", "Arithmetic"], >+ }, >+ "tests": {} >+ }, >+ "layout_tests_run_time": { >+ "metrics": { >+ "Time": {"current": [run_time]}, >+ }}} >+ for test_result in individual_test_timings: >+ test = test_result.test_name >+ # for now, we only send two levels of categories >+ add_test_perf_metric(test, int(1000 * test_result.test_run_time), perf_metric["layout_tests"], 1, 2) >+ return perf_metric >+ >+ >+ # FIXME: We already have a TestResult class in test_results.py > class TestResult(object): > """A simple class that represents a single test result.""" >Index: Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py >=================================================================== >--- Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py (revision 238523) >+++ Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py (working copy) >@@ -226,3 +226,42 @@ class JSONGeneratorTest(unittest.TestCas > } > > self.assertEqual(json.dumps(trie), json.dumps(expected_trie)) >+ >+ def test_test_perf_metrics(self): >+ 
individual_test_timings = [] >+ individual_test_timings.append(json_results_generator.TestResult('foo/bar/baz.html', elapsed_time=1.2)) >+ individual_test_timings.append(json_results_generator.TestResult('foo/bar/ba.html', elapsed_time=1.4)) >+ individual_test_timings.append(json_results_generator.TestResult('bar.html', elapsed_time=0.0001)) >+ metrics = json_results_generator.test_perf_metrics(1200, individual_test_timings) >+ >+ expected_metrics = { >+ "layout_tests": { >+ "metrics": { >+ "Time": ["Total", "Arithmetic"], >+ }, >+ "tests": { >+ "foo": { >+ "metrics": { >+ "Time": ["Total", "Arithmetic"], >+ }, >+ "tests": { >+ "bar": { >+ "metrics": { >+ "Time": {"current": [2600]}, >+ } >+ } >+ } >+ }, >+ "bar.html": { >+ "metrics": { >+ "Time": {"current": [0]}, >+ } >+ } >+ } >+ }, >+ "layout_tests_run_time": { >+ "metrics": { >+ "Time": {"current": [1200]}, >+ } >+ }} >+ self.assertEqual(json.dumps(metrics), json.dumps(expected_metrics))
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Formatted Diff
|
Diff
Attachments on
bug 192030
:
355780
|
355784
|
355795
|
355797
|
355890
|
355892
|
356101
|
356194
|
356444
|
356513
|
358550
|
358564
|
358613