WebKit Bugzilla
Attachment 371778 Details for
Bug 198729
: Extend run-benchmark to allow diagnosing before closing the browser on test failure.
Home
|
New
|
Browse
|
Search
|
[?]
|
Reports
|
Requests
|
Help
|
New Account
|
Log In
Remember
[x]
|
Forgot Password
Login:
[x]
[patch]
Patch
bug-198729-20190610143844.patch (text/plain), 8.49 KB, created by
dewei_zhu
on 2019-06-10 14:38:45 PDT
(
hide
)
Description:
Patch
Filename:
MIME Type:
Creator:
dewei_zhu
Created:
2019-06-10 14:38:45 PDT
Size:
8.49 KB
patch
obsolete
>Subversion Revision: 246278 >diff --git a/Tools/ChangeLog b/Tools/ChangeLog >index cce5ef6c316c7948079d6928ca30219384a25804..c8cd5cfff3b7742f428baafcd19a4db553b99f99 100644 >--- a/Tools/ChangeLog >+++ b/Tools/ChangeLog >@@ -1,3 +1,25 @@ >+2019-06-10 Dewei Zhu <dewei_zhu@apple.com> >+ >+ Extend run-benchmark to allow diagnosing before closing browser on test failure. >+ https://bugs.webkit.org/show_bug.cgi?id=198729 >+ >+ Reviewed by NOBODY (OOPS!). >+ >+ Add '--diagnose-directory' option to store diagnose information when test failed. >+ >+ * Scripts/webkitpy/benchmark_runner/benchmark_runner.py: >+ (BenchmarkRunner.__init__): >+ * Scripts/webkitpy/benchmark_runner/browser_driver/browser_driver.py: >+ (BrowserDriver.diagnose_test_failure): Add default no-op function to base class. >+ * Scripts/webkitpy/benchmark_runner/run_benchmark.py: Added '--diagnose-directory' option. >+ (parse_args): >+ (run_benchmark_plan): >+ * Scripts/webkitpy/benchmark_runner/webdriver_benchmark_runner.py: >+ (WebDriverBenchmarkRunner._run_one_test): Added 'diagnose_test_failure' invocation on test failure. >+ * Scripts/webkitpy/benchmark_runner/webserver_benchmark_runner.py: >+ (WebServerBenchmarkRunner.__init__): >+ (WebServerBenchmarkRunner._run_one_test): Added 'diagnose_test_failure' invocation on test failure. 
>+ > 2019-06-10 Wenson Hsieh <wenson_hsieh@apple.com> > > [iOS] fast/xsl tests are flaky when run after certain viewport shrink-to-fit tests >diff --git a/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py b/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py >index 94958c8ce8314c55dd2a3ae910d5a8424f6d5b74..d72c5297f45ef5836cd7134becc1923b144c3df9 100755 >--- a/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py >+++ b/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py >@@ -23,7 +23,7 @@ _log = logging.getLogger(__name__) > class BenchmarkRunner(object): > name = 'benchmark_runner' > >- def __init__(self, plan_file, local_copy, count_override, build_dir, output_file, platform, browser, browser_path, scale_unit=True, show_iteration_values=False, device_id=None): >+ def __init__(self, plan_file, local_copy, count_override, build_dir, output_file, platform, browser, browser_path, scale_unit=True, show_iteration_values=False, device_id=None, diagnose_dir=None): > try: > plan_file = self._find_plan_file(plan_file) > with open(plan_file, 'r') as fp: >@@ -38,6 +38,7 @@ class BenchmarkRunner(object): > self._browser_driver = BrowserDriverFactory.create(platform, browser) > self._browser_path = browser_path > self._build_dir = os.path.abspath(build_dir) if build_dir else None >+ self._diagnose_dir = os.path.abspath(diagnose_dir) if diagnose_dir else None > self._output_file = output_file > self._scale_unit = scale_unit > self._show_iteration_values = show_iteration_values >diff --git a/Tools/Scripts/webkitpy/benchmark_runner/browser_driver/browser_driver.py b/Tools/Scripts/webkitpy/benchmark_runner/browser_driver/browser_driver.py >index 2c1a86001749683322f3403ad39d5fc2111c6899..10ea7987606d9897a4f705f0d4676499c1c1a507 100755 >--- a/Tools/Scripts/webkitpy/benchmark_runner/browser_driver/browser_driver.py >+++ b/Tools/Scripts/webkitpy/benchmark_runner/browser_driver/browser_driver.py >@@ -42,6 +42,9 @@ class BrowserDriver(object): > def 
restore_env_after_all_testing(self): > pass > >+ def diagnose_test_failure(self, debug_directory, error): >+ pass >+ > @property > def webdriver_binary_path(self): > return get_driver_binary_path(self.browser_name) >diff --git a/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py b/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py >index a74f20783b2dd34f14e911c9d28e819fa533ba55..e230204561f4d42f9b0ecf9e067f6088d30ecec8 100755 >--- a/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py >+++ b/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py >@@ -47,6 +47,7 @@ def parse_args(): > parser.add_argument('--local-copy', help='Path to a local copy of the benchmark (e.g. PerformanceTests/SunSpider/).') > parser.add_argument('--device-id', default=None, help='Undocumented option for mobile device testing.') > parser.add_argument('--debug', action='store_true', help='Enable debug logging.') >+ parser.add_argument('--diagnose-directory', dest='diagnose_dir', default=None, help='Directory for storing diagnose information on test failure. 
It\'s up to browser driver implementation when this option is not specified.') > parser.add_argument('--no-adjust-unit', dest='scale_unit', action='store_false', help="Don't convert to scientific notation.") > parser.add_argument('--show-iteration-values', dest='show_iteration_values', action='store_true', help="Show the measured value for each iteration in addition to averages.") > >@@ -68,7 +69,7 @@ def parse_args(): > > def run_benchmark_plan(args, plan): > benchmark_runner_class = benchmark_runner_subclasses[args.driver] >- runner = benchmark_runner_class(plan, args.local_copy, args.count, args.build_dir, args.output_file, args.platform, args.browser, args.browser_path, args.scale_unit, args.show_iteration_values, args.device_id) >+ runner = benchmark_runner_class(plan, args.local_copy, args.count, args.build_dir, args.output_file, args.platform, args.browser, args.browser_path, args.scale_unit, args.show_iteration_values, args.device_id, args.diagnose_dir) > runner.execute() > > >diff --git a/Tools/Scripts/webkitpy/benchmark_runner/webdriver_benchmark_runner.py b/Tools/Scripts/webkitpy/benchmark_runner/webdriver_benchmark_runner.py >index b3b6cc837d2922ebd8fb6827b444b8b5d196f64f..8b391c52587532c209a54d09511d67e222c9e342 100755 >--- a/Tools/Scripts/webkitpy/benchmark_runner/webdriver_benchmark_runner.py >+++ b/Tools/Scripts/webkitpy/benchmark_runner/webdriver_benchmark_runner.py >@@ -26,6 +26,9 @@ class WebDriverBenchmarkRunner(BenchmarkRunner): > _log.info('Waiting on results from web browser') > result = WebDriverWait(driver, self._plan['timeout'], poll_frequency=1.0).until(self._get_result) > driver.quit() >+ except Exception as error: >+ self._browser_driver.diagnose_test_failure(self._diagnose_dir, error) >+ raise error > finally: > self._browser_driver.close_browsers() > >diff --git a/Tools/Scripts/webkitpy/benchmark_runner/webserver_benchmark_runner.py b/Tools/Scripts/webkitpy/benchmark_runner/webserver_benchmark_runner.py >index 
3cb14eca5e388c6446a4c696a35e36f0da01c0a2..203730fe6588751841c2376ac06999ce3705e97b 100755 >--- a/Tools/Scripts/webkitpy/benchmark_runner/webserver_benchmark_runner.py >+++ b/Tools/Scripts/webkitpy/benchmark_runner/webserver_benchmark_runner.py >@@ -15,10 +15,10 @@ _log = logging.getLogger(__name__) > class WebServerBenchmarkRunner(BenchmarkRunner): > name = 'webserver' > >- def __init__(self, plan_file, local_copy, count_override, build_dir, output_file, platform, browser, browser_path, scale_unit=True, show_iteration_values=False, device_id=None): >+ def __init__(self, plan_file, local_copy, count_override, build_dir, output_file, platform, browser, browser_path, scale_unit=True, show_iteration_values=False, device_id=None, diagnose_dir=None): > self._http_server_driver = HTTPServerDriverFactory.create(platform) > self._http_server_driver.set_device_id(device_id) >- super(WebServerBenchmarkRunner, self).__init__(plan_file, local_copy, count_override, build_dir, output_file, platform, browser, browser_path, scale_unit, show_iteration_values, device_id) >+ super(WebServerBenchmarkRunner, self).__init__(plan_file, local_copy, count_override, build_dir, output_file, platform, browser, browser_path, scale_unit, show_iteration_values, device_id, diagnose_dir) > > def _get_result(self, test_url): > result = self._browser_driver.add_additional_results(test_url, self._http_server_driver.fetch_result()) >@@ -33,6 +33,9 @@ class WebServerBenchmarkRunner(BenchmarkRunner): > self._browser_driver.launch_url(url, self._plan['options'], self._build_dir, self._browser_path) > with Timeout(self._plan['timeout']): > result = self._get_result(url) >+ except Exception as error: >+ self._browser_driver.diagnose_test_failure(self._diagnose_dir, error) >+ raise error > finally: > self._browser_driver.close_browsers() > self._http_server_driver.kill_server()
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Flags:
rniwa
:
review+
Actions:
View
|
Formatted Diff
|
Diff
Attachments on
bug 198729
: 371778