<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>[190779] trunk/Tools</title>
</head>
<body>

<style type="text/css"><!--
#msg dl.meta { border: 1px #006 solid; background: #369; padding: 6px; color: #fff; }
#msg dl.meta dt { float: left; width: 6em; font-weight: bold; }
#msg dt:after { content:':';}
#msg dl, #msg dt, #msg ul, #msg li, #header, #footer, #logmsg { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt;  }
#msg dl a { font-weight: bold}
#msg dl a:link    { color:#fc3; }
#msg dl a:active  { color:#ff0; }
#msg dl a:visited { color:#cc6; }
h3 { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt; font-weight: bold; }
#msg pre { overflow: auto; background: #ffc; border: 1px #fa0 solid; padding: 6px; }
#logmsg { background: #ffc; border: 1px #fa0 solid; padding: 1em 1em 0 1em; }
#logmsg p, #logmsg pre, #logmsg blockquote { margin: 0 0 1em 0; }
#logmsg p, #logmsg li, #logmsg dt, #logmsg dd { line-height: 14pt; }
#logmsg h1, #logmsg h2, #logmsg h3, #logmsg h4, #logmsg h5, #logmsg h6 { margin: .5em 0; }
#logmsg h1:first-child, #logmsg h2:first-child, #logmsg h3:first-child, #logmsg h4:first-child, #logmsg h5:first-child, #logmsg h6:first-child { margin-top: 0; }
#logmsg ul, #logmsg ol { padding: 0; list-style-position: inside; margin: 0 0 0 1em; }
#logmsg ul { text-indent: -1em; padding-left: 1em; }#logmsg ol { text-indent: -1.5em; padding-left: 1.5em; }
#logmsg > ul, #logmsg > ol { margin: 0 0 1em 0; }
#logmsg pre { background: #eee; padding: 1em; }
#logmsg blockquote { border: 1px solid #fa0; border-left-width: 10px; padding: 1em 1em 0 1em; background: white;}
#logmsg dl { margin: 0; }
#logmsg dt { font-weight: bold; }
#logmsg dd { margin: 0; padding: 0 0 0.5em 0; }
#logmsg dd:before { content:'\00bb';}
#logmsg table { border-spacing: 0px; border-collapse: collapse; border-top: 4px solid #fa0; border-bottom: 1px solid #fa0; background: #fff; }
#logmsg table th { text-align: left; font-weight: normal; padding: 0.2em 0.5em; border-top: 1px dotted #fa0; }
#logmsg table td { text-align: right; border-top: 1px dotted #fa0; padding: 0.2em 0.5em; }
#logmsg table thead th { text-align: center; border-bottom: 1px solid #fa0; }
#logmsg table th.Corner { text-align: left; }
#logmsg hr { border: none 0; border-top: 2px dashed #fa0; height: 1px; }
#header, #footer { color: #fff; background: #636; border: 1px #300 solid; padding: 6px; }
#patch { width: 100%; }
#patch h4 {font-family: verdana,arial,helvetica,sans-serif;font-size:10pt;padding:8px;background:#369;color:#fff;margin:0;}
#patch .propset h4, #patch .binary h4 {margin:0;}
#patch pre {padding:0;line-height:1.2em;margin:0;}
#patch .diff {width:100%;background:#eee;padding: 0 0 10px 0;overflow:auto;}
#patch .propset .diff, #patch .binary .diff  {padding:10px 0;}
#patch span {display:block;padding:0 10px;}
#patch .modfile, #patch .addfile, #patch .delfile, #patch .propset, #patch .binary, #patch .copfile {border:1px solid #ccc;margin:10px 0;}
#patch ins {background:#dfd;text-decoration:none;display:block;padding:0 10px;}
#patch del {background:#fdd;text-decoration:none;display:block;padding:0 10px;}
#patch .lines, .info {color:#888;background:#fff;}
--></style>
<div id="msg">
<dl class="meta">
<dt>Revision</dt> <dd><a href="http://trac.webkit.org/projects/webkit/changeset/190779">190779</a></dd>
<dt>Author</dt> <dd>dewei_zhu@apple.com</dd>
<dt>Date</dt> <dd>2015-10-08 22:53:12 -0700 (Thu, 08 Oct 2015)</dd>
</dl>

<h3>Log Message</h3>
<pre>Extend run-benchmark script to support human-readable results conversion.
https://bugs.webkit.org/show_bug.cgi?id=149944

Reviewed by Ryosuke Niwa.

Add '--read-results-json' and '--no-adjust-unit' options.
'--read-results-json' option converts result file to human readable format.
'--no-adjust-unit' option skips scientific notation conversion.
'--platform' defaults to 'osx' and '--browser' defaults to 'safari'.

* Scripts/webkitpy/benchmark_runner/benchmark_results.py:
(BenchmarkResults.format):
(BenchmarkResults._format_tests):
(BenchmarkResults._format_values):
* Scripts/webkitpy/benchmark_runner/benchmark_runner.py:
(BenchmarkRunner.__init__):
(BenchmarkRunner._run_benchmark):
(BenchmarkRunner._dump):
(BenchmarkRunner.show_results):
(BenchmarkRunner._show_results): Deleted.
* Scripts/webkitpy/benchmark_runner/run_benchmark.py:
(parse_args):
(start):</pre>

<h3>Modified Paths</h3>
<ul>
<li><a href="#trunkToolsChangeLog">trunk/Tools/ChangeLog</a></li>
<li><a href="#trunkToolsScriptswebkitpybenchmark_runnerbenchmark_resultspy">trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results.py</a></li>
<li><a href="#trunkToolsScriptswebkitpybenchmark_runnerbenchmark_runnerpy">trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py</a></li>
<li><a href="#trunkToolsScriptswebkitpybenchmark_runnerrun_benchmarkpy">trunk/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py</a></li>
</ul>

</div>
<div id="patch">
<h3>Diff</h3>
<a id="trunkToolsChangeLog"></a>
<div class="modfile"><h4>Modified: trunk/Tools/ChangeLog (190778 => 190779)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Tools/ChangeLog        2015-10-09 05:46:43 UTC (rev 190778)
+++ trunk/Tools/ChangeLog        2015-10-09 05:53:12 UTC (rev 190779)
</span><span class="lines">@@ -1,3 +1,29 @@
</span><ins>+2015-10-08  Dewei Zhu  &lt;dewei_zhu@apple.com&gt;
+
+        Extend run-benchmark script to support human-readable results conversion.
+        https://bugs.webkit.org/show_bug.cgi?id=149944
+
+        Reviewed by Ryosuke Niwa.
+
+        Add '--read-results-json' and '--no-adjust-unit' options.
+        '--read-results-json' option converts result file to human readable format.
+        '--no-adjust-unit' option skips scientific notation conversion.
+        '--platform' defaults to 'osx' and '--browser' defaults to 'safari'.
+
+        * Scripts/webkitpy/benchmark_runner/benchmark_results.py:
+        (BenchmarkResults.format):
+        (BenchmarkResults._format_tests):
+        (BenchmarkResults._format_values):
+        * Scripts/webkitpy/benchmark_runner/benchmark_runner.py:
+        (BenchmarkRunner.__init__):
+        (BenchmarkRunner._run_benchmark):
+        (BenchmarkRunner._dump):
+        (BenchmarkRunner.show_results):
+        (BenchmarkRunner._show_results): Deleted.
+        * Scripts/webkitpy/benchmark_runner/run_benchmark.py:
+        (parse_args):
+        (start):
+
</ins><span class="cx"> 2015-10-08  Daniel Bates  &lt;dabates@apple.com&gt;
</span><span class="cx"> 
</span><span class="cx">         Add iOS 9 device builder to WebKit Bot Watcher's Dashboard
</span></span></pre></div>
<a id="trunkToolsScriptswebkitpybenchmark_runnerbenchmark_resultspy"></a>
<div class="modfile"><h4>Modified: trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results.py (190778 => 190779)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results.py        2015-10-09 05:46:43 UTC (rev 190778)
+++ trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results.py        2015-10-09 05:53:12 UTC (rev 190779)
</span><span class="lines">@@ -48,11 +48,11 @@
</span><span class="cx">         self._lint_results(results)
</span><span class="cx">         self._results = self._aggregate_results(results)
</span><span class="cx"> 
</span><del>-    def format(self):
-        return self._format_tests(self._results)
</del><ins>+    def format(self, scale_unit):
+        return self._format_tests(self._results, scale_unit)
</ins><span class="cx"> 
</span><span class="cx">     @classmethod
</span><del>-    def _format_tests(self, tests, indent=''):
</del><ins>+    def _format_tests(cls, tests, scale_unit, indent=''):
</ins><span class="cx">         output = ''
</span><span class="cx">         config_name = 'current'
</span><span class="cx">         for test_name in sorted(tests.keys()):
</span><span class="lines">@@ -71,13 +71,13 @@
</span><span class="cx">                     output += ':' + metric_name + ':'
</span><span class="cx">                     if aggregator_name:
</span><span class="cx">                         output += aggregator_name + ':'
</span><del>-                    output += ' ' + self._format_values(metric_name, metric[aggregator_name][config_name]) + '\n'
</del><ins>+                    output += ' ' + cls._format_values(metric_name, metric[aggregator_name][config_name], scale_unit) + '\n'
</ins><span class="cx">             if 'tests' in test:
</span><del>-                output += self._format_tests(test['tests'], indent=(indent + ' ' * len(test_name)))
</del><ins>+                output += cls._format_tests(test['tests'], scale_unit, indent=(indent + ' ' * len(test_name)))
</ins><span class="cx">         return output
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><del>-    def _format_values(cls, metric_name, values):
</del><ins>+    def _format_values(cls, metric_name, values, scale_unit):
</ins><span class="cx">         values = map(float, values)
</span><span class="cx">         total = sum(values)
</span><span class="cx">         mean = total / len(values)
</span><span class="lines">@@ -93,6 +93,9 @@
</span><span class="cx"> 
</span><span class="cx">         unit = cls._unit_from_metric(metric_name)
</span><span class="cx"> 
</span><ins>+        if not scale_unit:
+            return ('{mean:.3f}{unit} stdev={delta:.1%}').format(mean=mean, delta=sample_stdev / mean, unit=unit)
+
</ins><span class="cx">         if unit == 'ms':
</span><span class="cx">             unit = 's'
</span><span class="cx">             mean = float(mean) / 1000
</span></span></pre></div>
<a id="trunkToolsScriptswebkitpybenchmark_runnerbenchmark_runnerpy"></a>
<div class="modfile"><h4>Modified: trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py (190778 => 190779)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py        2015-10-09 05:46:43 UTC (rev 190778)
+++ trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py        2015-10-09 05:53:12 UTC (rev 190779)
</span><span class="lines">@@ -24,7 +24,7 @@
</span><span class="cx"> 
</span><span class="cx"> class BenchmarkRunner(object):
</span><span class="cx"> 
</span><del>-    def __init__(self, plan_file, local_copy, count_override, build_dir, output_file, platform, browser, device_id=None):
</del><ins>+    def __init__(self, plan_file, local_copy, count_override, build_dir, output_file, platform, browser, scale_unit=True, device_id=None):
</ins><span class="cx">         try:
</span><span class="cx">             plan_file = self._find_plan_file(plan_file)
</span><span class="cx">             with open(plan_file, 'r') as fp:
</span><span class="lines">@@ -39,6 +39,7 @@
</span><span class="cx">                 self._http_server_driver.set_device_id(device_id)
</span><span class="cx">                 self._build_dir = os.path.abspath(build_dir) if build_dir else None
</span><span class="cx">                 self._output_file = output_file
</span><ins>+                self._scale_unit = scale_unit
</ins><span class="cx">                 self._device_id = device_id
</span><span class="cx">         except IOError as error:
</span><span class="cx">             _log.error('Can not open plan file: {plan_file} - Error {error}'.format(plan_file=plan_file, error=error))
</span><span class="lines">@@ -80,7 +81,7 @@
</span><span class="cx">             _log.info('End of {current_iteration} iteration of current benchmark'.format(current_iteration=iteration))
</span><span class="cx">         results = self._wrap(results)
</span><span class="cx">         self._dump(results, self._output_file if self._output_file else self._plan['output_file'])
</span><del>-        self._show_results(results)
</del><ins>+        self.show_results(results, self._scale_unit)
</ins><span class="cx"> 
</span><span class="cx">     def execute(self):
</span><span class="cx">         with BenchmarkBuilder(self._plan_name, self._plan) as web_root:
</span><span class="lines">@@ -94,7 +95,7 @@
</span><span class="cx">                 json.dump(results, fp)
</span><span class="cx">         except IOError as error:
</span><span class="cx">             _log.error('Cannot open output file: {output_file} - Error: {error}'.format(output_file=output_file, error=error))
</span><del>-            _log.error('Results are:\n {result}'.format(json.dumps(results)))
</del><ins>+            _log.error('Results are:\n {result}'.format(result=json.dumps(results)))
</ins><span class="cx"> 
</span><span class="cx">     @classmethod
</span><span class="cx">     def _wrap(cls, dicts):
</span><span class="lines">@@ -129,6 +130,6 @@
</span><span class="cx">         return a + b
</span><span class="cx"> 
</span><span class="cx">     @classmethod
</span><del>-    def _show_results(cls, results):
</del><ins>+    def show_results(cls, results, scale_unit=True):
</ins><span class="cx">         results = BenchmarkResults(results)
</span><del>-        print results.format()
</del><ins>+        print results.format(scale_unit)
</ins></span></pre></div>
<a id="trunkToolsScriptswebkitpybenchmark_runnerrun_benchmarkpy"></a>
<div class="modfile"><h4>Modified: trunk/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py (190778 => 190779)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py        2015-10-09 05:46:43 UTC (rev 190778)
+++ trunk/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py        2015-10-09 05:53:12 UTC (rev 190779)
</span><span class="lines">@@ -1,6 +1,7 @@
</span><span class="cx"> #!/usr/bin/env python
</span><span class="cx"> 
</span><span class="cx"> import argparse
</span><ins>+import json
</ins><span class="cx"> import logging
</span><span class="cx"> import platform
</span><span class="cx"> import os
</span><span class="lines">@@ -16,14 +17,17 @@
</span><span class="cx">     parser = argparse.ArgumentParser(description='Automate the browser based performance benchmarks')
</span><span class="cx">     parser.add_argument('--output-file', dest='output', default=None)
</span><span class="cx">     parser.add_argument('--build-directory', dest='buildDir', help='Path to the browser executable. e.g. WebKitBuild/Release/')
</span><del>-    parser.add_argument('--plan', dest='plan', required=True, help='Benchmark plan to run. e.g. speedometer, jetstream')
-    parser.add_argument('--platform', dest='platform', required=True, choices=BrowserDriverFactory.available_platforms())
</del><ins>+    parser.add_argument('--platform', dest='platform', default='osx', choices=BrowserDriverFactory.available_platforms())
</ins><span class="cx">     # FIXME: Should we add chrome as an option? Well, chrome uses webkit in iOS.
</span><del>-    parser.add_argument('--browser', dest='browser', required=True, choices=BrowserDriverFactory.available_browsers())
</del><ins>+    parser.add_argument('--browser', dest='browser', default='safari', choices=BrowserDriverFactory.available_browsers())
</ins><span class="cx">     parser.add_argument('--debug', action='store_true')
</span><span class="cx">     parser.add_argument('--local-copy', dest='localCopy', help='Path to a local copy of the benchmark. e.g. PerformanceTests/SunSpider/')
</span><span class="cx">     parser.add_argument('--count', dest='countOverride', type=int, help='Number of times to run the benchmark. e.g. 5')
</span><span class="cx">     parser.add_argument('--device-id', dest='device_id', default=None)
</span><ins>+    parser.add_argument('--no-adjust-unit', dest='scale_unit', action='store_false')
+    mutual_group = parser.add_mutually_exclusive_group(required=True)
+    mutual_group.add_argument('--read-results-json', dest='json_file', help='Specify file you want to format')
+    mutual_group.add_argument('--plan', dest='plan', help='Benchmark plan to run. e.g. speedometer, jetstream')
</ins><span class="cx"> 
</span><span class="cx">     args = parser.parse_args()
</span><span class="cx"> 
</span><span class="lines">@@ -38,7 +42,10 @@
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx"> def start(args):
</span><del>-    runner = BenchmarkRunner(args.plan, args.localCopy, args.countOverride, args.buildDir, args.output, args.platform, args.browser, args.device_id)
</del><ins>+    if args.json_file:
+        BenchmarkRunner.show_results(json.load(open(args.json_file, 'r')), args.scale_unit)
+        return
+    runner = BenchmarkRunner(args.plan, args.localCopy, args.countOverride, args.buildDir, args.output, args.platform, args.browser, args.scale_unit, args.device_id)
</ins><span class="cx">     runner.execute()
</span><span class="cx"> 
</span><span class="cx"> 
</span></span></pre>
</div>
</div>

</body>
</html>