<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>[197505] trunk/Tools</title>
</head>
<body>

<style type="text/css"><!--
#msg dl.meta { border: 1px #006 solid; background: #369; padding: 6px; color: #fff; }
#msg dl.meta dt { float: left; width: 6em; font-weight: bold; }
#msg dt:after { content:':';}
#msg dl, #msg dt, #msg ul, #msg li, #header, #footer, #logmsg { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt;  }
#msg dl a { font-weight: bold}
#msg dl a:link    { color:#fc3; }
#msg dl a:active  { color:#ff0; }
#msg dl a:visited { color:#cc6; }
h3 { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt; font-weight: bold; }
#msg pre { overflow: auto; background: #ffc; border: 1px #fa0 solid; padding: 6px; }
#logmsg { background: #ffc; border: 1px #fa0 solid; padding: 1em 1em 0 1em; }
#logmsg p, #logmsg pre, #logmsg blockquote { margin: 0 0 1em 0; }
#logmsg p, #logmsg li, #logmsg dt, #logmsg dd { line-height: 14pt; }
#logmsg h1, #logmsg h2, #logmsg h3, #logmsg h4, #logmsg h5, #logmsg h6 { margin: .5em 0; }
#logmsg h1:first-child, #logmsg h2:first-child, #logmsg h3:first-child, #logmsg h4:first-child, #logmsg h5:first-child, #logmsg h6:first-child { margin-top: 0; }
#logmsg ul, #logmsg ol { padding: 0; list-style-position: inside; margin: 0 0 0 1em; }
#logmsg ul { text-indent: -1em; padding-left: 1em; }
#logmsg ol { text-indent: -1.5em; padding-left: 1.5em; }
#logmsg > ul, #logmsg > ol { margin: 0 0 1em 0; }
#logmsg pre { background: #eee; padding: 1em; }
#logmsg blockquote { border: 1px solid #fa0; border-left-width: 10px; padding: 1em 1em 0 1em; background: white;}
#logmsg dl { margin: 0; }
#logmsg dt { font-weight: bold; }
#logmsg dd { margin: 0; padding: 0 0 0.5em 0; }
#logmsg dd:before { content:'\00bb';}
#logmsg table { border-spacing: 0px; border-collapse: collapse; border-top: 4px solid #fa0; border-bottom: 1px solid #fa0; background: #fff; }
#logmsg table th { text-align: left; font-weight: normal; padding: 0.2em 0.5em; border-top: 1px dotted #fa0; }
#logmsg table td { text-align: right; border-top: 1px dotted #fa0; padding: 0.2em 0.5em; }
#logmsg table thead th { text-align: center; border-bottom: 1px solid #fa0; }
#logmsg table th.Corner { text-align: left; }
#logmsg hr { border: none 0; border-top: 2px dashed #fa0; height: 1px; }
#header, #footer { color: #fff; background: #636; border: 1px #300 solid; padding: 6px; }
#patch { width: 100%; }
#patch h4 {font-family: verdana,arial,helvetica,sans-serif;font-size:10pt;padding:8px;background:#369;color:#fff;margin:0;}
#patch .propset h4, #patch .binary h4 {margin:0;}
#patch pre {padding:0;line-height:1.2em;margin:0;}
#patch .diff {width:100%;background:#eee;padding: 0 0 10px 0;overflow:auto;}
#patch .propset .diff, #patch .binary .diff  {padding:10px 0;}
#patch span {display:block;padding:0 10px;}
#patch .modfile, #patch .addfile, #patch .delfile, #patch .propset, #patch .binary, #patch .copfile {border:1px solid #ccc;margin:10px 0;}
#patch ins {background:#dfd;text-decoration:none;display:block;padding:0 10px;}
#patch del {background:#fdd;text-decoration:none;display:block;padding:0 10px;}
#patch .lines, .info {color:#888;background:#fff;}
--></style>
<div id="msg">
<dl class="meta">
<dt>Revision</dt> <dd><a href="http://trac.webkit.org/projects/webkit/changeset/197505">197505</a></dd>
<dt>Author</dt> <dd>clopez@igalia.com</dd>
<dt>Date</dt> <dd>2016-03-03 08:40:19 -0800 (Thu, 03 Mar 2016)</dd>
</dl>

<h3>Log Message</h3>
<pre>[GTK] Run the run-benchmark script on the performance bot.
https://bugs.webkit.org/show_bug.cgi?id=154595

Reviewed by Carlos Garcia Campos.

* BuildSlaveSupport/build.webkit.org-config/master.cfg: Add new RunBenchmarkTests step on the perf bots (only for the GTK+ port at this moment).
(RunBenchmarkTests):
(RunBenchmarkTests.start):
(RunBenchmarkTests.getText):
(RunBenchmarkTests.getText2):
(BuildAndPerfTestFactory.__init__):
(DownloadAndPerfTestFactory.__init__):
* BuildSlaveSupport/build.webkit.org-config/mastercfg_unittest.py: Add new test for RunBenchmarkTests and update expected steps of GTK+ perf bot.
(RunBenchmarkTest):
(RunBenchmarkTest.assertResults):
(RunBenchmarkTest.test_success):
(RunBenchmarkTest.test_tests_failed):
* Scripts/webkitpy/benchmark_runner/benchmark_runner.py: Log both the current iteration and the total iterations for the current benchmark/plan.
(BenchmarkRunner._run_benchmark):
* Scripts/webkitpy/benchmark_runner/browser_driver/gtk_minibrowser_driver.py: Use python logging also for errors.
(GTKMiniBrowserDriver.close_browsers):
* Scripts/webkitpy/benchmark_runner/run_benchmark.py: Implement support for running all available benchmark plans.
(parse_args):
(start):
(main):</pre>

<h3>Modified Paths</h3>
<ul>
<li><a href="#trunkToolsBuildSlaveSupportbuildwebkitorgconfigmastercfg">trunk/Tools/BuildSlaveSupport/build.webkit.org-config/master.cfg</a></li>
<li><a href="#trunkToolsBuildSlaveSupportbuildwebkitorgconfigmastercfg_unittestpy">trunk/Tools/BuildSlaveSupport/build.webkit.org-config/mastercfg_unittest.py</a></li>
<li><a href="#trunkToolsChangeLog">trunk/Tools/ChangeLog</a></li>
<li><a href="#trunkToolsScriptswebkitpybenchmark_runnerbenchmark_runnerpy">trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py</a></li>
<li><a href="#trunkToolsScriptswebkitpybenchmark_runnerbrowser_drivergtk_minibrowser_driverpy">trunk/Tools/Scripts/webkitpy/benchmark_runner/browser_driver/gtk_minibrowser_driver.py</a></li>
<li><a href="#trunkToolsScriptswebkitpybenchmark_runnerrun_benchmarkpy">trunk/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py</a></li>
</ul>

</div>
<div id="patch">
<h3>Diff</h3>
<a id="trunkToolsBuildSlaveSupportbuildwebkitorgconfigmastercfg"></a>
<div class="modfile"><h4>Modified: trunk/Tools/BuildSlaveSupport/build.webkit.org-config/master.cfg (197504 => 197505)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Tools/BuildSlaveSupport/build.webkit.org-config/master.cfg        2016-03-03 14:09:22 UTC (rev 197504)
+++ trunk/Tools/BuildSlaveSupport/build.webkit.org-config/master.cfg        2016-03-03 16:40:19 UTC (rev 197505)
</span><span class="lines">@@ -701,7 +701,31 @@
</span><span class="cx"> 
</span><span class="cx">         return [self.name]
</span><span class="cx"> 
</span><ins>+class RunBenchmarkTests(shell.Test):
+    name = &quot;benchmark-test&quot;
+    description = [&quot;benchmark tests running&quot;]
+    descriptionDone = [&quot;benchmark tests&quot;]
+    # Buildbot default timeout without output for a step is 1200.
+    # The current maximum timeout for a benchmark plan is also 1200.
+    # So raise the buildbot timeout to avoid aborting this whole step when a test timeouts.
+    timeout = 1500
+    command = [&quot;python&quot;, &quot;./Tools/Scripts/run-benchmark&quot;, &quot;--allplans&quot;]
</ins><span class="cx"> 
</span><ins>+    def start(self):
+        platform = self.getProperty(&quot;platform&quot;)
+        if platform == &quot;gtk&quot;:
+            command += [&quot;--platform&quot;, &quot;gtk&quot;, &quot;--browser&quot;, &quot;minibrowser&quot;]
+        self.setCommand(self.command)
+        return shell.Test.start(self)
+
+    def getText(self, cmd, results):
+        return self.getText2(cmd, results)
+
+    def getText2(self, cmd, results):
+        if results != SUCCESS:
+            return [&quot;%d benchmark tests failed&quot; % cmd.rc]
+        return [self.name]
+
</ins><span class="cx"> class ArchiveTestResults(shell.ShellCommand):
</span><span class="cx">     command = [&quot;python&quot;, &quot;./Tools/BuildSlaveSupport/test-result-archive&quot;,
</span><span class="cx">                WithProperties(&quot;--platform=%(platform)s&quot;), WithProperties(&quot;--%(configuration)s&quot;), &quot;archive&quot;]
</span><span class="lines">@@ -883,6 +907,8 @@
</span><span class="cx">         Factory.__init__(self, platform, configuration, architectures, False, additionalArguments, SVNMirror, **kwargs)
</span><span class="cx">         self.addStep(CompileWebKit())
</span><span class="cx">         self.addStep(RunAndUploadPerfTests())
</span><ins>+        if platform == &quot;gtk&quot;:
+            self.addStep(RunBenchmarkTests())
</ins><span class="cx"> 
</span><span class="cx"> class DownloadAndPerfTestFactory(Factory):
</span><span class="cx">     def __init__(self, platform, configuration, architectures, additionalArguments=None, SVNMirror=None, **kwargs):
</span><span class="lines">@@ -890,6 +916,8 @@
</span><span class="cx">         self.addStep(DownloadBuiltProduct())
</span><span class="cx">         self.addStep(ExtractBuiltProduct())
</span><span class="cx">         self.addStep(RunAndUploadPerfTests())
</span><ins>+        if platform == &quot;gtk&quot;:
+            self.addStep(RunBenchmarkTests())
</ins><span class="cx"> 
</span><span class="cx"> class PlatformSpecificScheduler(AnyBranchScheduler):
</span><span class="cx">     def __init__(self, platform, branch, **kwargs):
</span></span></pre></div>
<a id="trunkToolsBuildSlaveSupportbuildwebkitorgconfigmastercfg_unittestpy"></a>
<div class="modfile"><h4>Modified: trunk/Tools/BuildSlaveSupport/build.webkit.org-config/mastercfg_unittest.py (197504 => 197505)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Tools/BuildSlaveSupport/build.webkit.org-config/mastercfg_unittest.py        2016-03-03 14:09:22 UTC (rev 197504)
+++ trunk/Tools/BuildSlaveSupport/build.webkit.org-config/mastercfg_unittest.py        2016-03-03 16:40:19 UTC (rev 197505)
</span><span class="lines">@@ -414,7 +414,7 @@
</span><span class="cx">     'GTK Linux 64-bit Debug (Build)' : ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'jhbuild', 'compile-webkit', 'archive-built-product', 'upload', 'trigger'],
</span><span class="cx">     'GTK Linux 64-bit Debug (Tests)' : ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'jhbuild', 'download-built-product', 'extract-built-product', 'jscore-test', 'layout-test', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'archive-test-results', 'upload', 'MasterShellCommand', 'API tests', 'WebKit GObject DOM bindings API break tests'],
</span><span class="cx">     'GTK Linux 64-bit Release (Build)' : ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'jhbuild', 'compile-webkit', 'archive-built-product', 'upload', 'trigger'],
</span><del>-    'GTK Linux 64-bit Release (Perf)' : ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'jhbuild', 'download-built-product', 'extract-built-product', 'perf-test'],
</del><ins>+    'GTK Linux 64-bit Release (Perf)' : ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'jhbuild', 'download-built-product', 'extract-built-product', 'perf-test', 'benchmark-test'],
</ins><span class="cx">     'GTK Linux 64-bit Release (Tests)' : ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'jhbuild', 'download-built-product', 'extract-built-product', 'jscore-test', 'layout-test', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'archive-test-results', 'upload', 'MasterShellCommand', 'API tests', 'WebKit GObject DOM bindings API break tests'],
</span><span class="cx">     'GTK Linux ARM Release' : ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'jhbuild', 'compile-webkit', 'jscore-test', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'API tests', 'WebKit GObject DOM bindings API break tests'],
</span><span class="cx"> 
</span><span class="lines">@@ -528,6 +528,22 @@
</span><span class="cx">         self.assertResults(-1, &quot;timeout&quot;)
</span><span class="cx"> 
</span><span class="cx"> 
</span><ins>+class RunBenchmarkTest(unittest.TestCase):
+    def assertResults(self, rc, expected_text):
+        cmd = StubRemoteCommand(rc, expected_text)
+        step = RunBenchmarkTests()
+        step.commandComplete(cmd)
+        actual_results = step.evaluateCommand(cmd)
+        actual_text = str(step.getText2(cmd, actual_results)[0])
+        self.assertEqual(expected_text, actual_text)
+
+    def test_success(self):
+        self.assertResults(0, &quot;benchmark-test&quot;)
+
+    def test_tests_failed(self):
+        self.assertResults(7, &quot;7 benchmark tests failed&quot;)
+
+
</ins><span class="cx"> # FIXME: We should run this file as part of test-webkitpy.
</span><span class="cx"> # Unfortunately test-webkitpy currently requires that unittests
</span><span class="cx"> # be located in a directory with a valid module name.
</span></span></pre></div>
<a id="trunkToolsChangeLog"></a>
<div class="modfile"><h4>Modified: trunk/Tools/ChangeLog (197504 => 197505)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Tools/ChangeLog        2016-03-03 14:09:22 UTC (rev 197504)
+++ trunk/Tools/ChangeLog        2016-03-03 16:40:19 UTC (rev 197505)
</span><span class="lines">@@ -1,3 +1,31 @@
</span><ins>+2016-03-03  Carlos Alberto Lopez Perez  &lt;clopez@igalia.com&gt;
+
+        [GTK] Run the run-benchmark script on the performance bot.
+        https://bugs.webkit.org/show_bug.cgi?id=154595
+
+        Reviewed by Carlos Garcia Campos.
+
+        * BuildSlaveSupport/build.webkit.org-config/master.cfg: Add new RunBenchmarkTests step on the perf bots (only for the GTK+ port at this moment).
+        (RunBenchmarkTests):
+        (RunBenchmarkTests.start):
+        (RunBenchmarkTests.getText):
+        (RunBenchmarkTests.getText2):
+        (BuildAndPerfTestFactory.__init__):
+        (DownloadAndPerfTestFactory.__init__):
+        * BuildSlaveSupport/build.webkit.org-config/mastercfg_unittest.py: Add new test for RunBenchmarkTests and update expected steps of GTK+ perf bot.
+        (RunBenchmarkTest):
+        (RunBenchmarkTest.assertResults):
+        (RunBenchmarkTest.test_success):
+        (RunBenchmarkTest.test_tests_failed):
+        * Scripts/webkitpy/benchmark_runner/benchmark_runner.py: Log both the current iteration as also the total iterations for the current benchmark/plan.
+        (BenchmarkRunner._run_benchmark):
+        * Scripts/webkitpy/benchmark_runner/browser_driver/gtk_minibrowser_driver.py: Use python logging also for errors.
+        (GTKMiniBrowserDriver.close_browsers):
+        * Scripts/webkitpy/benchmark_runner/run_benchmark.py: Implement support for running all available benchmark plans.
+        (parse_args):
+        (start):
+        (main):
+
</ins><span class="cx"> 2016-03-03  Gyuyoung Kim  &lt;gyuyoung.kim@webkit.org&gt;
</span><span class="cx"> 
</span><span class="cx">         [EFL] Remove u-szeged.hu SVN mirror in EFL perf bot
</span></span></pre></div>
<a id="trunkToolsScriptswebkitpybenchmark_runnerbenchmark_runnerpy"></a>
<div class="modfile"><h4>Modified: trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py (197504 => 197505)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py        2016-03-03 14:09:22 UTC (rev 197504)
+++ trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py        2016-03-03 16:40:19 UTC (rev 197505)
</span><span class="lines">@@ -62,7 +62,7 @@
</span><span class="cx">     def _run_benchmark(self, count, web_root):
</span><span class="cx">         results = []
</span><span class="cx">         for iteration in xrange(1, count + 1):
</span><del>-            _log.info('Start the iteration {current_iteration} of current benchmark'.format(current_iteration=iteration))
</del><ins>+            _log.info('Start the iteration {current_iteration} of {iterations} for current benchmark'.format(current_iteration=iteration, iterations=count))
</ins><span class="cx">             try:
</span><span class="cx">                 result = None
</span><span class="cx">                 self._http_server_driver.serve(web_root)
</span><span class="lines">@@ -78,7 +78,7 @@
</span><span class="cx">                 self._browser_driver.restore_env()
</span><span class="cx">                 self._browser_driver.close_browsers()
</span><span class="cx">                 self._http_server_driver.kill_server()
</span><del>-            _log.info('End of {current_iteration} iteration of current benchmark'.format(current_iteration=iteration))
</del><ins>+            _log.info('End the iteration {current_iteration} of {iterations} for current benchmark'.format(current_iteration=iteration, iterations=count))
</ins><span class="cx">         results = self._wrap(results)
</span><span class="cx">         self._dump(results, self._output_file if self._output_file else self._plan['output_file'])
</span><span class="cx">         self.show_results(results, self._scale_unit)
</span></span></pre></div>
<a id="trunkToolsScriptswebkitpybenchmark_runnerbrowser_drivergtk_minibrowser_driverpy"></a>
<div class="modfile"><h4>Modified: trunk/Tools/Scripts/webkitpy/benchmark_runner/browser_driver/gtk_minibrowser_driver.py (197504 => 197505)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Tools/Scripts/webkitpy/benchmark_runner/browser_driver/gtk_minibrowser_driver.py        2016-03-03 14:09:22 UTC (rev 197504)
+++ trunk/Tools/Scripts/webkitpy/benchmark_runner/browser_driver/gtk_minibrowser_driver.py        2016-03-03 16:40:19 UTC (rev 197505)
</span><span class="lines">@@ -49,4 +49,5 @@
</span><span class="cx">     def close_browsers(self):
</span><span class="cx">         super(GTKMiniBrowserDriver, self).close_browsers()
</span><span class="cx">         if self._minibrowser_process and self._minibrowser_process.returncode:
</span><del>-            sys.exit('MiniBrowser crashed with exitcode %d' % self._minibrowser_process.returncode)
</del><ins>+            _log.error('MiniBrowser crashed with exitcode %d' % self._minibrowser_process.returncode)
+            sys.exit(1)
</ins></span></pre></div>
<a id="trunkToolsScriptswebkitpybenchmark_runnerrun_benchmarkpy"></a>
<div class="modfile"><h4>Modified: trunk/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py (197504 => 197505)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py        2016-03-03 14:09:22 UTC (rev 197504)
+++ trunk/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py        2016-03-03 16:40:19 UTC (rev 197505)
</span><span class="lines">@@ -28,6 +28,7 @@
</span><span class="cx">     mutual_group = parser.add_mutually_exclusive_group(required=True)
</span><span class="cx">     mutual_group.add_argument('--read-results-json', dest='json_file', help='Specify file you want to format')
</span><span class="cx">     mutual_group.add_argument('--plan', dest='plan', help='Benchmark plan to run. e.g. speedometer, jetstream')
</span><ins>+    mutual_group.add_argument('--allplans', action='store_true', help='Run all available benchmark plans sequentially')
</ins><span class="cx"> 
</span><span class="cx">     args = parser.parse_args()
</span><span class="cx"> 
</span><span class="lines">@@ -45,6 +46,26 @@
</span><span class="cx">     if args.json_file:
</span><span class="cx">         BenchmarkRunner.show_results(json.load(open(args.json_file, 'r')), args.scale_unit)
</span><span class="cx">         return
</span><ins>+    if args.allplans:
+        failed = []
+        plandir = os.path.join(os.path.dirname(__file__), 'data/plans')
+        planlist = [os.path.splitext(f)[0] for f in os.listdir(plandir) if f.endswith('.plan')]
+        if not planlist:
+            raise Exception('Cant find any .plan file in directory %s' % plandir)
+        for plan in sorted(planlist):
+            _log.info('Starting benchmark plan: %s' % plan)
+            try:
+                runner = BenchmarkRunner(plan, args.localCopy, args.countOverride, args.buildDir, args.output, args.platform, args.browser, args.scale_unit, args.device_id)
+                runner.execute()
+                _log.info('Finished benchmark plan: %s' % plan)
+            except KeyboardInterrupt:
+                raise
+            except:
+                failed.append(plan)
+                _log.error('Error running benchmark plan: %s' % plan)
+        if failed:
+            _log.error('The following benchmark plans have failed: %s' % failed)
+        return len(failed)
</ins><span class="cx">     runner = BenchmarkRunner(args.plan, args.localCopy, args.countOverride, args.buildDir, args.output, args.platform, args.browser, args.scale_unit, args.device_id)
</span><span class="cx">     runner.execute()
</span><span class="cx"> 
</span><span class="lines">@@ -58,4 +79,4 @@
</span><span class="cx"> 
</span><span class="cx"> 
</span><span class="cx"> def main():
</span><del>-    start(parse_args())
</del><ins>+    return start(parse_args())
</ins></span></pre>
</div>
</div>

</body>
</html>