<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>[175082] trunk/Tools</title>
</head>
<body>
<style type="text/css"><!--
/* NOTE(review): this <style> element is emitted inside <body>; XHTML 1.1 only
   permits <style> in <head>. Browsers tolerate it, but the generator that
   produces these pages should be fixed upstream — confirm before relying on
   strict XML processing of this page. The <!-- ... --> wrapper is legacy
   script/style comment-hiding kept by the generator. */

/* --- Commit metadata box (#msg dl.meta): white-on-blue key/value list. --- */
#msg dl.meta { border: 1px #006 solid; background: #369; padding: 6px; color: #fff; }
#msg dl.meta dt { float: left; width: 6em; font-weight: bold; }
#msg dt:after { content:':';}
#msg dl, #msg dt, #msg ul, #msg li, #header, #footer, #logmsg { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt; }
/* Link colors chosen for contrast against the dark-blue meta background. */
#msg dl a { font-weight: bold}
#msg dl a:link { color:#fc3; }
#msg dl a:active { color:#ff0; }
#msg dl a:visited { color:#cc6; }
h3 { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt; font-weight: bold; }
#msg pre { overflow: auto; background: #ffc; border: 1px #fa0 solid; padding: 6px; }

/* --- Log-message panel (#logmsg): pale-yellow box with typographic defaults
   for rendered commit messages (headings, lists, tables, quotes). Not used on
   this page's markup, which puts the log in #msg > pre, but kept by the
   generator for other message formats. --- */
#logmsg { background: #ffc; border: 1px #fa0 solid; padding: 1em 1em 0 1em; }
#logmsg p, #logmsg pre, #logmsg blockquote { margin: 0 0 1em 0; }
#logmsg p, #logmsg li, #logmsg dt, #logmsg dd { line-height: 14pt; }
#logmsg h1, #logmsg h2, #logmsg h3, #logmsg h4, #logmsg h5, #logmsg h6 { margin: .5em 0; }
#logmsg h1:first-child, #logmsg h2:first-child, #logmsg h3:first-child, #logmsg h4:first-child, #logmsg h5:first-child, #logmsg h6:first-child { margin-top: 0; }
#logmsg ul, #logmsg ol { padding: 0; list-style-position: inside; margin: 0 0 0 1em; }
/* Hanging indents so wrapped list items align past their markers. */
#logmsg ul { text-indent: -1em; padding-left: 1em; }#logmsg ol { text-indent: -1.5em; padding-left: 1.5em; }
#logmsg > ul, #logmsg > ol { margin: 0 0 1em 0; }
#logmsg pre { background: #eee; padding: 1em; }
#logmsg blockquote { border: 1px solid #fa0; border-left-width: 10px; padding: 1em 1em 0 1em; background: white;}
#logmsg dl { margin: 0; }
#logmsg dt { font-weight: bold; }
/* Prefix each <dd> with a right-pointing guillemet (U+00BB). */
#logmsg dd { margin: 0; padding: 0 0 0.5em 0; }
#logmsg dd:before { content:'\00bb';}
#logmsg table { border-spacing: 0px; border-collapse: collapse; border-top: 4px solid #fa0; border-bottom: 1px solid #fa0; background: #fff; }
#logmsg table th { text-align: left; font-weight: normal; padding: 0.2em 0.5em; border-top: 1px dotted #fa0; }
#logmsg table td { text-align: right; border-top: 1px dotted #fa0; padding: 0.2em 0.5em; }
#logmsg table thead th { text-align: center; border-bottom: 1px solid #fa0; }
#logmsg table th.Corner { text-align: left; }
#logmsg hr { border: none 0; border-top: 2px dashed #fa0; height: 1px; }
#header, #footer { color: #fff; background: #636; border: 1px #300 solid; padding: 6px; }

/* --- Patch/diff view (#patch): one bordered box per changed file;
   <ins>/<del> render added/removed diff lines as green/red full-width rows. --- */
#patch { width: 100%; }
#patch h4 {font-family: verdana,arial,helvetica,sans-serif;font-size:10pt;padding:8px;background:#369;color:#fff;margin:0;}
#patch .propset h4, #patch .binary h4 {margin:0;}
#patch pre {padding:0;line-height:1.2em;margin:0;}
#patch .diff {width:100%;background:#eee;padding: 0 0 10px 0;overflow:auto;}
#patch .propset .diff, #patch .binary .diff {padding:10px 0;}
#patch span {display:block;padding:0 10px;}
/* File-change kinds: modified, added, deleted, property-set, binary, copied. */
#patch .modfile, #patch .addfile, #patch .delfile, #patch .propset, #patch .binary, #patch .copfile {border:1px solid #ccc;margin:10px 0;}
#patch ins {background:#dfd;text-decoration:none;display:block;padding:0 10px;}
#patch del {background:#fdd;text-decoration:none;display:block;padding:0 10px;}
#patch .lines, .info {color:#888;background:#fff;}
--></style>
<div id="msg">
<dl class="meta">
<dt>Revision</dt> <dd><a href="http://trac.webkit.org/projects/webkit/changeset/175082">175082</a></dd>
<dt>Author</dt> <dd>commit-queue@webkit.org</dd>
<dt>Date</dt> <dd>2014-10-22 19:54:24 -0700 (Wed, 22 Oct 2014)</dd>
</dl>
<h3>Log Message</h3>
<pre>commitqueuetask_unittest.py should be made more concise.
https://bugs.webkit.org/show_bug.cgi?id=137985
Patch by Jake Nielsen &lt;jacob_nielsen@apple.com&gt; on 2014-10-22
Reviewed by Alexey Proskuryakov.
* Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py:
Refactors tests to use MockSimpleTestPlanCommitQueue where possible.
(MockCommitQueue.__init__):
(MockCommitQueue.report_flaky_tests):
(MockCommitQueue):
(MockCommitQueue.get_reported_flaky_tests):
(MockSimpleTestPlanCommitQueue.__init__):
(MockSimpleTestPlanCommitQueue.run_command):
(MockSimpleTestPlanCommitQueue.did_run_clean_tests):
(CommitQueueTaskTest._run_and_expect_patch_analysis_result):
(test_land_failure):
(test_failed_archive):
(test_double_flaky_test_failure):
(test_test_failure):
(test_red_test_failure):
(test_very_red_tree_retry):
(test_red_tree_patch_rejection):
(test_one_flaky_test):
(test_tree_more_red_than_patch):
(MockCommitQueue.test_results): Deleted.
(test_simple_flaky_test_failure): Deleted.
(_expect_validate): Deleted.
(_expect_validate.MockDelegate): Deleted.
(_expect_validate.MockDelegate.refetch_patch): Deleted.
(_expect_validate.MockDelegate.expected_failures): Deleted.
(_mock_patch): Deleted.
(test_validate): Deleted.
* Scripts/webkitpy/tool/bot/patchanalysistask.py:
Fixes a bug that slipped through the cracks prior to this change.
(PatchAnalysisTask._test_patch):</pre>
<h3>Modified Paths</h3>
<ul>
<li><a href="#trunkToolsChangeLog">trunk/Tools/ChangeLog</a></li>
<li><a href="#trunkToolsScriptswebkitpytoolbotcommitqueuetask_unittestpy">trunk/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py</a></li>
<li><a href="#trunkToolsScriptswebkitpytoolbotpatchanalysistaskpy">trunk/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py</a></li>
</ul>
</div>
<div id="patch">
<h3>Diff</h3>
<a id="trunkToolsChangeLog"></a>
<div class="modfile"><h4>Modified: trunk/Tools/ChangeLog (175081 => 175082)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Tools/ChangeLog        2014-10-23 02:40:07 UTC (rev 175081)
+++ trunk/Tools/ChangeLog        2014-10-23 02:54:24 UTC (rev 175082)
</span><span class="lines">@@ -1,3 +1,41 @@
</span><ins>+2014-10-22 Jake Nielsen &lt;jacob_nielsen@apple.com&gt;
+
+ commitqueuetask_unittest.py should be made more concise.
+ https://bugs.webkit.org/show_bug.cgi?id=137985
+
+ Reviewed by Alexey Proskuryakov.
+
+ * Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py:
+ Refactors tests to use MockSimpleTestPlanCommitQueue where possible.
+ (MockCommitQueue.__init__):
+ (MockCommitQueue.report_flaky_tests):
+ (MockCommitQueue):
+ (MockCommitQueue.get_reported_flaky_tests):
+ (MockSimpleTestPlanCommitQueue.__init__):
+ (MockSimpleTestPlanCommitQueue.run_command):
+ (MockSimpleTestPlanCommitQueue.did_run_clean_tests):
+ (CommitQueueTaskTest._run_and_expect_patch_analysis_result):
+ (test_land_failure):
+ (test_failed_archive):
+ (test_double_flaky_test_failure):
+ (test_test_failure):
+ (test_red_test_failure):
+ (test_very_red_tree_retry):
+ (test_red_tree_patch_rejection):
+ (test_one_flaky_test):
+ (test_tree_more_red_than_patch):
+ (MockCommitQueue.test_results): Deleted.
+ (test_simple_flaky_test_failure): Deleted.
+ (_expect_validate): Deleted.
+ (_expect_validate.MockDelegate): Deleted.
+ (_expect_validate.MockDelegate.refetch_patch): Deleted.
+ (_expect_validate.MockDelegate.expected_failures): Deleted.
+ (_mock_patch): Deleted.
+ (test_validate): Deleted.
+ * Scripts/webkitpy/tool/bot/patchanalysistask.py:
+ Fixes a bug that slipped through the cracks prior to this change.
+ (PatchAnalysisTask._test_patch):
+
</ins><span class="cx"> 2014-10-22 Alexey Proskuryakov &lt;ap@apple.com&gt;
</span><span class="cx">
</span><span class="cx"> Flakiness dashboard should support OS X Yosemite
</span></span></pre></div>
<a id="trunkToolsScriptswebkitpytoolbotcommitqueuetask_unittestpy"></a>
<div class="modfile"><h4>Modified: trunk/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py (175081 => 175082)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py        2014-10-23 02:40:07 UTC (rev 175081)
+++ trunk/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py        2014-10-23 02:54:24 UTC (rev 175082)
</span><span class="lines">@@ -47,6 +47,7 @@
</span><span class="cx"> def __init__(self, error_plan):
</span><span class="cx"> self._error_plan = error_plan
</span><span class="cx"> self._failure_status_id = 0
</span><ins>+ self._flaky_tests = []
</ins><span class="cx">
</span><span class="cx"> def run_command(self, command):
</span><span class="cx"> _log.info("run_webkit_patch: %s" % command)
</span><span class="lines">@@ -72,9 +73,12 @@
</span><span class="cx"> return LayoutTestResults(test_results=[], did_exceed_test_failure_limit=False)
</span><span class="cx">
</span><span class="cx"> def report_flaky_tests(self, patch, flaky_results, results_archive):
</span><del>- flaky_tests = [result.test_name for result in flaky_results]
- _log.info("report_flaky_tests: patch='%s' flaky_tests='%s' archive='%s'" % (patch.id(), flaky_tests, results_archive.filename))
</del><ins>+ self._flaky_tests = [result.test_name for result in flaky_results]
+ _log.info("report_flaky_tests: patch='%s' flaky_tests='%s' archive='%s'" % (patch.id(), self._flaky_tests, results_archive.filename))
</ins><span class="cx">
</span><ins>+ def get_reported_flaky_tests(self):
+ return self._flaky_tests
+
</ins><span class="cx"> def archive_last_test_results(self, patch):
</span><span class="cx"> _log.info("archive_last_test_results: patch='%s'" % patch.id())
</span><span class="cx"> archive = Mock()
</span><span class="lines">@@ -120,6 +124,7 @@
</span><span class="cx"> class MockSimpleTestPlanCommitQueue(MockCommitQueue):
</span><span class="cx"> def __init__(self, first_test_failures, second_test_failures, clean_test_failures):
</span><span class="cx"> MockCommitQueue.__init__(self, [])
</span><ins>+ self._did_run_clean_tests = False
</ins><span class="cx"> self._patch_test_results = [first_test_failures, second_test_failures]
</span><span class="cx"> self._clean_test_results = [clean_test_failures]
</span><span class="cx"> self._current_test_results = []
</span><span class="lines">@@ -131,6 +136,7 @@
</span><span class="cx"> self._current_test_results = self._patch_test_results.pop(0)
</span><span class="cx"> else:
</span><span class="cx"> self._current_test_results = self._clean_test_results.pop(0)
</span><ins>+ self._did_run_clean_tests = True
</ins><span class="cx">
</span><span class="cx"> if self._current_test_results:
</span><span class="cx"> raise ScriptError("MOCK test failure")
</span><span class="lines">@@ -142,7 +148,10 @@
</span><span class="cx"> assert(isinstance(self._current_test_results, list))
</span><span class="cx"> return LayoutTestResults(test_results=map(self._mock_test_result, self._current_test_results), did_exceed_test_failure_limit=(len(self._current_test_results) >= 10))
</span><span class="cx">
</span><ins>+ def did_run_clean_tests(self):
+ return self._did_run_clean_tests
</ins><span class="cx">
</span><ins>+
</ins><span class="cx"> # We use GoldenScriptError to make sure that the code under test throws the
</span><span class="cx"> # correct (i.e., golden) exception.
</span><span class="cx"> class GoldenScriptError(ScriptError):
</span><span class="lines">@@ -150,7 +159,7 @@
</span><span class="cx">
</span><span class="cx">
</span><span class="cx"> class CommitQueueTaskTest(unittest.TestCase):
</span><del>- def _run_and_expect_patch_analysis_result(self, commit_queue, expected_analysis_result):
</del><ins>+ def _run_and_expect_patch_analysis_result(self, commit_queue, expected_analysis_result, expected_reported_flaky_tests=[], expect_clean_tests_to_run=False, expected_failure_status_id=0):
</ins><span class="cx"> tool = MockTool(log_executive=True)
</span><span class="cx"> patch = tool.bugs.fetch_attachment(10000)
</span><span class="cx"> task = CommitQueueTask(commit_queue, patch)
</span><span class="lines">@@ -165,7 +174,13 @@
</span><span class="cx"> analysis_result = PatchAnalysisResult.FAIL
</span><span class="cx">
</span><span class="cx"> self.assertEqual(analysis_result, expected_analysis_result)
</span><ins>+ self.assertEqual(commit_queue.get_reported_flaky_tests(), expected_reported_flaky_tests)
+ self.assertEqual(commit_queue.did_run_clean_tests(), expect_clean_tests_to_run)
</ins><span class="cx">
</span><ins>+ # The failure status only means anything if we actually failed.
+ if expected_analysis_result == PatchAnalysisResult.FAIL:
+ self.assertEqual(task.failure_status_id, expected_failure_status_id)
+
</ins><span class="cx"> def _run_through_task(self, commit_queue, expected_logs, expected_exception=None, expect_retry=False):
</span><span class="cx"> self.maxDiff = None
</span><span class="cx"> tool = MockTool(log_executive=True)
</span><span class="lines">@@ -314,17 +329,16 @@
</span><span class="cx"> """
</span><span class="cx"> self._run_through_task(commit_queue, expected_logs, expect_retry=True)
</span><span class="cx">
</span><del>- def test_flaky_test_failure(self):
</del><ins>+ def test_land_failure(self):
</ins><span class="cx"> commit_queue = MockCommitQueue([
</span><span class="cx"> None,
</span><span class="cx"> None,
</span><span class="cx"> None,
</span><span class="cx"> None,
</span><span class="cx"> None,
</span><del>- ScriptError("MOCK tests failure"),
</del><ins>+ None,
+ GoldenScriptError("MOCK land failure"),
</ins><span class="cx"> ])
</span><del>- # CommitQueueTask will only report flaky tests if we successfully parsed
- # results.json and returned a LayoutTestResults object, so we fake one.
</del><span class="cx"> expected_logs = """run_webkit_patch: ['clean']
</span><span class="cx"> command_passed: success_message='Cleaned working directory' patch='10000'
</span><span class="cx"> run_webkit_patch: ['update']
</span><span class="lines">@@ -336,307 +350,70 @@
</span><span class="cx"> run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
</span><span class="cx"> command_passed: success_message='Built patch' patch='10000'
</span><span class="cx"> run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
</span><del>-command_failed: failure_message='Patch does not pass tests' script_error='MOCK tests failure' patch='10000'
-archive_last_test_results: patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
</del><span class="cx"> command_passed: success_message='Passed tests' patch='10000'
</span><del>-report_flaky_tests: patch='10000' flaky_tests='[]' archive='mock-archive-10000.zip'
</del><span class="cx"> run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
</span><del>-command_passed: success_message='Landed patch' patch='10000'
</del><ins>+command_failed: failure_message='Unable to land patch' script_error='MOCK land failure' patch='10000'
</ins><span class="cx"> """
</span><del>- self._run_through_task(commit_queue, expected_logs)
</del><ins>+ # FIXME: This should really be expect_retry=True for a better user experiance.
+ self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
</ins><span class="cx">
</span><span class="cx"> def test_failed_archive(self):
</span><del>- commit_queue = MockCommitQueue([
- None,
- None,
- None,
- None,
- None,
- ScriptError("MOCK tests failure"),
- ])
- # It's possible delegate to fail to archive layout tests, don't try to report
- # flaky tests when that happens.
</del><ins>+ commit_queue = MockSimpleTestPlanCommitQueue(
+ first_test_failures=["Fail1"],
+ second_test_failures=[],
+ clean_test_failures=[])
+
+ # It's possible for the delegate to fail to archive layout tests,
+ # but we shouldn't try to report flaky tests when that happens.
</ins><span class="cx"> commit_queue.archive_last_test_results = lambda patch: None
</span><del>- expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
-command_passed: success_message='ChangeLog validated' patch='10000'
-run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
-command_passed: success_message='Built patch' patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK tests failure' patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_passed: success_message='Passed tests' patch='10000'
-run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
-command_passed: success_message='Landed patch' patch='10000'
-"""
- self._run_through_task(commit_queue, expected_logs)
</del><span class="cx">
</span><del>- def test_simple_flaky_test_failure(self):
- commit_queue = FailingTestCommitQueue([
- None,
- None,
- None,
- None,
- None,
- ScriptError("MOCK test failure"),
- None,
- ], [
- ["foo.html"],
- [],
- [],
- ])
</del><ins>+ self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.PASS)
</ins><span class="cx">
</span><del>- self.maxDiff = None
-
- expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
-command_passed: success_message='ChangeLog validated' patch='10000'
-run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
-command_passed: success_message='Built patch' patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
-archive_last_test_results: patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_passed: success_message='Passed tests' patch='10000'
-report_flaky_tests: patch='10000' flaky_tests='['foo.html']' archive='mock-archive-10000.zip'
-run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
-command_passed: success_message='Landed patch' patch='10000'
-"""
- tool = MockTool(log_executive=True)
- patch = tool.bugs.fetch_attachment(10000)
- task = CommitQueueTask(commit_queue, patch)
- success = OutputCapture().assert_outputs(self, task.run, expected_logs=expected_logs)
- self.assertTrue(success)
-
</del><span class="cx"> def test_double_flaky_test_failure(self):
</span><del>- commit_queue = FailingTestCommitQueue([
- None,
- None,
- None,
- None,
- None,
- ScriptError("MOCK test failure"),
- ScriptError("MOCK test failure again"),
- ], [
- ["foo.html"],
- ["bar.html"],
- ["foo.html"],
- ])
- # The (subtle) point of this test is that report_flaky_tests does not appear
- # in the expected_logs for this run.
</del><ins>+ commit_queue = MockSimpleTestPlanCommitQueue(
+ first_test_failures=["Fail1"],
+ second_test_failures=["Fail2"],
+ clean_test_failures=["Fail1"])
+
+ # The (subtle) point of this test is that report_flaky_tests does not get
+ # called for this run.
</ins><span class="cx"> # Note also that there is no attempt to run the tests w/o the patch.
</span><del>- expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
-command_passed: success_message='ChangeLog validated' patch='10000'
-run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
-command_passed: success_message='Built patch' patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
-archive_last_test_results: patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
-archive_last_test_results: patch='10000'
-"""
- tool = MockTool(log_executive=True)
- patch = tool.bugs.fetch_attachment(10000)
- task = CommitQueueTask(commit_queue, patch)
- success = OutputCapture().assert_outputs(self, task.run, expected_logs=expected_logs)
- self.assertFalse(success)
</del><ins>+ self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER)
</ins><span class="cx">
</span><span class="cx"> def test_test_failure(self):
</span><del>- commit_queue = MockCommitQueue([
- None,
- None,
- None,
- None,
- None,
- GoldenScriptError("MOCK test failure"),
- ScriptError("MOCK test failure again"),
- ])
- expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
-command_passed: success_message='ChangeLog validated' patch='10000'
-run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
-command_passed: success_message='Built patch' patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
-archive_last_test_results: patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
-archive_last_test_results: patch='10000'
-run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
-command_passed: success_message='Able to pass tests without patch' patch='10000'
-"""
- self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
</del><ins>+ commit_queue = MockSimpleTestPlanCommitQueue(
+ first_test_failures=["Fail1"],
+ second_test_failures=["Fail1"],
+ clean_test_failures=[])
</ins><span class="cx">
</span><ins>+ self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.FAIL, expect_clean_tests_to_run=True, expected_failure_status_id=1)
+
</ins><span class="cx"> def test_red_test_failure(self):
</span><del>- commit_queue = FailingTestCommitQueue([
- None,
- None,
- None,
- None,
- None,
- ScriptError("MOCK test failure"),
- ScriptError("MOCK test failure again"),
- ScriptError("MOCK clean test failure"),
- ], [
- ["foo.html"],
- ["foo.html"],
- ["foo.html"],
- ])
</del><ins>+ commit_queue = MockSimpleTestPlanCommitQueue(
+ first_test_failures=["Fail1"],
+ second_test_failures=["Fail1"],
+ clean_test_failures=["Fail1"])
</ins><span class="cx">
</span><del>- # Tests always fail, and always return the same results, but we
- # should still be able to land in this case!
- expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
-command_passed: success_message='ChangeLog validated' patch='10000'
-run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
-command_passed: success_message='Built patch' patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
-archive_last_test_results: patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
-archive_last_test_results: patch='10000'
-run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
-command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000'
-run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
-command_passed: success_message='Landed patch' patch='10000'
-"""
- self._run_through_task(commit_queue, expected_logs)
</del><ins>+ self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.PASS, expect_clean_tests_to_run=True)
</ins><span class="cx">
</span><span class="cx"> def test_very_red_tree_retry(self):
</span><span class="cx"> lots_of_failing_tests = map(lambda num: "test-%s.html" % num, range(0, 100))
</span><del>- commit_queue = FailingTestCommitQueue([
- None,
- None,
- None,
- None,
- None,
- ScriptError("MOCK test failure"),
- ScriptError("MOCK clean test failure"),
- ], [
- lots_of_failing_tests,
- lots_of_failing_tests,
- ])
</del><ins>+ commit_queue = MockSimpleTestPlanCommitQueue(
+ first_test_failures=lots_of_failing_tests,
+ second_test_failures=lots_of_failing_tests,
+ clean_test_failures=lots_of_failing_tests)
</ins><span class="cx">
</span><del>- # Tests always fail, and return so many failures that we do not
- # trust the results (see ExpectedFailures._can_trust_results) so we
- # just give up and retry the patch.
- expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
-command_passed: success_message='ChangeLog validated' patch='10000'
-run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
-command_passed: success_message='Built patch' patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
-archive_last_test_results: patch='10000'
-run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
-command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000'
-"""
- self._run_through_task(commit_queue, expected_logs, expect_retry=True)
</del><ins>+ self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expect_clean_tests_to_run=True)
</ins><span class="cx">
</span><span class="cx"> def test_red_tree_patch_rejection(self):
</span><del>- commit_queue = FailingTestCommitQueue([
- None,
- None,
- None,
- None,
- None,
- GoldenScriptError("MOCK test failure"),
- ScriptError("MOCK test failure again"),
- ScriptError("MOCK clean test failure"),
- ], [
- ["foo.html", "bar.html"],
- ["foo.html", "bar.html"],
- ["foo.html"],
- ])
</del><ins>+ commit_queue = MockSimpleTestPlanCommitQueue(
+ first_test_failures=["Fail1", "Fail2"],
+ second_test_failures=["Fail1", "Fail2"],
+ clean_test_failures=["Fail1"])
</ins><span class="cx">
</span><del>- # Tests always fail, but the clean tree only fails one test
- # while the patch fails two. So we should reject the patch!
- expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
-command_passed: success_message='ChangeLog validated' patch='10000'
-run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
-command_passed: success_message='Built patch' patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
-archive_last_test_results: patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
-archive_last_test_results: patch='10000'
-run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
-command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000'
-"""
- task = self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
- self.assertEqual(task.results_from_patch_test_run(task._patch).failing_tests(), ["foo.html", "bar.html"])
</del><span class="cx"> # failure_status_id should be of the test with patch (1), not the test without patch (2).
</span><del>- self.assertEqual(task.failure_status_id, 1)
</del><ins>+ self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.FAIL, expect_clean_tests_to_run=True, expected_failure_status_id=1)
</ins><span class="cx">
</span><del>- def test_land_failure(self):
- commit_queue = MockCommitQueue([
- None,
- None,
- None,
- None,
- None,
- None,
- GoldenScriptError("MOCK land failure"),
- ])
- expected_logs = """run_webkit_patch: ['clean']
-command_passed: success_message='Cleaned working directory' patch='10000'
-run_webkit_patch: ['update']
-command_passed: success_message='Updated working directory' patch='10000'
-run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
-command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
-command_passed: success_message='ChangeLog validated' patch='10000'
-run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
-command_passed: success_message='Built patch' patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_passed: success_message='Passed tests' patch='10000'
-run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
-command_failed: failure_message='Unable to land patch' script_error='MOCK land failure' patch='10000'
-"""
- # FIXME: This should really be expect_retry=True for a better user experiance.
- self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
-
</del><span class="cx"> def test_two_flaky_tests(self):
</span><span class="cx"> commit_queue = MockSimpleTestPlanCommitQueue(
</span><span class="cx"> first_test_failures=["Fail1"],
</span><span class="lines">@@ -652,7 +429,7 @@
</span><span class="cx"> second_test_failures=[],
</span><span class="cx"> clean_test_failures=[])
</span><span class="cx">
</span><del>- self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.PASS)
</del><ins>+ self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.PASS, expected_reported_flaky_tests=["Fail1"])
</ins><span class="cx">
</span><span class="cx"> def test_very_flaky_patch(self):
</span><span class="cx"> commit_queue = MockSimpleTestPlanCommitQueue(
</span><span class="lines">@@ -712,7 +489,7 @@
</span><span class="cx"> second_test_failures=["Fail1", "Fail2", "Fail3"],
</span><span class="cx"> clean_test_failures=["Fail1", "Fail2", "Fail3", "Fail4"])
</span><span class="cx">
</span><del>- self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.PASS)
</del><ins>+ self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.PASS, expect_clean_tests_to_run=True)
</ins><span class="cx">
</span><span class="cx"> def _expect_validate(self, patch, is_valid):
</span><span class="cx"> class MockDelegate(object):
</span></span></pre></div>
<a id="trunkToolsScriptswebkitpytoolbotpatchanalysistaskpy"></a>
<div class="modfile"><h4>Modified: trunk/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py (175081 => 175082)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py        2014-10-23 02:40:07 UTC (rev 175081)
+++ trunk/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py        2014-10-23 02:54:24 UTC (rev 175082)
</span><span class="lines">@@ -234,6 +234,7 @@
</span><span class="cx">
</span><span class="cx"> if self._build_and_test_without_patch():
</span><span class="cx"> # The error from the previous ._test() run is real, report it.
</span><ins>+ self.failure_status_id = first_failure_status_id
</ins><span class="cx"> return self.report_failure(first_results_archive, first_results, first_script_error)
</span><span class="cx">
</span><span class="cx"> self._clean_tree_results = self._delegate.test_results()
</span></span></pre>
</div>
</div>
</body>
</html>