<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>[192292] trunk/Source/JavaScriptCore</title>
</head>
<body>

<style type="text/css">
/* NOTE(review): the legacy HTML-comment wrapper that used to enclose this
   stylesheet has been removed. Under this document's declared XHTML 1.1
   doctype (XML parsing), that wrapper made the whole stylesheet a comment
   node, so no rules applied at all; modern HTML parsers never needed it.
   A <style> element inside <body> is also invalid XHTML 1.1 — ideally this
   block would live in <head>, but it is left in place to keep the
   surrounding generated markup untouched. */

/* Commit-metadata box (#msg): white-on-blue definition list. */
#msg dl.meta { border: 1px #006 solid; background: #369; padding: 6px; color: #fff; }
#msg dl.meta dt { float: left; width: 6em; font-weight: bold; }
#msg dt:after { content:':';}
#msg dl, #msg dt, #msg ul, #msg li, #header, #footer, #logmsg { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt;  }
/* Link colors chosen for contrast against the #369 background above. */
#msg dl a { font-weight: bold}
#msg dl a:link    { color:#fc3; }
#msg dl a:active  { color:#ff0; }
#msg dl a:visited { color:#cc6; }
h3 { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt; font-weight: bold; }
#msg pre { overflow: auto; background: #ffc; border: 1px #fa0 solid; padding: 6px; }

/* Log-message body (#logmsg): rendered commit message, yellow panel. */
#logmsg { background: #ffc; border: 1px #fa0 solid; padding: 1em 1em 0 1em; }
#logmsg p, #logmsg pre, #logmsg blockquote { margin: 0 0 1em 0; }
#logmsg p, #logmsg li, #logmsg dt, #logmsg dd { line-height: 14pt; }
#logmsg h1, #logmsg h2, #logmsg h3, #logmsg h4, #logmsg h5, #logmsg h6 { margin: .5em 0; }
#logmsg h1:first-child, #logmsg h2:first-child, #logmsg h3:first-child, #logmsg h4:first-child, #logmsg h5:first-child, #logmsg h6:first-child { margin-top: 0; }
#logmsg ul, #logmsg ol { padding: 0; list-style-position: inside; margin: 0 0 0 1em; }
/* Negative text-indent + matching padding gives hanging list markers. */
#logmsg ul { text-indent: -1em; padding-left: 1em; }#logmsg ol { text-indent: -1.5em; padding-left: 1.5em; }
#logmsg > ul, #logmsg > ol { margin: 0 0 1em 0; }
#logmsg pre { background: #eee; padding: 1em; }
#logmsg blockquote { border: 1px solid #fa0; border-left-width: 10px; padding: 1em 1em 0 1em; background: white;}
#logmsg dl { margin: 0; }
#logmsg dt { font-weight: bold; }
#logmsg dd { margin: 0; padding: 0 0 0.5em 0; }
#logmsg dd:before { content:'\00bb';}
#logmsg table { border-spacing: 0px; border-collapse: collapse; border-top: 4px solid #fa0; border-bottom: 1px solid #fa0; background: #fff; }
#logmsg table th { text-align: left; font-weight: normal; padding: 0.2em 0.5em; border-top: 1px dotted #fa0; }
#logmsg table td { text-align: right; border-top: 1px dotted #fa0; padding: 0.2em 0.5em; }
#logmsg table thead th { text-align: center; border-bottom: 1px solid #fa0; }
#logmsg table th.Corner { text-align: left; }
#logmsg hr { border: none 0; border-top: 2px dashed #fa0; height: 1px; }

/* Page chrome and the diff viewer (#patch): per-file panels with
   ins/del line highlighting (green additions, red deletions). */
#header, #footer { color: #fff; background: #636; border: 1px #300 solid; padding: 6px; }
#patch { width: 100%; }
#patch h4 {font-family: verdana,arial,helvetica,sans-serif;font-size:10pt;padding:8px;background:#369;color:#fff;margin:0;}
#patch .propset h4, #patch .binary h4 {margin:0;}
#patch pre {padding:0;line-height:1.2em;margin:0;}
#patch .diff {width:100%;background:#eee;padding: 0 0 10px 0;overflow:auto;}
#patch .propset .diff, #patch .binary .diff  {padding:10px 0;}
#patch span {display:block;padding:0 10px;}
#patch .modfile, #patch .addfile, #patch .delfile, #patch .propset, #patch .binary, #patch .copfile {border:1px solid #ccc;margin:10px 0;}
#patch ins {background:#dfd;text-decoration:none;display:block;padding:0 10px;}
#patch del {background:#fdd;text-decoration:none;display:block;padding:0 10px;}
#patch .lines, .info {color:#888;background:#fff;}
</style>
<div id="msg">
<dl class="meta">
<dt>Revision</dt> <dd><a href="http://trac.webkit.org/projects/webkit/changeset/192292">192292</a></dd>
<dt>Author</dt> <dd>benjamin@webkit.org</dd>
<dt>Date</dt> <dd>2015-11-10 21:10:43 -0800 (Tue, 10 Nov 2015)</dd>
</dl>

<h3>Log Message</h3>
<pre>Air should allocate registers
https://bugs.webkit.org/show_bug.cgi?id=150457

Patch by Benjamin Poulain &lt;bpoulain@apple.com&gt; on 2015-11-10
Reviewed by Filip Pizlo.

This is a direct implementation of the Iterated Register Coalescing allocator.

* JavaScriptCore.xcodeproj/project.pbxproj:
* b3/air/AirGenerate.cpp:
(JSC::B3::Air::generate):
* b3/air/AirInstInlines.h:
* b3/air/AirIteratedRegisterCoalescing.cpp: Added.
(JSC::B3::Air::MoveInstHelper&lt;Arg::GP&gt;::mayBeCoalescable):
(JSC::B3::Air::MoveInstHelper&lt;Arg::FP&gt;::mayBeCoalescable):
(JSC::B3::Air::AbsoluteTmpHelper&lt;Arg::GP&gt;::absoluteIndex):
(JSC::B3::Air::AbsoluteTmpHelper&lt;Arg::GP&gt;::tmpFromAbsoluteIndex):
(JSC::B3::Air::AbsoluteTmpHelper&lt;Arg::FP&gt;::absoluteIndex):
(JSC::B3::Air::AbsoluteTmpHelper&lt;Arg::FP&gt;::tmpFromAbsoluteIndex):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::IteratedRegisterCoalescingAllocator):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::build):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::allocate):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::getAlias):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::spilledTmp):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::allocatedReg):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::tmpArraySize):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::initializeDegrees):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::addEdges):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::addEdge):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::makeWorkList):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::simplify):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::forEachAdjacent):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::hasBeenSimplified):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::decrementDegree):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::forEachNodeMoves):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::isMoveRelated):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::enableMovesOnValue):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::enableMovesOnValueAndAdjacents):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::coalesce):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::canBeSafelyCoalesced):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::precoloredCoalescingHeuristic):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::conservativeHeuristic):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::addWorkList):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::combine):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::freeze):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::freezeMoves):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::selectSpill):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::assignColors):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::dumpInterferenceGraphInDot):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::dumpWorkLists):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::InterferenceEdge::InterferenceEdge):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::InterferenceEdge::first):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::InterferenceEdge::second):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::InterferenceEdge::operator==):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::InterferenceEdge::isHashTableDeletedValue):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::InterferenceEdge::hash):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::InterferenceEdgeHash::hash):
(JSC::B3::Air::IteratedRegisterCoalescingAllocator::InterferenceEdgeHash::equal):
(JSC::B3::Air::isUselessMoveInst):
(JSC::B3::Air::assignRegisterToTmpInProgram):
(JSC::B3::Air::addSpillAndFillToProgram):
(JSC::B3::Air::iteratedRegisterCoalescingOnType):
(JSC::B3::Air::iteratedRegisterCoalescing):
* b3/air/AirIteratedRegisterCoalescing.h: Added.
* b3/air/AirTmp.h:
(JSC::B3::Air::Tmp::internalValue):
(JSC::B3::Air::Tmp::tmpForInternalValue):
* b3/testb3.cpp:
(JSC::B3::testSpillGP):
(JSC::B3::run):</pre>

<h3>Modified Paths</h3>
<ul>
<li><a href="#trunkSourceJavaScriptCoreChangeLog">trunk/Source/JavaScriptCore/ChangeLog</a></li>
<li><a href="#trunkSourceJavaScriptCoreJavaScriptCorexcodeprojprojectpbxproj">trunk/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj</a></li>
<li><a href="#trunkSourceJavaScriptCoreb3airAirGeneratecpp">trunk/Source/JavaScriptCore/b3/air/AirGenerate.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCoreb3airAirInstInlinesh">trunk/Source/JavaScriptCore/b3/air/AirInstInlines.h</a></li>
<li><a href="#trunkSourceJavaScriptCoreb3airAirTmph">trunk/Source/JavaScriptCore/b3/air/AirTmp.h</a></li>
<li><a href="#trunkSourceJavaScriptCoreb3testb3cpp">trunk/Source/JavaScriptCore/b3/testb3.cpp</a></li>
</ul>

<h3>Added Paths</h3>
<ul>
<li><a href="#trunkSourceJavaScriptCoreb3airAirIteratedRegisterCoalescingcpp">trunk/Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCoreb3airAirIteratedRegisterCoalescingh">trunk/Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.h</a></li>
</ul>

</div>
<div id="patch">
<h3>Diff</h3>
<a id="trunkSourceJavaScriptCoreChangeLog"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/ChangeLog (192291 => 192292)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/ChangeLog        2015-11-11 04:29:59 UTC (rev 192291)
+++ trunk/Source/JavaScriptCore/ChangeLog        2015-11-11 05:10:43 UTC (rev 192292)
</span><span class="lines">@@ -1,3 +1,75 @@
</span><ins>+2015-11-10  Benjamin Poulain  &lt;bpoulain@apple.com&gt;
+
+        Air should allocate registers
+        https://bugs.webkit.org/show_bug.cgi?id=150457
+
+        Reviewed by Filip Pizlo.
+
+        This is a direct implementation of the Iterated Register Coalescing allocator.
+
+        * JavaScriptCore.xcodeproj/project.pbxproj:
+        * b3/air/AirGenerate.cpp:
+        (JSC::B3::Air::generate):
+        * b3/air/AirInstInlines.h:
+        * b3/air/AirIteratedRegisterCoalescing.cpp: Added.
+        (JSC::B3::Air::MoveInstHelper&lt;Arg::GP&gt;::mayBeCoalescable):
+        (JSC::B3::Air::MoveInstHelper&lt;Arg::FP&gt;::mayBeCoalescable):
+        (JSC::B3::Air::AbsoluteTmpHelper&lt;Arg::GP&gt;::absoluteIndex):
+        (JSC::B3::Air::AbsoluteTmpHelper&lt;Arg::GP&gt;::tmpFromAbsoluteIndex):
+        (JSC::B3::Air::AbsoluteTmpHelper&lt;Arg::FP&gt;::absoluteIndex):
+        (JSC::B3::Air::AbsoluteTmpHelper&lt;Arg::FP&gt;::tmpFromAbsoluteIndex):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::IteratedRegisterCoalescingAllocator):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::build):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::allocate):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::getAlias):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::spilledTmp):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::allocatedReg):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::tmpArraySize):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::initializeDegrees):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::addEdges):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::addEdge):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::makeWorkList):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::simplify):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::forEachAdjacent):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::hasBeenSimplified):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::decrementDegree):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::forEachNodeMoves):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::isMoveRelated):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::enableMovesOnValue):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::enableMovesOnValueAndAdjacents):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::coalesce):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::canBeSafelyCoalesced):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::precoloredCoalescingHeuristic):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::conservativeHeuristic):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::addWorkList):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::combine):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::freeze):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::freezeMoves):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::selectSpill):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::assignColors):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::dumpInterferenceGraphInDot):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::dumpWorkLists):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::InterferenceEdge::InterferenceEdge):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::InterferenceEdge::first):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::InterferenceEdge::second):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::InterferenceEdge::operator==):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::InterferenceEdge::isHashTableDeletedValue):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::InterferenceEdge::hash):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::InterferenceEdgeHash::hash):
+        (JSC::B3::Air::IteratedRegisterCoalescingAllocator::InterferenceEdgeHash::equal):
+        (JSC::B3::Air::isUselessMoveInst):
+        (JSC::B3::Air::assignRegisterToTmpInProgram):
+        (JSC::B3::Air::addSpillAndFillToProgram):
+        (JSC::B3::Air::iteratedRegisterCoalescingOnType):
+        (JSC::B3::Air::iteratedRegisterCoalescing):
+        * b3/air/AirIteratedRegisterCoalescing.h: Added.
+        * b3/air/AirTmp.h:
+        (JSC::B3::Air::Tmp::internalValue):
+        (JSC::B3::Air::Tmp::tmpForInternalValue):
+        * b3/testb3.cpp:
+        (JSC::B3::testSpillGP):
+        (JSC::B3::run):
+
</ins><span class="cx"> 2015-11-10  Filip Pizlo  &lt;fpizlo@apple.com&gt;
</span><span class="cx"> 
</span><span class="cx">         Unreviewed, add a FIXME referencing https://bugs.webkit.org/show_bug.cgi?id=151128.
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoreJavaScriptCorexcodeprojprojectpbxproj"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj (192291 => 192292)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj        2015-11-11 04:29:59 UTC (rev 192291)
+++ trunk/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj        2015-11-11 05:10:43 UTC (rev 192292)
</span><span class="lines">@@ -1074,6 +1074,8 @@
</span><span class="cx">                 1ACF7377171CA6FB00C9BB1E /* Weak.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1ACF7376171CA6FB00C9BB1E /* Weak.cpp */; };
</span><span class="cx">                 2600B5A6152BAAA70091EE5F /* JSStringJoiner.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2600B5A4152BAAA70091EE5F /* JSStringJoiner.cpp */; };
</span><span class="cx">                 2600B5A7152BAAA70091EE5F /* JSStringJoiner.h in Headers */ = {isa = PBXBuildFile; fileRef = 2600B5A5152BAAA70091EE5F /* JSStringJoiner.h */; };
</span><ins>+                26718BA41BE99F780052017B /* AirIteratedRegisterCoalescing.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 26718BA21BE99F780052017B /* AirIteratedRegisterCoalescing.cpp */; };
+                26718BA51BE99F780052017B /* AirIteratedRegisterCoalescing.h in Headers */ = {isa = PBXBuildFile; fileRef = 26718BA31BE99F780052017B /* AirIteratedRegisterCoalescing.h */; };
</ins><span class="cx">                 2A05ABD51961DF2400341750 /* JSPropertyNameEnumerator.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2A05ABD31961DF2400341750 /* JSPropertyNameEnumerator.cpp */; };
</span><span class="cx">                 2A05ABD61961DF2400341750 /* JSPropertyNameEnumerator.h in Headers */ = {isa = PBXBuildFile; fileRef = 2A05ABD41961DF2400341750 /* JSPropertyNameEnumerator.h */; };
</span><span class="cx">                 2A111245192FCE79005EE18D /* CustomGetterSetter.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2A111243192FCE79005EE18D /* CustomGetterSetter.cpp */; };
</span><span class="lines">@@ -3083,6 +3085,8 @@
</span><span class="cx">                 2600B5A4152BAAA70091EE5F /* JSStringJoiner.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JSStringJoiner.cpp; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><span class="cx">                 2600B5A5152BAAA70091EE5F /* JSStringJoiner.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSStringJoiner.h; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><span class="cx">                 264091FA1BE2FD4100684DB2 /* AirOpcode.opcodes */ = {isa = PBXFileReference; lastKnownFileType = text; name = AirOpcode.opcodes; path = b3/air/AirOpcode.opcodes; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><ins>+                26718BA21BE99F780052017B /* AirIteratedRegisterCoalescing.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = AirIteratedRegisterCoalescing.cpp; path = b3/air/AirIteratedRegisterCoalescing.cpp; sourceTree = &quot;&lt;group&gt;&quot;; };
+                26718BA31BE99F780052017B /* AirIteratedRegisterCoalescing.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = AirIteratedRegisterCoalescing.h; path = b3/air/AirIteratedRegisterCoalescing.h; sourceTree = &quot;&lt;group&gt;&quot;; };
</ins><span class="cx">                 2A05ABD31961DF2400341750 /* JSPropertyNameEnumerator.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JSPropertyNameEnumerator.cpp; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><span class="cx">                 2A05ABD41961DF2400341750 /* JSPropertyNameEnumerator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSPropertyNameEnumerator.h; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><span class="cx">                 2A111243192FCE79005EE18D /* CustomGetterSetter.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CustomGetterSetter.cpp; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><span class="lines">@@ -4566,6 +4570,8 @@
</span><span class="cx">                                 0FEC855A1BDACDC70080FF74 /* AirInst.cpp */,
</span><span class="cx">                                 0FEC855B1BDACDC70080FF74 /* AirInst.h */,
</span><span class="cx">                                 0FEC855C1BDACDC70080FF74 /* AirInstInlines.h */,
</span><ins>+                                26718BA21BE99F780052017B /* AirIteratedRegisterCoalescing.cpp */,
+                                26718BA31BE99F780052017B /* AirIteratedRegisterCoalescing.h */,
</ins><span class="cx">                                 0FEC855D1BDACDC70080FF74 /* AirLiveness.h */,
</span><span class="cx">                                 264091FA1BE2FD4100684DB2 /* AirOpcode.opcodes */,
</span><span class="cx">                                 0FEC855E1BDACDC70080FF74 /* AirPhaseScope.cpp */,
</span><span class="lines">@@ -6652,6 +6658,7 @@
</span><span class="cx">                                 0FEC85191BDACDAC0080FF74 /* B3Generate.h in Headers */,
</span><span class="cx">                                 0FEC851A1BDACDAC0080FF74 /* B3GenericFrequentedBlock.h in Headers */,
</span><span class="cx">                                 0FEC85C31BE167A00080FF74 /* B3HeapRange.h in Headers */,
</span><ins>+                                26718BA51BE99F780052017B /* AirIteratedRegisterCoalescing.h in Headers */,
</ins><span class="cx">                                 0FEC851B1BDACDAC0080FF74 /* B3IndexMap.h in Headers */,
</span><span class="cx">                                 0FEC851C1BDACDAC0080FF74 /* B3IndexSet.h in Headers */,
</span><span class="cx">                                 0FEC85BA1BE1462F0080FF74 /* B3InsertionSet.h in Headers */,
</span><span class="lines">@@ -8909,6 +8916,7 @@
</span><span class="cx">                                 7B0247561B8682E100542440 /* WASMFunctionParser.cpp in Sources */,
</span><span class="cx">                                 7B39F76D1B62DE2E00360FB4 /* WASMModuleParser.cpp in Sources */,
</span><span class="cx">                                 7B39F7721B63574D00360FB4 /* WASMReader.cpp in Sources */,
</span><ins>+                                26718BA41BE99F780052017B /* AirIteratedRegisterCoalescing.cpp in Sources */,
</ins><span class="cx">                                 FED94F2E171E3E2300BE77A4 /* Watchdog.cpp in Sources */,
</span><span class="cx">                                 0F919D2515853CE0004A4E7D /* Watchpoint.cpp in Sources */,
</span><span class="cx">                                 1ACF7377171CA6FB00C9BB1E /* Weak.cpp in Sources */,
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoreb3airAirGeneratecpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/b3/air/AirGenerate.cpp (192291 => 192292)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/b3/air/AirGenerate.cpp        2015-11-11 04:29:59 UTC (rev 192291)
+++ trunk/Source/JavaScriptCore/b3/air/AirGenerate.cpp        2015-11-11 05:10:43 UTC (rev 192292)
</span><span class="lines">@@ -33,6 +33,7 @@
</span><span class="cx"> #include &quot;AirEliminateDeadCode.h&quot;
</span><span class="cx"> #include &quot;AirGenerationContext.h&quot;
</span><span class="cx"> #include &quot;AirHandleCalleeSaves.h&quot;
</span><ins>+#include &quot;AirIteratedRegisterCoalescing.h&quot;
</ins><span class="cx"> #include &quot;AirReportUsedRegisters.h&quot;
</span><span class="cx"> #include &quot;AirSimplifyCFG.h&quot;
</span><span class="cx"> #include &quot;AirSpillEverything.h&quot;
</span><span class="lines">@@ -68,8 +69,7 @@
</span><span class="cx"> 
</span><span class="cx">     // This is where we would have a real register allocator. Then, we could use spillEverything()
</span><span class="cx">     // in place of the register allocator only for testing.
</span><del>-    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=150457
-    spillEverything(code);
</del><ins>+    iteratedRegisterCoalescing(code);
</ins><span class="cx"> 
</span><span class="cx">     // Prior to this point the prologue and epilogue is implicit. This makes it explicit. It also
</span><span class="cx">     // does things like identify which callee-saves we're using and saves them.
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoreb3airAirInstInlinesh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/b3/air/AirInstInlines.h (192291 => 192292)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/b3/air/AirInstInlines.h        2015-11-11 04:29:59 UTC (rev 192291)
+++ trunk/Source/JavaScriptCore/b3/air/AirInstInlines.h        2015-11-11 05:10:43 UTC (rev 192292)
</span><span class="lines">@@ -67,7 +67,7 @@
</span><span class="cx">                 // https://bugs.webkit.org/show_bug.cgi?id=151128
</span><span class="cx">                 
</span><span class="cx">                 functor(stackSlot, role, type);
</span><del>-                arg = Arg::stack(stackSlot);
</del><ins>+                arg = Arg::stack(stackSlot, arg.offset());
</ins><span class="cx">             });
</span><span class="cx">     }
</span><span class="cx"> };
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoreb3airAirIteratedRegisterCoalescingcpp"></a>
<div class="addfile"><h4>Added: trunk/Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.cpp (0 => 192292)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.cpp                                (rev 0)
+++ trunk/Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.cpp        2015-11-11 05:10:43 UTC (rev 192292)
</span><span class="lines">@@ -0,0 +1,955 @@
</span><ins>+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include &quot;config.h&quot;
+#include &quot;AirIteratedRegisterCoalescing.h&quot;
+
+#if ENABLE(B3_JIT)
+
+#include &quot;AirCode.h&quot;
+#include &quot;AirInsertionSet.h&quot;
+#include &quot;AirInstInlines.h&quot;
+#include &quot;AirLiveness.h&quot;
+#include &quot;AirPhaseScope.h&quot;
+#include &quot;AirRegisterPriority.h&quot;
+#include &lt;wtf/ListHashSet.h&gt;
+
+namespace JSC { namespace B3 { namespace Air {
+
+static bool debug = false;
+static bool traceDebug = false;
+
+template&lt;Arg::Type type&gt;
+struct MoveInstHelper;
+
+template&lt;&gt;
+struct MoveInstHelper&lt;Arg::GP&gt; {
+    static bool mayBeCoalescable(const Inst&amp; inst)
+    {
+        bool isMove = inst.opcode == Move;
+        if (!isMove)
+            return false;
+
+        ASSERT_WITH_MESSAGE(inst.args.size() == 2, &quot;We assume coalecable moves only have two arguments in a few places.&quot;);
+        ASSERT(inst.args[0].isType(Arg::GP));
+        ASSERT(inst.args[1].isType(Arg::GP));
+
+        return inst.args[0].isTmp() &amp;&amp; inst.args[1].isTmp();
+    }
+};
+
+template&lt;&gt;
+struct MoveInstHelper&lt;Arg::FP&gt; {
+    static bool mayBeCoalescable(const Inst&amp; inst)
+    {
+        if (inst.opcode != MoveDouble)
+            return false;
+
+        ASSERT_WITH_MESSAGE(inst.args.size() == 2, &quot;We assume coalecable moves only have two arguments in a few places.&quot;);
+        ASSERT(inst.args[0].isType(Arg::FP));
+        ASSERT(inst.args[1].isType(Arg::FP));
+
+        return inst.args[0].isTmp() &amp;&amp; inst.args[1].isTmp();
+    }
+};
+
+
+// The speed of the alocator depends directly on how fast we can query information associated with a Tmp
+// and/or its ownership to a set.
+//
+// HashSet/HashMap operations are overly expensive for that.
+//
+// Instead of a Hash structure, Tmp are indexed directly by value in Arrays. The internal integer is used as the index
+// to reference them quickly. In some sets, we do not care about the colored regs, we still allocate the memory for them
+// and just do not use it.
+template&lt;Arg::Type type&gt;
+struct AbsoluteTmpHelper;
+
+template&lt;&gt;
+struct AbsoluteTmpHelper&lt;Arg::GP&gt; {
+    static unsigned absoluteIndex(const Tmp&amp; tmp)
+    {
+        ASSERT(tmp.isGP());
+        ASSERT(static_cast&lt;int&gt;(tmp.internalValue()) &gt; 0);
+        return tmp.internalValue();
+    }
+
+    static unsigned absoluteIndex(unsigned tmpIndex)
+    {
+        return absoluteIndex(Tmp::gpTmpForIndex(tmpIndex));
+    }
+
+    static Tmp tmpFromAbsoluteIndex(unsigned tmpIndex)
+    {
+        return Tmp::tmpForInternalValue(tmpIndex);
+    }
+};
+
+template&lt;&gt;
+struct AbsoluteTmpHelper&lt;Arg::FP&gt; {
+    static unsigned absoluteIndex(const Tmp&amp; tmp)
+    {
+        ASSERT(tmp.isFP());
+        ASSERT(static_cast&lt;int&gt;(tmp.internalValue()) &lt; 0);
+        return -tmp.internalValue();
+    }
+
+    static unsigned absoluteIndex(unsigned tmpIndex)
+    {
+        return absoluteIndex(Tmp::fpTmpForIndex(tmpIndex));
+    }
+
+    static Tmp tmpFromAbsoluteIndex(unsigned tmpIndex)
+    {
+        return Tmp::tmpForInternalValue(-tmpIndex);
+    }
+};
+
+template&lt;Arg::Type type&gt;
+struct Bank;
+
+template&lt;&gt;
+struct Bank&lt;Arg::GP&gt; {
+    typedef GPRInfo Info;
+};
+
+template&lt;&gt;
+struct Bank&lt;Arg::FP&gt; {
+    typedef FPRInfo Info;
+};
+
+template&lt;Arg::Type type&gt;
+class IteratedRegisterCoalescingAllocator {
+public:
+    IteratedRegisterCoalescingAllocator(Code&amp; code)
+    {
+        initializeDegrees(code);
+
+        unsigned tmpArraySize = this-&gt;tmpArraySize(code);
+        m_adjacencyList.resize(tmpArraySize);
+        m_moveList.resize(tmpArraySize);
+        m_coalescedTmps.resize(tmpArraySize);
+        m_isOnSelectStack.ensureSize(tmpArraySize);
+    }
+
+    void build(Inst&amp; inst, const Liveness&lt;Tmp&gt;::LocalCalc&amp; localCalc)
+    {
+        // All the Def()s interfere with eachother.
+        inst.forEachTmp([&amp;] (Tmp&amp; arg, Arg::Role role, Arg::Type argType) {
+            if (argType != type)
+                return;
+
+            if (Arg::isDef(role)) {
+                inst.forEachTmp([&amp;] (Tmp&amp; otherArg, Arg::Role role, Arg::Type) {
+                    if (argType != type)
+                        return;
+
+                    if (Arg::isDef(role))
+                        addEdge(arg, otherArg);
+                });
+            }
+        });
+
+        if (MoveInstHelper&lt;type&gt;::mayBeCoalescable(inst)) {
+            for (const Arg&amp; arg : inst.args) {
+                HashSet&lt;Inst*&gt;&amp; list = m_moveList[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(arg.tmp())];
+                list.add(&amp;inst);
+            }
+            m_worklistMoves.add(&amp;inst);
+
+            // We do not want the Use() of this move to interfere with the Def(), even if it is live
+            // after the Move. If we were to add the interference edge, it would be impossible to
+            // coalesce the Move even if the two Tmp never interfere anywhere.
+            Tmp defTmp;
+            Tmp useTmp;
+            inst.forEachTmp([&amp;defTmp, &amp;useTmp] (Tmp&amp; argTmp, Arg::Role role, Arg::Type) {
+                if (Arg::isDef(role))
+                    defTmp = argTmp;
+                else {
+                    ASSERT(Arg::isUse(role));
+                    useTmp = argTmp;
+                }
+            });
+            ASSERT(defTmp);
+            ASSERT(useTmp);
+
+            for (const Tmp&amp; liveTmp : localCalc.live()) {
+                if (liveTmp != useTmp &amp;&amp; liveTmp.isGP() == (type == Arg::GP))
+                    addEdge(defTmp, liveTmp);
+            }
+        } else
+            addEdges(inst, localCalc.live());
+    }
+
+    void allocate()
+    {
+        makeWorkList();
+
+        if (debug) {
+            dumpInterferenceGraphInDot(WTF::dataFile());
+            dataLog(&quot;Initial work list\n&quot;);
+            dumpWorkLists(WTF::dataFile());
+        }
+
+        do {
+            if (traceDebug) {
+                dataLog(&quot;Before Graph simplification iteration\n&quot;);
+                dumpWorkLists(WTF::dataFile());
+            }
+
+            if (!m_simplifyWorklist.isEmpty())
+                simplify();
+            else if (!m_worklistMoves.isEmpty())
+                coalesce();
+            else if (!m_freezeWorklist.isEmpty())
+                freeze();
+            else if (!m_spillWorklist.isEmpty())
+                selectSpill();
+
+            if (traceDebug) {
+                dataLog(&quot;After Graph simplification iteration\n&quot;);
+                dumpWorkLists(WTF::dataFile());
+            }
+        } while (!m_simplifyWorklist.isEmpty() || !m_worklistMoves.isEmpty() || !m_freezeWorklist.isEmpty() || !m_spillWorklist.isEmpty());
+
+        assignColors();
+    }
+
+    Tmp getAlias(Tmp tmp) const
+    {
+        Tmp alias = tmp;
+        while (Tmp nextAlias = m_coalescedTmps[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(alias)])
+            alias = nextAlias;
+        return alias;
+    }
+
+    const HashSet&lt;Tmp&gt;&amp; spilledTmp() const { return m_spilledTmp; }
+    Reg allocatedReg(Tmp tmp) const
+    {
+        ASSERT(!tmp.isReg());
+        ASSERT(m_coloredTmp.size());
+        ASSERT(tmp.isGP() == (type == Arg::GP));
+
+        Reg reg = m_coloredTmp[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(tmp)];
+        if (!reg) {
+            // We only care about Tmps that interfere. A Tmp that never interfere with anything
+            // can take any register.
+            reg = regsInPriorityOrder(type).first();
+        }
+        return reg;
+    }
+
+private:
+    static unsigned tmpArraySize(Code&amp; code)
+    {
+        unsigned numTmps = code.numTmps(type);
+        return AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(numTmps);
+    }
+
+    void initializeDegrees(Code&amp; code)
+    {
+        unsigned tmpArraySize = this-&gt;tmpArraySize(code);
+        m_degrees.resize(tmpArraySize);
+
+        // All precolored registers have an &quot;infinite&quot; degree.
+        unsigned firstNonRegIndex = AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(0);
+        for (unsigned i = 0; i &lt; firstNonRegIndex; ++i)
+            m_degrees[i] = std::numeric_limits&lt;unsigned&gt;::max();
+
+        bzero(m_degrees.data() + firstNonRegIndex, (tmpArraySize - firstNonRegIndex) * sizeof(unsigned));
+    }
+
+    void addEdges(Inst&amp; inst, const HashSet&lt;Tmp&gt;&amp; liveTmp)
+    {
+        // All the Def()s interfere with everything live.
+        inst.forEachTmp([&amp;] (Tmp&amp; arg, Arg::Role role, Arg::Type argType) {
+            if (argType == type &amp;&amp; Arg::isDef(role)) {
+                for (const Tmp&amp; liveTmp : liveTmp) {
+                    if (liveTmp.isGP() == (type == Arg::GP))
+                        addEdge(arg, liveTmp);
+                }
+            }
+        });
+    }
+
+    void addEdge(const Tmp&amp; a, const Tmp&amp; b)
+    {
+        if (a == b)
+            return;
+
+        if (m_interferenceEdges.add(InterferenceEdge(a, b)).isNewEntry) {
+            if (!a.isReg()) {
+                ASSERT(!m_adjacencyList[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(a)].contains(b));
+                m_adjacencyList[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(a)].append(b);
+                m_degrees[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(a)]++;
+            }
+
+            if (!b.isReg()) {
+                ASSERT(!m_adjacencyList[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(b)].contains(a));
+                m_adjacencyList[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(b)].append(a);
+                m_degrees[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(b)]++;
+            }
+        }
+    }
+
+    void makeWorkList()
+    {
+        unsigned firstNonRegIndex = AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(0);
+        for (unsigned i = firstNonRegIndex; i &lt; m_degrees.size(); ++i) {
+            unsigned degree = m_degrees[i];
+            if (!degree)
+                continue;
+
+            Tmp tmp = AbsoluteTmpHelper&lt;type&gt;::tmpFromAbsoluteIndex(i);
+
+            if (degree &gt;= Bank&lt;type&gt;::Info::numberOfRegisters)
+                m_spillWorklist.add(tmp);
+            else if (!m_moveList[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(tmp)].isEmpty())
+                m_freezeWorklist.add(tmp);
+            else
+                m_simplifyWorklist.append(tmp);
+        }
+    }
+
+    void simplify()
+    {
+        Tmp last = m_simplifyWorklist.takeLast();
+
+        ASSERT(!m_selectStack.contains(last));
+        ASSERT(!m_isOnSelectStack.get(AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(last)));
+        m_selectStack.append(last);
+        m_isOnSelectStack.quickSet(AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(last));
+
+        forEachAdjacent(last, [this](Tmp adjacentTmp) {
+            decrementDegree(adjacentTmp);
+        });
+    }
+
+    template&lt;typename Function&gt;
+    void forEachAdjacent(Tmp tmp, Function function)
+    {
+        for (Tmp adjacentTmp : m_adjacencyList[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(tmp)]) {
+            if (!hasBeenSimplified(adjacentTmp))
+                function(adjacentTmp);
+        }
+    }
+
+    bool hasBeenSimplified(Tmp tmp)
+    {
+        return m_isOnSelectStack.quickGet(AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(tmp)) || !!m_coalescedTmps[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(tmp)];
+    }
+
+    void decrementDegree(Tmp tmp)
+    {
+        ASSERT(m_degrees[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(tmp)]);
+
+        unsigned oldDegree = m_degrees[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(tmp)]--;
+        if (oldDegree == Bank&lt;type&gt;::Info::numberOfRegisters) {
+            enableMovesOnValueAndAdjacents(tmp);
+            m_spillWorklist.remove(tmp);
+            if (isMoveRelated(tmp))
+                m_freezeWorklist.add(tmp);
+            else
+                m_simplifyWorklist.append(tmp);
+        }
+    }
+
+    template&lt;typename Function&gt;
+    void forEachNodeMoves(Tmp tmp, Function function)
+    {
+        for (Inst* inst : m_moveList[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(tmp)]) {
+            if (m_activeMoves.contains(inst) || m_worklistMoves.contains(inst))
+                function(*inst);
+        }
+    }
+
+    bool isMoveRelated(Tmp tmp)
+    {
+        for (Inst* inst : m_moveList[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(tmp)]) {
+            if (m_activeMoves.contains(inst) || m_worklistMoves.contains(inst))
+                return true;
+        }
+        return false;
+    }
+
+    void enableMovesOnValue(Tmp tmp)
+    {
+        for (Inst* inst : m_moveList[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(tmp)]) {
+            if (m_activeMoves.remove(inst))
+                m_worklistMoves.add(inst);
+        }
+    }
+
+    void enableMovesOnValueAndAdjacents(Tmp tmp)
+    {
+        enableMovesOnValue(tmp);
+
+        forEachAdjacent(tmp, [this] (Tmp adjacentTmp) {
+            enableMovesOnValue(adjacentTmp);
+        });
+    }
+
+    void coalesce()
+    {
+        Inst* moveInst = m_worklistMoves.takeLast();
+        ASSERT(moveInst-&gt;args.size() == 2);
+
+        Tmp u = moveInst-&gt;args[0].tmp();
+        u = getAlias(u);
+        Tmp v = moveInst-&gt;args[1].tmp();
+        v = getAlias(v);
+
+        if (v.isReg())
+            std::swap(u, v);
+
+        if (traceDebug)
+            dataLog(&quot;Coalescing &quot;, *moveInst, &quot; u = &quot;, u, &quot; v = &quot;, v, &quot;\n&quot;);
+
+        if (u == v) {
+            addWorkList(u);
+
+            if (traceDebug)
+                dataLog(&quot;    Coalesced\n&quot;);
+        } else if (v.isReg() || m_interferenceEdges.contains(InterferenceEdge(u, v))) {
+            addWorkList(u);
+            addWorkList(v);
+
+            if (traceDebug)
+                dataLog(&quot;    Constrained\n&quot;);
+        } else if (canBeSafelyCoalesced(u, v)) {
+            combine(u, v);
+            addWorkList(u);
+
+            if (traceDebug)
+                dataLog(&quot;    Safe Coalescing\n&quot;);
+        } else {
+            m_activeMoves.add(moveInst);
+
+            if (traceDebug)
+                dataLog(&quot;    Failed coalescing, added to active moves.\n&quot;);
+        }
+    }
+
+    bool canBeSafelyCoalesced(Tmp u, Tmp v)
+    {
+        ASSERT(!v.isReg());
+        if (u.isReg())
+            return precoloredCoalescingHeuristic(u, v);
+        return conservativeHeuristic(u, v);
+    }
+
+    bool precoloredCoalescingHeuristic(Tmp u, Tmp v)
+    {
+        ASSERT(u.isReg());
+        ASSERT(!v.isReg());
+
+        // If any adjacent of the non-colored node is not an adjacent of the colored node AND has a degree &gt;= K
+        // there is a risk that this node needs to have the same color as our precolored node. If we coalesce such
+        // move, we may create an uncolorable graph.
+        auto adjacentsOfV = m_adjacencyList[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(v)];
+        for (Tmp adjacentTmp : adjacentsOfV) {
+            if (!adjacentTmp.isReg()
+                &amp;&amp; !hasBeenSimplified(adjacentTmp)
+                &amp;&amp; m_degrees[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(adjacentTmp)] &gt;= Bank&lt;type&gt;::Info::numberOfRegisters
+                &amp;&amp; !m_interferenceEdges.contains(InterferenceEdge(u, adjacentTmp)))
+                return false;
+        }
+        return true;
+    }
+
+    bool conservativeHeuristic(Tmp u, Tmp v)
+    {
+        // This is using the Briggs' conservative coalescing rule:
+        // If the number of combined adjacent node with a degree &gt;= K is less than K,
+        // it is safe to combine the two nodes. The reason is that we know that if the graph
+        // is colorable, we have fewer than K adjacents with high order and there is a color
+        // for the current node.
+        ASSERT(u != v);
+        ASSERT(!u.isReg());
+        ASSERT(!v.isReg());
+
+        auto adjacentsOfU = m_adjacencyList[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(u)];
+        auto adjacentsOfV = m_adjacencyList[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(v)];
+
+        if (adjacentsOfU.size() + adjacentsOfV.size() &lt; Bank&lt;type&gt;::Info::numberOfRegisters) {
+            // Shortcut: if the total number of adjacents is less than the number of register, the condition is always met.
+            return true;
+        }
+
+        HashSet&lt;Tmp&gt; highOrderAdjacents;
+
+        for (Tmp adjacentTmp : adjacentsOfU) {
+            ASSERT(adjacentTmp != v);
+            ASSERT(adjacentTmp != u);
+            if (!hasBeenSimplified(adjacentTmp) &amp;&amp; m_degrees[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(adjacentTmp)] &gt;= Bank&lt;type&gt;::Info::numberOfRegisters) {
+                auto addResult = highOrderAdjacents.add(adjacentTmp);
+                if (addResult.isNewEntry &amp;&amp; highOrderAdjacents.size() &gt;= Bank&lt;type&gt;::Info::numberOfRegisters)
+                    return false;
+            }
+        }
+        for (Tmp adjacentTmp : adjacentsOfV) {
+            ASSERT(adjacentTmp != u);
+            ASSERT(adjacentTmp != v);
+            if (!hasBeenSimplified(adjacentTmp) &amp;&amp; m_degrees[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(adjacentTmp)] &gt;= Bank&lt;type&gt;::Info::numberOfRegisters) {
+                auto addResult = highOrderAdjacents.add(adjacentTmp);
+                if (addResult.isNewEntry &amp;&amp; highOrderAdjacents.size() &gt;= Bank&lt;type&gt;::Info::numberOfRegisters)
+                    return false;
+            }
+        }
+
+        ASSERT(highOrderAdjacents.size() &lt; Bank&lt;type&gt;::Info::numberOfRegisters);
+        return true;
+    }
+
+    void addWorkList(Tmp tmp)
+    {
+        if (!tmp.isReg() &amp;&amp; m_degrees[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(tmp)] &lt; Bank&lt;type&gt;::Info::numberOfRegisters &amp;&amp; !isMoveRelated(tmp)) {
+            m_freezeWorklist.remove(tmp);
+            m_simplifyWorklist.append(tmp);
+        }
+    }
+
+    void combine(Tmp u, Tmp v)
+    {
+        if (!m_freezeWorklist.remove(v))
+            m_spillWorklist.remove(v);
+
+        ASSERT(!m_coalescedTmps[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(v)]);
+        m_coalescedTmps[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(v)] = u;
+
+        HashSet&lt;Inst*&gt;&amp; vMoves = m_moveList[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(v)];
+        m_moveList[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(u)].add(vMoves.begin(), vMoves.end());
+
+        forEachAdjacent(v, [this, u] (Tmp adjacentTmp) {
+            addEdge(adjacentTmp, u);
+            decrementDegree(adjacentTmp);
+        });
+
+        if (m_degrees[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(u)] &gt;= Bank&lt;type&gt;::Info::numberOfRegisters &amp;&amp; m_freezeWorklist.remove(u))
+            m_spillWorklist.add(u);
+    }
+
+    void freeze()
+    {
+        Tmp victim = m_freezeWorklist.takeAny();
+        m_simplifyWorklist.append(victim);
+        freezeMoves(victim);
+    }
+
+    void freezeMoves(Tmp tmp)
+    {
+        forEachNodeMoves(tmp, [this, tmp] (Inst&amp; inst) {
+            if (!m_activeMoves.remove(&amp;inst))
+                m_worklistMoves.remove(&amp;inst);
+
+            Tmp otherTmp = inst.args[0].tmp() != tmp ? inst.args[0].tmp() : inst.args[1].tmp();
+            if (m_degrees[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(otherTmp)] &lt; Bank&lt;type&gt;::Info::numberOfRegisters &amp;&amp; !isMoveRelated(otherTmp)) {
+                m_freezeWorklist.remove(otherTmp);
+                m_simplifyWorklist.append(otherTmp);
+            }
+        });
+    }
+
+    void selectSpill()
+    {
+        // FIXME: we should select a good candidate based on all the information we have.
+        // FIXME: we should never select a spilled tmp as we would never converge.
+
+        auto iterator = m_spillWorklist.begin();
+
+        auto victimIterator = iterator;
+        unsigned maxDegree = m_degrees[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(*iterator)];
+
+        ++iterator;
+        for (;iterator != m_spillWorklist.end(); ++iterator) {
+            unsigned tmpDegree = m_degrees[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(*iterator)];
+            if (tmpDegree &gt; maxDegree) {
+                victimIterator = iterator;
+                maxDegree = tmpDegree;
+            }
+        }
+
+        Tmp victimTmp = *victimIterator;
+        m_spillWorklist.remove(victimIterator);
+        m_simplifyWorklist.append(victimTmp);
+        freezeMoves(victimTmp);
+    }
+
+    void assignColors()
+    {
+        ASSERT(m_simplifyWorklist.isEmpty());
+        ASSERT(m_worklistMoves.isEmpty());
+        ASSERT(m_freezeWorklist.isEmpty());
+        ASSERT(m_spillWorklist.isEmpty());
+
+        // Reclaim as much memory as possible.
+        m_interferenceEdges.clear();
+        m_degrees.clear();
+        m_moveList.clear();
+        m_worklistMoves.clear();
+        m_activeMoves.clear();
+        m_simplifyWorklist.clear();
+        m_spillWorklist.clear();
+        m_freezeWorklist.clear();
+
+        // Try to color the Tmp on the stack.
+        m_coloredTmp.resize(m_adjacencyList.size());
+        const auto&amp; registersInPriorityOrder = regsInPriorityOrder(type);
+
+        while (!m_selectStack.isEmpty()) {
+            Tmp tmp = m_selectStack.takeLast();
+            ASSERT(!tmp.isReg());
+            ASSERT(!m_coloredTmp[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(tmp)]);
+
+            RegisterSet coloredRegisters;
+            for (Tmp adjacentTmp : m_adjacencyList[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(tmp)]) {
+                Tmp aliasTmp = getAlias(adjacentTmp);
+                if (aliasTmp.isReg()) {
+                    coloredRegisters.set(aliasTmp.reg());
+                    continue;
+                }
+
+                Reg reg = m_coloredTmp[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(aliasTmp)];
+                if (reg)
+                    coloredRegisters.set(reg);
+            }
+
+            bool colorAssigned = false;
+            for (Reg reg : registersInPriorityOrder) {
+                if (!coloredRegisters.get(reg)) {
+                    m_coloredTmp[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(tmp)] = reg;
+                    colorAssigned = true;
+                    break;
+                }
+            }
+
+            if (!colorAssigned)
+                m_spilledTmp.add(tmp);
+        }
+        m_selectStack.clear();
+
+        if (!m_spilledTmp.isEmpty())
+            m_coloredTmp.clear();
+    }
+
+#pragma mark - Debugging helpers.
+
+    void dumpInterferenceGraphInDot(PrintStream&amp; out)
+    {
+        out.print(&quot;graph InterferenceGraph { \n&quot;);
+
+        HashSet&lt;Tmp&gt; tmpsWithInterferences;
+        for (const auto&amp; edge : m_interferenceEdges) {
+            tmpsWithInterferences.add(edge.first());
+            tmpsWithInterferences.add(edge.second());
+        }
+
+        for (const auto&amp; tmp : tmpsWithInterferences)
+            out.print(&quot;    &quot;, tmp.internalValue(), &quot; [label=\&quot;&quot;, tmp, &quot; (&quot;, m_degrees[AbsoluteTmpHelper&lt;type&gt;::absoluteIndex(tmp)], &quot;)\&quot;];\n&quot;);
+
+        for (const auto&amp; edge : m_interferenceEdges)
+            out.print(&quot;    &quot;, edge.first().internalValue(), &quot; -- &quot;, edge.second().internalValue(), &quot;;\n&quot;);
+        out.print(&quot;}\n&quot;);
+    }
+
+    void dumpWorkLists(PrintStream&amp; out)
+    {
+        out.print(&quot;Simplify work list:\n&quot;);
+        for (Tmp tmp : m_simplifyWorklist)
+            out.print(&quot;    &quot;, tmp, &quot;\n&quot;);
+        out.print(&quot;Moves work list:\n&quot;);
+        for (Inst* inst : m_worklistMoves)
+            out.print(&quot;    &quot;, *inst, &quot;\n&quot;);
+        out.print(&quot;Freeze work list:\n&quot;);
+        for (Tmp tmp : m_freezeWorklist)
+            out.print(&quot;    &quot;, tmp, &quot;\n&quot;);
+        out.print(&quot;Spill work list:\n&quot;);
+        for (Tmp tmp : m_spillWorklist)
+            out.print(&quot;    &quot;, tmp, &quot;\n&quot;);
+    }
+
+#pragma mark -
+
+    // Interference edges are not directed. An edge between any two Tmps is represented
+    // by the concatenated values of the smallest Tmp followed by the bigger Tmp.
+    class InterferenceEdge {
+    public:
+        InterferenceEdge()
+        {
+        }
+
+        InterferenceEdge(Tmp a, Tmp b)
+        {
+            ASSERT(a.internalValue());
+            ASSERT(b.internalValue());
+            ASSERT_WITH_MESSAGE(a != b, &quot;A Tmp can never interfere with itself. Doing so would force it to be the superposition of two registers.&quot;);
+
+            unsigned aInternal = a.internalValue();
+            unsigned bInternal = b.internalValue();
+            if (bInternal &lt; aInternal)
+                std::swap(aInternal, bInternal);
+            m_value = static_cast&lt;uint64_t&gt;(aInternal) &lt;&lt; 32 | bInternal;
+        }
+
+        InterferenceEdge(WTF::HashTableDeletedValueType)
+            : m_value(std::numeric_limits&lt;uint64_t&gt;::max())
+        {
+        }
+
+        Tmp first() const
+        {
+            return Tmp::tmpForInternalValue(m_value &gt;&gt; 32);
+        }
+
+        Tmp second() const
+        {
+            return Tmp::tmpForInternalValue(m_value &amp; 0xffffffff);
+        }
+
+        bool operator==(const InterferenceEdge other) const
+        {
+            return m_value == other.m_value;
+        }
+
+        bool isHashTableDeletedValue() const
+        {
+            return *this == InterferenceEdge(WTF::HashTableDeletedValue);
+        }
+
+        unsigned hash() const
+        {
+            return WTF::IntHash&lt;uint64_t&gt;::hash(m_value);
+        }
+
+    private:
+        uint64_t m_value { 0 };
+    };
+
+    struct InterferenceEdgeHash {
+        static unsigned hash(const InterferenceEdge&amp; key) { return key.hash(); }
+        static bool equal(const InterferenceEdge&amp; a, const InterferenceEdge&amp; b) { return a == b; }
+        static const bool safeToCompareToEmptyOrDeleted = true;
+    };
+    typedef SimpleClassHashTraits&lt;InterferenceEdge&gt; InterferenceEdgeHashTraits;
+
+    // The interference graph.
+    HashSet&lt;InterferenceEdge, InterferenceEdgeHash, InterferenceEdgeHashTraits&gt; m_interferenceEdges;
+    Vector&lt;Vector&lt;Tmp, 0, UnsafeVectorOverflow, 4&gt;, 0, UnsafeVectorOverflow&gt; m_adjacencyList;
+    Vector&lt;unsigned, 0, UnsafeVectorOverflow&gt; m_degrees;
+
+    // List of every move instruction associated with a Tmp.
+    Vector&lt;HashSet&lt;Inst*&gt;&gt; m_moveList;
+
+    // Colors.
+    Vector&lt;Reg, 0, UnsafeVectorOverflow&gt; m_coloredTmp;
+    HashSet&lt;Tmp&gt; m_spilledTmp;
+
+    // Values that have been coalesced with another value.
+    Vector&lt;Tmp&gt; m_coalescedTmps;
+
+    // The stack of Tmp removed from the graph and ready for coloring.
+    BitVector m_isOnSelectStack;
+    Vector&lt;Tmp&gt; m_selectStack;
+
+    // Work lists.
+    // Set of &quot;move&quot; enabled for possible coalescing.
+    ListHashSet&lt;Inst*&gt; m_worklistMoves;
+    // Set of &quot;move&quot; not yet ready for coalescing.
+    HashSet&lt;Inst*&gt; m_activeMoves;
+    // Low-degree, non-Move related.
+    Vector&lt;Tmp&gt; m_simplifyWorklist;
+    // High-degree Tmp.
+    HashSet&lt;Tmp&gt; m_spillWorklist;
+    // Low-degree, Move related.
+    HashSet&lt;Tmp&gt; m_freezeWorklist;
+};
+
+template&lt;Arg::Type type&gt;
+static bool isUselessMoveInst(const Inst&amp; inst)
+{
+    return MoveInstHelper&lt;type&gt;::mayBeCoalescable(inst) &amp;&amp; inst.args[0].tmp() == inst.args[1].tmp();
+}
+
+template&lt;Arg::Type type&gt;
+static void assignRegisterToTmpInProgram(Code&amp; code, const IteratedRegisterCoalescingAllocator&lt;type&gt;&amp; allocator)
+{
+    for (BasicBlock* block : code) {
+        // Give Tmp a valid register.
+        for (unsigned instIndex = 0; instIndex &lt; block-&gt;size(); ++instIndex) {
+            Inst&amp; inst = block-&gt;at(instIndex);
+            inst.forEachTmpFast([&amp;] (Tmp&amp; tmp) {
+                if (tmp.isReg() || tmp.isGP() == (type != Arg::GP))
+                    return;
+
+                Tmp aliasTmp = allocator.getAlias(tmp);
+                Tmp assignedTmp;
+                if (aliasTmp.isReg())
+                    assignedTmp = Tmp(aliasTmp.reg());
+                else {
+                    auto reg = allocator.allocatedReg(aliasTmp);
+                    ASSERT(reg);
+                    assignedTmp = Tmp(reg);
+                }
+                ASSERT(assignedTmp.isReg());
+                tmp = assignedTmp;
+            });
+        }
+
+        // Remove all the useless moves we created in this block.
+        block-&gt;insts().removeAllMatching(isUselessMoveInst&lt;type&gt;);
+    }
+}
+
+template&lt;Arg::Type type&gt;
+static void addSpillAndFillToProgram(Code&amp; code, const HashSet&lt;Tmp&gt;&amp; spilledTmp)
+{
+    // Allocate stack slot for each spilled value.
+    HashMap&lt;Tmp, StackSlot*&gt; stackSlots;
+    for (Tmp tmp : spilledTmp) {
+        bool isNewTmp = stackSlots.add(tmp, code.addStackSlot(8, StackSlotKind::Anonymous)).isNewEntry;
+        ASSERT_UNUSED(isNewTmp, isNewTmp);
+    }
+
+    // Rewrite the program to get rid of the spilled Tmp.
+    InsertionSet insertionSet(code);
+    for (BasicBlock* block : code) {
+        for (unsigned instIndex = 0; instIndex &lt; block-&gt;size(); ++instIndex) {
+            Inst&amp; inst = block-&gt;at(instIndex);
+
+            // Try to replace the register use by memory use when possible.
+            for (unsigned i = 0; i &lt; inst.args.size(); ++i) {
+                Arg&amp; arg = inst.args[i];
+                if (arg.isTmp() &amp;&amp; arg.type() == type &amp;&amp; !arg.isReg()) {
+                    auto stackSlotEntry = stackSlots.find(arg.tmp());
+                    if (stackSlotEntry == stackSlots.end())
+                        continue;
+
+                    if (inst.admitsStack(i)) {
+                        arg = Arg::stack(stackSlotEntry-&gt;value);
+                        continue;
+                    }
+                }
+            }
+
+            // For every other case, add Load/Store as needed.
+            inst.forEachTmp([&amp;] (Tmp&amp; tmp, Arg::Role role, Arg::Type argType) {
+                if (tmp.isReg() || argType != type)
+                    return;
+
+                auto stackSlotEntry = stackSlots.find(tmp);
+                if (stackSlotEntry == stackSlots.end())
+                    return;
+
+                Arg arg = Arg::stack(stackSlotEntry-&gt;value);
+                Opcode move = type == Arg::GP ? Move : MoveDouble;
+
+                if (Arg::isUse(role)) {
+                    Tmp newTmp = code.newTmp(type);
+                    insertionSet.insert(instIndex, move, inst.origin, arg, newTmp);
+                    tmp = newTmp;
+                }
+                if (Arg::isDef(role))
+                    insertionSet.insert(instIndex + 1, move, inst.origin, tmp, arg);
+            });
+        }
+        insertionSet.execute(block);
+    }
+}
+
+template&lt;Arg::Type type&gt;
+static void iteratedRegisterCoalescingOnType(Code&amp; code)
+{
+    while (true) {
+        IteratedRegisterCoalescingAllocator&lt;type&gt; allocator(code);
+        Liveness&lt;Tmp&gt; liveness(code);
+        for (BasicBlock* block : code) {
+            Liveness&lt;Tmp&gt;::LocalCalc localCalc(liveness, block);
+            for (unsigned instIndex = block-&gt;size(); instIndex--;) {
+                Inst&amp; inst = block-&gt;at(instIndex);
+                allocator.build(inst, localCalc);
+                localCalc.execute(inst);
+            }
+        }
+
+        allocator.allocate();
+        if (allocator.spilledTmp().isEmpty()) {
+            assignRegisterToTmpInProgram(code, allocator);
+            return;
+        }
+        addSpillAndFillToProgram&lt;type&gt;(code, allocator.spilledTmp());
+    }
+}
+
+void iteratedRegisterCoalescing(Code&amp; code)
+{
+    PhaseScope phaseScope(code, &quot;iteratedRegisterCoalescing&quot;);
+
+    bool gpIsColored = false;
+    bool fpIsColored = false;
+
+    // First we run both allocator together as long as they both spill.
+    while (!gpIsColored &amp;&amp; !fpIsColored) {
+        IteratedRegisterCoalescingAllocator&lt;Arg::GP&gt; gpAllocator(code);
+        IteratedRegisterCoalescingAllocator&lt;Arg::FP&gt; fpAllocator(code);
+
+        // Liveness Analysis can be prohibitively expensive. It is shared
+        // between the two allocators to avoid doing it twice.
+        Liveness&lt;Tmp&gt; liveness(code);
+        for (BasicBlock* block : code) {
+            Liveness&lt;Tmp&gt;::LocalCalc localCalc(liveness, block);
+            for (unsigned instIndex = block-&gt;size(); instIndex--;) {
+                Inst&amp; inst = block-&gt;at(instIndex);
+
+                gpAllocator.build(inst, localCalc);
+                fpAllocator.build(inst, localCalc);
+
+                localCalc.execute(inst);
+            }
+        }
+
+        gpAllocator.allocate();
+        if (gpAllocator.spilledTmp().isEmpty()) {
+            assignRegisterToTmpInProgram(code, gpAllocator);
+            gpIsColored = true;
+        } else
+            addSpillAndFillToProgram&lt;Arg::GP&gt;(code, gpAllocator.spilledTmp());
+
+        fpAllocator.allocate();
+        if (fpAllocator.spilledTmp().isEmpty()) {
+            assignRegisterToTmpInProgram(code, fpAllocator);
+            fpIsColored = true;
+        } else
+            addSpillAndFillToProgram&lt;Arg::FP&gt;(code, fpAllocator.spilledTmp());
+    };
+
+    if (!gpIsColored)
+        iteratedRegisterCoalescingOnType&lt;Arg::GP&gt;(code);
+    if (!fpIsColored)
+        iteratedRegisterCoalescingOnType&lt;Arg::FP&gt;(code);
+}
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
</ins></span></pre></div>
<a id="trunkSourceJavaScriptCoreb3airAirIteratedRegisterCoalescingh"></a>
<div class="addfile"><h4>Added: trunk/Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.h (0 => 192292)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.h                                (rev 0)
+++ trunk/Source/JavaScriptCore/b3/air/AirIteratedRegisterCoalescing.h        2015-11-11 05:10:43 UTC (rev 192292)
</span><span class="lines">@@ -0,0 +1,44 @@
</span><ins>+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AirIteratedRegisterCoalescing_h
+#define AirIteratedRegisterCoalescing_h
+
+#if ENABLE(B3_JIT)
+
+namespace JSC { namespace B3 { namespace Air {
+
+class Code;
+
+// This is a register allocation phase based on Andrew Appel's Iterated Register Coalescing
+// http://www.cs.cmu.edu/afs/cs/academic/class/15745-s07/www/papers/george.pdf
+void iteratedRegisterCoalescing(Code&amp;);
+
+} } } // namespace JSC::B3::Air
+
+#endif // ENABLE(B3_JIT)
+
+#endif // AirIteratedRegisterCoalescing_h
+
</ins></span></pre></div>
<a id="trunkSourceJavaScriptCoreb3airAirTmph"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/b3/air/AirTmp.h (192291 => 192292)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/b3/air/AirTmp.h        2015-11-11 04:29:59 UTC (rev 192291)
+++ trunk/Source/JavaScriptCore/b3/air/AirTmp.h        2015-11-11 05:10:43 UTC (rev 192292)
</span><span class="lines">@@ -172,7 +172,16 @@
</span><span class="cx">     {
</span><span class="cx">         return WTF::IntHash&lt;int&gt;::hash(m_value);
</span><span class="cx">     }
</span><del>-    
</del><ins>+
+    unsigned internalValue() const { return static_cast&lt;unsigned&gt;(m_value); }
+
+    static Tmp tmpForInternalValue(unsigned index)
+    {
+        Tmp result;
+        result.m_value = static_cast&lt;int&gt;(index);
+        return result;
+    }
+
</ins><span class="cx"> private:
</span><span class="cx">     static int encodeGP(unsigned index)
</span><span class="cx">     {
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoreb3testb3cpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/b3/testb3.cpp (192291 => 192292)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/b3/testb3.cpp        2015-11-11 04:29:59 UTC (rev 192291)
+++ trunk/Source/JavaScriptCore/b3/testb3.cpp        2015-11-11 05:10:43 UTC (rev 192292)
</span><span class="lines">@@ -1808,6 +1808,29 @@
</span><span class="cx">     }
</span><span class="cx"> }
</span><span class="cx"> 
</span><ins>+void testSpillGP()
+{
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Vector&lt;Value*&gt; sources;
+    sources.append(root-&gt;appendNew&lt;ArgumentRegValue&gt;(proc, Origin(), GPRInfo::argumentGPR0));
+    sources.append(root-&gt;appendNew&lt;ArgumentRegValue&gt;(proc, Origin(), GPRInfo::argumentGPR1));
+
+    for (unsigned i = 0; i &lt; 30; ++i) {
+        sources.append(
+            root-&gt;appendNew&lt;Value&gt;(proc, Add, Origin(), sources[sources.size() - 1], sources[sources.size() - 2])
+        );
+    }
+
+    Value* total = root-&gt;appendNew&lt;Const64Value&gt;(proc, Origin(), 0);
+    for (Value* value : sources)
+        total = root-&gt;appendNew&lt;Value&gt;(proc, Add, Origin(), total, value);
+
+    root-&gt;appendNew&lt;ControlValue&gt;(proc, Return, Origin(), total);
+    compileAndRun&lt;int&gt;(proc, 1, 2);
+}
+
</ins><span class="cx"> void testBranch()
</span><span class="cx"> {
</span><span class="cx">     Procedure proc;
</span><span class="lines">@@ -3302,6 +3325,8 @@
</span><span class="cx">     RUN(testLoad&lt;uint16_t&gt;(Load16Z, 1000000000));
</span><span class="cx">     RUN(testLoad&lt;uint16_t&gt;(Load16Z, -1000000000));
</span><span class="cx"> 
</span><ins>+    RUN(testSpillGP());
+
</ins><span class="cx">     RUN(testCallSimple(1, 2));
</span><span class="cx">     RUN(testCallFunctionWithHellaArguments());
</span><span class="cx"> 
</span></span></pre>
</div>
</div>

</body>
</html>