<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>[188937] branches/jsc-tailcall/Source/JavaScriptCore</title>
</head>
<body>

<style type="text/css"><!--
#msg dl.meta { border: 1px #006 solid; background: #369; padding: 6px; color: #fff; }
#msg dl.meta dt { float: left; width: 6em; font-weight: bold; }
#msg dt:after { content:':';}
#msg dl, #msg dt, #msg ul, #msg li, #header, #footer, #logmsg { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt;  }
#msg dl a { font-weight: bold}
#msg dl a:link    { color:#fc3; }
#msg dl a:active  { color:#ff0; }
#msg dl a:visited { color:#cc6; }
h3 { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt; font-weight: bold; }
#msg pre { overflow: auto; background: #ffc; border: 1px #fa0 solid; padding: 6px; }
#logmsg { background: #ffc; border: 1px #fa0 solid; padding: 1em 1em 0 1em; }
#logmsg p, #logmsg pre, #logmsg blockquote { margin: 0 0 1em 0; }
#logmsg p, #logmsg li, #logmsg dt, #logmsg dd { line-height: 14pt; }
#logmsg h1, #logmsg h2, #logmsg h3, #logmsg h4, #logmsg h5, #logmsg h6 { margin: .5em 0; }
#logmsg h1:first-child, #logmsg h2:first-child, #logmsg h3:first-child, #logmsg h4:first-child, #logmsg h5:first-child, #logmsg h6:first-child { margin-top: 0; }
#logmsg ul, #logmsg ol { padding: 0; list-style-position: inside; margin: 0 0 0 1em; }
#logmsg ul { text-indent: -1em; padding-left: 1em; }
#logmsg ol { text-indent: -1.5em; padding-left: 1.5em; }
#logmsg > ul, #logmsg > ol { margin: 0 0 1em 0; }
#logmsg pre { background: #eee; padding: 1em; }
#logmsg blockquote { border: 1px solid #fa0; border-left-width: 10px; padding: 1em 1em 0 1em; background: white;}
#logmsg dl { margin: 0; }
#logmsg dt { font-weight: bold; }
#logmsg dd { margin: 0; padding: 0 0 0.5em 0; }
#logmsg dd:before { content:'\00bb';}
#logmsg table { border-spacing: 0px; border-collapse: collapse; border-top: 4px solid #fa0; border-bottom: 1px solid #fa0; background: #fff; }
#logmsg table th { text-align: left; font-weight: normal; padding: 0.2em 0.5em; border-top: 1px dotted #fa0; }
#logmsg table td { text-align: right; border-top: 1px dotted #fa0; padding: 0.2em 0.5em; }
#logmsg table thead th { text-align: center; border-bottom: 1px solid #fa0; }
#logmsg table th.Corner { text-align: left; }
#logmsg hr { border: none 0; border-top: 2px dashed #fa0; height: 1px; }
#header, #footer { color: #fff; background: #636; border: 1px #300 solid; padding: 6px; }
#patch { width: 100%; }
#patch h4 {font-family: verdana,arial,helvetica,sans-serif;font-size:10pt;padding:8px;background:#369;color:#fff;margin:0;}
#patch .propset h4, #patch .binary h4 {margin:0;}
#patch pre {padding:0;line-height:1.2em;margin:0;}
#patch .diff {width:100%;background:#eee;padding: 0 0 10px 0;overflow:auto;}
#patch .propset .diff, #patch .binary .diff  {padding:10px 0;}
#patch span {display:block;padding:0 10px;}
#patch .modfile, #patch .addfile, #patch .delfile, #patch .propset, #patch .binary, #patch .copfile {border:1px solid #ccc;margin:10px 0;}
#patch ins {background:#dfd;text-decoration:none;display:block;padding:0 10px;}
#patch del {background:#fdd;text-decoration:none;display:block;padding:0 10px;}
#patch .lines, .info {color:#888;background:#fff;}
--></style>
<div id="msg">
<dl class="meta">
<dt>Revision</dt> <dd><a href="http://trac.webkit.org/projects/webkit/changeset/188937">188937</a></dd>
<dt>Author</dt> <dd>basile_clement@apple.com</dd>
<dt>Date</dt> <dd>2015-08-25 15:59:36 -0700 (Tue, 25 Aug 2015)</dd>
</dl>

<h3>Log Message</h3>
<pre>jsc-tailcall: We should reuse the frame efficiently in the DFG instead of doing a memmove
https://bugs.webkit.org/show_bug.cgi?id=147508

Reviewed by Michael Saboff.

This introduces a new class (CallFrameShuffler) that is responsible for
efficiently building the new frame when performing a tail call. In
order for Repatch to know the positions of arguments in registers and
on the stack (e.g. for polymorphic call inline caches), we store a
CallFrameShuffleData in the CallLinkInfo. Additionally, the JIT and DFG
compilers now use CallFrameShuffler instead of
CCallHelpers::prepareForTailCallSlow() to build the frame for a tail
call (a condensed usage sketch follows this log message).

When taking a slow path, we still build the frame as if doing a regular
call, because we could throw an exception and would then need the
caller's frame. This means that virtual calls do not yet benefit from
the efficient frame move; we will take care of this in a subsequent patch.

* CMakeLists.txt:
* JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
* JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
* JavaScriptCore.xcodeproj/project.pbxproj:
* assembler/AbortReason.h:
* bytecode/CallLinkInfo.h:
(JSC::CallLinkInfo::setFrameShuffleData):
(JSC::CallLinkInfo::frameShuffleData):
* dfg/DFGGenerationInfo.h:
(JSC::DFG::GenerationInfo::recovery):
* dfg/DFGSpeculativeJIT32_64.cpp:
(JSC::DFG::SpeculativeJIT::emitCall):
* dfg/DFGSpeculativeJIT64.cpp:
(JSC::DFG::SpeculativeJIT::emitCall):
* jit/CallFrameShuffleData.cpp: Added.
(JSC::CallFrameShuffleData::setupCalleeSaveRegisters):
* jit/CallFrameShuffleData.h: Added.
* jit/CallFrameShuffler.cpp: Added.
(JSC::CallFrameShuffler::CallFrameShuffler):
(JSC::CallFrameShuffler::dump):
(JSC::CallFrameShuffler::getCachedRecovery):
(JSC::CallFrameShuffler::setCachedRecovery):
(JSC::CallFrameShuffler::spill):
(JSC::CallFrameShuffler::emitDeltaCheck):
(JSC::CallFrameShuffler::prepareForSlowPath):
(JSC::CallFrameShuffler::prepareForTailCall):
(JSC::CallFrameShuffler::tryWrites):
(JSC::CallFrameShuffler::performSafeWrites):
(JSC::CallFrameShuffler::prepareAny):
* jit/CallFrameShuffler.h: Added.
(JSC::CachedRecovery::CachedRecovery):
(JSC::CachedRecovery::targets):
(JSC::CachedRecovery::addTarget):
(JSC::CachedRecovery::removeTarget):
(JSC::CachedRecovery::clearTargets):
(JSC::CachedRecovery::setWantedJSValueRegs):
(JSC::CachedRecovery::boxingRequiresGPR):
(JSC::CachedRecovery::boxingRequiresFPR):
(JSC::CachedRecovery::recovery):
(JSC::CachedRecovery::setRecovery):
(JSC::CachedRecovery::wantedJSValueRegs):
(JSC::CallFrameShuffler::lockGPR):
(JSC::CallFrameShuffler::acquireGPR):
(JSC::CallFrameShuffler::releaseGPR):
(JSC::CallFrameShuffler::snapshot):
(JSC::CallFrameShuffler::setCalleeJSValueRegs):
(JSC::CallFrameShuffler::assumeCalleeIsCell):
(JSC::CallFrameShuffler::canBox):
(JSC::CallFrameShuffler::ensureBox):
(JSC::CallFrameShuffler::ensureLoad):
(JSC::CallFrameShuffler::canLoadAndBox):
(JSC::CallFrameShuffler::updateRecovery):
(JSC::CallFrameShuffler::clearCachedRecovery):
(JSC::CallFrameShuffler::addCachedRecovery):
(JSC::CallFrameShuffler::numLocals):
(JSC::CallFrameShuffler::getOld):
(JSC::CallFrameShuffler::setOld):
(JSC::CallFrameShuffler::firstOld):
(JSC::CallFrameShuffler::lastOld):
(JSC::CallFrameShuffler::isValidOld):
(JSC::CallFrameShuffler::argCount):
(JSC::CallFrameShuffler::getNew):
(JSC::CallFrameShuffler::setNew):
(JSC::CallFrameShuffler::addNew):
(JSC::CallFrameShuffler::firstNew):
(JSC::CallFrameShuffler::lastNew):
(JSC::CallFrameShuffler::isValidNew):
(JSC::CallFrameShuffler::newAsOld):
(JSC::CallFrameShuffler::getFreeRegister):
(JSC::CallFrameShuffler::getFreeGPR):
(JSC::CallFrameShuffler::getFreeFPR):
(JSC::CallFrameShuffler::hasFreeRegister):
(JSC::CallFrameShuffler::ensureRegister):
(JSC::CallFrameShuffler::ensureGPR):
(JSC::CallFrameShuffler::ensureFPR):
(JSC::CallFrameShuffler::addressForOld):
(JSC::CallFrameShuffler::isUndecided):
(JSC::CallFrameShuffler::isSlowPath):
(JSC::CallFrameShuffler::addressForNew):
(JSC::CallFrameShuffler::dangerFrontier):
(JSC::CallFrameShuffler::isDangerNew):
(JSC::CallFrameShuffler::updateDangerFrontier):
(JSC::CallFrameShuffler::hasOnlySafeWrites):
* jit/CallFrameShuffler32_64.cpp: Added.
(JSC::CallFrameShuffler::emitStore):
(JSC::CallFrameShuffler::emitBox):
(JSC::CallFrameShuffler::emitLoad):
(JSC::CallFrameShuffler::canLoad):
(JSC::CachedRecovery::loadsIntoFPR):
(JSC::CachedRecovery::loadsIntoGPR):
(JSC::CallFrameShuffler::emitDisplace):
* jit/CallFrameShuffler64.cpp: Added.
(JSC::CallFrameShuffler::emitStore):
(JSC::CallFrameShuffler::emitBox):
(JSC::CallFrameShuffler::emitLoad):
(JSC::CallFrameShuffler::canLoad):
(JSC::CachedRecovery::loadsIntoFPR):
(JSC::CachedRecovery::loadsIntoGPR):
(JSC::CallFrameShuffler::emitDisplace):
* jit/JITCall.cpp:
(JSC::JIT::compileOpCall):
(JSC::JIT::compileOpCallSlowCase):
* jit/RegisterMap.h:
* jit/Repatch.cpp:
(JSC::linkPolymorphicCall):</pre>
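
<h3>Usage Sketch</h3>
<p>A condensed sketch of how a JIT tier drives the new class, assembled from
the 64-bit SpeculativeJIT::emitCall changes in the diff below. The
calleeGPR/numPassedArgs setup and operand bookkeeping are elided; all names
shown are the ones this patch introduces.</p>
<pre>// Describe where the callee and each argument currently live.
CallFrameShuffleData shuffleData;
shuffleData.numLocals = m_jit.graph().frameRegisterCount();
shuffleData.callee = ValueRecovery::inGPR(calleeGPR, DataFormatJS);
shuffleData.args.resize(numPassedArgs);
for (int i = 0; i &lt; numPassedArgs; ++i) {
    Edge argEdge = m_jit.graph().varArgChild(node, i + 1);
    GenerationInfo&amp; info = generationInfo(argEdge.node());
    use(argEdge);
    shuffleData.args[i] = info.recovery(argEdge-&gt;virtualRegister());
}
shuffleData.setupCalleeSaveRegisters(m_jit.codeBlock());

// Fast path: record the layout for Repatch, then shuffle the caller's
// frame directly into the tail callee's frame -- no memmove.
callLinkInfo-&gt;setFrameShuffleData(shuffleData);
CallFrameShuffler(m_jit, shuffleData).prepareForTailCall();

// Slow path: build a regular frame instead (an exception thrown here
// still needs the caller's frame), leaving the callee in the register
// the link stub expects (regT0 on 64-bit).
CallFrameShuffler slowPathShuffler(m_jit, shuffleData);
slowPathShuffler.setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT0));
slowPathShuffler.prepareForSlowPath();</pre>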

<h3>Modified Paths</h3>
<ul>
<li><a href="#branchesjsctailcallSourceJavaScriptCoreCMakeListstxt">branches/jsc-tailcall/Source/JavaScriptCore/CMakeLists.txt</a></li>
<li><a href="#branchesjsctailcallSourceJavaScriptCoreChangeLog">branches/jsc-tailcall/Source/JavaScriptCore/ChangeLog</a></li>
<li><a href="#branchesjsctailcallSourceJavaScriptCoreJavaScriptCorevcxprojJavaScriptCorevcxproj">branches/jsc-tailcall/Source/JavaScriptCore/JavaScriptCore.vcxproj/JavaScriptCore.vcxproj</a></li>
<li><a href="#branchesjsctailcallSourceJavaScriptCoreJavaScriptCorevcxprojJavaScriptCorevcxprojfilters">branches/jsc-tailcall/Source/JavaScriptCore/JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters</a></li>
<li><a href="#branchesjsctailcallSourceJavaScriptCoreJavaScriptCorexcodeprojprojectpbxproj">branches/jsc-tailcall/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj</a></li>
<li><a href="#branchesjsctailcallSourceJavaScriptCoreassemblerAbortReasonh">branches/jsc-tailcall/Source/JavaScriptCore/assembler/AbortReason.h</a></li>
<li><a href="#branchesjsctailcallSourceJavaScriptCorebytecodeCallLinkInfoh">branches/jsc-tailcall/Source/JavaScriptCore/bytecode/CallLinkInfo.h</a></li>
<li><a href="#branchesjsctailcallSourceJavaScriptCoredfgDFGGenerationInfoh">branches/jsc-tailcall/Source/JavaScriptCore/dfg/DFGGenerationInfo.h</a></li>
<li><a href="#branchesjsctailcallSourceJavaScriptCoredfgDFGSpeculativeJIT32_64cpp">branches/jsc-tailcall/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp</a></li>
<li><a href="#branchesjsctailcallSourceJavaScriptCoredfgDFGSpeculativeJIT64cpp">branches/jsc-tailcall/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp</a></li>
<li><a href="#branchesjsctailcallSourceJavaScriptCorejitJITCallcpp">branches/jsc-tailcall/Source/JavaScriptCore/jit/JITCall.cpp</a></li>
<li><a href="#branchesjsctailcallSourceJavaScriptCorejitRegisterMaph">branches/jsc-tailcall/Source/JavaScriptCore/jit/RegisterMap.h</a></li>
<li><a href="#branchesjsctailcallSourceJavaScriptCorejitRepatchcpp">branches/jsc-tailcall/Source/JavaScriptCore/jit/Repatch.cpp</a></li>
</ul>

<h3>Added Paths</h3>
<ul>
<li><a href="#branchesjsctailcallSourceJavaScriptCorejitCallFrameShuffleDatacpp">branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp</a></li>
<li><a href="#branchesjsctailcallSourceJavaScriptCorejitCallFrameShuffleDatah">branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffleData.h</a></li>
<li><a href="#branchesjsctailcallSourceJavaScriptCorejitCallFrameShufflercpp">branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffler.cpp</a></li>
<li><a href="#branchesjsctailcallSourceJavaScriptCorejitCallFrameShufflerh">branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffler.h</a></li>
<li><a href="#branchesjsctailcallSourceJavaScriptCorejitCallFrameShuffler32_64cpp">branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp</a></li>
<li><a href="#branchesjsctailcallSourceJavaScriptCorejitCallFrameShuffler64cpp">branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffler64.cpp</a></li>
</ul>

</div>
<div id="patch">
<h3>Diff</h3>
<a id="branchesjsctailcallSourceJavaScriptCoreCMakeListstxt"></a>
<div class="modfile"><h4>Modified: branches/jsc-tailcall/Source/JavaScriptCore/CMakeLists.txt (188936 => 188937)</h4>
<pre class="diff"><span>
<span class="info">--- branches/jsc-tailcall/Source/JavaScriptCore/CMakeLists.txt        2015-08-25 22:50:15 UTC (rev 188936)
+++ branches/jsc-tailcall/Source/JavaScriptCore/CMakeLists.txt        2015-08-25 22:59:36 UTC (rev 188937)
</span><span class="lines">@@ -344,6 +344,10 @@
</span><span class="cx">     jit/AccessorCallJITStubRoutine.cpp
</span><span class="cx">     jit/AssemblyHelpers.cpp
</span><span class="cx">     jit/BinarySwitch.cpp
</span><ins>+    jit/CallFrameShuffleData.cpp
+    jit/CallFrameShuffler.cpp
+    jit/CallFrameShuffler32_64.cpp
+    jit/CallFrameShuffler64.cpp
</ins><span class="cx">     jit/ExecutableAllocationFuzz.cpp
</span><span class="cx">     jit/ExecutableAllocator.cpp
</span><span class="cx">     jit/ExecutableAllocatorFixedVMPool.cpp
</span></span></pre></div>
<a id="branchesjsctailcallSourceJavaScriptCoreChangeLog"></a>
<div class="modfile"><h4>Modified: branches/jsc-tailcall/Source/JavaScriptCore/ChangeLog (188936 => 188937)</h4>
<pre class="diff"><span>
<span class="info">--- branches/jsc-tailcall/Source/JavaScriptCore/ChangeLog        2015-08-25 22:50:15 UTC (rev 188936)
+++ branches/jsc-tailcall/Source/JavaScriptCore/ChangeLog        2015-08-25 22:59:36 UTC (rev 188937)
</span><span class="lines">@@ -1,3 +1,130 @@
</span><ins>+2015-08-24  Basile Clement  &lt;basile_clement@apple.com&gt;
+
+        jsc-tailcall: We should reuse the frame efficiently in the DFG instead of doing a memmove
+        https://bugs.webkit.org/show_bug.cgi?id=147508
+
+        Reviewed by Michael Saboff.
+
+        This introduces a new class (CallFrameShuffler) that is responsible for
+        efficiently building the new frame when performing a tail call. In
+        order for Repatch to know the positions of arguments in registers and
+        on the stack (e.g. for polymorphic call inline caches), we store a
+        CallFrameShuffleData in the CallLinkInfo. Additionally, the JIT and DFG
+        compilers now use CallFrameShuffler instead of
+        CCallHelpers::prepareForTailCallSlow() to build the frame for a tail
+        call.
+
+        When taking a slow path, we still build the frame as if doing a regular
+        call, because we could throw an exception and would then need the
+        caller's frame. This means that virtual calls do not yet benefit from
+        the efficient frame move; we will take care of this in a subsequent patch.
+
+        * CMakeLists.txt:
+        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
+        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
+        * JavaScriptCore.xcodeproj/project.pbxproj:
+        * assembler/AbortReason.h:
+        * bytecode/CallLinkInfo.h:
+        (JSC::CallLinkInfo::setFrameShuffleData):
+        (JSC::CallLinkInfo::frameShuffleData):
+        * dfg/DFGGenerationInfo.h:
+        (JSC::DFG::GenerationInfo::recovery):
+        * dfg/DFGSpeculativeJIT32_64.cpp:
+        (JSC::DFG::SpeculativeJIT::emitCall):
+        * dfg/DFGSpeculativeJIT64.cpp:
+        (JSC::DFG::SpeculativeJIT::emitCall):
+        * jit/CallFrameShuffleData.cpp: Added.
+        (JSC::CallFrameShuffleData::setupCalleeSaveRegisters):
+        * jit/CallFrameShuffleData.h: Added.
+        * jit/CallFrameShuffler.cpp: Added.
+        (JSC::CallFrameShuffler::CallFrameShuffler):
+        (JSC::CallFrameShuffler::dump):
+        (JSC::CallFrameShuffler::getCachedRecovery):
+        (JSC::CallFrameShuffler::setCachedRecovery):
+        (JSC::CallFrameShuffler::spill):
+        (JSC::CallFrameShuffler::emitDeltaCheck):
+        (JSC::CallFrameShuffler::prepareForSlowPath):
+        (JSC::CallFrameShuffler::prepareForTailCall):
+        (JSC::CallFrameShuffler::tryWrites):
+        (JSC::CallFrameShuffler::performSafeWrites):
+        (JSC::CallFrameShuffler::prepareAny):
+        * jit/CallFrameShuffler.h: Added.
+        (JSC::CachedRecovery::CachedRecovery):
+        (JSC::CachedRecovery::targets):
+        (JSC::CachedRecovery::addTarget):
+        (JSC::CachedRecovery::removeTarget):
+        (JSC::CachedRecovery::clearTargets):
+        (JSC::CachedRecovery::setWantedJSValueRegs):
+        (JSC::CachedRecovery::boxingRequiresGPR):
+        (JSC::CachedRecovery::boxingRequiresFPR):
+        (JSC::CachedRecovery::recovery):
+        (JSC::CachedRecovery::setRecovery):
+        (JSC::CachedRecovery::wantedJSValueRegs):
+        (JSC::CallFrameShuffler::lockGPR):
+        (JSC::CallFrameShuffler::acquireGPR):
+        (JSC::CallFrameShuffler::releaseGPR):
+        (JSC::CallFrameShuffler::snapshot):
+        (JSC::CallFrameShuffler::setCalleeJSValueRegs):
+        (JSC::CallFrameShuffler::assumeCalleeIsCell):
+        (JSC::CallFrameShuffler::canBox):
+        (JSC::CallFrameShuffler::ensureBox):
+        (JSC::CallFrameShuffler::ensureLoad):
+        (JSC::CallFrameShuffler::canLoadAndBox):
+        (JSC::CallFrameShuffler::updateRecovery):
+        (JSC::CallFrameShuffler::clearCachedRecovery):
+        (JSC::CallFrameShuffler::addCachedRecovery):
+        (JSC::CallFrameShuffler::numLocals):
+        (JSC::CallFrameShuffler::getOld):
+        (JSC::CallFrameShuffler::setOld):
+        (JSC::CallFrameShuffler::firstOld):
+        (JSC::CallFrameShuffler::lastOld):
+        (JSC::CallFrameShuffler::isValidOld):
+        (JSC::CallFrameShuffler::argCount):
+        (JSC::CallFrameShuffler::getNew):
+        (JSC::CallFrameShuffler::setNew):
+        (JSC::CallFrameShuffler::addNew):
+        (JSC::CallFrameShuffler::firstNew):
+        (JSC::CallFrameShuffler::lastNew):
+        (JSC::CallFrameShuffler::isValidNew):
+        (JSC::CallFrameShuffler::newAsOld):
+        (JSC::CallFrameShuffler::getFreeRegister):
+        (JSC::CallFrameShuffler::getFreeGPR):
+        (JSC::CallFrameShuffler::getFreeFPR):
+        (JSC::CallFrameShuffler::hasFreeRegister):
+        (JSC::CallFrameShuffler::ensureRegister):
+        (JSC::CallFrameShuffler::ensureGPR):
+        (JSC::CallFrameShuffler::ensureFPR):
+        (JSC::CallFrameShuffler::addressForOld):
+        (JSC::CallFrameShuffler::isUndecided):
+        (JSC::CallFrameShuffler::isSlowPath):
+        (JSC::CallFrameShuffler::addressForNew):
+        (JSC::CallFrameShuffler::dangerFrontier):
+        (JSC::CallFrameShuffler::isDangerNew):
+        (JSC::CallFrameShuffler::updateDangerFrontier):
+        (JSC::CallFrameShuffler::hasOnlySafeWrites):
+        * jit/CallFrameShuffler32_64.cpp: Added.
+        (JSC::CallFrameShuffler::emitStore):
+        (JSC::CallFrameShuffler::emitBox):
+        (JSC::CallFrameShuffler::emitLoad):
+        (JSC::CallFrameShuffler::canLoad):
+        (JSC::CachedRecovery::loadsIntoFPR):
+        (JSC::CachedRecovery::loadsIntoGPR):
+        (JSC::CallFrameShuffler::emitDisplace):
+        * jit/CallFrameShuffler64.cpp: Added.
+        (JSC::CallFrameShuffler::emitStore):
+        (JSC::CallFrameShuffler::emitBox):
+        (JSC::CallFrameShuffler::emitLoad):
+        (JSC::CallFrameShuffler::canLoad):
+        (JSC::CachedRecovery::loadsIntoFPR):
+        (JSC::CachedRecovery::loadsIntoGPR):
+        (JSC::CallFrameShuffler::emitDisplace):
+        * jit/JITCall.cpp:
+        (JSC::JIT::compileOpCall):
+        (JSC::JIT::compileOpCallSlowCase):
+        * jit/RegisterMap.h:
+        * jit/Repatch.cpp:
+        (JSC::linkPolymorphicCall):
+
</ins><span class="cx"> 2015-08-25  Basile Clement  &lt;basile_clement@apple.com&gt;
</span><span class="cx"> 
</span><span class="cx">         jsc-tailcall: Get rid of FTLValueFormat
</span></span></pre></div>
<a id="branchesjsctailcallSourceJavaScriptCoreJavaScriptCorevcxprojJavaScriptCorevcxproj"></a>
<div class="modfile"><h4>Modified: branches/jsc-tailcall/Source/JavaScriptCore/JavaScriptCore.vcxproj/JavaScriptCore.vcxproj (188936 => 188937)</h4>
<pre class="diff"><span>
<span class="info">--- branches/jsc-tailcall/Source/JavaScriptCore/JavaScriptCore.vcxproj/JavaScriptCore.vcxproj        2015-08-25 22:50:15 UTC (rev 188936)
+++ branches/jsc-tailcall/Source/JavaScriptCore/JavaScriptCore.vcxproj/JavaScriptCore.vcxproj        2015-08-25 22:59:36 UTC (rev 188937)
</span><span class="lines">@@ -606,6 +606,10 @@
</span><span class="cx">     &lt;ClCompile Include=&quot;..\jit\AccessorCallJITStubRoutine.cpp&quot; /&gt;
</span><span class="cx">     &lt;ClCompile Include=&quot;..\jit\AssemblyHelpers.cpp&quot; /&gt;
</span><span class="cx">     &lt;ClCompile Include=&quot;..\jit\BinarySwitch.cpp&quot; /&gt;
</span><ins>+    &lt;ClCompile Include=&quot;..\jit\CallFrameShuffleData.cpp&quot; /&gt;
+    &lt;ClCompile Include=&quot;..\jit\CallFrameShuffler.cpp&quot; /&gt;
+    &lt;ClCompile Include=&quot;..\jit\CallFrameShuffler32_64.cpp&quot; /&gt;
+    &lt;ClCompile Include=&quot;..\jit\CallFrameShuffler64.cpp&quot; /&gt;
</ins><span class="cx">     &lt;ClCompile Include=&quot;..\jit\ExecutableAllocationFuzz.cpp&quot; /&gt;
</span><span class="cx">     &lt;ClCompile Include=&quot;..\jit\ExecutableAllocator.cpp&quot; /&gt;
</span><span class="cx">     &lt;ClCompile Include=&quot;..\jit\ExecutableAllocatorFixedVMPool.cpp&quot; /&gt;
</span><span class="lines">@@ -1369,6 +1373,8 @@
</span><span class="cx">     &lt;ClInclude Include=&quot;..\jit\AccessorCallJITStubRoutine.h&quot; /&gt;
</span><span class="cx">     &lt;ClInclude Include=&quot;..\jit\AssemblyHelpers.h&quot; /&gt;
</span><span class="cx">     &lt;ClInclude Include=&quot;..\jit\BinarySwitch.h&quot; /&gt;
</span><ins>+    &lt;ClInclude Include=&quot;..\jit\CallFrameShuffleData.h&quot; /&gt;
+    &lt;ClInclude Include=&quot;..\jit\CallFrameShuffler.h&quot; /&gt;
</ins><span class="cx">     &lt;ClInclude Include=&quot;..\jit\CCallHelpers.h&quot; /&gt;
</span><span class="cx">     &lt;ClInclude Include=&quot;..\jit\CompactJITCodeMap.h&quot; /&gt;
</span><span class="cx">     &lt;ClInclude Include=&quot;..\jit\ExecutableAllocationFuzz.h&quot; /&gt;
</span></span></pre></div>
<a id="branchesjsctailcallSourceJavaScriptCoreJavaScriptCorevcxprojJavaScriptCorevcxprojfilters"></a>
<div class="modfile"><h4>Modified: branches/jsc-tailcall/Source/JavaScriptCore/JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters (188936 => 188937)</h4>
<pre class="diff"><span>
<span class="info">--- branches/jsc-tailcall/Source/JavaScriptCore/JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters        2015-08-25 22:50:15 UTC (rev 188936)
+++ branches/jsc-tailcall/Source/JavaScriptCore/JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters        2015-08-25 22:59:36 UTC (rev 188937)
</span><span class="lines">@@ -381,6 +381,18 @@
</span><span class="cx">     &lt;ClCompile Include=&quot;..\jit\HostCallReturnValue.cpp&quot;&gt;
</span><span class="cx">       &lt;Filter&gt;jit&lt;/Filter&gt;
</span><span class="cx">     &lt;/ClCompile&gt;
</span><ins>+    &lt;ClCompile Include=&quot;..\jit\CallFrameShuffleData.cpp&quot;&gt;
+      &lt;Filter&gt;jit&lt;/Filter&gt;
+    &lt;/ClCompile&gt;
+    &lt;ClCompile Include=&quot;..\jit\CallFrameShuffler.cpp&quot;&gt;
+      &lt;Filter&gt;jit&lt;/Filter&gt;
+    &lt;/ClCompile&gt;
+    &lt;ClCompile Include=&quot;..\jit\CallFrameShuffler32_64.cpp&quot;&gt;
+      &lt;Filter&gt;jit&lt;/Filter&gt;
+    &lt;/ClCompile&gt;
+    &lt;ClCompile Include=&quot;..\jit\CallFrameShuffler64.cpp&quot;&gt;
+      &lt;Filter&gt;jit&lt;/Filter&gt;
+    &lt;/ClCompile&gt;
</ins><span class="cx">     &lt;ClCompile Include=&quot;..\jit\JIT.cpp&quot;&gt;
</span><span class="cx">       &lt;Filter&gt;jit&lt;/Filter&gt;
</span><span class="cx">     &lt;/ClCompile&gt;
</span></span></pre></div>
<a id="branchesjsctailcallSourceJavaScriptCoreJavaScriptCorexcodeprojprojectpbxproj"></a>
<div class="modfile"><h4>Modified: branches/jsc-tailcall/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj (188936 => 188937)</h4>
<pre class="diff"><span>
<span class="info">--- branches/jsc-tailcall/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj        2015-08-25 22:50:15 UTC (rev 188936)
+++ branches/jsc-tailcall/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj        2015-08-25 22:59:36 UTC (rev 188937)
</span><span class="lines">@@ -963,7 +963,13 @@
</span><span class="cx">                 627673241B680C1E00FD9F2E /* CallMode.h in Headers */ = {isa = PBXBuildFile; fileRef = 627673221B680C1E00FD9F2E /* CallMode.h */; settings = {ATTRIBUTES = (Private, ); }; };
</span><span class="cx">                 62D2D38F1ADF103F000206C1 /* FunctionRareData.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 62D2D38D1ADF103F000206C1 /* FunctionRareData.cpp */; };
</span><span class="cx">                 62D2D3901ADF103F000206C1 /* FunctionRareData.h in Headers */ = {isa = PBXBuildFile; fileRef = 62D2D38E1ADF103F000206C1 /* FunctionRareData.h */; settings = {ATTRIBUTES = (Private, ); }; };
</span><ins>+                62D755D41B84FB3D001801FA /* CallFrameShuffler64.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 62D755D31B84FB39001801FA /* CallFrameShuffler64.cpp */; };
+                62D755D51B84FB40001801FA /* CallFrameShuffler32_64.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 62D755D21B84FB39001801FA /* CallFrameShuffler32_64.cpp */; };
+                62D755D61B84FB46001801FA /* CallFrameShuffler.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 62D755D01B84FB39001801FA /* CallFrameShuffler.cpp */; };
+                62D755D71B84FB4A001801FA /* CallFrameShuffler.h in Headers */ = {isa = PBXBuildFile; fileRef = 62D755D11B84FB39001801FA /* CallFrameShuffler.h */; };
</ins><span class="cx">                 62E3D5F01B8D0B7300B868BB /* DataFormat.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 62E3D5EF1B8D0B7300B868BB /* DataFormat.cpp */; };
</span><ins>+                62EC9BB61B7EB07C00303AD1 /* CallFrameShuffleData.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 62EC9BB41B7EB07C00303AD1 /* CallFrameShuffleData.cpp */; };
+                62EC9BB71B7EB07C00303AD1 /* CallFrameShuffleData.h in Headers */ = {isa = PBXBuildFile; fileRef = 62EC9BB51B7EB07C00303AD1 /* CallFrameShuffleData.h */; settings = {ATTRIBUTES = (Private, ); }; };
</ins><span class="cx">                 62F2AA371B0BEDE300610C7A /* DFGLazyNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 62A9A29E1B0BED4800BD54CA /* DFGLazyNode.cpp */; };
</span><span class="cx">                 62F2AA381B0BEDE300610C7A /* DFGLazyNode.h in Headers */ = {isa = PBXBuildFile; fileRef = 62A9A29F1B0BED4800BD54CA /* DFGLazyNode.h */; settings = {ATTRIBUTES = (Private, ); }; };
</span><span class="cx">                 6507D29E0E871E5E00D7D896 /* JSTypeInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 6507D2970E871E4A00D7D896 /* JSTypeInfo.h */; settings = {ATTRIBUTES = (Private, ); }; };
</span><span class="lines">@@ -2681,7 +2687,13 @@
</span><span class="cx">                 62A9A29F1B0BED4800BD54CA /* DFGLazyNode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGLazyNode.h; path = dfg/DFGLazyNode.h; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><span class="cx">                 62D2D38D1ADF103F000206C1 /* FunctionRareData.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = FunctionRareData.cpp; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><span class="cx">                 62D2D38E1ADF103F000206C1 /* FunctionRareData.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = FunctionRareData.h; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><ins>+                62D755D01B84FB39001801FA /* CallFrameShuffler.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CallFrameShuffler.cpp; sourceTree = &quot;&lt;group&gt;&quot;; };
+                62D755D11B84FB39001801FA /* CallFrameShuffler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CallFrameShuffler.h; sourceTree = &quot;&lt;group&gt;&quot;; };
+                62D755D21B84FB39001801FA /* CallFrameShuffler32_64.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CallFrameShuffler32_64.cpp; sourceTree = &quot;&lt;group&gt;&quot;; };
+                62D755D31B84FB39001801FA /* CallFrameShuffler64.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CallFrameShuffler64.cpp; sourceTree = &quot;&lt;group&gt;&quot;; };
</ins><span class="cx">                 62E3D5EF1B8D0B7300B868BB /* DataFormat.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = DataFormat.cpp; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><ins>+                62EC9BB41B7EB07C00303AD1 /* CallFrameShuffleData.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CallFrameShuffleData.cpp; sourceTree = &quot;&lt;group&gt;&quot;; };
+                62EC9BB51B7EB07C00303AD1 /* CallFrameShuffleData.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CallFrameShuffleData.h; sourceTree = &quot;&lt;group&gt;&quot;; };
</ins><span class="cx">                 6507D2970E871E4A00D7D896 /* JSTypeInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSTypeInfo.h; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><span class="cx">                 651122E5140469BA002B101D /* testRegExp.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = testRegExp.cpp; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><span class="cx">                 6511230514046A4C002B101D /* testRegExp */ = {isa = PBXFileReference; explicitFileType = &quot;compiled.mach-o.executable&quot;; includeInIndex = 0; path = testRegExp; sourceTree = BUILT_PRODUCTS_DIR; };
</span><span class="lines">@@ -3899,16 +3911,22 @@
</span><span class="cx">                 1429D92C0ED22D7000B89619 /* jit */ = {
</span><span class="cx">                         isa = PBXGroup;
</span><span class="cx">                         children = (
</span><del>-                                0FF054F71AC35B4400E5BE57 /* ExecutableAllocationFuzz.cpp */,
-                                0FF054F81AC35B4400E5BE57 /* ExecutableAllocationFuzz.h */,
</del><span class="cx">                                 0F7576D018E1FEE9002EF4CD /* AccessorCallJITStubRoutine.cpp */,
</span><span class="cx">                                 0F7576D118E1FEE9002EF4CD /* AccessorCallJITStubRoutine.h */,
</span><span class="cx">                                 0F24E53B17EA9F5900ABB217 /* AssemblyHelpers.cpp */,
</span><span class="cx">                                 0F24E53C17EA9F5900ABB217 /* AssemblyHelpers.h */,
</span><span class="cx">                                 0F64B26F1A784BAF006E4E66 /* BinarySwitch.cpp */,
</span><span class="cx">                                 0F64B2701A784BAF006E4E66 /* BinarySwitch.h */,
</span><ins>+                                62EC9BB41B7EB07C00303AD1 /* CallFrameShuffleData.cpp */,
+                                62EC9BB51B7EB07C00303AD1 /* CallFrameShuffleData.h */,
+                                62D755D01B84FB39001801FA /* CallFrameShuffler.cpp */,
+                                62D755D11B84FB39001801FA /* CallFrameShuffler.h */,
+                                62D755D21B84FB39001801FA /* CallFrameShuffler32_64.cpp */,
+                                62D755D31B84FB39001801FA /* CallFrameShuffler64.cpp */,
</ins><span class="cx">                                 0F24E53D17EA9F5900ABB217 /* CCallHelpers.h */,
</span><span class="cx">                                 0FD82E37141AB14200179C94 /* CompactJITCodeMap.h */,
</span><ins>+                                0FF054F71AC35B4400E5BE57 /* ExecutableAllocationFuzz.cpp */,
+                                0FF054F81AC35B4400E5BE57 /* ExecutableAllocationFuzz.h */,
</ins><span class="cx">                                 A7B48DB60EE74CFC00DCBDB6 /* ExecutableAllocator.cpp */,
</span><span class="cx">                                 A7B48DB50EE74CFC00DCBDB6 /* ExecutableAllocator.h */,
</span><span class="cx">                                 86DB64630F95C6FC00D7D921 /* ExecutableAllocatorFixedVMPool.cpp */,
</span><span class="lines">@@ -5654,6 +5672,7 @@
</span><span class="cx">                                 C4F4B6F41A05C944005CAB76 /* cpp_generator.py in Headers */,
</span><span class="cx">                                 A532439418569709002ED692 /* generate-combined-inspector-json.py in Headers */,
</span><span class="cx">                                 A5840E27187C981E00843B10 /* cssmin.py in Headers */,
</span><ins>+                                62D755D71B84FB4A001801FA /* CallFrameShuffler.h in Headers */,
</ins><span class="cx">                                 A5EA710419F6DE720098F5EC /* generate_objc_backend_dispatcher_implementation.py in Headers */,
</span><span class="cx">                                 C4703CD5192844CC0013FBEA /* generator_templates.py in Headers */,
</span><span class="cx">                                 A5EA710519F6DE740098F5EC /* generate_objc_configuration_header.py in Headers */,
</span><span class="lines">@@ -5809,6 +5828,7 @@
</span><span class="cx">                                 0F3B3A281544C997003ED0FF /* DFGCFGSimplificationPhase.h in Headers */,
</span><span class="cx">                                 A77A424017A0BBFD00A8DB81 /* DFGClobberize.h in Headers */,
</span><span class="cx">                                 A77A424217A0BBFD00A8DB81 /* DFGClobberSet.h in Headers */,
</span><ins>+                                62EC9BB71B7EB07C00303AD1 /* CallFrameShuffleData.h in Headers */,
</ins><span class="cx">                                 0F7B294D14C3CD4C007C3DB1 /* DFGCommon.h in Headers */,
</span><span class="cx">                                 0FEA0A32170D40BF00BB722C /* DFGCommonData.h in Headers */,
</span><span class="cx">                                 0F38B01817CFE75500B144D3 /* DFGCompilationKey.h in Headers */,
</span><span class="lines">@@ -7104,6 +7124,7 @@
</span><span class="cx">                                 0FF0F19D16B72A08005DF95B /* DFGCommon.cpp in Sources */,
</span><span class="cx">                                 0FEA0A31170D40BF00BB722C /* DFGCommonData.cpp in Sources */,
</span><span class="cx">                                 0F38B01717CFE75500B144D3 /* DFGCompilationKey.cpp in Sources */,
</span><ins>+                                62EC9BB61B7EB07C00303AD1 /* CallFrameShuffleData.cpp in Sources */,
</ins><span class="cx">                                 0F38B01917CFE75500B144D3 /* DFGCompilationMode.cpp in Sources */,
</span><span class="cx">                                 0F3B3A1A153E68F2003ED0FF /* DFGConstantFoldingPhase.cpp in Sources */,
</span><span class="cx">                                 0FBE0F7216C1DB030082C5E8 /* DFGCPSRethreadingPhase.cpp in Sources */,
</span><span class="lines">@@ -7153,6 +7174,7 @@
</span><span class="cx">                                 0FF0F19C16B72A03005DF95B /* DFGNode.cpp in Sources */,
</span><span class="cx">                                 0FA581BA150E952C00B9A2D9 /* DFGNodeFlags.cpp in Sources */,
</span><span class="cx">                                 86EC9DCF1328DF82002B2AD7 /* DFGOperations.cpp in Sources */,
</span><ins>+                                62D755D61B84FB46001801FA /* CallFrameShuffler.cpp in Sources */,
</ins><span class="cx">                                 A7D89CFD17A0B8CC00773AD8 /* DFGOSRAvailabilityAnalysisPhase.cpp in Sources */,
</span><span class="cx">                                 0FD82E56141DAF0800179C94 /* DFGOSREntry.cpp in Sources */,
</span><span class="cx">                                 0FD8A32517D51F5700CA2C40 /* DFGOSREntrypointCreationPhase.cpp in Sources */,
</span><span class="lines">@@ -7274,6 +7296,7 @@
</span><span class="cx">                                 147F39CB107EC37600427A48 /* FunctionConstructor.cpp in Sources */,
</span><span class="cx">                                 0FF0F19F16B72A17005DF95B /* FunctionExecutableDump.cpp in Sources */,
</span><span class="cx">                                 52B310FD1974AE870080857C /* FunctionHasExecutedCache.cpp in Sources */,
</span><ins>+                                62D755D51B84FB40001801FA /* CallFrameShuffler32_64.cpp in Sources */,
</ins><span class="cx">                                 147F39CC107EC37600427A48 /* FunctionPrototype.cpp in Sources */,
</span><span class="cx">                                 0F64B2711A784BAF006E4E66 /* BinarySwitch.cpp in Sources */,
</span><span class="cx">                                 0F766D2F15A8DCE0008F363E /* GCAwareJITStubRoutine.cpp in Sources */,
</span><span class="lines">@@ -7395,6 +7418,7 @@
</span><span class="cx">                                 A72700900DAC6BBC00E548D7 /* JSNotAnObject.cpp in Sources */,
</span><span class="cx">                                 147F39D4107EC37600427A48 /* JSObject.cpp in Sources */,
</span><span class="cx">                                 1482B7E40A43076000517CFC /* JSObjectRef.cpp in Sources */,
</span><ins>+                                62D755D41B84FB3D001801FA /* CallFrameShuffler64.cpp in Sources */,
</ins><span class="cx">                                 A7F993600FD7325100A0B2D0 /* JSONObject.cpp in Sources */,
</span><span class="cx">                                 0FFB6C381AF48DDC00DB1BF7 /* TypeofType.cpp in Sources */,
</span><span class="cx">                                 95F6E6950E5B5F970091E860 /* JSProfilerPrivate.cpp in Sources */,
</span></span></pre></div>
<a id="branchesjsctailcallSourceJavaScriptCoreassemblerAbortReasonh"></a>
<div class="modfile"><h4>Modified: branches/jsc-tailcall/Source/JavaScriptCore/assembler/AbortReason.h (188936 => 188937)</h4>
<pre class="diff"><span>
<span class="info">--- branches/jsc-tailcall/Source/JavaScriptCore/assembler/AbortReason.h        2015-08-25 22:50:15 UTC (rev 188936)
+++ branches/jsc-tailcall/Source/JavaScriptCore/assembler/AbortReason.h        2015-08-25 22:59:36 UTC (rev 188937)
</span><span class="lines">@@ -63,6 +63,7 @@
</span><span class="cx">     JITGetByValResultIsNotEmpty                       = 250,
</span><span class="cx">     JITNotSupported                                   = 260,
</span><span class="cx">     JITOffsetIsNotOutOfLine                           = 270,
</span><ins>+    JITUnexpectedCallFrameSize                        = 275,
</ins><span class="cx">     JITUnreasonableLoopHintJumpTarget                 = 280,
</span><span class="cx">     RPWUnreasonableJumpTarget                         = 290,
</span><span class="cx">     RepatchIneffectiveWatchpoint                      = 300,
</span></span></pre></div>
<a id="branchesjsctailcallSourceJavaScriptCorebytecodeCallLinkInfoh"></a>
<div class="modfile"><h4>Modified: branches/jsc-tailcall/Source/JavaScriptCore/bytecode/CallLinkInfo.h (188936 => 188937)</h4>
<pre class="diff"><span>
<span class="info">--- branches/jsc-tailcall/Source/JavaScriptCore/bytecode/CallLinkInfo.h        2015-08-25 22:50:15 UTC (rev 188936)
+++ branches/jsc-tailcall/Source/JavaScriptCore/bytecode/CallLinkInfo.h        2015-08-25 22:59:36 UTC (rev 188937)
</span><span class="lines">@@ -26,6 +26,7 @@
</span><span class="cx"> #ifndef CallLinkInfo_h
</span><span class="cx"> #define CallLinkInfo_h
</span><span class="cx"> 
</span><ins>+#include &quot;CallFrameShuffleData.h&quot;
</ins><span class="cx"> #include &quot;CallMode.h&quot;
</span><span class="cx"> #include &quot;CodeLocation.h&quot;
</span><span class="cx"> #include &quot;CodeSpecializationKind.h&quot;
</span><span class="lines">@@ -329,6 +330,16 @@
</span><span class="cx"> 
</span><span class="cx">     void visitWeak(RepatchBuffer&amp;);
</span><span class="cx"> 
</span><ins>+    void setFrameShuffleData(const CallFrameShuffleData&amp; shuffleData)
+    {
+        m_frameShuffleData = std::make_unique&lt;CallFrameShuffleData&gt;(shuffleData);
+    }
+
+    const CallFrameShuffleData* frameShuffleData()
+    {
+        return m_frameShuffleData.get();
+    }
+
</ins><span class="cx"> private:
</span><span class="cx">     CodeLocationNearCall m_callReturnLocation;
</span><span class="cx">     CodeLocationDataLabelPtr m_hotPathBegin;
</span><span class="lines">@@ -337,6 +348,7 @@
</span><span class="cx">     WriteBarrier&lt;JSFunction&gt; m_lastSeenCallee;
</span><span class="cx">     RefPtr&lt;PolymorphicCallStubRoutine&gt; m_stub;
</span><span class="cx">     RefPtr&lt;JITStubRoutine&gt; m_slowStub;
</span><ins>+    std::unique_ptr&lt;CallFrameShuffleData&gt; m_frameShuffleData;
</ins><span class="cx">     unsigned m_registerPreservationMode : 1; // Real type is RegisterPreservationMode
</span><span class="cx">     bool m_hasSeenShouldRepatch : 1;
</span><span class="cx">     bool m_hasSeenClosure : 1;
</span></span></pre></div>
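<p>The new accessors pair a producer at compile time with a consumer at
link/repatch time. A hypothetical sketch (the stubJit context is a stand-in;
the real consumer is linkPolymorphicCall() in the Repatch.cpp part of this
patch, not shown in this excerpt):</p>
<pre>// Producer: the DFG records the frame layout when compiling the call.
callLinkInfo-&gt;setFrameShuffleData(shuffleData);

// Consumer: a null result means no shuffle data was recorded, i.e. the
// call site did not go through CallFrameShuffler.
if (const CallFrameShuffleData* data = callLinkInfo-&gt;frameShuffleData()) {
    CallFrameShuffler shuffler(stubJit, *data);
    // ... emit the stub using the recorded argument positions ...
}</pre>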
<a id="branchesjsctailcallSourceJavaScriptCoredfgDFGGenerationInfoh"></a>
<div class="modfile"><h4>Modified: branches/jsc-tailcall/Source/JavaScriptCore/dfg/DFGGenerationInfo.h (188936 => 188937)</h4>
<pre class="diff"><span>
<span class="info">--- branches/jsc-tailcall/Source/JavaScriptCore/dfg/DFGGenerationInfo.h        2015-08-25 22:50:15 UTC (rev 188936)
+++ branches/jsc-tailcall/Source/JavaScriptCore/dfg/DFGGenerationInfo.h        2015-08-25 22:59:36 UTC (rev 188937)
</span><span class="lines">@@ -378,6 +378,29 @@
</span><span class="cx">         return m_useCount;
</span><span class="cx">     }
</span><span class="cx"> 
</span><ins>+    ValueRecovery recovery(VirtualRegister spillSlot) const
+    {
+        if (m_isConstant)
+            return ValueRecovery::constant(m_node-&gt;constant()-&gt;value());
+
+        if (m_registerFormat == DataFormatDouble)
+            return ValueRecovery::inFPR(u.fpr, DataFormatDouble);
+
+#if USE(JSVALUE32_64)
+        if (m_registerFormat &amp; DataFormatJS) {
+            if (m_registerFormat == DataFormatJS)
+                return ValueRecovery::inPair(u.v.tagGPR, u.v.payloadGPR);
+            return ValueRecovery::inGPR(u.v.payloadGPR, static_cast&lt;DataFormat&gt;(m_registerFormat &amp; ~DataFormatJS));
+        }
+#endif
+        if (m_registerFormat)
+            return ValueRecovery::inGPR(u.gpr, m_registerFormat);
+
+        ASSERT(m_spillFormat);
+
+        return ValueRecovery::displacedInJSStack(spillSlot, m_spillFormat);
+    }
+
</ins><span class="cx"> private:
</span><span class="cx">     void appendBirth(VariableEventStream&amp; stream)
</span><span class="cx">     {
</span></span></pre></div>
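<p>The new GenerationInfo::recovery() maps a node's current location to the
ValueRecovery that CallFrameShuffler acts on. A summary of the ladder above
(illustrative, not exhaustive):</p>
<pre>// Where the value lives now            Resulting recovery
// constant node                     -&gt; ValueRecovery::constant(value)
// double in an FPR                  -&gt; ValueRecovery::inFPR(fpr, DataFormatDouble)
// boxed JSValue in a pair (32-bit)  -&gt; ValueRecovery::inPair(tagGPR, payloadGPR)
// value held in a GPR               -&gt; ValueRecovery::inGPR(gpr, format)
// spilled to the JS stack           -&gt; ValueRecovery::displacedInJSStack(spillSlot, spillFormat)</pre>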
<a id="branchesjsctailcallSourceJavaScriptCoredfgDFGSpeculativeJIT32_64cpp"></a>
<div class="modfile"><h4>Modified: branches/jsc-tailcall/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp (188936 => 188937)</h4>
<pre class="diff"><span>
<span class="info">--- branches/jsc-tailcall/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp        2015-08-25 22:50:15 UTC (rev 188936)
+++ branches/jsc-tailcall/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp        2015-08-25 22:59:36 UTC (rev 188937)
</span><span class="lines">@@ -30,6 +30,7 @@
</span><span class="cx"> #if ENABLE(DFG_JIT)
</span><span class="cx"> 
</span><span class="cx"> #include &quot;ArrayPrototype.h&quot;
</span><ins>+#include &quot;CallFrameShuffler.h&quot;
</ins><span class="cx"> #include &quot;DFGAbstractInterpreterInlines.h&quot;
</span><span class="cx"> #include &quot;DFGCallArrayAllocatorSlowPathGenerator.h&quot;
</span><span class="cx"> #include &quot;DFGOperations.h&quot;
</span><span class="lines">@@ -703,6 +704,9 @@
</span><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx">     Edge calleeEdge = m_jit.graph().child(node, 0);
</span><ins>+    GPRReg calleeTagGPR;
+    GPRReg calleePayloadGPR;
+    CallFrameShuffleData shuffleData;
</ins><span class="cx">     
</span><span class="cx">     // Gotta load the arguments somehow. Varargs is trickier.
</span><span class="cx">     if (isVarargs || isForwardVarargs) {
</span><span class="lines">@@ -794,33 +798,48 @@
</span><span class="cx">         // receiver (method call). subsequent children are the arguments.
</span><span class="cx">         int numPassedArgs = node-&gt;numChildren() - 1;
</span><span class="cx"> 
</span><del>-        m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs), m_jit.calleeFramePayloadSlot(JSStack::ArgumentCount));
</del><ins>+        if (node-&gt;op() == TailCall) {
+            JSValueOperand callee(this, calleeEdge);
+            calleeTagGPR = callee.tagGPR();
+            calleePayloadGPR = callee.payloadGPR();
+            use(calleeEdge);
+
+            shuffleData.numLocals = m_jit.graph().frameRegisterCount();
+            shuffleData.callee = ValueRecovery::inPair(calleeTagGPR, calleePayloadGPR);
+            shuffleData.args.resize(numPassedArgs);
+
+            for (int i = 0; i &lt; numPassedArgs; ++i) {
+                Edge argEdge = m_jit.graph().varArgChild(node, i + 1);
+                GenerationInfo&amp; info = generationInfo(argEdge.node());
+                use(argEdge);
+                shuffleData.args[i] = info.recovery(argEdge-&gt;virtualRegister());
+            }
+        } else {
+            m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs), m_jit.calleeFramePayloadSlot(JSStack::ArgumentCount));
</ins><span class="cx">         
</span><del>-        for (int i = 0; i &lt; numPassedArgs; i++) {
-            Edge argEdge = m_jit.graph().m_varArgChildren[node-&gt;firstChild() + 1 + i];
-            JSValueOperand arg(this, argEdge);
-            GPRReg argTagGPR = arg.tagGPR();
-            GPRReg argPayloadGPR = arg.payloadGPR();
-            use(argEdge);
</del><ins>+            for (int i = 0; i &lt; numPassedArgs; i++) {
+                Edge argEdge = m_jit.graph().m_varArgChildren[node-&gt;firstChild() + 1 + i];
+                JSValueOperand arg(this, argEdge);
+                GPRReg argTagGPR = arg.tagGPR();
+                GPRReg argPayloadGPR = arg.payloadGPR();
+                use(argEdge);
</ins><span class="cx">             
</span><del>-            m_jit.store32(argTagGPR, m_jit.calleeArgumentTagSlot(i));
-            m_jit.store32(argPayloadGPR, m_jit.calleeArgumentPayloadSlot(i));
</del><ins>+                m_jit.store32(argTagGPR, m_jit.calleeArgumentTagSlot(i));
+                m_jit.store32(argPayloadGPR, m_jit.calleeArgumentPayloadSlot(i));
+            }
</ins><span class="cx">         }
</span><span class="cx">     }
</span><span class="cx"> 
</span><del>-    JSValueOperand callee(this, calleeEdge);
-    GPRReg calleeTagGPR = callee.tagGPR();
-    GPRReg calleePayloadGPR = callee.payloadGPR();
-    use(calleeEdge);
-    m_jit.store32(calleePayloadGPR, m_jit.calleeFramePayloadSlot(JSStack::Callee));
-    m_jit.store32(calleeTagGPR, m_jit.calleeFrameTagSlot(JSStack::Callee));
</del><ins>+    if (node-&gt;op() != TailCall) {
+        JSValueOperand callee(this, calleeEdge);
+        calleeTagGPR = callee.tagGPR();
+        calleePayloadGPR = callee.payloadGPR();
+        use(calleeEdge);
+        m_jit.store32(calleePayloadGPR, m_jit.calleeFramePayloadSlot(JSStack::Callee));
+        m_jit.store32(calleeTagGPR, m_jit.calleeFrameTagSlot(JSStack::Callee));
</ins><span class="cx"> 
</span><del>-    // FIXME: We should do an efficient move of the arguments into
-    // their target stack position instead of building then memmoving
-    // the callee frame.
-    // https://bugs.webkit.org/show_bug.cgi?id=147508
-    if (!isTail)
</del><span class="cx">         flushRegisters();
</span><ins>+    }
</ins><span class="cx"> 
</span><span class="cx">     GPRFlushedCallResult resultPayload(this);
</span><span class="cx">     GPRFlushedCallResult2 resultTag(this);
</span><span class="lines">@@ -840,16 +859,18 @@
</span><span class="cx">     
</span><span class="cx">     CallLinkInfo* info = m_jit.codeBlock()-&gt;addCallLinkInfo();
</span><span class="cx"> 
</span><del>-    slowPath.append(m_jit.branchIfNotCell(callee.jsValueRegs()));
</del><ins>+    if (isTail &amp;&amp; node-&gt;op() != TailCall)
+        m_jit.emitRestoreCalleeSaves();
+
+    slowPath.append(m_jit.branchIfNotCell(JSValueRegs(calleeTagGPR, calleePayloadGPR)));
</ins><span class="cx">     slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleePayloadGPR, targetToCheck));
</span><span class="cx"> 
</span><span class="cx">     if (isTail) {
</span><del>-        m_jit.emitRestoreCalleeSaves();
-        // FIXME: We should do an efficient move of the arguments into
-        // their target stack position instead of building then memmoving
-        // the callee frame.
-        // https://bugs.webkit.org/show_bug.cgi?id=147508
-        m_jit.prepareForTailCallSlow();
</del><ins>+        if (node-&gt;op() == TailCall) {
+            info-&gt;setFrameShuffleData(shuffleData);
+            CallFrameShuffler(m_jit, shuffleData).prepareForTailCall();
+        } else
+            m_jit.prepareForTailCallSlow();
</ins><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx">     JITCompiler::Call fastCall = isTail ? m_jit.nearTailCall() : m_jit.nearCall();
</span><span class="lines">@@ -858,18 +879,29 @@
</span><span class="cx"> 
</span><span class="cx">     slowPath.link(&amp;m_jit);
</span><span class="cx"> 
</span><del>-    // Callee payload needs to be in regT0, tag in regT1
-    if (calleeTagGPR == GPRInfo::regT0) {
-        if (calleePayloadGPR == GPRInfo::regT1)
-            m_jit.swap(GPRInfo::regT1, GPRInfo::regT0);
-        else {
</del><ins>+    if (node-&gt;op() == TailCall) {
+        CallFrameShuffler callFrameShuffler(m_jit, shuffleData);
+        callFrameShuffler.setCalleeJSValueRegs(JSValueRegs(
+            GPRInfo::regT1, GPRInfo::regT0));
+        callFrameShuffler.prepareForSlowPath();
+    } else {
+        // Callee payload needs to be in regT0, tag in regT1
+        if (calleeTagGPR == GPRInfo::regT0) {
+            if (calleePayloadGPR == GPRInfo::regT1)
+                m_jit.swap(GPRInfo::regT1, GPRInfo::regT0);
+            else {
+                m_jit.move(calleeTagGPR, GPRInfo::regT1);
+                m_jit.move(calleePayloadGPR, GPRInfo::regT0);
+            }
+        } else {
+            m_jit.move(calleePayloadGPR, GPRInfo::regT0);
</ins><span class="cx">             m_jit.move(calleeTagGPR, GPRInfo::regT1);
</span><del>-            m_jit.move(calleePayloadGPR, GPRInfo::regT0);
</del><span class="cx">         }
</span><del>-    } else {
-        m_jit.move(calleePayloadGPR, GPRInfo::regT0);
-        m_jit.move(calleeTagGPR, GPRInfo::regT1);
</del><span class="cx">     }
</span><ins>+
+    if (isTail &amp;&amp; node-&gt;op() != TailCall)
+        m_jit.emitRestoreCalleeSaves();
+
</ins><span class="cx">     m_jit.move(MacroAssembler::TrustedImmPtr(info), GPRInfo::regT2);
</span><span class="cx">     JITCompiler::Call slowCall = m_jit.nearCall();
</span><span class="cx"> 
</span></span></pre></div>
<a id="branchesjsctailcallSourceJavaScriptCoredfgDFGSpeculativeJIT64cpp"></a>
<div class="modfile"><h4>Modified: branches/jsc-tailcall/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp (188936 => 188937)</h4>
<pre class="diff"><span>
<span class="info">--- branches/jsc-tailcall/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp        2015-08-25 22:50:15 UTC (rev 188936)
+++ branches/jsc-tailcall/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp        2015-08-25 22:59:36 UTC (rev 188937)
</span><span class="lines">@@ -29,6 +29,7 @@
</span><span class="cx"> #if ENABLE(DFG_JIT)
</span><span class="cx"> 
</span><span class="cx"> #include &quot;ArrayPrototype.h&quot;
</span><ins>+#include &quot;CallFrameShuffler.h&quot;
</ins><span class="cx"> #include &quot;DFGAbstractInterpreterInlines.h&quot;
</span><span class="cx"> #include &quot;DFGCallArrayAllocatorSlowPathGenerator.h&quot;
</span><span class="cx"> #include &quot;DFGOperations.h&quot;
</span><span class="lines">@@ -688,7 +689,8 @@
</span><span class="cx">         break;
</span><span class="cx">     }
</span><span class="cx"> 
</span><del>-    Edge calleeEdge = m_jit.graph().child(node, 0);
</del><ins>+    GPRReg calleeGPR;
+    CallFrameShuffleData shuffleData;
</ins><span class="cx">     
</span><span class="cx">     // Gotta load the arguments somehow. Varargs is trickier.
</span><span class="cx">     if (isVarargs || isForwardVarargs) {
</span><span class="lines">@@ -775,33 +777,48 @@
</span><span class="cx">         // arguments.
</span><span class="cx">         int numPassedArgs = node-&gt;numChildren() - 1;
</span><span class="cx"> 
</span><del>-        m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs), JITCompiler::calleeFramePayloadSlot(JSStack::ArgumentCount));
-    
-        for (int i = 0; i &lt; numPassedArgs; i++) {
-            Edge argEdge = m_jit.graph().m_varArgChildren[node-&gt;firstChild() + 1 + i];
-            JSValueOperand arg(this, argEdge);
-            GPRReg argGPR = arg.gpr();
-            use(argEdge);
-        
-            m_jit.store64(argGPR, JITCompiler::calleeArgumentSlot(i));
</del><ins>+        if (node-&gt;op() == TailCall) {
+            Edge calleeEdge = m_jit.graph().child(node, 0);
+            JSValueOperand callee(this, calleeEdge);
+            calleeGPR = callee.gpr();
+            callee.use();
+
+            shuffleData.numLocals = m_jit.graph().frameRegisterCount();
+            shuffleData.callee = ValueRecovery::inGPR(calleeGPR, DataFormatJS);
+            shuffleData.args.resize(numPassedArgs);
+            
+            for (int i = 0; i &lt; numPassedArgs; ++i) {
+                Edge argEdge = m_jit.graph().varArgChild(node, i + 1);
+                GenerationInfo&amp; info = generationInfo(argEdge.node());
+                use(argEdge);
+                shuffleData.args[i] = info.recovery(argEdge-&gt;virtualRegister());
+            }
+
+            shuffleData.setupCalleeSaveRegisters(m_jit.codeBlock());
+        } else {
+            m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs), JITCompiler::calleeFramePayloadSlot(JSStack::ArgumentCount));
+
+            for (int i = 0; i &lt; numPassedArgs; i++) {
+                Edge argEdge = m_jit.graph().m_varArgChildren[node-&gt;firstChild() + 1 + i];
+                JSValueOperand arg(this, argEdge);
+                GPRReg argGPR = arg.gpr();
+                use(argEdge);
+
+                m_jit.store64(argGPR, JITCompiler::calleeArgumentSlot(i));
+            }
</ins><span class="cx">         }
</span><span class="cx">     }
</span><span class="cx"> 
</span><del>-    JSValueOperand callee(this, calleeEdge);
-    GPRReg calleeGPR = callee.gpr();
-    callee.use();
-    m_jit.store64(calleeGPR, JITCompiler::calleeFrameSlot(JSStack::Callee));
</del><ins>+    if (node-&gt;op() != TailCall) {
+        Edge calleeEdge = m_jit.graph().child(node, 0);
+        JSValueOperand callee(this, calleeEdge);
+        calleeGPR = callee.gpr();
+        callee.use();
+        m_jit.store64(calleeGPR, JITCompiler::calleeFrameSlot(JSStack::Callee));
</ins><span class="cx"> 
</span><del>-    // FIXME: We should do an efficient move of the arguments into
-    // their target stack position instead of building then memmoving
-    // the callee frame.
-    // https://bugs.webkit.org/show_bug.cgi?id=147508
-    if (!isTail)
</del><span class="cx">         flushRegisters();
</span><ins>+    }
</ins><span class="cx"> 
</span><del>-    GPRFlushedCallResult result(this);
-    GPRReg resultGPR = result.gpr();
-
</del><span class="cx">     CodeOrigin staticOrigin = node-&gt;origin.semantic;
</span><span class="cx">     ASSERT(!isTail || !staticOrigin.inlineCallFrame || !staticOrigin.inlineCallFrame-&gt;getCallerSkippingDeadFrames());
</span><span class="cx">     ASSERT(!isEmulatedTail || (staticOrigin.inlineCallFrame &amp;&amp; staticOrigin.inlineCallFrame-&gt;getCallerSkippingDeadFrames()));
</span><span class="lines">@@ -812,16 +829,18 @@
</span><span class="cx">     
</span><span class="cx">     CallLinkInfo* callLinkInfo = m_jit.codeBlock()-&gt;addCallLinkInfo();
</span><span class="cx"> 
</span><ins>+    if (isTail &amp;&amp; node-&gt;op() != TailCall)
+        m_jit.emitRestoreCalleeSaves();
+
</ins><span class="cx">     JITCompiler::DataLabelPtr targetToCheck;
</span><span class="cx">     JITCompiler::Jump slowPath = m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, MacroAssembler::TrustedImmPtr(0));
</span><span class="cx"> 
</span><span class="cx">     if (isTail) {
</span><del>-        m_jit.emitRestoreCalleeSaves();
-        // FIXME: We should do an efficient move of the arguments into
-        // their target stack position instead of building then memmoving
-        // the callee frame.
-        // https://bugs.webkit.org/show_bug.cgi?id=147508
-        m_jit.prepareForTailCallSlow();
</del><ins>+        if (node-&gt;op() == TailCall) {
+            callLinkInfo-&gt;setFrameShuffleData(shuffleData);
+            CallFrameShuffler(m_jit, shuffleData).prepareForTailCall();
+        } else
+            m_jit.prepareForTailCallSlow();
</ins><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx">     JITCompiler::Call fastCall = isTail ? m_jit.nearTailCall() : m_jit.nearCall();
</span><span class="lines">@@ -829,11 +848,14 @@
</span><span class="cx">     JITCompiler::Jump done = m_jit.jump();
</span><span class="cx">     
</span><span class="cx">     slowPath.link(&amp;m_jit);
</span><del>-    
-    if (isTail)
-        m_jit.emitRestoreCalleeSaves();
</del><span class="cx"> 
</span><del>-    m_jit.move(calleeGPR, GPRInfo::regT0); // Callee needs to be in regT0
</del><ins>+    if (node-&gt;op() == TailCall) {
+        CallFrameShuffler callFrameShuffler(m_jit, shuffleData);
+        callFrameShuffler.setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT0));
+        callFrameShuffler.prepareForSlowPath();
+    } else
+        m_jit.move(calleeGPR, GPRInfo::regT0); // Callee needs to be in regT0
+
</ins><span class="cx">     m_jit.move(MacroAssembler::TrustedImmPtr(callLinkInfo), GPRInfo::regT2); // Link info needs to be in regT2
</span><span class="cx">     JITCompiler::Call slowCall = m_jit.nearCall();
</span><span class="cx"> 
</span><span class="lines">@@ -842,6 +864,8 @@
</span><span class="cx">     if (isTail)
</span><span class="cx">         m_jit.abortWithReason(JITDidReturnFromTailCall);
</span><span class="cx">     else {
</span><ins>+        GPRFlushedCallResult result(this);
+        GPRReg resultGPR = result.gpr();
</ins><span class="cx">         m_jit.move(GPRInfo::returnValueGPR, resultGPR);
</span><span class="cx"> 
</span><span class="cx">         jsValueResult(resultGPR, m_currentNode, DataFormatJS, UseChildrenCalledExplicitly);
</span></span></pre></div>
<a id="branchesjsctailcallSourceJavaScriptCorejitCallFrameShuffleDatacpp"></a>
<div class="addfile"><h4>Added: branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp (0 => 188937)</h4>
<pre class="diff"><span>
<span class="info">--- branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp                                (rev 0)
+++ branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp        2015-08-25 22:59:36 UTC (rev 188937)
</span><span class="lines">@@ -0,0 +1,73 @@
</span><ins>+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include &quot;config.h&quot;
+#include &quot;CallFrameShuffleData.h&quot;
+
+#if ENABLE(JIT)
+
+#include &quot;CCallHelpers.h&quot;
+#include &quot;CodeBlock.h&quot;
+
+namespace JSC {
+
+#if USE(JSVALUE64)
+
+void CallFrameShuffleData::setupCalleeSaveRegisters(CodeBlock* codeBlock)
+{
+    RegisterSet calleeSaveRegisters { RegisterSet::allVMCalleeSaveRegisters() };
+    RegisterAtOffsetList* registerSaveLocations = codeBlock-&gt;calleeSaveRegisters();
+
+    for (size_t i = 0; i &lt; registerSaveLocations-&gt;size(); ++i) {
+        RegisterAtOffset entry { registerSaveLocations-&gt;at(i) };
+        if (!calleeSaveRegisters.get(entry.reg()))
+            continue;
+
+        ASSERT(entry.reg().isGPR());
+
+        VirtualRegister saveSlot { entry.offsetAsIndex() };
+        registers[entry.reg()]
+            = ValueRecovery::displacedInJSStack(saveSlot, DataFormatJS);
+    }
+
+    for (Reg reg = Reg::first(); reg &lt;= Reg::last(); reg = reg.next()) {
+        if (!calleeSaveRegisters.get(reg))
+            continue;
+
+        if (registers[reg])
+            continue;
+
+        ASSERT(reg.isGPR());
+
+        registers[reg]
+            = ValueRecovery::inGPR(reg.gpr(), DataFormatJS);
+    }
+}
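+
+// For instance (an illustrative sketch, not part of this patch), with a
+// code block whose prologue spilled two hypothetical callee-save GPRs
+// csr0 and csr1 into its first two local slots, the map built above
+// would read:
+//
+//     registers[csr0] = ValueRecovery::displacedInJSStack(VirtualRegister(-1), DataFormatJS);
+//     registers[csr1] = ValueRecovery::displacedInJSStack(VirtualRegister(-2), DataFormatJS);
+//     // Any other VM callee-save register was never spilled, so the
+//     // caller's value is still live in the register itself:
+//     registers[csr2] = ValueRecovery::inGPR(csr2, DataFormatJS);
+//
+// The register names and slot numbers are assumptions for the example;
+// the real offsets come from the code block's RegisterAtOffsetList.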
+
+#endif // USE(JSVALUE64)
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
</ins></span></pre></div>
<a id="branchesjsctailcallSourceJavaScriptCorejitCallFrameShuffleDatah"></a>
<div class="addfile"><h4>Added: branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffleData.h (0 => 188937)</h4>
<pre class="diff"><span>
<span class="info">--- branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffleData.h                                (rev 0)
+++ branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffleData.h        2015-08-25 22:59:36 UTC (rev 188937)
</span><span class="lines">@@ -0,0 +1,51 @@
</span><ins>+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef CallFrameShuffleData_h
+#define CallFrameShuffleData_h
+
+#if ENABLE(JIT)
+
+#include &quot;RegisterMap.h&quot;
+#include &quot;ValueRecovery.h&quot;
+
+namespace JSC {
+
+struct CallFrameShuffleData {
+    unsigned numLocals;
+    ValueRecovery callee;
+    Vector&lt;ValueRecovery&gt; args;
+#if USE(JSVALUE64)
+    RegisterMap&lt;ValueRecovery&gt; registers;
+
+    void setupCalleeSaveRegisters(CodeBlock*);
+#endif
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // CallFrameShuffleData_h
</ins></span></pre></div>
<a id="branchesjsctailcallSourceJavaScriptCorejitCallFrameShufflercpp"></a>
<div class="addfile"><h4>Added: branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffler.cpp (0 => 188937)</h4>
<pre class="diff"><span>
<span class="info">--- branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffler.cpp                                (rev 0)
+++ branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffler.cpp        2015-08-25 22:59:36 UTC (rev 188937)
</span><span class="lines">@@ -0,0 +1,704 @@
</span><ins>+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include &quot;config.h&quot;
+#include &quot;CallFrameShuffler.h&quot;
+
+#if ENABLE(JIT)
+
+#include &quot;CCallHelpers.h&quot;
+#include &quot;CodeBlock.h&quot;
+
+namespace JSC {
+
+CallFrameShuffler::CallFrameShuffler(CCallHelpers&amp; jit, const CallFrameShuffleData&amp; data)
+    : m_jit(jit)
+    , m_oldFrame(data.numLocals + JSStack::CallerFrameAndPCSize, nullptr)
+    , m_newFrame(data.args.size() + JSStack::CallFrameHeaderSize, nullptr)
+    , m_alignedOldFrameSize(JSStack::CallFrameHeaderSize
+        + roundArgumentCountToAlignFrame(jit.codeBlock()-&gt;numParameters()))
+    , m_alignedNewFrameSize(JSStack::CallFrameHeaderSize
+        + roundArgumentCountToAlignFrame(data.args.size()))
+    , m_frameDelta(m_alignedNewFrameSize - m_alignedOldFrameSize)
+    , m_lockedRegisters(RegisterSet::allRegisters())
+{
+    // We are allowed all the usual registers...
+    for (unsigned i = GPRInfo::numberOfRegisters; i--; )
+        m_lockedRegisters.clear(GPRInfo::toRegister(i));
+    for (unsigned i = FPRInfo::numberOfRegisters; i--; )
+        m_lockedRegisters.clear(FPRInfo::toRegister(i));
+    // ... as well as the runtime registers.
+    m_lockedRegisters.exclude(RegisterSet::runtimeRegisters());
+
+    ASSERT(!data.callee.isInJSStack() || data.callee.virtualRegister().isLocal());
+    addNew(VirtualRegister(JSStack::Callee), data.callee);
+
+    for (size_t i = 0; i &lt; data.args.size(); ++i) {
+        ASSERT(!data.args[i].isInJSStack() || data.args[i].virtualRegister().isLocal());
+        addNew(virtualRegisterForArgument(i), data.args[i]);
+    }
+
+#if USE(JSVALUE64)
+    for (Reg reg = Reg::first(); reg &lt;= Reg::last(); reg = reg.next()) {
+        if (!data.registers[reg].isSet())
+            continue;
+
+        RELEASE_ASSERT(reg.isGPR());
+
+        addNew(JSValueRegs(reg.gpr()),
+            data.registers[reg]);
+    }
+#endif
+}
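+
+// A minimal standalone sketch (not part of this patch) of the frame
+// size arithmetic in the initializers above, assuming 8-byte registers,
+// 16-byte stack alignment (stackAlignmentRegisters() == 2) and a 5-slot
+// call frame header (CallerFrameAndPC + CodeBlock + Callee + ArgumentCount):
+//
+//     constexpr int headerSize = 5;
+//     constexpr int alignRegs = 2;
+//
+//     constexpr int roundArgumentCountToAlignFrame(int argCount)
+//     {
+//         return (argCount + alignRegs - 1) &amp; ~(alignRegs - 1);
+//     }
+//
+//     constexpr int alignedFrameSize(int argCount)
+//     {
+//         return headerSize + roundArgumentCountToAlignFrame(argCount);
+//     }
+//
+//     // Tail-calling a 1-argument callee from a 3-parameter caller:
+//     static_assert(alignedFrameSize(3) == 9, &quot;old frame&quot;);
+//     static_assert(alignedFrameSize(1) == 7, &quot;new frame&quot;);
+//     // m_frameDelta == 7 - 9 == -2: the new frame's base sits two
+//     // slots away from the old one.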
+
+void CallFrameShuffler::dump(PrintStream&amp; out) const
+{
+    static const char* delimiter             = &quot; +-------------------------------+ &quot;;
+    static const char* dangerDelimiter       = &quot; X-------------------------------X &quot;;
+    static const char* dangerBoundsDelimiter = &quot; XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX &quot;;
+    static const char* emptySpace            = &quot;                                   &quot;;
+    ASSERT(m_alignedNewFrameSize &lt;= numLocals());
+    out.print(&quot;          &quot;);
+    out.print(&quot;           Old frame               &quot;);
+    out.print(&quot;           New frame               &quot;);
+    out.print(&quot;\n&quot;);
+    for (int i = 0; i &lt; m_alignedOldFrameSize + numLocals() + 3; ++i) {
+        VirtualRegister old { m_alignedOldFrameSize - i - 1 };
+        VirtualRegister newReg { old + m_frameDelta };
+
+        if (!isValidOld(old) &amp;&amp; old != firstOld() - 1
+            &amp;&amp; !isValidNew(newReg) &amp;&amp; newReg != firstNew() - 1)
+            continue;
+
+        out.print(&quot;        &quot;);
+        if (dangerFrontier() &gt;= firstNew()
+            &amp;&amp; (newReg == dangerFrontier() || newReg == firstNew() - 1))
+            out.print(dangerBoundsDelimiter);
+        else if (isValidOld(old))
+            out.print(isValidNew(newReg) &amp;&amp; isDangerNew(newReg) ? dangerDelimiter : delimiter);
+        else if (old == firstOld() - 1)
+            out.print(delimiter);
+        else
+            out.print(emptySpace);
+        if (dangerFrontier() &gt;= firstNew()
+            &amp;&amp; (newReg == dangerFrontier() || newReg == firstNew() - 1))
+            out.print(dangerBoundsDelimiter);
+        else if (isValidNew(newReg) || newReg == firstNew() - 1)
+            out.print(isDangerNew(newReg) ? dangerDelimiter : delimiter);
+        else
+            out.print(emptySpace);
+        out.print(&quot;\n&quot;);
+        if (old == firstOld())
+            out.print(&quot; sp --&gt; &quot;);
+        else if (!old.offset())
+            out.print(&quot; fp --&gt; &quot;);
+        else
+            out.print(&quot;        &quot;);
+        if (isValidOld(old)) {
+            if (getOld(old)) {
+                auto str = toCString(old);
+                if (isValidNew(newReg) &amp;&amp; isDangerNew(newReg))
+                    out.printf(&quot; X      %18s       X &quot;, str.data());
+                else
+                    out.printf(&quot; |      %18s       | &quot;, str.data());
+            } else if (isValidNew(newReg) &amp;&amp; isDangerNew(newReg))
+                out.printf(&quot; X%30s X &quot;, &quot;&quot;);
+            else
+                out.printf(&quot; |%30s | &quot;, &quot;&quot;);
+        } else
+            out.print(emptySpace);
+        if (isValidNew(newReg)) {
+            const char d = isDangerNew(newReg) ? 'X' : '|';
+            auto str = toCString(newReg);
+            if (getNew(newReg)) {
+                if (getNew(newReg)-&gt;recovery().isConstant())
+                    out.printf(&quot; %c%8s &lt;-           constant %c &quot;, d, str.data(), d);
+                else {
+                    auto recoveryStr = toCString(getNew(newReg)-&gt;recovery());
+                    out.printf(&quot; %c%8s &lt;- %18s %c &quot;, d, str.data(),
+                        recoveryStr.data(), d);
+                }
+            } else if (newReg == VirtualRegister { JSStack::ArgumentCount })
+                out.printf(&quot; %c%8s &lt;- %18zu %c &quot;, d, str.data(), argCount(), d);
+            else
+                out.printf(&quot; %c%30s %c &quot;, d, &quot;&quot;, d);
+        } else
+            out.print(emptySpace);
+        if (newReg == firstNew() - m_newFrameOffset &amp;&amp; !isSlowPath())
+            out.print(&quot; &lt;-- new sp before jump (current &quot;, m_newFrameBase, &quot;) &quot;);
+        if (newReg == firstNew())
+            out.print(&quot; &lt;-- new fp after prologue&quot;);
+        out.print(&quot;\n&quot;);
+    }
+    out.print(&quot;          &quot;);
+    out.print(&quot;        Live registers             &quot;);
+    out.print(&quot;        Wanted registers           &quot;);
+    out.print(&quot;\n&quot;);
+    for (Reg reg = Reg::first(); reg &lt;= Reg::last(); reg = reg.next()) {
+        if (!reg.isGPR())
+            continue;
+
+        CachedRecovery* oldCachedRecovery { m_registers[reg.gpr()] };
+        CachedRecovery* newCachedRecovery { m_newRegisters[reg.gpr()] };
+        if (!oldCachedRecovery &amp;&amp; !newCachedRecovery)
+            continue;
+        out.print(&quot;          &quot;);
+        if (oldCachedRecovery) {
+            auto str = toCString(reg.gpr());
+            out.printf(&quot;         %8s                  &quot;, str.data());
+        } else
+            out.print(emptySpace);
+#if USE(JSVALUE32_64)
+        if (newCachedRecovery) {
+            JSValueRegs wantedJSValueRegs { newCachedRecovery-&gt;wantedJSValueRegs() };
+            if (reg.gpr() == wantedJSValueRegs.tagGPR())
+                out.print(reg.gpr(), &quot; &lt;- tag(&quot;, newCachedRecovery-&gt;recovery(), &quot;)&quot;);
+            else
+                out.print(reg.gpr(), &quot; &lt;- payload(&quot;, newCachedRecovery-&gt;recovery(), &quot;)&quot;);
+        }
+#else
+        if (newCachedRecovery)
+            out.print(&quot;         &quot;, reg.gpr(), &quot; &lt;- &quot;, newCachedRecovery-&gt;recovery());
+#endif
+        out.print(&quot;\n&quot;);
+    }
+    out.print(&quot;  Locked registers: &quot;);
+    bool firstLocked { true };
+    for (Reg reg = Reg::first(); reg &lt;= Reg::last(); reg = reg.next()) {
+        if (!reg.isGPR())
+            continue;
+
+        if (m_lockedRegisters.get(reg.gpr())) {
+            out.print(firstLocked ? &quot;&quot; : &quot;, &quot;, reg.gpr());
+            firstLocked = false;
+        }
+    }
+    out.print(&quot;\n&quot;);
+
+    if (isSlowPath())
+        out.print(&quot;  Using fp-relative addressing for slow path call\n&quot;);
+    else
+        out.print(&quot;  Using sp-relative addressing for jump (using &quot;, m_newFrameBase, &quot; as new sp)\n&quot;);
+    if (m_oldFrameOffset)
+        out.print(&quot;   Old frame offset is &quot;, m_oldFrameOffset, &quot;\n&quot;);
+    if (m_newFrameOffset)
+        out.print(&quot;   New frame offset is &quot;, m_newFrameOffset, &quot;\n&quot;);
+}
+
+CachedRecovery* CallFrameShuffler::getCachedRecovery(ValueRecovery recovery)
+{
+    ASSERT(!recovery.isConstant());
+    if (recovery.isInGPR())
+        return m_registers[recovery.gpr()];
+    if (recovery.isInFPR())
+        return m_registers[recovery.fpr()];
+#if USE(JSVALUE32_64)
+    if (recovery.technique() == InPair) {
+        ASSERT(m_registers[recovery.tagGPR()] == m_registers[recovery.payloadGPR()]);
+        return m_registers[recovery.payloadGPR()];
+    }
+#endif
+    ASSERT(recovery.isInJSStack());
+    return getOld(recovery.virtualRegister());
+}
+
+CachedRecovery* CallFrameShuffler::setCachedRecovery(ValueRecovery recovery, CachedRecovery* cachedRecovery)
+{
+    ASSERT(!recovery.isConstant());
+    if (recovery.isInGPR())
+        return m_registers[recovery.gpr()] = cachedRecovery;
+    if (recovery.isInFPR())
+        return m_registers[recovery.fpr()] = cachedRecovery;
+#if USE(JSVALUE32_64)
+    if (recovery.technique() == InPair) {
+        m_registers[recovery.tagGPR()] = cachedRecovery;
+        return m_registers[recovery.payloadGPR()] = cachedRecovery;
+    }
+#endif
+    ASSERT(recovery.isInJSStack());
+    setOld(recovery.virtualRegister(), cachedRecovery);
+    return cachedRecovery;
+}
+
+void CallFrameShuffler::spill(CachedRecovery&amp; cachedRecovery)
+{
+    ASSERT(!isSlowPath());
+    ASSERT(cachedRecovery.recovery().isInRegisters());
+
+    VirtualRegister spillSlot { 0 };
+    for (VirtualRegister slot = firstOld(); slot &lt;= lastOld(); slot += 1) {
+        ASSERT(slot &lt; newAsOld(firstNew()));
+        if (getOld(slot))
+            continue;
+
+        spillSlot = slot;
+        break;
+    }
+    // We must have enough slots to be able to fit the whole
+    // callee's frame for the slow path.
+    RELEASE_ASSERT(spillSlot.isLocal());
+
+    if (verbose)
+        dataLog(&quot;   * Spilling &quot;, cachedRecovery.recovery(), &quot; into &quot;, spillSlot, &quot;\n&quot;);
+    auto format = emitStore(cachedRecovery, addressForOld(spillSlot));
+    ASSERT(format != DataFormatNone);
+    updateRecovery(cachedRecovery, ValueRecovery::displacedInJSStack(spillSlot, format));
+}
+
+void CallFrameShuffler::emitDeltaCheck()
+{
+    if (ASSERT_DISABLED)
+        return;
+
+    GPRReg scratchGPR { getFreeGPR() };
+    if (scratchGPR != InvalidGPRReg) {
+        if (verbose)
+            dataLog(&quot;  Using &quot;, scratchGPR, &quot; for the fp-sp delta check\n&quot;);
+        m_jit.move(GPRInfo::callFrameRegister, scratchGPR);
+        m_jit.subPtr(MacroAssembler::stackPointerRegister, scratchGPR);
+        MacroAssembler::Jump ok = m_jit.branch32(
+            MacroAssembler::Equal, scratchGPR,
+            MacroAssembler::TrustedImm32(
+                static_cast&lt;size_t&gt;(sizeof(Register) * numLocals())));
+        m_jit.abortWithReason(JITUnexpectedCallFrameSize);
+        ok.link(&amp;m_jit);
+    } else if (verbose)
+        dataLog(&quot;  Skipping the fp-sp delta check since there is too much pressure&quot;);
+}
+
+void CallFrameShuffler::prepareForSlowPath()
+{
+    ASSERT(isUndecided());
+    emitDeltaCheck();
+
+    m_frameDelta = numLocals() + JSStack::CallerFrameAndPCSize;
+    m_newFrameBase = MacroAssembler::stackPointerRegister;
+    m_newFrameOffset = -JSStack::CallerFrameAndPCSize;
+
+    if (verbose)
+        dataLog(&quot;\n\nPreparing frame for slow path call:\n&quot;, *this);
+
+    prepareAny();
+
+    if (verbose)
+        dataLog(&quot;Ready for slow path call!\n&quot;);
+}
+
+void CallFrameShuffler::prepareForTailCall()
+{
+    ASSERT(isUndecided());
+    emitDeltaCheck();
+
+    // We'll use sp-based indexing so that we can load the
+    // caller's frame pointer into fp immediately.
+    m_oldFrameBase = MacroAssembler::stackPointerRegister;
+    m_oldFrameOffset = numLocals();
+    m_newFrameBase = acquireGPR();
+#if CPU(X86)
+    // We load the frame pointer manually, but we need to ask the
+    // algorithm to move the return PC for us (it'd probably
+    // require a write to the danger zone). Since it'd be awkward
+    // to ask for half a value move, we ask that the whole thing
+    // be moved for us.
+    addNew(VirtualRegister { 0 },
+        ValueRecovery::displacedInJSStack(VirtualRegister(0), DataFormatJS));
+
+    // sp will point to head0 and we will move it up half a slot
+    // manually
+    m_newFrameOffset = 0;
+#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
+    // We load the frame pointer and link register
+    // manually. We could ask the algorithm to load them for us,
+    // and it would allow us to use the link register as an extra
+    // temporary - but it'd mean that the frame pointer can also
+    // be used as an extra temporary, so we keep the link register
+    // locked instead.
+
+    // sp will point to head1 since the callee's prologue pushes
+    // the call frame and link register.
+    m_newFrameOffset = -1;
+#elif CPU(ARM64)
+    // We load the frame pointer and link register manually. We
+    // could ask the algorithm to load the link register for us
+    // (which would allow for its use as an extra temporary), but
+    // since it's not in GPRInfo, we can't do it.
+
+    // sp will point to head2 since the callee's prologue pushes the
+    // call frame and link register
+    m_newFrameOffset = -2;
+#elif CPU(X86_64)
+    // We load the frame pointer manually, but we ask the
+    // algorithm to move the return PC for us (it'd probably
+    // require a write in the danger zone)
+    addNew(VirtualRegister { 1 },
+        ValueRecovery::displacedInJSStack(VirtualRegister(1), DataFormatJS));
+
+    // sp will point to head1 since the callee's prologue pushes
+    // the call frame register
+    m_newFrameOffset = -1;
+#else
+    UNREACHABLE_FOR_PLATFORM();
+#endif
+
+    if (verbose)
+        dataLog(&quot;  Emitting code for computing the new frame base\n&quot;);
+
+    // We compute the new frame base by first computing the top of the
+    // old frame (taking into account an argument count higher than
+    // the number of parameters), then subtracting from it the aligned
+    // new frame size (adjusted).
+    m_jit.load32(MacroAssembler::Address(GPRInfo::callFrameRegister, JSStack::ArgumentCount * static_cast&lt;int&gt;(sizeof(Register)) + PayloadOffset), m_newFrameBase);
+    MacroAssembler::Jump argumentCountOK =
+        m_jit.branch32(MacroAssembler::BelowOrEqual, m_newFrameBase,
+            MacroAssembler::TrustedImm32(m_jit.codeBlock()-&gt;numParameters()));
+    m_jit.add32(MacroAssembler::TrustedImm32(stackAlignmentRegisters() - 1 + JSStack::CallFrameHeaderSize), m_newFrameBase);
+    m_jit.and32(MacroAssembler::TrustedImm32(-stackAlignmentRegisters()), m_newFrameBase);
+    m_jit.mul32(MacroAssembler::TrustedImm32(sizeof(Register)), m_newFrameBase, m_newFrameBase);
+    MacroAssembler::Jump done = m_jit.jump();
+    argumentCountOK.link(&amp;m_jit);
+    m_jit.move(
+        MacroAssembler::TrustedImm32(m_alignedOldFrameSize * sizeof(Register)),
+        m_newFrameBase);
+    done.link(&amp;m_jit);
+
+    m_jit.addPtr(GPRInfo::callFrameRegister, m_newFrameBase);
+    m_jit.subPtr(
+        MacroAssembler::TrustedImm32(
+            (m_alignedNewFrameSize + m_newFrameOffset) * sizeof(Register)), 
+        m_newFrameBase);
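+
+    // A plain C restatement (an illustrative sketch, not part of this
+    // patch) of the sequence just emitted, assuming 8-byte registers, a
+    // 5-slot header and stackAlignmentRegisters() == 2; argumentCount is
+    // the value loaded from the old frame at runtime:
+    //
+    //     char* newFrameBase(char* fp, int argumentCount, int numParameters,
+    //         int alignedOldFrameSize, int alignedNewFrameSize, int newFrameOffset)
+    //     {
+    //         long top; // byte offset of the old frame's top, relative to fp
+    //         if (argumentCount &lt;= numParameters)
+    //             top = alignedOldFrameSize * 8;
+    //         else
+    //             top = ((argumentCount + 1 + 5) &amp; -2) * 8; // align, add header
+    //         return fp + top - (alignedNewFrameSize + newFrameOffset) * 8;
+    //     }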
+
+    // We want the frame pointer to always point to a valid frame, and
+    // we are going to trash the current one. Let's make it point to
+    // our caller's frame, since that's what we want to end up with.
+    m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister),
+        MacroAssembler::framePointerRegister);
+
+    // We load the link register manually for architectures that have one
+#if CPU(ARM) || CPU(SH4) || CPU(ARM64)
+    m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister, sizeof(void*)),
+        MacroAssembler::linkRegister);
+#elif CPU(MIPS)
+    m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister, sizeof(void*)),
+        MacroAssembler::returnAddressRegister);
+#endif
+
+    if (verbose)
+        dataLog(&quot;Preparing frame for tail call:\n&quot;, *this);
+
+    prepareAny();
+
+#if CPU(X86)
+    if (verbose)
+        dataLog(&quot;  Simulating pop of the call frame register\n&quot;);
+    m_jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*)), MacroAssembler::stackPointerRegister);
+#endif
+
+    if (verbose)
+        dataLog(&quot;Ready for tail call!\n&quot;);
+}
+
+bool CallFrameShuffler::tryWrites(CachedRecovery&amp; cachedRecovery)
+{
+    ASSERT(m_newFrameBase != InvalidGPRReg);
+
+    // If the value is already set up correctly, we don't have
+    // anything to do.
+    if (isSlowPath() &amp;&amp; cachedRecovery.recovery().isInJSStack()
+        &amp;&amp; cachedRecovery.targets().size() == 1
+        &amp;&amp; (newAsOld(cachedRecovery.targets()[0])
+            == cachedRecovery.recovery().virtualRegister())) {
+        cachedRecovery.clearTargets();
+        if (!cachedRecovery.wantedJSValueRegs())
+            clearCachedRecovery(cachedRecovery.recovery());
+        return true;
+    }
+
+    if (!canLoadAndBox(cachedRecovery))
+        return false;
+
+    emitLoad(cachedRecovery);
+    emitBox(cachedRecovery);
+    ASSERT(cachedRecovery.recovery().isInRegisters()
+        || cachedRecovery.recovery().isConstant());
+
+    if (verbose)
+        dataLog(&quot;   * Storing &quot;, cachedRecovery.recovery());
+    for (size_t i = 0; i &lt; cachedRecovery.targets().size(); ++i) {
+        VirtualRegister target { cachedRecovery.targets()[i] };
+        ASSERT(!isDangerNew(target));
+        if (verbose)
+            dataLog(!i ? &quot; into &quot; : &quot;, and &quot;, &quot;NEW &quot;, target);
+        emitStore(cachedRecovery, addressForNew(target));
+        setNew(target, nullptr);
+    }
+    if (verbose)
+        dataLog(&quot;\n&quot;);
+    cachedRecovery.clearTargets();
+    if (!cachedRecovery.wantedJSValueRegs())
+        clearCachedRecovery(cachedRecovery.recovery());
+
+    return true;
+}
+
+bool CallFrameShuffler::performSafeWrites()
+{
+    VirtualRegister firstSafe;
+    VirtualRegister end { lastNew() + 1 };
+    Vector&lt;VirtualRegister&gt; failures;
+
+    // For each cachedRecovery that writes to the safe zone, if it
+    // doesn't also write to the danger zone, we try to perform
+    // the writes. This may free up danger slots, so we iterate
+    // again until it doesn't happen anymore.
+    //
+    // Note that even though we have a while block, we look at
+    // each slot of the new call frame at most once since in each
+    // iteration beyond the first, we only load up the portion of
+    // the new call frame that was dangerous and became safe due
+    // to the previous iteration.
+    do {
+        firstSafe = dangerFrontier() + 1;
+        if (verbose)
+            dataLog(&quot;  Trying safe writes (between NEW &quot;, firstSafe, &quot; and NEW &quot;, end - 1, &quot;)\n&quot;);
+        bool didProgress = false;
+        for (VirtualRegister reg = firstSafe; reg &lt; end; reg += 1) {
+            CachedRecovery* cachedRecovery = getNew(reg);
+            if (!cachedRecovery) {
+                if (verbose)
+                    dataLog(&quot;   + &quot;, reg, &quot; is OK.\n&quot;);
+                continue;
+            }
+            if (!hasOnlySafeWrites(*cachedRecovery)) {
+                if (verbose) {
+                    dataLog(&quot;   - &quot;, cachedRecovery-&gt;recovery(), &quot; writes to NEW &quot;, reg,
+                        &quot; but also has dangerous writes.\n&quot;);
+                }
+                continue;
+            }
+            if (cachedRecovery-&gt;wantedJSValueRegs()) {
+                if (verbose) {
+                    dataLog(&quot;   - &quot;, cachedRecovery-&gt;recovery(), &quot; writes to NEW &quot;, reg,
+                        &quot; but is also needed in registers.\n&quot;);
+                }
+                continue;
+            }
+            if (!tryWrites(*cachedRecovery)) {
+                if (verbose)
+                    dataLog(&quot;   - Unable to write to NEW &quot;, reg, &quot; from &quot;, cachedRecovery-&gt;recovery(), &quot;\n&quot;);
+                failures.append(reg);
+            }
+            didProgress = true;
+        }
+        end = firstSafe;
+
+        // If we have cachedRecoveries that failed to write, it is
+        // because they are on the stack and we didn't have enough
+        // registers available at the time to load them into. If
+        // we have a free register, we should try again because it
+        // could free up some danger slots.
+        if (didProgress &amp;&amp; hasFreeRegister()) {
+            Vector&lt;VirtualRegister&gt; stillFailing;
+            for (VirtualRegister failed : failures) {
+                CachedRecovery* cachedRecovery = getNew(failed);
+                // It could have been handled later if it had
+                // several targets
+                if (!cachedRecovery)
+                    continue;
+
+                ASSERT(hasOnlySafeWrites(*cachedRecovery)
+                    &amp;&amp; !cachedRecovery-&gt;wantedJSValueRegs());
+                if (!tryWrites(*cachedRecovery))
+                    stillFailing.append(failed);
+            }
+            failures = WTF::move(stillFailing);
+        }
+        if (verbose &amp;&amp; firstSafe != dangerFrontier() + 1)
+            dataLog(&quot;  We freed up danger slots!\n&quot;);
+    } while (firstSafe != dangerFrontier() + 1);
+
+    return failures.isEmpty();
+}
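+
+// The ordering concern addressed above is the one memmove faces: when
+// the new frame overlaps the old one, a slot can be both a store target
+// and a not-yet-read source. A minimal standalone analogy (not part of
+// this patch), shifting values within one array without losing any:
+//
+//     #include &lt;vector&gt;
+//
+//     // Copies values[src..src+len) to values[dst..dst+len), dst &lt; src.
+//     // Writing in increasing index order is safe for the same reason
+//     // writes above the danger frontier are: each write clobbers only
+//     // a slot that has already been read.
+//     void shiftDown(std::vector&lt;int&gt;&amp; values, size_t dst, size_t src, size_t len)
+//     {
+//         for (size_t i = 0; i &lt; len; ++i)
+//             values[dst + i] = values[src + i];
+//     }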
+
+void CallFrameShuffler::prepareAny()
+{
+    ASSERT(!isUndecided());
+
+    updateDangerFrontier();
+
+    // First, we try to store any value that goes above the danger
+    // frontier. This will never use more registers, since we only
+    // load and store when we can ensure that any register used for the
+    // load will be freed up after the stores (i.e., all stores are above
+    // the danger frontier, and there is no wanted register).
+    performSafeWrites();
+
+    // At this point, we couldn't have more available registers than
+    // we do without spilling: all values currently in registers
+    // either require a write to the danger zone, or have a wanted
+    // register, which means that in any case they will have to go
+    // through registers again.
+
+    // We now slowly free up the danger zone by first loading the old
+    // value on the danger frontier, spilling as many registers as
+    // needed to do so and ensuring that the corresponding slot in the
+    // new frame is now ready to be written. Then, we store the old
+    // value to its target location if possible (we could have failed
+    // to load it previously due to high pressure). Finally, we write
+    // to any of the newly safe slots that we can, which could free up
+    // registers (hence why we do it eagerly).
+    for (VirtualRegister reg = dangerFrontier(); reg &gt;= firstNew(); reg -= 1) {
+        if (reg == dangerFrontier()) {
+            if (verbose)
+                dataLog(&quot;  Next slot (NEW &quot;, reg, &quot;) is the danger frontier\n&quot;);
+            CachedRecovery* cachedRecovery { getOld(newAsOld(dangerFrontier())) };
+            ASSERT(cachedRecovery);
+            ensureLoad(*cachedRecovery);
+            emitLoad(*cachedRecovery);
+            ensureBox(*cachedRecovery);
+            emitBox(*cachedRecovery);
+            if (hasOnlySafeWrites(*cachedRecovery))
+                tryWrites(*cachedRecovery);
+        } else if (verbose)
+            dataLog(&quot;  Next slot is NEW &quot;, reg, &quot;\n&quot;);
+
+        ASSERT(!isDangerNew(reg));
+        CachedRecovery* cachedRecovery = getNew(reg);
+        // This could be one of the header slots we don't care about.
+        if (!cachedRecovery) {
+            if (verbose)
+                dataLog(&quot;   + &quot;, reg, &quot; is OK\n&quot;);
+            continue;
+        }
+
+        if (canLoadAndBox(*cachedRecovery) &amp;&amp; hasOnlySafeWrites(*cachedRecovery)
+            &amp;&amp; !cachedRecovery-&gt;wantedJSValueRegs()) {
+            emitLoad(*cachedRecovery);
+            emitBox(*cachedRecovery);
+            bool writesOK = tryWrites(*cachedRecovery);
+            ASSERT(writesOK);
+        } else if (verbose)
+            dataLog(&quot;   - &quot;, cachedRecovery-&gt;recovery(), &quot; can't be handled just yet.\n&quot;);
+    }
+    ASSERT(dangerFrontier() &lt; firstNew());
+
+    // Now, the danger zone is empty, but we still have a couple of
+    // things to do:
+    //
+    // 1) There could be remaining safe writes that failed earlier due
+    //    to high register pressure and had nothing to do with the
+    //    danger zone whatsoever.
+    //
+    // 2) Some wanted registers could have to be loaded (this could
+    //    happen either when making a call to a new function with a
+    //    lower number of arguments - since above here, we only load
+    //    wanted registers when they are at the danger frontier - or
+    //    if a wanted register got spilled).
+    //
+    // 3) Some wanted registers could have been loaded in the wrong
+    //    registers
+    //
+    // 4) We have to take care of some bookkeeping - namely, storing
+    //    the argument count and updating the stack pointer.
+
+    // At this point, we must have enough registers available for
+    // handling 1). None of the loads can fail because we have been
+    // eagerly freeing up registers in all the previous phases - so
+    // the only values that are in registers at this point must have
+    // wanted registers.
+    if (verbose)
+        dataLog(&quot;  Danger zone is clear, performing remaining writes.\n&quot;);
+    for (VirtualRegister reg = firstNew(); reg &lt;= lastNew(); reg += 1) {
+        CachedRecovery* cachedRecovery { getNew(reg) };
+        if (!cachedRecovery)
+            continue;
+
+        emitLoad(*cachedRecovery);
+        emitBox(*cachedRecovery);
+        bool writesOK = tryWrites(*cachedRecovery);
+        ASSERT(writesOK);
+    }
+
+    // Handle 2) by loading all registers. We don't have to do any
+    // writes, since they have been taken care of above.
+    if (verbose)
+        dataLog(&quot;  Loading wanted GPRs into registers\n&quot;);
+    for (Reg reg = Reg::first(); reg &lt;= Reg::last(); reg = reg.next()) {
+        CachedRecovery* cachedRecovery { m_newRegisters[reg] };
+        if (!cachedRecovery)
+            continue;
+        ASSERT(reg.isGPR());
+
+        emitLoad(*cachedRecovery);
+        emitBox(*cachedRecovery);
+        ASSERT(cachedRecovery-&gt;targets().isEmpty());
+    }
+
+    // At this point, we have read everything we cared about from the
+    // stack, and written everything we had to to the stack.
+    if (verbose)
+        dataLog(&quot;  Callee frame is fully set up\n&quot;);
+    if (!ASSERT_DISABLED) {
+        for (VirtualRegister reg = firstNew(); reg &lt;= lastNew(); reg += 1)
+            ASSERT_UNUSED(reg, !getNew(reg));
+
+        for (CachedRecovery* cachedRecovery : m_cachedRecoveries) {
+            ASSERT_UNUSED(cachedRecovery, cachedRecovery-&gt;targets().isEmpty());
+            ASSERT(!cachedRecovery-&gt;recovery().isInJSStack());
+        }
+    }
+
+    // We need to handle 4) first because it implies releasing
+    // m_newFrameBase, which could be a wanted register.
+    if (verbose)
+        dataLog(&quot;   * Storing the argument count into &quot;, VirtualRegister { JSStack::ArgumentCount }, &quot;\n&quot;);
+    m_jit.store32(MacroAssembler::TrustedImm32(0),
+        addressForNew(VirtualRegister { JSStack::ArgumentCount }).withOffset(TagOffset));
+    m_jit.store32(MacroAssembler::TrustedImm32(argCount()),
+        addressForNew(VirtualRegister { JSStack::ArgumentCount }).withOffset(PayloadOffset));
+
+    if (!isSlowPath()) {
+        ASSERT(m_newFrameBase != MacroAssembler::stackPointerRegister);
+        if (verbose)
+            dataLog(&quot;  Releasing the new frame base pointer\n&quot;);
+        m_jit.move(m_newFrameBase, MacroAssembler::stackPointerRegister);
+        releaseGPR(m_newFrameBase);
+    }
+
+    // Finally we handle 3)
+    if (verbose)
+        dataLog(&quot;  Ensuring wanted registers are in the right register\n&quot;);
+    for (Reg reg = Reg::first(); reg &lt;= Reg::last(); reg = reg.next()) {
+        CachedRecovery* cachedRecovery { m_newRegisters[reg] };
+        if (!cachedRecovery)
+            continue;
+        ASSERT(reg.isGPR());
+
+        emitDisplace(*cachedRecovery);
+    }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
</ins></span></pre></div>
<a id="branchesjsctailcallSourceJavaScriptCorejitCallFrameShufflerh"></a>
<div class="addfile"><h4>Added: branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffler.h (0 => 188937)</h4>
<pre class="diff"><span>
<span class="info">--- branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffler.h                                (rev 0)
+++ branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffler.h        2015-08-25 22:59:36 UTC (rev 188937)
</span><span class="lines">@@ -0,0 +1,809 @@
</span><ins>+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef CallFrameShuffler_h
+#define CallFrameShuffler_h
+
+#if ENABLE(JIT)
+
+#include &quot;CallFrameShuffleData.h&quot;
+#include &quot;MacroAssembler.h&quot;
+#include &quot;RegisterSet.h&quot;
+#include &quot;StackAlignment.h&quot;
+#include &quot;ValueRecovery.h&quot;
+#include &quot;VirtualRegister.h&quot;
+#include &lt;wtf/Vector.h&gt;
+
+namespace JSC {
+
+// A CachedRecovery is a wrapper around a ValueRecovery that records where said
+// value should go on the stack and/or in registers. Whenever we perform an
+// operation changing the ValueRecovery, we update the CachedRecovery's member
+// in place.
+class CachedRecovery {
+public:
+    CachedRecovery(ValueRecovery recovery)
+        : m_recovery { recovery }
+    {
+    }
+
+    CachedRecovery(CachedRecovery&amp;) = delete;
+    CachedRecovery(CachedRecovery&amp;&amp;) = delete;
+    CachedRecovery&amp; operator=(CachedRecovery&amp;) = delete;
+    CachedRecovery&amp; operator=(CachedRecovery&amp;&amp;) = delete;
+
+    const Vector&lt;VirtualRegister, 1&gt;&amp; targets() const { return m_targets; }
+
+    void addTarget(VirtualRegister reg)
+    {
+        ASSERT(m_targets.isEmpty() || m_targets.last() &lt; reg);
+        m_targets.append(reg);
+    }
+
+    void removeTarget(VirtualRegister reg)
+    {
+        ASSERT(m_targets.last() == reg);
+        m_targets.shrink(m_targets.size() - 1);
+    }
+
+    void clearTargets()
+    {
+        m_targets.clear();
+    }
+
+    void setWantedJSValueRegs(JSValueRegs jsValueRegs)
+    {
+        m_wantedJSValueRegs = jsValueRegs;
+    }
+
+    // Determine whether converting this recovery into a JSValue will
+    // require additional GPRs and/or FPRs.
+    // This is guaranteed to only depend on the DataFormat, and the
+    // result of these calls will stay valid after loads and/or stores.
+    bool boxingRequiresGPR() const
+    {
+#if USE(JSVALUE64)
+        return recovery().dataFormat() == DataFormatDouble;
+#else
+        return false;
+#endif
+    }
+    bool boxingRequiresFPR() const
+    {
+#if USE(JSVALUE64)
+        switch (recovery().dataFormat()) {
+        case DataFormatInt52:
+        case DataFormatStrictInt52:
+            return true;
+
+        default:
+            return false;
+        }
+#else
+        return false;
+#endif
+    }
+    
+    // This is used to determine what kind of register we need to be
+    // able to load a recovery. We only use it when a direct load is
+    // currently impossible, to determine whether we should spill a
+    // GPR or an FPR for loading this value.
+    bool loadsIntoGPR() const;
+    bool loadsIntoFPR() const;
+
+    ValueRecovery recovery() const { return m_recovery; }
+
+    void setRecovery(ValueRecovery recovery) { m_recovery = recovery; }
+
+    JSValueRegs wantedJSValueRegs() const { return m_wantedJSValueRegs; }
+private:
+    ValueRecovery m_recovery;
+    JSValueRegs m_wantedJSValueRegs;
+    Vector&lt;VirtualRegister, 1&gt; m_targets;
+};
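+
+// For instance (an illustrative sketch, not part of this patch), a value
+// currently in a GPR that must end up both in the first argument slot of
+// the new frame and in a wanted register would be described as:
+//
+//     CachedRecovery cachedRecovery { ValueRecovery::inGPR(GPRInfo::regT3, DataFormatJS) };
+//     cachedRecovery.addTarget(virtualRegisterForArgument(0));
+//     cachedRecovery.setWantedJSValueRegs(JSValueRegs(GPRInfo::regT0));
+//
+// As the shuffler loads, boxes, stores and spills, setRecovery() keeps
+// m_recovery describing wherever the value currently lives; regT3 and
+// regT0 are arbitrary choices for the example.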
+
+class CallFrameShuffler {
+public:
+    CallFrameShuffler(CCallHelpers&amp;, const CallFrameShuffleData&amp;);
+
+    void dump(PrintStream&amp;) const;
+
+    // Any register that has been locked or acquired must be released
+    // before calling prepareForTailCall() or prepareForSlowPath().
+    void lockGPR(GPRReg gpr)
+    {
+        ASSERT(!m_lockedRegisters.get(gpr));
+        m_lockedRegisters.set(gpr);
+        if (verbose)
+            dataLog(&quot;   * Locking &quot;, gpr, &quot;\n&quot;);
+    }
+
+    GPRReg acquireGPR()
+    {
+        ensureGPR();
+        GPRReg gpr { getFreeGPR() };
+        ASSERT(!m_registers[gpr]);
+        lockGPR(gpr);
+        return gpr;
+    }
+
+    void releaseGPR(GPRReg gpr)
+    {
+        if (verbose) {
+            if (m_lockedRegisters.get(gpr))
+                dataLog(&quot;   * Releasing &quot;, gpr, &quot;\n&quot;);
+            else
+                dataLog(&quot;   * &quot;, gpr, &quot; was not locked\n&quot;);
+        }
+        m_lockedRegisters.clear(gpr);
+    }
+
+    // You can only take a snapshot if the recovery has not started
+    // yet. The only operations that are valid before taking a
+    // snapshot are lockGPR(), acquireGPR() and releaseGPR().
+    //
+    // Locking status is *NOT* preserved by the snapshot: it only
+    // contains information about where the
+    // arguments/callee/callee-save registers are by taking into
+    // account any spilling that acquireGPR() could have done.
+    CallFrameShuffleData snapshot() const
+    {
+        ASSERT(isUndecided());
+
+        CallFrameShuffleData data;
+        data.numLocals = numLocals();
+        data.callee = getNew(VirtualRegister { JSStack::Callee })-&gt;recovery();
+        data.args.resize(argCount());
+        for (size_t i = 0; i &lt; argCount(); ++i)
+            data.args[i] = getNew(virtualRegisterForArgument(i))-&gt;recovery();
+        for (Reg reg = Reg::first(); reg &lt;= Reg::last(); reg = reg.next()) {
+            CachedRecovery* cachedRecovery { m_newRegisters[reg] };
+            if (!cachedRecovery)
+                continue;
+
+#if USE(JSVALUE64)
+            data.registers[reg] = cachedRecovery-&gt;recovery();
+#else
+            RELEASE_ASSERT_NOT_REACHED();
+#endif
+        }
+        return WTF::move(data);
+    }
+
+    // Ask the shuffler to put the callee into some registers once the
+    // shuffling is done. You should call this before any of the
+    // prepare() methods, and must not take a snapshot afterwards, as
+    // this would crash on 32-bit platforms.
+    void setCalleeJSValueRegs(JSValueRegs jsValueRegs)
+    {
+        ASSERT(isUndecided());
+        ASSERT(!getNew(jsValueRegs));
+        CachedRecovery* cachedRecovery { getNew(VirtualRegister(JSStack::Callee)) };
+        ASSERT(cachedRecovery);
+        addNew(jsValueRegs, cachedRecovery-&gt;recovery());
+    }
+
+    // Ask the shuffler to assume the callee has already been checked
+    // to be a cell. This is a no-op on 64-bit platforms, but allows us
+    // to free up a GPR on 32-bit platforms.
+    // You obviously must have ensured that this is the case before
+    // running any of the prepare methods.
+    void assumeCalleeIsCell()
+    {
+#if USE(JSVALUE32_64)
+        CachedRecovery&amp; calleeCachedRecovery { *getNew(VirtualRegister(JSStack::Callee)) };
+        switch (calleeCachedRecovery.recovery().technique()) {
+        case InPair:
+            updateRecovery(
+                calleeCachedRecovery,
+                ValueRecovery::inGPR(
+                    calleeCachedRecovery.recovery().payloadGPR(),
+                    DataFormatCell));
+            break;
+        case DisplacedInJSStack:
+            updateRecovery(
+                calleeCachedRecovery,
+                ValueRecovery::displacedInJSStack(
+                    calleeCachedRecovery.recovery().virtualRegister(),
+                    DataFormatCell));
+            break;
+        case InFPR:
+        case UnboxedCellInGPR:
+        case CellDisplacedInJSStack:
+            break;
+        case Constant:
+            ASSERT(calleeCachedRecovery.recovery().constant().isCell());
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            break;
+        }
+#endif
+    }
+
+    // This will emit code to build the new frame over the old one.
+    void prepareForTailCall();
+
+    // This will emit code to build the new frame as if performing a
+    // regular call. However, the callee save registers will be
+    // restored, and any locals (not the header or arguments) of the
+    // current frame can be overwritten.
+    //
+    // A frame built using prepareForSlowPath() should be used either
+    // to throw an exception in, or destroyed using
+    // CCallHelpers::prepareForTailCallSlow() followed by a tail call.
+    void prepareForSlowPath();
+
+private:
+    static const bool verbose = false;
+
+    CCallHelpers&amp; m_jit;
+
+    void prepareAny();
+
+    void spill(CachedRecovery&amp;);
+
+    // &quot;box&quot; is arguably a bad name here. The meaning is that after
+    // calling emitBox(), you ensure that subsequently calling
+    // emitStore() will be able to store the value without additional
+    // transformation. In particular, this is a no-op for constants,
+    // and is a complete no-op on 32-bit platforms since any unboxed value can
+    // still be stored by storing the payload and a statically known
+    // tag.
+    void emitBox(CachedRecovery&amp;);
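+
+    // A minimal standalone sketch (not part of this patch) of what
+    // boxing means on JSVALUE64, mirroring JSC's JSValue encoding: a
+    // raw double or int32 becomes a 64-bit JSValue so that a single
+    // store64 suffices afterwards.
+    //
+    //     #include &lt;cstdint&gt;
+    //     #include &lt;cstring&gt;
+    //
+    //     constexpr uint64_t TagTypeNumber = 0xFFFF000000000000ull;
+    //     constexpr uint64_t DoubleEncodeOffset = 1ull &lt;&lt; 48;
+    //
+    //     uint64_t boxDouble(double d)
+    //     {
+    //         uint64_t bits;
+    //         std::memcpy(&amp;bits, &amp;d, sizeof(bits)); // reinterpret the bits
+    //         return bits + DoubleEncodeOffset;
+    //     }
+    //
+    //     uint64_t boxInt32(int32_t i)
+    //     {
+    //         return TagTypeNumber | static_cast&lt;uint32_t&gt;(i);
+    //     }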
+
+    bool canBox(CachedRecovery&amp; cachedRecovery)
+    {
+        if (cachedRecovery.boxingRequiresGPR() &amp;&amp; getFreeGPR() == InvalidGPRReg)
+            return false;
+
+        if (cachedRecovery.boxingRequiresFPR() &amp;&amp; getFreeFPR() == InvalidFPRReg)
+            return false;
+
+        return true;
+    }
+
+    void ensureBox(CachedRecovery&amp; cachedRecovery)
+    {
+        if (canBox(cachedRecovery))
+            return;
+
+        if (cachedRecovery.boxingRequiresGPR())
+            ensureGPR();
+
+        if (cachedRecovery.boxingRequiresFPR())
+            ensureFPR();
+    }
+
+    void emitLoad(CachedRecovery&amp;);
+
+    bool canLoad(CachedRecovery&amp;);
+
+    void ensureLoad(CachedRecovery&amp; cachedRecovery)
+    {
+        if (canLoad(cachedRecovery))
+            return;
+
+        ASSERT(cachedRecovery.loadsIntoGPR() || cachedRecovery.loadsIntoFPR());
+
+        if (cachedRecovery.loadsIntoFPR()) {
+            if (cachedRecovery.loadsIntoGPR())
+                ensureRegister();
+            else
+                ensureFPR();
+        } else
+            ensureGPR();
+    }
+
+    bool canLoadAndBox(CachedRecovery&amp; cachedRecovery)
+    {
+        // We don't have interfering loads &amp; boxes
+        ASSERT(!cachedRecovery.loadsIntoFPR() || !cachedRecovery.boxingRequiresFPR());
+        ASSERT(!cachedRecovery.loadsIntoGPR() || !cachedRecovery.boxingRequiresGPR());
+
+        return canLoad(cachedRecovery) &amp;&amp; canBox(cachedRecovery);
+    }
+
+    DataFormat emitStore(CachedRecovery&amp;, MacroAssembler::Address);
+
+    void emitDisplace(CachedRecovery&amp;);
+
+    void emitDeltaCheck();
+
+    Bag&lt;CachedRecovery&gt; m_cachedRecoveries;
+
+    void updateRecovery(CachedRecovery&amp; cachedRecovery, ValueRecovery recovery)
+    {
+        clearCachedRecovery(cachedRecovery.recovery());
+        cachedRecovery.setRecovery(recovery);
+        setCachedRecovery(recovery, &amp;cachedRecovery);
+    }
+
+    CachedRecovery* getCachedRecovery(ValueRecovery);
+
+    CachedRecovery* setCachedRecovery(ValueRecovery, CachedRecovery*);
+
+    void clearCachedRecovery(ValueRecovery recovery)
+    {
+        if (!recovery.isConstant())
+            setCachedRecovery(recovery, nullptr);
+    }
+
+    CachedRecovery* addCachedRecovery(ValueRecovery recovery)
+    {
+        if (recovery.isConstant())
+            return m_cachedRecoveries.add(recovery);
+        CachedRecovery* cachedRecovery = getCachedRecovery(recovery);
+        if (!cachedRecovery)
+            return setCachedRecovery(recovery, m_cachedRecoveries.add(recovery));
+        return cachedRecovery;
+    }
+
+    // This is the current recoveries present in the old frame's
+    // slots. A null CachedRecovery means we can trash the current
+    // value as we don't care about it.
+    Vector&lt;CachedRecovery*&gt; m_oldFrame;
+
+    int numLocals() const
+    {
+        return m_oldFrame.size() - JSStack::CallerFrameAndPCSize;
+    }
+
+    CachedRecovery* getOld(VirtualRegister reg) const
+    {
+        return m_oldFrame[JSStack::CallerFrameAndPCSize - reg.offset() - 1];
+    }
+
+    void setOld(VirtualRegister reg, CachedRecovery* cachedRecovery)
+    {
+        m_oldFrame[JSStack::CallerFrameAndPCSize - reg.offset() - 1] = cachedRecovery;
+    }
+
+    VirtualRegister firstOld() const
+    {
+        return VirtualRegister { static_cast&lt;int&gt;(-numLocals()) };
+    }
+
+    VirtualRegister lastOld() const
+    {
+        return VirtualRegister { JSStack::CallerFrameAndPCSize - 1 };
+    }
+
+    bool isValidOld(VirtualRegister reg) const
+    {
+        return reg &gt;= firstOld() &amp;&amp; reg &lt;= lastOld();
+    }
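+
+    // Index-mapping example (illustrative, not from this patch),
+    // assuming CallerFrameAndPCSize == 2 and numLocals() == 4, so
+    // m_oldFrame has 6 entries:
+    //
+    //     getOld(lastOld())  reads m_oldFrame[2 - 1 - 1]    == m_oldFrame[0]
+    //     getOld(firstOld()) reads m_oldFrame[2 - (-4) - 1] == m_oldFrame[5]
+    //
+    // i.e. the vector stores the old frame from its top slot down to
+    // its bottom slot.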
+
+    // This stores, for each slot in the new frame, information about
+    // the recovery for the value that should eventually go into that
+    // slot.
+    //
+    // Once the slot has been written, the corresponding entry in
+    // m_newFrame will be empty.
+    Vector&lt;CachedRecovery*&gt; m_newFrame;
+
+    size_t argCount() const
+    {
+        return m_newFrame.size() - JSStack::CallFrameHeaderSize;
+    }
+
+    CachedRecovery* getNew(VirtualRegister newRegister) const
+    {
+        return m_newFrame[newRegister.offset()];
+    }
+
+    void setNew(VirtualRegister newRegister, CachedRecovery* cachedRecovery)
+    {
+        m_newFrame[newRegister.offset()] = cachedRecovery;
+    }
+
+    void addNew(VirtualRegister newRegister, ValueRecovery recovery)
+    {
+        CachedRecovery* cachedRecovery = addCachedRecovery(recovery);
+        cachedRecovery-&gt;addTarget(newRegister);
+        setNew(newRegister, cachedRecovery);
+    }
+
+    VirtualRegister firstNew() const
+    {
+        return VirtualRegister { 0 };
+    }
+
+    VirtualRegister lastNew() const
+    {
+        return VirtualRegister { static_cast&lt;int&gt;(m_newFrame.size()) - 1 };
+    }
+
+    bool isValidNew(VirtualRegister reg) const
+    {
+        return reg &gt;= firstNew() &amp;&amp; reg &lt;= lastNew();
+    }
+
+
+    int m_alignedOldFrameSize;
+    int m_alignedNewFrameSize;
+
+    // This is the distance, in slots, between the base of the new
+    // frame and the base of the old frame. It could be negative when
+    // preparing for a tail call to a function with smaller argument
+    // count.
+    //
+    // We will overwrite this appropriately for slow path calls, but
+    // we initialize it as if doing a fast path for the spills we
+    // could do while undecided (typically while calling acquireGPR()
+    // for a polymorphic call).
+    int m_frameDelta;
+
+    VirtualRegister newAsOld(VirtualRegister reg) const
+    {
+        return reg - m_frameDelta;
+    }
+
+    // This stores the set of locked registers, i.e. registers for
+    // which we have an implicit requirement that they are not changed.
+    //
+    // This will usually contain the link register on architectures
+    // that have one, any scratch register used by the macro assembler
+    // (e.g. r11 on X86_64), as well as any register that we use for
+    // addressing (see m_oldFrameBase and m_newFrameBase).
+    //
+    // We also use this to lock registers temporarily, for instance to
+    // ensure that we have at least 2 available registers for loading
+    // a pair on 32-bit platforms.
+    RegisterSet m_lockedRegisters;
+
+    // This stores the current recoveries present in registers. A null
+    // CachedRecovery means we can trash the current value as we don't
+    // care about it. 
+    RegisterMap&lt;CachedRecovery*&gt; m_registers;
+
+    // This stores, for each register, information about the recovery
+    // for the value that should eventually go into that register. The
+    // only registers that have a target recovery will be callee-save
+    // registers, as well as possibly one JSValueRegs for holding the
+    // callee.
+    //
+    // Once the correct value has been put into the registers, and
+    // contrary to what we do with m_newFrame, we keep the entry in
+    // m_newRegisters to simplify spilling.
+    //
+    // FIXME: Contrary to what the name suggests, we only support GPRs
+    // for now.
+    RegisterMap&lt;CachedRecovery*&gt; m_newRegisters;
+
+    template&lt;typename CheckFunctor&gt;
+    Reg getFreeRegister(const CheckFunctor&amp; check) const
+    {
+        Reg nonTemp { };
+        for (Reg reg = Reg::first(); reg &lt;= Reg::last(); reg = reg.next()) {
+            if (m_lockedRegisters.get(reg))
+                continue;
+
+            if (!check(reg))
+                continue;
+
+            if (!m_registers[reg]) {
+                if (!m_newRegisters[reg])
+                    return reg;
+                if (!nonTemp)
+                    nonTemp = reg;
+            }
+        }
+        return nonTemp;
+    }
+
+    GPRReg getFreeGPR() const
+    {
+        Reg freeGPR { getFreeRegister([] (Reg reg) { return reg.isGPR(); }) };
+        if (!freeGPR)
+            return InvalidGPRReg;
+        return freeGPR.gpr();
+    }
+
+    FPRReg getFreeFPR() const
+    {
+        Reg freeFPR { getFreeRegister([] (Reg reg) { return reg.isFPR(); }) };
+        if (!freeFPR)
+            return InvalidFPRReg;
+        return freeFPR.fpr();
+    }
+
+    bool hasFreeRegister() const
+    {
+        return static_cast&lt;bool&gt;(getFreeRegister([] (Reg) { return true; }));
+    }
+
+    // This frees up a register satisfying the check functor. The
+    // functor could theoretically implement any kind of logic, but it
+    // must only return true for recoveries that are stored in
+    // registers - spill assumes and asserts that it is passed a
+    // cachedRecovery whose value currently lives in a register.
+    template&lt;typename CheckFunctor&gt;
+    void ensureRegister(const CheckFunctor&amp; check)
+    {
+        // If we can spill a callee-save, that's best, because it will
+        // free up a register that would otherwise have been taken for
+        // the longest amount of time.
+        //
+        // We could try to bias towards those that are not in their
+        // target registers yet, but the gain is probably super
+        // small. Unless you have a huge number of arguments (at least
+        // around twice the number of available registers on your
+        // architecture), no spilling is going to take place anyway.
+        for (Reg reg = Reg::first(); reg &lt;= Reg::last(); reg = reg.next()) {
+            if (m_lockedRegisters.get(reg))
+                continue;
+
+            CachedRecovery* cachedRecovery { m_newRegisters[reg] };
+            if (!cachedRecovery)
+                continue;
+
+            if (check(*cachedRecovery)) {
+                if (verbose)
+                    dataLog(&quot;  &quot;, cachedRecovery-&gt;recovery(), &quot; looks like a good spill candidate\n&quot;);
+                spill(*cachedRecovery);
+                return;
+            }
+        }
+
+        // We spill the cachedRecovery associated with the first new
+        // slot we can, because that is the one whose write will become
+        // possible last, i.e. the one we would otherwise have had to
+        // keep in a register for the longest.
+        for (VirtualRegister reg = firstNew(); reg &lt;= lastNew(); reg += 1) {
+            CachedRecovery* cachedRecovery { getNew(reg) };
+            if (!cachedRecovery)
+                continue;
+
+            if (check(*cachedRecovery)) {
+                spill(*cachedRecovery);
+                return;
+            }
+        }
+
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+
+    void ensureRegister()
+    {
+        if (hasFreeRegister())
+            return;
+
+        if (verbose)
+            dataLog(&quot;  Finding a register to spill\n&quot;);
+        ensureRegister(
+            [this] (const CachedRecovery&amp; cachedRecovery) {
+                if (cachedRecovery.recovery().isInGPR())
+                    return !m_lockedRegisters.get(cachedRecovery.recovery().gpr());
+                if (cachedRecovery.recovery().isInFPR())
+                    return !m_lockedRegisters.get(cachedRecovery.recovery().fpr());
+#if USE(JSVALUE32_64)
+                if (cachedRecovery.recovery().technique() == InPair) {
+                    return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR())
+                        &amp;&amp; !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR());
+                }
+#endif
+                return false;
+            });
+    }
+
+    void ensureGPR()
+    {
+        if (getFreeGPR() != InvalidGPRReg)
+            return;
+
+        if (verbose)
+            dataLog(&quot;  Finding a GPR to spill\n&quot;);
+        ensureRegister(
+            [this] (const CachedRecovery&amp; cachedRecovery) {
+                if (cachedRecovery.recovery().isInGPR())
+                    return !m_lockedRegisters.get(cachedRecovery.recovery().gpr());
+#if USE(JSVALUE32_64)
+                if (cachedRecovery.recovery().technique() == InPair) {
+                    return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR())
+                        &amp;&amp; !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR());
+                }
+#endif
+                return false;
+            });
+    }
+
+    void ensureFPR()
+    {
+        if (getFreeFPR() != InvalidFPRReg)
+            return;
+
+        if (verbose)
+            dataLog(&quot;  Finding an FPR to spill\n&quot;);
+        ensureRegister(
+            [this] (const CachedRecovery&amp; cachedRecovery) {
+                if (cachedRecovery.recovery().isInFPR())
+                    return !m_lockedRegisters.get(cachedRecovery.recovery().fpr());
+                return false;
+            });
+    }
+
+    CachedRecovery* getNew(JSValueRegs jsValueRegs) const
+    {
+#if USE(JSVALUE64)
+        return m_newRegisters[jsValueRegs.gpr()];
+#else
+        ASSERT(
+            jsValueRegs.tagGPR() == InvalidGPRReg || jsValueRegs.payloadGPR() == InvalidGPRReg
+            || m_newRegisters[jsValueRegs.payloadGPR()] == m_newRegisters[jsValueRegs.tagGPR()]);
+        if (jsValueRegs.payloadGPR() == InvalidGPRReg)
+            return m_newRegisters[jsValueRegs.tagGPR()];
+        return m_newRegisters[jsValueRegs.payloadGPR()];
+#endif
+    }
+
+    void addNew(JSValueRegs jsValueRegs, ValueRecovery recovery)
+    {
+        ASSERT(jsValueRegs &amp;&amp; !getNew(jsValueRegs));
+        CachedRecovery* cachedRecovery = addCachedRecovery(recovery);
+        ASSERT(!cachedRecovery-&gt;wantedJSValueRegs());
+        cachedRecovery-&gt;setWantedJSValueRegs(jsValueRegs);
+#if USE(JSVALUE64)
+        m_newRegisters[jsValueRegs.gpr()] = cachedRecovery;
+#else
+        if (jsValueRegs.payloadGPR() != InvalidGPRReg)
+            m_newRegisters[jsValueRegs.payloadGPR()] = cachedRecovery;
+        if (jsValueRegs.tagGPR() != InvalidGPRReg)
+            m_newRegisters[jsValueRegs.tagGPR()] = cachedRecovery;
+#endif
+    }
+
+    // m_oldFrameBase is the register relative to which we access
+    // slots in the old call frame, with an additional offset of
+    // m_oldFrameOffset.
+    //
+    //  - For an actual tail call, m_oldFrameBase is the stack
+    //    pointer, and m_oldFrameOffset is the number of locals of the
+    //    tail caller's frame. We use such stack pointer-based
+    //    addressing because it allows us to load the tail caller's
+    //    caller's frame pointer into the frame pointer register
+    //    immediately instead of awkwardly keeping it around on the
+    //    stack.
+    //
+    //  - For a slow path call, m_oldFrameBase is just the frame
+    //    pointer, and m_oldFrameOffset is 0.
+    GPRReg m_oldFrameBase { MacroAssembler::framePointerRegister };
+    int m_oldFrameOffset { 0 };
+
+    MacroAssembler::Address addressForOld(VirtualRegister reg) const
+    {
+        return MacroAssembler::Address(m_oldFrameBase,
+            (m_oldFrameOffset + reg.offset()) * sizeof(Register));
+    }
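+
+    // For example (hypothetical values): with a slow path call, where
+    // m_oldFrameBase is the frame pointer and m_oldFrameOffset is 0,
+    // a register with offset() == -1 resolves to the address
+    // fp - sizeof(Register).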
+
+    // m_newFrameBase is the register relative to which we access
+    // slots in the new call frame, and we always make it point to
+    // wherever the stack pointer will be right before making the
+    // actual call/jump. The actual base of the new frame is at offset
+    // m_newFrameOffset relative to m_newFrameBase.
+    //
+    //  - For an actual tail call, m_newFrameBase is computed
+    //    dynamically, and m_newFrameOffset varies between 0 and -2
+    //    depending on the architecture's calling convention (see
+    //    prepareForTailCall).
+    //
+    //  - For a slow path call, m_newFrameBase is the actual stack
+    //    pointer, and m_newFrameOffset is -CallerFrameAndPCSize,
+    //    following the convention for a regular call.
+    GPRReg m_newFrameBase { InvalidGPRReg };
+    int m_newFrameOffset { 0 };
+
+    bool isUndecided() const
+    {
+        return m_newFrameBase == InvalidGPRReg;
+    }
+
+    bool isSlowPath() const
+    {
+        return m_newFrameBase == MacroAssembler::stackPointerRegister;
+    }
+
+    MacroAssembler::Address addressForNew(VirtualRegister reg) const
+    {
+        return MacroAssembler::Address(m_newFrameBase,
+            (m_newFrameOffset + reg.offset()) * sizeof(Register));
+    }
+
+    // We use a concept of &quot;danger zone&quot;. The danger zone consists of
+    // all the writes in the new frame that could overlap with reads
+    // in the old frame.
+    //
+    // Because we could have a higher actual number of arguments than
+    // parameters, when preparing a tail call, we need to assume that
+    // writing to a slot on the new frame could overlap not only with
+    // the corresponding slot in the old frame, but also with any slot
+    // above it. Thus, the danger zone consists of all the slots
+    // between the first slot of the new frame and what I call the
+    // &quot;danger frontier&quot;: the highest new-frame slot that still
+    // aliases a slot of the old frame we care about. Because the
+    // danger frontier relates to the new frame, it is stored as a
+    // virtual register *in the new frame*.
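+    //
+    // Illustrative example (hypothetical layout): if new slot 3
+    // aliases old slot 5 via newAsOld() and old slot 5 still holds a
+    // recovery we have not yet read, the danger frontier is at NEW 3
+    // and isDangerNew() holds for new slots 0 through 3.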
+    VirtualRegister m_dangerFrontier;
+
+    VirtualRegister dangerFrontier() const
+    {
+        ASSERT(!isUndecided());
+
+        return m_dangerFrontier;
+    }
+
+    bool isDangerNew(VirtualRegister reg) const
+    {
+        ASSERT(!isUndecided() &amp;&amp; isValidNew(reg));
+        return reg &lt;= dangerFrontier();
+    }
+
+    void updateDangerFrontier()
+    {
+        ASSERT(!isUndecided());
+
+        m_dangerFrontier = firstNew() - 1;
+        for (VirtualRegister reg = lastNew(); reg &gt;= firstNew(); reg -= 1) {
+            if (!getNew(reg) || !isValidOld(newAsOld(reg)) || !getOld(newAsOld(reg)))
+                continue;
+
+            m_dangerFrontier = reg;
+            if (verbose)
+                dataLog(&quot;  Danger frontier now at NEW &quot;, m_dangerFrontier, &quot;\n&quot;);
+            break;
+        }
+        if (verbose &amp;&amp; m_dangerFrontier &lt; firstNew())
+            dataLog(&quot;  All clear! Danger zone is empty.\n&quot;);
+    }
+
+    // A safe write is a write that never writes into the danger zone.
+    bool hasOnlySafeWrites(CachedRecovery&amp; cachedRecovery) const
+    {
+        for (VirtualRegister target : cachedRecovery.targets()) {
+            if (isDangerNew(target))
+                return false;
+        }
+        return true;
+    }
+
+    // You must ensure that there are no dangerous writes before
+    // calling this function.
+    bool tryWrites(CachedRecovery&amp;);
+
+    // This function tries to ensure that there is no longer any
+    // possible safe write, i.e. all remaining writes are either to
+    // the danger zone or callee-save restorations.
+    //
+    // It returns false if it was unable to perform some safe writes
+    // due to high register pressure.
+    bool performSafeWrites();
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // CallFrameShuffler_h
</ins></span></pre></div>
<a id="branchesjsctailcallSourceJavaScriptCorejitCallFrameShuffler32_64cpp"></a>
<div class="addfile"><h4>Added: branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp (0 => 188937)</h4>
<pre class="diff"><span>
<span class="info">--- branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp                                (rev 0)
+++ branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp        2015-08-25 22:59:36 UTC (rev 188937)
</span><span class="lines">@@ -0,0 +1,331 @@
</span><ins>+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include &quot;config.h&quot;
+#include &quot;CallFrameShuffler.h&quot;
+
+#if ENABLE(JIT) &amp;&amp; USE(JSVALUE32_64)
+
+#include &quot;CCallHelpers.h&quot;
+#include &quot;DataFormat.h&quot;
+#include &quot;JSCJSValue.h&quot;
+
+namespace JSC {
+
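+// On 32-bit platforms, a JSValue is stored as a 32-bit tag word and a
+// 32-bit payload word. Storing an unboxed value to the stack is thus
+// two 32-bit stores: a constant tag and the payload register.
+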
+DataFormat CallFrameShuffler::emitStore(CachedRecovery&amp; location, MacroAssembler::Address address)
+{
+    ASSERT(!location.recovery().isInJSStack());
+
+    switch (location.recovery().technique()) {
+    case UnboxedInt32InGPR:
+        m_jit.store32(MacroAssembler::TrustedImm32(JSValue::Int32Tag),
+            address.withOffset(TagOffset));
+        m_jit.store32(location.recovery().gpr(), address.withOffset(PayloadOffset));
+        return DataFormatInt32;
+    case UnboxedCellInGPR:
+        m_jit.store32(MacroAssembler::TrustedImm32(JSValue::CellTag),
+            address.withOffset(TagOffset));
+        m_jit.store32(location.recovery().gpr(), address.withOffset(PayloadOffset));
+        return DataFormatCell;
+    case Constant:
+        m_jit.storeTrustedValue(location.recovery().constant(), address);
+        return DataFormatJS;
+    case InPair:
+        m_jit.storeValue(location.recovery().jsValueRegs(), address);
+        return DataFormatJS;
+    case UnboxedBooleanInGPR:
+        m_jit.store32(MacroAssembler::TrustedImm32(JSValue::BooleanTag),
+            address.withOffset(TagOffset));
+        m_jit.store32(location.recovery().gpr(), address.withOffset(PayloadOffset));
+        return DataFormatBoolean;
+    case InFPR:
+    case UnboxedDoubleInFPR:
+        m_jit.storeDouble(location.recovery().fpr(), address);
+        return DataFormatJS;
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+}
+
+void CallFrameShuffler::emitBox(CachedRecovery&amp; location)
+{
+    // Nothing to do, we're good! JSValues and doubles can be stored
+    // immediately, and the other formats need no transformation -
+    // boxing them just means storing their constant tag separately.
+    ASSERT(canBox(location));
+}
+
+void CallFrameShuffler::emitLoad(CachedRecovery&amp; location)
+{
+    if (!location.recovery().isInJSStack())
+        return;
+
+    if (verbose)
+        dataLog(&quot;   * Loading &quot;, location.recovery(), &quot; into &quot;);
+    VirtualRegister reg { location.recovery().virtualRegister() };
+    MacroAssembler::Address address { addressForOld(reg) };
+
+    bool tryFPR { true };
+    JSValueRegs wantedJSValueRegs { location.wantedJSValueRegs() };
+    if (wantedJSValueRegs) {
+        if (wantedJSValueRegs.payloadGPR() != InvalidGPRReg
+            &amp;&amp; !m_registers[wantedJSValueRegs.payloadGPR()]
+            &amp;&amp; !m_lockedRegisters.get(wantedJSValueRegs.payloadGPR()))
+            tryFPR = false;
+        if (wantedJSValueRegs.tagGPR() != InvalidGPRReg
+            &amp;&amp; !m_registers[wantedJSValueRegs.tagGPR()]
+            &amp;&amp; !m_lockedRegisters.get(wantedJSValueRegs.tagGPR()))
+            tryFPR = false;
+    }
+
+    if (tryFPR &amp;&amp; location.loadsIntoFPR()) {
+        FPRReg resultFPR = getFreeFPR();
+        if (resultFPR != InvalidFPRReg) {
+            m_jit.loadDouble(address, resultFPR);
+            DataFormat dataFormat = DataFormatJS;
+            if (location.recovery().dataFormat() == DataFormatDouble)
+                dataFormat = DataFormatDouble;
+            updateRecovery(location, 
+                ValueRecovery::inFPR(resultFPR, dataFormat));
+            if (verbose)
+                dataLog(location.recovery(), &quot;\n&quot;);
+            if (reg == newAsOld(dangerFrontier()))
+                updateDangerFrontier();
+            return;
+        }
+    }
+
+    if (location.loadsIntoGPR()) {
+        GPRReg resultGPR { wantedJSValueRegs.payloadGPR() };
+        if (resultGPR == InvalidGPRReg || m_registers[resultGPR] || m_lockedRegisters.get(resultGPR))
+            resultGPR = getFreeGPR();
+        ASSERT(resultGPR != InvalidGPRReg);
+        m_jit.loadPtr(address.withOffset(PayloadOffset), resultGPR);
+        updateRecovery(location, 
+            ValueRecovery::inGPR(resultGPR, location.recovery().dataFormat()));
+        if (verbose)
+            dataLog(location.recovery(), &quot;\n&quot;);
+        if (reg == newAsOld(dangerFrontier()))
+            updateDangerFrontier();
+        return;
+    }
+
+    ASSERT(location.recovery().technique() == DisplacedInJSStack);
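+    // A full JSValue needs two distinct GPRs; we temporarily lock the
+    // payload register below so that the second call to getFreeGPR()
+    // cannot hand back the same register for the tag.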
+    GPRReg payloadGPR { wantedJSValueRegs.payloadGPR() };
+    GPRReg tagGPR { wantedJSValueRegs.tagGPR() };
+    if (payloadGPR == InvalidGPRReg || m_registers[payloadGPR] || m_lockedRegisters.get(payloadGPR))
+        payloadGPR = getFreeGPR();
+    m_lockedRegisters.set(payloadGPR);
+    if (tagGPR == InvalidGPRReg || m_registers[tagGPR] || m_lockedRegisters.get(tagGPR))
+        tagGPR = getFreeGPR();
+    m_lockedRegisters.clear(payloadGPR);
+    ASSERT(payloadGPR != InvalidGPRReg &amp;&amp; tagGPR != InvalidGPRReg &amp;&amp; tagGPR != payloadGPR);
+    m_jit.loadPtr(address.withOffset(PayloadOffset), payloadGPR);
+    m_jit.loadPtr(address.withOffset(TagOffset), tagGPR);
+    updateRecovery(location, 
+        ValueRecovery::inPair(tagGPR, payloadGPR));
+    if (verbose)
+        dataLog(location.recovery(), &quot;\n&quot;);
+    if (reg == newAsOld(dangerFrontier()))
+        updateDangerFrontier();
+}
+
+bool CallFrameShuffler::canLoad(CachedRecovery&amp; location)
+{
+    if (!location.recovery().isInJSStack())
+        return true;
+
+    if (location.loadsIntoFPR() &amp;&amp; getFreeFPR() != InvalidFPRReg)
+        return true;
+
+    if (location.loadsIntoGPR() &amp;&amp; getFreeGPR() != InvalidGPRReg)
+        return true;
+
+    if (location.recovery().technique() == DisplacedInJSStack) {
+        GPRReg payloadGPR { getFreeGPR() };
+        if (payloadGPR == InvalidGPRReg)
+            return false;
+        m_lockedRegisters.set(payloadGPR);
+        GPRReg tagGPR { getFreeGPR() };
+        m_lockedRegisters.clear(payloadGPR);
+        return tagGPR != InvalidGPRReg;
+    }
+
+    return false;
+}
+
+// We prefer loading doubles and undetermined JSValues into FPRs
+// because they would otherwise use up two GPRs.
+bool CachedRecovery::loadsIntoFPR() const
+{
+    switch (recovery().technique()) {
+    case DoubleDisplacedInJSStack:
+    case DisplacedInJSStack:
+        return true;
+
+    default:
+        return false;
+    }
+}
+
+// Integers, booleans and cells can be loaded into a single GPR.
+bool CachedRecovery::loadsIntoGPR() const
+{
+    switch (recovery().technique()) {
+    case Int32DisplacedInJSStack:
+    case BooleanDisplacedInJSStack:
+    case CellDisplacedInJSStack:
+        return true;
+
+    default:
+        return false;
+    }
+}
+
+void CallFrameShuffler::emitDisplace(CachedRecovery&amp; location)
+{
+    ASSERT(location.recovery().isInRegisters());
+    JSValueRegs wantedJSValueRegs { location.wantedJSValueRegs() };
+    ASSERT(wantedJSValueRegs);
+
+    GPRReg wantedTagGPR { wantedJSValueRegs.tagGPR() };
+    GPRReg wantedPayloadGPR { wantedJSValueRegs.payloadGPR() };
+    
+    if (wantedTagGPR != InvalidGPRReg) {
+        ASSERT(!m_lockedRegisters.get(wantedTagGPR));
+        if (CachedRecovery* currentTag { m_registers[wantedTagGPR] }) {
+            if (currentTag == &amp;location) {
+                if (verbose)
+                    dataLog(&quot;   + &quot;, wantedTagGPR, &quot; is OK\n&quot;);
+            } else {
+                // This can never happen on 32-bit platforms since we
+                // have at most one wanted JSValueRegs, for the
+                // callee, and no callee-save registers.
+                RELEASE_ASSERT_NOT_REACHED();
+            }
+        }
+    }
+
+    if (wantedPayloadGPR != InvalidGPRReg) {
+        ASSERT(!m_lockedRegisters.get(wantedPayloadGPR));
+        if (CachedRecovery* currentPayload { m_registers[wantedPayloadGPR] }) {
+            if (currentPayload == &amp;location) {
+                if (verbose)
+                    dataLog(&quot;   + &quot;, wantedPayloadGPR, &quot; is OK\n&quot;);
+            } else {
+                // See above
+                RELEASE_ASSERT_NOT_REACHED();
+            }
+        }
+    }
+
+    if (location.recovery().technique() == InPair
+        || location.recovery().isInGPR()) {
+        GPRReg payloadGPR;
+        if (location.recovery().technique() == InPair)
+            payloadGPR = location.recovery().payloadGPR();
+        else
+            payloadGPR = location.recovery().gpr();
+
+        if (wantedPayloadGPR == InvalidGPRReg)
+            wantedPayloadGPR = payloadGPR;
+
+        if (payloadGPR != wantedPayloadGPR) {
+            if (location.recovery().technique() == InPair
+                &amp;&amp; wantedPayloadGPR == location.recovery().tagGPR()) {
+                if (verbose)
+                    dataLog(&quot;   * Swapping &quot;, payloadGPR, &quot; and &quot;, wantedPayloadGPR, &quot;\n&quot;);
+                m_jit.swap(payloadGPR, wantedPayloadGPR);
+                updateRecovery(location, 
+                    ValueRecovery::inPair(payloadGPR, wantedPayloadGPR));
+            } else {
+                if (verbose)
+                    dataLog(&quot;   * Moving &quot;, payloadGPR, &quot; into &quot;, wantedPayloadGPR, &quot;\n&quot;);
+                m_jit.move(payloadGPR, wantedPayloadGPR);
+                if (location.recovery().technique() == InPair) {
+                    updateRecovery(location,
+                        ValueRecovery::inPair(location.recovery().tagGPR(),
+                            wantedPayloadGPR));
+                } else {
+                    updateRecovery(location, 
+                        ValueRecovery::inGPR(wantedPayloadGPR, location.recovery().dataFormat()));
+                }
+            }
+        }
+
+        if (wantedTagGPR == InvalidGPRReg)
+            wantedTagGPR = getFreeGPR();
+        switch (location.recovery().dataFormat()) {
+        case DataFormatInt32:
+            if (verbose)
+                dataLog(&quot;   * Moving int32 tag into &quot;, wantedTagGPR, &quot;\n&quot;);
+            m_jit.move(MacroAssembler::TrustedImm32(JSValue::Int32Tag),
+                wantedTagGPR);
+            break;
+        case DataFormatCell:
+            if (verbose)
+                dataLog(&quot;   * Moving cell tag into &quot;, wantedTagGPR, &quot;\n&quot;);
+            m_jit.move(MacroAssembler::TrustedImm32(JSValue::CellTag),
+                wantedTagGPR);
+            break;
+        case DataFormatBoolean:
+            if (verbose)
+                dataLog(&quot;   * Moving boolean tag into &quot;, wantedTagGPR, &quot;\n&quot;);
+            m_jit.move(MacroAssembler::TrustedImm32(JSValue::BooleanTag),
+                wantedTagGPR);
+            break;
+        case DataFormatJS:
+            ASSERT(wantedTagGPR != location.recovery().payloadGPR());
+            if (wantedTagGPR != location.recovery().tagGPR()) {
+                if (verbose)
+                    dataLog(&quot;   * Moving &quot;, location.recovery().tagGPR(), &quot; into &quot;, wantedTagGPR, &quot;\n&quot;);
+                m_jit.move(location.recovery().tagGPR(), wantedTagGPR);
+            }
+            break;
+
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+    } else {
+        ASSERT(location.recovery().isInFPR());
+        if (wantedTagGPR == InvalidGPRReg) {
+            ASSERT(wantedPayloadGPR != InvalidGPRReg);
+            m_lockedRegisters.set(wantedPayloadGPR);
+            wantedTagGPR = getFreeGPR();
+            m_lockedRegisters.clear(wantedPayloadGPR);
+        }
+        if (wantedPayloadGPR == InvalidGPRReg) {
+            m_lockedRegisters.set(wantedTagGPR);
+            wantedPayloadGPR = getFreeGPR();
+            m_lockedRegisters.clear(wantedTagGPR);
+        }
+        m_jit.boxDouble(location.recovery().fpr(), wantedTagGPR, wantedPayloadGPR);
+    }
+    updateRecovery(location, ValueRecovery::inPair(wantedTagGPR, wantedPayloadGPR));
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT) &amp;&amp; USE(JSVALUE32_64)
</ins></span></pre></div>
<a id="branchesjsctailcallSourceJavaScriptCorejitCallFrameShuffler64cpp"></a>
<div class="addfile"><h4>Added: branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffler64.cpp (0 => 188937)</h4>
<pre class="diff"><span>
<span class="info">--- branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffler64.cpp                                (rev 0)
+++ branches/jsc-tailcall/Source/JavaScriptCore/jit/CallFrameShuffler64.cpp        2015-08-25 22:59:36 UTC (rev 188937)
</span><span class="lines">@@ -0,0 +1,341 @@
</span><ins>+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include &quot;config.h&quot;
+#include &quot;CallFrameShuffler.h&quot;
+
+#if ENABLE(JIT) &amp;&amp; USE(JSVALUE64)
+
+#include &quot;CCallHelpers.h&quot;
+#include &quot;DataFormat.h&quot;
+
+namespace JSC {
+
+DataFormat CallFrameShuffler::emitStore(
+    CachedRecovery&amp; cachedRecovery, MacroAssembler::Address address)
+{
+    ASSERT(!cachedRecovery.recovery().isInJSStack());
+
+    switch (cachedRecovery.recovery().technique()) {
+    case InGPR:
+        m_jit.storePtr(cachedRecovery.recovery().gpr(), address);
+        return DataFormatJS;
+    case UnboxedInt32InGPR:
+        m_jit.store32(cachedRecovery.recovery().gpr(), address.withOffset(PayloadOffset));
+        return DataFormatInt32;
+    case UnboxedInt52InGPR:
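+        // An Int52 is kept shifted left by int52ShiftAmount; shifting
+        // it back right yields the StrictInt52 form, which stores
+        // like any other pointer-width value.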
+        m_jit.rshift64(MacroAssembler::TrustedImm32(JSValue::int52ShiftAmount),
+            cachedRecovery.recovery().gpr());
+        FALLTHROUGH;
+    case UnboxedStrictInt52InGPR:
+        m_jit.storePtr(cachedRecovery.recovery().gpr(), address);
+        return DataFormatStrictInt52;
+    case UnboxedBooleanInGPR:
+        m_jit.storePtr(cachedRecovery.recovery().gpr(), address);
+        return DataFormatBoolean;
+    case UnboxedCellInGPR:
+        m_jit.storePtr(cachedRecovery.recovery().gpr(), address);
+        return DataFormatCell;
+    case UnboxedDoubleInFPR:
+        m_jit.storeDouble(cachedRecovery.recovery().fpr(), address);
+        return DataFormatDouble;
+    case InFPR:
+        m_jit.storeDouble(cachedRecovery.recovery().fpr(), address);
+        return DataFormatJS;
+    case Constant:
+        m_jit.storeTrustedValue(cachedRecovery.recovery().constant(), address);
+        return DataFormatJS;
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+    }
+}
+
+void CallFrameShuffler::emitBox(CachedRecovery&amp; cachedRecovery)
+{
+    ASSERT(canBox(cachedRecovery));
+    if (cachedRecovery.recovery().isConstant())
+        return;
+
+    if (cachedRecovery.recovery().isInGPR()) {
+        switch (cachedRecovery.recovery().dataFormat()) {
+        case DataFormatInt32:
+            if (verbose)
+                dataLog(&quot;   * Boxing &quot;, cachedRecovery.recovery());
+            m_jit.zeroExtend32ToPtr(
+                cachedRecovery.recovery().gpr(),
+                cachedRecovery.recovery().gpr());
+            // We have to do this the hard way.
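+            // A boxed int32 is the zero-extended 32-bit value with
+            // TagTypeNumber (0xFFFF000000000000) in the high bits.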
+            m_jit.or64(MacroAssembler::TrustedImm64(TagTypeNumber),
+                cachedRecovery.recovery().gpr());
+            cachedRecovery.setRecovery(
+                ValueRecovery::inGPR(cachedRecovery.recovery().gpr(), DataFormatJS));
+            if (verbose)
+                dataLog(&quot; into &quot;, cachedRecovery.recovery(), &quot;\n&quot;);
+            return;
+        case DataFormatInt52:
+            if (verbose)
+                dataLog(&quot;   * Boxing &quot;, cachedRecovery.recovery());
+            m_jit.rshift64(MacroAssembler::TrustedImm32(JSValue::int52ShiftAmount),
+                cachedRecovery.recovery().gpr());
+            cachedRecovery.setRecovery(
+                ValueRecovery::inGPR(cachedRecovery.recovery().gpr(), DataFormatStrictInt52));
+            if (verbose)
+                dataLog(&quot; into &quot;, cachedRecovery.recovery(), &quot;\n&quot;);
+            FALLTHROUGH;
+        case DataFormatStrictInt52: {
+            if (verbose)
+                dataLog(&quot;   * Boxing &quot;, cachedRecovery.recovery());
+            FPRReg resultFPR = getFreeFPR();
+            ASSERT(resultFPR != InvalidFPRReg);
+            m_jit.convertInt64ToDouble(cachedRecovery.recovery().gpr(), resultFPR);
+            updateRecovery(cachedRecovery, ValueRecovery::inFPR(resultFPR, DataFormatDouble));
+            if (verbose)
+                dataLog(&quot; into &quot;, cachedRecovery.recovery(), &quot;\n&quot;);
+            break;
+        }
+        case DataFormatBoolean:
+            if (verbose)
+                dataLog(&quot;   * Boxing &quot;, cachedRecovery.recovery());
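+            // Booleans come in as 0 or 1; adding ValueFalse yields
+            // the boxed ValueFalse/ValueTrue encodings.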
+            m_jit.add32(MacroAssembler::TrustedImm32(ValueFalse),
+                cachedRecovery.recovery().gpr());
+            cachedRecovery.setRecovery(
+                ValueRecovery::inGPR(cachedRecovery.recovery().gpr(), DataFormatJS));
+            if (verbose)
+                dataLog(&quot; into &quot;, cachedRecovery.recovery(), &quot;\n&quot;);
+            return;
+        default:
+            return;
+        }
+    }
+
+    if (cachedRecovery.recovery().isInFPR()) {
+        if (cachedRecovery.recovery().dataFormat() == DataFormatDouble) {
+            if (verbose)
+                dataLog(&quot;   * Boxing &quot;, cachedRecovery.recovery());
+            GPRReg resultGPR = cachedRecovery.wantedJSValueRegs().gpr();
+            if (resultGPR == InvalidGPRReg || m_registers[resultGPR])
+                resultGPR = getFreeGPR();
+            ASSERT(resultGPR != InvalidGPRReg);
+            m_jit.purifyNaN(cachedRecovery.recovery().fpr());
+            m_jit.moveDoubleTo64(cachedRecovery.recovery().fpr(), resultGPR);
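+            // Subtracting TagTypeNumber is the same as adding
+            // DoubleEncodeOffset (2^48) modulo 2^64, which is how
+            // doubles are encoded as JSValues.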
+            m_jit.sub64(MacroAssembler::TrustedImm64(TagTypeNumber), resultGPR);
+            updateRecovery(cachedRecovery, ValueRecovery::inGPR(resultGPR, DataFormatJS));
+            if (verbose)
+                dataLog(&quot; into &quot;, cachedRecovery.recovery(), &quot;\n&quot;);
+            return;
+        }
+        ASSERT(cachedRecovery.recovery().dataFormat() == DataFormatJS);
+        return;
+    }
+
+    RELEASE_ASSERT_NOT_REACHED();
+}
+
+void CallFrameShuffler::emitLoad(CachedRecovery&amp; cachedRecovery)
+{
+    if (!cachedRecovery.recovery().isInJSStack())
+        return;
+
+    if (verbose)
+        dataLog(&quot;   * Loading &quot;, cachedRecovery.recovery(), &quot; into &quot;);
+
+    VirtualRegister reg = cachedRecovery.recovery().virtualRegister();
+    MacroAssembler::Address address { addressForOld(reg) };
+    bool tryFPR { true };
+    GPRReg resultGPR { cachedRecovery.wantedJSValueRegs().gpr() };
+
+    // If we want a GPR and it's available, that's better than loading
+    // into an FPR.
+    if (resultGPR != InvalidGPRReg &amp;&amp; !m_registers[resultGPR]
+        &amp;&amp; !m_lockedRegisters.get(resultGPR) &amp;&amp; cachedRecovery.loadsIntoGPR())
+        tryFPR = false;
+
+    // Otherwise, we prefer loading into FPRs if possible
+    if (tryFPR &amp;&amp; cachedRecovery.loadsIntoFPR()) {
+        FPRReg resultFPR = getFreeFPR();
+        if (resultFPR != InvalidFPRReg) {
+            m_jit.loadDouble(address, resultFPR);
+            DataFormat dataFormat = DataFormatJS;
+            // We could be transforming a DataFormatCell into a
+            // DataFormatJS here - but that's OK.
+            if (cachedRecovery.recovery().dataFormat() == DataFormatDouble)
+                dataFormat = DataFormatDouble;
+            updateRecovery(cachedRecovery,
+                ValueRecovery::inFPR(resultFPR, dataFormat));
+            if (verbose)
+                dataLog(cachedRecovery.recovery(), &quot;\n&quot;);
+            if (reg == newAsOld(dangerFrontier()))
+                updateDangerFrontier();
+            return;
+        }
+    }
+
+    ASSERT(cachedRecovery.loadsIntoGPR());
+    if (resultGPR == InvalidGPRReg || m_registers[resultGPR] || m_lockedRegisters.get(resultGPR))
+        resultGPR = getFreeGPR();
+    ASSERT(resultGPR != InvalidGPRReg);
+    m_jit.loadPtr(address, resultGPR);
+    updateRecovery(cachedRecovery,
+        ValueRecovery::inGPR(resultGPR, cachedRecovery.recovery().dataFormat()));
+    if (verbose)
+        dataLog(cachedRecovery.recovery(), &quot;\n&quot;);
+    if (reg == newAsOld(dangerFrontier()))
+        updateDangerFrontier();
+}
+
+bool CallFrameShuffler::canLoad(CachedRecovery&amp; cachedRecovery)
+{
+    if (!cachedRecovery.recovery().isInJSStack())
+        return true;
+
+    ASSERT(cachedRecovery.loadsIntoFPR() || cachedRecovery.loadsIntoGPR());
+
+    if (cachedRecovery.loadsIntoFPR() &amp;&amp; getFreeFPR() != InvalidFPRReg)
+        return true;
+
+    if (cachedRecovery.loadsIntoGPR() &amp;&amp; getFreeGPR() != InvalidGPRReg)
+        return true;
+
+    return false;
+}
+
+bool CachedRecovery::loadsIntoFPR() const
+{
+    switch (recovery().technique()) {
+    case DoubleDisplacedInJSStack:
+    case DisplacedInJSStack:
+    case CellDisplacedInJSStack:
+        return true;
+
+    default:
+        return false;
+    }
+}
+
+bool CachedRecovery::loadsIntoGPR() const
+{
+    switch (recovery().technique()) {
+    case Int32DisplacedInJSStack:
+    case Int52DisplacedInJSStack:
+    case StrictInt52DisplacedInJSStack:
+    case BooleanDisplacedInJSStack:
+    case DisplacedInJSStack:
+    case CellDisplacedInJSStack:
+        return true;
+
+    default:
+        return false;
+    }
+}
+
+void CallFrameShuffler::emitDisplace(CachedRecovery&amp; cachedRecovery)
+{
+    ASSERT(cachedRecovery.recovery().isInRegisters());
+    GPRReg wantedGPR { cachedRecovery.wantedJSValueRegs().gpr() };
+    ASSERT(wantedGPR != InvalidGPRReg);
+    ASSERT(!m_lockedRegisters.get(wantedGPR));
+
+    if (CachedRecovery* current = m_registers[wantedGPR]) {
+        if (current == &amp;cachedRecovery) {
+            if (verbose)
+                dataLog(&quot;   + &quot;, wantedGPR, &quot; is OK\n&quot;);
+            return;
+        }
+        // We could do something more complex in that case, like
+        // finding and resolving cycles.
+        // However, ending up in this situation will be super
+        // rare, and should actually be outright impossible for
+        // non-FTL tiers, since:
+        //  (a) All doubles have been converted into JSValues with
+        //      ValueRep nodes, so FPRs are initially free
+        //
+        //  (b) The only recoveries with wanted registers are the
+        //      callee (which always starts out in a register) and
+        //      the callee-save registers
+        //
+        //  (c) The callee-save registers are the first things we
+        //      load (after the return PC), and they are loaded as JSValues
+        //
+        //  (d) We prefer loading JSValues into FPRs if their
+        //      wanted GPR is not available
+        //
+        //  (e) If we end up spilling some registers with a
+        //      target, we won't load them again before the very
+        //      end of the algorithm
+        //
+        // Combined, this means that we will never load a recovery
+        // with a wanted GPR into any GPR other than its wanted
+        // GPR. The callee could however have been initially in
+        // one of the callee-save registers - but since the wanted
+        // GPR for the callee is always regT0, it will be the
+        // first one to be displaced, and we won't see it when
+        // handling any of the callee-save registers.
+        //
+        // Thus, the only way we could ever reach this path is in
+        // the FTL, when there is so much pressure that we
+        // absolutely need to load the callee-save registers into
+        // different GPRs initially but not enough pressure to
+        // then have to spill all of them. And even in that case,
+        // depending on the order in which LLVM saves the
+        // callee-saves, we will probably still be safe. Anyway,
+        // the couple of extra move instructions compared to an
+        // efficient cycle-based algorithm are not going to hurt
+        // us.
+        GPRReg tempGPR = getFreeGPR();
+        if (verbose)
+            dataLog(&quot;   * Moving &quot;, wantedGPR, &quot; into &quot;, tempGPR, &quot;\n&quot;);
+        m_jit.move(wantedGPR, tempGPR);
+        updateRecovery(*current,
+            ValueRecovery::inGPR(tempGPR, current-&gt;recovery().dataFormat()));
+    }
+    ASSERT(!m_registers[wantedGPR]);
+
+    if (cachedRecovery.recovery().isInGPR()) {
+        if (verbose)
+            dataLog(&quot;   * Moving &quot;, cachedRecovery.recovery().gpr(), &quot; into &quot;, wantedGPR, &quot;\n&quot;);
+        m_jit.move(cachedRecovery.recovery().gpr(), wantedGPR);
+        updateRecovery(cachedRecovery,
+            ValueRecovery::inGPR(wantedGPR, cachedRecovery.recovery().dataFormat()));
+    } else {
+        ASSERT(cachedRecovery.recovery().isInFPR());
+        if (cachedRecovery.recovery().dataFormat() == DataFormatDouble) {
+            // This will automatically pick the wanted GPR
+            emitBox(cachedRecovery);
+        } else {
+            if (verbose)
+                dataLog(&quot;   * Moving &quot;, cachedRecovery.recovery().fpr(), &quot; into &quot;, wantedGPR, &quot;\n&quot;);
+            m_jit.moveDoubleTo64(cachedRecovery.recovery().fpr(), wantedGPR);
+            updateRecovery(cachedRecovery,
+                ValueRecovery::inGPR(wantedGPR, cachedRecovery.recovery().dataFormat()));
+        }
+    }
+
+    ASSERT(m_registers[wantedGPR] == &amp;cachedRecovery);
+    ASSERT(cachedRecovery.recovery().gpr() == wantedGPR);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT) &amp;&amp; USE(JSVALUE64)
</ins></span></pre></div>
<a id="branchesjsctailcallSourceJavaScriptCorejitJITCallcpp"></a>
<div class="modfile"><h4>Modified: branches/jsc-tailcall/Source/JavaScriptCore/jit/JITCall.cpp (188936 => 188937)</h4>
<pre class="diff"><span>
<span class="info">--- branches/jsc-tailcall/Source/JavaScriptCore/jit/JITCall.cpp        2015-08-25 22:50:15 UTC (rev 188936)
+++ branches/jsc-tailcall/Source/JavaScriptCore/jit/JITCall.cpp        2015-08-25 22:59:36 UTC (rev 188937)
</span><span class="lines">@@ -29,6 +29,7 @@
</span><span class="cx"> #if USE(JSVALUE64)
</span><span class="cx"> #include &quot;JIT.h&quot;
</span><span class="cx"> 
</span><ins>+#include &quot;CallFrameShuffler.h&quot;
</ins><span class="cx"> #include &quot;CodeBlock.h&quot;
</span><span class="cx"> #include &quot;JITInlines.h&quot;
</span><span class="cx"> #include &quot;JSArray.h&quot;
</span><span class="lines">@@ -181,9 +182,6 @@
</span><span class="cx">         return;
</span><span class="cx">     }
</span><span class="cx"> 
</span><del>-    if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs)
-        emitRestoreCalleeSaves();
-
</del><span class="cx">     DataLabelPtr addressOfLinkedFunctionCheck;
</span><span class="cx">     Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
</span><span class="cx">     addSlowCase(slowCase);
</span><span class="lines">@@ -194,7 +192,28 @@
</span><span class="cx">     m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
</span><span class="cx">     m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;
</span><span class="cx"> 
</span><del>-    if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs) {
</del><ins>+    if (opcodeID == op_tail_call) {
+        CallFrameShuffleData shuffleData;
+        shuffleData.numLocals =
+            instruction[4].u.operand - sizeof(CallerFrameAndPC) / sizeof(Register);
+        shuffleData.args.resize(instruction[3].u.operand);
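+        // The arguments were already stored into the caller's frame by
+        // the bytecode, so each one is recorded as a DisplacedInJSStack
+        // recovery pointing at its slot.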
+        for (int i = 0; i &lt; instruction[3].u.operand; ++i) {
+            shuffleData.args[i] =
+                ValueRecovery::displacedInJSStack(
+                    virtualRegisterForArgument(i) - instruction[4].u.operand,
+                    DataFormatJS);
+        }
+        shuffleData.callee =
+            ValueRecovery::inGPR(regT0, DataFormatJS);
+        shuffleData.setupCalleeSaveRegisters(m_codeBlock);
+        info-&gt;setFrameShuffleData(shuffleData);
+        CallFrameShuffler(*this, shuffleData).prepareForTailCall();
+        m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
+        return;
+    }
+
+    if (opcodeID == op_tail_call_varargs) {
+        emitRestoreCalleeSaves();
</ins><span class="cx">         prepareForTailCallSlow();
</span><span class="cx">         m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
</span><span class="cx">         return;
</span><span class="lines">@@ -219,6 +238,9 @@
</span><span class="cx"> 
</span><span class="cx">     linkSlowCase(iter);
</span><span class="cx"> 
</span><ins>+    if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs)
+        emitRestoreCalleeSaves();
+
</ins><span class="cx">     move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2);
</span><span class="cx"> 
</span><span class="cx">     m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm-&gt;getCTIStub(linkCallThunkGenerator).code());
</span></span></pre></div>
<a id="branchesjsctailcallSourceJavaScriptCorejitRegisterMaph"></a>
<div class="modfile"><h4>Modified: branches/jsc-tailcall/Source/JavaScriptCore/jit/RegisterMap.h (188936 => 188937)</h4>
<pre class="diff"><span>
<span class="info">--- branches/jsc-tailcall/Source/JavaScriptCore/jit/RegisterMap.h        2015-08-25 22:50:15 UTC (rev 188936)
+++ branches/jsc-tailcall/Source/JavaScriptCore/jit/RegisterMap.h        2015-08-25 22:59:36 UTC (rev 188937)
</span><span class="lines">@@ -28,6 +28,8 @@
</span><span class="cx"> 
</span><span class="cx"> #if ENABLE(JIT)
</span><span class="cx"> 
</span><ins>+#include &quot;FPRInfo.h&quot;
+#include &quot;GPRInfo.h&quot;
</ins><span class="cx"> #include &quot;MacroAssembler.h&quot;
</span><span class="cx"> #include &quot;Reg.h&quot;
</span><span class="cx"> 
</span></span></pre></div>
<a id="branchesjsctailcallSourceJavaScriptCorejitRepatchcpp"></a>
<div class="modfile"><h4>Modified: branches/jsc-tailcall/Source/JavaScriptCore/jit/Repatch.cpp (188936 => 188937)</h4>
<pre class="diff"><span>
<span class="info">--- branches/jsc-tailcall/Source/JavaScriptCore/jit/Repatch.cpp        2015-08-25 22:50:15 UTC (rev 188936)
+++ branches/jsc-tailcall/Source/JavaScriptCore/jit/Repatch.cpp        2015-08-25 22:59:36 UTC (rev 188937)
</span><span class="lines">@@ -31,6 +31,7 @@
</span><span class="cx"> #include &quot;AccessorCallJITStubRoutine.h&quot;
</span><span class="cx"> #include &quot;BinarySwitch.h&quot;
</span><span class="cx"> #include &quot;CCallHelpers.h&quot;
</span><ins>+#include &quot;CallFrameShuffler.h&quot;
</ins><span class="cx"> #include &quot;DFGOperations.h&quot;
</span><span class="cx"> #include &quot;DFGSpeculativeJIT.h&quot;
</span><span class="cx"> #include &quot;FTLThunks.h&quot;
</span><span class="lines">@@ -1787,26 +1788,32 @@
</span><span class="cx">     
</span><span class="cx">     CCallHelpers::JumpList slowPath;
</span><span class="cx">     
</span><del>-    ptrdiff_t offsetToFrame = -sizeof(CallerFrameAndPC);
-
-    if (!ASSERT_DISABLED) {
-        CCallHelpers::Jump okArgumentCount = stubJit.branch32(
-            CCallHelpers::Below, CCallHelpers::Address(CCallHelpers::stackPointerRegister, static_cast&lt;ptrdiff_t&gt;(sizeof(Register) * JSStack::ArgumentCount) + offsetToFrame + PayloadOffset), CCallHelpers::TrustedImm32(10000000));
-        stubJit.abortWithReason(RepatchInsaneArgumentCount);
-        okArgumentCount.link(&amp;stubJit);
</del><ins>+    std::unique_ptr&lt;CallFrameShuffler&gt; frameShuffler;
+    if (callLinkInfo.frameShuffleData()) {
+        ASSERT(callLinkInfo.isTailCall());
+        frameShuffler = std::make_unique&lt;CallFrameShuffler&gt;(stubJit, *callLinkInfo.frameShuffleData());
+#if USE(JSVALUE32_64)
+        // We would have already checked that the callee is a cell, and we can
+        // use the additional register this buys us.
+        frameShuffler-&gt;assumeCalleeIsCell();
+#endif
+        frameShuffler-&gt;lockGPR(calleeGPR);
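+        // Pin the callee's register so that the shuffler never hands
+        // it out as a scratch register.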
</ins><span class="cx">     }
</span><del>-    
-    GPRReg scratch = AssemblyHelpers::selectScratchGPR(calleeGPR);
</del><span class="cx">     GPRReg comparisonValueGPR;
</span><span class="cx">     
</span><span class="cx">     if (isClosureCall) {
</span><del>-        // Verify that we have a function and stash the executable in scratch.
</del><ins>+        GPRReg scratchGPR;
+        if (frameShuffler)
+            scratchGPR = frameShuffler-&gt;acquireGPR();
+        else
+            scratchGPR = AssemblyHelpers::selectScratchGPR(calleeGPR);
+        // Verify that we have a function and stash the executable in scratchGPR.
</ins><span class="cx"> 
</span><span class="cx"> #if USE(JSVALUE64)
</span><del>-        // We can safely clobber everything except the calleeGPR. We can't rely on tagMaskRegister
-        // being set. So we do this the hard way.
-        stubJit.move(MacroAssembler::TrustedImm64(TagMask), scratch);
-        slowPath.append(stubJit.branchTest64(CCallHelpers::NonZero, calleeGPR, scratch));
</del><ins>+        // We can't rely on tagMaskRegister being set, so we do this the hard
+        // way.
+        stubJit.move(MacroAssembler::TrustedImm64(TagMask), scratchGPR);
+        slowPath.append(stubJit.branchTest64(CCallHelpers::NonZero, calleeGPR, scratchGPR));
</ins><span class="cx"> #else
</span><span class="cx">         // We would have already checked that the callee is a cell.
</span><span class="cx"> #endif
</span><span class="lines">@@ -1819,9 +1826,9 @@
</span><span class="cx">     
</span><span class="cx">         stubJit.loadPtr(
</span><span class="cx">             CCallHelpers::Address(calleeGPR, JSFunction::offsetOfExecutable()),
</span><del>-            scratch);
</del><ins>+            scratchGPR);
</ins><span class="cx">         
</span><del>-        comparisonValueGPR = scratch;
</del><ins>+        comparisonValueGPR = scratchGPR;
</ins><span class="cx">     } else
</span><span class="cx">         comparisonValueGPR = calleeGPR;
</span><span class="cx">     
</span><span class="lines">@@ -1863,8 +1870,13 @@
</span><span class="cx">         caseValues[i] = newCaseValue;
</span><span class="cx">     }
</span><span class="cx">     
</span><del>-    GPRReg fastCountsBaseGPR =
-        AssemblyHelpers::selectScratchGPR(calleeGPR, comparisonValueGPR, GPRInfo::regT3);
</del><ins>+    GPRReg fastCountsBaseGPR;
+    if (frameShuffler)
+        fastCountsBaseGPR = frameShuffler-&gt;acquireGPR();
+    else {
+        fastCountsBaseGPR =
+            AssemblyHelpers::selectScratchGPR(calleeGPR, comparisonValueGPR, GPRInfo::regT3);
+    }
</ins><span class="cx">     stubJit.move(CCallHelpers::TrustedImmPtr(fastCounts.get()), fastCountsBaseGPR);
</span><span class="cx">     
</span><span class="cx">     BinarySwitch binarySwitch(comparisonValueGPR, caseValues, BinarySwitch::IntPtr);
</span><span class="lines">@@ -1884,7 +1896,10 @@
</span><span class="cx">                 CCallHelpers::TrustedImm32(1),
</span><span class="cx">                 CCallHelpers::Address(fastCountsBaseGPR, caseIndex * sizeof(uint32_t)));
</span><span class="cx">         }
</span><del>-        if (callLinkInfo.isTailCall()) {
</del><ins>+        if (frameShuffler) {
+            CallFrameShuffler(stubJit, frameShuffler-&gt;snapshot()).prepareForTailCall();
+            calls[caseIndex].call = stubJit.nearTailCall();
+        } else if (callLinkInfo.isTailCall()) {
</ins><span class="cx">             stubJit.emitRestoreCalleeSaves();
</span><span class="cx">             stubJit.prepareForTailCallSlow();
</span><span class="cx">             calls[caseIndex].call = stubJit.nearTailCall();
</span><span class="lines">@@ -1896,10 +1911,23 @@
</span><span class="cx">     
</span><span class="cx">     slowPath.link(&amp;stubJit);
</span><span class="cx">     binarySwitch.fallThrough().link(&amp;stubJit);
</span><del>-    stubJit.move(calleeGPR, GPRInfo::regT0);
</del><ins>+
+    if (frameShuffler) {
+        frameShuffler-&gt;releaseGPR(calleeGPR);
+        frameShuffler-&gt;releaseGPR(comparisonValueGPR);
+        frameShuffler-&gt;releaseGPR(fastCountsBaseGPR);
</ins><span class="cx"> #if USE(JSVALUE32_64)
</span><del>-    stubJit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
</del><ins>+        frameShuffler-&gt;setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT1, GPRInfo::regT0));
+#else
+        frameShuffler-&gt;setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT0));
</ins><span class="cx"> #endif
</span><ins>+        frameShuffler-&gt;prepareForSlowPath();
+    } else {
+        stubJit.move(calleeGPR, GPRInfo::regT0);
+#if USE(JSVALUE32_64)
+        stubJit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
+#endif
+    }
</ins><span class="cx">     stubJit.move(CCallHelpers::TrustedImmPtr(&amp;callLinkInfo), GPRInfo::regT2);
</span><span class="cx">     stubJit.move(CCallHelpers::TrustedImmPtr(callLinkInfo.callReturnLocation().executableAddress()), GPRInfo::regT4);
</span><span class="cx">     
</span></span></pre>
</div>
</div>

</body>
</html>