<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>[283089] trunk</title>
</head>
<body>

<style type="text/css"><!--
#msg dl.meta { border: 1px #006 solid; background: #369; padding: 6px; color: #fff; }
#msg dl.meta dt { float: left; width: 6em; font-weight: bold; }
#msg dt:after { content:':';}
#msg dl, #msg dt, #msg ul, #msg li, #header, #footer, #logmsg { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt;  }
#msg dl a { font-weight: bold}
#msg dl a:link    { color:#fc3; }
#msg dl a:active  { color:#ff0; }
#msg dl a:visited { color:#cc6; }
h3 { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt; font-weight: bold; }
#msg pre { overflow: auto; background: #ffc; border: 1px #fa0 solid; padding: 6px; }
#logmsg { background: #ffc; border: 1px #fa0 solid; padding: 1em 1em 0 1em; }
#logmsg p, #logmsg pre, #logmsg blockquote { margin: 0 0 1em 0; }
#logmsg p, #logmsg li, #logmsg dt, #logmsg dd { line-height: 14pt; }
#logmsg h1, #logmsg h2, #logmsg h3, #logmsg h4, #logmsg h5, #logmsg h6 { margin: .5em 0; }
#logmsg h1:first-child, #logmsg h2:first-child, #logmsg h3:first-child, #logmsg h4:first-child, #logmsg h5:first-child, #logmsg h6:first-child { margin-top: 0; }
#logmsg ul, #logmsg ol { padding: 0; list-style-position: inside; margin: 0 0 0 1em; }
#logmsg ul { text-indent: -1em; padding-left: 1em; }
#logmsg ol { text-indent: -1.5em; padding-left: 1.5em; }
#logmsg > ul, #logmsg > ol { margin: 0 0 1em 0; }
#logmsg pre { background: #eee; padding: 1em; }
#logmsg blockquote { border: 1px solid #fa0; border-left-width: 10px; padding: 1em 1em 0 1em; background: white;}
#logmsg dl { margin: 0; }
#logmsg dt { font-weight: bold; }
#logmsg dd { margin: 0; padding: 0 0 0.5em 0; }
#logmsg dd:before { content:'\00bb';}
#logmsg table { border-spacing: 0px; border-collapse: collapse; border-top: 4px solid #fa0; border-bottom: 1px solid #fa0; background: #fff; }
#logmsg table th { text-align: left; font-weight: normal; padding: 0.2em 0.5em; border-top: 1px dotted #fa0; }
#logmsg table td { text-align: right; border-top: 1px dotted #fa0; padding: 0.2em 0.5em; }
#logmsg table thead th { text-align: center; border-bottom: 1px solid #fa0; }
#logmsg table th.Corner { text-align: left; }
#logmsg hr { border: none 0; border-top: 2px dashed #fa0; height: 1px; }
#header, #footer { color: #fff; background: #636; border: 1px #300 solid; padding: 6px; }
#patch { width: 100%; }
#patch h4 {font-family: verdana,arial,helvetica,sans-serif;font-size:10pt;padding:8px;background:#369;color:#fff;margin:0;}
#patch .propset h4, #patch .binary h4 {margin:0;}
#patch pre {padding:0;line-height:1.2em;margin:0;}
#patch .diff {width:100%;background:#eee;padding: 0 0 10px 0;overflow:auto;}
#patch .propset .diff, #patch .binary .diff  {padding:10px 0;}
#patch span {display:block;padding:0 10px;}
#patch .modfile, #patch .addfile, #patch .delfile, #patch .propset, #patch .binary, #patch .copfile {border:1px solid #ccc;margin:10px 0;}
#patch ins {background:#dfd;text-decoration:none;display:block;padding:0 10px;}
#patch del {background:#fdd;text-decoration:none;display:block;padding:0 10px;}
#patch .lines, .info {color:#888;background:#fff;}
--></style>
<div id="msg">
<dl class="meta">
<dt>Revision</dt> <dd><a href="http://trac.webkit.org/projects/webkit/changeset/283089">283089</a></dd>
<dt>Author</dt> <dd>commit-queue@webkit.org</dd>
<dt>Date</dt> <dd>2021-09-26 14:20:52 -0700 (Sun, 26 Sep 2021)</dd>
</dl>

<h3>Log Message</h3>
<pre>Unreviewed, reverting <a href="http://trac.webkit.org/projects/webkit/changeset/283083">r283083</a> and <a href="http://trac.webkit.org/projects/webkit/changeset/283088">r283088</a>.
https://bugs.webkit.org/show_bug.cgi?id=230806

Windows ports are crashing

Reverted changesets:

"Build an unlinked baseline JIT"
https://bugs.webkit.org/show_bug.cgi?id=229223
https://commits.webkit.org/<a href="http://trac.webkit.org/projects/webkit/changeset/283083">r283083</a>

"Make byte codes with arithmetic profiles switch to using an
index instead of a pointer in metadata"
https://bugs.webkit.org/show_bug.cgi?id=230798
https://commits.webkit.org/<a href="http://trac.webkit.org/projects/webkit/changeset/283088">r283088</a></pre>

<h3>Modified Paths</h3>
<ul>
<li><a href="#trunkSourceJavaScriptCoreCMakeListstxt">trunk/Source/JavaScriptCore/CMakeLists.txt</a></li>
<li><a href="#trunkSourceJavaScriptCoreChangeLog">trunk/Source/JavaScriptCore/ChangeLog</a></li>
<li><a href="#trunkSourceJavaScriptCoreJavaScriptCorexcodeprojprojectpbxproj">trunk/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj</a></li>
<li><a href="#trunkSourceJavaScriptCoreSourcestxt">trunk/Source/JavaScriptCore/Sources.txt</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeAccessCasecpp">trunk/Source/JavaScriptCore/bytecode/AccessCase.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeBytecodeListrb">trunk/Source/JavaScriptCore/bytecode/BytecodeList.rb</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeBytecodeOperandsForCheckpointh">trunk/Source/JavaScriptCore/bytecode/BytecodeOperandsForCheckpoint.h</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeCallLinkInfocpp">trunk/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeCallLinkInfoh">trunk/Source/JavaScriptCore/bytecode/CallLinkInfo.h</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeCodeBlockcpp">trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeCodeBlockh">trunk/Source/JavaScriptCore/bytecode/CodeBlock.h</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeCodeBlockInlinesh">trunk/Source/JavaScriptCore/bytecode/CodeBlockInlines.h</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeExecutableToCodeBlockEdgeh">trunk/Source/JavaScriptCore/bytecode/ExecutableToCodeBlockEdge.h</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeExecutionCountercpp">trunk/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeExecutionCounterh">trunk/Source/JavaScriptCore/bytecode/ExecutionCounter.h</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeGetByIdMetadatah">trunk/Source/JavaScriptCore/bytecode/GetByIdMetadata.h</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeGetByStatuscpp">trunk/Source/JavaScriptCore/bytecode/GetByStatus.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeGetterSetterAccessCasecpp">trunk/Source/JavaScriptCore/bytecode/GetterSetterAccessCase.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeInByStatuscpp">trunk/Source/JavaScriptCore/bytecode/InByStatus.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeInlineAccesscpp">trunk/Source/JavaScriptCore/bytecode/InlineAccess.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeInlineAccessh">trunk/Source/JavaScriptCore/bytecode/InlineAccess.h</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeIterationModeMetadatah">trunk/Source/JavaScriptCore/bytecode/IterationModeMetadata.h</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeLLIntCallLinkInfoh">trunk/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeOpcodeh">trunk/Source/JavaScriptCore/bytecode/Opcode.h</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodePolymorphicAccesscpp">trunk/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodePolymorphicAccessh">trunk/Source/JavaScriptCore/bytecode/PolymorphicAccess.h</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodePutByStatuscpp">trunk/Source/JavaScriptCore/bytecode/PutByStatus.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeStructureStubInfocpp">trunk/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeStructureStubInfoh">trunk/Source/JavaScriptCore/bytecode/StructureStubInfo.h</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeSuperSamplercpp">trunk/Source/JavaScriptCore/bytecode/SuperSampler.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeUnlinkedCodeBlockcpp">trunk/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeUnlinkedCodeBlockh">trunk/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeUnlinkedCodeBlockGeneratorcpp">trunk/Source/JavaScriptCore/bytecode/UnlinkedCodeBlockGenerator.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeUnlinkedCodeBlockGeneratorh">trunk/Source/JavaScriptCore/bytecode/UnlinkedCodeBlockGenerator.h</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeUnlinkedMetadataTableh">trunk/Source/JavaScriptCore/bytecode/UnlinkedMetadataTable.h</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecodeValueProfileh">trunk/Source/JavaScriptCore/bytecode/ValueProfile.h</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecompilerBytecodeGeneratorcpp">trunk/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorebytecompilerBytecodeGeneratorh">trunk/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h</a></li>
<li><a href="#trunkSourceJavaScriptCoredfgDFGCommonDatah">trunk/Source/JavaScriptCore/dfg/DFGCommonData.h</a></li>
<li><a href="#trunkSourceJavaScriptCoredfgDFGJITCodecpp">trunk/Source/JavaScriptCore/dfg/DFGJITCode.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCoredfgDFGJITCodeh">trunk/Source/JavaScriptCore/dfg/DFGJITCode.h</a></li>
<li><a href="#trunkSourceJavaScriptCoredfgDFGJITCompilercpp">trunk/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCoredfgDFGOSREntrycpp">trunk/Source/JavaScriptCore/dfg/DFGOSREntry.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCoredfgDFGOSRExitcpp">trunk/Source/JavaScriptCore/dfg/DFGOSRExit.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCoredfgDFGOSRExitCompilerCommoncpp">trunk/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCoredfgDFGOperationscpp">trunk/Source/JavaScriptCore/dfg/DFGOperations.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCoredfgDFGSpeculativeJITcpp">trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCoredfgDFGSpeculativeJIT32_64cpp">trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCoredfgDFGSpeculativeJIT64cpp">trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCoreftlFTLCompilecpp">trunk/Source/JavaScriptCore/ftl/FTLCompile.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCoreftlFTLJITCodeh">trunk/Source/JavaScriptCore/ftl/FTLJITCode.h</a></li>
<li><a href="#trunkSourceJavaScriptCoreftlFTLLinkcpp">trunk/Source/JavaScriptCore/ftl/FTLLink.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCoreftlFTLLowerDFGToB3cpp">trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCoreftlFTLOSRExitCompilercpp">trunk/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCoregeneratorMetadatarb">trunk/Source/JavaScriptCore/generator/Metadata.rb</a></li>
<li><a href="#trunkSourceJavaScriptCorejitAssemblyHelperscpp">trunk/Source/JavaScriptCore/jit/AssemblyHelpers.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorejitAssemblyHelpersh">trunk/Source/JavaScriptCore/jit/AssemblyHelpers.h</a></li>
<li><a href="#trunkSourceJavaScriptCorejitBaselineJITPlancpp">trunk/Source/JavaScriptCore/jit/BaselineJITPlan.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorejitCCallHelperscpp">trunk/Source/JavaScriptCore/jit/CCallHelpers.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorejitCCallHelpersh">trunk/Source/JavaScriptCore/jit/CCallHelpers.h</a></li>
<li><a href="#trunkSourceJavaScriptCorejitCallFrameShuffleDatacpp">trunk/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorejitCallFrameShuffleDatah">trunk/Source/JavaScriptCore/jit/CallFrameShuffleData.h</a></li>
<li><a href="#trunkSourceJavaScriptCorejitCallFrameShufflercpp">trunk/Source/JavaScriptCore/jit/CallFrameShuffler.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorejitCallFrameShufflerh">trunk/Source/JavaScriptCore/jit/CallFrameShuffler.h</a></li>
<li><a href="#trunkSourceJavaScriptCorejitJITcpp">trunk/Source/JavaScriptCore/jit/JIT.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorejitJITh">trunk/Source/JavaScriptCore/jit/JIT.h</a></li>
<li><a href="#trunkSourceJavaScriptCorejitJITArithmeticcpp">trunk/Source/JavaScriptCore/jit/JITArithmetic.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorejitJITCallcpp">trunk/Source/JavaScriptCore/jit/JITCall.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorejitJITCall32_64cpp">trunk/Source/JavaScriptCore/jit/JITCall32_64.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorejitJITCodeh">trunk/Source/JavaScriptCore/jit/JITCode.h</a></li>
<li><a href="#trunkSourceJavaScriptCorejitJITCompilationKeycpp">trunk/Source/JavaScriptCore/jit/JITCompilationKey.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorejitJITCompilationKeyh">trunk/Source/JavaScriptCore/jit/JITCompilationKey.h</a></li>
<li><a href="#trunkSourceJavaScriptCorejitJITInlineCacheGeneratorcpp">trunk/Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorejitJITInlineCacheGeneratorh">trunk/Source/JavaScriptCore/jit/JITInlineCacheGenerator.h</a></li>
<li><a href="#trunkSourceJavaScriptCorejitJITInlinesh">trunk/Source/JavaScriptCore/jit/JITInlines.h</a></li>
<li><a href="#trunkSourceJavaScriptCorejitJITOpcodescpp">trunk/Source/JavaScriptCore/jit/JITOpcodes.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorejitJITOpcodes32_64cpp">trunk/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorejitJITOperationscpp">trunk/Source/JavaScriptCore/jit/JITOperations.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorejitJITOperationsh">trunk/Source/JavaScriptCore/jit/JITOperations.h</a></li>
<li><a href="#trunkSourceJavaScriptCorejitJITPlancpp">trunk/Source/JavaScriptCore/jit/JITPlan.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorejitJITPropertyAccesscpp">trunk/Source/JavaScriptCore/jit/JITPropertyAccess.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorejitJITPropertyAccess32_64cpp">trunk/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorejitRepatchcpp">trunk/Source/JavaScriptCore/jit/Repatch.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorellintLLIntSlowPathscpp">trunk/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorellintLowLevelInterpreterasm">trunk/Source/JavaScriptCore/llint/LowLevelInterpreter.asm</a></li>
<li><a href="#trunkSourceJavaScriptCorellintLowLevelInterpreter32_64asm">trunk/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm</a></li>
<li><a href="#trunkSourceJavaScriptCorellintLowLevelInterpreter64asm">trunk/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm</a></li>
<li><a href="#trunkSourceJavaScriptCoreofflineasmclooprb">trunk/Source/JavaScriptCore/offlineasm/cloop.rb</a></li>
<li><a href="#trunkSourceJavaScriptCoreruntimeCacheableIdentifierh">trunk/Source/JavaScriptCore/runtime/CacheableIdentifier.h</a></li>
<li><a href="#trunkSourceJavaScriptCoreruntimeCacheableIdentifierInlinesh">trunk/Source/JavaScriptCore/runtime/CacheableIdentifierInlines.h</a></li>
<li><a href="#trunkSourceJavaScriptCoreruntimeCachedTypescpp">trunk/Source/JavaScriptCore/runtime/CachedTypes.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCoreruntimeCommonSlowPathscpp">trunk/Source/JavaScriptCore/runtime/CommonSlowPaths.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCoreruntimeFunctionExecutableh">trunk/Source/JavaScriptCore/runtime/FunctionExecutable.h</a></li>
<li><a href="#trunkSourceJavaScriptCoreruntimeOptionscpp">trunk/Source/JavaScriptCore/runtime/Options.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCoreruntimeOptionsListh">trunk/Source/JavaScriptCore/runtime/OptionsList.h</a></li>
<li><a href="#trunkSourceJavaScriptCoreruntimeScriptExecutablecpp">trunk/Source/JavaScriptCore/runtime/ScriptExecutable.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorewasmWasmLLIntTierUpCounterh">trunk/Source/JavaScriptCore/wasm/WasmLLIntTierUpCounter.h</a></li>
<li><a href="#trunkSourceJavaScriptCorewasmWasmTierUpCountcpp">trunk/Source/JavaScriptCore/wasm/WasmTierUpCount.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorewasmWasmTierUpCounth">trunk/Source/JavaScriptCore/wasm/WasmTierUpCount.h</a></li>
<li><a href="#trunkSourceWTFChangeLog">trunk/Source/WTF/ChangeLog</a></li>
<li><a href="#trunkSourceWTFwtfBagh">trunk/Source/WTF/wtf/Bag.h</a></li>
<li><a href="#trunkSourceWTFwtfPackedh">trunk/Source/WTF/wtf/Packed.h</a></li>
<li><a href="#trunkToolsChangeLog">trunk/Tools/ChangeLog</a></li>
<li><a href="#trunkToolsScriptsrunjscstresstests">trunk/Tools/Scripts/run-jsc-stress-tests</a></li>
</ul>

<h3>Removed Paths</h3>
<ul>
<li><a href="#trunkSourceJavaScriptCorejitBaselineJITCodecpp">trunk/Source/JavaScriptCore/jit/BaselineJITCode.cpp</a></li>
<li><a href="#trunkSourceJavaScriptCorejitBaselineJITCodeh">trunk/Source/JavaScriptCore/jit/BaselineJITCode.h</a></li>
</ul>

</div>
<div id="patch">
<h3>Diff</h3>
<a id="trunkSourceJavaScriptCoreCMakeListstxt"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/CMakeLists.txt (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/CMakeLists.txt       2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/CMakeLists.txt  2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -611,7 +611,6 @@
</span><span class="cx">     builtins/BuiltinNames.h
</span><span class="cx">     builtins/BuiltinUtils.h
</span><span class="cx"> 
</span><del>-    bytecode/ArithProfile.h
</del><span class="cx">     bytecode/ArrayAllocationProfile.h
</span><span class="cx">     bytecode/ArrayProfile.h
</span><span class="cx">     bytecode/BytecodeConventions.h
</span><span class="lines">@@ -838,7 +837,6 @@
</span><span class="cx"> 
</span><span class="cx">     jit/AssemblyHelpers.h
</span><span class="cx">     jit/AssemblyHelpersSpoolers.h
</span><del>-    jit/BaselineJITCode.h
</del><span class="cx">     jit/CCallHelpers.h
</span><span class="cx">     jit/ExecutableAllocator.h
</span><span class="cx">     jit/ExecutableMemoryHandle.h
</span><span class="lines">@@ -877,7 +875,6 @@
</span><span class="cx">     parser/ParserError.h
</span><span class="cx">     parser/ParserModes.h
</span><span class="cx">     parser/ParserTokens.h
</span><del>-    parser/ResultType.h
</del><span class="cx">     parser/SourceCode.h
</span><span class="cx">     parser/SourceProvider.h
</span><span class="cx">     parser/SourceProviderCache.h
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoreChangeLog"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/ChangeLog (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/ChangeLog    2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/ChangeLog       2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -1,3 +1,21 @@
</span><ins>+2021-09-26  Commit Queue  <commit-queue@webkit.org>
+
+        Unreviewed, reverting r283083 and r283088.
+        https://bugs.webkit.org/show_bug.cgi?id=230806
+
+        Windows ports are crashing
+
+        Reverted changesets:
+
+        "Build an unlinked baseline JIT"
+        https://bugs.webkit.org/show_bug.cgi?id=229223
+        https://commits.webkit.org/r283083
+
+        "Make byte codes with arithmetic profiles switch to using an
+        index instead of a pointer in metadata"
+        https://bugs.webkit.org/show_bug.cgi?id=230798
+        https://commits.webkit.org/r283088
+
</ins><span class="cx"> 2021-09-26  Saam Barati  <sbarati@apple.com>
</span><span class="cx"> 
</span><span class="cx">         Make byte codes with arithmetic profiles switch to using an index instead of a pointer in metadata
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoreJavaScriptCorexcodeprojprojectpbxproj"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj     2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj        2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -900,7 +900,6 @@
</span><span class="cx">          52CD0F5D2242F569004A18A5 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 51F0EB6105C86C6B00E6DF1B /* Foundation.framework */; };
</span><span class="cx">          52CD0F5E2242F569004A18A5 /* JavaScriptCore.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 932F5BD90822A1C700736975 /* JavaScriptCore.framework */; };
</span><span class="cx">          52CD0F682242F71C004A18A5 /* testdfg.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 52CD0F672242F71C004A18A5 /* testdfg.cpp */; };
</span><del>-               52DD000826E039B90054E408 /* BaselineJITCode.h in Headers */ = {isa = PBXBuildFile; fileRef = 52DD000626E039B30054E408 /* BaselineJITCode.h */; settings = {ATTRIBUTES = (Private, ); }; };
</del><span class="cx">           52EED7942492B870008F4C93 /* FunctionAllowlist.h in Headers */ = {isa = PBXBuildFile; fileRef = 52EED7932492B868008F4C93 /* FunctionAllowlist.h */; };
</span><span class="cx">          52F6C35E1E71EB080081F4CC /* WebAssemblyWrapperFunction.h in Headers */ = {isa = PBXBuildFile; fileRef = 52F6C35C1E71EB080081F4CC /* WebAssemblyWrapperFunction.h */; };
</span><span class="cx">          530A66B91FA3E78B0026A545 /* UnifiedSource3-mm.mm in Sources */ = {isa = PBXBuildFile; fileRef = 530A66B11FA3E77A0026A545 /* UnifiedSource3-mm.mm */; };
</span><span class="lines">@@ -1221,7 +1220,7 @@
</span><span class="cx">          79872C48221BBAF3008C6969 /* JSBaseInternal.h in Headers */ = {isa = PBXBuildFile; fileRef = 79872C47221BBAED008C6969 /* JSBaseInternal.h */; };
</span><span class="cx">          799EF7C41C56ED96002B0534 /* B3PCToOriginMap.h in Headers */ = {isa = PBXBuildFile; fileRef = 799EF7C31C56ED96002B0534 /* B3PCToOriginMap.h */; settings = {ATTRIBUTES = (Private, ); }; };
</span><span class="cx">          79A090801D768465008B889B /* HashMapImpl.h in Headers */ = {isa = PBXBuildFile; fileRef = 79A0907E1D768465008B889B /* HashMapImpl.h */; settings = {ATTRIBUTES = (Private, ); }; };
</span><del>-               79A228361D35D71F00D8E067 /* ArithProfile.h in Headers */ = {isa = PBXBuildFile; fileRef = 79A228341D35D71E00D8E067 /* ArithProfile.h */; settings = {ATTRIBUTES = (Private, ); }; };
</del><ins>+                79A228361D35D71F00D8E067 /* ArithProfile.h in Headers */ = {isa = PBXBuildFile; fileRef = 79A228341D35D71E00D8E067 /* ArithProfile.h */; };
</ins><span class="cx">           79ABB17E1E5CCB570045B9A6 /* AirDisassembler.h in Headers */ = {isa = PBXBuildFile; fileRef = 79ABB17C1E5CCB570045B9A6 /* AirDisassembler.h */; settings = {ATTRIBUTES = (Private, ); }; };
</span><span class="cx">          79AC30FF1F99536400484FD7 /* ObjectAllocationProfileInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = 79AC30FE1F99536300484FD7 /* ObjectAllocationProfileInlines.h */; };
</span><span class="cx">          79AF0BE41D3EFD4C00E95FA5 /* JITMathICInlineResult.h in Headers */ = {isa = PBXBuildFile; fileRef = 79AF0BE31D3EFD4C00E95FA5 /* JITMathICInlineResult.h */; settings = {ATTRIBUTES = (Private, ); }; };
</span><span class="lines">@@ -3675,8 +3674,6 @@
</span><span class="cx">          52CD0F642242F569004A18A5 /* testdfg */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = testdfg; sourceTree = BUILT_PRODUCTS_DIR; };
</span><span class="cx">          52CD0F672242F71C004A18A5 /* testdfg.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = testdfg.cpp; path = dfg/testdfg.cpp; sourceTree = "<group>"; };
</span><span class="cx">          52D1308F221CE03A009C836C /* foo.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = foo.js; sourceTree = "<group>"; };
</span><del>-               52DD000626E039B30054E408 /* BaselineJITCode.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = BaselineJITCode.h; sourceTree = "<group>"; };
-               52DD000726E039B40054E408 /* BaselineJITCode.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = BaselineJITCode.cpp; sourceTree = "<group>"; };
</del><span class="cx">           52EED7922492B868008F4C93 /* FunctionAllowlist.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = FunctionAllowlist.cpp; sourceTree = "<group>"; };
</span><span class="cx">          52EED7932492B868008F4C93 /* FunctionAllowlist.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = FunctionAllowlist.h; sourceTree = "<group>"; };
</span><span class="cx">          52F6C35B1E71EB080081F4CC /* WebAssemblyWrapperFunction.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = WebAssemblyWrapperFunction.cpp; path = js/WebAssemblyWrapperFunction.cpp; sourceTree = "<group>"; };
</span><span class="lines">@@ -6279,8 +6276,6 @@
</span><span class="cx">                          6B767E7A26791F270017F8D1 /* AssemblyHelpersSpoolers.h */,
</span><span class="cx">                          723998F6265DBCDB0057867F /* BaselineJITPlan.cpp */,
</span><span class="cx">                          723998F5265DBCDB0057867F /* BaselineJITPlan.h */,
</span><del>-                               52DD000726E039B40054E408 /* BaselineJITCode.cpp */,
-                               52DD000626E039B30054E408 /* BaselineJITCode.h */,
</del><span class="cx">                           0F64B26F1A784BAF006E4E66 /* BinarySwitch.cpp */,
</span><span class="cx">                          0F64B2701A784BAF006E4E66 /* BinarySwitch.h */,
</span><span class="cx">                          65B8392D1BACA9D30044E824 /* CachedRecovery.cpp */,
</span><span class="lines">@@ -9735,7 +9730,6 @@
</span><span class="cx">                          0F2017821DCADD4200EA5950 /* DFGFlowMap.h in Headers */,
</span><span class="cx">                          0F9D339717FFC4E60073C2BC /* DFGFlushedAt.h in Headers */,
</span><span class="cx">                          A7D89CF817A0B8CC00773AD8 /* DFGFlushFormat.h in Headers */,
</span><del>-                               52DD000826E039B90054E408 /* BaselineJITCode.h in Headers */,
</del><span class="cx">                           0F2DD8151AB3D8BE00BBB8E8 /* DFGForAllKills.h in Headers */,
</span><span class="cx">                          0F69CC89193AC60A0045759E /* DFGFrozenValue.h in Headers */,
</span><span class="cx">                          E386FD7E26E867B800E4C28B /* TemporalPlainTime.h in Headers */,
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoreSourcestxt"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/Sources.txt (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/Sources.txt  2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/Sources.txt     2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -619,7 +619,6 @@
</span><span class="cx"> interpreter/StackVisitor.cpp
</span><span class="cx"> 
</span><span class="cx"> jit/AssemblyHelpers.cpp
</span><del>-jit/BaselineJITCode.cpp
</del><span class="cx"> jit/BaselineJITPlan.cpp
</span><span class="cx"> jit/BinarySwitch.cpp
</span><span class="cx"> jit/CCallHelpers.cpp
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeAccessCasecpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/AccessCase.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/AccessCase.cpp      2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/AccessCase.cpp 2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -202,15 +202,15 @@
</span><span class="cx">     switch (stubInfo.cacheType()) {
</span><span class="cx">     case CacheType::GetByIdSelf:
</span><span class="cx">         RELEASE_ASSERT(stubInfo.hasConstantIdentifier);
</span><del>-        return ProxyableAccessCase::create(vm, owner, Load, identifier, stubInfo.byIdSelfOffset, stubInfo.inlineAccessBaseStructure(vm));
</del><ins>+        return ProxyableAccessCase::create(vm, owner, Load, identifier, stubInfo.u.byIdSelf.offset, stubInfo.m_inlineAccessBaseStructure.get());
</ins><span class="cx"> 
</span><span class="cx">     case CacheType::PutByIdReplace:
</span><span class="cx">         RELEASE_ASSERT(stubInfo.hasConstantIdentifier);
</span><del>-        return AccessCase::create(vm, owner, Replace, identifier, stubInfo.byIdSelfOffset, stubInfo.inlineAccessBaseStructure(vm));
</del><ins>+        return AccessCase::create(vm, owner, Replace, identifier, stubInfo.u.byIdSelf.offset, stubInfo.m_inlineAccessBaseStructure.get());
</ins><span class="cx"> 
</span><span class="cx">     case CacheType::InByIdSelf:
</span><span class="cx">         RELEASE_ASSERT(stubInfo.hasConstantIdentifier);
</span><del>-        return AccessCase::create(vm, owner, InHit, identifier, stubInfo.byIdSelfOffset, stubInfo.inlineAccessBaseStructure(vm));
</del><ins>+        return AccessCase::create(vm, owner, InHit, identifier, stubInfo.u.byIdSelf.offset, stubInfo.m_inlineAccessBaseStructure.get());
</ins><span class="cx"> 
</span><span class="cx">     case CacheType::ArrayLength:
</span><span class="cx">         RELEASE_ASSERT(stubInfo.hasConstantIdentifier);
</span><span class="lines">@@ -1985,10 +1985,14 @@
</span><span class="cx">         // Stuff for custom getters/setters.
</span><span class="cx">         CCallHelpers::Call operationCall;
</span><span class="cx"> 
</span><del>-
</del><span class="cx">         // This also does the necessary calculations of whether or not we're an
</span><span class="cx">         // exception handling call site.
</span><del>-        AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall();
</del><ins>+        RegisterSet extraRegistersToPreserve;
+#if CPU(ARM64)
+        if (codeBlock->useDataIC())
+            extraRegistersToPreserve.set(ARM64Registers::lr);
+#endif
+        AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall(extraRegistersToPreserve);
</ins><span class="cx"> 
</span><span class="cx">         auto restoreLiveRegistersFromStackForCall = [&](AccessGenerationState::SpillState& spillState, bool callHasReturnValue) {
</span><span class="cx">             RegisterSet dontRestore;
</span><span class="lines">@@ -2075,7 +2079,8 @@
</span><span class="cx">             ASSERT(!(numberOfRegsForCall % stackAlignmentRegisters()));
</span><span class="cx">             unsigned numberOfBytesForCall = numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);
</span><span class="cx"> 
</span><del>-            unsigned alignedNumberOfBytesForCall = WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);
</del><ins>+            unsigned alignedNumberOfBytesForCall =
+            WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);
</ins><span class="cx"> 
</span><span class="cx">             jit.subPtr(
</span><span class="cx">                 CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall),
</span><span class="lines">@@ -2133,7 +2138,6 @@
</span><span class="cx"> 
</span><span class="cx">             int stackPointerOffset = (codeBlock->stackPointerOffset() * sizeof(Register)) - state.preservedReusedRegisterState.numberOfBytesPreserved - spillState.numberOfStackBytesUsedForRegisterPreservation;
</span><span class="cx">             jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
</span><del>-
</del><span class="cx">             bool callHasReturnValue = isGetter();
</span><span class="cx">             restoreLiveRegistersFromStackForCall(spillState, callHasReturnValue);
</span><span class="cx"> 
</span><span class="lines">@@ -2260,7 +2264,12 @@
</span><span class="cx"> 
</span><span class="cx">             jit.loadPtr(CCallHelpers::Address(baseGPR, JSProxy::targetOffset()), scratchGPR);
</span><span class="cx"> 
</span><del>-            auto spillState = state.preserveLiveRegistersToStackForCallWithoutExceptions();
</del><ins>+            RegisterSet extraRegistersToPreserve;
+#if CPU(ARM64)
+            if (codeBlock->useDataIC())
+                extraRegistersToPreserve.set(ARM64Registers::lr);
+#endif
+            auto spillState = state.preserveLiveRegistersToStackForCallWithoutExceptions(extraRegistersToPreserve);
</ins><span class="cx"> 
</span><span class="cx">             jit.setupArguments<decltype(operationWriteBarrierSlowPath)>(CCallHelpers::TrustedImmPtr(&vm), scratchGPR);
</span><span class="cx">             jit.prepareCallOperation(vm);
</span><span class="lines">@@ -2355,6 +2364,10 @@
</span><span class="cx">                 RegisterSet extraRegistersToPreserve;
</span><span class="cx">                 extraRegistersToPreserve.set(baseGPR);
</span><span class="cx">                 extraRegistersToPreserve.set(valueRegs);
</span><ins>+#if CPU(ARM64)
+                if (codeBlock->useDataIC())
+                    extraRegistersToPreserve.set(ARM64Registers::lr);
+#endif
</ins><span class="cx">                 AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall(extraRegistersToPreserve);
</span><span class="cx">                 
</span><span class="cx">                 jit.store32(
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeBytecodeListrb"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/BytecodeList.rb (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/BytecodeList.rb     2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/BytecodeList.rb        2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -64,6 +64,8 @@
</span><span class="cx"> 
</span><span class="cx">     :ValueProfile,
</span><span class="cx">     :ValueProfileAndVirtualRegisterBuffer,
</span><ins>+    :UnaryArithProfile,
+    :BinaryArithProfile,
</ins><span class="cx">     :ArrayProfile,
</span><span class="cx">     :ArrayAllocationProfile,
</span><span class="cx">     :ObjectAllocationProfile,
</span><span class="lines">@@ -287,8 +289,10 @@
</span><span class="cx">         dst: VirtualRegister,
</span><span class="cx">         lhs: VirtualRegister,
</span><span class="cx">         rhs: VirtualRegister,
</span><del>-        profileIndex: unsigned,
</del><span class="cx">         operandTypes: OperandTypes,
</span><ins>+    },
+    metadata: {
+        arithProfile: BinaryArithProfile
</ins><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx"> op_group :ValueProfiledBinaryOp,
</span><span class="lines">@@ -347,7 +351,9 @@
</span><span class="cx">     ],
</span><span class="cx">     args: {
</span><span class="cx">         srcDst: VirtualRegister,
</span><del>-        profileIndex: unsigned,
</del><ins>+    },
+    metadata: {
+        arithProfile: UnaryArithProfile
</ins><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx"> op :to_object,
</span><span class="lines">@@ -377,8 +383,10 @@
</span><span class="cx">     args: {
</span><span class="cx">         dst: VirtualRegister,
</span><span class="cx">         operand: VirtualRegister,
</span><del>-        profileIndex: unsigned,
</del><span class="cx">         resultType: ResultType,
</span><ins>+    },
+    metadata: {
+        arithProfile: UnaryArithProfile,
</ins><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx"> op :not,
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeBytecodeOperandsForCheckpointh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/BytecodeOperandsForCheckpoint.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/BytecodeOperandsForCheckpoint.h     2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/BytecodeOperandsForCheckpoint.h        2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -73,28 +73,6 @@
</span><span class="cx">         return &metadata.m_profile;
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-template <typename Bytecode>
-uintptr_t valueProfileOffsetFor(unsigned checkpointIndex)
-{
-    UNUSED_PARAM(checkpointIndex);
-    if constexpr (Bytecode::opcodeID == op_iterator_open) {
-        switch (checkpointIndex) {
-        case OpIteratorOpen::symbolCall: return Bytecode::Metadata::offsetOfIteratorProfile();
-        case OpIteratorOpen::getNext: return Bytecode::Metadata::offsetOfNextProfile();
-        default: RELEASE_ASSERT_NOT_REACHED();
-        }
-
-    } else if constexpr (Bytecode::opcodeID == op_iterator_next) {
-        switch (checkpointIndex) {
-        case OpIteratorNext::computeNext: return Bytecode::Metadata::offsetOfNextResultProfile();
-        case OpIteratorNext::getDone: return Bytecode::Metadata::offsetOfDoneProfile();
-        case OpIteratorNext::getValue: return Bytecode::Metadata::offsetOfValueProfile();
-        default: RELEASE_ASSERT_NOT_REACHED();
-        }
-    } else 
-        return Bytecode::Metadata::offsetOfProfile();
-}
-
</del><span class="cx"> template<typename BytecodeMetadata>
</span><span class="cx"> bool hasValueProfileFor(BytecodeMetadata& metadata, unsigned checkpointIndex)
</span><span class="cx"> {
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeCallLinkInfocpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp    2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/CallLinkInfo.cpp       2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -114,6 +114,11 @@
</span><span class="cx">     RELEASE_ASSERT(!isOnList());
</span><span class="cx"> }
</span><span class="cx"> 
</span><ins>+CodeLocationLabel<JSInternalPtrTag> CallLinkInfo::fastPathStart()
+{
+    return CodeLocationDataLabelPtr<JSInternalPtrTag>(m_fastPathStart);
+}
+
</ins><span class="cx"> CodeLocationLabel<JSInternalPtrTag> CallLinkInfo::slowPathStart()
</span><span class="cx"> {
</span><span class="cx">     RELEASE_ASSERT(!isDataIC());
</span><span class="lines">@@ -220,12 +225,6 @@
</span><span class="cx">     m_maxArgumentCountIncludingThis = value;
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-CodeLocationLabel<JSInternalPtrTag> CallLinkInfo::fastPathStart()
-{
-    RELEASE_ASSERT(isDirect() && isTailCall());
-    return CodeLocationDataLabelPtr<JSInternalPtrTag>(u.codeIC.m_fastPathStart);
-}
-
</del><span class="cx"> void CallLinkInfo::visitWeak(VM& vm)
</span><span class="cx"> {
</span><span class="cx">     auto handleSpecificCallee = [&] (JSFunction* callee) {
</span><span class="lines">@@ -299,11 +298,23 @@
</span><span class="cx">     m_frameShuffleData->shrinkToFit();
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-MacroAssembler::JumpList CallLinkInfo::emitFastPathImpl(CallLinkInfo* callLinkInfo, CCallHelpers& jit, GPRReg calleeGPR, GPRReg callLinkInfoGPR, UseDataIC useDataIC, bool isTailCall, WTF::Function<void()> prepareForTailCall)
</del><ins>+MacroAssembler::JumpList CallLinkInfo::emitFastPathImpl(CCallHelpers& jit, GPRReg calleeGPR, GPRReg callLinkInfoGPR, UseDataIC useDataIC, WTF::Function<void()> prepareForTailCall)
</ins><span class="cx"> {
</span><ins>+    setUsesDataICs(useDataIC);
+    if (isDataIC()) {
+        RELEASE_ASSERT(callLinkInfoGPR != GPRReg::InvalidGPRReg);
+        jit.move(CCallHelpers::TrustedImmPtr(this), callLinkInfoGPR);
+        u.dataIC.m_callLinkInfoGPR = callLinkInfoGPR;
+    }
+
+    auto fastPathStart = jit.label();
+    jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
+        m_fastPathStart = linkBuffer.locationOf<JSInternalPtrTag>(fastPathStart);
+    });
+
</ins><span class="cx">     CCallHelpers::JumpList slowPath;
</span><span class="cx"> 
</span><del>-    if (useDataIC == UseDataIC::Yes) {
</del><ins>+    if (isDataIC()) {
</ins><span class="cx">         GPRReg scratchGPR = jit.scratchRegister();
</span><span class="cx">         jit.loadPtr(CCallHelpers::Address(callLinkInfoGPR, offsetOfCallee()), scratchGPR); 
</span><span class="cx">         CCallHelpers::Jump goPolymorphic;
</span><span class="lines">@@ -312,7 +323,7 @@
</span><span class="cx">             goPolymorphic = jit.branchTestPtr(CCallHelpers::NonZero, scratchGPR, CCallHelpers::TrustedImm32(polymorphicCalleeMask));
</span><span class="cx">             slowPath.append(jit.branchPtr(CCallHelpers::NotEqual, scratchGPR, calleeGPR));
</span><span class="cx">         }
</span><del>-        if (isTailCall) {
</del><ins>+        if (isTailCall()) {
</ins><span class="cx">             prepareForTailCall();
</span><span class="cx">             goPolymorphic.link(&jit); // Polymorphic stub handles tail call stack prep.
</span><span class="cx">             jit.farJump(CCallHelpers::Address(callLinkInfoGPR, offsetOfMonomorphicCallDestination()), JSEntryPtrTag);
</span><span class="lines">@@ -325,16 +336,14 @@
</span><span class="cx">         slowPath.append(jit.branchPtrWithPatch(CCallHelpers::NotEqual, calleeGPR, calleeCheck, CCallHelpers::TrustedImmPtr(nullptr)));
</span><span class="cx"> 
</span><span class="cx">         CCallHelpers::Call call;
</span><del>-        if (isTailCall) {
</del><ins>+        if (isTailCall()) {
</ins><span class="cx">             prepareForTailCall();
</span><span class="cx">             call = jit.nearTailCall();
</span><span class="cx">         } else
</span><span class="cx">             call = jit.nearCall();
</span><del>-
-        RELEASE_ASSERT(callLinkInfo);
</del><span class="cx">         jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
</span><del>-            callLinkInfo->u.codeIC.m_callLocation = linkBuffer.locationOfNearCall<JSInternalPtrTag>(call);
-            callLinkInfo->u.codeIC.m_calleeLocation = linkBuffer.locationOf<JSInternalPtrTag>(calleeCheck);
</del><ins>+            u.codeIC.m_callLocation = linkBuffer.locationOfNearCall<JSInternalPtrTag>(call);
+            u.codeIC.m_calleeLocation = linkBuffer.locationOf<JSInternalPtrTag>(calleeCheck);
</ins><span class="cx">         });
</span><span class="cx">     }
</span><span class="cx"> 
</span><span class="lines">@@ -344,36 +353,15 @@
</span><span class="cx"> CCallHelpers::JumpList CallLinkInfo::emitFastPath(CCallHelpers& jit, GPRReg calleeGPR, GPRReg callLinkInfoGPR, UseDataIC useDataIC)
</span><span class="cx"> {
</span><span class="cx">     RELEASE_ASSERT(!isTailCall());
</span><del>-    setUsesDataICs(useDataIC);
-
-    if (isDataIC()) {
-        RELEASE_ASSERT(callLinkInfoGPR != GPRReg::InvalidGPRReg);
-        jit.move(CCallHelpers::TrustedImmPtr(this), callLinkInfoGPR);
-        u.dataIC.m_callLinkInfoGPR = callLinkInfoGPR;
-    }
-
-    return emitFastPathImpl(this, jit, calleeGPR, callLinkInfoGPR, useDataIC, isTailCall(), nullptr);
</del><ins>+    return emitFastPathImpl(jit, calleeGPR, callLinkInfoGPR, useDataIC, nullptr);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><del>-MacroAssembler::JumpList CallLinkInfo::emitTailCallFastPath(CCallHelpers& jit, GPRReg calleeGPR, WTF::Function<void()> prepareForTailCall)
</del><ins>+MacroAssembler::JumpList CallLinkInfo::emitTailCallFastPath(CCallHelpers& jit, GPRReg calleeGPR, GPRReg callLinkInfoGPR, UseDataIC useDataIC, WTF::Function<void()> prepareForTailCall)
</ins><span class="cx"> {
</span><span class="cx">     RELEASE_ASSERT(isTailCall());
</span><del>-    setUsesDataICs(UseDataIC::No);
-    return emitFastPathImpl(this, jit, calleeGPR, InvalidGPRReg, UseDataIC::No, isTailCall(), WTFMove(prepareForTailCall));
</del><ins>+    return emitFastPathImpl(jit, calleeGPR, callLinkInfoGPR, useDataIC, WTFMove(prepareForTailCall));
</ins><span class="cx"> }
</span><span class="cx"> 
</span><del>-MacroAssembler::JumpList CallLinkInfo::emitDataICFastPath(CCallHelpers& jit, GPRReg calleeGPR, GPRReg callLinkInfoGPR)
-{
-    RELEASE_ASSERT(callLinkInfoGPR != InvalidGPRReg);
-    return emitFastPathImpl(nullptr, jit, calleeGPR, callLinkInfoGPR, UseDataIC::Yes, false, nullptr);
-}
-
-MacroAssembler::JumpList CallLinkInfo::emitTailCallDataICFastPath(CCallHelpers& jit, GPRReg calleeGPR, GPRReg callLinkInfoGPR, WTF::Function<void()> prepareForTailCall)
-{
-    RELEASE_ASSERT(callLinkInfoGPR != InvalidGPRReg);
-    return emitFastPathImpl(nullptr, jit, calleeGPR, callLinkInfoGPR, UseDataIC::Yes, true, WTFMove(prepareForTailCall));
-}
-
</del><span class="cx"> void CallLinkInfo::emitSlowPath(VM& vm, CCallHelpers& jit)
</span><span class="cx"> {
</span><span class="cx">     setSlowPathCallDestination(vm.getCTIStub(linkCallThunkGenerator).template retaggedCode<JSEntryPtrTag>());
</span><span class="lines">@@ -381,26 +369,6 @@
</span><span class="cx">     jit.call(CCallHelpers::Address(GPRInfo::regT2, offsetOfSlowPathCallDestination()), JSEntryPtrTag);
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-void CallLinkInfo::emitDataICSlowPath(VM&, CCallHelpers& jit, GPRReg callLinkInfoGPR)
-{
-    jit.move(callLinkInfoGPR, GPRInfo::regT2);
-    jit.call(CCallHelpers::Address(GPRInfo::regT2, offsetOfSlowPathCallDestination()), JSEntryPtrTag);
-}
-
-void CallLinkInfo::initializeDataIC(VM& vm, UnlinkedCallLinkInfo& unlinkedCallLinkInfo, GPRReg calleeGPR, GPRReg callLinkInfoGPR)
-{
-    m_codeOrigin = CodeOrigin(unlinkedCallLinkInfo.bytecodeIndex);
-    setUpCall(unlinkedCallLinkInfo.callType, calleeGPR);
-    m_doneLocation = unlinkedCallLinkInfo.doneLocation;
-    if (unlinkedCallLinkInfo.frameShuffleData) {
-        // FIXME: It'd be nice if this were a refcounted data structure.
-        m_frameShuffleData = makeUnique<CallFrameShuffleData>(*unlinkedCallLinkInfo.frameShuffleData);
-    }
-    setUsesDataICs(UseDataIC::Yes);
-    u.dataIC.m_callLinkInfoGPR = callLinkInfoGPR;
-    setSlowPathCallDestination(vm.getCTIStub(linkCallThunkGenerator).template retaggedCode<JSEntryPtrTag>());
-}
-
</del><span class="cx"> void CallLinkInfo::emitDirectFastPath(CCallHelpers& jit)
</span><span class="cx"> {
</span><span class="cx">     RELEASE_ASSERT(!isTailCall());
</span><span class="lines">@@ -407,6 +375,11 @@
</span><span class="cx"> 
</span><span class="cx">     setUsesDataICs(UseDataIC::No);
</span><span class="cx"> 
</span><ins>+    auto fastPathStart = jit.label();
+    jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
+        m_fastPathStart = linkBuffer.locationOf<JSInternalPtrTag>(fastPathStart);
+    });
+
</ins><span class="cx">     auto call = jit.nearCall();
</span><span class="cx">     jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
</span><span class="cx">         u.codeIC.m_callLocation = linkBuffer.locationOfNearCall<JSInternalPtrTag>(call);
</span><span class="lines">@@ -424,7 +397,7 @@
</span><span class="cx"> 
</span><span class="cx">     auto fastPathStart = jit.label();
</span><span class="cx">     jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
</span><del>-        u.codeIC.m_fastPathStart = linkBuffer.locationOf<JSInternalPtrTag>(fastPathStart);
</del><ins>+        m_fastPathStart = linkBuffer.locationOf<JSInternalPtrTag>(fastPathStart);
</ins><span class="cx">     });
</span><span class="cx"> 
</span><span class="cx">     // - If we're not yet linked, this is a jump to the slow path.
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeCallLinkInfoh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/CallLinkInfo.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/CallLinkInfo.h      2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/CallLinkInfo.h 2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -43,11 +43,9 @@
</span><span class="cx"> enum OpcodeID : unsigned;
</span><span class="cx"> struct CallFrameShuffleData;
</span><span class="cx"> 
</span><del>-struct UnlinkedCallLinkInfo;
-
</del><span class="cx"> class CallLinkInfo : public PackedRawSentinelNode<CallLinkInfo> {
</span><span class="cx"> public:
</span><del>-    enum CallType : uint8_t {
</del><ins>+    enum CallType {
</ins><span class="cx">         None,
</span><span class="cx">         Call,
</span><span class="cx">         CallVarargs,
</span><span class="lines">@@ -167,8 +165,6 @@
</span><span class="cx">         m_calleeGPR = calleeGPR;
</span><span class="cx">     }
</span><span class="cx"> 
</span><del>-    void initializeDataIC(VM&, UnlinkedCallLinkInfo&, GPRReg calleeGPR, GPRReg callLinkInfoGPR);
-
</del><span class="cx">     GPRReg calleeGPR() const { return m_calleeGPR; }
</span><span class="cx">     
</span><span class="cx">     enum class UseDataIC : uint8_t {
</span><span class="lines">@@ -177,16 +173,13 @@
</span><span class="cx">     };
</span><span class="cx"> 
</span><span class="cx"> private:
</span><del>-    static MacroAssembler::JumpList emitFastPathImpl(CallLinkInfo*, CCallHelpers&, GPRReg calleeGPR, GPRReg callLinkInfoGPR, UseDataIC, bool isTailCall, WTF::Function<void()> prepareForTailCall) WARN_UNUSED_RETURN;
</del><ins>+    MacroAssembler::JumpList emitFastPathImpl(CCallHelpers&, GPRReg calleeGPR, GPRReg callLinkInfoGPR, UseDataIC, WTF::Function<void()> prepareForTailCall) WARN_UNUSED_RETURN;
</ins><span class="cx"> public:
</span><del>-    static MacroAssembler::JumpList emitDataICFastPath(CCallHelpers&, GPRReg calleeGPR, GPRReg callLinkInfoGPR) WARN_UNUSED_RETURN;
-    static MacroAssembler::JumpList emitTailCallDataICFastPath(CCallHelpers&, GPRReg calleeGPR, GPRReg callLinkInfoGPR, WTF::Function<void()> prepareForTailCall) WARN_UNUSED_RETURN;
</del><span class="cx">     MacroAssembler::JumpList emitFastPath(CCallHelpers&, GPRReg calleeGPR, GPRReg callLinkInfoGPR, UseDataIC) WARN_UNUSED_RETURN;
</span><del>-    MacroAssembler::JumpList emitTailCallFastPath(CCallHelpers&, GPRReg calleeGPR, WTF::Function<void()> prepareForTailCall) WARN_UNUSED_RETURN;
</del><ins>+    MacroAssembler::JumpList emitTailCallFastPath(CCallHelpers&, GPRReg calleeGPR, GPRReg callLinkInfoGPR, UseDataIC, WTF::Function<void()> prepareForTailCall) WARN_UNUSED_RETURN;
</ins><span class="cx">     void emitDirectFastPath(CCallHelpers&);
</span><span class="cx">     void emitDirectTailCallFastPath(CCallHelpers&, WTF::Function<void()> prepareForTailCall);
</span><span class="cx">     void emitSlowPath(VM&, CCallHelpers&);
</span><del>-    static void emitDataICSlowPath(VM&, CCallHelpers&, GPRReg callLinkInfoGPR);
</del><span class="cx">     void revertCallToStub();
</span><span class="cx"> 
</span><span class="cx">     bool isDataIC() const { return static_cast<UseDataIC>(m_useDataIC) == UseDataIC::Yes; }
</span><span class="lines">@@ -210,6 +203,7 @@
</span><span class="cx">         m_allowStubs = false;
</span><span class="cx">     }
</span><span class="cx"> 
</span><ins>+    CodeLocationLabel<JSInternalPtrTag> fastPathStart();
</ins><span class="cx">     CodeLocationLabel<JSInternalPtrTag> slowPathStart();
</span><span class="cx">     CodeLocationLabel<JSInternalPtrTag> doneLocation();
</span><span class="cx"> 
</span><span class="lines">@@ -249,6 +243,11 @@
</span><span class="cx">         m_slowStub = nullptr;
</span><span class="cx">     }
</span><span class="cx"> 
</span><ins>+    JITStubRoutine* slowStub()
+    {
+        return m_slowStub.get();
+    }
+
</ins><span class="cx">     bool seenOnce()
</span><span class="cx">     {
</span><span class="cx">         return m_hasSeenShouldRepatch;
</span><span class="lines">@@ -309,9 +308,9 @@
</span><span class="cx">         return static_cast<CallType>(m_callType);
</span><span class="cx">     }
</span><span class="cx"> 
</span><del>-    static ptrdiff_t offsetOfMaxArgumentCountIncludingThis()
</del><ins>+    uint32_t* addressOfMaxArgumentCountIncludingThis()
</ins><span class="cx">     {
</span><del>-        return OBJECT_OFFSETOF(CallLinkInfo, m_maxArgumentCountIncludingThis);
</del><ins>+        return &m_maxArgumentCountIncludingThis;
</ins><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx">     uint32_t maxArgumentCountIncludingThis()
</span><span class="lines">@@ -382,9 +381,7 @@
</span><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx"> private:
</span><del>-
-    CodeLocationLabel<JSInternalPtrTag> fastPathStart();
-
</del><ins>+    CodeLocationLabel<JSInternalPtrTag> m_fastPathStart;
</ins><span class="cx">     CodeLocationLabel<JSInternalPtrTag> m_doneLocation;
</span><span class="cx">     MacroAssemblerCodePtr<JSEntryPtrTag> m_slowPathCallDestination;
</span><span class="cx">     union UnionType {
</span><span class="lines">@@ -400,7 +397,6 @@
</span><span class="cx">             CodeLocationNearCall<JSInternalPtrTag> m_callLocation;
</span><span class="cx">             CodeLocationDataLabelPtr<JSInternalPtrTag> m_calleeLocation;
</span><span class="cx">             CodeLocationLabel<JSInternalPtrTag> m_slowPathStart;
</span><del>-            CodeLocationLabel<JSInternalPtrTag> m_fastPathStart;
</del><span class="cx">         } codeIC;
</span><span class="cx">     } u;
</span><span class="cx"> 
</span><span class="lines">@@ -428,13 +424,6 @@
</span><span class="cx">     return callLinkInfo.codeOrigin();
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-struct UnlinkedCallLinkInfo {
-    BytecodeIndex bytecodeIndex; // Currently, only used by baseline, so this can trivially produce a CodeOrigin.
-    CallLinkInfo::CallType callType;
-    CodeLocationLabel<JSInternalPtrTag> doneLocation;
-    std::unique_ptr<CallFrameShuffleData> frameShuffleData;
-};
-
</del><span class="cx"> #endif // ENABLE(JIT)
</span><span class="cx"> 
</span><span class="cx"> } // namespace JSC
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeCodeBlockcpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp       2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp  2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -279,6 +279,8 @@
</span><span class="cx">     , m_didFailJITCompilation(false)
</span><span class="cx">     , m_didFailFTLCompilation(false)
</span><span class="cx">     , m_hasBeenCompiledWithFTL(false)
</span><ins>+    , m_hasLinkedOSRExit(false)
+    , m_isEligibleForLLIntDowngrade(false)
</ins><span class="cx">     , m_numCalleeLocals(other.m_numCalleeLocals)
</span><span class="cx">     , m_numVars(other.m_numVars)
</span><span class="cx">     , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
</span><span class="lines">@@ -306,8 +308,6 @@
</span><span class="cx"> 
</span><span class="cx">     ASSERT(source().provider());
</span><span class="cx">     setNumParameters(other.numParameters());
</span><del>-
-    m_llintExecuteCounter = &m_unlinkedCode->llintExecuteCounter();
</del><span class="cx">     
</span><span class="cx">     vm.heap.codeBlockSet().add(this);
</span><span class="cx"> }
</span><span class="lines">@@ -318,6 +318,7 @@
</span><span class="cx">     finishCreationCommon(vm);
</span><span class="cx"> 
</span><span class="cx">     optimizeAfterWarmUp();
</span><ins>+    jitAfterWarmUp();
</ins><span class="cx"> 
</span><span class="cx">     if (other.m_rareData) {
</span><span class="cx">         createRareDataIfNecessary();
</span><span class="lines">@@ -335,6 +336,8 @@
</span><span class="cx">     , m_didFailJITCompilation(false)
</span><span class="cx">     , m_didFailFTLCompilation(false)
</span><span class="cx">     , m_hasBeenCompiledWithFTL(false)
</span><ins>+    , m_hasLinkedOSRExit(false)
+    , m_isEligibleForLLIntDowngrade(false) 
</ins><span class="cx">     , m_numCalleeLocals(unlinkedCodeBlock->numCalleeLocals())
</span><span class="cx">     , m_numVars(unlinkedCodeBlock->numVars())
</span><span class="cx">     , m_hasDebuggerStatement(false)
</span><span class="lines">@@ -356,9 +359,7 @@
</span><span class="cx"> 
</span><span class="cx">     ASSERT(source().provider());
</span><span class="cx">     setNumParameters(unlinkedCodeBlock->numParameters());
</span><del>-
-    m_llintExecuteCounter = &m_unlinkedCode->llintExecuteCounter();
-
</del><ins>+    
</ins><span class="cx">     vm.heap.codeBlockSet().add(this);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -369,8 +370,7 @@
</span><span class="cx"> // we can't generate during unlinked bytecode generation. This process is not allowed to generate control
</span><span class="cx"> // flow or introduce new locals. The reason for this is we rely on liveness analysis to be the same for
</span><span class="cx"> // all the CodeBlocks of an UnlinkedCodeBlock. We rely on this fact by caching the liveness analysis
</span><del>-// inside UnlinkedCodeBlock. Also, Baseline JIT code is shared between all CodeBlocks of an UnlinkedCodeBlock,
-// so the bytecode must remain the same between CodeBlocks sharing an UnlinkedCodeBlock.
</del><ins>+// inside UnlinkedCodeBlock.
</ins><span class="cx"> bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
</span><span class="cx">     JSScope* scope)
</span><span class="cx"> {
</span><span class="lines">@@ -533,6 +533,15 @@
</span><span class="cx">         LINK(OpCreatePromise)
</span><span class="cx">         LINK(OpCreateGenerator)
</span><span class="cx"> 
</span><ins>+        LINK(OpAdd)
+        LINK(OpMul)
+        LINK(OpDiv)
+        LINK(OpSub)
+
+        LINK(OpNegate)
+        LINK(OpInc)
+        LINK(OpDec)
+
</ins><span class="cx">         LINK(OpJneqPtr)
</span><span class="cx"> 
</span><span class="cx">         LINK(OpCatch)
</span><span class="lines">@@ -760,6 +769,7 @@
</span><span class="cx">     // rely on the instruction count (and are in theory permitted to also inspect the
</span><span class="cx">     // instruction stream to more accurate assess the cost of tier-up).
</span><span class="cx">     optimizeAfterWarmUp();
</span><ins>+    jitAfterWarmUp();
</ins><span class="cx"> 
</span><span class="cx">     // If the concurrent thread will want the code block's hash, then compute it here
</span><span class="cx">     // synchronously.
</span><span class="lines">@@ -783,78 +793,6 @@
</span><span class="cx">     m_ownerEdge.set(vm, this, ExecutableToCodeBlockEdge::create(vm, this));
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-#if ENABLE(JIT)
-void CodeBlock::setupWithUnlinkedBaselineCode(Ref<BaselineJITCode> jitCode)
-{
-    setJITCode(jitCode.copyRef());
-
-    {
-        const auto& jitCodeMap = this->jitCodeMap();
-        for (size_t i = 0; i < numberOfExceptionHandlers(); ++i) {
-            HandlerInfo& handler = exceptionHandler(i);
-            // FIXME: <rdar://problem/39433318>.
-            handler.nativeCode = jitCodeMap.find(BytecodeIndex(handler.target)).retagged<ExceptionHandlerPtrTag>();
-        }
-    }
-
-    {
-        ConcurrentJSLocker locker(m_lock);
-        auto& jitData = ensureJITData(locker);
-
-        RELEASE_ASSERT(jitData.m_jitConstantPool.isEmpty());
-        jitData.m_jitConstantPool = FixedVector<void*>(jitCode->m_constantPool.size());
-        for (size_t i = 0; i < jitCode->m_constantPool.size(); ++i) {
-            auto entry = jitCode->m_constantPool.at(i);
-            switch (entry.type) {
-            case JITConstantPool::Type::GlobalObject:
-                jitData.m_jitConstantPool[i] = m_globalObject.get();
-                break;
-            case JITConstantPool::Type::CallLinkInfo: {
-                UnlinkedCallLinkInfo& unlinkedCallLinkInfo = *static_cast<UnlinkedCallLinkInfo*>(entry.payload.get());
-                CallLinkInfo* callLinkInfo = jitData.m_callLinkInfos.add(CodeOrigin(unlinkedCallLinkInfo.bytecodeIndex));
-                callLinkInfo->initializeDataIC(vm(), unlinkedCallLinkInfo, GPRInfo::regT0, GPRInfo::regT2);
-                jitData.m_jitConstantPool[i] = callLinkInfo;
-                break;
-            }
-            case JITConstantPool::Type::StructureStubInfo: {
-                UnlinkedStructureStubInfo& unlinkedStubInfo = *static_cast<UnlinkedStructureStubInfo*>(entry.payload.get());
-                StructureStubInfo* stubInfo = jitData.m_stubInfos.add(unlinkedStubInfo.accessType, CodeOrigin(unlinkedStubInfo.bytecodeIndex));
-                stubInfo->initializeFromUnlinkedStructureStubInfo(this, unlinkedStubInfo);
-                jitData.m_jitConstantPool[i] = stubInfo;
-                break;
-            }
-            case JITConstantPool::Type::FunctionDecl: {
-                unsigned index = bitwise_cast<uintptr_t>(entry.payload.get());
-                jitData.m_jitConstantPool[i] = functionDecl(index);
-                break;
-            }
-            case JITConstantPool::Type::FunctionExpr: {
-                unsigned index = bitwise_cast<uintptr_t>(entry.payload.get());
-                jitData.m_jitConstantPool[i] = functionExpr(index);
-                break;
-            }
-            }
-        }
-    }
-
-    switch (codeType()) {
-    case GlobalCode:
-    case ModuleCode:
-    case EvalCode:
-        m_shouldAlwaysBeInlined = false;
-        break;
-    case FunctionCode:
-        // We could have already set it to false because we detected an uninlineable call.
-        // Don't override that observation.
-        m_shouldAlwaysBeInlined &= canInline(capabilityLevel()) && DFG::mightInlineFunction(this);
-        break;
-    }
-
-    if (jitCode->m_isShareable && !unlinkedCodeBlock()->m_unlinkedBaselineCode && Options::useBaselineJITCodeSharing())
-        unlinkedCodeBlock()->m_unlinkedBaselineCode = WTFMove(jitCode);
-}
-#endif // ENABLE(JIT)
-
</del><span class="cx"> CodeBlock::~CodeBlock()
</span><span class="cx"> {
</span><span class="cx">     VM& vm = *m_vm;
</span><span class="lines">@@ -935,27 +873,6 @@
</span><span class="cx"> #endif // ENABLE(JIT)
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-bool CodeBlock::isConstantOwnedByUnlinkedCodeBlock(VirtualRegister reg) const
-{
-    // This needs to correspond to what we do inside setConstantRegisters.
-    switch (unlinkedCodeBlock()->constantSourceCodeRepresentation(reg)) {
-    case SourceCodeRepresentation::Integer:
-    case SourceCodeRepresentation::Double:
-        return true;
-    case SourceCodeRepresentation::Other: {
-        JSValue value = unlinkedCodeBlock()->getConstant(reg);
-        if (!value || !value.isCell())
-            return true;
-        JSCell* cell = value.asCell();
-        if (cell->inherits<SymbolTable>(vm()) || cell->inherits<JSTemplateObjectDescriptor>(vm()))
-            return false;
-        return true;
-    }
-    case SourceCodeRepresentation::LinkTimeConstant:
-        return false;
-    }
-}
-
</del><span class="cx"> Vector<unsigned> CodeBlock::setConstantRegisters(const FixedVector<WriteBarrier<Unknown>>& constants, const FixedVector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
</span><span class="cx"> {
</span><span class="cx">     VM& vm = *m_vm;
</span><span class="lines">@@ -975,7 +892,6 @@
</span><span class="cx">         switch (representation) {
</span><span class="cx">         case SourceCodeRepresentation::LinkTimeConstant:
</span><span class="cx">             constant = globalObject->linkTimeConstant(static_cast<LinkTimeConstant>(constant.asInt32AsAnyInt()));
</span><del>-            ASSERT(constant.isCell()); // Unlinked Baseline JIT requires this.
</del><span class="cx">             break;
</span><span class="cx">         case SourceCodeRepresentation::Other:
</span><span class="cx">         case SourceCodeRepresentation::Integer:
</span><span class="lines">@@ -1598,7 +1514,7 @@
</span><span class="cx">     return *m_jitData;
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-void CodeBlock::finalizeJITInlineCaches()
</del><ins>+void CodeBlock::finalizeBaselineJITInlineCaches()
</ins><span class="cx"> {
</span><span class="cx">     if (auto* jitData = m_jitData.get()) {
</span><span class="cx">         for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
</span><span class="lines">@@ -1609,20 +1525,6 @@
</span><span class="cx">             stubInfo->visitWeakReferences(locker, this);
</span><span class="cx">         }
</span><span class="cx">     }
</span><del>-
-
-#if ASSERT_ENABLED
-    if (jitType() == JITType::BaselineJIT) {
-        // Verify we don't need to finalize these since they're virtual calls.
-        for (CallLinkInfo* callLinkInfo : static_cast<BaselineJITCode*>(m_jitCode.get())->m_evalCallLinkInfos) {
-            ASSERT(!callLinkInfo->isLinked());
-            callLinkInfo->forEachDependentCell([] (JSCell*) {
-                ASSERT_NOT_REACHED();
-            });
-        }
-
-    }
-#endif
</del><span class="cx"> }
</span><span class="cx"> #endif
</span><span class="cx"> 
</span><span class="lines">@@ -1632,12 +1534,46 @@
</span><span class="cx"> 
</span><span class="cx">     updateAllPredictions();
</span><span class="cx"> 
</span><ins>+#if ENABLE(JIT)
+    bool isEligibleForLLIntDowngrade = m_isEligibleForLLIntDowngrade;
+    m_isEligibleForLLIntDowngrade = false;
+    // If BaselineJIT code is not executing, and an optimized replacement exists, we attempt
+    // to discard baseline JIT code and reinstall LLInt code to save JIT memory.
+    if (Options::useLLInt() && !m_hasLinkedOSRExit && jitType() == JITType::BaselineJIT && !m_vm->heap.codeBlockSet().isCurrentlyExecuting(this)) {
+        if (CodeBlock* optimizedCodeBlock = optimizedReplacement()) {
+            if (!optimizedCodeBlock->m_osrExitCounter) {
+                if (isEligibleForLLIntDowngrade) {
+                    m_jitCode = nullptr;
+                    LLInt::setEntrypoint(this);
+                    RELEASE_ASSERT(jitType() == JITType::InterpreterThunk);
+
+                    for (size_t i = 0; i < m_unlinkedCode->numberOfExceptionHandlers(); i++) {
+                        const UnlinkedHandlerInfo& unlinkedHandler = m_unlinkedCode->exceptionHandler(i);
+                        HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
+                        auto& instruction = *instructions().at(unlinkedHandler.target).ptr();
+                        handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(LLInt::handleCatch(instruction.width()).code()));
+                    }
+
+                    unlinkIncomingCalls();
+
+                    // It's safe to clear these out here because in finalizeUnconditionally all compiler threads
+                    // are safepointed, meaning they're running either before or after the bytecode parser, and the
+                    // bytecode parser is the only thing pointing into the various *infos.
+                    resetJITData();
+                } else
+                    m_isEligibleForLLIntDowngrade = true;
+            }
+        }
+    }
+
+#endif
+    
</ins><span class="cx">     if (JITCode::couldBeInterpreted(jitType()))
</span><span class="cx">         finalizeLLIntInlineCaches();
</span><span class="cx"> 
</span><span class="cx"> #if ENABLE(JIT)
</span><span class="cx">     if (!!jitCode())
</span><del>-        finalizeJITInlineCaches();
</del><ins>+        finalizeBaselineJITInlineCaches();
</ins><span class="cx"> #endif
</span><span class="cx"> 
</span><span class="cx"> #if ENABLE(DFG_JIT)
</span><span class="lines">@@ -1658,7 +1594,7 @@
</span><span class="cx">         case JITType::HostCallThunk:
</span><span class="cx">             return;
</span><span class="cx">         case JITType::InterpreterThunk:
</span><del>-            count = m_llintExecuteCounter->count();
</del><ins>+            count = m_llintExecuteCounter.count();
</ins><span class="cx">             break;
</span><span class="cx">         case JITType::BaselineJIT:
</span><span class="cx">             count = m_jitExecuteCounter.count();
</span><span class="lines">@@ -1739,6 +1675,30 @@
</span><span class="cx">     return ensureJITData(locker).m_stubInfos.add(accessType, codeOrigin);
</span><span class="cx"> }
</span><span class="cx"> 
</span><ins>+JITAddIC* CodeBlock::addJITAddIC(BinaryArithProfile* arithProfile)
+{
+    ConcurrentJSLocker locker(m_lock);
+    return ensureJITData(locker).m_addICs.add(arithProfile);
+}
+
+JITMulIC* CodeBlock::addJITMulIC(BinaryArithProfile* arithProfile)
+{
+    ConcurrentJSLocker locker(m_lock);
+    return ensureJITData(locker).m_mulICs.add(arithProfile);
+}
+
+JITSubIC* CodeBlock::addJITSubIC(BinaryArithProfile* arithProfile)
+{
+    ConcurrentJSLocker locker(m_lock);
+    return ensureJITData(locker).m_subICs.add(arithProfile);
+}
+
+JITNegIC* CodeBlock::addJITNegIC(UnaryArithProfile* arithProfile)
+{
+    ConcurrentJSLocker locker(m_lock);
+    return ensureJITData(locker).m_negICs.add(arithProfile);
+}
+
</ins><span class="cx"> StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
</span><span class="cx"> {
</span><span class="cx">     ConcurrentJSLocker locker(m_lock);
</span><span class="lines">@@ -3173,7 +3133,7 @@
</span><span class="cx"> 
</span><span class="cx"> #if ENABLE(JIT)
</span><span class="cx">     case JITType::BaselineJIT:
</span><del>-        return JIT::frameRegisterCountFor(this->unlinkedCodeBlock());
</del><ins>+        return JIT::frameRegisterCountFor(this);
</ins><span class="cx"> #endif // ENABLE(JIT)
</span><span class="cx"> 
</span><span class="cx"> #if ENABLE(DFG_JIT)
</span><span class="lines">@@ -3389,13 +3349,13 @@
</span><span class="cx"> {
</span><span class="cx">     switch (pc->opcodeID()) {
</span><span class="cx">     case op_add:
</span><del>-        return &unlinkedCodeBlock()->binaryArithProfile(pc->as<OpAdd>().m_profileIndex);
</del><ins>+        return &pc->as<OpAdd>().metadata(this).m_arithProfile;
</ins><span class="cx">     case op_mul:
</span><del>-        return &unlinkedCodeBlock()->binaryArithProfile(pc->as<OpMul>().m_profileIndex);
</del><ins>+        return &pc->as<OpMul>().metadata(this).m_arithProfile;
</ins><span class="cx">     case op_sub:
</span><del>-        return &unlinkedCodeBlock()->binaryArithProfile(pc->as<OpSub>().m_profileIndex);
</del><ins>+        return &pc->as<OpSub>().metadata(this).m_arithProfile;
</ins><span class="cx">     case op_div:
</span><del>-        return &unlinkedCodeBlock()->binaryArithProfile(pc->as<OpDiv>().m_profileIndex);
</del><ins>+        return &pc->as<OpDiv>().metadata(this).m_arithProfile;
</ins><span class="cx">     default:
</span><span class="cx">         break;
</span><span class="cx">     }
</span><span class="lines">@@ -3407,11 +3367,11 @@
</span><span class="cx"> {
</span><span class="cx">     switch (pc->opcodeID()) {
</span><span class="cx">     case op_negate:
</span><del>-        return &unlinkedCodeBlock()->unaryArithProfile(pc->as<OpNegate>().m_profileIndex);
</del><ins>+        return &pc->as<OpNegate>().metadata(this).m_arithProfile;
</ins><span class="cx">     case op_inc:
</span><del>-        return &unlinkedCodeBlock()->unaryArithProfile(pc->as<OpInc>().m_profileIndex);
</del><ins>+        return &pc->as<OpInc>().metadata(this).m_arithProfile;
</ins><span class="cx">     case op_dec:
</span><del>-        return &unlinkedCodeBlock()->unaryArithProfile(pc->as<OpDec>().m_profileIndex);
</del><ins>+        return &pc->as<OpDec>().metadata(this).m_arithProfile;
</ins><span class="cx">     default:
</span><span class="cx">         break;
</span><span class="cx">     }
</span><span class="lines">@@ -3512,16 +3472,22 @@
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> #if ENABLE(JIT)
</span><ins>+void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map) 
+{ 
+    ConcurrentJSLocker locker(m_lock);
+    ensureJITData(locker).m_pcToCodeOriginMap = WTFMove(map);
+}
+
</ins><span class="cx"> std::optional<CodeOrigin> CodeBlock::findPC(void* pc)
</span><span class="cx"> {
</span><del>-    if (auto* pcToCodeOriginMap = m_jitCode->pcToCodeOriginMap()) {
-        if (std::optional<CodeOrigin> codeOrigin = pcToCodeOriginMap->findPC(pc))
-            return codeOrigin;
-    }
-
</del><span class="cx">     {
</span><span class="cx">         ConcurrentJSLocker locker(m_lock);
</span><span class="cx">         if (auto* jitData = m_jitData.get()) {
</span><ins>+            if (jitData->m_pcToCodeOriginMap) {
+                if (std::optional<CodeOrigin> codeOrigin = jitData->m_pcToCodeOriginMap->findPC(pc))
+                    return codeOrigin;
+            }
+
</ins><span class="cx">             for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
</span><span class="cx">                 if (stubInfo->containsPC(pc))
</span><span class="cx">                     return std::optional<CodeOrigin>(stubInfo->codeOrigin);
</span><span class="lines">@@ -3555,16 +3521,30 @@
</span><span class="cx">     return bytecodeIndex;
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-void CodeBlock::jitSoon()
</del><ins>+int32_t CodeBlock::thresholdForJIT(int32_t threshold)
</ins><span class="cx"> {
</span><del>-    m_llintExecuteCounter->setNewThreshold(unlinkedCodeBlock()->thresholdForJIT(Options::thresholdForJITSoon()), this);
</del><ins>+    switch (unlinkedCodeBlock()->didOptimize()) {
+    case TriState::Indeterminate:
+        return threshold;
+    case TriState::False:
+        return threshold * 4;
+    case TriState::True:
+        return threshold / 2;
+    }
+    ASSERT_NOT_REACHED();
+    return threshold;
</ins><span class="cx"> }
</span><span class="cx"> 
</span><del>-void CodeBlock::jitNextInvocation()
</del><ins>+void CodeBlock::jitAfterWarmUp()
</ins><span class="cx"> {
</span><del>-    m_llintExecuteCounter->setNewThreshold(0, this);
</del><ins>+    m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><ins>+void CodeBlock::jitSoon()
+{
+    m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
+}
+
</ins><span class="cx"> bool CodeBlock::hasInstalledVMTrapBreakpoints() const
</span><span class="cx"> {
</span><span class="cx"> #if ENABLE(SIGNAL_BASED_VM_TRAPS)
</span><span class="lines">@@ -3608,8 +3588,28 @@
</span><span class="cx">     double numSubs = 0.0;
</span><span class="cx">     double totalSubSize = 0.0;
</span><span class="cx"> 
</span><del>-    auto countICs = [&] (CodeBlock*) {
-        // FIXME: We need to re-implement this using JITCode.
</del><ins>+    auto countICs = [&] (CodeBlock* codeBlock) {
+        if (auto* jitData = codeBlock->m_jitData.get()) {
+            for (JITAddIC* addIC : jitData->m_addICs) {
+                numAdds++;
+                totalAddSize += addIC->codeSize();
+            }
+
+            for (JITMulIC* mulIC : jitData->m_mulICs) {
+                numMuls++;
+                totalMulSize += mulIC->codeSize();
+            }
+
+            for (JITNegIC* negIC : jitData->m_negICs) {
+                numNegs++;
+                totalNegSize += negIC->codeSize();
+            }
+
+            for (JITSubIC* subIC : jitData->m_subICs) {
+                numSubs++;
+                totalSubSize += subIC->codeSize();
+            }
+        }
</ins><span class="cx">     };
</span><span class="cx">     heap()->forEachCodeBlock(countICs);
</span><span class="cx"> 
</span></span></pre></div>
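<p>The CodeBlock::finalizeUnconditionally hunk above downgrades a Baseline CodeBlock back to LLInt only on the second consecutive GC pass in which it looks idle: the first pass merely sets m_isEligibleForLLIntDowngrade, and the next pass (if the block still qualifies) clears the JIT code, reinstalls the LLInt entrypoint, re-derives the exception handlers, and resets the JIT data. The snippet below is a minimal, self-contained sketch of that two-pass pattern only; every name in it is invented for illustration and none of it is JSC API.</p>
<pre>
// Minimal illustration of the two-pass downgrade pattern used in
// CodeBlock::finalizeUnconditionally above. All names here are stand-ins.
#include <cstdio>

struct Block {
    bool hasBaselineCode { true };
    bool eligibleForDowngrade { false }; // plays the role of m_isEligibleForLLIntDowngrade

    void finalizePass(bool looksIdle)
    {
        bool wasEligible = eligibleForDowngrade;
        eligibleForDowngrade = false;
        if (!hasBaselineCode || !looksIdle)
            return;
        if (wasEligible) {
            // Second consecutive idle pass: drop the JIT code (stands in for clearing
            // m_jitCode and reinstalling the LLInt entrypoint).
            hasBaselineCode = false;
            std::puts("downgraded to interpreter");
        } else {
            // First idle pass: only remember that the block looked idle.
            eligibleForDowngrade = true;
        }
    }
};

int main()
{
    Block block;
    block.finalizePass(true); // marked eligible, nothing dropped yet
    block.finalizePass(true); // second idle pass in a row: downgraded
    return 0;
}
</pre>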
<a id="trunkSourceJavaScriptCorebytecodeCodeBlockh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/CodeBlock.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/CodeBlock.h 2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/CodeBlock.h    2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -94,9 +94,9 @@
</span><span class="cx"> class LLIntOffsetsExtractor;
</span><span class="cx"> class LLIntPrototypeLoadAdaptiveStructureWatchpoint;
</span><span class="cx"> class MetadataTable;
</span><ins>+class PCToCodeOriginMap;
</ins><span class="cx"> class RegisterAtOffsetList;
</span><span class="cx"> class StructureStubInfo;
</span><del>-class BaselineJITCode;
</del><span class="cx"> 
</span><span class="cx"> DECLARE_ALLOCATOR_WITH_HEAP_IDENTIFIER(CodeBlockRareData);
</span><span class="cx"> 
</span><span class="lines">@@ -158,9 +158,7 @@
</span><span class="cx">     MetadataTable* metadataTable() const { return m_metadata.get(); }
</span><span class="cx"> 
</span><span class="cx">     unsigned numParameters() const { return m_numParameters; }
</span><del>-private:
</del><span class="cx">     void setNumParameters(unsigned newValue);
</span><del>-public:
</del><span class="cx"> 
</span><span class="cx">     unsigned numberOfArgumentsToSkip() const { return m_numberOfArgumentsToSkip; }
</span><span class="cx"> 
</span><span class="lines">@@ -169,6 +167,7 @@
</span><span class="cx">     unsigned numVars() const { return m_numVars; }
</span><span class="cx">     unsigned numTmps() const { return m_unlinkedCode->hasCheckpoints() * maxNumCheckpointTmps; }
</span><span class="cx"> 
</span><ins>+    unsigned* addressOfNumParameters() { return &m_numParameters; }
</ins><span class="cx">     static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }
</span><span class="cx"> 
</span><span class="cx">     CodeBlock* alternative() const { return static_cast<CodeBlock*>(m_alternative.get()); }
</span><span class="lines">@@ -233,6 +232,16 @@
</span><span class="cx">     bool hasInstalledVMTrapBreakpoints() const;
</span><span class="cx">     bool installVMTrapBreakpoints();
</span><span class="cx"> 
</span><ins>+    inline bool isKnownCell(VirtualRegister reg)
+    {
+        // FIXME: Consider adding back the optimization where we return true if `reg` is `this` and we're in sloppy mode.
+        // https://bugs.webkit.org/show_bug.cgi?id=210145
+        if (reg.isConstant())
+            return getConstant(reg).isCell();
+
+        return false;
+    }
+
</ins><span class="cx">     ALWAYS_INLINE bool isTemporaryRegister(VirtualRegister reg)
</span><span class="cx">     {
</span><span class="cx">         return reg.offset() >= static_cast<int>(m_numVars);
</span><span class="lines">@@ -259,21 +268,23 @@
</span><span class="cx"> #if ENABLE(JIT)
</span><span class="cx">     struct JITData {
</span><span class="cx">         WTF_MAKE_STRUCT_FAST_ALLOCATED;
</span><del>-        friend class LLIntOffsetsExtractor;
</del><span class="cx"> 
</span><span class="cx">         Bag<StructureStubInfo> m_stubInfos;
</span><ins>+        Bag<JITAddIC> m_addICs;
+        Bag<JITMulIC> m_mulICs;
+        Bag<JITNegIC> m_negICs;
+        Bag<JITSubIC> m_subICs;
</ins><span class="cx">         Bag<CallLinkInfo> m_callLinkInfos;
</span><span class="cx">         SentinelLinkedList<CallLinkInfo, PackedRawSentinelNode<CallLinkInfo>> m_incomingCalls;
</span><span class="cx">         SentinelLinkedList<PolymorphicCallNode, PackedRawSentinelNode<PolymorphicCallNode>> m_incomingPolymorphicCalls;
</span><ins>+        FixedVector<SimpleJumpTable> m_switchJumpTables;
+        FixedVector<StringJumpTable> m_stringSwitchJumpTables;
+        std::unique_ptr<PCToCodeOriginMap> m_pcToCodeOriginMap;
</ins><span class="cx">         bool m_hasCalleeSaveRegisters { false };
</span><span class="cx">         RegisterAtOffsetList m_calleeSaveRegisters;
</span><del>-
-        FixedVector<void*> m_jitConstantPool;
-        static ptrdiff_t offsetOfJITConstantPool() { return OBJECT_OFFSETOF(JITData, m_jitConstantPool); }
</del><ins>+        JITCodeMap m_jitCodeMap;
</ins><span class="cx">     };
</span><span class="cx"> 
</span><del>-    void setupWithUnlinkedBaselineCode(Ref<BaselineJITCode>);
-
</del><span class="cx">     JITData& ensureJITData(const ConcurrentJSLocker& locker)
</span><span class="cx">     {
</span><span class="cx">         if (LIKELY(m_jitData))
</span><span class="lines">@@ -282,8 +293,23 @@
</span><span class="cx">     }
</span><span class="cx">     JITData& ensureJITDataSlow(const ConcurrentJSLocker&);
</span><span class="cx"> 
</span><del>-    static ptrdiff_t offsetOfJITData() { return OBJECT_OFFSETOF(CodeBlock, m_jitData); }
</del><ins>+    JITAddIC* addJITAddIC(BinaryArithProfile*);
+    JITMulIC* addJITMulIC(BinaryArithProfile*);
+    JITNegIC* addJITNegIC(UnaryArithProfile*);
+    JITSubIC* addJITSubIC(BinaryArithProfile*);
</ins><span class="cx"> 
</span><ins>+    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITAddGenerator>::value>::type>
+    JITAddIC* addMathIC(BinaryArithProfile* profile) { return addJITAddIC(profile); }
+
+    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITMulGenerator>::value>::type>
+    JITMulIC* addMathIC(BinaryArithProfile* profile) { return addJITMulIC(profile); }
+
+    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITNegGenerator>::value>::type>
+    JITNegIC* addMathIC(UnaryArithProfile* profile) { return addJITNegIC(profile); }
+
+    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITSubGenerator>::value>::type>
+    JITSubIC* addMathIC(BinaryArithProfile* profile) { return addJITSubIC(profile); }
+
</ins><span class="cx">     StructureStubInfo* addStubInfo(AccessType, CodeOrigin);
</span><span class="cx"> 
</span><span class="cx">     // O(n) operation. Use getICStatusMap() unless you really only intend to get one stub info.
</span><span class="lines">@@ -296,8 +322,18 @@
</span><span class="cx">     // looking for a CallLinkInfoMap to amortize the cost of calling this.
</span><span class="cx">     CallLinkInfo* getCallLinkInfoForBytecodeIndex(BytecodeIndex);
</span><span class="cx">     
</span><del>-    const JITCodeMap& jitCodeMap();
</del><ins>+    void setJITCodeMap(JITCodeMap&& jitCodeMap)
+    {
+        ConcurrentJSLocker locker(m_lock);
+        ensureJITData(locker).m_jitCodeMap = WTFMove(jitCodeMap);
+    }
+    const JITCodeMap& jitCodeMap()
+    {
+        ConcurrentJSLocker locker(m_lock);
+        return ensureJITData(locker).m_jitCodeMap;
+    }
</ins><span class="cx"> 
</span><ins>+    void setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&&);
</ins><span class="cx">     std::optional<CodeOrigin> findPC(void* pc);
</span><span class="cx"> 
</span><span class="cx">     void setCalleeSaveRegisters(RegisterSet);
</span><span class="lines">@@ -436,7 +472,6 @@
</span><span class="cx"> 
</span><span class="cx">     String nameForRegister(VirtualRegister);
</span><span class="cx"> 
</span><del>-    static ptrdiff_t offsetOfArgumentValueProfiles() { return OBJECT_OFFSETOF(CodeBlock, m_argumentValueProfiles); }
</del><span class="cx">     unsigned numberOfArgumentValueProfiles()
</span><span class="cx">     {
</span><span class="cx">         ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters) || !Options::useJIT());
</span><span class="lines">@@ -534,10 +569,8 @@
</span><span class="cx">     const Vector<WriteBarrier<Unknown>>& constantRegisters() { return m_constantRegisters; }
</span><span class="cx">     WriteBarrier<Unknown>& constantRegister(VirtualRegister reg) { return m_constantRegisters[reg.toConstantIndex()]; }
</span><span class="cx">     ALWAYS_INLINE JSValue getConstant(VirtualRegister reg) const { return m_constantRegisters[reg.toConstantIndex()].get(); }
</span><del>-    bool isConstantOwnedByUnlinkedCodeBlock(VirtualRegister) const;
</del><span class="cx">     ALWAYS_INLINE SourceCodeRepresentation constantSourceCodeRepresentation(VirtualRegister reg) const { return m_unlinkedCode->constantSourceCodeRepresentation(reg); }
</span><span class="cx">     ALWAYS_INLINE SourceCodeRepresentation constantSourceCodeRepresentation(unsigned index) const { return m_unlinkedCode->constantSourceCodeRepresentation(index); }
</span><del>-    static ptrdiff_t offsetOfConstantsVectorBuffer() { return OBJECT_OFFSETOF(CodeBlock, m_constantRegisters) + decltype(m_constantRegisters)::dataMemoryOffset(); }
</del><span class="cx"> 
</span><span class="cx">     FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
</span><span class="cx">     int numberOfFunctionDecls() { return m_functionDecls.size(); }
</span><span class="lines">@@ -562,22 +595,22 @@
</span><span class="cx">     // Jump Tables
</span><span class="cx"> 
</span><span class="cx"> #if ENABLE(JIT)
</span><del>-    SimpleJumpTable& baselineSwitchJumpTable(int tableIndex);
-    StringJumpTable& baselineStringSwitchJumpTable(int tableIndex);
-    void* baselineJITConstantPool()
</del><ins>+    SimpleJumpTable& switchJumpTable(int tableIndex)
</ins><span class="cx">     {
</span><del>-        RELEASE_ASSERT(m_jitData && jitType() == JITType::BaselineJIT);
-        return m_jitData->m_jitConstantPool.data();
</del><ins>+        RELEASE_ASSERT(m_jitData);
+        return m_jitData->m_switchJumpTables[tableIndex];
</ins><span class="cx">     }
</span><span class="cx"> #endif
</span><span class="cx">     size_t numberOfUnlinkedSwitchJumpTables() const { return m_unlinkedCode->numberOfUnlinkedSwitchJumpTables(); }
</span><span class="cx">     const UnlinkedSimpleJumpTable& unlinkedSwitchJumpTable(int tableIndex) { return m_unlinkedCode->unlinkedSwitchJumpTable(tableIndex); }
</span><span class="cx"> 
</span><del>-#if ENABLE(DFG_JIT)
-    StringJumpTable& dfgStringSwitchJumpTable(int tableIndex);
-    SimpleJumpTable& dfgSwitchJumpTable(int tableIndex);
</del><ins>+#if ENABLE(JIT)
+    StringJumpTable& stringSwitchJumpTable(int tableIndex)
+    {
+        RELEASE_ASSERT(m_jitData);
+        return m_jitData->m_stringSwitchJumpTables[tableIndex];
+    }
</ins><span class="cx"> #endif
</span><del>-
</del><span class="cx">     size_t numberOfUnlinkedStringSwitchJumpTables() const { return m_unlinkedCode->numberOfUnlinkedStringSwitchJumpTables(); }
</span><span class="cx">     const UnlinkedStringJumpTable& unlinkedStringSwitchJumpTable(int tableIndex) { return m_unlinkedCode->unlinkedStringSwitchJumpTable(tableIndex); }
</span><span class="cx"> 
</span><span class="lines">@@ -599,20 +632,21 @@
</span><span class="cx"> 
</span><span class="cx">     bool checkIfJITThresholdReached()
</span><span class="cx">     {
</span><del>-        return m_llintExecuteCounter->checkIfThresholdCrossedAndSet(this);
</del><ins>+        return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
</ins><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx">     void dontJITAnytimeSoon()
</span><span class="cx">     {
</span><del>-        m_llintExecuteCounter->deferIndefinitely();
</del><ins>+        m_llintExecuteCounter.deferIndefinitely();
</ins><span class="cx">     }
</span><span class="cx"> 
</span><ins>+    int32_t thresholdForJIT(int32_t threshold);
+    void jitAfterWarmUp();
</ins><span class="cx">     void jitSoon();
</span><del>-    void jitNextInvocation();
</del><span class="cx"> 
</span><span class="cx">     const BaselineExecutionCounter& llintExecuteCounter() const
</span><span class="cx">     {
</span><del>-        return *m_llintExecuteCounter;
</del><ins>+        return m_llintExecuteCounter;
</ins><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx">     typedef HashMap<std::tuple<StructureID, unsigned>, FixedVector<LLIntPrototypeLoadAdaptiveStructureWatchpoint>> StructureWatchpointMap;
</span><span class="lines">@@ -763,7 +797,6 @@
</span><span class="cx"> 
</span><span class="cx">     bool hasDebuggerRequests() const { return m_debuggerRequests; }
</span><span class="cx">     void* debuggerRequestsAddress() { return &m_debuggerRequests; }
</span><del>-    static ptrdiff_t offsetOfDebuggerRequests() { return OBJECT_OFFSETOF(CodeBlock, m_debuggerRequests); }
</del><span class="cx"> 
</span><span class="cx">     void addBreakpoint(unsigned numBreakpoints);
</span><span class="cx">     void removeBreakpoint(unsigned numBreakpoints)
</span><span class="lines">@@ -805,7 +838,6 @@
</span><span class="cx">     mutable ConcurrentJSLock m_lock;
</span><span class="cx"> 
</span><span class="cx">     bool m_shouldAlwaysBeInlined; // Not a bitfield because the JIT wants to store to it.
</span><del>-    static ptrdiff_t offsetOfShouldAlwaysBeInlined() { return OBJECT_OFFSETOF(CodeBlock, m_shouldAlwaysBeInlined); }
</del><span class="cx"> 
</span><span class="cx"> #if ENABLE(JIT)
</span><span class="cx">     unsigned m_capabilityLevelState : 2; // DFG::CapabilityLevel
</span><span class="lines">@@ -815,6 +847,8 @@
</span><span class="cx">     bool m_didFailFTLCompilation : 1;
</span><span class="cx">     bool m_hasBeenCompiledWithFTL : 1;
</span><span class="cx"> 
</span><ins>+    bool m_hasLinkedOSRExit : 1;
+    bool m_isEligibleForLLIntDowngrade : 1;
</ins><span class="cx">     bool m_visitChildrenSkippedDueToOldAge { false };
</span><span class="cx"> 
</span><span class="cx">     // Internal methods for use by validation code. It would be private if it wasn't
</span><span class="lines">@@ -873,12 +907,16 @@
</span><span class="cx">     static ptrdiff_t offsetOfMetadataTable() { return OBJECT_OFFSETOF(CodeBlock, m_metadata); }
</span><span class="cx">     static ptrdiff_t offsetOfInstructionsRawPointer() { return OBJECT_OFFSETOF(CodeBlock, m_instructionsRawPointer); }
</span><span class="cx"> 
</span><del>-    bool loopHintsAreEligibleForFuzzingEarlyReturn() { return m_unlinkedCode->loopHintsAreEligibleForFuzzingEarlyReturn(); }
</del><ins>+    bool loopHintsAreEligibleForFuzzingEarlyReturn()
+    {
+        // Some builtins are required to always complete the loops they run.
+        return !m_unlinkedCode->isBuiltinFunction();
+    }
</ins><span class="cx"> 
</span><span class="cx"> protected:
</span><span class="cx">     void finalizeLLIntInlineCaches();
</span><span class="cx"> #if ENABLE(JIT)
</span><del>-    void finalizeJITInlineCaches();
</del><ins>+    void finalizeBaselineJITInlineCaches();
</ins><span class="cx"> #endif
</span><span class="cx"> #if ENABLE(DFG_JIT)
</span><span class="cx">     void tallyFrequentExitSites();
</span><span class="lines">@@ -972,9 +1010,7 @@
</span><span class="cx">     StructureWatchpointMap m_llintGetByIdWatchpointMap;
</span><span class="cx">     RefPtr<JITCode> m_jitCode;
</span><span class="cx"> #if ENABLE(JIT)
</span><del>-public:
</del><span class="cx">     std::unique_ptr<JITData> m_jitData;
</span><del>-private:
</del><span class="cx"> #endif
</span><span class="cx"> #if ENABLE(DFG_JIT)
</span><span class="cx">     // This is relevant to non-DFG code blocks that serve as the profiled code block
</span><span class="lines">@@ -993,7 +1029,7 @@
</span><span class="cx"> 
</span><span class="cx">     WriteBarrier<CodeBlock> m_alternative;
</span><span class="cx">     
</span><del>-    BaselineExecutionCounter* m_llintExecuteCounter { nullptr };
</del><ins>+    BaselineExecutionCounter m_llintExecuteCounter;
</ins><span class="cx"> 
</span><span class="cx">     BaselineExecutionCounter m_jitExecuteCounter;
</span><span class="cx">     uint32_t m_osrExitCounter;
</span></span></pre></div>
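<p>The addMathIC declarations restored above use a defaulted std::enable_if template parameter so that a single call site, addMathIC<Generator>(profile), resolves to the IC type matching the generator (JITAddGenerator, JITMulGenerator, JITNegGenerator, or JITSubGenerator). The sketch below shows the same overload-selection pattern in isolation; every type in it is a stand-in defined only for this example.</p>
<pre>
// Self-contained sketch of the enable_if overload selection used by addMathIC above.
#include <cstdio>
#include <type_traits>

struct JITAddGenerator { };
struct JITNegGenerator { };
struct JITAddIC { const char* kind = "add IC"; };
struct JITNegIC { const char* kind = "neg IC"; };
struct BinaryArithProfile { };
struct UnaryArithProfile { };

struct MiniCodeBlock {
    JITAddIC* addJITAddIC(BinaryArithProfile*) { static JITAddIC ic; return &ic; }
    JITNegIC* addJITNegIC(UnaryArithProfile*) { static JITNegIC ic; return &ic; }

    // Only the overload whose enable_if condition matches the explicit Generator argument
    // survives substitution, so each call resolves to the matching IC kind.
    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITAddGenerator>::value>::type>
    JITAddIC* addMathIC(BinaryArithProfile* profile) { return addJITAddIC(profile); }

    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITNegGenerator>::value>::type>
    JITNegIC* addMathIC(UnaryArithProfile* profile) { return addJITNegIC(profile); }
};

int main()
{
    MiniCodeBlock codeBlock;
    BinaryArithProfile binaryProfile;
    UnaryArithProfile unaryProfile;
    std::printf("%s\n", codeBlock.addMathIC<JITAddGenerator>(&binaryProfile)->kind);
    std::printf("%s\n", codeBlock.addMathIC<JITNegGenerator>(&unaryProfile)->kind);
    return 0;
}
</pre>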
<a id="trunkSourceJavaScriptCorebytecodeCodeBlockInlinesh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/CodeBlockInlines.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/CodeBlockInlines.h  2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/CodeBlockInlines.h     2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -25,10 +25,8 @@
</span><span class="cx"> 
</span><span class="cx"> #pragma once
</span><span class="cx"> 
</span><del>-#include "BaselineJITCode.h"
</del><span class="cx"> #include "BytecodeStructs.h"
</span><span class="cx"> #include "CodeBlock.h"
</span><del>-#include "DFGJITCode.h"
</del><span class="cx"> #include "UnlinkedMetadataTableInlines.h"
</span><span class="cx"> 
</span><span class="cx"> namespace JSC {
</span><span class="lines">@@ -42,7 +40,9 @@
</span><span class="cx">     if (m_metadata) {
</span><span class="cx"> #define VISIT(__op) \
</span><span class="cx">         m_metadata->forEach<__op>([&] (auto& metadata) { func(metadata.m_profile, false); });
</span><ins>+
</ins><span class="cx">         FOR_EACH_OPCODE_WITH_VALUE_PROFILE(VISIT)
</span><ins>+
</ins><span class="cx"> #undef VISIT
</span><span class="cx"> 
</span><span class="cx">         m_metadata->forEach<OpIteratorOpen>([&] (auto& metadata) { 
</span><span class="lines">@@ -57,6 +57,7 @@
</span><span class="cx">             func(metadata.m_valueProfile, false);
</span><span class="cx">         });
</span><span class="cx">     }   
</span><ins>+
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> template<typename Functor>
</span><span class="lines">@@ -98,38 +99,4 @@
</span><span class="cx">     }
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-#if ENABLE(JIT)
-ALWAYS_INLINE const JITCodeMap& CodeBlock::jitCodeMap()
-{
-    ASSERT(jitType() == JITType::BaselineJIT);
-    return static_cast<BaselineJITCode*>(m_jitCode.get())->m_jitCodeMap;
-}
-
-ALWAYS_INLINE SimpleJumpTable& CodeBlock::baselineSwitchJumpTable(int tableIndex)
-{
-    ASSERT(jitType() == JITType::BaselineJIT);
-    return static_cast<BaselineJITCode*>(m_jitCode.get())->m_switchJumpTables[tableIndex];
-}
-
-ALWAYS_INLINE StringJumpTable& CodeBlock::baselineStringSwitchJumpTable(int tableIndex)
-{
-    ASSERT(jitType() == JITType::BaselineJIT);
-    return static_cast<BaselineJITCode*>(m_jitCode.get())->m_stringSwitchJumpTables[tableIndex];
-}
-#endif
-
-#if ENABLE(DFG_JIT)
-ALWAYS_INLINE SimpleJumpTable& CodeBlock::dfgSwitchJumpTable(int tableIndex)
-{
-    ASSERT(jitType() == JITType::DFGJIT);
-    return static_cast<DFG::JITCode*>(m_jitCode.get())->m_switchJumpTables[tableIndex];
-}
-
-ALWAYS_INLINE StringJumpTable& CodeBlock::dfgStringSwitchJumpTable(int tableIndex)
-{
-    ASSERT(jitType() == JITType::DFGJIT);
-    return static_cast<DFG::JITCode*>(m_jitCode.get())->m_stringSwitchJumpTables[tableIndex];
-}
-#endif
-
</del><span class="cx"> } // namespace JSC
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeExecutableToCodeBlockEdgeh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/ExecutableToCodeBlockEdge.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/ExecutableToCodeBlockEdge.h 2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/ExecutableToCodeBlockEdge.h    2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -70,8 +70,6 @@
</span><span class="cx">     static ExecutableToCodeBlockEdge* wrap(CodeBlock* codeBlock);
</span><span class="cx">     
</span><span class="cx">     static ExecutableToCodeBlockEdge* wrapAndActivate(CodeBlock* codeBlock);
</span><del>-
-    static ptrdiff_t offsetOfCodeBlock() { return OBJECT_OFFSETOF(ExecutableToCodeBlockEdge, m_codeBlock); }
</del><span class="cx">     
</span><span class="cx"> private:
</span><span class="cx">     friend class LLIntOffsetsExtractor;
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeExecutionCountercpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp        2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp   2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -159,7 +159,7 @@
</span><span class="cx">         return true;
</span><span class="cx">     }
</span><span class="cx"> 
</span><del>-    threshold = clippedThreshold(threshold);
</del><ins>+    threshold = clippedThreshold(codeBlock ? codeBlock->globalObject() : nullptr, threshold);
</ins><span class="cx">     
</span><span class="cx">     m_counter = static_cast<int32_t>(-threshold);
</span><span class="cx">         
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeExecutionCounterh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/ExecutionCounter.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/ExecutionCounter.h  2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/ExecutionCounter.h     2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -60,7 +60,7 @@
</span><span class="cx">     void forceSlowPathConcurrently(); // If you use this, checkIfThresholdCrossedAndSet() may still return false.
</span><span class="cx">     bool checkIfThresholdCrossedAndSet(CodeBlock*);
</span><span class="cx">     bool hasCrossedThreshold() const { return m_counter >= 0; }
</span><del>-    void setNewThreshold(int32_t threshold, CodeBlock* = nullptr);
</del><ins>+    void setNewThreshold(int32_t threshold, CodeBlock*);
</ins><span class="cx">     void deferIndefinitely();
</span><span class="cx">     double count() const { return static_cast<double>(m_totalCount) + m_counter; }
</span><span class="cx">     void dump(PrintStream&) const;
</span><span class="lines">@@ -86,9 +86,13 @@
</span><span class="cx">     }
</span><span class="cx">     
</span><span class="cx">     template<typename T>
</span><del>-    static T clippedThreshold(T threshold)
</del><ins>+    static T clippedThreshold(JSGlobalObject* globalObject, T threshold)
</ins><span class="cx">     {
</span><del>-        int32_t maxThreshold = maximumExecutionCountsBetweenCheckpoints();
</del><ins>+        int32_t maxThreshold;
+        if (Options::randomizeExecutionCountsBetweenCheckpoints() && globalObject)
+            maxThreshold = globalObject->weakRandomInteger() % maximumExecutionCountsBetweenCheckpoints();
+        else
+            maxThreshold = maximumExecutionCountsBetweenCheckpoints();
</ins><span class="cx">         if (threshold > maxThreshold)
</span><span class="cx">             threshold = maxThreshold;
</span><span class="cx">         return threshold;
</span></span></pre></div>
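<p>The clippedThreshold change above makes the cap applied to a newly set execution-counter threshold optionally random: when Options::randomizeExecutionCountsBetweenCheckpoints() is enabled and a global object is available, the cap is drawn from that global object's weak RNG rather than the fixed maximum, so tier-up points can vary from run to run. Below is a standalone sketch of the same clipping logic; the cap value and the RNG are placeholders, not the real JSC ones.</p>
<pre>
// Standalone sketch of the clipping behaviour above. The constant and all names are
// placeholders invented for this example.
#include <cstdint>
#include <cstdio>
#include <random>

static constexpr int32_t maximumExecutionCountsBetweenCheckpoints = 50000; // placeholder value

int32_t clippedThreshold(bool randomize, std::mt19937& weakRandom, int32_t threshold)
{
    int32_t maxThreshold;
    if (randomize)
        maxThreshold = static_cast<int32_t>(weakRandom() % maximumExecutionCountsBetweenCheckpoints);
    else
        maxThreshold = maximumExecutionCountsBetweenCheckpoints;
    if (threshold > maxThreshold)
        threshold = maxThreshold;
    return threshold;
}

int main()
{
    std::mt19937 weakRandom(42);
    std::printf("fixed cap:      %d\n", clippedThreshold(false, weakRandom, 1 << 20));
    std::printf("randomized cap: %d\n", clippedThreshold(true, weakRandom, 1 << 20));
    return 0;
}
</pre>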
<a id="trunkSourceJavaScriptCorebytecodeGetByIdMetadatah"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/GetByIdMetadata.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/GetByIdMetadata.h   2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/GetByIdMetadata.h      2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -49,7 +49,6 @@
</span><span class="cx"> static_assert(sizeof(GetByIdModeMetadataUnset) == 12);
</span><span class="cx"> 
</span><span class="cx"> struct GetByIdModeMetadataArrayLength {
</span><del>-    static ptrdiff_t offsetOfArrayProfile() { return OBJECT_OFFSETOF(GetByIdModeMetadataArrayLength, arrayProfile); }
</del><span class="cx">     ArrayProfile arrayProfile;
</span><span class="cx"> };
</span><span class="cx"> static_assert(sizeof(GetByIdModeMetadataArrayLength) == 12);
</span><span class="lines">@@ -89,7 +88,6 @@
</span><span class="cx">         GetByIdMode mode;
</span><span class="cx">         uint8_t hitCountForLLIntCaching; // This must be zero when we use ProtoLoad mode.
</span><span class="cx">     };
</span><del>-    static ptrdiff_t offsetOfMode() { return OBJECT_OFFSETOF(GetByIdModeMetadata, mode); }
</del><span class="cx">     GetByIdModeMetadataDefault defaultMode;
</span><span class="cx">     GetByIdModeMetadataUnset unsetMode;
</span><span class="cx">     GetByIdModeMetadataArrayLength arrayLengthMode;
</span><span class="lines">@@ -119,7 +117,6 @@
</span><span class="cx">         GetByIdModeMetadataProtoLoad protoLoadMode;
</span><span class="cx">     };
</span><span class="cx">     GetByIdMode mode;
</span><del>-    static ptrdiff_t offsetOfMode() { return OBJECT_OFFSETOF(GetByIdModeMetadata, mode); }
</del><span class="cx">     uint8_t hitCountForLLIntCaching;
</span><span class="cx"> };
</span><span class="cx"> #endif
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeGetByStatuscpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/GetByStatus.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/GetByStatus.cpp     2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/GetByStatus.cpp        2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -215,7 +215,7 @@
</span><span class="cx">         return GetByStatus(NoInformation);
</span><span class="cx">         
</span><span class="cx">     case CacheType::GetByIdSelf: {
</span><del>-        Structure* structure = stubInfo->inlineAccessBaseStructure(profiledBlock->vm());
</del><ins>+        Structure* structure = stubInfo->m_inlineAccessBaseStructure.get();
</ins><span class="cx">         if (structure->takesSlowPathInDFGForImpureProperty())
</span><span class="cx">             return GetByStatus(JSC::slowVersion(summary), stubInfo);
</span><span class="cx">         CacheableIdentifier identifier = stubInfo->identifier();
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeGetterSetterAccessCasecpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/GetterSetterAccessCase.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/GetterSetterAccessCase.cpp  2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/GetterSetterAccessCase.cpp     2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -190,7 +190,7 @@
</span><span class="cx"> 
</span><span class="cx">     // Let's store the reused registers to the stack. After that, we can use allocated scratch registers.
</span><span class="cx">     ScratchRegisterAllocator::PreservedState preservedState =
</span><del>-        allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
</del><ins>+    allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
</ins><span class="cx"> 
</span><span class="cx">     if (GetterSetterAccessCaseInternal::verbose) {
</span><span class="cx">         dataLog("baseGPR = ", baseGPR, "\n");
</span><span class="lines">@@ -225,9 +225,12 @@
</span><span class="cx">         registersToSpillForCCall.set(reg);
</span><span class="cx">     for (FPRReg reg : fpScratch)
</span><span class="cx">         registersToSpillForCCall.set(reg);
</span><del>-    if (jit.codeBlock()->useDataIC())
-        registersToSpillForCCall.set(stubInfo.m_stubInfoGPR);
</del><span class="cx">     registersToSpillForCCall.exclude(RegisterSet::registersToNotSaveForCCall());
</span><ins>+#if CPU(ARM64)
+    CodeBlock* codeBlock = jit.codeBlock();
+    if (codeBlock->useDataIC())
+        registersToSpillForCCall.set(ARM64Registers::lr);
+#endif
</ins><span class="cx"> 
</span><span class="cx">     AccessCaseSnippetParams params(state.m_vm, WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
</span><span class="cx">     snippet->generator()->run(jit, params);
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeInByStatuscpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/InByStatus.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/InByStatus.cpp      2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/InByStatus.cpp 2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -139,7 +139,7 @@
</span><span class="cx">         return InByStatus(NoInformation);
</span><span class="cx"> 
</span><span class="cx">     case CacheType::InByIdSelf: {
</span><del>-        Structure* structure = stubInfo->inlineAccessBaseStructure(vm);
</del><ins>+        Structure* structure = stubInfo->m_inlineAccessBaseStructure.get();
</ins><span class="cx">         if (structure->takesSlowPathInDFGForImpureProperty())
</span><span class="cx">             return InByStatus(TakesSlowPath);
</span><span class="cx">         CacheableIdentifier identifier = stubInfo->identifier();
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeInlineAccesscpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/InlineAccess.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/InlineAccess.cpp    2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/InlineAccess.cpp       2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -176,16 +176,11 @@
</span><span class="cx">     return false;
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-bool InlineAccess::generateSelfPropertyAccess(CodeBlock* codeBlock, StructureStubInfo& stubInfo, Structure* structure, PropertyOffset offset)
</del><ins>+bool InlineAccess::generateSelfPropertyAccess(StructureStubInfo& stubInfo, Structure* structure, PropertyOffset offset)
</ins><span class="cx"> {
</span><span class="cx">     if (!stubInfo.hasConstantIdentifier)
</span><span class="cx">         return false;
</span><span class="cx"> 
</span><del>-    if (codeBlock->useDataIC()) {
-        // These dynamic slots get filled in by StructureStubInfo. Nothing else to do.
-        return true;
-    }
-
</del><span class="cx">     CCallHelpers jit;
</span><span class="cx">     
</span><span class="cx">     GPRReg base = stubInfo.baseGPR;
</span><span class="lines">@@ -238,14 +233,11 @@
</span><span class="cx">     return getScratchRegister(stubInfo) != InvalidGPRReg;
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-bool InlineAccess::canGenerateSelfPropertyReplace(CodeBlock* codeBlock, StructureStubInfo& stubInfo, PropertyOffset offset)
</del><ins>+bool InlineAccess::canGenerateSelfPropertyReplace(StructureStubInfo& stubInfo, PropertyOffset offset)
</ins><span class="cx"> {
</span><span class="cx">     if (!stubInfo.hasConstantIdentifier)
</span><span class="cx">         return false;
</span><span class="cx"> 
</span><del>-    if (codeBlock->useDataIC())
-        return true;
-
</del><span class="cx">     if (isInlineOffset(offset))
</span><span class="cx">         return true;
</span><span class="cx"> 
</span><span class="lines">@@ -252,18 +244,13 @@
</span><span class="cx">     return hasFreeRegister(stubInfo);
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-bool InlineAccess::generateSelfPropertyReplace(CodeBlock* codeBlock, StructureStubInfo& stubInfo, Structure* structure, PropertyOffset offset)
</del><ins>+bool InlineAccess::generateSelfPropertyReplace(StructureStubInfo& stubInfo, Structure* structure, PropertyOffset offset)
</ins><span class="cx"> {
</span><span class="cx">     if (!stubInfo.hasConstantIdentifier)
</span><span class="cx">         return false;
</span><span class="cx"> 
</span><del>-    ASSERT(canGenerateSelfPropertyReplace(codeBlock, stubInfo, offset));
</del><ins>+    ASSERT(canGenerateSelfPropertyReplace(stubInfo, offset));
</ins><span class="cx"> 
</span><del>-    if (codeBlock->useDataIC()) {
-        // These dynamic slots get filled in by StructureStubInfo. Nothing else to do.
-        return true;
-    }
-
</del><span class="cx">     CCallHelpers jit;
</span><span class="cx"> 
</span><span class="cx">     GPRReg base = stubInfo.baseGPR;
</span><span class="lines">@@ -292,7 +279,7 @@
</span><span class="cx">     return linkedCodeInline;
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-bool InlineAccess::isCacheableArrayLength(CodeBlock* codeBlock, StructureStubInfo& stubInfo, JSArray* array)
</del><ins>+bool InlineAccess::isCacheableArrayLength(StructureStubInfo& stubInfo, JSArray* array)
</ins><span class="cx"> {
</span><span class="cx">     ASSERT(array->indexingType() & IsArray);
</span><span class="cx"> 
</span><span class="lines">@@ -299,9 +286,6 @@
</span><span class="cx">     if (!stubInfo.hasConstantIdentifier)
</span><span class="cx">         return false;
</span><span class="cx"> 
</span><del>-    if (codeBlock->jitType() == JITType::BaselineJIT)
-        return false;
-
</del><span class="cx">     if (!hasFreeRegister(stubInfo))
</span><span class="cx">         return false;
</span><span class="cx"> 
</span><span class="lines">@@ -308,9 +292,9 @@
</span><span class="cx">     return !hasAnyArrayStorage(array->indexingType()) && array->indexingType() != ArrayClass;
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-bool InlineAccess::generateArrayLength(CodeBlock* codeBlock, StructureStubInfo& stubInfo, JSArray* array)
</del><ins>+bool InlineAccess::generateArrayLength(StructureStubInfo& stubInfo, JSArray* array)
</ins><span class="cx"> {
</span><del>-    ASSERT_UNUSED(codeBlock, isCacheableArrayLength(codeBlock, stubInfo, array));
</del><ins>+    ASSERT(isCacheableArrayLength(stubInfo, array));
</ins><span class="cx"> 
</span><span class="cx">     if (!stubInfo.hasConstantIdentifier)
</span><span class="cx">         return false;
</span><span class="lines">@@ -335,20 +319,17 @@
</span><span class="cx">     return linkedCodeInline;
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-bool InlineAccess::isCacheableStringLength(CodeBlock* codeBlock, StructureStubInfo& stubInfo)
</del><ins>+bool InlineAccess::isCacheableStringLength(StructureStubInfo& stubInfo)
</ins><span class="cx"> {
</span><span class="cx">     if (!stubInfo.hasConstantIdentifier)
</span><span class="cx">         return false;
</span><span class="cx"> 
</span><del>-    if (codeBlock->jitType() == JITType::BaselineJIT)
-        return false;
-
</del><span class="cx">     return hasFreeRegister(stubInfo);
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-bool InlineAccess::generateStringLength(CodeBlock* codeBlock, StructureStubInfo& stubInfo)
</del><ins>+bool InlineAccess::generateStringLength(StructureStubInfo& stubInfo)
</ins><span class="cx"> {
</span><del>-    ASSERT_UNUSED(codeBlock, isCacheableStringLength(codeBlock, stubInfo));
</del><ins>+    ASSERT(isCacheableStringLength(stubInfo));
</ins><span class="cx"> 
</span><span class="cx">     if (!stubInfo.hasConstantIdentifier)
</span><span class="cx">         return false;
</span><span class="lines">@@ -382,7 +363,7 @@
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> 
</span><del>-bool InlineAccess::generateSelfInAccess(CodeBlock* codeBlock, StructureStubInfo& stubInfo, Structure* structure)
</del><ins>+bool InlineAccess::generateSelfInAccess(StructureStubInfo& stubInfo, Structure* structure)
</ins><span class="cx"> {
</span><span class="cx">     CCallHelpers jit;
</span><span class="cx"> 
</span><span class="lines">@@ -389,11 +370,6 @@
</span><span class="cx">     if (!stubInfo.hasConstantIdentifier)
</span><span class="cx">         return false;
</span><span class="cx"> 
</span><del>-    if (codeBlock->useDataIC()) {
-        // These dynamic slots get filled in by StructureStubInfo. Nothing else to do.
-        return true;
-    }
-
</del><span class="cx">     GPRReg base = stubInfo.baseGPR;
</span><span class="cx">     JSValueRegs value = stubInfo.valueRegs();
</span><span class="cx"> 
</span><span class="lines">@@ -430,13 +406,13 @@
</span><span class="cx">     if (codeBlock->useDataIC()) {
</span><span class="cx">         // If it is not GetById-like-thing, we do not emit nop sled (e.g. GetByVal).
</span><span class="cx">         // The code is already an indirect jump, and only thing we should do is replacing m_codePtr.
</span><del>-        if (codeBlock->jitType() != JITType::BaselineJIT && stubInfo.hasConstantIdentifier) {
</del><ins>+        if (stubInfo.hasConstantIdentifier) {
</ins><span class="cx">             // If m_codePtr is pointing to stubInfo.slowPathStartLocation, this means that InlineAccess code is not a stub one.
</span><span class="cx">             // We rewrite this with the stub-based dispatching code once, and continue using it until we reset the code.
</span><span class="cx">             if (stubInfo.m_codePtr.executableAddress() == stubInfo.slowPathStartLocation.executableAddress()) {
</span><span class="cx">                 CCallHelpers::emitJITCodeOver(stubInfo.start.retagged<JSInternalPtrTag>(), scopedLambda<void(CCallHelpers&)>([&](CCallHelpers& jit) {
</span><span class="cx">                     jit.move(CCallHelpers::TrustedImmPtr(&stubInfo), stubInfo.m_stubInfoGPR);
</span><del>-                    jit.farJump(CCallHelpers::Address(stubInfo.m_stubInfoGPR, StructureStubInfo::offsetOfCodePtr()), JITStubRoutinePtrTag);
</del><ins>+                    jit.call(CCallHelpers::Address(stubInfo.m_stubInfoGPR, StructureStubInfo::offsetOfCodePtr()), JITStubRoutinePtrTag);
</ins><span class="cx">                     auto jump = jit.jump();
</span><span class="cx">                     auto doneLocation = stubInfo.doneLocation;
</span><span class="cx">                     jit.addLinkTask([=](LinkBuffer& linkBuffer) {
</span><span class="lines">@@ -445,9 +421,7 @@
</span><span class="cx">                 }), "InlineAccess: linking stub call");
</span><span class="cx">             }
</span><span class="cx">         }
</span><del>-
</del><span class="cx">         stubInfo.m_codePtr = target;
</span><del>-        stubInfo.m_inlineAccessBaseStructure = 0; // Clear out the inline access code.
</del><span class="cx">         return;
</span><span class="cx">     }
</span><span class="cx"> 
</span><span class="lines">@@ -460,14 +434,8 @@
</span><span class="cx">     }), "InlineAccess: linking constant jump");
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-void InlineAccess::resetStubAsJumpInAccess(CodeBlock* codeBlock, StructureStubInfo& stubInfo)
</del><ins>+void InlineAccess::resetStubAsJumpInAccess(CodeBlock*, StructureStubInfo& stubInfo)
</ins><span class="cx"> {
</span><del>-    if (codeBlock->useDataIC() && codeBlock->jitType() == JITType::BaselineJIT) {
-        stubInfo.m_codePtr = stubInfo.slowPathStartLocation;
-        stubInfo.m_inlineAccessBaseStructure = 0; // Clear out the inline access code.
-        return;
-    }
-
</del><span class="cx">     CCallHelpers::emitJITCodeOver(stubInfo.start.retagged<JSInternalPtrTag>(), scopedLambda<void(CCallHelpers&)>([&](CCallHelpers& jit) {
</span><span class="cx">         // We don't need a nop sled here because nobody should be jumping into the middle of an IC.
</span><span class="cx">         auto jump = jit.jump();
</span></span></pre></div>
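<p>With this hunk, the data-IC fast path dispatches with a call through StructureStubInfo::offsetOfCodePtr(), and the generated stub epilogue simply returns (see the PolymorphicAccess.cpp hunk below) instead of far-jumping to a fixed done location. A minimal, self-contained C++ sketch of that call-and-return dispatch shape follows; the struct and function names are hypothetical stand-ins, not the real JSC types.</p>
<pre>
#include &lt;cstdio&gt;

// Hypothetical stand-ins for JSC's StructureStubInfo / handler code;
// names and layout are illustrative only.
struct StubInfo {
    // Pointer the inline cache dispatches through; repatched when a
    // stub is generated, and reset to the slow path otherwise.
    long (*codePtr)(StubInfo&amp;, long base);
};

// Slow-path handler: always valid, never repatched away.
static long slowPath(StubInfo&amp;, long base)
{
    std::printf("slow path for %ld\n", base);
    return base * 2;
}

// A generated "stub": because dispatch is a call, it can simply return
// to the IC's done location instead of jumping to a hard-coded address.
static long generatedStub(StubInfo&amp;, long base)
{
    return base * 2;
}

static long inlineCacheSite(StubInfo&amp; info, long base)
{
    // The IC fast path: one indirect call through the stub info, after
    // which execution continues at the "done" location (here: the return).
    return info.codePtr(info, base);
}

int main()
{
    StubInfo info { slowPath };
    std::printf("%ld\n", inlineCacheSite(info, 21)); // takes the slow path
    info.codePtr = generatedStub;                    // repatch the data IC
    std::printf("%ld\n", inlineCacheSite(info, 21)); // now calls the stub
}
</pre>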
<a id="trunkSourceJavaScriptCorebytecodeInlineAccessh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/InlineAccess.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/InlineAccess.h      2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/InlineAccess.h 2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -99,14 +99,14 @@
</span><span class="cx">         return std::max(size, sizeForPropertyAccess());
</span><span class="cx">     }
</span><span class="cx"> 
</span><del>-    static bool generateSelfPropertyAccess(CodeBlock*, StructureStubInfo&, Structure*, PropertyOffset);
-    static bool canGenerateSelfPropertyReplace(CodeBlock*, StructureStubInfo&, PropertyOffset);
-    static bool generateSelfPropertyReplace(CodeBlock*, StructureStubInfo&, Structure*, PropertyOffset);
-    static bool isCacheableArrayLength(CodeBlock*, StructureStubInfo&, JSArray*);
-    static bool isCacheableStringLength(CodeBlock*, StructureStubInfo&);
-    static bool generateArrayLength(CodeBlock*, StructureStubInfo&, JSArray*);
-    static bool generateSelfInAccess(CodeBlock*, StructureStubInfo&, Structure*);
-    static bool generateStringLength(CodeBlock*, StructureStubInfo&);
</del><ins>+    static bool generateSelfPropertyAccess(StructureStubInfo&, Structure*, PropertyOffset);
+    static bool canGenerateSelfPropertyReplace(StructureStubInfo&, PropertyOffset);
+    static bool generateSelfPropertyReplace(StructureStubInfo&, Structure*, PropertyOffset);
+    static bool isCacheableArrayLength(StructureStubInfo&, JSArray*);
+    static bool isCacheableStringLength(StructureStubInfo&);
+    static bool generateArrayLength(StructureStubInfo&, JSArray*);
+    static bool generateSelfInAccess(StructureStubInfo&, Structure*);
+    static bool generateStringLength(StructureStubInfo&);
</ins><span class="cx"> 
</span><span class="cx">     static void rewireStubAsJumpInAccessNotUsingInlineAccess(CodeBlock*, StructureStubInfo&, CodeLocationLabel<JITStubRoutinePtrTag>);
</span><span class="cx">     static void rewireStubAsJumpInAccess(CodeBlock*, StructureStubInfo&, CodeLocationLabel<JITStubRoutinePtrTag>);
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeIterationModeMetadatah"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/IterationModeMetadata.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/IterationModeMetadata.h     2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/IterationModeMetadata.h        2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -40,7 +40,6 @@
</span><span class="cx"> 
</span><span class="cx"> struct IterationModeMetadata {
</span><span class="cx">     uint8_t seenModes { 0 };
</span><del>-    static ptrdiff_t offsetOfSeenModes() { return OBJECT_OFFSETOF(IterationModeMetadata, seenModes); } 
</del><span class="cx">     static_assert(sizeof(decltype(seenModes)) == sizeof(IterationMode));
</span><span class="cx"> };
</span><span class="cx"> 
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeLLIntCallLinkInfoh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h 2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h    2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -87,7 +87,6 @@
</span><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx">     ArrayProfile m_arrayProfile;
</span><del>-    static ptrdiff_t offsetOfArrayProfile() { return OBJECT_OFFSETOF(LLIntCallLinkInfo, m_arrayProfile); }
</del><span class="cx"> 
</span><span class="cx"> private:
</span><span class="cx">     uintptr_t m_calleeOrLastSeenCalleeWithLinkBit { unlinkedBit };
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeOpcodeh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/Opcode.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/Opcode.h    2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/Opcode.h       2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -156,18 +156,7 @@
</span><span class="cx">     macro(OpIteratorOpen) \
</span><span class="cx">     macro(OpIteratorNext) \
</span><span class="cx"> 
</span><del>-#define FOR_EACH_OPCODE_WITH_BINARY_ARITH_PROFILE(macro) \
-    macro(OpAdd) \
-    macro(OpMul) \
-    macro(OpDiv) \
-    macro(OpSub) \
</del><span class="cx"> 
</span><del>-#define FOR_EACH_OPCODE_WITH_UNARY_ARITH_PROFILE(macro) \
-    macro(OpInc) \
-    macro(OpDec) \
-    macro(OpNegate) \
-
-
</del><span class="cx"> IGNORE_WARNINGS_BEGIN("type-limits")
</span><span class="cx"> 
</span><span class="cx"> #define VERIFY_OPCODE_ID(id, size) COMPILE_ASSERT(id <= numOpcodeIDs, ASSERT_THAT_JS_OPCODE_IDS_ARE_VALID);
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodePolymorphicAccesscpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp       2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp  2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -73,7 +73,7 @@
</span><span class="cx"> {
</span><span class="cx">     restoreScratch();
</span><span class="cx">     if (jit->codeBlock()->useDataIC())
</span><del>-        jit->farJump(CCallHelpers::Address(stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfDoneLocation()), JSInternalPtrTag);
</del><ins>+        jit->ret();
</ins><span class="cx">     else
</span><span class="cx">         success.append(jit->jump());
</span><span class="cx"> }
</span><span class="lines">@@ -110,8 +110,6 @@
</span><span class="cx">             RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));
</span><span class="cx"> 
</span><span class="cx">         m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
</span><del>-        if (jit->codeBlock()->useDataIC())
-            m_liveRegistersForCall.add(stubInfo->m_stubInfoGPR);
</del><span class="cx">         m_liveRegistersForCall.exclude(calleeSaveRegisters());
</span><span class="cx">     }
</span><span class="cx">     return m_liveRegistersForCall;
</span><span class="lines">@@ -130,12 +128,11 @@
</span><span class="cx">     };
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-auto AccessGenerationState::preserveLiveRegistersToStackForCallWithoutExceptions() -> SpillState
</del><ins>+auto AccessGenerationState::preserveLiveRegistersToStackForCallWithoutExceptions(const RegisterSet& extra) -> SpillState
</ins><span class="cx"> {
</span><span class="cx">     RegisterSet liveRegisters = allocator->usedRegisters();
</span><del>-    if (jit->codeBlock()->useDataIC())
-        liveRegisters.add(stubInfo->m_stubInfoGPR);
</del><span class="cx">     liveRegisters.exclude(calleeSaveRegisters());
</span><ins>+    liveRegisters.merge(extra);
</ins><span class="cx"> 
</span><span class="cx">     constexpr unsigned extraStackPadding = 0;
</span><span class="cx">     unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegisters, extraStackPadding);
</span><span class="lines">@@ -599,11 +596,16 @@
</span><span class="cx">     CCallHelpers jit(codeBlock);
</span><span class="cx">     state.jit = &jit;
</span><span class="cx"> 
</span><del>-    if (!canBeShared && ASSERT_ENABLED) {
-        jit.addPtr(CCallHelpers::TrustedImm32(codeBlock->stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, jit.scratchRegister());
-        auto ok = jit.branchPtr(CCallHelpers::Equal, CCallHelpers::stackPointerRegister, jit.scratchRegister());
-        jit.breakpoint();
-        ok.link(&jit);
</del><ins>+    if (codeBlock->useDataIC()) {
+        if (state.m_doesJSGetterSetterCalls) {
+            // We have no guarantee that the stack-pointer is the expected one. This is not a problem if there are no JS getter / setter calls, since the stack-pointer is
+            // a callee-save register in the C calling convention. However, our JS executable call does not save the stack-pointer, so we adjust the stack-pointer after
+            // JS getter / setter calls. But that adjusted value can differ from the initial stack-pointer, which breaks PAC tagging.
+            // To ensure PAC-tagging works, we first adjust the stack-pointer to the appropriate one.
+            jit.addPtr(CCallHelpers::TrustedImm32(codeBlock->stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
+            jit.tagReturnAddress();
+        } else
+            jit.tagReturnAddress();
</ins><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx">     state.preservedReusedRegisterState =
</span></span></pre></div>
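<p>In the patched preserveLiveRegistersToStackForCallWithoutExceptions(), callee-save registers are excluded before the caller-supplied extra set is merged in, so an extra register is spilled even when it happens to be callee-saved. A toy sketch of that ordering, with std::set standing in for RegisterSet and arbitrary register numbers:</p>
<pre>
#include &lt;cstdio&gt;
#include &lt;set&gt;

// Toy stand-in for JSC's RegisterSet; register names are arbitrary.
using RegisterSet = std::set&lt;int&gt;;

// Mirrors the order of operations in the patched function: callee saves are
// excluded first, then the caller-supplied "extra" set is merged back in,
// so an extra register is spilled even if it is callee-saved.
RegisterSet computeRegistersToSpill(RegisterSet used, const RegisterSet&amp; calleeSaves, const RegisterSet&amp; extra)
{
    for (int reg : calleeSaves)
        used.erase(reg);
    used.insert(extra.begin(), extra.end());
    return used;
}

int main()
{
    RegisterSet used { 0, 1, 2, 3 };
    RegisterSet calleeSaves { 2, 3 };
    RegisterSet extra { 3, 7 }; // e.g. a register the caller wants preserved anyway
    for (int reg : computeRegistersToSpill(used, calleeSaves, extra))
        std::printf("spill r%d\n", reg); // r0 r1 r3 r7
}
</pre>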
<a id="trunkSourceJavaScriptCorebytecodePolymorphicAccessh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/PolymorphicAccess.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/PolymorphicAccess.h 2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/PolymorphicAccess.h    2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -251,7 +251,7 @@
</span><span class="cx">     const RegisterSet& calculateLiveRegistersForCallAndExceptionHandling();
</span><span class="cx"> 
</span><span class="cx">     SpillState preserveLiveRegistersToStackForCall(const RegisterSet& extra = { });
</span><del>-    SpillState preserveLiveRegistersToStackForCallWithoutExceptions();
</del><ins>+    SpillState preserveLiveRegistersToStackForCallWithoutExceptions(const RegisterSet& extra = { });
</ins><span class="cx"> 
</span><span class="cx">     void restoreLiveRegistersFromStackForCallWithThrownException(const SpillState&);
</span><span class="cx">     void restoreLiveRegistersFromStackForCall(const SpillState&, const RegisterSet& dontRestore = { });
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodePutByStatuscpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/PutByStatus.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/PutByStatus.cpp     2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/PutByStatus.cpp        2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -180,10 +180,10 @@
</span><span class="cx">         CacheableIdentifier identifier = stubInfo->identifier();
</span><span class="cx">         UniquedStringImpl* uid = identifier.uid();
</span><span class="cx">         RELEASE_ASSERT(uid);
</span><del>-        Structure* structure = stubInfo->inlineAccessBaseStructure(profiledBlock->vm());
-        PropertyOffset offset = structure->getConcurrently(uid);
</del><ins>+        PropertyOffset offset =
+            stubInfo->m_inlineAccessBaseStructure->getConcurrently(uid);
</ins><span class="cx">         if (isValidOffset(offset))
</span><del>-            return PutByVariant::replace(WTFMove(identifier), structure, offset);
</del><ins>+            return PutByVariant::replace(WTFMove(identifier), stubInfo->m_inlineAccessBaseStructure.get(), offset);
</ins><span class="cx">         return PutByStatus(JSC::slowVersion(summary), *stubInfo);
</span><span class="cx">     }
</span><span class="cx">         
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeStructureStubInfocpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp       2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp  2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -27,7 +27,6 @@
</span><span class="cx"> #include "StructureStubInfo.h"
</span><span class="cx"> 
</span><span class="cx"> #include "CacheableIdentifierInlines.h"
</span><del>-#include "JITInlineCacheGenerator.h"
</del><span class="cx"> #include "PolymorphicAccess.h"
</span><span class="cx"> #include "Repatch.h"
</span><span class="cx"> 
</span><span class="lines">@@ -66,9 +65,9 @@
</span><span class="cx">     ASSERT(hasConstantIdentifier);
</span><span class="cx">     setCacheType(locker, CacheType::GetByIdSelf);
</span><span class="cx">     m_identifier = identifier;
</span><del>-    m_inlineAccessBaseStructure = inlineAccessBaseStructure->id();
</del><ins>+    m_inlineAccessBaseStructure.setWithoutWriteBarrier(inlineAccessBaseStructure);
</ins><span class="cx">     codeBlock->vm().heap.writeBarrier(codeBlock);
</span><del>-    byIdSelfOffset = offset;
</del><ins>+    u.byIdSelf.offset = offset;
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void StructureStubInfo::initArrayLength(const ConcurrentJSLockerBase& locker)
</span><span class="lines">@@ -88,9 +87,9 @@
</span><span class="cx">     ASSERT(m_cacheType == CacheType::Unset);
</span><span class="cx">     setCacheType(locker, CacheType::PutByIdReplace);
</span><span class="cx">     m_identifier = identifier;
</span><del>-    m_inlineAccessBaseStructure = inlineAccessBaseStructure->id();
</del><ins>+    m_inlineAccessBaseStructure.setWithoutWriteBarrier(inlineAccessBaseStructure);
</ins><span class="cx">     codeBlock->vm().heap.writeBarrier(codeBlock);
</span><del>-    byIdSelfOffset = offset;
</del><ins>+    u.byIdSelf.offset = offset;
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void StructureStubInfo::initInByIdSelf(const ConcurrentJSLockerBase& locker, CodeBlock* codeBlock, Structure* inlineAccessBaseStructure, PropertyOffset offset, CacheableIdentifier identifier)
</span><span class="lines">@@ -98,9 +97,9 @@
</span><span class="cx">     ASSERT(m_cacheType == CacheType::Unset);
</span><span class="cx">     setCacheType(locker, CacheType::InByIdSelf);
</span><span class="cx">     m_identifier = identifier;
</span><del>-    m_inlineAccessBaseStructure = inlineAccessBaseStructure->id();
</del><ins>+    m_inlineAccessBaseStructure.setWithoutWriteBarrier(inlineAccessBaseStructure);
</ins><span class="cx">     codeBlock->vm().heap.writeBarrier(codeBlock);
</span><del>-    byIdSelfOffset = offset;
</del><ins>+    u.byIdSelf.offset = offset;
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void StructureStubInfo::deref()
</span><span class="lines">@@ -237,7 +236,7 @@
</span><span class="cx">         // we're buffered is because we rely on it to reset during GC if m_inlineAccessBaseStructure
</span><span class="cx">         // is collected.
</span><span class="cx">         m_identifier = nullptr;
</span><del>-        m_inlineAccessBaseStructure = 0;
</del><ins>+        m_inlineAccessBaseStructure.clear();
</ins><span class="cx">         
</span><span class="cx">         // If we generated some code then we don't want to attempt to repatch in the future until we
</span><span class="cx">         // gather enough cases.
</span><span class="lines">@@ -252,7 +251,7 @@
</span><span class="cx"> {
</span><span class="cx">     clearBufferedStructures();
</span><span class="cx">     m_identifier = nullptr;
</span><del>-    m_inlineAccessBaseStructure = 0;
</del><ins>+    m_inlineAccessBaseStructure.clear();
</ins><span class="cx"> 
</span><span class="cx">     if (m_cacheType == CacheType::Unset)
</span><span class="cx">         return;
</span><span class="lines">@@ -286,7 +285,6 @@
</span><span class="cx">         resetPutBy(codeBlock, *this, PutByKind::ById);
</span><span class="cx">         break;
</span><span class="cx">     case AccessType::PutByVal:
</span><del>-    case AccessType::PutPrivateName:
</del><span class="cx">         resetPutBy(codeBlock, *this, PutByKind::ByVal);
</span><span class="cx">         break;
</span><span class="cx">     case AccessType::InById:
</span><span class="lines">@@ -362,8 +360,8 @@
</span><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx">     bool isValid = true;
</span><del>-    if (Structure* structure = inlineAccessBaseStructure(vm))
-        isValid &= vm.heap.isMarked(structure);
</del><ins>+    if (m_inlineAccessBaseStructure)
+        isValid &= vm.heap.isMarked(m_inlineAccessBaseStructure.get());
</ins><span class="cx">     if (m_cacheType == CacheType::Stub)
</span><span class="cx">         isValid &= u.stub->visitWeak(vm);
</span><span class="cx"> 
</span><span class="lines">@@ -377,8 +375,8 @@
</span><span class="cx"> template<typename Visitor>
</span><span class="cx"> void StructureStubInfo::propagateTransitions(Visitor& visitor)
</span><span class="cx"> {
</span><del>-    if (Structure* structure = inlineAccessBaseStructure(visitor.vm()))
-        structure->markIfCheap(visitor);
</del><ins>+    if (m_inlineAccessBaseStructure)
+        m_inlineAccessBaseStructure->markIfCheap(visitor);
</ins><span class="cx"> 
</span><span class="cx">     if (m_cacheType == CacheType::Stub)
</span><span class="cx">         u.stub->propagateTransitions(visitor);
</span><span class="lines">@@ -432,209 +430,6 @@
</span><span class="cx">     m_cacheType = newCacheType;
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-void StructureStubInfo::initializeFromUnlinkedStructureStubInfo(CodeBlock*, UnlinkedStructureStubInfo& unlinkedStubInfo)
-{
-#if USE(JSVALUE64)
-    accessType = unlinkedStubInfo.accessType;
-    start = unlinkedStubInfo.start;
-    doneLocation = unlinkedStubInfo.doneLocation;
-    slowPathStartLocation = unlinkedStubInfo.slowPathStartLocation;
-    callSiteIndex = CallSiteIndex(BytecodeIndex(unlinkedStubInfo.bytecodeIndex.offset()));
-    codeOrigin = CodeOrigin(unlinkedStubInfo.bytecodeIndex);
-    m_codePtr = slowPathStartLocation;
-
-    usedRegisters = RegisterSet::stubUnavailableRegisters();
-    if (accessType == AccessType::GetById && unlinkedStubInfo.bytecodeIndex.checkpoint()) {
-        // For iterator_next, we can't clobber the "dontClobberRegister" register either.
-        usedRegisters.add(BaselineGetByIdRegisters::dontClobberRegister);
-    }
-
-    switch (accessType) {
-    case AccessType::DeleteByVal:
-        m_slowOperation = operationDeleteByValOptimize;
-        break;
-    case AccessType::DeleteByID:
-        m_slowOperation = operationDeleteByIdOptimize;
-        break;
-    case AccessType::GetByVal:
-        m_slowOperation = operationGetByValOptimize;
-        break;
-    case AccessType::InstanceOf:
-        m_slowOperation = operationInstanceOfOptimize;
-        break;
-    case AccessType::InByVal:
-        m_slowOperation = operationInByValOptimize;
-        break;
-    case AccessType::InById:
-        m_slowOperation = operationInByIdOptimize;
-        break;
-    case AccessType::GetById:
-        m_slowOperation = operationGetByIdOptimize;
-        break;
-    case AccessType::TryGetById:
-        m_slowOperation = operationTryGetByIdOptimize;
-        break;
-    case AccessType::GetByIdDirect:
-        m_slowOperation = operationGetByIdDirectOptimize;
-        break;
-    case AccessType::GetByIdWithThis:
-        m_slowOperation = operationGetByIdWithThisOptimize;
-        break;
-    case AccessType::HasPrivateName: 
-        m_slowOperation = operationHasPrivateNameOptimize;
-        break;
-    case AccessType::HasPrivateBrand: 
-        m_slowOperation = operationHasPrivateBrandOptimize;
-        break;
-    case AccessType::GetPrivateName: 
-        m_slowOperation = operationGetPrivateNameOptimize;
-        break;
-    case AccessType::PutById:
-        switch (unlinkedStubInfo.putKind) {
-        case PutKind::NotDirect:
-            if (unlinkedStubInfo.ecmaMode.isStrict())
-                m_slowOperation = operationPutByIdStrictOptimize;
-            else
-                m_slowOperation = operationPutByIdNonStrictOptimize;
-            break;
-        case PutKind::Direct:
-            if (unlinkedStubInfo.ecmaMode.isStrict())
-                m_slowOperation = operationPutByIdDirectStrictOptimize;
-            else
-                m_slowOperation = operationPutByIdDirectNonStrictOptimize;
-            break;
-        case PutKind::DirectPrivateFieldDefine:
-            m_slowOperation = operationPutByIdDefinePrivateFieldStrictOptimize;
-            break;
-        case PutKind::DirectPrivateFieldSet:
-            m_slowOperation = operationPutByIdSetPrivateFieldStrictOptimize;
-            break;
-        }
-        break;
-    case AccessType::PutByVal:
-        switch (unlinkedStubInfo.putKind) {
-        case PutKind::NotDirect:
-            if (unlinkedStubInfo.ecmaMode.isStrict())
-                m_slowOperation = operationPutByValStrictOptimize;
-            else
-                m_slowOperation = operationPutByValNonStrictOptimize;
-            break;
-        case PutKind::Direct:
-            if (unlinkedStubInfo.ecmaMode.isStrict())
-                m_slowOperation = operationDirectPutByValStrictOptimize;
-            else
-                m_slowOperation = operationDirectPutByValNonStrictOptimize;
-            break;
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-        }
-        break;
-    case AccessType::PutPrivateName:
-        m_slowOperation = unlinkedStubInfo.privateFieldPutKind.isDefine() ? operationPutByValDefinePrivateFieldOptimize : operationPutByValSetPrivateFieldOptimize;
-        break;
-    case AccessType::SetPrivateBrand:
-        m_slowOperation = operationSetPrivateBrandOptimize;
-        break;
-    case AccessType::CheckPrivateBrand:
-        m_slowOperation = operationCheckPrivateBrandOptimize;
-        break;
-    }
-
-    switch (accessType) {
-    case AccessType::DeleteByVal:
-        hasConstantIdentifier = false;
-        baseGPR = BaselineDelByValRegisters::base;
-        regs.propertyGPR = BaselineDelByValRegisters::property;
-        valueGPR = BaselineDelByValRegisters::result;
-        m_stubInfoGPR = BaselineDelByValRegisters::stubInfo;
-        break;
-    case AccessType::DeleteByID:
-        hasConstantIdentifier = true;
-        baseGPR = BaselineDelByIdRegisters::base;
-        regs.propertyGPR = InvalidGPRReg;
-        valueGPR = BaselineDelByIdRegisters::result;
-        m_stubInfoGPR = BaselineDelByIdRegisters::stubInfo;
-        break;
-    case AccessType::GetByVal:
-    case AccessType::GetPrivateName:
-        hasConstantIdentifier = false;
-        baseGPR = BaselineGetByValRegisters::base;
-        regs.propertyGPR = BaselineGetByValRegisters::property;
-        valueGPR = BaselineGetByValRegisters::result;
-        m_stubInfoGPR = BaselineGetByValRegisters::stubInfo;
-        break;
-    case AccessType::InstanceOf:
-        hasConstantIdentifier = false;
-        prototypeIsKnownObject = false;
-        baseGPR = BaselineInstanceofRegisters::value;
-        valueGPR = BaselineInstanceofRegisters::result;
-        regs.prototypeGPR = BaselineInstanceofRegisters::proto;
-        m_stubInfoGPR = BaselineInstanceofRegisters::stubInfo;
-        break;
-    case AccessType::InByVal:
-    case AccessType::HasPrivateName: 
-    case AccessType::HasPrivateBrand: 
-        hasConstantIdentifier = false;
-        baseGPR = BaselineInByValRegisters::base;
-        regs.propertyGPR = BaselineInByValRegisters::property;
-        valueGPR = BaselineInByValRegisters::result;
-        m_stubInfoGPR = BaselineInByValRegisters::stubInfo;
-        break;
-    case AccessType::InById:
-        hasConstantIdentifier = true;
-        regs.thisGPR = InvalidGPRReg;
-        baseGPR = BaselineInByIdRegisters::base;
-        valueGPR = BaselineInByIdRegisters::result;
-        m_stubInfoGPR = BaselineInByIdRegisters::stubInfo;
-        break;
-    case AccessType::TryGetById:
-    case AccessType::GetByIdDirect:
-    case AccessType::GetById:
-        hasConstantIdentifier = true;
-        regs.thisGPR = InvalidGPRReg;
-        baseGPR = BaselineGetByIdRegisters::base;
-        valueGPR = BaselineGetByIdRegisters::result;
-        m_stubInfoGPR = BaselineGetByIdRegisters::stubInfo;
-        break;
-    case AccessType::GetByIdWithThis:
-        hasConstantIdentifier = true;
-        baseGPR = BaselineGetByIdWithThisRegisters::base;
-        valueGPR = BaselineGetByIdWithThisRegisters::result;
-        regs.thisGPR = BaselineGetByIdWithThisRegisters::thisValue;
-        m_stubInfoGPR = BaselineGetByIdWithThisRegisters::stubInfo;
-        break;
-    case AccessType::PutById:
-        hasConstantIdentifier = true;
-        regs.thisGPR = InvalidGPRReg;
-        baseGPR = BaselinePutByIdRegisters::base;
-        valueGPR = BaselinePutByIdRegisters::value;
-        m_stubInfoGPR = BaselinePutByIdRegisters::stubInfo;
-        break;
-    case AccessType::PutByVal:
-    case AccessType::PutPrivateName:
-        hasConstantIdentifier = false;
-        baseGPR = BaselinePutByValRegisters::base;
-        regs.propertyGPR = BaselinePutByValRegisters::property;
-        valueGPR = BaselinePutByValRegisters::value;
-        m_stubInfoGPR = BaselinePutByValRegisters::stubInfo;
-        if (accessType == AccessType::PutByVal)
-            m_arrayProfileGPR = BaselinePutByValRegisters::profile;
-        break;
-    case AccessType::SetPrivateBrand:
-    case AccessType::CheckPrivateBrand:
-        hasConstantIdentifier = false;
-        valueGPR = InvalidGPRReg;
-        baseGPR = BaselinePrivateBrandRegisters::base;
-        regs.brandGPR = BaselinePrivateBrandRegisters::brand;
-        m_stubInfoGPR = BaselinePrivateBrandRegisters::stubInfo;
-        break;
-    }
-#else
-    UNUSED_PARAM(unlinkedStubInfo);
-    ASSERT_NOT_REACHED();
-#endif
-}
-
</del><span class="cx"> #if ASSERT_ENABLED
</span><span class="cx"> void StructureStubInfo::checkConsistency()
</span><span class="cx"> {
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeStructureStubInfoh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/StructureStubInfo.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/StructureStubInfo.h 2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/StructureStubInfo.h    2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -32,7 +32,6 @@
</span><span class="cx"> #include "JITStubRoutine.h"
</span><span class="cx"> #include "MacroAssembler.h"
</span><span class="cx"> #include "Options.h"
</span><del>-#include "PutKind.h"
</del><span class="cx"> #include "RegisterSet.h"
</span><span class="cx"> #include "Structure.h"
</span><span class="cx"> #include "StructureSet.h"
</span><span class="lines">@@ -57,7 +56,6 @@
</span><span class="cx">     GetByVal,
</span><span class="cx">     PutById,
</span><span class="cx">     PutByVal,
</span><del>-    PutPrivateName,
</del><span class="cx">     InById,
</span><span class="cx">     InByVal,
</span><span class="cx">     HasPrivateName,
</span><span class="lines">@@ -80,8 +78,6 @@
</span><span class="cx">     StringLength
</span><span class="cx"> };
</span><span class="cx"> 
</span><del>-struct UnlinkedStructureStubInfo;
-
</del><span class="cx"> class StructureStubInfo {
</span><span class="cx">     WTF_MAKE_NONCOPYABLE(StructureStubInfo);
</span><span class="cx">     WTF_MAKE_FAST_ALLOCATED;
</span><span class="lines">@@ -102,8 +98,6 @@
</span><span class="cx">     void deref();
</span><span class="cx">     void aboutToDie();
</span><span class="cx"> 
</span><del>-    void initializeFromUnlinkedStructureStubInfo(CodeBlock*, UnlinkedStructureStubInfo&);
-
</del><span class="cx">     DECLARE_VISIT_AGGREGATE;
</span><span class="cx"> 
</span><span class="cx">     // Check if the stub has weak references that are dead. If it does, then it resets itself,
</span><span class="lines">@@ -331,19 +325,13 @@
</span><span class="cx"> 
</span><span class="cx"> public:
</span><span class="cx">     CodeOrigin codeOrigin;
</span><del>-    PropertyOffset byIdSelfOffset;
-    static ptrdiff_t offsetOfByIdSelfOffset() { return OBJECT_OFFSETOF(StructureStubInfo, byIdSelfOffset); }
-    static ptrdiff_t offsetOfInlineAccessBaseStructure() { return OBJECT_OFFSETOF(StructureStubInfo, m_inlineAccessBaseStructure); }
</del><span class="cx">     union {
</span><ins>+        struct {
+            PropertyOffset offset;
+        } byIdSelf;
</ins><span class="cx">         PolymorphicAccess* stub;
</span><span class="cx">     } u;
</span><del>-    Structure* inlineAccessBaseStructure(VM& vm)
-    {
-        if (!m_inlineAccessBaseStructure)
-            return nullptr;
-        return vm.getStructure(m_inlineAccessBaseStructure);
-    }
-    StructureID m_inlineAccessBaseStructure { 0 };
</del><ins>+    WriteBarrier<Structure> m_inlineAccessBaseStructure;
</ins><span class="cx"> private:
</span><span class="cx">     CacheableIdentifier m_identifier;
</span><span class="cx">     // Represents those structures that already have buffered AccessCases in the PolymorphicAccess.
</span><span class="lines">@@ -364,7 +352,6 @@
</span><span class="cx">     MacroAssemblerCodePtr<JITStubRoutinePtrTag> m_codePtr;
</span><span class="cx"> 
</span><span class="cx">     static ptrdiff_t offsetOfCodePtr() { return OBJECT_OFFSETOF(StructureStubInfo, m_codePtr); }
</span><del>-    static ptrdiff_t offsetOfDoneLocation() { return OBJECT_OFFSETOF(StructureStubInfo, doneLocation); }
</del><span class="cx">     static ptrdiff_t offsetOfSlowPathStartLocation() { return OBJECT_OFFSETOF(StructureStubInfo, slowPathStartLocation); }
</span><span class="cx">     static ptrdiff_t offsetOfSlowOperation() { return OBJECT_OFFSETOF(StructureStubInfo, m_slowOperation); }
</span><span class="cx">     static ptrdiff_t offsetOfCountdown() { return OBJECT_OFFSETOF(StructureStubInfo, countdown); }
</span><span class="lines">@@ -460,17 +447,6 @@
</span><span class="cx">     }
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-struct UnlinkedStructureStubInfo {
-    AccessType accessType;
-    PutKind putKind;
-    PrivateFieldPutKind privateFieldPutKind { PrivateFieldPutKind::none() };
-    ECMAMode ecmaMode { ECMAMode::sloppy() };
-    BytecodeIndex bytecodeIndex;
-    CodeLocationLabel<JITStubRoutinePtrTag> start; // This is either the start of the inline IC for *byId caches, or the location of the patchable jump for 'instanceof' caches.
-    CodeLocationLabel<JSInternalPtrTag> doneLocation;
-    CodeLocationLabel<JITStubRoutinePtrTag> slowPathStartLocation;
-};
-
</del><span class="cx"> #else
</span><span class="cx"> 
</span><span class="cx"> class StructureStubInfo;
</span></span></pre></div>
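<p>This header change moves the self-access property offset back into the union (u.byIdSelf.offset) and replaces the raw StructureID plus VM lookup with a WriteBarrier&lt;Structure&gt;, so the structure is held directly. A compilable sketch of the discriminated-union layout this relies on; the types and names are simplified stand-ins, not the real StructureStubInfo:</p>
<pre>
#include &lt;cstdio&gt;

// Illustrative model of the union layout: the per-cache-type payload shares
// storage, discriminated by the cache type tag. Not the real JSC class.
struct PolymorphicStub { /* ... */ };

enum class CacheType { Unset, GetByIdSelf, PutByIdReplace, Stub };

struct StubInfoModel {
    CacheType cacheType { CacheType::Unset };
    union {
        struct {
            int offset; // property offset for the self-access fast path
        } byIdSelf;
        PolymorphicStub* stub; // only valid while cacheType == Stub
    } u;
};

int main()
{
    StubInfoModel info;
    info.cacheType = CacheType::GetByIdSelf;
    info.u.byIdSelf.offset = 4; // only meaningful for the *ByIdSelf cache types
    if (info.cacheType == CacheType::GetByIdSelf)
        std::printf("self access at offset %d\n", info.u.byIdSelf.offset);
}
</pre>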
<a id="trunkSourceJavaScriptCorebytecodeSuperSamplercpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/SuperSampler.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/SuperSampler.cpp    2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/SuperSampler.cpp       2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -84,7 +84,7 @@
</span><span class="cx">     double percentage = 100.0 * in / (in + out);
</span><span class="cx">     if (percentage != percentage)
</span><span class="cx">         percentage = 0.0;
</span><del>-    dataLog("Percent time behind super sampler flag: ", percentage, "%\n");
</del><ins>+    dataLog("Percent time behind super sampler flag: ", percentage, "\n");
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void enableSuperSampler()
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeUnlinkedCodeBlockcpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp       2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp  2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -27,7 +27,6 @@
</span><span class="cx"> 
</span><span class="cx"> #include "UnlinkedCodeBlock.h"
</span><span class="cx"> 
</span><del>-#include "BaselineJITCode.h"
</del><span class="cx"> #include "BytecodeLivenessAnalysis.h"
</span><span class="cx"> #include "BytecodeStructs.h"
</span><span class="cx"> #include "ClassInfo.h"
</span><span class="lines">@@ -79,8 +78,6 @@
</span><span class="cx">         createRareDataIfNecessary(locker);
</span><span class="cx">         m_rareData->m_privateBrandRequirement = static_cast<unsigned>(PrivateBrandRequirement::Needed);
</span><span class="cx">     }
</span><del>-
-    m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()));
</del><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> template<typename Visitor>
</span><span class="lines">@@ -319,50 +316,8 @@
</span><span class="cx">     return m_outOfLineJumpTargets.get(bytecodeOffset);
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-#if ASSERT_ENABLED
-bool UnlinkedCodeBlock::hasIdentifier(UniquedStringImpl* uid)
</del><ins>+void UnlinkedCodeBlock::allocateSharedProfiles()
</ins><span class="cx"> {
</span><del>-    if (numberOfIdentifiers() > 100) {
-        if (numberOfIdentifiers() != m_cachedIdentifierUids.size()) {
-            Locker locker(m_cachedIdentifierUidsLock);
-            HashSet<UniquedStringImpl*> cachedIdentifierUids;
-            for (unsigned i = 0; i < numberOfIdentifiers(); ++i) {
-                const Identifier& identifier = this->identifier(i);
-                cachedIdentifierUids.add(identifier.impl());
-            }
-
-            WTF::storeStoreFence();
-            m_cachedIdentifierUids = WTFMove(cachedIdentifierUids);
-        }
-
-        return m_cachedIdentifierUids.contains(uid);
-    }
-
-    for (unsigned i = 0; i < numberOfIdentifiers(); ++i) {
-        if (identifier(i).impl() == uid)
-            return true;
-    }
-    return false;
-}
-#endif
-
-int32_t UnlinkedCodeBlock::thresholdForJIT(int32_t threshold)
-{
-    switch (didOptimize()) {
-    case TriState::Indeterminate:
-        return threshold;
-    case TriState::False:
-        return threshold * 4;
-    case TriState::True:
-        return threshold / 2;
-    }
-    ASSERT_NOT_REACHED();
-    return threshold;
-}
-
-
-void UnlinkedCodeBlock::allocateSharedProfiles(unsigned numBinaryArithProfiles, unsigned numUnaryArithProfiles)
-{
</del><span class="cx">     RELEASE_ASSERT(!m_metadata->isFinalized());
</span><span class="cx"> 
</span><span class="cx">     {
</span><span class="lines">@@ -382,17 +337,16 @@
</span><span class="cx">     if (m_metadata->hasMetadata()) {
</span><span class="cx">         unsigned numberOfArrayProfiles = 0;
</span><span class="cx"> 
</span><del>-#define COUNT(__op) numberOfArrayProfiles += m_metadata->numEntries<__op>();
</del><ins>+#define COUNT(__op) \
+        numberOfArrayProfiles += m_metadata->numEntries<__op>();
</ins><span class="cx">         FOR_EACH_OPCODE_WITH_ARRAY_PROFILE(COUNT)
</span><span class="cx">         FOR_EACH_OPCODE_WITH_LLINT_CALL_LINK_INFO(COUNT)
</span><span class="cx"> #undef COUNT
</span><span class="cx">         numberOfArrayProfiles += m_metadata->numEntries<OpIteratorNext>();
</span><span class="cx">         numberOfArrayProfiles += m_metadata->numEntries<OpGetById>();
</span><ins>+
</ins><span class="cx">         m_arrayProfiles = FixedVector<UnlinkedArrayProfile>(numberOfArrayProfiles);
</span><span class="cx">     }
</span><del>-
-    m_binaryArithProfiles = FixedVector<BinaryArithProfile>(numBinaryArithProfiles);
-    m_unaryArithProfiles = FixedVector<UnaryArithProfile>(numUnaryArithProfiles);
</del><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> } // namespace JSC
</span></span></pre></div>
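<p>allocateSharedProfiles() counts array profiles with an X-macro: each opcode in the FOR_EACH_* list expands into one accumulation statement. A self-contained sketch of the same pattern, with a made-up opcode list and made-up entry counts:</p>
<pre>
#include &lt;cstdio&gt;

// Sketch of the X-macro counting pattern; the opcode list and the
// per-opcode entry counts below are invented for illustration.
#define FOR_EACH_OPCODE_WITH_ARRAY_PROFILE_DEMO(macro) \
    macro(OpGetByVal) \
    macro(OpPutByVal) \
    macro(OpCall)

static unsigned numEntriesOpGetByVal() { return 4; }
static unsigned numEntriesOpPutByVal() { return 2; }
static unsigned numEntriesOpCall() { return 7; }

int main()
{
    unsigned numberOfArrayProfiles = 0;
// Each macro(Op) in the list above becomes one "+=" statement here.
#define COUNT(__op) numberOfArrayProfiles += numEntries##__op();
    FOR_EACH_OPCODE_WITH_ARRAY_PROFILE_DEMO(COUNT)
#undef COUNT
    std::printf("%u\n", numberOfArrayProfiles); // 13
}
</pre>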
<a id="trunkSourceJavaScriptCorebytecodeUnlinkedCodeBlockh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h 2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h    2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -25,12 +25,10 @@
</span><span class="cx"> 
</span><span class="cx"> #pragma once
</span><span class="cx"> 
</span><del>-#include "ArithProfile.h"
</del><span class="cx"> #include "ArrayProfile.h"
</span><span class="cx"> #include "BytecodeConventions.h"
</span><span class="cx"> #include "CodeType.h"
</span><span class="cx"> #include "DFGExitProfile.h"
</span><del>-#include "ExecutionCounter.h"
</del><span class="cx"> #include "ExpressionRangeInfo.h"
</span><span class="cx"> #include "HandlerInfo.h"
</span><span class="cx"> #include "Identifier.h"
</span><span class="lines">@@ -66,7 +64,6 @@
</span><span class="cx"> class UnlinkedCodeBlockGenerator;
</span><span class="cx"> class UnlinkedFunctionCodeBlock;
</span><span class="cx"> class UnlinkedFunctionExecutable;
</span><del>-class BaselineJITCode;
</del><span class="cx"> struct ExecutableInfo;
</span><span class="cx"> enum class LinkTimeConstant : int32_t;
</span><span class="cx"> 
</span><span class="lines">@@ -178,7 +175,6 @@
</span><span class="cx"> 
</span><span class="cx">     const FixedVector<WriteBarrier<Unknown>>& constantRegisters() { return m_constantRegisters; }
</span><span class="cx">     const WriteBarrier<Unknown>& constantRegister(VirtualRegister reg) const { return m_constantRegisters[reg.toConstantIndex()]; }
</span><del>-    WriteBarrier<Unknown>& constantRegister(VirtualRegister reg) { return m_constantRegisters[reg.toConstantIndex()]; }
</del><span class="cx">     ALWAYS_INLINE JSValue getConstant(VirtualRegister reg) const { return m_constantRegisters[reg.toConstantIndex()].get(); }
</span><span class="cx">     const FixedVector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; }
</span><span class="cx"> 
</span><span class="lines">@@ -211,15 +207,6 @@
</span><span class="cx">     JSParserScriptMode scriptMode() const { return static_cast<JSParserScriptMode>(m_scriptMode); }
</span><span class="cx"> 
</span><span class="cx">     const InstructionStream& instructions() const;
</span><del>-    const Instruction* instructionAt(BytecodeIndex index) const { return instructions().at(index).ptr(); }
-    unsigned bytecodeOffset(const Instruction* instruction)
-    {
-        const auto* instructionsBegin = instructions().at(0).ptr();
-        const auto* instructionsEnd = reinterpret_cast<const Instruction*>(reinterpret_cast<uintptr_t>(instructionsBegin) + instructions().size());
-        RELEASE_ASSERT(instruction >= instructionsBegin && instruction < instructionsEnd);
-        return instruction - instructionsBegin;
-    }
-    unsigned instructionsSize() const { return instructions().size(); }
</del><span class="cx"> 
</span><span class="cx">     unsigned numCalleeLocals() const { return m_numCalleeLocals; }
</span><span class="cx">     unsigned numVars() const { return m_numVars; }
</span><span class="lines">@@ -347,21 +334,10 @@
</span><span class="cx">         return m_metadata->sizeInBytes();
</span><span class="cx">     }
</span><span class="cx"> 
</span><del>-    bool loopHintsAreEligibleForFuzzingEarlyReturn()
-    {
-        // Some builtins are required to always complete the loops they run.
-        return !isBuiltinFunction();
-    }
-    void allocateSharedProfiles(unsigned numBinaryArithProfiles, unsigned numUnaryArithProfiles);
</del><ins>+    void allocateSharedProfiles();
</ins><span class="cx">     UnlinkedValueProfile& unlinkedValueProfile(unsigned index) { return m_valueProfiles[index]; }
</span><span class="cx">     UnlinkedArrayProfile& unlinkedArrayProfile(unsigned index) { return m_arrayProfiles[index]; }
</span><span class="cx"> 
</span><del>-#if ASSERT_ENABLED
-    bool hasIdentifier(UniquedStringImpl*);
-#endif
-
-    int32_t thresholdForJIT(int32_t threshold);
-
</del><span class="cx"> protected:
</span><span class="cx">     UnlinkedCodeBlock(VM&, Structure*, CodeType, const ExecutableInfo&, OptionSet<CodeGenerationMode>);
</span><span class="cx"> 
</span><span class="lines">@@ -421,9 +397,6 @@
</span><span class="cx">     unsigned m_lexicalScopeFeatures : 4;
</span><span class="cx"> public:
</span><span class="cx">     ConcurrentJSLock m_lock;
</span><del>-#if ENABLE(JIT)
-    RefPtr<BaselineJITCode> m_unlinkedBaselineCode;
-#endif
</del><span class="cx"> private:
</span><span class="cx">     CodeFeatures m_features { 0 };
</span><span class="cx">     SourceParseMode m_parseMode;
</span><span class="lines">@@ -440,6 +413,7 @@
</span><span class="cx">     std::unique_ptr<InstructionStream> m_instructions;
</span><span class="cx">     std::unique_ptr<BytecodeLivenessAnalysis> m_liveness;
</span><span class="cx"> 
</span><ins>+
</ins><span class="cx"> #if ENABLE(DFG_JIT)
</span><span class="cx">     DFG::ExitProfile m_exitProfile;
</span><span class="cx"> #endif
</span><span class="lines">@@ -484,17 +458,7 @@
</span><span class="cx">     {
</span><span class="cx">         return outOfLineJumpOffset(instruction.offset());
</span><span class="cx">     }
</span><del>-    int outOfLineJumpOffset(const Instruction* pc)
-    {
-        unsigned bytecodeOffset = this->bytecodeOffset(pc);
-        return outOfLineJumpOffset(bytecodeOffset);
-    }
</del><span class="cx"> 
</span><del>-    BinaryArithProfile& binaryArithProfile(unsigned i) { return m_binaryArithProfiles[i]; }
-    UnaryArithProfile& unaryArithProfile(unsigned i) { return m_unaryArithProfiles[i]; }
-
-    BaselineExecutionCounter& llintExecuteCounter() { return m_llintExecuteCounter; }
-
</del><span class="cx"> private:
</span><span class="cx">     using OutOfLineJumpTargets = HashMap<InstructionStream::Offset, int>;
</span><span class="cx"> 
</span><span class="lines">@@ -501,17 +465,9 @@
</span><span class="cx">     OutOfLineJumpTargets m_outOfLineJumpTargets;
</span><span class="cx">     std::unique_ptr<RareData> m_rareData;
</span><span class="cx">     FixedVector<ExpressionRangeInfo> m_expressionInfo;
</span><del>-    BaselineExecutionCounter m_llintExecuteCounter;
</del><span class="cx">     FixedVector<UnlinkedValueProfile> m_valueProfiles;
</span><span class="cx">     FixedVector<UnlinkedArrayProfile> m_arrayProfiles;
</span><del>-    FixedVector<BinaryArithProfile> m_binaryArithProfiles;
-    FixedVector<UnaryArithProfile> m_unaryArithProfiles;
</del><span class="cx"> 
</span><del>-#if ASSERT_ENABLED
-    Lock m_cachedIdentifierUidsLock;
-    HashSet<UniquedStringImpl*> m_cachedIdentifierUids;
-#endif
-
</del><span class="cx"> protected:
</span><span class="cx">     DECLARE_VISIT_CHILDREN;
</span><span class="cx">     static size_t estimatedSize(JSCell*, VM&);
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeUnlinkedCodeBlockGeneratorcpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/UnlinkedCodeBlockGenerator.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/UnlinkedCodeBlockGenerator.cpp      2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/UnlinkedCodeBlockGenerator.cpp 2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -119,7 +119,7 @@
</span><span class="cx">     {
</span><span class="cx">         Locker locker { m_codeBlock->cellLock() };
</span><span class="cx">         m_codeBlock->m_instructions = WTFMove(instructions);
</span><del>-        m_codeBlock->allocateSharedProfiles(m_numBinaryArithProfiles, m_numUnaryArithProfiles);
</del><ins>+        m_codeBlock->allocateSharedProfiles();
</ins><span class="cx">         m_codeBlock->m_metadata->finalize();
</span><span class="cx"> 
</span><span class="cx">         m_codeBlock->m_jumpTargets = WTFMove(m_jumpTargets);
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeUnlinkedCodeBlockGeneratorh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/UnlinkedCodeBlockGenerator.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/UnlinkedCodeBlockGenerator.h        2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/UnlinkedCodeBlockGenerator.h   2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -200,9 +200,6 @@
</span><span class="cx"> 
</span><span class="cx">     void dump(PrintStream&) const;
</span><span class="cx"> 
</span><del>-    unsigned addBinaryArithProfile() { return m_numBinaryArithProfiles++; }
-    unsigned addUnaryArithProfile() { return m_numUnaryArithProfiles++; }
-
</del><span class="cx"> private:
</span><span class="cx">     VM& m_vm;
</span><span class="cx">     Strong<UnlinkedCodeBlock> m_codeBlock;
</span><span class="lines">@@ -224,8 +221,6 @@
</span><span class="cx">     Vector<InstructionStream::Offset> m_opProfileControlFlowBytecodeOffsets;
</span><span class="cx">     Vector<BitVector> m_bitVectors;
</span><span class="cx">     Vector<IdentifierSet> m_constantIdentifierSets;
</span><del>-    unsigned m_numBinaryArithProfiles { 0 };
-    unsigned m_numUnaryArithProfiles { 0 };
</del><span class="cx"> };
</span><span class="cx"> 
</span><span class="cx"> } // namespace JSC
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeUnlinkedMetadataTableh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/UnlinkedMetadataTable.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/UnlinkedMetadataTable.h     2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/UnlinkedMetadataTable.h        2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -64,15 +64,6 @@
</span><span class="cx">         return adoptRef(*new UnlinkedMetadataTable);
</span><span class="cx">     }
</span><span class="cx"> 
</span><del>-    template <typename Opcode>
-    uintptr_t offsetInMetadataTable(const Opcode& opcode)
-    {
-        ASSERT(m_isFinalized);
-        uintptr_t baseTypeOffset = m_is32Bit ? offsetTable32()[Opcode::opcodeID] : offsetTable16()[Opcode::opcodeID];
-        baseTypeOffset = roundUpToMultipleOf(alignof(typename Opcode::Metadata), baseTypeOffset);
-        return baseTypeOffset + sizeof(typename Opcode::Metadata) * opcode.m_metadataID;
-    }
-
</del><span class="cx">     template <typename Bytecode>
</span><span class="cx">     unsigned numEntries();
</span><span class="cx"> 
</span><span class="lines">@@ -116,7 +107,6 @@
</span><span class="cx">         return s_offset16TableSize;
</span><span class="cx">     }
</span><span class="cx"> 
</span><del>-
</del><span class="cx">     using Offset32 = uint32_t;
</span><span class="cx">     using Offset16 = uint16_t;
</span><span class="cx"> 
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecodeValueProfileh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecode/ValueProfile.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecode/ValueProfile.h      2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecode/ValueProfile.h 2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -45,11 +45,13 @@
</span><span class="cx"> 
</span><span class="cx">     static constexpr unsigned numberOfBuckets = numberOfBucketsArgument;
</span><span class="cx">     static constexpr unsigned numberOfSpecFailBuckets = 1;
</span><ins>+    static constexpr unsigned bucketIndexMask = numberOfBuckets - 1;
</ins><span class="cx">     static constexpr unsigned totalNumberOfBuckets = numberOfBuckets + numberOfSpecFailBuckets;
</span><span class="cx">     
</span><span class="cx">     ValueProfileBase()
</span><span class="cx">     {
</span><del>-        clearBuckets();
</del><ins>+        for (unsigned i = 0; i < totalNumberOfBuckets; ++i)
+            m_buckets[i] = JSValue::encode(JSValue());
</ins><span class="cx">     }
</span><span class="cx">     
</span><span class="cx">     EncodedJSValue* specFailBucket(unsigned i)
</span><span class="lines">@@ -57,12 +59,6 @@
</span><span class="cx">         ASSERT(numberOfBuckets + i < totalNumberOfBuckets);
</span><span class="cx">         return m_buckets + numberOfBuckets + i;
</span><span class="cx">     }
</span><del>-
-    void clearBuckets()
-    {
-        for (unsigned i = 0; i < totalNumberOfBuckets; ++i)
-            m_buckets[i] = JSValue::encode(JSValue());
-    }
</del><span class="cx">     
</span><span class="cx">     const ClassInfo* classInfo(unsigned bucket) const
</span><span class="cx">     {
</span><span class="lines">@@ -127,6 +123,8 @@
</span><span class="cx">         }
</span><span class="cx">     }
</span><span class="cx">     
</span><ins>+    // Updates the prediction and returns the new one. Never call this from any thread
+    // that isn't executing the code.
</ins><span class="cx">     SpeculatedType computeUpdatedPrediction(const ConcurrentJSLocker&)
</span><span class="cx">     {
</span><span class="cx">         for (unsigned i = 0; i < totalNumberOfBuckets; ++i) {
</span><span class="lines">@@ -141,7 +139,7 @@
</span><span class="cx">         
</span><span class="cx">         return m_prediction;
</span><span class="cx">     }
</span><del>-
</del><ins>+    
</ins><span class="cx">     EncodedJSValue m_buckets[totalNumberOfBuckets];
</span><span class="cx"> 
</span><span class="cx">     SpeculatedType m_prediction { SpecNone };
</span><span class="lines">@@ -163,7 +161,6 @@
</span><span class="cx"> 
</span><span class="cx"> struct ValueProfile : public ValueProfileWithLogNumberOfBuckets<0> {
</span><span class="cx">     ValueProfile() : ValueProfileWithLogNumberOfBuckets<0>() { }
</span><del>-    static ptrdiff_t offsetOfFirstBucket() { return OBJECT_OFFSETOF(ValueProfile, m_buckets[0]); }
</del><span class="cx"> };
</span><span class="cx"> 
</span><span class="cx"> struct ValueProfileAndVirtualRegister : public ValueProfile {
</span></span></pre></div>
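<p>The new bucketIndexMask works because the bucket count is a power of two (it comes from ValueProfileWithLogNumberOfBuckets), so index &amp; (numberOfBuckets - 1) is a cheap replacement for a modulo when picking a bucket. A small sketch with illustrative values (the real ValueProfile uses a different bucket count):</p>
<pre>
#include &lt;cstdio&gt;

// Power-of-two bucket selection via masking; values are illustrative only.
constexpr unsigned logNumberOfBuckets = 3;
constexpr unsigned numberOfBuckets = 1u &lt;&lt; logNumberOfBuckets; // 8
constexpr unsigned bucketIndexMask = numberOfBuckets - 1;      // 0b111

int main()
{
    for (unsigned counter = 0; counter &lt; 10; ++counter)
        std::printf("value %u -&gt; bucket %u\n", counter, counter &amp; bucketIndexMask);
}
</pre>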
<a id="trunkSourceJavaScriptCorebytecompilerBytecodeGeneratorcpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp   2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp      2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -1598,7 +1598,7 @@
</span><span class="cx">         emitUnaryOp<OpNot>(dst, src);
</span><span class="cx">         break;
</span><span class="cx">     case op_negate:
</span><del>-        OpNegate::emit(this, dst, src, m_codeBlock->addUnaryArithProfile(), type);
</del><ins>+        OpNegate::emit(this, dst, src, type);
</ins><span class="cx">         break;
</span><span class="cx">     case op_bitnot:
</span><span class="cx">         emitUnaryOp<OpBitnot>(dst, src);
</span><span class="lines">@@ -1696,13 +1696,13 @@
</span><span class="cx"> 
</span><span class="cx"> RegisterID* BytecodeGenerator::emitInc(RegisterID* srcDst)
</span><span class="cx"> {
</span><del>-    OpInc::emit(this, srcDst, m_codeBlock->addUnaryArithProfile());
</del><ins>+    OpInc::emit(this, srcDst);
</ins><span class="cx">     return srcDst;
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> RegisterID* BytecodeGenerator::emitDec(RegisterID* srcDst)
</span><span class="cx"> {
</span><del>-    OpDec::emit(this, srcDst, m_codeBlock->addUnaryArithProfile());
</del><ins>+    OpDec::emit(this, srcDst);
</ins><span class="cx">     return srcDst;
</span><span class="cx"> }
</span><span class="cx"> 
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorebytecompilerBytecodeGeneratorh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h     2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h        2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -682,7 +682,7 @@
</span><span class="cx">             RegisterID*>
</span><span class="cx">         emitBinaryOp(RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes types)
</span><span class="cx">         {
</span><del>-            BinaryOp::emit(this, dst, src1, src2, m_codeBlock->addBinaryArithProfile(), types);
</del><ins>+            BinaryOp::emit(this, dst, src1, src2, types);
</ins><span class="cx">             return dst;
</span><span class="cx">         }
</span><span class="cx"> 
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoredfgDFGCommonDatah"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/dfg/DFGCommonData.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/dfg/DFGCommonData.h  2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/dfg/DFGCommonData.h     2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -27,7 +27,6 @@
</span><span class="cx"> 
</span><span class="cx"> #if ENABLE(DFG_JIT)
</span><span class="cx"> 
</span><del>-#include "BaselineJITCode.h"
</del><span class="cx"> #include "CodeBlockJettisoningWatchpoint.h"
</span><span class="cx"> #include "DFGAdaptiveInferredPropertyValueWatchpoint.h"
</span><span class="cx"> #include "DFGAdaptiveStructureWatchpoint.h"
</span><span class="lines">@@ -35,9 +34,7 @@
</span><span class="cx"> #include "DFGJumpReplacement.h"
</span><span class="cx"> #include "DFGOSREntry.h"
</span><span class="cx"> #include "InlineCallFrameSet.h"
</span><del>-#include "JITMathIC.h"
</del><span class="cx"> #include "JSCast.h"
</span><del>-#include "PCToCodeOriginMap.h"
</del><span class="cx"> #include "ProfilerCompilation.h"
</span><span class="cx"> #include "RecordedStatuses.h"
</span><span class="cx"> #include <wtf/Bag.h>
</span><span class="lines">@@ -73,7 +70,7 @@
</span><span class="cx">     WriteBarrier<JSCell> m_to;
</span><span class="cx"> };
</span><span class="cx">         
</span><del>-class CommonData : public MathICHolder {
</del><ins>+class CommonData {
</ins><span class="cx">     WTF_MAKE_NONCOPYABLE(CommonData);
</span><span class="cx"> public:
</span><span class="cx">     CommonData()
</span><span class="lines">@@ -119,7 +116,6 @@
</span><span class="cx">     FixedVector<CodeBlockJettisoningWatchpoint> m_watchpoints;
</span><span class="cx">     FixedVector<AdaptiveStructureWatchpoint> m_adaptiveStructureWatchpoints;
</span><span class="cx">     FixedVector<AdaptiveInferredPropertyValueWatchpoint> m_adaptiveInferredPropertyValueWatchpoints;
</span><del>-    std::unique_ptr<PCToCodeOriginMap> m_pcToCodeOriginMap;
</del><span class="cx">     RecordedStatuses recordedStatuses;
</span><span class="cx">     Vector<JumpReplacement> m_jumpReplacements;
</span><span class="cx">     
</span></span></pre></div>
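<p>This hunk drops CommonData's MathICHolder base (and its PCToCodeOriginMap member) because math ICs now live on the CodeBlock itself. The holder pattern being relocated is essentially a set of bag allocators that hand out stable pointers keyed by an arith profile. The sketch below is a simplified stand-in: the real code uses WTF::Bag and JSC's JITMathIC types, which these classes only imitate.</p>
<pre>
#include <cstdio>
#include <forward_list>
#include <utility>

struct BinaryArithProfile {};

// A bag hands out pointers that stay valid for the holder's lifetime,
// which matters because JIT code bakes the pointer into machine code.
template<typename T>
class Bag {
public:
    template<typename... Args>
    T* add(Args&&... args)
    {
        m_items.emplace_front(std::forward<Args>(args)...);
        return &m_items.front();
    }
private:
    std::forward_list<T> m_items;
};

struct JITAddIC {
    explicit JITAddIC(BinaryArithProfile* profile) : m_profile(profile) {}
    BinaryArithProfile* m_profile;
};

class MathICHolder {
public:
    JITAddIC* addJITAddIC(BinaryArithProfile* profile) { return m_addICs.add(profile); }
private:
    Bag<JITAddIC> m_addICs;
};

int main()
{
    BinaryArithProfile profile;
    MathICHolder holder;
    JITAddIC* ic = holder.addJITAddIC(&profile); // stable pointer, safe to embed in generated code
    std::printf("ic=%p\n", static_cast<void*>(ic));
    return 0;
}
</pre>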
<a id="trunkSourceJavaScriptCoredfgDFGJITCodecpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/dfg/DFGJITCode.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/dfg/DFGJITCode.cpp   2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/dfg/DFGJITCode.cpp      2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -30,7 +30,6 @@
</span><span class="cx"> 
</span><span class="cx"> #include "CodeBlock.h"
</span><span class="cx"> #include "FTLForOSREntryJITCode.h"
</span><del>-#include "JumpTable.h"
</del><span class="cx"> 
</span><span class="cx"> namespace JSC { namespace DFG {
</span><span class="cx"> 
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoredfgDFGJITCodeh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/dfg/DFGJITCode.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/dfg/DFGJITCode.h     2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/dfg/DFGJITCode.h        2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -42,9 +42,6 @@
</span><span class="cx"> 
</span><span class="cx"> class TrackedReferences;
</span><span class="cx"> 
</span><del>-struct SimpleJumpTable;
-struct StringJumpTable;
-
</del><span class="cx"> namespace DFG {
</span><span class="cx"> 
</span><span class="cx"> class JITCompiler;
</span><span class="lines">@@ -104,8 +101,6 @@
</span><span class="cx">     std::optional<CodeOrigin> findPC(CodeBlock*, void* pc) final;
</span><span class="cx"> 
</span><span class="cx">     using DirectJITCode::initializeCodeRefForDFG;
</span><del>-
-    PCToCodeOriginMap* pcToCodeOriginMap() override { return common.m_pcToCodeOriginMap.get(); }
</del><span class="cx">     
</span><span class="cx"> private:
</span><span class="cx">     friend class JITCompiler; // Allow JITCompiler to call setCodeRef().
</span><span class="lines">@@ -115,8 +110,6 @@
</span><span class="cx">     FixedVector<DFG::OSREntryData> m_osrEntry;
</span><span class="cx">     FixedVector<DFG::OSRExit> m_osrExit;
</span><span class="cx">     FixedVector<DFG::SpeculationRecovery> m_speculationRecovery;
</span><del>-    FixedVector<SimpleJumpTable> m_switchJumpTables;
-    FixedVector<StringJumpTable> m_stringSwitchJumpTables;
</del><span class="cx">     DFG::VariableEventStream variableEventStream;
</span><span class="cx">     DFG::MinifiedGraph minifiedDFG;
</span><span class="cx"> 
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoredfgDFGJITCompilercpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp       2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp  2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -185,12 +185,13 @@
</span><span class="cx">     
</span><span class="cx">     m_graph.registerFrozenValues();
</span><span class="cx"> 
</span><del>-    ASSERT(m_jitCode->m_stringSwitchJumpTables.isEmpty());
-    ASSERT(m_jitCode->m_switchJumpTables.isEmpty());
-    if (!m_graph.m_stringSwitchJumpTables.isEmpty()) 
-        m_jitCode->m_stringSwitchJumpTables = WTFMove(m_graph.m_stringSwitchJumpTables);
-    if (!m_graph.m_switchJumpTables.isEmpty())
-        m_jitCode->m_switchJumpTables = WTFMove(m_graph.m_switchJumpTables);
</del><ins>+    if (!m_graph.m_stringSwitchJumpTables.isEmpty() || !m_graph.m_switchJumpTables.isEmpty()) {
+        ConcurrentJSLocker locker(m_codeBlock->m_lock);
+        if (!m_graph.m_stringSwitchJumpTables.isEmpty())
+            m_codeBlock->ensureJITData(locker).m_stringSwitchJumpTables = WTFMove(m_graph.m_stringSwitchJumpTables);
+        if (!m_graph.m_switchJumpTables.isEmpty())
+            m_codeBlock->ensureJITData(locker).m_switchJumpTables = WTFMove(m_graph.m_switchJumpTables);
+    }
</ins><span class="cx"> 
</span><span class="cx">     for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
</span><span class="cx">         SwitchData& data = **iter;
</span><span class="lines">@@ -198,12 +199,12 @@
</span><span class="cx">         case SwitchChar:
</span><span class="cx">         case SwitchImm: {
</span><span class="cx">             if (!data.didUseJumpTable) {
</span><del>-                ASSERT(m_jitCode->m_switchJumpTables[data.switchTableIndex].isEmpty());
</del><ins>+                ASSERT(m_codeBlock->switchJumpTable(data.switchTableIndex).isEmpty());
</ins><span class="cx">                 continue;
</span><span class="cx">             }
</span><span class="cx"> 
</span><span class="cx">             const UnlinkedSimpleJumpTable& unlinkedTable = m_graph.unlinkedSwitchJumpTable(data.switchTableIndex);
</span><del>-            SimpleJumpTable& linkedTable = m_jitCode->m_switchJumpTables[data.switchTableIndex];
</del><ins>+            SimpleJumpTable& linkedTable = m_codeBlock->switchJumpTable(data.switchTableIndex);
</ins><span class="cx">             linkedTable.m_ctiDefault = linkBuffer.locationOf<JSSwitchPtrTag>(m_blockHeads[data.fallThrough.block->index]);
</span><span class="cx">             RELEASE_ASSERT(linkedTable.m_ctiOffsets.size() == unlinkedTable.m_branchOffsets.size());
</span><span class="cx">             for (unsigned j = linkedTable.m_ctiOffsets.size(); j--;)
</span><span class="lines">@@ -218,12 +219,12 @@
</span><span class="cx"> 
</span><span class="cx">         case SwitchString: {
</span><span class="cx">             if (!data.didUseJumpTable) {
</span><del>-                ASSERT(m_jitCode->m_stringSwitchJumpTables[data.switchTableIndex].isEmpty());
</del><ins>+                ASSERT(m_codeBlock->stringSwitchJumpTable(data.switchTableIndex).isEmpty());
</ins><span class="cx">                 continue;
</span><span class="cx">             }
</span><span class="cx"> 
</span><span class="cx">             const UnlinkedStringJumpTable& unlinkedTable = m_graph.unlinkedStringSwitchJumpTable(data.switchTableIndex);
</span><del>-            StringJumpTable& linkedTable = m_jitCode->m_stringSwitchJumpTables[data.switchTableIndex];
</del><ins>+            StringJumpTable& linkedTable = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
</ins><span class="cx">             auto ctiDefault = linkBuffer.locationOf<JSSwitchPtrTag>(m_blockHeads[data.fallThrough.block->index]);
</span><span class="cx">             RELEASE_ASSERT(linkedTable.m_ctiOffsets.size() == unlinkedTable.m_offsetTable.size() + 1);
</span><span class="cx">             for (auto& entry : linkedTable.m_ctiOffsets)
</span><span class="lines">@@ -332,7 +333,7 @@
</span><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx">     if (m_pcToCodeOriginMapBuilder.didBuildMapping())
</span><del>-        m_jitCode->common.m_pcToCodeOriginMap = makeUnique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), linkBuffer);
</del><ins>+        m_codeBlock->setPCToCodeOriginMap(makeUnique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), linkBuffer));
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> static void emitStackOverflowCheck(JITCompiler& jit, MacroAssembler::JumpList& stackOverflow)
</span></span></pre></div>
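<p>The replacement code above publishes the graph's switch jump tables onto the CodeBlock's lazily created JIT data while holding its concurrent lock, instead of storing them on the DFG JITCode object. Below is a compilable sketch of that publish-under-lock pattern, with std::mutex standing in for ConcurrentJSLocker and plain vectors standing in for FixedVector; names and layout are illustrative only.</p>
<pre>
#include <cstdio>
#include <memory>
#include <mutex>
#include <utility>
#include <vector>

struct SimpleJumpTable { std::vector<int> branchOffsets; };

class CodeBlock {
public:
    struct JITData {
        std::vector<SimpleJumpTable> switchJumpTables;
    };

    // Lazily creates the JIT data; the lock_guard parameter documents that the caller holds the lock.
    JITData& ensureJITData(const std::lock_guard<std::mutex>&)
    {
        if (!m_jitData)
            m_jitData = std::make_unique<JITData>();
        return *m_jitData;
    }

    std::mutex& lock() { return m_lock; }

private:
    std::mutex m_lock;
    std::unique_ptr<JITData> m_jitData;
};

// Called at link time: move the graph's tables onto the CodeBlock under its lock.
void publishJumpTables(CodeBlock& codeBlock, std::vector<SimpleJumpTable>&& graphTables)
{
    if (graphTables.empty())
        return;
    std::lock_guard<std::mutex> locker(codeBlock.lock());
    codeBlock.ensureJITData(locker).switchJumpTables = std::move(graphTables);
}

int main()
{
    CodeBlock codeBlock;
    std::vector<SimpleJumpTable> tables(2);
    publishJumpTables(codeBlock, std::move(tables));
    std::lock_guard<std::mutex> locker(codeBlock.lock());
    std::printf("tables=%zu\n", codeBlock.ensureJITData(locker).switchJumpTables.size());
    return 0;
}
</pre>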
<a id="trunkSourceJavaScriptCoredfgDFGOSREntrycpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/dfg/DFGOSREntry.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/dfg/DFGOSREntry.cpp  2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/dfg/DFGOSREntry.cpp     2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -97,6 +97,7 @@
</span><span class="cx">     ASSERT(JITCode::isOptimizingJIT(codeBlock->jitType()));
</span><span class="cx">     ASSERT(codeBlock->alternative());
</span><span class="cx">     ASSERT(codeBlock->alternative()->jitType() == JITType::BaselineJIT);
</span><ins>+    ASSERT(!codeBlock->jitCodeMap());
</ins><span class="cx">     ASSERT(codeBlock->jitCode()->dfgCommon()->isStillValid);
</span><span class="cx"> 
</span><span class="cx">     if (!Options::useOSREntryToDFG())
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoredfgDFGOSRExitcpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/dfg/DFGOSRExit.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/dfg/DFGOSRExit.cpp   2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/dfg/DFGOSRExit.cpp      2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -772,6 +772,9 @@
</span><span class="cx">     // The tag registers are needed to materialize recoveries below.
</span><span class="cx">     jit.emitMaterializeTagCheckRegisters();
</span><span class="cx"> 
</span><ins>+    if (exit.isExceptionHandler())
+        jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm.topEntryFrame);
+
</ins><span class="cx">     if (inlineStackContainsActiveCheckpoint) {
</span><span class="cx">         EncodedJSValue* tmpScratch = scratch + operands.tmpIndex(0);
</span><span class="cx">         jit.setupArguments<decltype(operationMaterializeOSRExitSideState)>(&vm, &exit, tmpScratch);
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoredfgDFGOSRExitCompilerCommoncpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp     2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp        2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -28,7 +28,6 @@
</span><span class="cx"> 
</span><span class="cx"> #if ENABLE(DFG_JIT)
</span><span class="cx"> 
</span><del>-#include "CodeBlockInlines.h"
</del><span class="cx"> #include "DFGJITCode.h"
</span><span class="cx"> #include "DFGOperations.h"
</span><span class="cx"> #include "JIT.h"
</span><span class="lines">@@ -120,10 +119,10 @@
</span><span class="cx">     int32_t clippedValue;
</span><span class="cx">     switch (jit.codeBlock()->jitType()) {
</span><span class="cx">     case JITType::DFGJIT:
</span><del>-        clippedValue = BaselineExecutionCounter::clippedThreshold(targetValue);
</del><ins>+        clippedValue = BaselineExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
</ins><span class="cx">         break;
</span><span class="cx">     case JITType::FTLJIT:
</span><del>-        clippedValue = UpperTierExecutionCounter::clippedThreshold(targetValue);
</del><ins>+        clippedValue = UpperTierExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
</ins><span class="cx">         break;
</span><span class="cx">     default:
</span><span class="cx">         RELEASE_ASSERT_NOT_REACHED();
</span><span class="lines">@@ -143,8 +142,11 @@
</span><span class="cx"> {
</span><span class="cx">     callerIsLLInt = Options::forceOSRExitToLLInt() || baselineCodeBlockForCaller->jitType() == JITType::InterpreterThunk;
</span><span class="cx"> 
</span><del>-    if (callBytecodeIndex.checkpoint())
</del><ins>+    if (callBytecodeIndex.checkpoint()) {
+        if (!callerIsLLInt)
+            baselineCodeBlockForCaller->m_hasLinkedOSRExit = true;
</ins><span class="cx">         return LLInt::checkpointOSRExitFromInlinedCallTrampolineThunk().code();
</span><ins>+    }
</ins><span class="cx"> 
</span><span class="cx">     MacroAssemblerCodePtr<JSEntryPtrTag> jumpTarget;
</span><span class="cx"> 
</span><span class="lines">@@ -196,6 +198,8 @@
</span><span class="cx"> #undef LLINT_RETURN_LOCATION
</span><span class="cx"> 
</span><span class="cx">     } else {
</span><ins>+        baselineCodeBlockForCaller->m_hasLinkedOSRExit = true;
+
</ins><span class="cx">         switch (trueCallerCallKind) {
</span><span class="cx">         case InlineCallFrame::Call:
</span><span class="cx">         case InlineCallFrame::Construct:
</span><span class="lines">@@ -309,10 +313,6 @@
</span><span class="cx">             CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(*trueCaller);
</span><span class="cx">             jit.storePtr(CCallHelpers::TrustedImmPtr(baselineCodeBlockForCaller->metadataTable()), calleeSaveSlot(inlineCallFrame, baselineCodeBlock, LLInt::Registers::metadataTableGPR));
</span><span class="cx">             jit.storePtr(CCallHelpers::TrustedImmPtr(baselineCodeBlockForCaller->instructionsRawPointer()), calleeSaveSlot(inlineCallFrame, baselineCodeBlock, LLInt::Registers::pbGPR));
</span><del>-        } else if (trueCaller) {
-            CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(*trueCaller);
-            jit.storePtr(CCallHelpers::TrustedImmPtr(baselineCodeBlockForCaller->metadataTable()), calleeSaveSlot(inlineCallFrame, baselineCodeBlock, JIT::s_metadataGPR));
-            jit.storePtr(CCallHelpers::TrustedImmPtr(baselineCodeBlockForCaller->baselineJITConstantPool()), calleeSaveSlot(inlineCallFrame, baselineCodeBlock, JIT::s_constantsGPR));
</del><span class="cx">         }
</span><span class="cx"> 
</span><span class="cx">         if (!inlineCallFrame->isVarargs())
</span><span class="lines">@@ -404,8 +404,7 @@
</span><span class="cx">         jit.move(CCallHelpers::TrustedImm32(bytecodeIndex.offset()), LLInt::Registers::pcGPR);
</span><span class="cx">         jumpTarget = destination.retagged<OSRExitPtrTag>().executableAddress();
</span><span class="cx">     } else {
</span><del>-        jit.move(CCallHelpers::TrustedImmPtr(codeBlockForExit->metadataTable()), JIT::s_metadataGPR);
-        jit.move(CCallHelpers::TrustedImmPtr(codeBlockForExit->baselineJITConstantPool()), JIT::s_constantsGPR);
</del><ins>+        codeBlockForExit->m_hasLinkedOSRExit = true;
</ins><span class="cx"> 
</span><span class="cx">         BytecodeIndex exitIndex = exit.m_codeOrigin.bytecodeIndex();
</span><span class="cx">         MacroAssemblerCodePtr<JSEntryPtrTag> destination;
</span><span class="lines">@@ -421,16 +420,12 @@
</span><span class="cx">         jumpTarget = destination.retagged<OSRExitPtrTag>().executableAddress();
</span><span class="cx">     }
</span><span class="cx"> 
</span><ins>+    jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(codeBlockForExit) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);
</ins><span class="cx">     if (exit.isExceptionHandler()) {
</span><del>-        ASSERT(!RegisterSet::vmCalleeSaveRegisters().contains(LLInt::Registers::pcGPR));
-        jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm.topEntryFrame, AssemblyHelpers::selectScratchGPR(LLInt::Registers::pcGPR));
-
</del><span class="cx">         // Since we're jumping to op_catch, we need to set callFrameForCatch.
</span><span class="cx">         jit.storePtr(GPRInfo::callFrameRegister, vm.addressOfCallFrameForCatch());
</span><span class="cx">     }
</span><del>-
-    jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(codeBlockForExit) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);
-
</del><ins>+    
</ins><span class="cx">     jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
</span><span class="cx">     jit.farJump(GPRInfo::regT2, OSRExitPtrTag);
</span><span class="cx"> }
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoredfgDFGOperationscpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/dfg/DFGOperations.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/dfg/DFGOperations.cpp        2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/dfg/DFGOperations.cpp   2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -30,7 +30,6 @@
</span><span class="cx"> #include "CacheableIdentifierInlines.h"
</span><span class="cx"> #include "ClonedArguments.h"
</span><span class="cx"> #include "CodeBlock.h"
</span><del>-#include "CodeBlockInlines.h"
</del><span class="cx"> #include "CommonSlowPaths.h"
</span><span class="cx"> #include "DFGDriver.h"
</span><span class="cx"> #include "DFGJITCode.h"
</span><span class="lines">@@ -2870,7 +2869,7 @@
</span><span class="cx">     CallFrame* callFrame = DECLARE_CALL_FRAME(vm);
</span><span class="cx">     JITOperationPrologueCallFrameTracer tracer(vm, callFrame);
</span><span class="cx">     CodeBlock* codeBlock = callFrame->codeBlock();
</span><del>-    const SimpleJumpTable& linkedTable = codeBlock->dfgSwitchJumpTable(tableIndex);
</del><ins>+    const SimpleJumpTable& linkedTable = codeBlock->switchJumpTable(tableIndex);
</ins><span class="cx">     JSValue value = JSValue::decode(encodedValue);
</span><span class="cx">     ASSERT(value.isDouble());
</span><span class="cx">     double asDouble = value.asDouble();
</span><span class="lines">@@ -2891,7 +2890,7 @@
</span><span class="cx"> 
</span><span class="cx">     RETURN_IF_EXCEPTION(throwScope, nullptr);
</span><span class="cx">     CodeBlock* codeBlock = callFrame->codeBlock();
</span><del>-    const StringJumpTable& linkedTable = codeBlock->dfgStringSwitchJumpTable(tableIndex);
</del><ins>+    const StringJumpTable& linkedTable = codeBlock->stringSwitchJumpTable(tableIndex);
</ins><span class="cx">     return linkedTable.ctiForValue(*unlinkedTable, strImpl).executableAddress<char*>();
</span><span class="cx"> }
</span><span class="cx"> 
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoredfgDFGSpeculativeJITcpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp    2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp       2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -3985,7 +3985,7 @@
</span><span class="cx">     RegisterSet usedRegisters = this->usedRegisters();
</span><span class="cx"> 
</span><span class="cx">     JITPutByValGenerator gen(
</span><del>-        m_jit.codeBlock(), JITType::DFGJIT, codeOrigin, callSite, AccessType::PutPrivateName, usedRegisters,
</del><ins>+        m_jit.codeBlock(), JITType::DFGJIT, codeOrigin, callSite, AccessType::PutByVal, usedRegisters,
</ins><span class="cx">         JSValueRegs::payloadOnly(baseGPR), JSValueRegs::payloadOnly(propertyGPR), valueRegs, InvalidGPRReg, stubInfoGPR);
</span><span class="cx">     gen.stubInfo()->propertyIsSymbol = true;
</span><span class="cx"> 
</span><span class="lines">@@ -4905,7 +4905,7 @@
</span><span class="cx">     CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic);
</span><span class="cx">     BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();
</span><span class="cx">     BinaryArithProfile* arithProfile = baselineCodeBlock->binaryArithProfileForBytecodeIndex(bytecodeIndex);
</span><del>-    JITAddIC* addIC = m_jit.jitCode()->common.addJITAddIC(arithProfile);
</del><ins>+    JITAddIC* addIC = m_jit.codeBlock()->addJITAddIC(arithProfile);
</ins><span class="cx">     auto repatchingFunction = operationValueAddOptimize;
</span><span class="cx">     auto nonRepatchingFunction = operationValueAdd;
</span><span class="cx">     
</span><span class="lines">@@ -4989,7 +4989,7 @@
</span><span class="cx">     CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic);
</span><span class="cx">     BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();
</span><span class="cx">     BinaryArithProfile* arithProfile = baselineCodeBlock->binaryArithProfileForBytecodeIndex(bytecodeIndex);
</span><del>-    JITSubIC* subIC = m_jit.jitCode()->common.addJITSubIC(arithProfile);
</del><ins>+    JITSubIC* subIC = m_jit.codeBlock()->addJITSubIC(arithProfile);
</ins><span class="cx">     auto repatchingFunction = operationValueSubOptimize;
</span><span class="cx">     auto nonRepatchingFunction = operationValueSub;
</span><span class="cx"> 
</span><span class="lines">@@ -5571,7 +5571,7 @@
</span><span class="cx">     CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic);
</span><span class="cx">     BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();
</span><span class="cx">     UnaryArithProfile* arithProfile = baselineCodeBlock->unaryArithProfileForBytecodeIndex(bytecodeIndex);
</span><del>-    JITNegIC* negIC = m_jit.jitCode()->common.addJITNegIC(arithProfile);
</del><ins>+    JITNegIC* negIC = m_jit.codeBlock()->addJITNegIC(arithProfile);
</ins><span class="cx">     auto repatchingFunction = operationArithNegateOptimize;
</span><span class="cx">     auto nonRepatchingFunction = operationArithNegate;
</span><span class="cx">     compileMathIC(node, negIC, repatchingFunction, nonRepatchingFunction);
</span><span class="lines">@@ -5815,7 +5815,7 @@
</span><span class="cx">     CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic);
</span><span class="cx">     BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();
</span><span class="cx">     BinaryArithProfile* arithProfile = baselineCodeBlock->binaryArithProfileForBytecodeIndex(bytecodeIndex);
</span><del>-    JITMulIC* mulIC = m_jit.jitCode()->common.addJITMulIC(arithProfile);
</del><ins>+    JITMulIC* mulIC = m_jit.codeBlock()->addJITMulIC(arithProfile);
</ins><span class="cx">     auto repatchingFunction = operationValueMulOptimize;
</span><span class="cx">     auto nonRepatchingFunction = operationValueMul;
</span><span class="cx"> 
</span><span class="lines">@@ -14770,7 +14770,7 @@
</span><span class="cx">     SpeculateCellOperand scope(this, node->child2());
</span><span class="cx">     GPRReg scopeReg = scope.gpr();
</span><span class="cx"> 
</span><del>-    m_jit.logShadowChickenTailPacket(shadowPacketReg, thisRegs, scopeReg, CCallHelpers::TrustedImmPtr(m_jit.codeBlock()), callSiteIndex);
</del><ins>+    m_jit.logShadowChickenTailPacket(shadowPacketReg, thisRegs, scopeReg, m_jit.codeBlock(), callSiteIndex);
</ins><span class="cx">     noResult(node);
</span><span class="cx"> }
</span><span class="cx"> 
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoredfgDFGSpeculativeJIT32_64cpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp       2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp  2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -730,7 +730,6 @@
</span><span class="cx">             shuffleData.callee = ValueRecovery::inPair(calleeTagGPR, calleePayloadGPR);
</span><span class="cx">             shuffleData.args.resize(numAllocatedArgs);
</span><span class="cx">             shuffleData.numPassedArgs = numPassedArgs;
</span><del>-            shuffleData.numParameters = m_jit.codeBlock()->numParameters();
</del><span class="cx"> 
</span><span class="cx">             for (unsigned i = 0; i < numPassedArgs; ++i) {
</span><span class="cx">                 Edge argEdge = m_jit.graph().varArgChild(node, i + 1);
</span><span class="lines">@@ -890,7 +889,7 @@
</span><span class="cx"> 
</span><span class="cx">     CCallHelpers::JumpList slowCases;
</span><span class="cx">     if (isTail) {
</span><del>-        slowCases = info->emitTailCallFastPath(m_jit, calleePayloadGPR, [&] {
</del><ins>+        slowCases = info->emitTailCallFastPath(m_jit, calleePayloadGPR, InvalidGPRReg, CallLinkInfo::UseDataIC::No, [&] {
</ins><span class="cx">             if (node->op() == TailCall) {
</span><span class="cx">                 info->setFrameShuffleData(shuffleData);
</span><span class="cx">                 CallFrameShuffler(m_jit, shuffleData).prepareForTailCall();
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoredfgDFGSpeculativeJIT64cpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp  2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp     2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -828,7 +828,6 @@
</span><span class="cx">             shuffleData.callee = ValueRecovery::inGPR(calleeGPR, DataFormatJS);
</span><span class="cx">             shuffleData.args.resize(numAllocatedArgs);
</span><span class="cx">             shuffleData.numPassedArgs = numPassedArgs;
</span><del>-            shuffleData.numParameters = m_jit.codeBlock()->numParameters();
</del><span class="cx">             
</span><span class="cx">             for (unsigned i = 0; i < numPassedArgs; ++i) {
</span><span class="cx">                 Edge argEdge = m_jit.graph().varArgChild(node, i + 1);
</span><span class="lines">@@ -980,7 +979,7 @@
</span><span class="cx">     
</span><span class="cx">     CCallHelpers::JumpList slowCases;
</span><span class="cx">     if (isTail) {
</span><del>-        slowCases = callLinkInfo->emitTailCallFastPath(m_jit, calleeGPR, [&] {
</del><ins>+        slowCases = callLinkInfo->emitTailCallFastPath(m_jit, calleeGPR, InvalidGPRReg, CallLinkInfo::UseDataIC::No, [&] {
</ins><span class="cx">             if (node->op() == TailCall) {
</span><span class="cx">                 callLinkInfo->setFrameShuffleData(shuffleData);
</span><span class="cx">                 CallFrameShuffler(m_jit, shuffleData).prepareForTailCall();
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoreftlFTLCompilecpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/ftl/FTLCompile.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/ftl/FTLCompile.cpp   2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/ftl/FTLCompile.cpp      2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -156,7 +156,7 @@
</span><span class="cx"> 
</span><span class="cx">     if (vm.shouldBuilderPCToCodeOriginMapping()) {
</span><span class="cx">         B3::PCToOriginMap originMap = state.proc->releasePCToOriginMap();
</span><del>-        state.jitCode->common.m_pcToCodeOriginMap = makeUnique<PCToCodeOriginMap>(PCToCodeOriginMapBuilder(vm, WTFMove(originMap)), *state.finalizer->b3CodeLinkBuffer);
</del><ins>+        codeBlock->setPCToCodeOriginMap(makeUnique<PCToCodeOriginMap>(PCToCodeOriginMapBuilder(vm, WTFMove(originMap)), *state.finalizer->b3CodeLinkBuffer));
</ins><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx">     CodeLocationLabel<JSEntryPtrTag> label = state.finalizer->b3CodeLinkBuffer->locationOf<JSEntryPtrTag>(state.proc->code().entrypointLabel(0));
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoreftlFTLJITCodeh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/ftl/FTLJITCode.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/ftl/FTLJITCode.h     2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/ftl/FTLJITCode.h        2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -68,8 +68,6 @@
</span><span class="cx">     DFG::CommonData* dfgCommon() override;
</span><span class="cx">     static ptrdiff_t commonDataOffset() { return OBJECT_OFFSETOF(JITCode, common); }
</span><span class="cx">     void shrinkToFit(const ConcurrentJSLocker&) override;
</span><del>-
-    PCToCodeOriginMap* pcToCodeOriginMap() override { return common.m_pcToCodeOriginMap.get(); }
</del><span class="cx">     
</span><span class="cx">     DFG::CommonData common;
</span><span class="cx">     Vector<OSRExit> m_osrExit;
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoreftlFTLLinkcpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/ftl/FTLLink.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/ftl/FTLLink.cpp      2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/ftl/FTLLink.cpp 2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -52,6 +52,14 @@
</span><span class="cx"> 
</span><span class="cx">     graph.registerFrozenValues();
</span><span class="cx"> 
</span><ins>+#if ASSERT_ENABLED
+    {
+        ConcurrentJSLocker locker(codeBlock->m_lock);
+        ASSERT(codeBlock->ensureJITData(locker).m_stringSwitchJumpTables.isEmpty());
+        ASSERT(codeBlock->ensureJITData(locker).m_switchJumpTables.isEmpty());
+    }
+#endif
+
</ins><span class="cx">     // Create the entrypoint. Note that we use this entrypoint totally differently
</span><span class="cx">     // depending on whether we're doing OSR entry or not.
</span><span class="cx">     CCallHelpers jit(codeBlock);
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoreftlFTLLowerDFGToB3cpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp      2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp 2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -88,7 +88,6 @@
</span><span class="cx"> #include "JSSetIterator.h"
</span><span class="cx"> #include "LLIntThunks.h"
</span><span class="cx"> #include "OperandsInlines.h"
</span><del>-#include "PCToCodeOriginMap.h"
</del><span class="cx"> #include "ProbeContext.h"
</span><span class="cx"> #include "RegExpObject.h"
</span><span class="cx"> #include "ScratchRegisterAllocator.h"
</span><span class="lines">@@ -2321,18 +2320,6 @@
</span><span class="cx">         compileBinaryMathIC<JITSubGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
</span><span class="cx">     }
</span><span class="cx"> 
</span><del>-    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITAddGenerator>::value>::type>
-    static JITAddIC* addMathIC(DFG::CommonData& common, BinaryArithProfile* profile) { return common.addJITAddIC(profile); }
-
-    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITMulGenerator>::value>::type>
-    static JITMulIC* addMathIC(DFG::CommonData& common, BinaryArithProfile* profile) { return common.addJITMulIC(profile); }
-
-    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITNegGenerator>::value>::type>
-    static JITNegIC* addMathIC(DFG::CommonData& common, UnaryArithProfile* profile) { return common.addJITNegIC(profile); }
-
-    template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITSubGenerator>::value>::type>
-    static JITSubIC* addMathIC(DFG::CommonData& common, BinaryArithProfile* profile) { return common.addJITSubIC(profile); }
-
</del><span class="cx">     void compileValueMul()
</span><span class="cx">     {
</span><span class="cx">         JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
</span><span class="lines">@@ -2404,7 +2391,7 @@
</span><span class="cx"> #endif
</span><span class="cx"> 
</span><span class="cx">                 Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
</span><del>-                JITUnaryMathIC<Generator>* mathIC = addMathIC<Generator>(state->jitCode->common, arithProfile);
</del><ins>+                JITUnaryMathIC<Generator>* mathIC = jit.codeBlock()->addMathIC<Generator>(arithProfile);
</ins><span class="cx">                 mathIC->m_generator = Generator(JSValueRegs(params[0].gpr()), JSValueRegs(params[1].gpr()), params.gpScratch(0));
</span><span class="cx"> 
</span><span class="cx">                 bool shouldEmitProfiling = false;
</span><span class="lines">@@ -2513,7 +2500,7 @@
</span><span class="cx"> #endif
</span><span class="cx"> 
</span><span class="cx">                 Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
</span><del>-                JITBinaryMathIC<Generator>* mathIC = addMathIC<Generator>(state->jitCode->common, arithProfile);
</del><ins>+                JITBinaryMathIC<Generator>* mathIC = jit.codeBlock()->addMathIC<Generator>(arithProfile);
</ins><span class="cx">                 mathIC->m_generator = Generator(leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
</span><span class="cx">                     JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), params.fpScratch(0),
</span><span class="cx">                     params.fpScratch(1), params.gpScratch(0));
</span><span class="lines">@@ -4330,7 +4317,7 @@
</span><span class="cx">             GPRReg stubInfoGPR = JITCode::useDataIC(JITType::FTLJIT) ? params.gpScratch(0) : InvalidGPRReg;
</span><span class="cx"> 
</span><span class="cx">             auto generator = Box<JITPutByValGenerator>::create(
</span><del>-                jit.codeBlock(), JITType::FTLJIT, nodeSemanticOrigin, callSiteIndex, AccessType::PutPrivateName,
</del><ins>+                jit.codeBlock(), JITType::FTLJIT, nodeSemanticOrigin, callSiteIndex, AccessType::PutByVal,
</ins><span class="cx">                 params.unavailableRegisters(), JSValueRegs(baseGPR), JSValueRegs(propertyGPR), JSValueRegs(valueGPR), InvalidGPRReg, stubInfoGPR);
</span><span class="cx"> 
</span><span class="cx">             generator->stubInfo()->propertyIsSymbol = true;
</span><span class="lines">@@ -10298,7 +10285,6 @@
</span><span class="cx">                     for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i)
</span><span class="cx">                         shuffleData.args.append(ValueRecovery::constant(jsUndefined()));
</span><span class="cx">                     shuffleData.numPassedArgs = numPassedArgs;
</span><del>-                    shuffleData.numParameters = jit.codeBlock()->numParameters();
</del><span class="cx">                     shuffleData.setupCalleeSaveRegisters(jit.codeBlock());
</span><span class="cx">                     
</span><span class="cx">                     CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo(semanticNodeOrigin);
</span><span class="lines">@@ -10457,7 +10443,6 @@
</span><span class="cx">                     shuffleData.args.append(params[1 + i].recoveryForJSValue());
</span><span class="cx"> 
</span><span class="cx">                 shuffleData.numPassedArgs = numArgs;
</span><del>-                shuffleData.numParameters = jit.codeBlock()->numParameters();
</del><span class="cx">                 
</span><span class="cx">                 shuffleData.setupCalleeSaveRegisters(jit.codeBlock());
</span><span class="cx"> 
</span><span class="lines">@@ -10464,7 +10449,7 @@
</span><span class="cx">                 CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo(codeOrigin);
</span><span class="cx">                 callLinkInfo->setUpCall(CallLinkInfo::TailCall, GPRInfo::regT0);
</span><span class="cx"> 
</span><del>-                auto slowPath = callLinkInfo->emitTailCallFastPath(jit, GPRInfo::regT0, [&] {
</del><ins>+                auto slowPath = callLinkInfo->emitTailCallFastPath(jit, GPRInfo::regT0, InvalidGPRReg, CallLinkInfo::UseDataIC::No, [&] {
</ins><span class="cx">                     callLinkInfo->setFrameShuffleData(shuffleData);
</span><span class="cx">                     CallFrameShuffler(jit, shuffleData).prepareForTailCall();
</span><span class="cx">                 });
</span><span class="lines">@@ -10807,7 +10792,7 @@
</span><span class="cx">                 CCallHelpers::JumpList slowPath;
</span><span class="cx">                 CCallHelpers::Jump done;
</span><span class="cx">                 if (isTailCall) {
</span><del>-                    slowPath = callLinkInfo->emitTailCallFastPath(jit, GPRInfo::regT0, [&] {
</del><ins>+                    slowPath = callLinkInfo->emitTailCallFastPath(jit, GPRInfo::regT0, InvalidGPRReg, CallLinkInfo::UseDataIC::No, [&] {
</ins><span class="cx">                         jit.emitRestoreCalleeSaves();
</span><span class="cx">                         jit.prepareForTailCallSlow();
</span><span class="cx">                     });
</span><span class="lines">@@ -11088,7 +11073,7 @@
</span><span class="cx">                 CCallHelpers::JumpList slowPath;
</span><span class="cx">                 CCallHelpers::Jump done;
</span><span class="cx">                 if (isTailCall) {
</span><del>-                    slowPath = callLinkInfo->emitTailCallFastPath(jit, GPRInfo::regT0, [&] {
</del><ins>+                    slowPath = callLinkInfo->emitTailCallFastPath(jit, GPRInfo::regT0, InvalidGPRReg, CallLinkInfo::UseDataIC::No, [&] {
</ins><span class="cx">                         jit.emitRestoreCalleeSaves();
</span><span class="cx">                         jit.prepareForTailCallSlow();
</span><span class="cx">                     });
</span></span></pre></div>
<a id="trunkSourceJavaScriptCoreftlFTLOSRExitCompilercpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp   2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp      2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -413,16 +413,28 @@
</span><span class="cx"> 
</span><span class="cx">     RegisterSet allFTLCalleeSaves = RegisterSet::ftlCalleeSaveRegisters();
</span><span class="cx">     const RegisterAtOffsetList* baselineCalleeSaves = baselineCodeBlock->calleeSaveRegisters();
</span><ins>+    RegisterAtOffsetList* vmCalleeSaves = RegisterSet::vmCalleeSaveRegisterOffsets();
+    RegisterSet vmCalleeSavesToSkip = RegisterSet::stackRegisters();
+    if (exit.isExceptionHandler()) {
+        jit.loadPtr(&vm.topEntryFrame, GPRInfo::regT1);
+        jit.addPtr(CCallHelpers::TrustedImm32(EntryFrame::calleeSaveRegistersBufferOffset()), GPRInfo::regT1);
+    }
</ins><span class="cx"> 
</span><span class="cx">     for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
</span><span class="cx">         if (!allFTLCalleeSaves.get(reg)) {
</span><ins>+            if (exit.isExceptionHandler())
+                RELEASE_ASSERT(!vmCalleeSaves->find(reg));
</ins><span class="cx">             continue;
</span><span class="cx">         }
</span><span class="cx">         unsigned unwindIndex = codeBlock->calleeSaveRegisters()->indexOf(reg);
</span><span class="cx">         const RegisterAtOffset* baselineRegisterOffset = baselineCalleeSaves->find(reg);
</span><ins>+        RegisterAtOffset* vmCalleeSave = nullptr; 
+        if (exit.isExceptionHandler())
+            vmCalleeSave = vmCalleeSaves->find(reg);
</ins><span class="cx"> 
</span><span class="cx">         if (reg.isGPR()) {
</span><span class="cx">             GPRReg regToLoad = baselineRegisterOffset ? GPRInfo::regT0 : reg.gpr();
</span><ins>+            RELEASE_ASSERT(regToLoad != GPRInfo::regT1);
</ins><span class="cx"> 
</span><span class="cx">             if (unwindIndex == UINT_MAX) {
</span><span class="cx">                 // The FTL compilation didn't preserve this register. This means that it also
</span><span class="lines">@@ -440,6 +452,8 @@
</span><span class="cx"> 
</span><span class="cx">             if (baselineRegisterOffset)
</span><span class="cx">                 jit.store64(regToLoad, MacroAssembler::Address(MacroAssembler::framePointerRegister, baselineRegisterOffset->offset()));
</span><ins>+            if (vmCalleeSave && !vmCalleeSavesToSkip.get(vmCalleeSave->reg()))
+                jit.store64(regToLoad, MacroAssembler::Address(GPRInfo::regT1, vmCalleeSave->offset()));
</ins><span class="cx">         } else {
</span><span class="cx">             FPRReg fpRegToLoad = baselineRegisterOffset ? FPRInfo::fpRegT0 : reg.fpr();
</span><span class="cx"> 
</span><span class="lines">@@ -450,9 +464,19 @@
</span><span class="cx"> 
</span><span class="cx">             if (baselineRegisterOffset)
</span><span class="cx">                 jit.storeDouble(fpRegToLoad, MacroAssembler::Address(MacroAssembler::framePointerRegister, baselineRegisterOffset->offset()));
</span><ins>+            if (vmCalleeSave && !vmCalleeSavesToSkip.get(vmCalleeSave->reg()))
+                jit.storeDouble(fpRegToLoad, MacroAssembler::Address(GPRInfo::regT1, vmCalleeSave->offset()));
</ins><span class="cx">         }
</span><span class="cx">     }
</span><span class="cx"> 
</span><ins>+    if (exit.isExceptionHandler()) {
+        RegisterAtOffset* vmCalleeSave = vmCalleeSaves->find(GPRInfo::numberTagRegister);
+        jit.store64(GPRInfo::numberTagRegister, MacroAssembler::Address(GPRInfo::regT1, vmCalleeSave->offset()));
+
+        vmCalleeSave = vmCalleeSaves->find(GPRInfo::notCellMaskRegister);
+        jit.store64(GPRInfo::notCellMaskRegister, MacroAssembler::Address(GPRInfo::regT1, vmCalleeSave->offset()));
+    }
+
</ins><span class="cx">     size_t baselineVirtualRegistersForCalleeSaves = baselineCodeBlock->calleeSaveSpaceAsVirtualRegisters();
</span><span class="cx"> 
</span><span class="cx">     if (exit.m_codeOrigin.inlineStackContainsActiveCheckpoint()) {
</span></span></pre></div>
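<p>The added stores above spill the VM callee-save registers into the entry frame's callee-save buffer when the exit is an exception handler, writing each register at its recorded offset and skipping registers that are never spilled (the stack registers). The following is only a conceptual, plain-C++ rendering of that loop: the real code emits store64/storeDouble instructions, and the register indices and buffer layout below are made up.</p>
<pre>
#include <cstdint>
#include <cstdio>
#include <vector>

struct RegisterAtOffset {
    int reg;                // hypothetical register index
    std::ptrdiff_t offset;  // byte offset into the callee-save buffer
};

void copyCalleeSavesToBuffer(uint8_t* calleeSaveBuffer,
                             const std::vector<RegisterAtOffset>& vmCalleeSaves,
                             const uint64_t* currentRegisterValues, // indexed by reg
                             const std::vector<int>& registersToSkip)
{
    for (const RegisterAtOffset& entry : vmCalleeSaves) {
        bool skip = false;
        for (int r : registersToSkip)
            skip |= (r == entry.reg);
        if (skip)
            continue;
        // Equivalent of jit.store64(reg, Address(buffer, offset)).
        *reinterpret_cast<uint64_t*>(calleeSaveBuffer + entry.offset) = currentRegisterValues[entry.reg];
    }
}

int main()
{
    alignas(8) uint8_t buffer[32] = {};
    uint64_t registerValues[4] = { 0x11, 0x22, 0x33, 0x44 };
    copyCalleeSavesToBuffer(buffer, {{1, 0}, {3, 8}}, registerValues, {});
    std::printf("%llx %llx\n",
        (unsigned long long)*reinterpret_cast<uint64_t*>(buffer),
        (unsigned long long)*reinterpret_cast<uint64_t*>(buffer + 8));
    return 0;
}
</pre>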
<a id="trunkSourceJavaScriptCoregeneratorMetadatarb"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/generator/Metadata.rb (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/generator/Metadata.rb        2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/generator/Metadata.rb   2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -42,28 +42,17 @@
</span><span class="cx">     def struct(op)
</span><span class="cx">         return if empty?
</span><span class="cx"> 
</span><del>-        def generateOffsetOfFunctions(prefix, fieldNames)
-            fieldNames.map do |fieldName|
-                "#{prefix}static ptrdiff_t offsetOf#{fieldName[0].upcase}#{fieldName[1..-1]}() { return OBJECT_OFFSETOF(Metadata, m_#{fieldName}); }"
-            end.join("\n")
-        end
-
-        def convertFields(prefix, fields, fieldNames)
</del><ins>+        def convertFields(prefix, fields)
</ins><span class="cx">             fields.map do |field, type|
</span><span class="cx">                 if type.kind_of? Hash
</span><del>-                    "#{prefix}union {\n#{convertFields(prefix + '    ', type, fieldNames)}\n#{prefix}};"
</del><ins>+                    "#{prefix}union {\n#{convertFields(prefix + '    ', type)}\n#{prefix}};"
</ins><span class="cx">                 else
</span><del>-                    fieldName = field.to_s
-                    fieldNames.push(fieldName)
-                    "#{prefix}#{type.to_s} m_#{fieldName};"
</del><ins>+                    "#{prefix}#{type.to_s} m_#{field.to_s};"
</ins><span class="cx">                 end
</span><span class="cx">             end.join("\n")
</span><span class="cx">         end
</span><span class="cx"> 
</span><del>-        fieldNames = []
-        prefix = "        "
-        fields = convertFields(prefix, @fields, fieldNames)
-        fields = fields + "\n" + generateOffsetOfFunctions(prefix, fieldNames)
</del><ins>+        fields = convertFields("        ", @fields)
</ins><span class="cx"> 
</span><span class="cx">         inits = nil
</span><span class="cx">         if @initializers && (not @initializers.empty?)
</span></span></pre></div>
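<p>The Ruby change above removes the generator code that emitted a static offsetOf accessor per metadata field. For reference, the generated accessors had roughly the shape shown below; this is a hand-written illustration with invented field names, and JSC's OBJECT_OFFSETOF macro is approximated with plain offsetof.</p>
<pre>
#include <cstddef>
#include <cstdio>

struct Metadata {
    unsigned m_counter;
    void* m_cachedStructure;

    // The removed generator produced one such accessor per field, e.g.
    // "static ptrdiff_t offsetOfCounter() { return OBJECT_OFFSETOF(Metadata, m_counter); }"
    static std::ptrdiff_t offsetOfCounter() { return offsetof(Metadata, m_counter); }
    static std::ptrdiff_t offsetOfCachedStructure() { return offsetof(Metadata, m_cachedStructure); }
};

int main()
{
    // JIT code uses offsets like these to address metadata fields relative to a base pointer.
    std::printf("counter at %td, cachedStructure at %td\n",
        Metadata::offsetOfCounter(), Metadata::offsetOfCachedStructure());
    return 0;
}
</pre>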
<a id="trunkSourceJavaScriptCorejitAssemblyHelperscpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/AssemblyHelpers.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/AssemblyHelpers.cpp      2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/AssemblyHelpers.cpp 2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -367,28 +367,6 @@
</span><span class="cx">         result);
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-void AssemblyHelpers::storeProperty(JSValueRegs value, GPRReg object, GPRReg offset, GPRReg scratch)
-{
-    Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));
-    
-    loadPtr(Address(object, JSObject::butterflyOffset()), scratch);
-    neg32(offset);
-    signExtend32ToPtr(offset, offset);
-    Jump ready = jump();
-    
-    isInline.link(this);
-    addPtr(
-        TrustedImm32(
-            static_cast<int32_t>(sizeof(JSObject)) -
-            (static_cast<int32_t>(firstOutOfLineOffset) - 2) * static_cast<int32_t>(sizeof(EncodedJSValue))),
-        object, scratch);
-    
-    ready.link(this);
-    
-    storeValue(value,
-        BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)));
-}
-
</del><span class="cx"> void AssemblyHelpers::emitLoadStructure(VM& vm, RegisterID source, RegisterID dest, RegisterID scratch)
</span><span class="cx"> {
</span><span class="cx"> #if USE(JSVALUE64)
</span><span class="lines">@@ -720,13 +698,8 @@
</span><span class="cx"> 
</span><span class="cx"> void AssemblyHelpers::emitVirtualCall(VM& vm, JSGlobalObject* globalObject, CallLinkInfo* info)
</span><span class="cx"> {
</span><ins>+    move(TrustedImmPtr(info), GPRInfo::regT2);
</ins><span class="cx">     move(TrustedImmPtr(globalObject), GPRInfo::regT3);
</span><del>-    emitVirtualCallWithoutMovingGlobalObject(vm, info);
-}
-
-void AssemblyHelpers::emitVirtualCallWithoutMovingGlobalObject(VM& vm, CallLinkInfo* info)
-{
-    move(TrustedImmPtr(info), GPRInfo::regT2);
</del><span class="cx">     Call call = nearCall();
</span><span class="cx">     addLinkTask([=, &vm] (LinkBuffer& linkBuffer) {
</span><span class="cx">         auto callLocation = linkBuffer.locationOfNearCall<JITCompilationPtrTag>(call);
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorejitAssemblyHelpersh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/AssemblyHelpers.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/AssemblyHelpers.h        2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/AssemblyHelpers.h   2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -234,9 +234,8 @@
</span><span class="cx"> #endif
</span><span class="cx">     }
</span><span class="cx">     
</span><del>-    // Note that these clobber offset.
</del><ins>+    // Note that this clobbers offset.
</ins><span class="cx">     void loadProperty(GPRReg object, GPRReg offset, JSValueRegs result);
</span><del>-    void storeProperty(JSValueRegs value, GPRReg object, GPRReg offset, GPRReg scratch);
</del><span class="cx"> 
</span><span class="cx">     void moveValueRegs(JSValueRegs srcRegs, JSValueRegs destRegs)
</span><span class="cx">     {
</span><span class="lines">@@ -380,26 +379,15 @@
</span><span class="cx"> #endif
</span><span class="cx">     }
</span><span class="cx"> 
</span><del>-    void copyCalleeSavesToEntryFrameCalleeSavesBuffer(EntryFrame*& topEntryFrame, GPRReg scratch)
-    {
-#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
-        loadPtr(&topEntryFrame, scratch);
-        copyCalleeSavesToEntryFrameCalleeSavesBufferImpl(scratch);
-#else
-        UNUSED_PARAM(topEntryFrame);
-        UNUSED_PARAM(scratch);
-#endif
-    }
-
</del><span class="cx">     void copyCalleeSavesToEntryFrameCalleeSavesBuffer(EntryFrame*& topEntryFrame)
</span><span class="cx">     {
</span><span class="cx"> #if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
</span><span class="cx">         const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() };
</span><span class="cx">         GPRReg temp1 = usedRegisters.getFreeGPR(0);
</span><del>-        copyCalleeSavesToEntryFrameCalleeSavesBuffer(topEntryFrame, temp1);
</del><ins>+        loadPtr(&topEntryFrame, temp1);
+        copyCalleeSavesToEntryFrameCalleeSavesBufferImpl(temp1);
</ins><span class="cx"> #else
</span><span class="cx">         UNUSED_PARAM(topEntryFrame);
</span><del>-        UNUSED_PARAM(topEntryFrame);
</del><span class="cx"> #endif
</span><span class="cx">     }
</span><span class="cx">     
</span><span class="lines">@@ -1732,7 +1720,6 @@
</span><span class="cx">     }
</span><span class="cx">     
</span><span class="cx">     void emitVirtualCall(VM&, JSGlobalObject*, CallLinkInfo*);
</span><del>-    void emitVirtualCallWithoutMovingGlobalObject(VM&, CallLinkInfo*);
</del><span class="cx">     
</span><span class="cx">     void makeSpaceOnStackForCCall();
</span><span class="cx">     void reclaimSpaceOnStackForCCall();
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorejitBaselineJITCodecpp"></a>
<div class="delfile"><h4>Deleted: trunk/Source/JavaScriptCore/jit/BaselineJITCode.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/BaselineJITCode.cpp      2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/BaselineJITCode.cpp 2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -1,61 +0,0 @@
</span><del>-/*
- * Copyright (C) 2021 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#include "config.h"
-#include "BaselineJITCode.h"
-
-#if ENABLE(JIT)
-
-#include "JITMathIC.h"
-#include "JumpTable.h"
-#include "StructureStubInfo.h"
-
-namespace JSC {
-
-JITAddIC* MathICHolder::addJITAddIC(BinaryArithProfile* arithProfile) { return m_addICs.add(arithProfile); }
-JITMulIC* MathICHolder::addJITMulIC(BinaryArithProfile* arithProfile) { return m_mulICs.add(arithProfile); }
-JITSubIC* MathICHolder::addJITSubIC(BinaryArithProfile* arithProfile) { return m_subICs.add(arithProfile); }
-JITNegIC* MathICHolder::addJITNegIC(UnaryArithProfile* arithProfile) { return m_negICs.add(arithProfile); }
-
-void MathICHolder::adoptMathICs(MathICHolder& other)
-{
-    m_addICs = WTFMove(other.m_addICs);
-    m_mulICs = WTFMove(other.m_mulICs);
-    m_negICs = WTFMove(other.m_negICs);
-    m_subICs = WTFMove(other.m_subICs);
-}
-
-BaselineJITCode::BaselineJITCode(CodeRef<JSEntryPtrTag> code, CodePtr<JSEntryPtrTag> withArityCheck)
-    : DirectJITCode(WTFMove(code), withArityCheck, JITType::BaselineJIT)
-    , MathICHolder()
-{ }
-
-BaselineJITCode::~BaselineJITCode()
-{
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
</del></span></pre></div>
<a id="trunkSourceJavaScriptCorejitBaselineJITCodeh"></a>
<div class="delfile"><h4>Deleted: trunk/Source/JavaScriptCore/jit/BaselineJITCode.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/BaselineJITCode.h        2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/BaselineJITCode.h   2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -1,112 +0,0 @@
</span><del>-/*
- * Copyright (C) 2021 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#pragma once
-
-#include "CallLinkInfo.h"
-#include "JITCode.h"
-#include "JITCodeMap.h"
-
-#if ENABLE(JIT)
-
-namespace JSC {
-
-class BinaryArithProfile;
-class UnaryArithProfile;
-struct UnlinkedStructureStubInfo;
-struct SimpleJumpTable;
-struct StringJumpTable;
-
-class MathICHolder {
-public:
-    void adoptMathICs(MathICHolder& other);
-    JITAddIC* addJITAddIC(BinaryArithProfile*);
-    JITMulIC* addJITMulIC(BinaryArithProfile*);
-    JITSubIC* addJITSubIC(BinaryArithProfile*);
-    JITNegIC* addJITNegIC(UnaryArithProfile*);
-
-private:
-    Bag<JITAddIC> m_addICs;
-    Bag<JITMulIC> m_mulICs;
-    Bag<JITNegIC> m_negICs;
-    Bag<JITSubIC> m_subICs;
-};
-
-class JITConstantPool {
-public:
-    using Constant = unsigned;
-
-    enum class Type : uint8_t {
-        GlobalObject,
-        CallLinkInfo,
-        StructureStubInfo,
-        FunctionDecl,
-        FunctionExpr,
-    };
-
-    struct Value {
-        Type type;
-        PackedPtr<void> payload;
-    };
-
-    JITConstantPool() = default;
-    JITConstantPool(JITConstantPool&&) = default;
-    JITConstantPool& operator=(JITConstantPool&&) = default;
-
-    Constant add(Type type, void* payload = nullptr)
-    {
-        unsigned result = m_constants.size();
-        m_constants.append(Value { type, payload });
-        return result;
-    }
-
-    size_t size() const { return m_constants.size(); }
-    Value at(size_t i) const { return m_constants[i]; }
-
-private:
-    Vector<Value> m_constants;
-};
-
-
-class BaselineJITCode : public DirectJITCode, public MathICHolder {
-public:
-    BaselineJITCode(CodeRef<JSEntryPtrTag>, CodePtr<JSEntryPtrTag> withArityCheck);
-    ~BaselineJITCode() override;
-    PCToCodeOriginMap* pcToCodeOriginMap() override { return m_pcToCodeOriginMap.get(); }
-
-    Bag<UnlinkedCallLinkInfo> m_unlinkedCalls;
-    Bag<CallLinkInfo> m_evalCallLinkInfos;
-    Bag<UnlinkedStructureStubInfo> m_unlinkedStubInfos;
-    FixedVector<SimpleJumpTable> m_switchJumpTables;
-    FixedVector<StringJumpTable> m_stringSwitchJumpTables;
-    JITCodeMap m_jitCodeMap;
-    JITConstantPool m_constantPool;
-    std::unique_ptr<PCToCodeOriginMap> m_pcToCodeOriginMap;
-    bool m_isShareable { true };
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
</del></span></pre></div>
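<p>The deleted header above carried, among other things, the small JITConstantPool table that handed out integer indices for typed payload pointers. As a minimal sketch of how such an index-based pool is used, assuming std::vector in place of WTF::Vector and with the class and field names below being purely illustrative stand-ins (they are not part of this changeset):</p>
<pre>
#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for the removed JITConstantPool: constants are
// appended once during compilation and later looked up by integer index.
class ConstantPool {
public:
    using Constant = unsigned;
    enum class Type : uint8_t { GlobalObject, CallLinkInfo, StructureStubInfo };
    struct Value { Type type; void* payload; };

    Constant add(Type type, void* payload = nullptr)
    {
        unsigned result = static_cast<unsigned>(m_constants.size());
        m_constants.push_back(Value { type, payload });
        return result; // index of the newly added constant
    }

    size_t size() const { return m_constants.size(); }
    Value at(size_t i) const { return m_constants[i]; }

private:
    std::vector<Value> m_constants;
};

int main()
{
    ConstantPool pool;
    int dummyStubInfo = 0;
    ConstantPool::Constant globalObjectIndex = pool.add(ConstantPool::Type::GlobalObject);
    ConstantPool::Constant stubIndex = pool.add(ConstantPool::Type::StructureStubInfo, &dummyStubInfo);
    std::printf("pool holds %zu constants; stub at index %u, global object at %u\n",
        pool.size(), stubIndex, globalObjectIndex);
    return 0;
}
</pre>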
<a id="trunkSourceJavaScriptCorejitBaselineJITPlancpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/BaselineJITPlan.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/BaselineJITPlan.cpp      2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/BaselineJITPlan.cpp 2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -50,7 +50,7 @@
</span><span class="cx"> 
</span><span class="cx"> CompilationResult BaselineJITPlan::finalize()
</span><span class="cx"> {
</span><del>-    CompilationResult result = m_jit.finalizeOnMainThread(m_codeBlock);
</del><ins>+    CompilationResult result = m_jit.finalizeOnMainThread();
</ins><span class="cx">     switch (result) {
</span><span class="cx">     case CompilationFailed:
</span><span class="cx">         CODEBLOCK_LOG_EVENT(m_codeBlock, "delayJITCompile", ("compilation failed"));
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorejitCCallHelperscpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/CCallHelpers.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/CCallHelpers.cpp 2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/CCallHelpers.cpp    2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -43,6 +43,16 @@
</span><span class="cx">     storePtr(scope, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, scope)));
</span><span class="cx"> }
</span><span class="cx"> 
</span><ins>+void CCallHelpers::logShadowChickenTailPacket(GPRReg shadowPacket, JSValueRegs thisRegs, GPRReg scope, CodeBlock* codeBlock, CallSiteIndex callSiteIndex)
+{
+    storePtr(GPRInfo::callFrameRegister, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, frame)));
+    storePtr(TrustedImmPtr(ShadowChicken::Packet::tailMarker()), Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, callee)));
+    storeValue(thisRegs, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, thisValue)));
+    storePtr(scope, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, scope)));
+    storePtr(TrustedImmPtr(codeBlock), Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, codeBlock)));
+    store32(TrustedImm32(callSiteIndex.bits()), Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, callSiteIndex)));
+}
+
</ins><span class="cx"> void CCallHelpers::ensureShadowChickenPacket(VM& vm, GPRReg shadowPacket, GPRReg scratch1NonArgGPR, GPRReg scratch2)
</span><span class="cx"> {
</span><span class="cx">     ShadowChicken* shadowChicken = vm.shadowChicken();
</span><span class="lines">@@ -62,28 +72,6 @@
</span><span class="cx">     storePtr(scratch2, Address(scratch1NonArgGPR));
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-
-template <typename CodeBlockType>
-void CCallHelpers::logShadowChickenTailPacketImpl(GPRReg shadowPacket, JSValueRegs thisRegs, GPRReg scope, CodeBlockType codeBlock, CallSiteIndex callSiteIndex)
-{
-    storePtr(GPRInfo::callFrameRegister, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, frame)));
-    storePtr(TrustedImmPtr(ShadowChicken::Packet::tailMarker()), Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, callee)));
-    storeValue(thisRegs, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, thisValue)));
-    storePtr(scope, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, scope)));
-    storePtr(codeBlock, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, codeBlock)));
-    store32(TrustedImm32(callSiteIndex.bits()), Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, callSiteIndex)));
-}
-
-void CCallHelpers::logShadowChickenTailPacket(GPRReg shadowPacket, JSValueRegs thisRegs, GPRReg scope, TrustedImmPtr codeBlock, CallSiteIndex callSiteIndex)
-{
-    logShadowChickenTailPacketImpl(shadowPacket, thisRegs, scope, codeBlock, callSiteIndex);
-}
-
-void CCallHelpers::logShadowChickenTailPacket(GPRReg shadowPacket, JSValueRegs thisRegs, GPRReg scope, GPRReg codeBlock, CallSiteIndex callSiteIndex)
-{
-    logShadowChickenTailPacketImpl(shadowPacket, thisRegs, scope, codeBlock, callSiteIndex);
-}
-
</del><span class="cx"> void CCallHelpers::emitJITCodeOver(MacroAssemblerCodePtr<JSInternalPtrTag> where, ScopedLambda<void(CCallHelpers&)> emitCode, const char* description)
</span><span class="cx"> {
</span><span class="cx">     CCallHelpers jit;
</span></span></pre></div>
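<p>The two logShadowChickenTailPacket overloads (TrustedImmPtr vs. GPRReg code block) are folded back into a single helper that takes a CodeBlock* known at JIT time and stores it as an immediate. A rough C++-level sketch of the field-by-field packet write the emitted code performs, assuming the simplified Packet struct and function below are hypothetical stand-ins rather than JSC's real ShadowChicken types:</p>
<pre>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Simplified stand-in for ShadowChicken::Packet; the field names mirror the
// ones the helper writes (frame, callee, thisValue, scope, codeBlock,
// callSiteIndex), but the layout here is purely illustrative.
struct Packet {
    void* frame;
    void* callee;          // set to a tail-marker sentinel for tail calls
    uint64_t thisValue;
    void* scope;
    const void* codeBlock; // now always a pointer known when the code is emitted
    uint32_t callSiteIndex;
};

// What the emitted stores amount to: one write per field. Because the merged
// helper takes CodeBlock* directly, the codeBlock field is baked into the
// generated code as a constant rather than read from a runtime register.
void logTailPacket(Packet& packet, void* frame, uint64_t thisValue, void* scope,
    const void* codeBlock, uint32_t callSiteIndex)
{
    static int tailMarkerSentinel; // stands in for ShadowChicken::Packet::tailMarker()
    packet.frame = frame;
    packet.callee = &tailMarkerSentinel;
    packet.thisValue = thisValue;
    packet.scope = scope;
    packet.codeBlock = codeBlock;
    packet.callSiteIndex = callSiteIndex;
}

int main()
{
    Packet packet {};
    int fakeCodeBlock = 0;
    logTailPacket(packet, nullptr, 0, nullptr, &fakeCodeBlock, 7);
    std::printf("callSiteIndex field at offset %zu; recorded index %u\n",
        offsetof(Packet, callSiteIndex), packet.callSiteIndex);
    return 0;
}
</pre>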
<a id="trunkSourceJavaScriptCorejitCCallHelpersh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/CCallHelpers.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/CCallHelpers.h   2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/CCallHelpers.h      2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -840,14 +840,7 @@
</span><span class="cx">     // These operations clobber all volatile registers. They assume that there is room on the top of
</span><span class="cx">     // stack to marshall call arguments.
</span><span class="cx">     void logShadowChickenProloguePacket(GPRReg shadowPacket, GPRReg scratch1, GPRReg scope);
</span><del>-
-private:
-    template <typename CodeBlockType>
-    void logShadowChickenTailPacketImpl(GPRReg shadowPacket, JSValueRegs thisRegs, GPRReg scope, CodeBlockType codeBlock, CallSiteIndex callSiteIndex);
-public:
-    void logShadowChickenTailPacket(GPRReg shadowPacket, JSValueRegs thisRegs, GPRReg scope, TrustedImmPtr codeBlock, CallSiteIndex callSiteIndex);
-    void logShadowChickenTailPacket(GPRReg shadowPacket, JSValueRegs thisRegs, GPRReg scope, GPRReg codeBlock, CallSiteIndex callSiteIndex);
-
</del><ins>+    void logShadowChickenTailPacket(GPRReg shadowPacket, JSValueRegs thisRegs, GPRReg scope, CodeBlock*, CallSiteIndex);
</ins><span class="cx">     // Leaves behind a pointer to the Packet we should write to in shadowPacket.
</span><span class="cx">     void ensureShadowChickenPacket(VM&, GPRReg shadowPacket, GPRReg scratch1NonArgGPR, GPRReg scratch2);
</span><span class="cx"> 
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorejitCallFrameShuffleDatacpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp 2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp    2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -37,12 +37,8 @@
</span><span class="cx"> 
</span><span class="cx"> void CallFrameShuffleData::setupCalleeSaveRegisters(CodeBlock* codeBlock)
</span><span class="cx"> {
</span><del>-    setupCalleeSaveRegisters(codeBlock->calleeSaveRegisters());
-}
-
-void CallFrameShuffleData::setupCalleeSaveRegisters(const RegisterAtOffsetList* registerSaveLocations)
-{
</del><span class="cx">     RegisterSet calleeSaveRegisters { RegisterSet::vmCalleeSaveRegisters() };
</span><ins>+    const RegisterAtOffsetList* registerSaveLocations = codeBlock->calleeSaveRegisters();
</ins><span class="cx"> 
</span><span class="cx">     for (size_t i = 0; i < registerSaveLocations->size(); ++i) {
</span><span class="cx">         RegisterAtOffset entry { registerSaveLocations->at(i) };
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorejitCallFrameShuffleDatah"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/CallFrameShuffleData.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/CallFrameShuffleData.h   2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/CallFrameShuffleData.h      2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -43,13 +43,11 @@
</span><span class="cx">     Vector<ValueRecovery> args;
</span><span class="cx">     unsigned numLocals { UINT_MAX };
</span><span class="cx">     unsigned numPassedArgs { UINT_MAX };
</span><del>-    unsigned numParameters { UINT_MAX }; // On our machine frame.
</del><span class="cx"> #if USE(JSVALUE64)
</span><span class="cx">     RegisterMap<ValueRecovery> registers;
</span><span class="cx">     GPRReg numberTagRegister { InvalidGPRReg };
</span><span class="cx"> 
</span><span class="cx">     void setupCalleeSaveRegisters(CodeBlock*);
</span><del>-    void setupCalleeSaveRegisters(const RegisterAtOffsetList*);
</del><span class="cx"> #endif
</span><span class="cx">     ValueRecovery callee;
</span><span class="cx"> };
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorejitCallFrameShufflercpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/CallFrameShuffler.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/CallFrameShuffler.cpp    2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/CallFrameShuffler.cpp       2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -38,13 +38,13 @@
</span><span class="cx">     : m_jit(jit)
</span><span class="cx">     , m_oldFrame(data.numLocals + CallerFrameAndPC::sizeInRegisters, nullptr)
</span><span class="cx">     , m_newFrame(data.args.size() + CallFrame::headerSizeInRegisters, nullptr)
</span><del>-    , m_alignedOldFrameSize(CallFrame::headerSizeInRegisters + roundArgumentCountToAlignFrame(data.numParameters))
</del><ins>+    , m_alignedOldFrameSize(CallFrame::headerSizeInRegisters
+        + roundArgumentCountToAlignFrame(jit.codeBlock()->numParameters()))
</ins><span class="cx">     , m_alignedNewFrameSize(CallFrame::headerSizeInRegisters
</span><span class="cx">         + roundArgumentCountToAlignFrame(data.args.size()))
</span><span class="cx">     , m_frameDelta(m_alignedNewFrameSize - m_alignedOldFrameSize)
</span><span class="cx">     , m_lockedRegisters(RegisterSet::allRegisters())
</span><span class="cx">     , m_numPassedArgs(data.numPassedArgs)
</span><del>-    , m_numParameters(data.numParameters)
</del><span class="cx"> {
</span><span class="cx">     // We are allowed all the usual registers...
</span><span class="cx">     for (unsigned i = GPRInfo::numberOfRegisters; i--; )
</span><span class="lines">@@ -421,7 +421,7 @@
</span><span class="cx">     m_jit.load32(MacroAssembler::Address(GPRInfo::callFrameRegister, CallFrameSlot::argumentCountIncludingThis * static_cast<int>(sizeof(Register)) + PayloadOffset), m_newFrameBase);
</span><span class="cx">     MacroAssembler::Jump argumentCountOK =
</span><span class="cx">         m_jit.branch32(MacroAssembler::BelowOrEqual, m_newFrameBase,
</span><del>-            MacroAssembler::TrustedImm32(m_numParameters));
</del><ins>+            MacroAssembler::TrustedImm32(m_jit.codeBlock()->numParameters()));
</ins><span class="cx">     m_jit.add32(MacroAssembler::TrustedImm32(stackAlignmentRegisters() - 1 + CallFrame::headerSizeInRegisters), m_newFrameBase);
</span><span class="cx">     m_jit.and32(MacroAssembler::TrustedImm32(-stackAlignmentRegisters()), m_newFrameBase);
</span><span class="cx">     m_jit.mul32(MacroAssembler::TrustedImm32(sizeof(Register)), m_newFrameBase, m_newFrameBase);
</span></span></pre></div>
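<p>With numParameters gone from CallFrameShuffleData, the shuffler sizes the old frame from jit.codeBlock()->numParameters() again. A small worked sketch of the frame-size arithmetic, assuming placeholder values for the header size and stack alignment and a guessed rounding rule for roundArgumentCountToAlignFrame; the real constants and helper come from CallFrame and JSC's stack-alignment utilities, not from this sketch:</p>
<pre>
#include <cstdio>

// Assumed placeholder values, not JSC's actual constants.
constexpr unsigned headerSizeInRegisters = 5;
constexpr unsigned stackAlignmentRegisters = 2;

// One plausible reading of roundArgumentCountToAlignFrame: pad the argument
// count so that header + arguments lands on a stack-alignment boundary.
unsigned roundArgumentCountToAlignFrame(unsigned argumentCount)
{
    unsigned total = headerSizeInRegisters + argumentCount;
    unsigned rounded = (total + stackAlignmentRegisters - 1) / stackAlignmentRegisters * stackAlignmentRegisters;
    return rounded - headerSizeInRegisters;
}

int main()
{
    // The shuffler sizes the old frame from the code block's declared
    // parameter count and the new frame from the outgoing argument count,
    // then shifts everything by the difference.
    unsigned numParameters = 3; // from codeBlock->numParameters() after this change
    unsigned outgoingArgs = 6;  // data.args.size()

    unsigned alignedOldFrameSize = headerSizeInRegisters + roundArgumentCountToAlignFrame(numParameters);
    unsigned alignedNewFrameSize = headerSizeInRegisters + roundArgumentCountToAlignFrame(outgoingArgs);
    int frameDelta = static_cast<int>(alignedNewFrameSize) - static_cast<int>(alignedOldFrameSize);

    std::printf("old=%u new=%u delta=%d registers\n", alignedOldFrameSize, alignedNewFrameSize, frameDelta);
    return 0;
}
</pre>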
<a id="trunkSourceJavaScriptCorejitCallFrameShufflerh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/CallFrameShuffler.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/CallFrameShuffler.h      2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/CallFrameShuffler.h 2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -104,7 +104,6 @@
</span><span class="cx">         CallFrameShuffleData data;
</span><span class="cx">         data.numLocals = numLocals();
</span><span class="cx">         data.numPassedArgs = m_numPassedArgs;
</span><del>-        data.numParameters = m_numParameters;
</del><span class="cx">         data.callee = getNew(VirtualRegister { CallFrameSlot::callee })->recovery();
</span><span class="cx">         data.args.resize(argCount());
</span><span class="cx">         for (size_t i = 0; i < argCount(); ++i)
</span><span class="lines">@@ -799,7 +798,6 @@
</span><span class="cx">     bool performSafeWrites();
</span><span class="cx">     
</span><span class="cx">     unsigned m_numPassedArgs { UINT_MAX };
</span><del>-    unsigned m_numParameters { UINT_MAX };
</del><span class="cx"> };
</span><span class="cx"> 
</span><span class="cx"> } // namespace JSC
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorejitJITcpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/JIT.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/JIT.cpp  2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/JIT.cpp     2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -69,7 +69,7 @@
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> JIT::JIT(VM& vm, CodeBlock* codeBlock, BytecodeIndex loopOSREntryBytecodeIndex)
</span><del>-    : JSInterfaceJIT(&vm, nullptr)
</del><ins>+    : JSInterfaceJIT(&vm, codeBlock)
</ins><span class="cx">     , m_interpreter(vm.interpreter)
</span><span class="cx">     , m_labels(codeBlock ? codeBlock->instructions().size() : 0)
</span><span class="cx">     , m_pcToCodeOriginMapBuilder(vm)
</span><span class="lines">@@ -77,9 +77,6 @@
</span><span class="cx">     , m_shouldEmitProfiling(false)
</span><span class="cx">     , m_loopOSREntryBytecodeIndex(loopOSREntryBytecodeIndex)
</span><span class="cx"> {
</span><del>-    m_globalObjectConstant = m_constantPool.add(JITConstantPool::Type::GlobalObject);
-    m_profiledCodeBlock = codeBlock;
-    m_unlinkedCodeBlock = codeBlock->unlinkedCodeBlock();
</del><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> JIT::~JIT()
</span><span class="lines">@@ -93,8 +90,8 @@
</span><span class="cx">         return;
</span><span class="cx"> 
</span><span class="cx">     JumpList skipOptimize;
</span><del>-    loadPtr(addressFor(CallFrameSlot::codeBlock), regT0);
-    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), Address(regT0, CodeBlock::offsetOfJITExecuteCounter())));
</del><ins>+    
+    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
</ins><span class="cx">     ASSERT(!m_bytecodeIndex.offset());
</span><span class="cx"> 
</span><span class="cx">     copyLLIntBaselineCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(vm().topEntryFrame);
</span><span class="lines">@@ -116,20 +113,15 @@
</span><span class="cx">     addSlowCase(branch8(NotEqual, AbsoluteAddress(set->addressOfState()), TrustedImm32(IsInvalidated)));
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-void JIT::emitNotifyWriteWatchpoint(GPRReg pointerToSet)
</del><ins>+void JIT::emitNotifyWrite(GPRReg pointerToSet)
</ins><span class="cx"> {
</span><del>-    auto ok = branchTestPtr(Zero, pointerToSet);
</del><span class="cx">     addSlowCase(branch8(NotEqual, Address(pointerToSet, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated)));
</span><del>-    ok.link(this);
</del><span class="cx"> }
</span><span class="cx"> 
</span><del>-void JIT::emitVarReadOnlyCheck(ResolveType resolveType, GPRReg scratchGPR)
</del><ins>+void JIT::emitVarReadOnlyCheck(ResolveType resolveType)
</ins><span class="cx"> {
</span><del>-    if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks) {
-        loadGlobalObject(scratchGPR);
-        loadPtr(Address(scratchGPR, OBJECT_OFFSETOF(JSGlobalObject, m_varReadOnlyWatchpoint)), scratchGPR);
-        addSlowCase(branch8(Equal, Address(scratchGPR, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated)));
-    }
</del><ins>+    if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
+        addSlowCase(branch8(Equal, AbsoluteAddress(m_codeBlock->globalObject()->varReadOnlyWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated)));
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void JIT::assertStackPointerOffset()
</span><span class="lines">@@ -137,18 +129,12 @@
</span><span class="cx">     if (!ASSERT_ENABLED)
</span><span class="cx">         return;
</span><span class="cx">     
</span><del>-    addPtr(TrustedImm32(stackPointerOffsetFor(m_unlinkedCodeBlock) * sizeof(Register)), callFrameRegister, regT0);
</del><ins>+    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0);
</ins><span class="cx">     Jump ok = branchPtr(Equal, regT0, stackPointerRegister);
</span><span class="cx">     breakpoint();
</span><span class="cx">     ok.link(this);
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-void JIT::resetSP()
-{
-    addPtr(TrustedImm32(stackPointerOffsetFor(m_unlinkedCodeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
-    checkStackPointerAlignment();
-}
-
</del><span class="cx"> #define NEXT_OPCODE(name) \
</span><span class="cx">     m_bytecodeIndex = BytecodeIndex(m_bytecodeIndex.offset() + currentInstruction->size()); \
</span><span class="cx">     break;
</span><span class="lines">@@ -195,45 +181,62 @@
</span><span class="cx">     slowPathCall.call();
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-void JIT::emitPutCodeBlockToFrameInPrologue(GPRReg result)
-{
-    RELEASE_ASSERT(m_unlinkedCodeBlock->codeType() == FunctionCode);
-    emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, result);
-    loadPtr(Address(result, JSFunction::offsetOfExecutableOrRareData()), result);
-    auto hasExecutable = branchTestPtr(Zero, result, CCallHelpers::TrustedImm32(JSFunction::rareDataTag));
-    loadPtr(Address(result, FunctionRareData::offsetOfExecutable() - JSFunction::rareDataTag), result);
-    hasExecutable.link(this);
-    if (m_unlinkedCodeBlock->isConstructor())
-        loadPtr(Address(result, FunctionExecutable::offsetOfCodeBlockForConstruct()), result);
-    else
-        loadPtr(Address(result, FunctionExecutable::offsetOfCodeBlockForCall()), result);
-
-    loadPtr(Address(result, ExecutableToCodeBlockEdge::offsetOfCodeBlock()), result);
-    emitPutToCallFrameHeader(result, CallFrameSlot::codeBlock);
-
-#if ASSERT_ENABLED
-    probeDebug([=] (Probe::Context& ctx) {
-        CodeBlock* codeBlock = ctx.fp<CallFrame*>()->codeBlock();
-        RELEASE_ASSERT(codeBlock->jitType() == JITType::BaselineJIT);
-    });
-#endif
-}
-
</del><span class="cx"> void JIT::privateCompileMainPass()
</span><span class="cx"> {
</span><span class="cx">     if (JITInternal::verbose)
</span><del>-        dataLog("Compiling ", *m_profiledCodeBlock, "\n");
</del><ins>+        dataLog("Compiling ", *m_codeBlock, "\n");
</ins><span class="cx">     
</span><span class="cx">     jitAssertTagsInPlace();
</span><span class="cx">     jitAssertArgumentCountSane();
</span><span class="cx">     
</span><del>-    auto& instructions = m_unlinkedCodeBlock->instructions();
-    unsigned instructionCount = m_unlinkedCodeBlock->instructions().size();
</del><ins>+    auto& instructions = m_codeBlock->instructions();
+    unsigned instructionCount = m_codeBlock->instructions().size();
</ins><span class="cx"> 
</span><span class="cx">     m_callLinkInfoIndex = 0;
</span><span class="cx"> 
</span><ins>+    VM& vm = m_codeBlock->vm();
</ins><span class="cx">     BytecodeIndex startBytecodeIndex(0);
</span><ins>+    if (m_loopOSREntryBytecodeIndex && (m_codeBlock->inherits<ProgramCodeBlock>(vm) || m_codeBlock->inherits<ModuleProgramCodeBlock>(vm))) {
+        // We can only do this optimization because we execute ProgramCodeBlocks exactly once.
+        // This optimization would be invalid otherwise. When the LLInt determines it wants to
+        // do OSR entry into the baseline JIT in a loop, it will pass in the bytecode offset it
+        // was executing at when it kicked off our compilation. We only need to compile code for
+        // anything reachable from that bytecode offset.
</ins><span class="cx"> 
</span><ins>+        // We only bother building the bytecode graph if it could save time and executable
+        // memory. We pick an arbitrary offset where we deem this is profitable.
+        if (m_loopOSREntryBytecodeIndex.offset() >= 200) {
+            // As a simplification, we don't find all bytecode ranges that are unreachable.
+            // Instead, we just find the minimum bytecode offset that is reachable, and
+            // compile code from that bytecode offset onwards.
+
+            BytecodeGraph graph(m_codeBlock, m_codeBlock->instructions());
+            BytecodeBasicBlock* block = graph.findBasicBlockForBytecodeOffset(m_loopOSREntryBytecodeIndex.offset());
+            RELEASE_ASSERT(block);
+
+            GraphNodeWorklist<BytecodeBasicBlock*> worklist;
+            startBytecodeIndex = BytecodeIndex();
+            worklist.push(block);
+
+            while (BytecodeBasicBlock* block = worklist.pop()) {
+                startBytecodeIndex = BytecodeIndex(std::min(startBytecodeIndex.offset(), block->leaderOffset()));
+                for (unsigned successorIndex : block->successors())
+                    worklist.push(&graph[successorIndex]);
+
+                // Also add catch blocks for bytecodes that throw.
+                if (m_codeBlock->numberOfExceptionHandlers()) {
+                    for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) {
+                        auto instruction = instructions.at(bytecodeOffset);
+                        if (auto* handler = m_codeBlock->handlerForBytecodeIndex(BytecodeIndex(bytecodeOffset)))
+                            worklist.push(graph.findBasicBlockWithLeaderOffset(handler->target));
+
+                        bytecodeOffset += instruction->size();
+                    }
+                }
+            }
+        }
+    }
+
</ins><span class="cx">     m_bytecodeCountHavingSlowCase = 0;
</span><span class="cx">     for (m_bytecodeIndex = BytecodeIndex(0); m_bytecodeIndex.offset() < instructionCount; ) {
</span><span class="cx">         unsigned previousSlowCasesSize = m_slowCases.size();
</span><span class="lines">@@ -275,15 +278,12 @@
</span><span class="cx"> 
</span><span class="cx">         unsigned bytecodeOffset = m_bytecodeIndex.offset();
</span><span class="cx">         if (UNLIKELY(Options::traceBaselineJITExecution())) {
</span><ins>+            CodeBlock* codeBlock = m_codeBlock;
</ins><span class="cx">             probeDebug([=] (Probe::Context& ctx) {
</span><del>-                CodeBlock* codeBlock = ctx.fp<CallFrame*>()->codeBlock();
</del><span class="cx">                 dataLogLn("JIT [", bytecodeOffset, "] ", opcodeNames[opcodeID], " cfr ", RawPointer(ctx.fp()), " @ ", codeBlock);
</span><span class="cx">             });
</span><span class="cx">         }
</span><span class="cx"> 
</span><del>-        if (opcodeID != op_catch)
-            assertStackPointerOffset();
-
</del><span class="cx">         switch (opcodeID) {
</span><span class="cx">         DEFINE_SLOW_OP(less)
</span><span class="cx">         DEFINE_SLOW_OP(lesseq)
</span><span class="lines">@@ -527,7 +527,7 @@
</span><span class="cx"> 
</span><span class="cx">         BytecodeIndex firstTo = m_bytecodeIndex;
</span><span class="cx"> 
</span><del>-        const Instruction* currentInstruction = m_unlinkedCodeBlock->instructions().at(m_bytecodeIndex).ptr();
</del><ins>+        const Instruction* currentInstruction = m_codeBlock->instructions().at(m_bytecodeIndex).ptr();
</ins><span class="cx">         
</span><span class="cx">         if (JITInternal::verbose)
</span><span class="cx">             dataLogLn("Baseline JIT emitting slow code for ", m_bytecodeIndex, " at offset ", (long)debugOffset());
</span><span class="lines">@@ -545,8 +545,8 @@
</span><span class="cx"> 
</span><span class="cx">         if (UNLIKELY(Options::traceBaselineJITExecution())) {
</span><span class="cx">             unsigned bytecodeOffset = m_bytecodeIndex.offset();
</span><ins>+            CodeBlock* codeBlock = m_codeBlock;
</ins><span class="cx">             probeDebug([=] (Probe::Context& ctx) {
</span><del>-                CodeBlock* codeBlock = ctx.fp<CallFrame*>()->codeBlock();
</del><span class="cx">                 dataLogLn("JIT [", bytecodeOffset, "] SLOW ", opcodeNames[opcodeID], " cfr ", RawPointer(ctx.fp()), " @ ", codeBlock);
</span><span class="cx">             });
</span><span class="cx">         }
</span><span class="lines">@@ -674,30 +674,19 @@
</span><span class="cx"> #endif
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-void JIT::emitMaterializeMetadataAndConstantPoolRegisters()
-{
-    loadPtr(addressFor(CallFrameSlot::codeBlock), regT0);
-    loadPtr(Address(regT0, CodeBlock::offsetOfMetadataTable()), s_metadataGPR);
-    loadPtr(Address(regT0, CodeBlock::offsetOfJITData()), regT0);
-    loadPtr(Address(regT0, CodeBlock::JITData::offsetOfJITConstantPool()), s_constantsGPR);
-}
-
-void JIT::emitRestoreCalleeSaves()
-{
-    Base::emitRestoreCalleeSavesFor(&RegisterAtOffsetList::llintBaselineCalleeSaveRegisters());
-}
-
</del><span class="cx"> void JIT::compileAndLinkWithoutFinalizing(JITCompilationEffort effort)
</span><span class="cx"> {
</span><del>-    DFG::CapabilityLevel level = m_profiledCodeBlock->capabilityLevel();
</del><ins>+    DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
</ins><span class="cx">     switch (level) {
</span><span class="cx">     case DFG::CannotCompile:
</span><span class="cx">         m_canBeOptimized = false;
</span><ins>+        m_canBeOptimizedOrInlined = false;
</ins><span class="cx">         m_shouldEmitProfiling = false;
</span><span class="cx">         break;
</span><span class="cx">     case DFG::CanCompile:
</span><span class="cx">     case DFG::CanCompileAndInline:
</span><span class="cx">         m_canBeOptimized = true;
</span><ins>+        m_canBeOptimizedOrInlined = true;
</ins><span class="cx">         m_shouldEmitProfiling = true;
</span><span class="cx">         break;
</span><span class="cx">     default:
</span><span class="lines">@@ -704,25 +693,36 @@
</span><span class="cx">         RELEASE_ASSERT_NOT_REACHED();
</span><span class="cx">         break;
</span><span class="cx">     }
</span><ins>+    
+    switch (m_codeBlock->codeType()) {
+    case GlobalCode:
+    case ModuleCode:
+    case EvalCode:
+        m_codeBlock->m_shouldAlwaysBeInlined = false;
+        break;
+    case FunctionCode:
+        // We could have already set it to false because we detected an uninlineable call.
+        // Don't override that observation.
+        m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
+        break;
+    }
</ins><span class="cx"> 
</span><del>-    if (m_unlinkedCodeBlock->numberOfUnlinkedSwitchJumpTables() || m_unlinkedCodeBlock->numberOfUnlinkedStringSwitchJumpTables()) {
-        if (m_unlinkedCodeBlock->numberOfUnlinkedSwitchJumpTables())
-            m_switchJumpTables = FixedVector<SimpleJumpTable>(m_unlinkedCodeBlock->numberOfUnlinkedSwitchJumpTables());
-        if (m_unlinkedCodeBlock->numberOfUnlinkedStringSwitchJumpTables())
-            m_stringSwitchJumpTables = FixedVector<StringJumpTable>(m_unlinkedCodeBlock->numberOfUnlinkedStringSwitchJumpTables());
</del><ins>+    if (m_codeBlock->numberOfUnlinkedSwitchJumpTables() || m_codeBlock->numberOfUnlinkedStringSwitchJumpTables()) {
+        ConcurrentJSLocker locker(m_codeBlock->m_lock);
+        if (m_codeBlock->numberOfUnlinkedSwitchJumpTables())
+            m_codeBlock->ensureJITData(locker).m_switchJumpTables = FixedVector<SimpleJumpTable>(m_codeBlock->numberOfUnlinkedSwitchJumpTables());
+        if (m_codeBlock->numberOfUnlinkedStringSwitchJumpTables())
+            m_codeBlock->ensureJITData(locker).m_stringSwitchJumpTables = FixedVector<StringJumpTable>(m_codeBlock->numberOfUnlinkedStringSwitchJumpTables());
</ins><span class="cx">     }
</span><span class="cx"> 
</span><del>-    if (UNLIKELY(Options::dumpDisassembly() || (m_vm->m_perBytecodeProfiler && Options::disassembleBaselineForProfiler()))) {
-        // FIXME: build a disassembler off of UnlinkedCodeBlock.
-        m_disassembler = makeUnique<JITDisassembler>(m_profiledCodeBlock);
-    }
</del><ins>+    if (UNLIKELY(Options::dumpDisassembly() || (m_vm->m_perBytecodeProfiler && Options::disassembleBaselineForProfiler())))
+        m_disassembler = makeUnique<JITDisassembler>(m_codeBlock);
</ins><span class="cx">     if (UNLIKELY(m_vm->m_perBytecodeProfiler)) {
</span><del>-        // FIXME: build profiler disassembler off UnlinkedCodeBlock.
</del><span class="cx">         m_compilation = adoptRef(
</span><span class="cx">             new Profiler::Compilation(
</span><del>-                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_profiledCodeBlock),
</del><ins>+                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
</ins><span class="cx">                 Profiler::Baseline));
</span><del>-        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_profiledCodeBlock);
</del><ins>+        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
</ins><span class="cx">     }
</span><span class="cx">     
</span><span class="cx">     m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(BytecodeIndex(0)));
</span><span class="lines">@@ -742,12 +742,11 @@
</span><span class="cx">         nop();
</span><span class="cx"> 
</span><span class="cx">     emitFunctionPrologue();
</span><del>-    if (m_unlinkedCodeBlock->codeType() == FunctionCode)
-        emitPutCodeBlockToFrameInPrologue();
</del><ins>+    emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);
</ins><span class="cx"> 
</span><span class="cx">     Label beginLabel(this);
</span><span class="cx"> 
</span><del>-    int frameTopOffset = stackPointerOffsetFor(m_unlinkedCodeBlock) * sizeof(Register);
</del><ins>+    int frameTopOffset = stackPointerOffsetFor(m_codeBlock) * sizeof(Register);
</ins><span class="cx">     unsigned maxFrameSize = -frameTopOffset;
</span><span class="cx">     addPtr(TrustedImm32(frameTopOffset), callFrameRegister, regT1);
</span><span class="cx">     JumpList stackOverflow;
</span><span class="lines">@@ -758,20 +757,16 @@
</span><span class="cx">     move(regT1, stackPointerRegister);
</span><span class="cx">     checkStackPointerAlignment();
</span><span class="cx"> 
</span><del>-    emitSaveCalleeSavesFor(&RegisterAtOffsetList::llintBaselineCalleeSaveRegisters());
</del><ins>+    emitSaveCalleeSaves();
</ins><span class="cx">     emitMaterializeTagCheckRegisters();
</span><del>-    emitMaterializeMetadataAndConstantPoolRegisters();
</del><span class="cx"> 
</span><del>-    if (m_unlinkedCodeBlock->codeType() == FunctionCode) {
</del><ins>+    if (m_codeBlock->codeType() == FunctionCode) {
</ins><span class="cx">         ASSERT(!m_bytecodeIndex);
</span><del>-        if (shouldEmitProfiling() && (!m_unlinkedCodeBlock->isConstructor() || m_unlinkedCodeBlock->numParameters() > 1)) {
-            emitGetFromCallFrameHeaderPtr(CallFrameSlot::codeBlock, regT2);
-            loadPtr(Address(regT2, CodeBlock::offsetOfArgumentValueProfiles() + FixedVector<ValueProfile>::offsetOfStorage()), regT2);
-
-            for (unsigned argument = 0; argument < m_unlinkedCodeBlock->numParameters(); ++argument) {
</del><ins>+        if (shouldEmitProfiling()) {
+            for (unsigned argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
</ins><span class="cx">                 // If this is a constructor, then we want to put in a dummy profiling site (to
</span><span class="cx">                 // keep things consistent) but we don't actually want to record the dummy value.
</span><del>-                if (m_unlinkedCodeBlock->isConstructor() && !argument)
</del><ins>+                if (m_codeBlock->isConstructor() && !argument)
</ins><span class="cx">                     continue;
</span><span class="cx">                 int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
</span><span class="cx"> #if USE(JSVALUE64)
</span><span class="lines">@@ -782,12 +777,12 @@
</span><span class="cx">                 load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultRegs.payloadGPR());
</span><span class="cx">                 load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultRegs.tagGPR());
</span><span class="cx"> #endif
</span><del>-                storeValue(resultRegs, Address(regT2, argument * sizeof(ValueProfile) + ValueProfile::offsetOfFirstBucket()));
</del><ins>+                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument), resultRegs);
</ins><span class="cx">             }
</span><span class="cx">         }
</span><span class="cx">     }
</span><span class="cx">     
</span><del>-    RELEASE_ASSERT(!JITCode::isJIT(m_profiledCodeBlock->jitType()));
</del><ins>+    RELEASE_ASSERT(!JITCode::isJIT(m_codeBlock->jitType()));
</ins><span class="cx"> 
</span><span class="cx">     if (UNLIKELY(sizeMarker))
</span><span class="cx">         m_vm->jitSizeStatistics->markEnd(WTFMove(*sizeMarker), *this);
</span><span class="lines">@@ -804,27 +799,24 @@
</span><span class="cx">     m_bytecodeIndex = BytecodeIndex(0);
</span><span class="cx">     if (maxFrameExtentForSlowPathCall)
</span><span class="cx">         addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
</span><del>-    emitGetFromCallFrameHeaderPtr(CallFrameSlot::codeBlock, regT0);
-    callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, regT0);
</del><ins>+    callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
</ins><span class="cx"> 
</span><span class="cx">     // If the number of parameters is 1, we never require arity fixup.
</span><del>-    bool requiresArityFixup = m_unlinkedCodeBlock->numParameters() != 1;
-    if (m_unlinkedCodeBlock->codeType() == FunctionCode && requiresArityFixup) {
</del><ins>+    bool requiresArityFixup = m_codeBlock->m_numParameters != 1;
+    if (m_codeBlock->codeType() == FunctionCode && requiresArityFixup) {
</ins><span class="cx">         m_arityCheck = label();
</span><del>-
</del><ins>+        store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
</ins><span class="cx">         emitFunctionPrologue();
</span><del>-        emitPutCodeBlockToFrameInPrologue(regT0);
-        store8(TrustedImm32(0), Address(regT0, CodeBlock::offsetOfShouldAlwaysBeInlined()));
</del><ins>+        emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);
</ins><span class="cx"> 
</span><span class="cx">         load32(payloadFor(CallFrameSlot::argumentCountIncludingThis), regT1);
</span><del>-        branch32(AboveOrEqual, regT1, TrustedImm32(m_unlinkedCodeBlock->numParameters())).linkTo(beginLabel, this);
</del><ins>+        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);
</ins><span class="cx"> 
</span><span class="cx">         m_bytecodeIndex = BytecodeIndex(0);
</span><span class="cx"> 
</span><span class="cx">         if (maxFrameExtentForSlowPathCall)
</span><span class="cx">             addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
</span><del>-        loadPtr(Address(regT0, CodeBlock::offsetOfGlobalObject()), argumentGPR0);
-        callOperationWithCallFrameRollbackOnException(m_unlinkedCodeBlock->isConstructor() ? operationConstructArityCheck : operationCallArityCheck, argumentGPR0);
</del><ins>+        callOperationWithCallFrameRollbackOnException(m_codeBlock->isConstructor() ? operationConstructArityCheck : operationCallArityCheck, m_codeBlock->globalObject());
</ins><span class="cx">         if (maxFrameExtentForSlowPathCall)
</span><span class="cx">             addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
</span><span class="cx">         branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this);
</span><span class="lines">@@ -847,7 +839,7 @@
</span><span class="cx">         m_disassembler->setEndOfCode(label());
</span><span class="cx">     m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
</span><span class="cx"> 
</span><del>-    m_linkBuffer = std::unique_ptr<LinkBuffer>(new LinkBuffer(*this, m_unlinkedCodeBlock, LinkBuffer::Profile::BaselineJIT, effort));
</del><ins>+    m_linkBuffer = std::unique_ptr<LinkBuffer>(new LinkBuffer(*this, m_codeBlock, LinkBuffer::Profile::BaselineJIT, effort));
</ins><span class="cx">     link();
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -866,8 +858,8 @@
</span><span class="cx">         switch (record.type) {
</span><span class="cx">         case SwitchRecord::Immediate:
</span><span class="cx">         case SwitchRecord::Character: {
</span><del>-            const UnlinkedSimpleJumpTable& unlinkedTable = m_unlinkedCodeBlock->unlinkedSwitchJumpTable(tableIndex);
-            SimpleJumpTable& linkedTable = m_switchJumpTables[tableIndex];
</del><ins>+            const UnlinkedSimpleJumpTable& unlinkedTable = m_codeBlock->unlinkedSwitchJumpTable(tableIndex);
+            SimpleJumpTable& linkedTable = m_codeBlock->switchJumpTable(tableIndex);
</ins><span class="cx">             linkedTable.m_ctiDefault = patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + record.defaultOffset]);
</span><span class="cx">             for (unsigned j = 0; j < unlinkedTable.m_branchOffsets.size(); ++j) {
</span><span class="cx">                 unsigned offset = unlinkedTable.m_branchOffsets[j];
</span><span class="lines">@@ -879,8 +871,8 @@
</span><span class="cx">         }
</span><span class="cx"> 
</span><span class="cx">         case SwitchRecord::String: {
</span><del>-            const UnlinkedStringJumpTable& unlinkedTable = m_unlinkedCodeBlock->unlinkedStringSwitchJumpTable(tableIndex);
-            StringJumpTable& linkedTable = m_stringSwitchJumpTables[tableIndex];
</del><ins>+            const UnlinkedStringJumpTable& unlinkedTable = m_codeBlock->unlinkedStringSwitchJumpTable(tableIndex);
+            StringJumpTable& linkedTable = m_codeBlock->stringSwitchJumpTable(tableIndex);
</ins><span class="cx">             auto ctiDefault = patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + record.defaultOffset]);
</span><span class="cx">             for (auto& location : unlinkedTable.m_offsetTable.values()) {
</span><span class="cx">                 unsigned offset = location.m_branchOffset;
</span><span class="lines">@@ -914,27 +906,6 @@
</span><span class="cx">             patchBuffer.link(record.from, record.callee);
</span><span class="cx">     }
</span><span class="cx"> 
</span><del>-#if USE(JSVALUE64)
-    auto finalizeICs = [&] (auto& generators) {
-        for (auto& gen : generators) {
-            gen.m_unlinkedStubInfo->start = patchBuffer.locationOf<JITStubRoutinePtrTag>(gen.m_start);
-            gen.m_unlinkedStubInfo->doneLocation = patchBuffer.locationOf<JSInternalPtrTag>(gen.m_done);
-            gen.m_unlinkedStubInfo->slowPathStartLocation = patchBuffer.locationOf<JITStubRoutinePtrTag>(gen.m_slowPathBegin);
-        }
-    };
-
-    finalizeICs(m_getByIds);
-    finalizeICs(m_getByVals);
-    finalizeICs(m_getByIdsWithThis);
-    finalizeICs(m_putByIds);
-    finalizeICs(m_putByVals);
-    finalizeICs(m_delByIds);
-    finalizeICs(m_delByVals);
-    finalizeICs(m_inByIds);
-    finalizeICs(m_inByVals);
-    finalizeICs(m_instanceOfs);
-    finalizeICs(m_privateBrandAccesses);
-#else
</del><span class="cx">     finalizeInlineCaches(m_getByIds, patchBuffer);
</span><span class="cx">     finalizeInlineCaches(m_getByVals, patchBuffer);
</span><span class="cx">     finalizeInlineCaches(m_getByIdsWithThis, patchBuffer);
</span><span class="lines">@@ -946,25 +917,21 @@
</span><span class="cx">     finalizeInlineCaches(m_inByVals, patchBuffer);
</span><span class="cx">     finalizeInlineCaches(m_instanceOfs, patchBuffer);
</span><span class="cx">     finalizeInlineCaches(m_privateBrandAccesses, patchBuffer);
</span><del>-#endif
</del><span class="cx"> 
</span><span class="cx">     for (auto& compilationInfo : m_callCompilationInfo) {
</span><del>-#if USE(JSVALUE64)
-        UnlinkedCallLinkInfo& info = *compilationInfo.unlinkedCallLinkInfo;
-        info.doneLocation = patchBuffer.locationOf<JSInternalPtrTag>(compilationInfo.doneLocation);
-#else
</del><span class="cx">         CallLinkInfo& info = *compilationInfo.callLinkInfo;
</span><span class="cx">         info.setCodeLocations(
</span><span class="cx">             patchBuffer.locationOf<JSInternalPtrTag>(compilationInfo.slowPathStart),
</span><span class="cx">             patchBuffer.locationOf<JSInternalPtrTag>(compilationInfo.doneLocation));
</span><del>-#endif
-
</del><span class="cx">     }
</span><span class="cx"> 
</span><del>-    JITCodeMapBuilder jitCodeMapBuilder;
-    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
-        if (m_labels[bytecodeOffset].isSet())
-            jitCodeMapBuilder.append(BytecodeIndex(bytecodeOffset), patchBuffer.locationOf<JSEntryPtrTag>(m_labels[bytecodeOffset]));
</del><ins>+    {
+        JITCodeMapBuilder jitCodeMapBuilder;
+        for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
+            if (m_labels[bytecodeOffset].isSet())
+                jitCodeMapBuilder.append(BytecodeIndex(bytecodeOffset), patchBuffer.locationOf<JSEntryPtrTag>(m_labels[bytecodeOffset]));
+        }
+        m_codeBlock->setJITCodeMap(jitCodeMapBuilder.finalize());
</ins><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx">     if (UNLIKELY(Options::dumpDisassembly())) {
</span><span class="lines">@@ -973,42 +940,26 @@
</span><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx">     if (UNLIKELY(m_compilation)) {
</span><del>-        // FIXME: should we make the bytecode profiler know about UnlinkedCodeBlock?
</del><span class="cx">         if (Options::disassembleBaselineForProfiler())
</span><span class="cx">             m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
</span><del>-        m_vm->m_perBytecodeProfiler->addCompilation(m_profiledCodeBlock, *m_compilation);
</del><ins>+        m_vm->m_perBytecodeProfiler->addCompilation(m_codeBlock, *m_compilation);
</ins><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx">     if (m_pcToCodeOriginMapBuilder.didBuildMapping())
</span><span class="cx">         m_pcToCodeOriginMap = makeUnique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), patchBuffer);
</span><span class="cx">     
</span><del>-    // FIXME: Make a version of CodeBlockWithJITType that knows about UnlinkedCodeBlock.
</del><span class="cx">     CodeRef<JSEntryPtrTag> result = FINALIZE_CODE(
</span><span class="cx">         patchBuffer, JSEntryPtrTag,
</span><del>-        "Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_profiledCodeBlock, JITType::BaselineJIT)).data());
</del><ins>+        "Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITType::BaselineJIT)).data());
</ins><span class="cx">     
</span><span class="cx">     MacroAssemblerCodePtr<JSEntryPtrTag> withArityCheck = patchBuffer.locationOf<JSEntryPtrTag>(m_arityCheck);
</span><del>-    m_jitCode = adoptRef(*new BaselineJITCode(result, withArityCheck));
</del><ins>+    m_jitCode = adoptRef(*new DirectJITCode(result, withArityCheck, JITType::BaselineJIT));
</ins><span class="cx"> 
</span><del>-    m_jitCode->m_unlinkedCalls = WTFMove(m_unlinkedCalls);
-    m_jitCode->m_evalCallLinkInfos = WTFMove(m_evalCallLinkInfos);
-    m_jitCode->m_unlinkedStubInfos = WTFMove(m_unlinkedStubInfos);
-    m_jitCode->m_switchJumpTables = WTFMove(m_switchJumpTables);
-    m_jitCode->m_stringSwitchJumpTables = WTFMove(m_stringSwitchJumpTables);
-    m_jitCode->m_jitCodeMap = jitCodeMapBuilder.finalize();
-    m_jitCode->adoptMathICs(m_mathICs);
-    m_jitCode->m_constantPool = WTFMove(m_constantPool);
-#if USE(JSVALUE64)
-    m_jitCode->m_isShareable = m_isShareable;
-#else
-    m_jitCode->m_isShareable = false;
-#endif
-
</del><span class="cx">     if (JITInternal::verbose)
</span><del>-        dataLogF("JIT generated code for %p at [%p, %p).\n", m_unlinkedCodeBlock, result.executableMemory()->start().untaggedPtr(), result.executableMemory()->end().untaggedPtr());
</del><ins>+        dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start().untaggedPtr(), result.executableMemory()->end().untaggedPtr());
</ins><span class="cx"> }
</span><span class="cx"> 
</span><del>-CompilationResult JIT::finalizeOnMainThread(CodeBlock* codeBlock)
</del><ins>+CompilationResult JIT::finalizeOnMainThread()
</ins><span class="cx"> {
</span><span class="cx">     RELEASE_ASSERT(!isCompilationThread());
</span><span class="cx"> 
</span><span class="lines">@@ -1017,14 +968,25 @@
</span><span class="cx"> 
</span><span class="cx">     m_linkBuffer->runMainThreadFinalizationTasks();
</span><span class="cx"> 
</span><ins>+    {
+        ConcurrentJSLocker locker(m_codeBlock->m_lock);
+        m_codeBlock->shrinkToFit(locker, CodeBlock::ShrinkMode::LateShrink);
+    }
+
+    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
+        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
+        // FIXME: <rdar://problem/39433318>.
+        handler.nativeCode = m_codeBlock->jitCodeMap().find(BytecodeIndex(handler.target)).retagged<ExceptionHandlerPtrTag>();
+    }
+
</ins><span class="cx">     if (m_pcToCodeOriginMap)
</span><del>-        m_jitCode->m_pcToCodeOriginMap = WTFMove(m_pcToCodeOriginMap);
</del><ins>+        m_codeBlock->setPCToCodeOriginMap(WTFMove(m_pcToCodeOriginMap));
</ins><span class="cx"> 
</span><span class="cx">     m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->add(
</span><span class="cx">         static_cast<double>(m_jitCode->size()) /
</span><del>-        static_cast<double>(m_unlinkedCodeBlock->instructionsSize()));
</del><ins>+        static_cast<double>(m_codeBlock->instructionsSize()));
</ins><span class="cx"> 
</span><del>-    codeBlock->setupWithUnlinkedBaselineCode(m_jitCode.releaseNonNull());
</del><ins>+    m_codeBlock->setJITCode(m_jitCode.releaseNonNull());
</ins><span class="cx"> 
</span><span class="cx">     return CompilationSuccessful;
</span><span class="cx"> }
</span><span class="lines">@@ -1036,11 +998,11 @@
</span><span class="cx">     return m_linkBuffer->size();
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-CompilationResult JIT::privateCompile(CodeBlock* codeBlock, JITCompilationEffort effort)
</del><ins>+CompilationResult JIT::privateCompile(JITCompilationEffort effort)
</ins><span class="cx"> {
</span><span class="cx">     doMainThreadPreparationBeforeCompile();
</span><span class="cx">     compileAndLinkWithoutFinalizing(effort);
</span><del>-    return finalizeOnMainThread(codeBlock);
</del><ins>+    return finalizeOnMainThread();
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void JIT::privateCompileExceptionHandlers()
</span><span class="lines">@@ -1080,7 +1042,7 @@
</span><span class="cx">         m_vm->typeProfilerLog()->processLogEntries(*m_vm, "Preparing for JIT compilation."_s);
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-unsigned JIT::frameRegisterCountFor(UnlinkedCodeBlock* codeBlock)
</del><ins>+unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
</ins><span class="cx"> {
</span><span class="cx">     ASSERT(static_cast<unsigned>(codeBlock->numCalleeLocals()) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->numCalleeLocals())));
</span><span class="cx"> 
</span><span class="lines">@@ -1087,21 +1049,11 @@
</span><span class="cx">     return roundLocalRegisterCountForFramePointerOffset(codeBlock->numCalleeLocals() + maxFrameExtentForSlowPathCallInRegisters);
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
</del><ins>+int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
</ins><span class="cx"> {
</span><del>-    return frameRegisterCountFor(codeBlock->unlinkedCodeBlock());
-}
-
-int JIT::stackPointerOffsetFor(UnlinkedCodeBlock* codeBlock)
-{
</del><span class="cx">     return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
-{
-    return stackPointerOffsetFor(codeBlock->unlinkedCodeBlock());
-}
-
</del><span class="cx"> HashMap<CString, Seconds> JIT::compileTimeStats()
</span><span class="cx"> {
</span><span class="cx">     HashMap<CString, Seconds> result;
</span></span></pre></div>
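<p>The block inserted near the top of privateCompileMainPass restores the loop OSR entry shortcut: because program and module code runs exactly once, the baseline JIT only needs to compile bytecode reachable from the loop it is entering, so it walks the bytecode basic-block graph from that point and starts compilation at the smallest reachable leader offset. A self-contained sketch of that worklist walk, assuming the Block struct, the function name, and the offsets are invented for illustration; the real code uses BytecodeGraph/BytecodeBasicBlock and also enqueues exception-handler targets, which this sketch omits:</p>
<pre>
#include <algorithm>
#include <cstdio>
#include <queue>
#include <unordered_set>
#include <vector>

// Hypothetical basic-block record; only the leader offset and successor
// edges matter for finding where compilation must begin.
struct Block {
    unsigned leaderOffset;
    std::vector<unsigned> successors; // indices into the block vector
};

// Starting from the block containing the OSR-entry bytecode index, walk
// successors and keep the smallest leader offset seen; compilation then
// starts at that offset instead of offset 0.
unsigned minimumReachableLeaderOffset(const std::vector<Block>& graph, unsigned entryBlockIndex)
{
    std::queue<unsigned> worklist;
    std::unordered_set<unsigned> visited;
    unsigned startOffset = graph[entryBlockIndex].leaderOffset;

    worklist.push(entryBlockIndex);
    visited.insert(entryBlockIndex);
    while (!worklist.empty()) {
        unsigned index = worklist.front();
        worklist.pop();
        startOffset = std::min(startOffset, graph[index].leaderOffset);
        for (unsigned successor : graph[index].successors) {
            if (visited.insert(successor).second)
                worklist.push(successor);
        }
    }
    return startOffset;
}

int main()
{
    // Blocks at offsets 0, 40, 220, 260; the loop body at 220 jumps back to 40.
    std::vector<Block> graph = {
        { 0,   { 1 } },
        { 40,  { 2 } },
        { 220, { 1, 3 } },
        { 260, {} },
    };
    // OSR entry was requested inside the block starting at offset 220, so
    // everything from offset 40 onward must be compiled.
    std::printf("compile from bytecode offset %u\n", minimumReachableLeaderOffset(graph, 2));
    return 0;
}
</pre>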
<a id="trunkSourceJavaScriptCorejitJITh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/JIT.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/JIT.h    2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/JIT.h       2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -37,7 +37,6 @@
</span><span class="cx"> 
</span><span class="cx"> #define ASSERT_JIT_OFFSET(actual, expected) ASSERT_WITH_MESSAGE(actual == expected, "JIT Offset \"%s\" should be %d, not %d.\n", #expected, static_cast<int>(expected), static_cast<int>(actual));
</span><span class="cx"> 
</span><del>-#include "BaselineJITCode.h"
</del><span class="cx"> #include "CodeBlock.h"
</span><span class="cx"> #include "CommonSlowPaths.h"
</span><span class="cx"> #include "JITDisassembler.h"
</span><span class="lines">@@ -45,7 +44,6 @@
</span><span class="cx"> #include "JITMathIC.h"
</span><span class="cx"> #include "JITRightShiftGenerator.h"
</span><span class="cx"> #include "JSInterfaceJIT.h"
</span><del>-#include "LLIntData.h"
</del><span class="cx"> #include "PCToCodeOriginMap.h"
</span><span class="cx"> #include "UnusedPointer.h"
</span><span class="cx"> #include <wtf/UniqueRef.h>
</span><span class="lines">@@ -151,19 +149,14 @@
</span><span class="cx">     };
</span><span class="cx"> 
</span><span class="cx">     struct CallCompilationInfo {
</span><ins>+        MacroAssembler::Label slowPathStart;
</ins><span class="cx">         MacroAssembler::Label doneLocation;
</span><del>-#if USE(JSVALUE64)
-        UnlinkedCallLinkInfo* unlinkedCallLinkInfo;
-        JITConstantPool::Constant callLinkInfoConstant;
-#else
-        MacroAssembler::Label slowPathStart;
</del><span class="cx">         CallLinkInfo* callLinkInfo;
</span><del>-#endif
</del><span class="cx">     };
</span><span class="cx"> 
</span><span class="cx">     void ctiPatchCallByReturnAddress(ReturnAddressPtr, FunctionPtr<CFunctionPtrTag> newCalleeFunction);
</span><span class="cx"> 
</span><del>-    class JIT_CLASS_ALIGNMENT JIT : public JSInterfaceJIT {
</del><ins>+    class JIT_CLASS_ALIGNMENT JIT : private JSInterfaceJIT {
</ins><span class="cx">         friend class JITSlowPathCall;
</span><span class="cx">         friend class JITStubCall;
</span><span class="cx">         friend class JITThunks;
</span><span class="lines">@@ -178,8 +171,6 @@
</span><span class="cx">         // will compress the displacement, and we may not be able to fit a patched offset.
</span><span class="cx">         static constexpr int patchPutByIdDefaultOffset = 256;
</span><span class="cx"> 
</span><del>-        using Base = JSInterfaceJIT;
-
</del><span class="cx">     public:
</span><span class="cx">         JIT(VM&, CodeBlock* = nullptr, BytecodeIndex loopOSREntryBytecodeOffset = BytecodeIndex(0));
</span><span class="cx">         ~JIT();
</span><span class="lines">@@ -187,7 +178,7 @@
</span><span class="cx">         VM& vm() { return *JSInterfaceJIT::vm(); }
</span><span class="cx"> 
</span><span class="cx">         void compileAndLinkWithoutFinalizing(JITCompilationEffort);
</span><del>-        CompilationResult finalizeOnMainThread(CodeBlock*);
</del><ins>+        CompilationResult finalizeOnMainThread();
</ins><span class="cx">         size_t codeSize() const;
</span><span class="cx"> 
</span><span class="cx">         void doMainThreadPreparationBeforeCompile();
</span><span class="lines">@@ -194,26 +185,21 @@
</span><span class="cx">         
</span><span class="cx">         static CompilationResult compile(VM& vm, CodeBlock* codeBlock, JITCompilationEffort effort, BytecodeIndex bytecodeOffset = BytecodeIndex(0))
</span><span class="cx">         {
</span><del>-            return JIT(vm, codeBlock, bytecodeOffset).privateCompile(codeBlock, effort);
</del><ins>+            return JIT(vm, codeBlock, bytecodeOffset).privateCompile(effort);
</ins><span class="cx">         }
</span><span class="cx"> 
</span><del>-        static unsigned frameRegisterCountFor(UnlinkedCodeBlock*);
</del><span class="cx">         static unsigned frameRegisterCountFor(CodeBlock*);
</span><del>-        static int stackPointerOffsetFor(UnlinkedCodeBlock*);
</del><span class="cx">         static int stackPointerOffsetFor(CodeBlock*);
</span><span class="cx"> 
</span><span class="cx">         JS_EXPORT_PRIVATE static HashMap<CString, Seconds> compileTimeStats();
</span><span class="cx">         JS_EXPORT_PRIVATE static Seconds totalCompileTime();
</span><span class="cx"> 
</span><del>-        static constexpr GPRReg s_metadataGPR = LLInt::Registers::metadataTableGPR;
-        static constexpr GPRReg s_constantsGPR = LLInt::Registers::pbGPR;
-
</del><span class="cx">     private:
</span><span class="cx">         void privateCompileMainPass();
</span><span class="cx">         void privateCompileLinkPass();
</span><span class="cx">         void privateCompileSlowCases();
</span><span class="cx">         void link();
</span><del>-        CompilationResult privateCompile(CodeBlock*, JITCompilationEffort);
</del><ins>+        CompilationResult privateCompile(JITCompilationEffort);
</ins><span class="cx"> 
</span><span class="cx">         // Add a call out from JIT code, without an exception check.
</span><span class="cx">         Call appendCall(const FunctionPtr<CFunctionPtrTag> function)
</span><span class="lines">@@ -237,32 +223,6 @@
</span><span class="cx">         }
</span><span class="cx"> #endif
</span><span class="cx"> 
</span><del>-        template <typename Bytecode>
-        void loadPtrFromMetadata(const Bytecode&, size_t offset, GPRReg);
-
-        template <typename Bytecode>
-        void load32FromMetadata(const Bytecode&, size_t offset, GPRReg);
-
-        template <typename Bytecode>
-        void load8FromMetadata(const Bytecode&, size_t offset, GPRReg);
-
-        template <typename ValueType, typename Bytecode>
-        void store8ToMetadata(ValueType, const Bytecode&, size_t offset);
-
-        template <typename Bytecode>
-        void store32ToMetadata(GPRReg, const Bytecode&, size_t offset);
-
-        template <typename Bytecode>
-        void materializePointerIntoMetadata(const Bytecode&, size_t offset, GPRReg);
-
-    public:
-        void loadConstant(unsigned constantIndex, GPRReg);
-    private:
-        void loadGlobalObject(GPRReg);
-        void loadCodeBlockConstant(VirtualRegister, GPRReg);
-
-        void emitPutCodeBlockToFrameInPrologue(GPRReg result = regT0);
-
</del><span class="cx">         void exceptionCheck(Jump jumpToHandler)
</span><span class="cx">         {
</span><span class="cx">             m_exceptionChecks.append(jumpToHandler);
</span><span class="lines">@@ -294,23 +254,10 @@
</span><span class="cx">         void compileOpCall(const Instruction*, unsigned callLinkInfoIndex);
</span><span class="cx">         template<typename Op>
</span><span class="cx">         void compileOpCallSlowCase(const Instruction*, Vector<SlowCaseEntry>::iterator&, unsigned callLinkInfoIndex);
</span><del>-#if USE(JSVALUE64)
</del><span class="cx">         template<typename Op>
</span><span class="cx">         std::enable_if_t<
</span><span class="cx">             Op::opcodeID != op_call_varargs && Op::opcodeID != op_construct_varargs
</span><span class="cx">             && Op::opcodeID != op_tail_call_varargs && Op::opcodeID != op_tail_call_forward_arguments
</span><del>-        , void> compileSetupFrame(const Op&, JITConstantPool::Constant callLinkInfoConstant);
-
-        template<typename Op>
-        std::enable_if_t<
-            Op::opcodeID == op_call_varargs || Op::opcodeID == op_construct_varargs
-            || Op::opcodeID == op_tail_call_varargs || Op::opcodeID == op_tail_call_forward_arguments
-        , void> compileSetupFrame(const Op&, JITConstantPool::Constant callLinkInfoConstant);
-#else
-        template<typename Op>
-        std::enable_if_t<
-            Op::opcodeID != op_call_varargs && Op::opcodeID != op_construct_varargs
-            && Op::opcodeID != op_tail_call_varargs && Op::opcodeID != op_tail_call_forward_arguments
</del><span class="cx">         , void> compileSetupFrame(const Op&, CallLinkInfo*);
</span><span class="cx"> 
</span><span class="cx">         template<typename Op>
</span><span class="lines">@@ -318,10 +265,9 @@
</span><span class="cx">             Op::opcodeID == op_call_varargs || Op::opcodeID == op_construct_varargs
</span><span class="cx">             || Op::opcodeID == op_tail_call_varargs || Op::opcodeID == op_tail_call_forward_arguments
</span><span class="cx">         , void> compileSetupFrame(const Op&, CallLinkInfo*);
</span><del>-#endif
</del><span class="cx"> 
</span><span class="cx">         template<typename Op>
</span><del>-        bool compileTailCall(const Op&, UnlinkedCallLinkInfo*, unsigned callLinkInfoIndex, JITConstantPool::Constant);
</del><ins>+        bool compileTailCall(const Op&, CallLinkInfo*, unsigned callLinkInfoIndex);
</ins><span class="cx">         template<typename Op>
</span><span class="cx">         bool compileCallEval(const Op&);
</span><span class="cx">         void compileCallEvalSlowCase(const Instruction*, Vector<SlowCaseEntry>::iterator&);
</span><span class="lines">@@ -341,13 +287,14 @@
</span><span class="cx">         void emitWriteBarrier(VirtualRegister owner, VirtualRegister value, WriteBarrierMode);
</span><span class="cx">         void emitWriteBarrier(JSCell* owner, VirtualRegister value, WriteBarrierMode);
</span><span class="cx">         void emitWriteBarrier(JSCell* owner);
</span><del>-        void emitWriteBarrier(GPRReg owner);
</del><span class="cx"> 
</span><span class="cx">         // This assumes that the value to profile is in regT0 and that regT3 is available for
</span><span class="cx">         // scratch.
</span><span class="cx"> #if USE(JSVALUE64)
</span><del>-        template<typename Bytecode> void emitValueProfilingSite(const Bytecode&, GPRReg);
-        template<typename Bytecode> void emitValueProfilingSite(const Bytecode&, JSValueRegs);
</del><ins>+        void emitValueProfilingSite(ValueProfile&, GPRReg);
+        void emitValueProfilingSite(ValueProfile&, JSValueRegs);
+        template<typename Metadata> void emitValueProfilingSite(Metadata&, GPRReg);
+        template<typename Metadata> void emitValueProfilingSite(Metadata&, JSValueRegs);
</ins><span class="cx"> #else
</span><span class="cx">         void emitValueProfilingSite(ValueProfile&, JSValueRegs);
</span><span class="cx">         template<typename Metadata> void emitValueProfilingSite(Metadata&, JSValueRegs);
</span><span class="lines">@@ -358,10 +305,8 @@
</span><span class="cx">         std::enable_if_t<std::is_same<decltype(Op::Metadata::m_profile), ValueProfile>::value, void>
</span><span class="cx">         emitValueProfilingSiteIfProfiledOpcode(Op bytecode);
</span><span class="cx"> 
</span><del>-        template <typename Bytecode>
-        void emitArrayProfilingSiteWithCell(const Bytecode&, RegisterID cellGPR, RegisterID scratchGPR);
-        template <typename Bytecode>
-        void emitArrayProfilingSiteWithCell(const Bytecode&, ptrdiff_t, RegisterID cellGPR, RegisterID scratchGPR);
</del><ins>+        void emitArrayProfilingSiteWithCell(RegisterID cellGPR, ArrayProfile*, RegisterID scratchGPR);
+        void emitArrayProfilingSiteWithCell(RegisterID cellGPR, RegisterID arrayProfileGPR, RegisterID scratchGPR);
</ins><span class="cx"> 
</span><span class="cx">         template<typename Op>
</span><span class="cx">         ECMAMode ecmaMode(Op);
</span><span class="lines">@@ -648,6 +593,7 @@
</span><span class="cx">         void emitSlow_op_jneq(const Instruction*, Vector<SlowCaseEntry>::iterator&);
</span><span class="cx">         void emitSlow_op_jstricteq(const Instruction*, Vector<SlowCaseEntry>::iterator&);
</span><span class="cx">         void emitSlow_op_jnstricteq(const Instruction*, Vector<SlowCaseEntry>::iterator&);
</span><ins>+        void emitSlow_op_jtrue(const Instruction*, Vector<SlowCaseEntry>::iterator&);
</ins><span class="cx">         void emitSlow_op_loop_hint(const Instruction*, Vector<SlowCaseEntry>::iterator&);
</span><span class="cx">         void emitSlow_op_check_traps(const Instruction*, Vector<SlowCaseEntry>::iterator&);
</span><span class="cx">         void emitSlow_op_mod(const Instruction*, Vector<SlowCaseEntry>::iterator&);
</span><span class="lines">@@ -681,14 +627,14 @@
</span><span class="cx">         void emitRightShiftSlowCase(const Instruction*, Vector<SlowCaseEntry>::iterator&, bool isUnsigned);
</span><span class="cx"> 
</span><span class="cx">         void emitHasPrivate(VirtualRegister dst, VirtualRegister base, VirtualRegister propertyOrBrand, AccessType);
</span><del>-        void emitHasPrivateSlow(VirtualRegister dst, VirtualRegister base, VirtualRegister property, AccessType);
</del><ins>+        void emitHasPrivateSlow(VirtualRegister dst, AccessType);
</ins><span class="cx"> 
</span><span class="cx">         template<typename Op>
</span><span class="cx">         void emitNewFuncCommon(const Instruction*);
</span><span class="cx">         template<typename Op>
</span><span class="cx">         void emitNewFuncExprCommon(const Instruction*);
</span><del>-        void emitVarInjectionCheck(bool needsVarInjectionChecks, GPRReg);
-        void emitVarReadOnlyCheck(ResolveType, GPRReg scratchGPR);
</del><ins>+        void emitVarInjectionCheck(bool needsVarInjectionChecks);
+        void emitVarReadOnlyCheck(ResolveType);
</ins><span class="cx">         void emitResolveClosure(VirtualRegister dst, VirtualRegister scope, bool needsVarInjectionChecks, unsigned depth);
</span><span class="cx">         void emitLoadWithStructureCheck(VirtualRegister scope, Structure** structureSlot);
</span><span class="cx"> #if USE(JSVALUE64)
</span><span class="lines">@@ -700,7 +646,7 @@
</span><span class="cx"> #endif
</span><span class="cx">         void emitGetClosureVar(VirtualRegister scope, uintptr_t operand);
</span><span class="cx">         void emitNotifyWrite(WatchpointSet*);
</span><del>-        void emitNotifyWriteWatchpoint(GPRReg pointerToSet);
</del><ins>+        void emitNotifyWrite(GPRReg pointerToSet);
</ins><span class="cx">         void emitPutGlobalVariable(JSValue* operand, VirtualRegister value, WatchpointSet*);
</span><span class="cx">         void emitPutGlobalVariableIndirect(JSValue** addressOfOperand, VirtualRegister value, WatchpointSet**);
</span><span class="cx">         void emitPutClosureVar(VirtualRegister scope, uintptr_t operand, VirtualRegister value, WatchpointSet*);
</span><span class="lines">@@ -709,7 +655,6 @@
</span><span class="cx"> 
</span><span class="cx">         void emitPutIntToCallFrameHeader(RegisterID from, VirtualRegister);
</span><span class="cx"> 
</span><del>-        bool isKnownCell(VirtualRegister);
</del><span class="cx">         JSValue getConstantOperand(VirtualRegister);
</span><span class="cx">         bool isOperandConstantInt(VirtualRegister);
</span><span class="cx">         bool isOperandConstantChar(VirtualRegister);
</span><span class="lines">@@ -731,11 +676,13 @@
</span><span class="cx">         static MacroAssemblerCodeRef<JITThunkPtrTag> slow_op_get_by_id_prepareCallGenerator(VM&);
</span><span class="cx">         static MacroAssemblerCodeRef<JITThunkPtrTag> slow_op_get_by_id_with_this_prepareCallGenerator(VM&);
</span><span class="cx">         static MacroAssemblerCodeRef<JITThunkPtrTag> slow_op_get_by_val_prepareCallGenerator(VM&);
</span><ins>+        static MacroAssemblerCodeRef<JITThunkPtrTag> slow_op_get_from_scopeGenerator(VM&);
</ins><span class="cx">         static MacroAssemblerCodeRef<JITThunkPtrTag> slow_op_get_private_name_prepareCallGenerator(VM&);
</span><span class="cx">         static MacroAssemblerCodeRef<JITThunkPtrTag> slow_op_put_by_id_prepareCallGenerator(VM&);
</span><span class="cx">         static MacroAssemblerCodeRef<JITThunkPtrTag> slow_op_put_by_val_prepareCallGenerator(VM&);
</span><span class="cx">         static MacroAssemblerCodeRef<JITThunkPtrTag> slow_op_put_private_name_prepareCallGenerator(VM&);
</span><span class="cx">         static MacroAssemblerCodeRef<JITThunkPtrTag> slow_op_put_to_scopeGenerator(VM&);
</span><ins>+        static MacroAssemblerCodeRef<JITThunkPtrTag> slow_op_resolve_scopeGenerator(VM&);
</ins><span class="cx"> 
</span><span class="cx">         static MacroAssemblerCodeRef<JITThunkPtrTag> op_check_traps_handlerGenerator(VM&);
</span><span class="cx">         static MacroAssemblerCodeRef<JITThunkPtrTag> op_enter_handlerGenerator(VM&);
</span><span class="lines">@@ -749,13 +696,30 @@
</span><span class="cx">             return !(resolveType == GlobalVar || resolveType == ResolvedClosureVar || resolveType == ModuleVar);
</span><span class="cx">         }
</span><span class="cx"> 
</span><ins>+#define DECLARE_GET_FROM_SCOPE_GENERATOR(resolveType) \
+        static MacroAssemblerCodeRef<JITThunkPtrTag> op_get_from_scope_##resolveType##Generator(VM&);
+        FOR_EACH_RESOLVE_TYPE(DECLARE_GET_FROM_SCOPE_GENERATOR)
+#undef DECLARE_GET_FROM_SCOPE_GENERATOR
+
+        MacroAssemblerCodeRef<JITThunkPtrTag> generateOpGetFromScopeThunk(ResolveType, const char* thunkName);
+
+        static constexpr bool thunkIsUsedForOpResolveScope(ResolveType resolveType)
+        {
+            // ModuleVar because it is more efficient to emit inline than use a thunk.
+            // ResolvedClosureVar because we don't use these types with op_resolve_scope.
+            return !(resolveType == ResolvedClosureVar || resolveType == ModuleVar);
+        }
+
+#define DECLARE_RESOLVE_SCOPE_GENERATOR(resolveType) \
+        static MacroAssemblerCodeRef<JITThunkPtrTag> op_resolve_scope_##resolveType##Generator(VM&);
+        FOR_EACH_RESOLVE_TYPE(DECLARE_RESOLVE_SCOPE_GENERATOR)
+#undef DECLARE_RESOLVE_SCOPE_GENERATOR
+
+        MacroAssemblerCodeRef<JITThunkPtrTag> generateOpResolveScopeThunk(ResolveType, const char* thunkName);
+
</ins><span class="cx">         static MacroAssemblerCodeRef<JITThunkPtrTag> valueIsFalseyGenerator(VM&);
</span><span class="cx">         static MacroAssemblerCodeRef<JITThunkPtrTag> valueIsTruthyGenerator(VM&);
</span><span class="cx"> 
</span><del>-        static MacroAssemblerCodeRef<JITThunkPtrTag> slow_op_get_from_scopeGenerator(VM&);
-        static MacroAssemblerCodeRef<JITThunkPtrTag> slow_op_resolve_scopeGenerator(VM&);
-        static MacroAssemblerCodeRef<JITThunkPtrTag> generateOpGetFromScopeThunk(VM&, std::optional<ResolveType>, const char* thunkName);
-        static MacroAssemblerCodeRef<JITThunkPtrTag> generateOpResolveScopeThunk(VM&, std::optional<ResolveType>, const char* thunkName);
</del><span class="cx"> #endif // ENABLE(EXTRA_CTI_THUNKS)
</span><span class="cx"> 
</span><span class="cx">         Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter)
</span><span class="lines">@@ -796,10 +760,10 @@
</span><span class="cx">         MacroAssembler::Call appendCallWithCallFrameRollbackOnException(const FunctionPtr<CFunctionPtrTag>);
</span><span class="cx">         MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResult(const FunctionPtr<CFunctionPtrTag>, VirtualRegister result);
</span><span class="cx">         void appendCallWithExceptionCheckSetJSValueResult(Address, VirtualRegister result);
</span><del>-        template<typename Bytecode>
-        MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResultWithProfile(const Bytecode&, const FunctionPtr<CFunctionPtrTag>, VirtualRegister result);
-        template<typename Bytecode>
-        void appendCallWithExceptionCheckSetJSValueResultWithProfile(const Bytecode&, Address, VirtualRegister result);
</del><ins>+        template<typename Metadata>
+        MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResultWithProfile(Metadata&, const FunctionPtr<CFunctionPtrTag>, VirtualRegister result);
+        template<typename Metadata>
+        void appendCallWithExceptionCheckSetJSValueResultWithProfile(Metadata&, Address, VirtualRegister result);
</ins><span class="cx">         
</span><span class="cx">         template<typename OperationType, typename... Args>
</span><span class="cx">         std::enable_if_t<FunctionTraits<OperationType>::hasResult, MacroAssembler::Call>
</span><span class="lines">@@ -857,20 +821,20 @@
</span><span class="cx">             appendCallWithExceptionCheck(Address(GPRInfo::nonArgGPR0, target.offset));
</span><span class="cx">         }
</span><span class="cx"> 
</span><del>-        template<typename Bytecode, typename OperationType, typename... Args>
</del><ins>+        template<typename Metadata, typename OperationType, typename... Args>
</ins><span class="cx">         std::enable_if_t<FunctionTraits<OperationType>::hasResult, MacroAssembler::Call>
</span><del>-        callOperationWithProfile(const Bytecode& bytecode, OperationType operation, VirtualRegister result, Args... args)
</del><ins>+        callOperationWithProfile(Metadata& metadata, OperationType operation, VirtualRegister result, Args... args)
</ins><span class="cx">         {
</span><span class="cx">             setupArguments<OperationType>(args...);
</span><del>-            return appendCallWithExceptionCheckSetJSValueResultWithProfile(bytecode, operation, result);
</del><ins>+            return appendCallWithExceptionCheckSetJSValueResultWithProfile(metadata, operation, result);
</ins><span class="cx">         }
</span><span class="cx"> 
</span><del>-        template<typename OperationType, typename Bytecode, typename... Args>
</del><ins>+        template<typename OperationType, typename Metadata, typename... Args>
</ins><span class="cx">         std::enable_if_t<FunctionTraits<OperationType>::hasResult, void>
</span><del>-        callOperationWithProfile(const Bytecode& bytecode, Address target, VirtualRegister result, Args... args)
</del><ins>+        callOperationWithProfile(Metadata& metadata, Address target, VirtualRegister result, Args... args)
</ins><span class="cx">         {
</span><span class="cx">             setupArgumentsForIndirectCall<OperationType>(target, args...);
</span><del>-            return appendCallWithExceptionCheckSetJSValueResultWithProfile(bytecode, Address(GPRInfo::nonArgGPR0, target.offset), result);
</del><ins>+            return appendCallWithExceptionCheckSetJSValueResultWithProfile(metadata, Address(GPRInfo::nonArgGPR0, target.offset), result);
</ins><span class="cx">         }
</span><span class="cx"> 
</span><span class="cx">         template<typename OperationType, typename... Args>
</span><span class="lines">@@ -955,23 +919,26 @@
</span><span class="cx"> 
</span><span class="cx"> #if ENABLE(DFG_JIT)
</span><span class="cx">         bool canBeOptimized() { return m_canBeOptimized; }
</span><ins>+        bool canBeOptimizedOrInlined() { return m_canBeOptimizedOrInlined; }
</ins><span class="cx">         bool shouldEmitProfiling() { return m_shouldEmitProfiling; }
</span><span class="cx"> #else
</span><span class="cx">         bool canBeOptimized() { return false; }
</span><ins>+        bool canBeOptimizedOrInlined() { return false; }
</ins><span class="cx">         // Enables use of value profiler with tiered compilation turned off,
</span><span class="cx">         // in which case all code gets profiled.
</span><span class="cx">         bool shouldEmitProfiling() { return false; }
</span><span class="cx"> #endif
</span><span class="cx"> 
</span><del>-        void emitMaterializeMetadataAndConstantPoolRegisters();
-
-        void emitRestoreCalleeSaves();
-
</del><span class="cx">         static bool reportCompileTimes();
</span><span class="cx">         static bool computeCompileTimes();
</span><ins>+        
+        // If you need to check a value from the metadata table and you need it to
+        // be consistent across the fast and slow path, then you want to use this.
+        // It will give the slow path the same value read by the fast path.
+        GetPutInfo copiedGetPutInfo(OpPutToScope);
+        template<typename BinaryOp>
+        BinaryArithProfile copiedArithProfile(BinaryOp);
</ins><span class="cx"> 
</span><del>-        void resetSP();
-
</del><span class="cx">         Interpreter* m_interpreter;
</span><span class="cx"> 
</span><span class="cx">         Vector<FarCallRecord> m_farCalls;
</span><span class="lines">@@ -997,6 +964,9 @@
</span><span class="cx">         Vector<SlowCaseEntry> m_slowCases;
</span><span class="cx">         Vector<SwitchRecord> m_switches;
</span><span class="cx"> 
</span><ins>+        HashMap<unsigned, unsigned> m_copiedGetPutInfos;
+        HashMap<uint64_t, BinaryArithProfile> m_copiedArithProfiles;
+
</ins><span class="cx">         JumpList m_exceptionChecks;
</span><span class="cx">         JumpList m_exceptionChecksWithCallFrameRollback;
</span><span class="cx"> #if !ENABLE(EXTRA_CTI_THUNKS)
</span><span class="lines">@@ -1030,26 +1000,11 @@
</span><span class="cx">         HashMap<const Instruction*, UniqueRef<MathICGenerationState>> m_instructionToMathICGenerationState;
</span><span class="cx"> 
</span><span class="cx">         bool m_canBeOptimized;
</span><ins>+        bool m_canBeOptimizedOrInlined;
</ins><span class="cx">         bool m_shouldEmitProfiling;
</span><span class="cx">         BytecodeIndex m_loopOSREntryBytecodeIndex;
</span><span class="cx"> 
</span><del>-        CodeBlock* m_profiledCodeBlock { nullptr };
-        UnlinkedCodeBlock* m_unlinkedCodeBlock { nullptr };
-
-        MathICHolder m_mathICs;
-        RefPtr<BaselineJITCode> m_jitCode;
-
-        JITConstantPool m_constantPool;
-        JITConstantPool::Constant m_globalObjectConstant { std::numeric_limits<unsigned>::max() };
-        Bag<UnlinkedCallLinkInfo> m_unlinkedCalls;
-        Bag<CallLinkInfo> m_evalCallLinkInfos;
-        Bag<UnlinkedStructureStubInfo> m_unlinkedStubInfos;
-        FixedVector<SimpleJumpTable> m_switchJumpTables;
-        FixedVector<StringJumpTable> m_stringSwitchJumpTables;
-
-        struct NotACodeBlock { } m_codeBlock;
-
-        bool m_isShareable { true };
</del><ins>+        RefPtr<DirectJITCode> m_jitCode;
</ins><span class="cx">     };
</span><span class="cx"> 
</span><span class="cx"> } // namespace JSC
</span></span></pre></div>
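<p>The JIT.h hunk above restores the copiedGetPutInfo()/copiedArithProfile() helpers together with the m_copiedGetPutInfos and m_copiedArithProfiles hash maps. Per the comment in that hunk, the idea is that a metadata value read while emitting the fast path is copied aside so the slow path later observes exactly the same value. The standalone sketch below illustrates that copy-aside pattern only; FakeMetadata, CompilerSketch, and the use of the bytecode offset as the key are assumptions for illustration, not WebKit code.</p>
<pre>
// Illustrative sketch (not WebKit code) of the "copied value" pattern behind
// copiedGetPutInfo()/copiedArithProfile(): snapshot a mutable metadata value
// at compile time so the fast path and the slow path agree on what was read.
#include <cstdint>
#include <unordered_map>

struct FakeMetadata { uint32_t getPutInfoBits; }; // stand-in for live bytecode metadata

class CompilerSketch {
public:
    // Called while emitting the fast path: read the live metadata once and
    // remember the copy (keyed here by bytecode offset for illustration).
    uint32_t copiedGetPutInfo(unsigned bytecodeOffset, const FakeMetadata& metadata)
    {
        auto result = m_copiedGetPutInfos.emplace(bytecodeOffset, metadata.getPutInfoBits);
        return result.first->second; // an existing copy wins if one was already taken
    }

    // Called while emitting the slow path: consult the snapshot instead of
    // re-reading metadata that may have changed in the meantime.
    uint32_t copiedGetPutInfoForSlowPath(unsigned bytecodeOffset) const
    {
        return m_copiedGetPutInfos.at(bytecodeOffset);
    }

private:
    std::unordered_map<unsigned, uint32_t> m_copiedGetPutInfos;
};
</pre>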
<a id="trunkSourceJavaScriptCorejitJITArithmeticcpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/JITArithmetic.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/JITArithmetic.cpp        2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/JITArithmetic.cpp   2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -292,10 +292,9 @@
</span><span class="cx">     if (isOperandConstantChar(op1) || isOperandConstantChar(op2)) {
</span><span class="cx">         linkAllSlowCases(iter);
</span><span class="cx"> 
</span><del>-        emitGetVirtualRegister(op1, argumentGPR1);
-        emitGetVirtualRegister(op2, argumentGPR2);
-        loadGlobalObject(argumentGPR0);
-        callOperation(operation, argumentGPR0, argumentGPR1, argumentGPR2);
</del><ins>+        emitGetVirtualRegister(op1, argumentGPR0);
+        emitGetVirtualRegister(op2, argumentGPR1);
+        callOperation(operation, TrustedImmPtr(m_codeBlock->globalObject()), argumentGPR0, argumentGPR1);
</ins><span class="cx">         emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
</span><span class="cx">         return;
</span><span class="cx">     }
</span><span class="lines">@@ -321,8 +320,7 @@
</span><span class="cx">         }
</span><span class="cx"> 
</span><span class="cx">         emitGetVirtualRegister(op2, regT1);
</span><del>-        loadGlobalObject(regT2);
-        callOperation(operation, regT2, regT0, regT1);
</del><ins>+        callOperation(operation, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1);
</ins><span class="cx">         emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
</span><span class="cx">         return;
</span><span class="cx">     }
</span><span class="lines">@@ -348,8 +346,7 @@
</span><span class="cx">         }
</span><span class="cx"> 
</span><span class="cx">         emitGetVirtualRegister(op1, regT2);
</span><del>-        loadGlobalObject(regT3);
-        callOperation(operation, regT3, regT2, regT1);
</del><ins>+        callOperation(operation, TrustedImmPtr(m_codeBlock->globalObject()), regT2, regT1);
</ins><span class="cx">         emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
</span><span class="cx">         return;
</span><span class="cx">     }
</span><span class="lines">@@ -375,8 +372,7 @@
</span><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx">     linkSlowCase(iter); // RHS is not Int.
</span><del>-    loadGlobalObject(regT2);
-    callOperation(operation, regT2, regT0, regT1);
</del><ins>+    callOperation(operation, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1);
</ins><span class="cx">     emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -577,8 +573,7 @@
</span><span class="cx"> 
</span><span class="cx">     emitLoad(op1, regT1, regT0);
</span><span class="cx">     emitLoad(op2, regT3, regT2);
</span><del>-    loadGlobalObject(regT4);
-    callOperation(operation, regT4, JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2));
</del><ins>+    callOperation(operation, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2));
</ins><span class="cx">     emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -722,8 +717,8 @@
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_negate(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><del>-    UnaryArithProfile* arithProfile = &m_unlinkedCodeBlock->unaryArithProfile(currentInstruction->as<OpNegate>().m_profileIndex);
-    JITNegIC* negateIC = m_mathICs.addJITNegIC(arithProfile);
</del><ins>+    UnaryArithProfile* arithProfile = &currentInstruction->as<OpNegate>().metadata(m_codeBlock).m_arithProfile;
+    JITNegIC* negateIC = m_codeBlock->addJITNegIC(arithProfile);
</ins><span class="cx">     m_instructionToMathIC.add(currentInstruction, negateIC);
</span><span class="cx">     // FIXME: it would be better to call those operationValueNegate, since the operand can be a BigInt
</span><span class="cx">     emitMathICFast<OpNegate>(negateIC, currentInstruction, operationArithNegateProfiled, operationArithNegate);
</span><span class="lines">@@ -904,8 +899,8 @@
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_add(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><del>-    BinaryArithProfile* arithProfile = &m_unlinkedCodeBlock->binaryArithProfile(currentInstruction->as<OpAdd>().m_profileIndex);
-    JITAddIC* addIC = m_mathICs.addJITAddIC(arithProfile);
</del><ins>+    BinaryArithProfile* arithProfile = &currentInstruction->as<OpAdd>().metadata(m_codeBlock).m_arithProfile;
+    JITAddIC* addIC = m_codeBlock->addJITAddIC(arithProfile);
</ins><span class="cx">     m_instructionToMathIC.add(currentInstruction, addIC);
</span><span class="cx">     emitMathICFast<OpAdd>(addIC, currentInstruction, operationValueAddProfiled, operationValueAdd);
</span><span class="cx"> }
</span><span class="lines">@@ -950,11 +945,10 @@
</span><span class="cx">     bool generatedInlineCode = mathIC->generateInline(*this, mathICGenerationState);
</span><span class="cx">     if (!generatedInlineCode) {
</span><span class="cx">         UnaryArithProfile* arithProfile = mathIC->arithProfile();
</span><del>-        loadGlobalObject(scratchGPR);
</del><span class="cx">         if (arithProfile && shouldEmitProfiling())
</span><del>-            callOperationWithResult(profiledFunction, resultRegs, scratchGPR, srcRegs, arithProfile);
</del><ins>+            callOperationWithResult(profiledFunction, resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), srcRegs, arithProfile);
</ins><span class="cx">         else
</span><del>-            callOperationWithResult(nonProfiledFunction, resultRegs, scratchGPR, srcRegs);
</del><ins>+            callOperationWithResult(nonProfiledFunction, resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), srcRegs);
</ins><span class="cx">     } else
</span><span class="cx">         addSlowCase(mathICGenerationState.slowPathJumps);
</span><span class="cx"> 
</span><span class="lines">@@ -1021,11 +1015,10 @@
</span><span class="cx">         else if (rightOperand.isConst())
</span><span class="cx">             emitGetVirtualRegister(op2, rightRegs);
</span><span class="cx">         BinaryArithProfile* arithProfile = mathIC->arithProfile();
</span><del>-        loadGlobalObject(scratchGPR);
</del><span class="cx">         if (arithProfile && shouldEmitProfiling())
</span><del>-            callOperationWithResult(profiledFunction, resultRegs, scratchGPR, leftRegs, rightRegs, arithProfile);
</del><ins>+            callOperationWithResult(profiledFunction, resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), leftRegs, rightRegs, arithProfile);
</ins><span class="cx">         else
</span><del>-            callOperationWithResult(nonProfiledFunction, resultRegs, scratchGPR, leftRegs, rightRegs);
</del><ins>+            callOperationWithResult(nonProfiledFunction, resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), leftRegs, rightRegs);
</ins><span class="cx">     } else
</span><span class="cx">         addSlowCase(mathICGenerationState.slowPathJumps);
</span><span class="cx"> 
</span><span class="lines">@@ -1062,14 +1055,13 @@
</span><span class="cx"> #endif
</span><span class="cx"> 
</span><span class="cx">     UnaryArithProfile* arithProfile = mathIC->arithProfile();
</span><del>-    loadGlobalObject(regT4);
</del><span class="cx">     if (arithProfile && shouldEmitProfiling()) {
</span><span class="cx">         if (mathICGenerationState.shouldSlowPathRepatch)
</span><del>-            mathICGenerationState.slowPathCall = callOperationWithResult(reinterpret_cast<J_JITOperation_GJMic>(profiledRepatchFunction), resultRegs, regT4, srcRegs, TrustedImmPtr(mathIC));
</del><ins>+            mathICGenerationState.slowPathCall = callOperationWithResult(reinterpret_cast<J_JITOperation_GJMic>(profiledRepatchFunction), resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), srcRegs, TrustedImmPtr(mathIC));
</ins><span class="cx">         else
</span><del>-            mathICGenerationState.slowPathCall = callOperationWithResult(profiledFunction, resultRegs, regT4, srcRegs, arithProfile);
</del><ins>+            mathICGenerationState.slowPathCall = callOperationWithResult(profiledFunction, resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), srcRegs, arithProfile);
</ins><span class="cx">     } else
</span><del>-        mathICGenerationState.slowPathCall = callOperationWithResult(reinterpret_cast<J_JITOperation_GJMic>(repatchFunction), resultRegs, regT4, srcRegs, TrustedImmPtr(mathIC));
</del><ins>+        mathICGenerationState.slowPathCall = callOperationWithResult(reinterpret_cast<J_JITOperation_GJMic>(repatchFunction), resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), srcRegs, TrustedImmPtr(mathIC));
</ins><span class="cx"> 
</span><span class="cx"> #if ENABLE(MATH_IC_STATS)
</span><span class="cx">     auto slowPathEnd = label();
</span><span class="lines">@@ -1128,14 +1120,13 @@
</span><span class="cx"> #endif
</span><span class="cx"> 
</span><span class="cx">     BinaryArithProfile* arithProfile = mathIC->arithProfile();
</span><del>-    loadGlobalObject(regT4);
</del><span class="cx">     if (arithProfile && shouldEmitProfiling()) {
</span><span class="cx">         if (mathICGenerationState.shouldSlowPathRepatch)
</span><del>-            mathICGenerationState.slowPathCall = callOperationWithResult(bitwise_cast<J_JITOperation_GJJMic>(profiledRepatchFunction), resultRegs, regT4, leftRegs, rightRegs, TrustedImmPtr(mathIC));
</del><ins>+            mathICGenerationState.slowPathCall = callOperationWithResult(bitwise_cast<J_JITOperation_GJJMic>(profiledRepatchFunction), resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), leftRegs, rightRegs, TrustedImmPtr(mathIC));
</ins><span class="cx">         else
</span><del>-            mathICGenerationState.slowPathCall = callOperationWithResult(profiledFunction, resultRegs, regT4, leftRegs, rightRegs, arithProfile);
</del><ins>+            mathICGenerationState.slowPathCall = callOperationWithResult(profiledFunction, resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), leftRegs, rightRegs, arithProfile);
</ins><span class="cx">     } else
</span><del>-        mathICGenerationState.slowPathCall = callOperationWithResult(bitwise_cast<J_JITOperation_GJJMic>(repatchFunction), resultRegs, regT4, leftRegs, rightRegs, TrustedImmPtr(mathIC));
</del><ins>+        mathICGenerationState.slowPathCall = callOperationWithResult(bitwise_cast<J_JITOperation_GJJMic>(repatchFunction), resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), leftRegs, rightRegs, TrustedImmPtr(mathIC));
</ins><span class="cx"> 
</span><span class="cx"> #if ENABLE(MATH_IC_STATS)
</span><span class="cx">     auto slowPathEnd = label();
</span><span class="lines">@@ -1175,7 +1166,7 @@
</span><span class="cx"> 
</span><span class="cx">     BinaryArithProfile* arithProfile = nullptr;
</span><span class="cx">     if (shouldEmitProfiling())
</span><del>-        arithProfile = &m_unlinkedCodeBlock->binaryArithProfile(currentInstruction->as<OpDiv>().m_profileIndex);
</del><ins>+        arithProfile = &currentInstruction->as<OpDiv>().metadata(m_codeBlock).m_arithProfile;
</ins><span class="cx"> 
</span><span class="cx">     SnippetOperand leftOperand(bytecode.m_operandTypes.first());
</span><span class="cx">     SnippetOperand rightOperand(bytecode.m_operandTypes.second());
</span><span class="lines">@@ -1220,8 +1211,8 @@
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_mul(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><del>-    BinaryArithProfile* arithProfile = &m_unlinkedCodeBlock->binaryArithProfile(currentInstruction->as<OpMul>().m_profileIndex);
-    JITMulIC* mulIC = m_mathICs.addJITMulIC(arithProfile);
</del><ins>+    BinaryArithProfile* arithProfile = &currentInstruction->as<OpMul>().metadata(m_codeBlock).m_arithProfile;
+    JITMulIC* mulIC = m_codeBlock->addJITMulIC(arithProfile);
</ins><span class="cx">     m_instructionToMathIC.add(currentInstruction, mulIC);
</span><span class="cx">     emitMathICFast<OpMul>(mulIC, currentInstruction, operationValueMulProfiled, operationValueMul);
</span><span class="cx"> }
</span><span class="lines">@@ -1236,8 +1227,8 @@
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_sub(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><del>-    BinaryArithProfile* arithProfile = &m_unlinkedCodeBlock->binaryArithProfile(currentInstruction->as<OpSub>().m_profileIndex);
-    JITSubIC* subIC = m_mathICs.addJITSubIC(arithProfile);
</del><ins>+    BinaryArithProfile* arithProfile = &currentInstruction->as<OpSub>().metadata(m_codeBlock).m_arithProfile;
+    JITSubIC* subIC = m_codeBlock->addJITSubIC(arithProfile);
</ins><span class="cx">     m_instructionToMathIC.add(currentInstruction, subIC);
</span><span class="cx">     emitMathICFast<OpSub>(subIC, currentInstruction, operationValueSubProfiled, operationValueSub);
</span><span class="cx"> }
</span></span></pre></div>
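<p>A recurring change in the JITArithmetic.cpp hunks above is that the deleted lines materialize the global object into a scratch register via loadGlobalObject() (backed by the removed JIT constant pool), while the restored lines pass TrustedImmPtr(m_codeBlock-&gt;globalObject()), encoding the pointer directly into the generated code as an immediate. The sketch below contrasts the two approaches in plain C++ as an analogy only; GlobalObjectSketch, emitWithImmediate(), and emitWithConstantPoolLoad() are invented names, not the MacroAssembler API. The presumable trade-off, suggested by the deleted m_constantPool and m_isShareable members, is that an immediate ties the code to one specific CodeBlock, whereas a constant-pool load keeps the emitted code reusable with a different pool.</p>
<pre>
// Analogy only (not WebKit code): immediate pointer vs. constant-pool load.
#include <functional>
#include <vector>

struct GlobalObjectSketch { };

// TrustedImmPtr-style: the pointer is captured at code-generation time and is
// part of the "generated code", so the result is bound to this one object.
std::function<const GlobalObjectSketch*()> emitWithImmediate(const GlobalObjectSketch* globalObject)
{
    return [globalObject] { return globalObject; };
}

// Constant-pool style (the deleted loadGlobalObject() path): the generated code
// only remembers an index and fetches the pointer at run time, so the same code
// could in principle be reused against a different constant pool.
std::function<const GlobalObjectSketch*(const std::vector<const GlobalObjectSketch*>&)>
emitWithConstantPoolLoad(unsigned globalObjectConstantIndex)
{
    return [globalObjectConstantIndex](const std::vector<const GlobalObjectSketch*>& constantPool) {
        return constantPool[globalObjectConstantIndex];
    };
}
</pre>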
<a id="trunkSourceJavaScriptCorejitJITCallcpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/JITCall.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/JITCall.cpp      2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/JITCall.cpp 2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -34,7 +34,6 @@
</span><span class="cx"> #include "CallFrameShuffler.h"
</span><span class="cx"> #include "CodeBlock.h"
</span><span class="cx"> #include "JITInlines.h"
</span><del>-#include "ScratchRegisterAllocator.h"
</del><span class="cx"> #include "SetupVarargsFrame.h"
</span><span class="cx"> #include "SlowPathCall.h"
</span><span class="cx"> #include "StackAlignment.h"
</span><span class="lines">@@ -45,7 +44,7 @@
</span><span class="cx"> template<typename Op>
</span><span class="cx"> void JIT::emitPutCallResult(const Op& bytecode)
</span><span class="cx"> {
</span><del>-    emitValueProfilingSite(bytecode, regT0);
</del><ins>+    emitValueProfilingSite(bytecode.metadata(m_codeBlock), regT0);
</ins><span class="cx">     emitPutVirtualRegister(destinationFor(bytecode, m_bytecodeIndex.checkpoint()).virtualRegister(), regT0);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -54,18 +53,18 @@
</span><span class="cx">     Op::opcodeID != op_call_varargs && Op::opcodeID != op_construct_varargs
</span><span class="cx">     && Op::opcodeID != op_tail_call_varargs && Op::opcodeID != op_tail_call_forward_arguments
</span><span class="cx"> , void>
</span><del>-JIT::compileSetupFrame(const Op& bytecode, JITConstantPool::Constant)
</del><ins>+JIT::compileSetupFrame(const Op& bytecode, CallLinkInfo*)
</ins><span class="cx"> {
</span><span class="cx">     unsigned checkpoint = m_bytecodeIndex.checkpoint();
</span><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
</ins><span class="cx">     int argCountIncludingThis = argumentCountIncludingThisFor(bytecode, checkpoint);
</span><span class="cx">     int registerOffset = -static_cast<int>(stackOffsetInRegistersForCall(bytecode, checkpoint));
</span><span class="cx"> 
</span><del>-
</del><span class="cx">     if (Op::opcodeID == op_call && shouldEmitProfiling()) {
</span><span class="cx">         emitGetVirtualRegister(VirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0)), regT0);
</span><span class="cx">         Jump done = branchIfNotCell(regT0);
</span><span class="cx">         load32(Address(regT0, JSCell::structureIDOffset()), regT0);
</span><del>-        store32ToMetadata(regT0, bytecode, OpCall::Metadata::offsetOfCallLinkInfo() + LLIntCallLinkInfo::offsetOfArrayProfile() + ArrayProfile::offsetOfLastSeenStructureID());
</del><ins>+        store32(regT0, arrayProfileFor(metadata, checkpoint).addressOfLastSeenStructureID());
</ins><span class="cx">         done.link(this);
</span><span class="cx">     }
</span><span class="cx"> 
</span><span class="lines">@@ -79,7 +78,7 @@
</span><span class="cx">     Op::opcodeID == op_call_varargs || Op::opcodeID == op_construct_varargs
</span><span class="cx">     || Op::opcodeID == op_tail_call_varargs || Op::opcodeID == op_tail_call_forward_arguments
</span><span class="cx"> , void>
</span><del>-JIT::compileSetupFrame(const Op& bytecode, JITConstantPool::Constant callLinkInfoConstant)
</del><ins>+JIT::compileSetupFrame(const Op& bytecode, CallLinkInfo* info)
</ins><span class="cx"> {
</span><span class="cx">     VirtualRegister thisValue = bytecode.m_thisValue;
</span><span class="cx">     VirtualRegister arguments = bytecode.m_arguments;
</span><span class="lines">@@ -92,8 +91,7 @@
</span><span class="cx">         sizeOperation = operationSizeFrameForForwardArguments;
</span><span class="cx">     else
</span><span class="cx">         sizeOperation = operationSizeFrameForVarargs;
</span><del>-    loadGlobalObject(regT0);
-    callOperation(sizeOperation, regT0, regT1, -firstFreeRegister, firstVarArgOffset);
</del><ins>+    callOperation(sizeOperation, TrustedImmPtr(m_codeBlock->globalObject()), regT1, -firstFreeRegister, firstVarArgOffset);
</ins><span class="cx">     move(TrustedImm32(-firstFreeRegister), regT1);
</span><span class="cx">     emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
</span><span class="cx">     addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), regT1, stackPointerRegister);
</span><span class="lines">@@ -103,16 +101,14 @@
</span><span class="cx">         setupOperation = operationSetupForwardArgumentsFrame;
</span><span class="cx">     else
</span><span class="cx">         setupOperation = operationSetupVarargsFrame;
</span><del>-    loadGlobalObject(regT3);
-    callOperation(setupOperation, regT3, regT1, regT2, firstVarArgOffset, regT0);
</del><ins>+    callOperation(setupOperation, TrustedImmPtr(m_codeBlock->globalObject()), regT1, regT2, firstVarArgOffset, regT0);
</ins><span class="cx">     move(returnValueGPR, regT1);
</span><span class="cx"> 
</span><span class="cx">     // Profile the argument count.
</span><span class="cx">     load32(Address(regT1, CallFrameSlot::argumentCountIncludingThis * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2);
</span><del>-    loadConstant(callLinkInfoConstant, regT0);
-    load32(Address(regT0, CallLinkInfo::offsetOfMaxArgumentCountIncludingThis()), regT3);
-    Jump notBiggest = branch32(Above, regT3, regT2);
-    store32(regT2, Address(regT0, CallLinkInfo::offsetOfMaxArgumentCountIncludingThis()));
</del><ins>+    load32(info->addressOfMaxArgumentCountIncludingThis(), regT0);
+    Jump notBiggest = branch32(Above, regT0, regT2);
+    store32(regT2, info->addressOfMaxArgumentCountIncludingThis());
</ins><span class="cx">     notBiggest.link(this);
</span><span class="cx">     
</span><span class="cx">     // Initialize 'this'.
</span><span class="lines">@@ -131,16 +127,17 @@
</span><span class="cx"> template<>
</span><span class="cx"> bool JIT::compileCallEval(const OpCallEval& bytecode)
</span><span class="cx"> {
</span><del>-    addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, argumentGPR1);
-    storePtr(callFrameRegister, Address(argumentGPR1, CallFrame::callerFrameOffset()));
</del><ins>+    addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, regT1);
+    storePtr(callFrameRegister, Address(regT1, CallFrame::callerFrameOffset()));
</ins><span class="cx"> 
</span><del>-    resetSP();
</del><ins>+    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+    checkStackPointerAlignment();
</ins><span class="cx"> 
</span><del>-    move(TrustedImm32(bytecode.m_ecmaMode.value()), argumentGPR2);
-    loadGlobalObject(argumentGPR0);
-    callOperation(operationCallEval, argumentGPR0, argumentGPR1, argumentGPR2);
-    addSlowCase(branchIfEmpty(returnValueGPR));
</del><ins>+    move(TrustedImm32(bytecode.m_ecmaMode.value()), regT2);
+    callOperation(operationCallEval, m_codeBlock->globalObject(), regT1, regT2);
</ins><span class="cx"> 
</span><ins>+    addSlowCase(branchIfEmpty(regT0));
+
</ins><span class="cx">     emitPutCallResult(bytecode);
</span><span class="cx"> 
</span><span class="cx">     return true;
</span><span class="lines">@@ -151,7 +148,7 @@
</span><span class="cx">     linkAllSlowCases(iter);
</span><span class="cx"> 
</span><span class="cx">     auto bytecode = instruction->as<OpCallEval>();
</span><del>-    CallLinkInfo* info = m_evalCallLinkInfos.add(CodeOrigin(m_bytecodeIndex));
</del><ins>+    CallLinkInfo* info = m_codeBlock->addCallLinkInfo(CodeOrigin(m_bytecodeIndex));
</ins><span class="cx">     info->setUpCall(CallLinkInfo::Call, regT0);
</span><span class="cx"> 
</span><span class="cx">     int registerOffset = -bytecode.m_argv;
</span><span class="lines">@@ -159,47 +156,43 @@
</span><span class="cx">     addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
</span><span class="cx"> 
</span><span class="cx">     load64(Address(stackPointerRegister, sizeof(Register) * CallFrameSlot::callee - sizeof(CallerFrameAndPC)), regT0);
</span><del>-    loadGlobalObject(regT3);
-    emitVirtualCallWithoutMovingGlobalObject(*m_vm, info);
-    resetSP();
</del><ins>+    emitVirtualCall(*m_vm, m_codeBlock->globalObject(), info);
+    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+    checkStackPointerAlignment();
</ins><span class="cx"> 
</span><span class="cx">     emitPutCallResult(bytecode);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> template<typename Op>
</span><del>-bool JIT::compileTailCall(const Op&, UnlinkedCallLinkInfo*, unsigned, JITConstantPool::Constant)
</del><ins>+bool JIT::compileTailCall(const Op&, CallLinkInfo*, unsigned)
</ins><span class="cx"> {
</span><span class="cx">     return false;
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> template<>
</span><del>-bool JIT::compileTailCall(const OpTailCall& bytecode, UnlinkedCallLinkInfo* info, unsigned callLinkInfoIndex, JITConstantPool::Constant callLinkInfoConstant)
</del><ins>+bool JIT::compileTailCall(const OpTailCall& bytecode, CallLinkInfo* info, unsigned callLinkInfoIndex)
</ins><span class="cx"> {
</span><del>-    std::unique_ptr<CallFrameShuffleData> shuffleData = makeUnique<CallFrameShuffleData>();
-    shuffleData->numPassedArgs = bytecode.m_argc;
-    shuffleData->numParameters = m_unlinkedCodeBlock->numParameters();
-    shuffleData->numberTagRegister = GPRInfo::numberTagRegister;
-    shuffleData->numLocals =
</del><ins>+    CallFrameShuffleData shuffleData;
+    shuffleData.numPassedArgs = bytecode.m_argc;
+    shuffleData.numberTagRegister = GPRInfo::numberTagRegister;
+    shuffleData.numLocals =
</ins><span class="cx">         bytecode.m_argv - sizeof(CallerFrameAndPC) / sizeof(Register);
</span><del>-    shuffleData->args.resize(bytecode.m_argc);
</del><ins>+    shuffleData.args.resize(bytecode.m_argc);
</ins><span class="cx">     for (unsigned i = 0; i < bytecode.m_argc; ++i) {
</span><del>-        shuffleData->args[i] =
</del><ins>+        shuffleData.args[i] =
</ins><span class="cx">             ValueRecovery::displacedInJSStack(
</span><span class="cx">                 virtualRegisterForArgumentIncludingThis(i) - bytecode.m_argv,
</span><span class="cx">                 DataFormatJS);
</span><span class="cx">     }
</span><del>-    shuffleData->callee = ValueRecovery::inGPR(regT0, DataFormatJS);
-    shuffleData->setupCalleeSaveRegisters(&RegisterAtOffsetList::llintBaselineCalleeSaveRegisters());
</del><ins>+    shuffleData.callee =
+        ValueRecovery::inGPR(regT0, DataFormatJS);
+    shuffleData.setupCalleeSaveRegisters(m_codeBlock);
+    info->setFrameShuffleData(shuffleData);
</ins><span class="cx"> 
</span><del>-    loadConstant(callLinkInfoConstant, regT2);
-    JumpList slowPaths = CallLinkInfo::emitTailCallDataICFastPath(*this, regT0, regT2, [&] {
-        CallFrameShuffler(*this, *shuffleData).prepareForTailCall();
</del><ins>+    JumpList slowPaths = info->emitTailCallFastPath(*this, regT0, regT2, CallLinkInfo::UseDataIC::Yes, [&] {
+        CallFrameShuffler(*this, shuffleData).prepareForTailCall();
</ins><span class="cx">     });
</span><span class="cx">     addSlowCase(slowPaths);
</span><del>-
-    shuffleData->shrinkToFit();
-    info->frameShuffleData = WTFMove(shuffleData);
-
</del><span class="cx">     auto doneLocation = label();
</span><span class="cx">     m_callCompilationInfo[callLinkInfoIndex].doneLocation = doneLocation;
</span><span class="cx">     
</span><span class="lines">@@ -225,25 +218,14 @@
</span><span class="cx">         - Caller initializes ReturnPC; CodeBlock.
</span><span class="cx">         - Caller restores callFrameRegister after return.
</span><span class="cx">     */
</span><ins>+    CallLinkInfo* info = nullptr;
+    if (opcodeID != op_call_eval)
+        info = m_codeBlock->addCallLinkInfo(CodeOrigin(m_bytecodeIndex));
+    compileSetupFrame(bytecode, info);
</ins><span class="cx"> 
</span><del>-    UnlinkedCallLinkInfo* info = nullptr;
-    JITConstantPool::Constant infoConstant = UINT_MAX;
-    if (opcodeID != op_call_eval) {
-        info = m_unlinkedCalls.add();
-        info->bytecodeIndex = m_bytecodeIndex;
-        info->callType = CallLinkInfo::callTypeFor(opcodeID);
-
-        infoConstant = m_constantPool.add(JITConstantPool::Type::CallLinkInfo, info);
-
-        ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
-        m_callCompilationInfo.append(CallCompilationInfo());
-        m_callCompilationInfo[callLinkInfoIndex].unlinkedCallLinkInfo = info;
-        m_callCompilationInfo[callLinkInfoIndex].callLinkInfoConstant = infoConstant;
-    }
-    compileSetupFrame(bytecode, infoConstant);
-
</del><span class="cx">     // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.
</span><del>-    uint32_t locationBits = CallSiteIndex(m_bytecodeIndex).bits();
</del><ins>+    auto bytecodeIndex = m_codeBlock->bytecodeIndex(instruction);
+    uint32_t locationBits = CallSiteIndex(bytecodeIndex).bits();
</ins><span class="cx">     store32(TrustedImm32(locationBits), Address(callFrameRegister, CallFrameSlot::argumentCountIncludingThis * static_cast<int>(sizeof(Register)) + TagOffset));
</span><span class="cx"> 
</span><span class="cx">     emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
</span><span class="lines">@@ -252,12 +234,16 @@
</span><span class="cx">     if (compileCallEval(bytecode))
</span><span class="cx">         return;
</span><span class="cx"> 
</span><del>-    if (compileTailCall(bytecode, info, callLinkInfoIndex, infoConstant))
</del><ins>+    ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
+    info->setUpCall(CallLinkInfo::callTypeFor(opcodeID), regT0);
+    m_callCompilationInfo.append(CallCompilationInfo());
+    m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;
+
+    if (compileTailCall(bytecode, info, callLinkInfoIndex))
</ins><span class="cx">         return;
</span><span class="cx"> 
</span><del>-    loadConstant(infoConstant, regT2);
</del><span class="cx">     if (opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments) {
</span><del>-        auto slowPaths = CallLinkInfo::emitTailCallDataICFastPath(*this, regT0, regT2, [&] {
</del><ins>+        auto slowPaths = info->emitTailCallFastPath(*this, regT0, regT2, CallLinkInfo::UseDataIC::Yes, [&] {
</ins><span class="cx">             emitRestoreCalleeSaves();
</span><span class="cx">             prepareForTailCallSlow(regT2);
</span><span class="cx">         });
</span><span class="lines">@@ -267,13 +253,14 @@
</span><span class="cx">         return;
</span><span class="cx">     }
</span><span class="cx"> 
</span><del>-    auto slowPaths = CallLinkInfo::emitDataICFastPath(*this, regT0, regT2);
</del><ins>+    auto slowPaths = info->emitFastPath(*this, regT0, regT2, CallLinkInfo::UseDataIC::Yes);
</ins><span class="cx">     auto doneLocation = label();
</span><span class="cx">     addSlowCase(slowPaths);
</span><span class="cx"> 
</span><span class="cx">     m_callCompilationInfo[callLinkInfoIndex].doneLocation = doneLocation;
</span><span class="cx"> 
</span><del>-    resetSP();
</del><ins>+    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+    checkStackPointerAlignment();
</ins><span class="cx"> 
</span><span class="cx">     emitPutCallResult(bytecode);
</span><span class="cx"> }
</span><span class="lines">@@ -286,13 +273,13 @@
</span><span class="cx"> 
</span><span class="cx">     linkAllSlowCases(iter);
</span><span class="cx"> 
</span><del>-    loadGlobalObject(regT3);
-    loadConstant(m_callCompilationInfo[callLinkInfoIndex].callLinkInfoConstant, regT2);
</del><ins>+    m_callCompilationInfo[callLinkInfoIndex].slowPathStart = label();
</ins><span class="cx"> 
</span><span class="cx">     if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments)
</span><span class="cx">         emitRestoreCalleeSaves();
</span><span class="cx"> 
</span><del>-    CallLinkInfo::emitDataICSlowPath(*m_vm, *this, regT2);
</del><ins>+    move(TrustedImmPtr(m_codeBlock->globalObject()), regT3);
+    m_callCompilationInfo[callLinkInfoIndex].callLinkInfo->emitSlowPath(*m_vm, *this);
</ins><span class="cx"> 
</span><span class="cx">     if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments) {
</span><span class="cx">         abortWithReason(JITDidReturnFromTailCall);
</span><span class="lines">@@ -299,7 +286,8 @@
</span><span class="cx">         return;
</span><span class="cx">     }
</span><span class="cx"> 
</span><del>-    resetSP();
</del><ins>+    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+    checkStackPointerAlignment();
</ins><span class="cx"> 
</span><span class="cx">     auto bytecode = instruction->as<Op>();
</span><span class="cx">     emitPutCallResult(bytecode);
</span><span class="lines">@@ -407,31 +395,17 @@
</span><span class="cx"> 
</span><span class="cx">     const Identifier* ident = &vm().propertyNames->next;
</span><span class="cx">     
</span><del>-    constexpr GPRReg baseGPR = BaselineGetByIdRegisters::base;
-    constexpr GPRReg resultGPR = BaselineGetByIdRegisters::result;
-    constexpr GPRReg stubInfoGPR = BaselineGetByIdRegisters::stubInfo;
</del><ins>+    emitJumpSlowCaseIfNotJSCell(regT0);
</ins><span class="cx"> 
</span><del>-    move(regT0, baseGPR);
-    emitJumpSlowCaseIfNotJSCell(baseGPR);
-
</del><span class="cx">     JITGetByIdGenerator gen(
</span><del>-        nullptr, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(BytecodeIndex(m_bytecodeIndex.offset())), RegisterSet::stubUnavailableRegisters(),
-        CacheableIdentifier::createFromImmortalIdentifier(ident->impl()), JSValueRegs(baseGPR), JSValueRegs(resultGPR), stubInfoGPR, AccessType::GetById);
-
-    UnlinkedStructureStubInfo* stubInfo = m_unlinkedStubInfos.add();
-    stubInfo->accessType = AccessType::GetById;
-    stubInfo->bytecodeIndex = m_bytecodeIndex;
-    JITConstantPool::Constant stubInfoIndex = m_constantPool.add(JITConstantPool::Type::StructureStubInfo, stubInfo);
-    gen.m_unlinkedStubInfoConstantIndex = stubInfoIndex;
-    gen.m_unlinkedStubInfo = stubInfo;
-
-    gen.generateBaselineDataICFastPath(*this, stubInfoIndex, stubInfoGPR);
-    resetSP(); // We might OSR exit here, so we need to conservatively reset SP
-    addSlowCase();
</del><ins>+        m_codeBlock, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(BytecodeIndex(m_bytecodeIndex.offset())), RegisterSet::stubUnavailableRegisters(),
+        CacheableIdentifier::createFromImmortalIdentifier(ident->impl()), JSValueRegs(regT0), JSValueRegs(regT0), regT1, AccessType::GetById);
+    gen.generateFastPath(*this);
+    addSlowCase(gen.slowPathJump());
</ins><span class="cx">     m_getByIds.append(gen);
</span><span class="cx"> 
</span><del>-    emitValueProfilingSite(bytecode, resultGPR);
-    emitPutVirtualRegister(bytecode.m_next, JSValueRegs(resultGPR));
</del><ins>+    emitValueProfilingSite(bytecode.metadata(m_codeBlock), regT0);
+    emitPutVirtualRegister(bytecode.m_next);
</ins><span class="cx"> 
</span><span class="cx">     fastCase.link(this);
</span><span class="cx"> }
</span><span class="lines">@@ -442,8 +416,10 @@
</span><span class="cx">     compileOpCallSlowCase<OpIteratorOpen>(instruction, iter, m_callLinkInfoIndex++);
</span><span class="cx">     emitJumpSlowToHotForCheckpoint(jump());
</span><span class="cx"> 
</span><ins>+
</ins><span class="cx">     linkAllSlowCases(iter);
</span><del>-    GPRReg iteratorGPR = BaselineGetByIdRegisters::base;
</del><ins>+
+    GPRReg iteratorGPR = regT0;
</ins><span class="cx">     JumpList notObject;
</span><span class="cx">     notObject.append(branchIfNotCell(iteratorGPR));
</span><span class="cx">     notObject.append(branchIfNotObject(iteratorGPR));
</span><span class="lines">@@ -456,17 +432,19 @@
</span><span class="cx">     
</span><span class="cx">     Label coldPathBegin = label();
</span><span class="cx"> 
</span><del>-    move(iteratorGPR, argumentGPR2);
-    loadConstant(gen.m_unlinkedStubInfoConstantIndex, argumentGPR1);
-    loadGlobalObject(argumentGPR0);
-    callOperationWithProfile<decltype(operationGetByIdOptimize)>(bytecode, Address(argumentGPR1, StructureStubInfo::offsetOfSlowOperation()), nextVReg, argumentGPR0, argumentGPR1, argumentGPR2, CacheableIdentifier::createFromImmortalIdentifier(ident).rawBits());
-    gen.reportSlowPathCall(coldPathBegin, Call());
</del><ins>+    Call call;
+    if (JITCode::useDataIC(JITType::BaselineJIT)) {
+        gen.stubInfo()->m_slowOperation = operationGetByIdOptimize;
+        move(TrustedImmPtr(gen.stubInfo()), GPRInfo::nonArgGPR0);
+        callOperationWithProfile<decltype(operationGetByIdOptimize)>(bytecode.metadata(m_codeBlock), Address(GPRInfo::nonArgGPR0, StructureStubInfo::offsetOfSlowOperation()), nextVReg, TrustedImmPtr(m_codeBlock->globalObject()), GPRInfo::nonArgGPR0, iteratorGPR, CacheableIdentifier::createFromImmortalIdentifier(ident).rawBits());
+    } else
+        call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdOptimize, nextVReg, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), iteratorGPR, CacheableIdentifier::createFromImmortalIdentifier(ident).rawBits());
+    gen.reportSlowPathCall(coldPathBegin, call);
</ins><span class="cx"> 
</span><span class="cx">     auto done = jump();
</span><span class="cx"> 
</span><span class="cx">     notObject.link(this);
</span><del>-    loadGlobalObject(argumentGPR0);
-    callOperation(operationThrowIteratorResultIsNotObject, argumentGPR0);
</del><ins>+    callOperation(operationThrowIteratorResultIsNotObject, TrustedImmPtr(m_codeBlock->globalObject()));
</ins><span class="cx"> 
</span><span class="cx">     done.link(this);
</span><span class="cx"> }
</span><span class="lines">@@ -474,6 +452,7 @@
</span><span class="cx"> void JIT::emit_op_iterator_next(const Instruction* instruction)
</span><span class="cx"> {
</span><span class="cx">     auto bytecode = instruction->as<OpIteratorNext>();
</span><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
</ins><span class="cx">     auto* tryFastFunction = ([&] () {
</span><span class="cx">         switch (instruction->width()) {
</span><span class="cx">         case Narrow: return iterator_next_try_fast_narrow;
</span><span class="lines">@@ -491,79 +470,50 @@
</span><span class="cx">     Jump fastCase = branch32(NotEqual, GPRInfo::returnValueGPR2, TrustedImm32(static_cast<uint32_t>(IterationMode::Generic)));
</span><span class="cx"> 
</span><span class="cx">     genericCase.link(this);
</span><del>-    load8FromMetadata(bytecode, OpIteratorNext::Metadata::offsetOfIterationMetadata() + IterationModeMetadata::offsetOfSeenModes(), regT0);
-    or32(TrustedImm32(static_cast<uint8_t>(IterationMode::Generic)), regT0);
-    store8ToMetadata(regT0, bytecode, OpIteratorNext::Metadata::offsetOfIterationMetadata() + IterationModeMetadata::offsetOfSeenModes());
</del><ins>+    or8(TrustedImm32(static_cast<uint8_t>(IterationMode::Generic)), AbsoluteAddress(&metadata.m_iterationMetadata.seenModes));
</ins><span class="cx">     compileOpCall<OpIteratorNext>(instruction, m_callLinkInfoIndex++);
</span><span class="cx">     advanceToNextCheckpoint();
</span><span class="cx">     // call result ({ done, value } JSObject) in regT0
</span><span class="cx"> 
</span><del>-    constexpr GPRReg stubInfoGPR = BaselineGetByIdRegisters::stubInfo;
-    constexpr GPRReg iterCallResultGPR = BaselineGetByIdRegisters::dontClobberRegister;
-    move(returnValueGPR, iterCallResultGPR);
</del><ins>+    GPRReg valueGPR = regT0;
+    GPRReg iterResultGPR = regT2;
+    GPRReg doneGPR = regT1;
+    // iterResultGPR will get trashed by the first get by id below.
+    move(valueGPR, iterResultGPR);
</ins><span class="cx"> 
</span><del>-    constexpr GPRReg doneGPR = BaselineGetByIdRegisters::result;
</del><span class="cx">     {
</span><del>-        constexpr GPRReg baseGPR = BaselineGetByIdRegisters::base;
-        static_assert(returnValueGPR == baseGPR);
</del><ins>+        emitJumpSlowCaseIfNotJSCell(iterResultGPR);
</ins><span class="cx"> 
</span><del>-        emitJumpSlowCaseIfNotJSCell(baseGPR);
-
</del><span class="cx">         RegisterSet preservedRegs = RegisterSet::stubUnavailableRegisters();
</span><del>-        preservedRegs.add(iterCallResultGPR);
</del><ins>+        preservedRegs.add(valueGPR);
</ins><span class="cx">         JITGetByIdGenerator gen(
</span><del>-            nullptr, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(BytecodeIndex(m_bytecodeIndex.offset())), preservedRegs,
-            CacheableIdentifier::createFromImmortalIdentifier(vm().propertyNames->done.impl()), JSValueRegs(baseGPR), JSValueRegs(doneGPR), stubInfoGPR, AccessType::GetById);
-
-        UnlinkedStructureStubInfo* stubInfo = m_unlinkedStubInfos.add();
-        stubInfo->accessType = AccessType::GetById;
-        stubInfo->bytecodeIndex = m_bytecodeIndex;
-        JITConstantPool::Constant stubInfoIndex = m_constantPool.add(JITConstantPool::Type::StructureStubInfo, stubInfo);
-        gen.m_unlinkedStubInfoConstantIndex = stubInfoIndex;
-        gen.m_unlinkedStubInfo = stubInfo;
-
-        gen.generateBaselineDataICFastPath(*this, stubInfoIndex, stubInfoGPR);
-        resetSP(); // We might OSR exit here, so we need to conservatively reset SP
-        addSlowCase();
</del><ins>+            m_codeBlock, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(BytecodeIndex(m_bytecodeIndex.offset())), preservedRegs,
+            CacheableIdentifier::createFromImmortalIdentifier(vm().propertyNames->done.impl()), JSValueRegs(iterResultGPR), JSValueRegs(doneGPR), regT3, AccessType::GetById);
+        gen.generateFastPath(*this);
+        addSlowCase(gen.slowPathJump());
</ins><span class="cx">         m_getByIds.append(gen);
</span><span class="cx"> 
</span><del>-        emitValueProfilingSite(bytecode, JSValueRegs { doneGPR });
</del><ins>+        emitValueProfilingSite(metadata, JSValueRegs { doneGPR });
</ins><span class="cx">         emitPutVirtualRegister(bytecode.m_done, doneGPR);
</span><span class="cx">         advanceToNextCheckpoint();
</span><span class="cx">     }
</span><span class="cx"> 
</span><ins>+
</ins><span class="cx">     {
</span><del>-        RegisterSet usedRegisters(doneGPR, iterCallResultGPR);
-        ScratchRegisterAllocator scratchAllocator(usedRegisters);
-        GPRReg scratch1 = scratchAllocator.allocateScratchGPR();
-        GPRReg scratch2 = scratchAllocator.allocateScratchGPR();
-        GPRReg globalGPR = scratchAllocator.allocateScratchGPR();
</del><ins>+        GPRReg scratch1 = regT2;
+        GPRReg scratch2 = regT3;
</ins><span class="cx">         const bool shouldCheckMasqueradesAsUndefined = false;
</span><del>-        loadGlobalObject(globalGPR);
-        JumpList iterationDone = branchIfTruthy(vm(), JSValueRegs(doneGPR), scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, globalGPR);
</del><ins>+        JumpList iterationDone = branchIfTruthy(vm(), JSValueRegs(doneGPR), scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject());
</ins><span class="cx"> 
</span><del>-        constexpr GPRReg baseGPR = BaselineGetByIdRegisters::base;
-        constexpr GPRReg resultGPR = BaselineGetByIdRegisters::result;
-        move(iterCallResultGPR, baseGPR);
-
</del><span class="cx">         JITGetByIdGenerator gen(
</span><del>-            nullptr, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(BytecodeIndex(m_bytecodeIndex.offset())), RegisterSet::stubUnavailableRegisters(),
-            CacheableIdentifier::createFromImmortalIdentifier(vm().propertyNames->value.impl()), JSValueRegs(baseGPR), JSValueRegs(resultGPR), stubInfoGPR, AccessType::GetById);
-
-        UnlinkedStructureStubInfo* stubInfo = m_unlinkedStubInfos.add();
-        stubInfo->accessType = AccessType::GetById;
-        stubInfo->bytecodeIndex = m_bytecodeIndex;
-        JITConstantPool::Constant stubInfoIndex = m_constantPool.add(JITConstantPool::Type::StructureStubInfo, stubInfo);
-        gen.m_unlinkedStubInfoConstantIndex = stubInfoIndex;
-        gen.m_unlinkedStubInfo = stubInfo;
-
-        gen.generateBaselineDataICFastPath(*this, stubInfoIndex, stubInfoGPR);
-        resetSP(); // We might OSR exit here, so we need to conservatively reset SP
-        addSlowCase();
</del><ins>+            m_codeBlock, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(BytecodeIndex(m_bytecodeIndex.offset())), RegisterSet::stubUnavailableRegisters(),
+            CacheableIdentifier::createFromImmortalIdentifier(vm().propertyNames->value.impl()), JSValueRegs(valueGPR), JSValueRegs(valueGPR), regT4, AccessType::GetById);
+        gen.generateFastPath(*this);
+        addSlowCase(gen.slowPathJump());
</ins><span class="cx">         m_getByIds.append(gen);
</span><span class="cx"> 
</span><del>-        emitValueProfilingSite(bytecode, JSValueRegs { resultGPR });
-        emitPutVirtualRegister(bytecode.m_value, resultGPR);
</del><ins>+        emitValueProfilingSite(metadata, JSValueRegs { valueGPR });
+        emitPutVirtualRegister(bytecode.m_value, valueGPR);
</ins><span class="cx"> 
</span><span class="cx">         iterationDone.link(this);
</span><span class="cx">     }
</span><span class="lines">@@ -577,15 +527,14 @@
</span><span class="cx">     compileOpCallSlowCase<OpIteratorNext>(instruction, iter, m_callLinkInfoIndex++);
</span><span class="cx">     emitJumpSlowToHotForCheckpoint(jump());
</span><span class="cx"> 
</span><del>-    constexpr GPRReg iterCallResultGPR = BaselineGetByIdRegisters::dontClobberRegister;
-
</del><span class="cx">     auto bytecode = instruction->as<OpIteratorNext>();
</span><span class="cx">     {
</span><span class="cx">         VirtualRegister doneVReg = bytecode.m_done;
</span><ins>+        GPRReg iterResultGPR = regT2;
</ins><span class="cx"> 
</span><span class="cx">         linkAllSlowCases(iter);
</span><span class="cx">         JumpList notObject;
</span><del>-        notObject.append(branchIfNotCell(iterCallResultGPR));
</del><ins>+        notObject.append(branchIfNotCell(iterResultGPR));
</ins><span class="cx"> 
</span><span class="cx">         UniquedStringImpl* ident = vm().propertyNames->done.impl();
</span><span class="cx">         JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];
</span><span class="lines">@@ -592,28 +541,29 @@
</span><span class="cx">         
</span><span class="cx">         Label coldPathBegin = label();
</span><span class="cx"> 
</span><del>-        notObject.append(branchIfNotObject(iterCallResultGPR));
</del><ins>+        notObject.append(branchIfNotObject(iterResultGPR));
</ins><span class="cx"> 
</span><del>-        move(iterCallResultGPR, argumentGPR2);
-        loadGlobalObject(argumentGPR0);
-        loadConstant(gen.m_unlinkedStubInfoConstantIndex, argumentGPR1);
-        callOperationWithProfile<decltype(operationGetByIdOptimize)>(bytecode, Address(argumentGPR1, StructureStubInfo::offsetOfSlowOperation()), doneVReg, argumentGPR0, argumentGPR1, argumentGPR2, CacheableIdentifier::createFromImmortalIdentifier(ident).rawBits());
</del><ins>+        Call call;
+        if (JITCode::useDataIC(JITType::BaselineJIT)) {
+            gen.stubInfo()->m_slowOperation = operationGetByIdOptimize;
+            move(TrustedImmPtr(gen.stubInfo()), GPRInfo::nonArgGPR0);
+            callOperationWithProfile<decltype(operationGetByIdOptimize)>(bytecode.metadata(m_codeBlock), Address(GPRInfo::nonArgGPR0, StructureStubInfo::offsetOfSlowOperation()), doneVReg, TrustedImmPtr(m_codeBlock->globalObject()), GPRInfo::nonArgGPR0, iterResultGPR, CacheableIdentifier::createFromImmortalIdentifier(ident).rawBits());
+        } else
+            call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdOptimize, doneVReg, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), iterResultGPR, CacheableIdentifier::createFromImmortalIdentifier(ident).rawBits());
+        gen.reportSlowPathCall(coldPathBegin, call);
</ins><span class="cx"> 
</span><del>-        gen.reportSlowPathCall(coldPathBegin, Call());
-
-        constexpr GPRReg doneGPR = BaselineGetByIdRegisters::result;
-        emitGetVirtualRegister(doneVReg, doneGPR);
-        emitGetVirtualRegister(bytecode.m_value, iterCallResultGPR);
</del><ins>+        emitGetVirtualRegister(doneVReg, regT1);
+        emitGetVirtualRegister(bytecode.m_value, regT0);
</ins><span class="cx">         emitJumpSlowToHotForCheckpoint(jump());
</span><span class="cx"> 
</span><span class="cx">         notObject.link(this);
</span><del>-        loadGlobalObject(argumentGPR0);
-        callOperation(operationThrowIteratorResultIsNotObject, argumentGPR0);
</del><ins>+        callOperation(operationThrowIteratorResultIsNotObject, TrustedImmPtr(m_codeBlock->globalObject()));
</ins><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx">     {   
</span><span class="cx">         linkAllSlowCases(iter);
</span><span class="cx">         VirtualRegister valueVReg = bytecode.m_value;
</span><ins>+        GPRReg iterResultGPR = regT0;
</ins><span class="cx"> 
</span><span class="cx">         UniquedStringImpl* ident = vm().propertyNames->value.impl();
</span><span class="cx">         JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];
</span><span class="lines">@@ -620,12 +570,16 @@
</span><span class="cx"> 
</span><span class="cx">         Label coldPathBegin = label();
</span><span class="cx"> 
</span><del>-        move(iterCallResultGPR, argumentGPR2);
-        loadGlobalObject(argumentGPR0);
-        loadConstant(gen.m_unlinkedStubInfoConstantIndex, argumentGPR1);
-        callOperationWithProfile<decltype(operationGetByIdOptimize)>(bytecode, Address(argumentGPR1, StructureStubInfo::offsetOfSlowOperation()), valueVReg, argumentGPR0, argumentGPR1, argumentGPR2, CacheableIdentifier::createFromImmortalIdentifier(ident).rawBits());
-        gen.reportSlowPathCall(coldPathBegin, Call());
</del><ins>+        Call call;
+        if (JITCode::useDataIC(JITType::BaselineJIT)) {
+            gen.stubInfo()->m_slowOperation = operationGetByIdOptimize;
+            move(TrustedImmPtr(gen.stubInfo()), GPRInfo::nonArgGPR0);
+            callOperationWithProfile<decltype(operationGetByIdOptimize)>(bytecode.metadata(m_codeBlock), Address(GPRInfo::nonArgGPR0, StructureStubInfo::offsetOfSlowOperation()), valueVReg, TrustedImmPtr(m_codeBlock->globalObject()), GPRInfo::nonArgGPR0, iterResultGPR, CacheableIdentifier::createFromImmortalIdentifier(ident).rawBits());
+        } else
+            call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdOptimize, valueVReg, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), iterResultGPR, CacheableIdentifier::createFromImmortalIdentifier(ident).rawBits());
+        gen.reportSlowPathCall(coldPathBegin, call);
</ins><span class="cx">     }
</span><ins>+
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> } // namespace JSC
</span></span></pre></div>
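<p>The hunks above restore the two slow-path call shapes for these get-by-id sites: with data ICs enabled, the slow operation is stored on the StructureStubInfo and invoked indirectly through Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()); otherwise a direct call is made and reported back to the generator. The following is a minimal, self-contained C++ sketch of that dispatch shape only; StubInfo, SlowOperation and operationGetByIdOptimizeStub are simplified stand-ins, not JSC types.</p>
<pre>
// Minimal sketch (not JSC code): models the two slow-path call shapes in the
// hunks above. With data ICs the slow operation lives in the stub info and is
// invoked indirectly through it; without data ICs the JIT emits a direct call
// whose site is later reported to the generator. All names are stand-ins.
#include <cstdio>

using SlowOperation = long (*)(long base);

struct StubInfo {
    SlowOperation slowOperation = nullptr; // filled in by the JIT when data ICs are on
};

static long operationGetByIdOptimizeStub(long base)
{
    // Placeholder for the real runtime operation.
    return base + 1;
}

static long callSlowPath(bool useDataIC, StubInfo& stubInfo, long base)
{
    if (useDataIC) {
        // Data IC: record the operation in the stub info, then call through it,
        // mirroring call(Address(stubInfoGPR, offsetOfSlowOperation())).
        stubInfo.slowOperation = operationGetByIdOptimizeStub;
        return stubInfo.slowOperation(base);
    }
    // Non-data IC: a direct call, recorded via reportSlowPathCall in the real code.
    return operationGetByIdOptimizeStub(base);
}

int main()
{
    StubInfo info;
    std::printf("%ld %ld\n", callSlowPath(true, info, 41), callSlowPath(false, info, 41));
}
</pre>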
<a id="trunkSourceJavaScriptCorejitJITCall32_64cpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/JITCall32_64.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/JITCall32_64.cpp 2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/JITCall32_64.cpp    2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -52,7 +52,7 @@
</span><span class="cx"> template<typename Op>
</span><span class="cx"> void JIT::emitPutCallResult(const Op& bytecode)
</span><span class="cx"> {
</span><del>-    emitValueProfilingSite(bytecode.metadata(m_profiledCodeBlock), JSValueRegs(regT1, regT0));
</del><ins>+    emitValueProfilingSite(bytecode.metadata(m_codeBlock), JSValueRegs(regT1, regT0));
</ins><span class="cx">     emitStore(destinationFor(bytecode, m_bytecodeIndex.checkpoint()).virtualRegister(), regT1, regT0);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -157,7 +157,7 @@
</span><span class="cx"> JIT::compileSetupFrame(const Op& bytecode, CallLinkInfo*)
</span><span class="cx"> {
</span><span class="cx">     unsigned checkpoint = m_bytecodeIndex.checkpoint();
</span><del>-    auto& metadata = bytecode.metadata(m_profiledCodeBlock);
</del><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
</ins><span class="cx">     int argCount = argumentCountIncludingThisFor(bytecode, checkpoint);
</span><span class="cx">     int registerOffset = -static_cast<int>(stackOffsetInRegistersForCall(bytecode, checkpoint));
</span><span class="cx"> 
</span><span class="lines">@@ -192,7 +192,7 @@
</span><span class="cx">         sizeOperation = operationSizeFrameForForwardArguments;
</span><span class="cx">     else
</span><span class="cx">         sizeOperation = operationSizeFrameForVarargs;
</span><del>-    callOperation(sizeOperation, m_profiledCodeBlock->globalObject(), JSValueRegs(regT1, regT0), -firstFreeRegister, firstVarArgOffset);
</del><ins>+    callOperation(sizeOperation, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0), -firstFreeRegister, firstVarArgOffset);
</ins><span class="cx">     move(TrustedImm32(-firstFreeRegister), regT1);
</span><span class="cx">     emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
</span><span class="cx">     addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 6 * sizeof(void*)))), regT1, stackPointerRegister);
</span><span class="lines">@@ -202,7 +202,7 @@
</span><span class="cx">         setupOperation = operationSetupForwardArgumentsFrame;
</span><span class="cx">     else
</span><span class="cx">         setupOperation = operationSetupVarargsFrame;
</span><del>-    callOperation(setupOperation, m_profiledCodeBlock->globalObject(), regT1, JSValueRegs(regT2, regT4), firstVarArgOffset, regT0);
</del><ins>+    callOperation(setupOperation, m_codeBlock->globalObject(), regT1, JSValueRegs(regT2, regT4), firstVarArgOffset, regT0);
</ins><span class="cx">     move(returnValueGPR, regT1);
</span><span class="cx"> 
</span><span class="cx">     // Profile the argument count.
</span><span class="lines">@@ -232,10 +232,10 @@
</span><span class="cx">     addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, regT1);
</span><span class="cx">     storePtr(callFrameRegister, Address(regT1, CallFrame::callerFrameOffset()));
</span><span class="cx"> 
</span><del>-    addPtr(TrustedImm32(stackPointerOffsetFor(m_unlinkedCodeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
</del><ins>+    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
</ins><span class="cx"> 
</span><span class="cx">     move(TrustedImm32(bytecode.m_ecmaMode.value()), regT2);
</span><del>-    callOperation(operationCallEval, m_profiledCodeBlock->globalObject(), regT1, regT2);
</del><ins>+    callOperation(operationCallEval, m_codeBlock->globalObject(), regT1, regT2);
</ins><span class="cx"> 
</span><span class="cx">     addSlowCase(branchIfEmpty(regT1));
</span><span class="cx"> 
</span><span class="lines">@@ -249,7 +249,7 @@
</span><span class="cx">     linkAllSlowCases(iter);
</span><span class="cx"> 
</span><span class="cx">     auto bytecode = instruction->as<OpCallEval>();
</span><del>-    CallLinkInfo* info = m_profiledCodeBlock->addCallLinkInfo(CodeOrigin(m_bytecodeIndex));
</del><ins>+    CallLinkInfo* info = m_codeBlock->addCallLinkInfo(CodeOrigin(m_bytecodeIndex));
</ins><span class="cx">     info->setUpCall(CallLinkInfo::Call, regT0);
</span><span class="cx"> 
</span><span class="cx">     int registerOffset = -bytecode.m_argv;
</span><span class="lines">@@ -258,8 +258,8 @@
</span><span class="cx">     addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
</span><span class="cx"> 
</span><span class="cx">     emitLoad(callee, regT1, regT0);
</span><del>-    emitVirtualCall(*m_vm, m_profiledCodeBlock->globalObject(), info);
-    addPtr(TrustedImm32(stackPointerOffsetFor(m_unlinkedCodeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
</del><ins>+    emitVirtualCall(*m_vm, m_codeBlock->globalObject(), info);
+    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
</ins><span class="cx">     checkStackPointerAlignment();
</span><span class="cx"> 
</span><span class="cx">     emitPutCallResult(bytecode);
</span><span class="lines">@@ -286,11 +286,11 @@
</span><span class="cx">     */
</span><span class="cx">     CallLinkInfo* info = nullptr;
</span><span class="cx">     if (opcodeID != op_call_eval)
</span><del>-        info = m_profiledCodeBlock->addCallLinkInfo(CodeOrigin(m_bytecodeIndex));
</del><ins>+        info = m_codeBlock->addCallLinkInfo(CodeOrigin(m_bytecodeIndex));
</ins><span class="cx">     compileSetupFrame(bytecode, info);
</span><span class="cx">     // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.
</span><span class="cx">     
</span><del>-    auto bytecodeIndex = m_profiledCodeBlock->bytecodeIndex(instruction);
</del><ins>+    auto bytecodeIndex = m_codeBlock->bytecodeIndex(instruction);
</ins><span class="cx">     uint32_t locationBits = CallSiteIndex(bytecodeIndex).bits();
</span><span class="cx">     store32(TrustedImm32(locationBits), tagFor(CallFrameSlot::argumentCountIncludingThis));
</span><span class="cx">     emitLoad(callee, regT1, regT0); // regT1, regT0 holds callee.
</span><span class="lines">@@ -324,7 +324,7 @@
</span><span class="cx">     addSlowCase(slowPaths);
</span><span class="cx">     m_callCompilationInfo[callLinkInfoIndex].doneLocation = label();
</span><span class="cx"> 
</span><del>-    addPtr(TrustedImm32(stackPointerOffsetFor(m_unlinkedCodeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
</del><ins>+    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
</ins><span class="cx">     checkStackPointerAlignment();
</span><span class="cx"> 
</span><span class="cx">     emitPutCallResult(bytecode);
</span><span class="lines">@@ -341,11 +341,12 @@
</span><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx">     linkAllSlowCases(iter);
</span><ins>+    m_callCompilationInfo[callLinkInfoIndex].slowPathStart = label();
</ins><span class="cx"> 
</span><span class="cx">     if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments)
</span><span class="cx">         emitRestoreCalleeSaves();
</span><span class="cx"> 
</span><del>-    move(TrustedImmPtr(m_profiledCodeBlock->globalObject()), regT3);
</del><ins>+    move(TrustedImmPtr(m_codeBlock->globalObject()), regT3);
</ins><span class="cx">     m_callCompilationInfo[callLinkInfoIndex].callLinkInfo->emitSlowPath(*m_vm, *this);
</span><span class="cx"> 
</span><span class="cx">     if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments) {
</span><span class="lines">@@ -353,7 +354,7 @@
</span><span class="cx">         return;
</span><span class="cx">     }
</span><span class="cx"> 
</span><del>-    addPtr(TrustedImm32(stackPointerOffsetFor(m_unlinkedCodeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
</del><ins>+    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
</ins><span class="cx">     checkStackPointerAlignment();
</span><span class="cx"> 
</span><span class="cx">     auto bytecode = instruction->as<Op>();
</span><span class="lines">@@ -394,7 +395,7 @@
</span><span class="cx">     JSValueRegs nextRegs = JSValueRegs(tagNextGPR, payloadNextGPR);
</span><span class="cx"> 
</span><span class="cx">     JITGetByIdGenerator gen(
</span><del>-        m_profiledCodeBlock,
</del><ins>+        m_codeBlock,
</ins><span class="cx">         JITType::BaselineJIT,
</span><span class="cx">         CodeOrigin(m_bytecodeIndex),
</span><span class="cx">         CallSiteIndex(BytecodeIndex(m_bytecodeIndex.offset())),
</span><span class="lines">@@ -409,7 +410,7 @@
</span><span class="cx">     addSlowCase(gen.slowPathJump());
</span><span class="cx">     m_getByIds.append(gen);
</span><span class="cx"> 
</span><del>-    emitValueProfilingSite(bytecode.metadata(m_profiledCodeBlock), nextRegs);
</del><ins>+    emitValueProfilingSite(bytecode.metadata(m_codeBlock), nextRegs);
</ins><span class="cx">     emitPutVirtualRegister(bytecode.m_next, nextRegs);
</span><span class="cx"> 
</span><span class="cx">     fastCase.link(this);
</span><span class="lines">@@ -439,10 +440,10 @@
</span><span class="cx">     Label coldPathBegin = label();
</span><span class="cx"> 
</span><span class="cx">     Call call = callOperationWithProfile(
</span><del>-        bytecode.metadata(m_profiledCodeBlock), // metadata
</del><ins>+        bytecode.metadata(m_codeBlock), // metadata
</ins><span class="cx">         operationGetByIdOptimize, // operation
</span><span class="cx">         nextVReg, // result
</span><del>-        TrustedImmPtr(m_profiledCodeBlock->globalObject()), // arg1
</del><ins>+        TrustedImmPtr(m_codeBlock->globalObject()), // arg1
</ins><span class="cx">         gen.stubInfo(), // arg2
</span><span class="cx">         JSValueRegs(tagIteratorGPR, payloadIteratorGPR), // arg3
</span><span class="cx">         CacheableIdentifier::createFromImmortalIdentifier(ident).rawBits()); // arg4
</span><span class="lines">@@ -451,7 +452,7 @@
</span><span class="cx">     auto done = jump();
</span><span class="cx"> 
</span><span class="cx">     notObject.link(this);
</span><del>-    callOperation(operationThrowIteratorResultIsNotObject, TrustedImmPtr(m_profiledCodeBlock->globalObject()));
</del><ins>+    callOperation(operationThrowIteratorResultIsNotObject, TrustedImmPtr(m_codeBlock->globalObject()));
</ins><span class="cx"> 
</span><span class="cx">     done.link(this);
</span><span class="cx"> }
</span><span class="lines">@@ -459,7 +460,7 @@
</span><span class="cx"> void JIT::emit_op_iterator_next(const Instruction* instruction)
</span><span class="cx"> {
</span><span class="cx">     auto bytecode = instruction->as<OpIteratorNext>();
</span><del>-    auto& metadata = bytecode.metadata(m_profiledCodeBlock);
</del><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
</ins><span class="cx">     auto* tryFastFunction = ([&] () {
</span><span class="cx">         switch (instruction->width()) {
</span><span class="cx">         case Narrow: return iterator_next_try_fast_narrow;
</span><span class="lines">@@ -505,7 +506,7 @@
</span><span class="cx">         preservedRegs.add(tagValueGPR);
</span><span class="cx">         preservedRegs.add(payloadValueGPR);
</span><span class="cx">         JITGetByIdGenerator gen(
</span><del>-            m_profiledCodeBlock,
</del><ins>+            m_codeBlock,
</ins><span class="cx">             JITType::BaselineJIT,
</span><span class="cx">             CodeOrigin(m_bytecodeIndex),
</span><span class="cx">             CallSiteIndex(BytecodeIndex(m_bytecodeIndex.offset())),
</span><span class="lines">@@ -532,10 +533,10 @@
</span><span class="cx">         GPRReg scratch1 = regT6;
</span><span class="cx">         GPRReg scratch2 = regT7;
</span><span class="cx">         const bool shouldCheckMasqueradesAsUndefined = false;
</span><del>-        JumpList iterationDone = branchIfTruthy(vm(), JSValueRegs(tagDoneGPR, payloadDoneGPR), scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_profiledCodeBlock->globalObject());
</del><ins>+        JumpList iterationDone = branchIfTruthy(vm(), JSValueRegs(tagDoneGPR, payloadDoneGPR), scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject());
</ins><span class="cx"> 
</span><span class="cx">         JITGetByIdGenerator gen(
</span><del>-            m_profiledCodeBlock,
</del><ins>+            m_codeBlock,
</ins><span class="cx">             JITType::BaselineJIT,
</span><span class="cx">             CodeOrigin(m_bytecodeIndex),
</span><span class="cx">             CallSiteIndex(BytecodeIndex(m_bytecodeIndex.offset())),
</span><span class="lines">@@ -587,10 +588,10 @@
</span><span class="cx">         Label coldPathBegin = label();
</span><span class="cx"> 
</span><span class="cx">         Call call = callOperationWithProfile(
</span><del>-            bytecode.metadata(m_profiledCodeBlock), // metadata
</del><ins>+            bytecode.metadata(m_codeBlock), // metadata
</ins><span class="cx">             operationGetByIdOptimize, // operation
</span><span class="cx">             doneVReg, // result
</span><del>-            TrustedImmPtr(m_profiledCodeBlock->globalObject()), // arg1
</del><ins>+            TrustedImmPtr(m_codeBlock->globalObject()), // arg1
</ins><span class="cx">             gen.stubInfo(), // arg2
</span><span class="cx">             JSValueRegs(tagIterResultGPR, payloadIterResultGPR), // arg3
</span><span class="cx">             CacheableIdentifier::createFromImmortalIdentifier(ident).rawBits()); // arg4
</span><span class="lines">@@ -601,7 +602,7 @@
</span><span class="cx">         emitJumpSlowToHotForCheckpoint(jump());
</span><span class="cx"> 
</span><span class="cx">         notObject.link(this);
</span><del>-        callOperation(operationThrowIteratorResultIsNotObject, TrustedImmPtr(m_profiledCodeBlock->globalObject()));
</del><ins>+        callOperation(operationThrowIteratorResultIsNotObject, TrustedImmPtr(m_codeBlock->globalObject()));
</ins><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx">     {   
</span><span class="lines">@@ -617,10 +618,10 @@
</span><span class="cx">         Label coldPathBegin = label();
</span><span class="cx"> 
</span><span class="cx">         Call call = callOperationWithProfile(
</span><del>-            bytecode.metadata(m_profiledCodeBlock), // metadata
</del><ins>+            bytecode.metadata(m_codeBlock), // metadata
</ins><span class="cx">             operationGetByIdOptimize, // operation
</span><span class="cx">             valueVReg, // result
</span><del>-            TrustedImmPtr(m_profiledCodeBlock->globalObject()), // arg1
</del><ins>+            TrustedImmPtr(m_codeBlock->globalObject()), // arg1
</ins><span class="cx">             gen.stubInfo(), // arg2
</span><span class="cx">             JSValueRegs(tagIterResultGPR, payloadIterResultGPR), // arg3
</span><span class="cx">             CacheableIdentifier::createFromImmortalIdentifier(ident).rawBits()); // arg4
</span></span></pre></div>
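<p>In the 32-bit file above, values are carried as JSValueRegs(regT1, regT0) register pairs rather than in a single 64-bit register. The sketch below only illustrates that tag/payload split; the struct and the tag constant are hypothetical, not actual JSC definitions.</p>
<pre>
// Minimal sketch (not JSC code): on 32-bit targets a JSValue occupies a
// tag/payload register pair, which is why the calls above take
// JSValueRegs(regT1, regT0). The tag value below is a made-up example.
#include <cstdint>
#include <cstdio>

struct JSValue32_64 {
    uint32_t tag;      // type information (cell, int32, boolean, ...)
    uint32_t payload;  // the value or pointer bits
};

static uint64_t pack(JSValue32_64 v)
{
    // On 64-bit targets the same information fits in one register.
    return (static_cast<uint64_t>(v.tag) << 32) | v.payload;
}

int main()
{
    JSValue32_64 v { 0xfffffff5u /* hypothetical tag */, 42u };
    std::printf("0x%llx\n", static_cast<unsigned long long>(pack(v)));
}
</pre>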
<a id="trunkSourceJavaScriptCorejitJITCodeh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/JITCode.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/JITCode.h        2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/JITCode.h   2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -32,11 +32,8 @@
</span><span class="cx"> #include "MacroAssemblerCodeRef.h"
</span><span class="cx"> #include "RegisterSet.h"
</span><span class="cx"> 
</span><del>-
</del><span class="cx"> namespace JSC {
</span><span class="cx"> 
</span><del>-class PCToCodeOriginMap;
-
</del><span class="cx"> namespace DFG {
</span><span class="cx"> class CommonData;
</span><span class="cx"> class JITCode;
</span><span class="lines">@@ -162,10 +159,10 @@
</span><span class="cx"> 
</span><span class="cx">     static bool useDataIC(JITType jitType)
</span><span class="cx">     {
</span><ins>+        if (!Options::useDataIC())
+            return false;
</ins><span class="cx">         if (JITCode::isBaselineCode(jitType))
</span><span class="cx">             return true;
</span><del>-        if (!Options::useDataIC())
-            return false;
</del><span class="cx">         return Options::useDataICInOptimizingJIT();
</span><span class="cx">     }
</span><span class="cx"> 
</span><span class="lines">@@ -226,8 +223,6 @@
</span><span class="cx"> 
</span><span class="cx">     bool isShared() const { return m_shareAttribute == ShareAttribute::Shared; }
</span><span class="cx"> 
</span><del>-    virtual PCToCodeOriginMap* pcToCodeOriginMap() { return nullptr; }
-
</del><span class="cx"> private:
</span><span class="cx">     JITType m_jitType;
</span><span class="cx">     ShareAttribute m_shareAttribute;
</span></span></pre></div>
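<p>The restored JITCode::useDataIC predicate above consults the global useDataIC option first, then always enables data ICs for baseline code, and otherwise defers to the optimizing-JIT option. Below is a minimal sketch of that decision with the Options flags modeled as plain booleans; the enum and parameter names are stand-ins.</p>
<pre>
// Minimal sketch (not JSC code): the same gating logic as the restored
// useDataIC() above, with Options::useDataIC() and
// Options::useDataICInOptimizingJIT() passed in as booleans.
#include <cassert>

enum class JITType { Baseline, DFG, FTL };

static bool useDataIC(JITType jitType, bool optUseDataIC, bool optUseDataICInOptimizingJIT)
{
    if (!optUseDataIC)
        return false;                    // master switch off: never use data ICs
    if (jitType == JITType::Baseline)
        return true;                     // baseline always uses them when enabled
    return optUseDataICInOptimizingJIT;  // DFG/FTL only behind the extra option
}

int main()
{
    assert(!useDataIC(JITType::Baseline, false, true));
    assert(useDataIC(JITType::Baseline, true, false));
    assert(!useDataIC(JITType::DFG, true, false));
    assert(useDataIC(JITType::FTL, true, true));
}
</pre>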
<a id="trunkSourceJavaScriptCorejitJITCompilationKeycpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/JITCompilationKey.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/JITCompilationKey.cpp    2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/JITCompilationKey.cpp       2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -38,7 +38,7 @@
</span><span class="cx">         out.print("<empty>");
</span><span class="cx">         return;
</span><span class="cx">     }
</span><del>-    out.print("(Compile of ", RawPointer(m_codeBlock), " with ", m_mode, ")");
</del><ins>+    out.print("(Compile of ", *m_profiledBlock, " with ", m_mode, ")");
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> } // namespace JSC
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorejitJITCompilationKeyh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/JITCompilationKey.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/JITCompilationKey.h      2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/JITCompilationKey.h 2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -30,24 +30,25 @@
</span><span class="cx"> 
</span><span class="cx"> namespace JSC {
</span><span class="cx"> 
</span><del>-class JSCell;
</del><ins>+class CodeBlock;
+class CodeBlockSet;
</ins><span class="cx"> 
</span><span class="cx"> class JITCompilationKey {
</span><span class="cx"> public:
</span><span class="cx">     JITCompilationKey()
</span><del>-        : m_codeBlock(nullptr)
</del><ins>+        : m_profiledBlock(nullptr)
</ins><span class="cx">         , m_mode(JITCompilationMode::InvalidCompilation)
</span><span class="cx">     {
</span><span class="cx">     }
</span><span class="cx">     
</span><span class="cx">     JITCompilationKey(WTF::HashTableDeletedValueType)
</span><del>-        : m_codeBlock(nullptr)
</del><ins>+        : m_profiledBlock(nullptr)
</ins><span class="cx">         , m_mode(JITCompilationMode::DFG)
</span><span class="cx">     {
</span><span class="cx">     }
</span><span class="cx">     
</span><del>-    JITCompilationKey(JSCell* profiledBlock, JITCompilationMode mode)
-        : m_codeBlock(profiledBlock)
</del><ins>+    JITCompilationKey(CodeBlock* profiledBlock, JITCompilationMode mode)
+        : m_profiledBlock(profiledBlock)
</ins><span class="cx">         , m_mode(mode)
</span><span class="cx">     {
</span><span class="cx">     }
</span><span class="lines">@@ -54,32 +55,32 @@
</span><span class="cx">     
</span><span class="cx">     bool operator!() const
</span><span class="cx">     {
</span><del>-        return !m_codeBlock && m_mode == JITCompilationMode::InvalidCompilation;
</del><ins>+        return !m_profiledBlock && m_mode == JITCompilationMode::InvalidCompilation;
</ins><span class="cx">     }
</span><span class="cx">     
</span><span class="cx">     bool isHashTableDeletedValue() const
</span><span class="cx">     {
</span><del>-        return !m_codeBlock && m_mode != JITCompilationMode::InvalidCompilation;
</del><ins>+        return !m_profiledBlock && m_mode != JITCompilationMode::InvalidCompilation;
</ins><span class="cx">     }
</span><span class="cx">     
</span><ins>+    CodeBlock* profiledBlock() const { return m_profiledBlock; }
</ins><span class="cx">     JITCompilationMode mode() const { return m_mode; }
</span><span class="cx">     
</span><span class="cx">     bool operator==(const JITCompilationKey& other) const
</span><span class="cx">     {
</span><del>-        return m_codeBlock == other.m_codeBlock
</del><ins>+        return m_profiledBlock == other.m_profiledBlock
</ins><span class="cx">             && m_mode == other.m_mode;
</span><span class="cx">     }
</span><span class="cx">     
</span><span class="cx">     unsigned hash() const
</span><span class="cx">     {
</span><del>-        return WTF::pairIntHash(WTF::PtrHash<JSCell*>::hash(m_codeBlock), static_cast<std::underlying_type<JITCompilationMode>::type>(m_mode));
</del><ins>+        return WTF::pairIntHash(WTF::PtrHash<CodeBlock*>::hash(m_profiledBlock), static_cast<std::underlying_type<JITCompilationMode>::type>(m_mode));
</ins><span class="cx">     }
</span><span class="cx">     
</span><span class="cx">     void dump(PrintStream&) const;
</span><span class="cx"> 
</span><span class="cx"> private:
</span><del>-    // Either CodeBlock* or UnlinkedCodeBlock* for baseline JIT.
-    JSCell* m_codeBlock;
</del><ins>+    CodeBlock* m_profiledBlock;
</ins><span class="cx">     JITCompilationMode m_mode;
</span><span class="cx"> };
</span><span class="cx"> 
</span></span></pre></div>
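<p>The restored JITCompilationKey above pairs the profiled CodeBlock pointer with the compilation mode, compares both fields for equality, and hashes by mixing a pointer hash with the mode. Below is a minimal, self-contained sketch of that shape; std::hash and the mixing step stand in for WTF::PtrHash and WTF::pairIntHash, and the type names are simplified.</p>
<pre>
// Minimal sketch (not JSC code): a compilation key that pairs a profiled
// CodeBlock pointer with a mode, with equality over both fields and a hash
// that mixes a pointer hash with the mode, as in the restored header.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <functional>

struct CodeBlock; // opaque for the purposes of the sketch

enum class CompilationMode : uint8_t { Invalid, Baseline, DFG, FTL };

struct CompilationKey {
    CodeBlock* profiledBlock = nullptr;
    CompilationMode mode = CompilationMode::Invalid;

    bool operator==(const CompilationKey& other) const
    {
        return profiledBlock == other.profiledBlock && mode == other.mode;
    }

    size_t hash() const
    {
        size_t h = std::hash<CodeBlock*>{}(profiledBlock);
        // Cheap pair-mixing stand-in for WTF::pairIntHash.
        return h ^ (static_cast<size_t>(mode) + 0x9e3779b9u + (h << 6) + (h >> 2));
    }
};

int main()
{
    CompilationKey a { nullptr, CompilationMode::Baseline };
    CompilationKey b { nullptr, CompilationMode::DFG };
    std::printf("%d %zu %zu\n", a == b, a.hash(), b.hash());
}
</pre>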
<a id="trunkSourceJavaScriptCorejitJITInlineCacheGeneratorcpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp      2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp 2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -32,7 +32,6 @@
</span><span class="cx"> #include "CacheableIdentifierInlines.h"
</span><span class="cx"> #include "CodeBlock.h"
</span><span class="cx"> #include "InlineAccess.h"
</span><del>-#include "JIT.h"
</del><span class="cx"> #include "LinkBuffer.h"
</span><span class="cx"> #include "StructureStubInfo.h"
</span><span class="cx"> 
</span><span class="lines">@@ -47,9 +46,10 @@
</span><span class="cx"> JITInlineCacheGenerator::JITInlineCacheGenerator(
</span><span class="cx">     CodeBlock* codeBlock, JITType jitType, CodeOrigin codeOrigin, CallSiteIndex callSite, AccessType accessType,
</span><span class="cx">     const RegisterSet& usedRegisters)
</span><del>-    : m_jitType(jitType)
</del><ins>+    : m_codeBlock(codeBlock)
+    , m_jitType(jitType)
</ins><span class="cx"> {
</span><del>-    m_stubInfo = codeBlock ? codeBlock->addStubInfo(accessType, codeOrigin) : garbageStubInfo();
</del><ins>+    m_stubInfo = m_codeBlock ? m_codeBlock->addStubInfo(accessType, codeOrigin) : garbageStubInfo();
</ins><span class="cx">     m_stubInfo->callSiteIndex = callSite;
</span><span class="cx"> 
</span><span class="cx">     m_stubInfo->usedRegisters = usedRegisters;
</span><span class="lines">@@ -59,6 +59,7 @@
</span><span class="cx">     LinkBuffer& fastPath, LinkBuffer& slowPath, CodeLocationLabel<JITStubRoutinePtrTag> start)
</span><span class="cx"> {
</span><span class="cx">     m_stubInfo->start = start;
</span><ins>+
</ins><span class="cx">     m_stubInfo->doneLocation = fastPath.locationOf<JSInternalPtrTag>(m_done);
</span><span class="cx"> 
</span><span class="cx">     if (!JITCode::useDataIC(m_jitType))
</span><span class="lines">@@ -66,15 +67,6 @@
</span><span class="cx">     m_stubInfo->slowPathStartLocation = slowPath.locationOf<JITStubRoutinePtrTag>(m_slowPathBegin);
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-void JITInlineCacheGenerator::generateBaselineDataICFastPath(JIT& jit, unsigned stubInfo, GPRReg stubInfoGPR)
-{
-    m_start = jit.label();
-    RELEASE_ASSERT(JITCode::useDataIC(m_jitType));
-    jit.loadConstant(stubInfo, stubInfoGPR);
-    jit.farJump(CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfCodePtr()), JITStubRoutinePtrTag);
-    m_done = jit.label();
-}
-
</del><span class="cx"> JITByIdGenerator::JITByIdGenerator(
</span><span class="cx">     CodeBlock* codeBlock, JITType jitType, CodeOrigin codeOrigin, CallSiteIndex callSite, AccessType accessType,
</span><span class="cx">     const RegisterSet& usedRegisters, JSValueRegs base, JSValueRegs value, GPRReg stubInfoGPR)
</span><span class="lines">@@ -118,7 +110,7 @@
</span><span class="cx">     CodeBlock* codeBlock, JITType jitType, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters,
</span><span class="cx">     CacheableIdentifier propertyName, JSValueRegs base, JSValueRegs value, GPRReg stubInfoGPR, AccessType accessType)
</span><span class="cx">     : JITByIdGenerator(codeBlock, jitType, codeOrigin, callSite, accessType, usedRegisters, base, value, stubInfoGPR)
</span><del>-    , m_isLengthAccess(codeBlock && propertyName.uid() == codeBlock->vm().propertyNames->length.impl())
</del><ins>+    , m_isLengthAccess(propertyName.uid() == codeBlock->vm().propertyNames->length.impl())
</ins><span class="cx"> {
</span><span class="cx">     RELEASE_ASSERT(base.payloadGPR() != value.tagGPR());
</span><span class="cx"> }
</span><span class="lines">@@ -128,37 +120,6 @@
</span><span class="cx">     generateFastCommon(jit, m_isLengthAccess ? InlineAccess::sizeForLengthAccess() : InlineAccess::sizeForPropertyAccess());
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-
-#if USE(JSVALUE64)
-static void generateGetByIdInlineAccess(JIT& jit, GPRReg stubInfoGPR, GPRReg base, GPRReg scratch, GPRReg result)
-{
-    CCallHelpers::JumpList done;
-
-    jit.load32(CCallHelpers::Address(base, JSCell::structureIDOffset()), scratch);
-    auto skipInlineAccess = jit.branch32(CCallHelpers::NotEqual, scratch, CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfInlineAccessBaseStructure()));
-    jit.load32(CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfByIdSelfOffset()), scratch);
-    jit.loadProperty(base, scratch, JSValueRegs { result });
-    auto finished = jit.jump();
-
-    skipInlineAccess.link(&jit);
-    jit.farJump(CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfCodePtr()), JITStubRoutinePtrTag);
-
-    finished.link(&jit);
-}
-
-void JITGetByIdGenerator::generateBaselineDataICFastPath(JIT& jit, unsigned stubInfo, GPRReg stubInfoGPR)
-{
-    RELEASE_ASSERT(JITCode::useDataIC(m_jitType));
-
-    m_start = jit.label();
-
-    jit.loadConstant(stubInfo, stubInfoGPR);
-    generateGetByIdInlineAccess(jit, stubInfoGPR, BaselineGetByIdRegisters::base, BaselineGetByIdRegisters::scratch, BaselineGetByIdRegisters::result);
-
-    m_done = jit.label();
-}
-#endif
-
</del><span class="cx"> JITGetByIdWithThisGenerator::JITGetByIdWithThisGenerator(
</span><span class="cx">     CodeBlock* codeBlock, JITType jitType, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters,
</span><span class="cx">     CacheableIdentifier, JSValueRegs value, JSValueRegs base, JSValueRegs thisRegs, GPRReg stubInfoGPR)
</span><span class="lines">@@ -177,20 +138,6 @@
</span><span class="cx">     generateFastCommon(jit, InlineAccess::sizeForPropertyAccess());
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-#if USE(JSVALUE64)
-void JITGetByIdWithThisGenerator::generateBaselineDataICFastPath(JIT& jit, unsigned stubInfo, GPRReg stubInfoGPR)
-{
-    RELEASE_ASSERT(JITCode::useDataIC(m_jitType));
-
-    m_start = jit.label();
-
-    jit.loadConstant(stubInfo, stubInfoGPR);
-    generateGetByIdInlineAccess(jit, stubInfoGPR, BaselineGetByIdWithThisRegisters::base, BaselineGetByIdWithThisRegisters::scratch, BaselineGetByIdWithThisRegisters::result);
-
-    m_done = jit.label();
-}
-#endif
-
</del><span class="cx"> JITPutByIdGenerator::JITPutByIdGenerator(
</span><span class="cx">     CodeBlock* codeBlock, JITType jitType, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters, CacheableIdentifier,
</span><span class="cx">     JSValueRegs base, JSValueRegs value, GPRReg stubInfoGPR, GPRReg scratch, 
</span><span class="lines">@@ -202,36 +149,6 @@
</span><span class="cx">     m_stubInfo->usedRegisters.clear(scratch);
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-#if USE(JSVALUE64)
-void JITPutByIdGenerator::generateBaselineDataICFastPath(JIT& jit, unsigned stubInfo, GPRReg stubInfoGPR)
-{
-    RELEASE_ASSERT(JITCode::useDataIC(m_jitType));
-
-    m_start = jit.label();
-
-    jit.loadConstant(stubInfo, stubInfoGPR);
-
-    GPRReg base = BaselinePutByIdRegisters::base;
-    GPRReg scratch = BaselinePutByIdRegisters::scratch;
-    GPRReg scratch2 = BaselinePutByIdRegisters::scratch2;
-    GPRReg value = BaselinePutByIdRegisters::value;
-
-    CCallHelpers::JumpList done;
-
-    jit.load32(CCallHelpers::Address(base, JSCell::structureIDOffset()), scratch);
-    auto skipInlineAccess = jit.branch32(CCallHelpers::NotEqual, scratch, CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfInlineAccessBaseStructure()));
-    jit.load32(CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfByIdSelfOffset()), scratch);
-    jit.storeProperty(JSValueRegs { value }, base, scratch, scratch2);
-    auto finished = jit.jump();
-
-    skipInlineAccess.link(&jit);
-    jit.farJump(CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfCodePtr()), JITStubRoutinePtrTag);
-
-    finished.link(&jit);
-    m_done = jit.label();
-}
-#endif
-
</del><span class="cx"> void JITPutByIdGenerator::generateFastPath(MacroAssembler& jit)
</span><span class="cx"> {
</span><span class="cx">     generateFastCommon(jit, InlineAccess::sizeForPropertyReplace());
</span><span class="lines">@@ -283,7 +200,7 @@
</span><span class="cx">     m_start = jit.label();
</span><span class="cx">     if (JITCode::useDataIC(m_jitType)) {
</span><span class="cx">         jit.move(CCallHelpers::TrustedImmPtr(m_stubInfo), m_stubInfo->m_stubInfoGPR);
</span><del>-        jit.farJump(CCallHelpers::Address(m_stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfCodePtr()), JITStubRoutinePtrTag);
</del><ins>+        jit.call(CCallHelpers::Address(m_stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfCodePtr()), JITStubRoutinePtrTag);
</ins><span class="cx">     } else
</span><span class="cx">         m_slowPathJump = jit.patchableJump();
</span><span class="cx">     m_done = jit.label();
</span><span class="lines">@@ -319,7 +236,7 @@
</span><span class="cx">     m_start = jit.label();
</span><span class="cx">     if (JITCode::useDataIC(m_jitType)) {
</span><span class="cx">         jit.move(CCallHelpers::TrustedImmPtr(m_stubInfo), m_stubInfo->m_stubInfoGPR);
</span><del>-        jit.farJump(CCallHelpers::Address(m_stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfCodePtr()), JITStubRoutinePtrTag);
</del><ins>+        jit.call(CCallHelpers::Address(m_stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfCodePtr()), JITStubRoutinePtrTag);
</ins><span class="cx">     } else
</span><span class="cx">         m_slowPathJump = jit.patchableJump();
</span><span class="cx">     m_done = jit.label();
</span><span class="lines">@@ -353,7 +270,7 @@
</span><span class="cx">     m_start = jit.label();
</span><span class="cx">     if (JITCode::useDataIC(m_jitType)) {
</span><span class="cx">         jit.move(CCallHelpers::TrustedImmPtr(m_stubInfo), m_stubInfo->m_stubInfoGPR);
</span><del>-        jit.farJump(CCallHelpers::Address(m_stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfCodePtr()), JITStubRoutinePtrTag);
</del><ins>+        jit.call(CCallHelpers::Address(m_stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfCodePtr()), JITStubRoutinePtrTag);
</ins><span class="cx">     } else
</span><span class="cx">         m_slowPathJump = jit.patchableJump();
</span><span class="cx">     m_done = jit.label();
</span><span class="lines">@@ -383,34 +300,6 @@
</span><span class="cx">     generateFastCommon(jit, InlineAccess::sizeForPropertyAccess());
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-#if USE(JSVALUE64)
-void JITInByIdGenerator::generateBaselineDataICFastPath(JIT& jit, unsigned stubInfo, GPRReg stubInfoGPR)
-{
-    RELEASE_ASSERT(JITCode::useDataIC(m_jitType));
-
-    m_start = jit.label();
-
-    jit.loadConstant(stubInfo, stubInfoGPR);
-
-    GPRReg base = BaselineInByIdRegisters::base;
-    GPRReg result = BaselineInByIdRegisters::result;
-    GPRReg scratch = BaselineInByIdRegisters::scratch;
-
-    CCallHelpers::JumpList done;
-
-    jit.load32(CCallHelpers::Address(base, JSCell::structureIDOffset()), scratch);
-    auto skipInlineAccess = jit.branch32(CCallHelpers::NotEqual, scratch, CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfInlineAccessBaseStructure()));
-    jit.boxBoolean(true, JSValueRegs { result });
-    auto finished = jit.jump();
-
-    skipInlineAccess.link(&jit);
-    jit.farJump(CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfCodePtr()), JITStubRoutinePtrTag);
-
-    finished.link(&jit);
-    m_done = jit.label();
-}
-#endif
-
</del><span class="cx"> JITInstanceOfGenerator::JITInstanceOfGenerator(
</span><span class="cx">     CodeBlock* codeBlock, JITType jitType, CodeOrigin codeOrigin, CallSiteIndex callSiteIndex,
</span><span class="cx">     const RegisterSet& usedRegisters, GPRReg result, GPRReg value, GPRReg prototype, GPRReg stubInfoGPR,
</span><span class="lines">@@ -443,7 +332,7 @@
</span><span class="cx">     m_start = jit.label();
</span><span class="cx">     if (JITCode::useDataIC(m_jitType)) {
</span><span class="cx">         jit.move(CCallHelpers::TrustedImmPtr(m_stubInfo), m_stubInfo->m_stubInfoGPR);
</span><del>-        jit.farJump(CCallHelpers::Address(m_stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfCodePtr()), JITStubRoutinePtrTag);
</del><ins>+        jit.call(CCallHelpers::Address(m_stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfCodePtr()), JITStubRoutinePtrTag);
</ins><span class="cx">     } else
</span><span class="cx">         m_slowPathJump = jit.patchableJump();
</span><span class="cx">     m_done = jit.label();
</span><span class="lines">@@ -479,7 +368,7 @@
</span><span class="cx">     m_start = jit.label();
</span><span class="cx">     if (JITCode::useDataIC(m_jitType)) {
</span><span class="cx">         jit.move(CCallHelpers::TrustedImmPtr(m_stubInfo), m_stubInfo->m_stubInfoGPR);
</span><del>-        jit.farJump(CCallHelpers::Address(m_stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfCodePtr()), JITStubRoutinePtrTag);
</del><ins>+        jit.call(CCallHelpers::Address(m_stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfCodePtr()), JITStubRoutinePtrTag);
</ins><span class="cx">     } else
</span><span class="cx">         m_slowPathJump = jit.patchableJump();
</span><span class="cx">     m_done = jit.label();
</span><span class="lines">@@ -516,7 +405,7 @@
</span><span class="cx">     m_start = jit.label();
</span><span class="cx">     if (JITCode::useDataIC(m_jitType)) {
</span><span class="cx">         jit.move(CCallHelpers::TrustedImmPtr(m_stubInfo), m_stubInfo->m_stubInfoGPR);
</span><del>-        jit.farJump(CCallHelpers::Address(m_stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfCodePtr()), JITStubRoutinePtrTag);
</del><ins>+        jit.call(CCallHelpers::Address(m_stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfCodePtr()), JITStubRoutinePtrTag);
</ins><span class="cx">     } else
</span><span class="cx">         m_slowPathJump = jit.patchableJump();
</span><span class="cx">     m_done = jit.label();
</span><span class="lines">@@ -551,7 +440,7 @@
</span><span class="cx">     m_start = jit.label();
</span><span class="cx">     if (JITCode::useDataIC(m_jitType)) {
</span><span class="cx">         jit.move(CCallHelpers::TrustedImmPtr(m_stubInfo), m_stubInfo->m_stubInfoGPR);
</span><del>-        jit.farJump(CCallHelpers::Address(m_stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfCodePtr()), JITStubRoutinePtrTag);
</del><ins>+        jit.call(CCallHelpers::Address(m_stubInfo->m_stubInfoGPR, StructureStubInfo::offsetOfCodePtr()), JITStubRoutinePtrTag);
</ins><span class="cx">     } else
</span><span class="cx">         m_slowPathJump = jit.patchableJump();
</span><span class="cx">     m_done = jit.label();
</span></span></pre></div>
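<p>The surviving fast paths in this file materialize the stub-info pointer into a register and call through its code-pointer slot, while the removed baseline-data-IC helpers additionally performed an inline self access: compare the base cell's structure ID against the one cached in the stub info and, on a match, load the property at the cached offset before falling back to that pointer. A minimal sketch of that control flow in plain C++ follows; Object, StubInfo and genericHandler are simplified stand-ins, not JSC types.</p>
<pre>
// Minimal sketch (not JSC code): the inline-access shape emitted by the IC
// generators. On a structure match the property is read at the cached offset;
// otherwise control transfers to the handler held in the stub info, mirroring
// farJump/call(Address(stubInfoGPR, offsetOfCodePtr())).
#include <cstdint>
#include <cstdio>
#include <vector>

struct StubInfo;

struct Object {
    uint32_t structureID;
    std::vector<int64_t> inlineStorage;        // property slots
};

struct StubInfo {
    uint32_t inlineAccessBaseStructure;        // structure the self access was cached for
    uint32_t byIdSelfOffset;                   // slot index of the cached property
    int64_t (*codePtr)(StubInfo&, Object&);    // out-of-line handler (other cases, misses)
};

static int64_t genericHandler(StubInfo&, Object&)
{
    // Placeholder for the generated access cases / slow path.
    return -1;
}

static int64_t getByIdFastPath(StubInfo& stubInfo, Object& base)
{
    if (base.structureID == stubInfo.inlineAccessBaseStructure)
        return base.inlineStorage[stubInfo.byIdSelfOffset]; // self-access hit
    return stubInfo.codePtr(stubInfo, base);                // dispatch through code pointer
}

int main()
{
    Object o { 7, { 10, 20, 30 } };
    StubInfo hit { 7, 2, genericHandler };
    StubInfo miss { 9, 0, genericHandler };
    std::printf("%lld %lld\n",
        static_cast<long long>(getByIdFastPath(hit, o)),   // structure match: slot 2 -> 30
        static_cast<long long>(getByIdFastPath(miss, o))); // mismatch: generic handler -> -1
}
</pre>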
<a id="trunkSourceJavaScriptCorejitJITInlineCacheGeneratorh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/JITInlineCacheGenerator.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/JITInlineCacheGenerator.h        2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/JITInlineCacheGenerator.h   2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -28,7 +28,6 @@
</span><span class="cx"> #if ENABLE(JIT)
</span><span class="cx"> 
</span><span class="cx"> #include "CodeOrigin.h"
</span><del>-#include "JITOperationValidation.h"
</del><span class="cx"> #include "JITOperations.h"
</span><span class="cx"> #include "JSCJSValue.h"
</span><span class="cx"> #include "PutKind.h"
</span><span class="lines">@@ -39,120 +38,11 @@
</span><span class="cx"> class CacheableIdentifier;
</span><span class="cx"> class CallSiteIndex;
</span><span class="cx"> class CodeBlock;
</span><del>-class JIT;
</del><span class="cx"> class StructureStubInfo;
</span><del>-struct UnlinkedStructureStubInfo;
</del><span class="cx"> 
</span><span class="cx"> enum class AccessType : int8_t;
</span><span class="cx"> enum class JITType : uint8_t;
</span><span class="cx"> 
</span><del>-#if USE(JSVALUE64)
-struct BaselineDelByValRegisters {
-    static constexpr GPRReg base = GPRInfo::regT1;
-    static constexpr GPRReg property = GPRInfo::regT0;
-    static constexpr GPRReg result = GPRInfo::regT0;
-    static constexpr GPRReg stubInfo = GPRInfo::regT3;
-    static constexpr GPRReg scratch = GPRInfo::regT2;
-};
-
-struct BaselineDelByIdRegisters {
-    static constexpr GPRReg base = GPRInfo::regT1;
-    static constexpr GPRReg result = GPRInfo::regT0;
-    static constexpr GPRReg stubInfo = GPRInfo::regT3;
-    static constexpr GPRReg scratch = GPRInfo::regT2;
-};
-
-struct BaselineGetByValRegisters {
-    static constexpr GPRReg base = GPRInfo::regT0;
-    static constexpr GPRReg property = GPRInfo::regT1;
-    static constexpr GPRReg result = GPRInfo::regT0;
-    static constexpr GPRReg stubInfo = GPRInfo::regT2;
-    static constexpr GPRReg scratch = GPRInfo::regT3;
-};
-
-struct BaselineEnumeratorGetByValRegisters {
-    static constexpr GPRReg base = GPRInfo::regT0;
-    static constexpr GPRReg property = GPRInfo::regT1;
-    static constexpr GPRReg result = GPRInfo::regT0;
-    static constexpr GPRReg stubInfo = GPRInfo::regT2;
-    // We rely on this when linking a CodeBlock and initializing registers for a GetByVal StubInfo.
-    static_assert(base == BaselineGetByValRegisters::base);
-    static_assert(property == BaselineGetByValRegisters::property);
-    static_assert(result == BaselineGetByValRegisters::result);
-    static_assert(stubInfo == BaselineGetByValRegisters::stubInfo);
-
-    static constexpr GPRReg scratch1 = GPRInfo::regT3;
-    static constexpr GPRReg scratch2 = GPRInfo::regT4;
-    static constexpr GPRReg scratch3 = GPRInfo::regT5;
-};
-
-struct BaselineInstanceofRegisters {
-    static constexpr GPRReg result = GPRInfo::regT0;
-    static constexpr GPRReg value = GPRInfo::argumentGPR2;
-    static constexpr GPRReg proto = GPRInfo::argumentGPR3;
-    static constexpr GPRReg stubInfo = GPRInfo::argumentGPR1;
-    static constexpr GPRReg scratch1 = GPRInfo::nonArgGPR0;
-    static constexpr GPRReg scratch2 = GPRInfo::nonArgGPR1;
-};
-
-struct BaselineInByValRegisters {
-    static constexpr GPRReg result = GPRInfo::regT0;
-    static constexpr GPRReg base = GPRInfo::regT0;
-    static constexpr GPRReg property = GPRInfo::regT1;
-    static_assert(base == BaselineGetByValRegisters::base);
-    static_assert(property == BaselineGetByValRegisters::property);
-    static constexpr GPRReg stubInfo = GPRInfo::regT2;
-    static constexpr GPRReg scratch = GPRInfo::regT3;
-};
-
-struct BaselineGetByIdRegisters {
-    static constexpr GPRReg result = GPRInfo::regT0;
-    static constexpr GPRReg base = GPRInfo::regT0;
-    static constexpr GPRReg stubInfo = GPRInfo::regT1;
-    static constexpr GPRReg scratch = GPRInfo::regT2;
-    static constexpr GPRReg dontClobberRegister = GPRInfo::regT3;
-};
-
-struct BaselineGetByIdWithThisRegisters {
-    static constexpr GPRReg result = GPRInfo::regT0;
-    static constexpr GPRReg base = GPRInfo::regT0;
-    static constexpr GPRReg thisValue = GPRInfo::regT1;
-    static constexpr GPRReg stubInfo = GPRInfo::regT2;
-    static constexpr GPRReg scratch = GPRInfo::regT3;
-};
-
-struct BaselineInByIdRegisters {
-    static constexpr GPRReg result = BaselineGetByIdRegisters::result;
-    static constexpr GPRReg base = BaselineGetByIdRegisters::base;
-    static constexpr GPRReg stubInfo = BaselineGetByIdRegisters::stubInfo;
-    static constexpr GPRReg scratch = BaselineGetByIdRegisters::scratch;
-};
-
-struct BaselinePutByIdRegisters {
-    static constexpr GPRReg base = GPRInfo::regT0;
-    static constexpr GPRReg value = GPRInfo::regT1;
-    static constexpr GPRReg stubInfo = GPRInfo::regT3;
-    static constexpr GPRReg scratch = GPRInfo::regT2;
-    static constexpr GPRReg scratch2 = GPRInfo::regT4;
-};
-
-struct BaselinePutByValRegisters {
-    static constexpr GPRReg base = GPRInfo::regT0;
-    static constexpr GPRReg property = GPRInfo::regT1;
-    static constexpr GPRReg value = GPRInfo::regT2;
-    static constexpr GPRReg profile = GPRInfo::regT3;
-    static constexpr GPRReg stubInfo = GPRInfo::regT4;
-};
-
-struct BaselinePrivateBrandRegisters {
-    static constexpr GPRReg base = GPRInfo::regT0;
-    static constexpr GPRReg brand = GPRInfo::regT1;
-    static_assert(base == BaselineGetByValRegisters::base);
-    static_assert(brand == BaselineGetByValRegisters::property);
-    static constexpr GPRReg stubInfo = GPRInfo::regT2;
-};
-#endif
-
</del><span class="cx"> class JITInlineCacheGenerator {
</span><span class="cx"> protected:
</span><span class="cx">     JITInlineCacheGenerator() { }
</span><span class="lines">@@ -172,18 +62,12 @@
</span><span class="cx">     void finalize(
</span><span class="cx">         LinkBuffer& fastPathLinkBuffer, LinkBuffer& slowPathLinkBuffer,
</span><span class="cx">         CodeLocationLabel<JITStubRoutinePtrTag> start);
</span><del>-
-    void generateBaselineDataICFastPath(JIT&, unsigned stubInfoConstant, GPRReg stubInfoGPR);
-
-    UnlinkedStructureStubInfo* m_unlinkedStubInfo { nullptr };
-    unsigned m_unlinkedStubInfoConstantIndex { std::numeric_limits<unsigned>::max() };
-
</del><ins>+    
</ins><span class="cx"> protected:
</span><ins>+    CodeBlock* m_codeBlock;
</ins><span class="cx">     JITType m_jitType;
</span><span class="cx">     StructureStubInfo* m_stubInfo;
</span><span class="cx"> 
</span><del>-public:
-    MacroAssembler::Label m_start;
</del><span class="cx">     MacroAssembler::Label m_done;
</span><span class="cx">     MacroAssembler::Label m_slowPathBegin;
</span><span class="cx">     MacroAssembler::Call m_slowPathCall;
</span><span class="lines">@@ -214,7 +98,7 @@
</span><span class="cx">     JSValueRegs m_base;
</span><span class="cx">     JSValueRegs m_value;
</span><span class="cx"> 
</span><del>-public:
</del><ins>+    MacroAssembler::Label m_start;
</ins><span class="cx">     MacroAssembler::Jump m_slowPathJump;
</span><span class="cx"> };
</span><span class="cx"> 
</span><span class="lines">@@ -227,7 +111,6 @@
</span><span class="cx">         JSValueRegs base, JSValueRegs value, GPRReg stubInfoGPR, AccessType);
</span><span class="cx">     
</span><span class="cx">     void generateFastPath(MacroAssembler&);
</span><del>-    void generateBaselineDataICFastPath(JIT&, unsigned stubInfoConstant, GPRReg stubInfoGPR);
</del><span class="cx"> 
</span><span class="cx"> private:
</span><span class="cx">     bool m_isLengthAccess;
</span><span class="lines">@@ -241,7 +124,6 @@
</span><span class="cx">         CodeBlock*, JITType, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, CacheableIdentifier,
</span><span class="cx">         JSValueRegs value, JSValueRegs base, JSValueRegs thisRegs, GPRReg stubInfoGPR);
</span><span class="cx"> 
</span><del>-    void generateBaselineDataICFastPath(JIT&, unsigned stubInfoConstant, GPRReg stubInfoGPR);
</del><span class="cx">     void generateFastPath(MacroAssembler&);
</span><span class="cx"> };
</span><span class="cx"> 
</span><span class="lines">@@ -250,11 +132,10 @@
</span><span class="cx">     JITPutByIdGenerator() = default;
</span><span class="cx"> 
</span><span class="cx">     JITPutByIdGenerator(
</span><del>-        CodeBlock*, JITType, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, CacheableIdentifier,
-        JSValueRegs base, JSValueRegs value, GPRReg stubInfoGPR, GPRReg scratch, ECMAMode, PutKind);
</del><ins>+        CodeBlock*, JITType, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, CacheableIdentifier, JSValueRegs base,
+        JSValueRegs value, GPRReg stubInfoGPR, GPRReg scratch, ECMAMode, PutKind);
</ins><span class="cx">     
</span><span class="cx">     void generateFastPath(MacroAssembler&);
</span><del>-    void generateBaselineDataICFastPath(JIT&, unsigned stubInfoConstant, GPRReg stubInfoGPR);
</del><span class="cx">     
</span><span class="cx">     V_JITOperation_GSsiJJC slowPathFunction();
</span><span class="cx"> 
</span><span class="lines">@@ -282,9 +163,11 @@
</span><span class="cx"> 
</span><span class="cx">     void generateFastPath(MacroAssembler&);
</span><span class="cx"> 
</span><ins>+private:
</ins><span class="cx">     JSValueRegs m_base;
</span><span class="cx">     JSValueRegs m_value;
</span><span class="cx"> 
</span><ins>+    MacroAssembler::Label m_start;
</ins><span class="cx">     MacroAssembler::PatchableJump m_slowPathJump;
</span><span class="cx"> };
</span><span class="cx"> 
</span><span class="lines">@@ -308,6 +191,8 @@
</span><span class="cx"> 
</span><span class="cx">     void generateFastPath(MacroAssembler&);
</span><span class="cx"> 
</span><ins>+private:
+    MacroAssembler::Label m_start;
</ins><span class="cx">     MacroAssembler::PatchableJump m_slowPathJump;
</span><span class="cx"> };
</span><span class="cx"> 
</span><span class="lines">@@ -331,6 +216,8 @@
</span><span class="cx"> 
</span><span class="cx">     void generateFastPath(MacroAssembler&);
</span><span class="cx"> 
</span><ins>+private:
+    MacroAssembler::Label m_start;
</ins><span class="cx">     MacroAssembler::PatchableJump m_slowPathJump;
</span><span class="cx"> };
</span><span class="cx"> 
</span><span class="lines">@@ -354,6 +241,8 @@
</span><span class="cx"> 
</span><span class="cx">     void generateFastPath(MacroAssembler&);
</span><span class="cx"> 
</span><ins>+private:
+    MacroAssembler::Label m_start;
</ins><span class="cx">     MacroAssembler::PatchableJump m_slowPathJump;
</span><span class="cx"> };
</span><span class="cx"> 
</span><span class="lines">@@ -366,7 +255,6 @@
</span><span class="cx">         JSValueRegs base, JSValueRegs value, GPRReg stubInfoGPR);
</span><span class="cx"> 
</span><span class="cx">     void generateFastPath(MacroAssembler&);
</span><del>-    void generateBaselineDataICFastPath(JIT&, unsigned stubInfoConstant, GPRReg stubInfoGPR);
</del><span class="cx"> };
</span><span class="cx"> 
</span><span class="cx"> class JITInstanceOfGenerator final : public JITInlineCacheGenerator {
</span><span class="lines">@@ -389,6 +277,8 @@
</span><span class="cx"> 
</span><span class="cx">     void finalize(LinkBuffer& fastPathLinkBuffer, LinkBuffer& slowPathLinkBuffer);
</span><span class="cx"> 
</span><ins>+private:
+    MacroAssembler::Label m_start;
</ins><span class="cx">     MacroAssembler::PatchableJump m_slowPathJump;
</span><span class="cx"> };
</span><span class="cx"> 
</span><span class="lines">@@ -412,9 +302,11 @@
</span><span class="cx">     
</span><span class="cx">     void generateFastPath(MacroAssembler&);
</span><span class="cx"> 
</span><ins>+private:
</ins><span class="cx">     JSValueRegs m_base;
</span><span class="cx">     JSValueRegs m_result;
</span><span class="cx"> 
</span><ins>+    MacroAssembler::Label m_start;
</ins><span class="cx">     MacroAssembler::PatchableJump m_slowPathJump;
</span><span class="cx"> };
</span><span class="cx"> 
</span><span class="lines">@@ -438,6 +330,8 @@
</span><span class="cx">     
</span><span class="cx">     void generateFastPath(MacroAssembler&);
</span><span class="cx"> 
</span><ins>+private:
+    MacroAssembler::Label m_start;
</ins><span class="cx">     MacroAssembler::PatchableJump m_slowPathJump;
</span><span class="cx"> };
</span><span class="cx"> 
</span></span></pre></div>
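<p>The generator classes in the header diff above keep the usual inline-cache split: a patchable fast path emitted by <code>generateFastPath</code>, a slow-path call reported via <code>reportSlowPathCall</code>, and a <code>finalize</code> step that links both. As a minimal, self-contained sketch of that hit/miss idea only (invented names and types, not WebKit code, and no real JSC APIs), a monomorphic get-by-id cache looks roughly like this:</p>
<pre>
// Hypothetical illustration of the monomorphic inline-cache pattern the
// generators above emit; every name here is made up for the example.
#include &lt;cstdint&gt;
#include &lt;string&gt;
#include &lt;unordered_map&gt;

struct Object {
    uint32_t structureID;                        // identifies the object's shape
    std::unordered_map&lt;std::string, int&gt; slots;  // stand-in for property storage
};

struct GetByIdIC {
    uint32_t cachedStructureID = 0;  // 0 means "nothing cached yet"
    std::string property;

    // Fast path: if the shape matches the cached one, use the cached lookup.
    // Slow path: do the generic lookup and remember the shape for next time.
    int get(Object&amp; o)
    {
        if (o.structureID == cachedStructureID)  // fast path ("IC hit")
            return o.slots.at(property);
        return slowPath(o);                      // miss: fall back and cache
    }

    int slowPath(Object&amp; o)
    {
        cachedStructureID = o.structureID;       // remember the shape we saw
        return o.slots.at(property);             // generic lookup
    }
};

int main()
{
    Object o { 42, { { "x", 7 } } };
    GetByIdIC ic { 0, "x" };
    int first = ic.get(o);   // slow path, fills the cache
    int second = ic.get(o);  // same structure, fast path
    return (first == 7 &amp;&amp; second == 7) ? 0 : 1;
}
</pre>
<p>In the real generators the cached state lives in a StructureStubInfo and the fast path is patchable machine code, but the hit/miss split is the same shape as this sketch.</p>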
<a id="trunkSourceJavaScriptCorejitJITInlinesh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/JITInlines.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/JITInlines.h     2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/JITInlines.h        2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -35,38 +35,13 @@
</span><span class="cx"> 
</span><span class="cx"> ALWAYS_INLINE bool JIT::isOperandConstantDouble(VirtualRegister src)
</span><span class="cx"> {
</span><del>-    if (!src.isConstant())
-        return false;
-    if (m_unlinkedCodeBlock->constantSourceCodeRepresentation(src) == SourceCodeRepresentation::LinkTimeConstant)
-        return false;
-    return getConstantOperand(src).isDouble();
</del><ins>+    return src.isConstant() && getConstantOperand(src).isDouble();
</ins><span class="cx"> }
</span><span class="cx"> 
</span><del>-ALWAYS_INLINE bool JIT::isOperandConstantInt(VirtualRegister src)
-{
-    if (!src.isConstant())
-        return false;
-    if (m_unlinkedCodeBlock->constantSourceCodeRepresentation(src) == SourceCodeRepresentation::LinkTimeConstant)
-        return false;
-    return getConstantOperand(src).isInt32();
-}
-
-ALWAYS_INLINE bool JIT::isKnownCell(VirtualRegister src)
-{
-    if (!src.isConstant())
-        return false;
-    if (m_unlinkedCodeBlock->constantSourceCodeRepresentation(src) == SourceCodeRepresentation::LinkTimeConstant) {
-        // All link time constants are cells.
-        return true;
-    }
-    return getConstantOperand(src).isCell();
-}
-
</del><span class="cx"> ALWAYS_INLINE JSValue JIT::getConstantOperand(VirtualRegister src)
</span><span class="cx"> {
</span><span class="cx">     ASSERT(src.isConstant());
</span><del>-    RELEASE_ASSERT(m_unlinkedCodeBlock->constantSourceCodeRepresentation(src) != SourceCodeRepresentation::LinkTimeConstant);
-    return m_unlinkedCodeBlock->getConstant(src);
</del><ins>+    return m_codeBlock->getConstant(src);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, VirtualRegister entry)
</span><span class="lines">@@ -181,29 +156,29 @@
</span><span class="cx"> #endif
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-template<typename Bytecode>
-ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckSetJSValueResultWithProfile(const Bytecode& bytecode, const FunctionPtr<CFunctionPtrTag> function, VirtualRegister dst)
</del><ins>+template<typename Metadata>
+ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckSetJSValueResultWithProfile(Metadata& metadata, const FunctionPtr<CFunctionPtrTag> function, VirtualRegister dst)
</ins><span class="cx"> {
</span><span class="cx">     MacroAssembler::Call call = appendCallWithExceptionCheck(function);
</span><span class="cx"> #if USE(JSVALUE64)
</span><del>-    emitValueProfilingSite(bytecode, returnValueGPR);
</del><ins>+    emitValueProfilingSite(metadata, returnValueGPR);
</ins><span class="cx">     emitPutVirtualRegister(dst, returnValueGPR);
</span><span class="cx"> #else
</span><del>-    emitValueProfilingSite(bytecode, JSValueRegs(returnValueGPR2, returnValueGPR));
</del><ins>+    emitValueProfilingSite(metadata, JSValueRegs(returnValueGPR2, returnValueGPR));
</ins><span class="cx">     emitStore(dst, returnValueGPR2, returnValueGPR);
</span><span class="cx"> #endif
</span><span class="cx">     return call;
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-template<typename Bytecode>
-ALWAYS_INLINE void JIT::appendCallWithExceptionCheckSetJSValueResultWithProfile(const Bytecode& bytecode, Address function, VirtualRegister dst)
</del><ins>+template<typename Metadata>
+ALWAYS_INLINE void JIT::appendCallWithExceptionCheckSetJSValueResultWithProfile(Metadata& metadata, Address function, VirtualRegister dst)
</ins><span class="cx"> {
</span><span class="cx">     appendCallWithExceptionCheck(function);
</span><span class="cx"> #if USE(JSVALUE64)
</span><del>-    emitValueProfilingSite(bytecode, returnValueGPR);
</del><ins>+    emitValueProfilingSite(metadata, returnValueGPR);
</ins><span class="cx">     emitPutVirtualRegister(dst, returnValueGPR);
</span><span class="cx"> #else
</span><del>-    emitValueProfilingSite(bytecode, JSValueRegs(returnValueGPR2, returnValueGPR));
</del><ins>+    emitValueProfilingSite(metadata, JSValueRegs(returnValueGPR2, returnValueGPR));
</ins><span class="cx">     emitStore(dst, returnValueGPR2, returnValueGPR);
</span><span class="cx"> #endif
</span><span class="cx"> }
</span><span class="lines">@@ -210,7 +185,7 @@
</span><span class="cx"> 
</span><span class="cx"> ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, VirtualRegister reg)
</span><span class="cx"> {
</span><del>-    if (!isKnownCell(reg))
</del><ins>+    if (!m_codeBlock->isKnownCell(reg))
</ins><span class="cx">         linkSlowCase(iter);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -230,7 +205,7 @@
</span><span class="cx"> inline void JIT::advanceToNextCheckpoint()
</span><span class="cx"> {
</span><span class="cx">     ASSERT_WITH_MESSAGE(m_bytecodeIndex, "This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set");
</span><del>-    ASSERT(m_unlinkedCodeBlock->instructionAt(m_bytecodeIndex)->hasCheckpoints());
</del><ins>+    ASSERT(m_codeBlock->instructionAt(m_bytecodeIndex)->hasCheckpoints());
</ins><span class="cx">     m_bytecodeIndex = BytecodeIndex(m_bytecodeIndex.offset(), m_bytecodeIndex.checkpoint() + 1);
</span><span class="cx"> 
</span><span class="cx">     auto result = m_checkpointLabels.add(m_bytecodeIndex, label());
</span><span class="lines">@@ -240,7 +215,7 @@
</span><span class="cx"> inline void JIT::emitJumpSlowToHotForCheckpoint(Jump jump)
</span><span class="cx"> {
</span><span class="cx">     ASSERT_WITH_MESSAGE(m_bytecodeIndex, "This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set");
</span><del>-    ASSERT(m_unlinkedCodeBlock->instructionAt(m_bytecodeIndex)->hasCheckpoints());
</del><ins>+    ASSERT(m_codeBlock->instructionAt(m_bytecodeIndex)->hasCheckpoints());
</ins><span class="cx">     m_bytecodeIndex = BytecodeIndex(m_bytecodeIndex.offset(), m_bytecodeIndex.checkpoint() + 1);
</span><span class="cx"> 
</span><span class="cx">     auto iter = m_checkpointLabels.find(m_bytecodeIndex);
</span><span class="lines">@@ -318,29 +293,29 @@
</span><span class="cx"> 
</span><span class="cx"> ALWAYS_INLINE bool JIT::isOperandConstantChar(VirtualRegister src)
</span><span class="cx"> {
</span><del>-    if (!src.isConstant())
-        return false;
-    if (m_unlinkedCodeBlock->constantSourceCodeRepresentation(src) == SourceCodeRepresentation::LinkTimeConstant)
-        return false;
-    return getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
</del><ins>+    return src.isConstant() && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
</ins><span class="cx"> }
</span><span class="cx"> 
</span><del>-#if USE(JSVALUE32_64)
</del><span class="cx"> inline void JIT::emitValueProfilingSite(ValueProfile& valueProfile, JSValueRegs value)
</span><span class="cx"> {
</span><span class="cx">     ASSERT(shouldEmitProfiling());
</span><span class="cx"> 
</span><ins>+    // We're in a simple configuration: only one bucket, so we can just do a direct
+    // store.
+#if USE(JSVALUE64)
+    store64(value.gpr(), valueProfile.m_buckets);
+#else
</ins><span class="cx">     EncodedValueDescriptor* descriptor = bitwise_cast<EncodedValueDescriptor*>(valueProfile.m_buckets);
</span><span class="cx">     store32(value.payloadGPR(), &descriptor->asBits.payload);
</span><span class="cx">     store32(value.tagGPR(), &descriptor->asBits.tag);
</span><ins>+#endif
</ins><span class="cx"> }
</span><del>-#endif
</del><span class="cx"> 
</span><span class="cx"> template<typename Op>
</span><span class="cx"> inline std::enable_if_t<std::is_same<decltype(Op::Metadata::m_profile), ValueProfile>::value, void> JIT::emitValueProfilingSiteIfProfiledOpcode(Op bytecode)
</span><span class="cx"> {
</span><span class="cx"> #if USE(JSVALUE64)
</span><del>-    emitValueProfilingSite(bytecode, regT0);
</del><ins>+    emitValueProfilingSite(bytecode.metadata(m_codeBlock), regT0);
</ins><span class="cx"> #else
</span><span class="cx">     emitValueProfilingSite(bytecode.metadata(m_codeBlock), JSValueRegs(regT1, regT0));
</span><span class="cx"> #endif
</span><span class="lines">@@ -348,37 +323,41 @@
</span><span class="cx"> 
</span><span class="cx"> inline void JIT::emitValueProfilingSiteIfProfiledOpcode(...) { }
</span><span class="cx"> 
</span><del>-#if USE(JSVALUE64)
-template<typename Bytecode>
-inline void JIT::emitValueProfilingSite(const Bytecode& bytecode, JSValueRegs value)
</del><ins>+template<typename Metadata>
+inline void JIT::emitValueProfilingSite(Metadata& metadata, JSValueRegs value)
</ins><span class="cx"> {
</span><span class="cx">     if (!shouldEmitProfiling())
</span><span class="cx">         return;
</span><ins>+    emitValueProfilingSite(valueProfileFor(metadata, m_bytecodeIndex.checkpoint()), value);
+}
</ins><span class="cx"> 
</span><del>-    ptrdiff_t offset = m_unlinkedCodeBlock->metadata().offsetInMetadataTable(bytecode) + valueProfileOffsetFor<Bytecode>(m_bytecodeIndex.checkpoint()) + ValueProfile::offsetOfFirstBucket();
-    store64(value.gpr(), Address(s_metadataGPR, offset));
</del><ins>+#if USE(JSVALUE64)
+inline void JIT::emitValueProfilingSite(ValueProfile& valueProfile, GPRReg resultReg)
+{
+    emitValueProfilingSite(valueProfile, JSValueRegs(resultReg));
</ins><span class="cx"> }
</span><span class="cx"> 
</span><del>-template<typename Bytecode>
-inline void JIT::emitValueProfilingSite(const Bytecode& bytecode, GPRReg resultReg)
</del><ins>+template<typename Metadata>
+inline void JIT::emitValueProfilingSite(Metadata& metadata, GPRReg resultReg)
</ins><span class="cx"> {
</span><del>-    emitValueProfilingSite(bytecode, JSValueRegs(resultReg));
</del><ins>+    emitValueProfilingSite(metadata, JSValueRegs(resultReg));
</ins><span class="cx"> }
</span><span class="cx"> #endif
</span><span class="cx"> 
</span><del>-template <typename Bytecode>
-inline void JIT::emitArrayProfilingSiteWithCell(const Bytecode& bytecode, ptrdiff_t offsetOfArrayProfile, RegisterID cellGPR, RegisterID scratchGPR)
</del><ins>+inline void JIT::emitArrayProfilingSiteWithCell(RegisterID cellGPR, ArrayProfile* arrayProfile, RegisterID scratchGPR)
</ins><span class="cx"> {
</span><span class="cx">     if (shouldEmitProfiling()) {
</span><span class="cx">         load32(MacroAssembler::Address(cellGPR, JSCell::structureIDOffset()), scratchGPR);
</span><del>-        store32ToMetadata(scratchGPR, bytecode, offsetOfArrayProfile);
</del><ins>+        store32(scratchGPR, arrayProfile->addressOfLastSeenStructureID());
</ins><span class="cx">     }
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-template <typename Bytecode>
-inline void JIT::emitArrayProfilingSiteWithCell(const Bytecode& bytecode, RegisterID cellGPR, RegisterID scratchGPR)
</del><ins>+inline void JIT::emitArrayProfilingSiteWithCell(RegisterID cellGPR, RegisterID arrayProfileGPR, RegisterID scratchGPR)
</ins><span class="cx"> {
</span><del>-    emitArrayProfilingSiteWithCell(bytecode, Bytecode::Metadata::offsetOfArrayProfile() + ArrayProfile::offsetOfLastSeenStructureID(), cellGPR, scratchGPR);
</del><ins>+    if (shouldEmitProfiling()) {
+        load32(MacroAssembler::Address(cellGPR, JSCell::structureIDOffset()), scratchGPR);
+        store32(scratchGPR, Address(arrayProfileGPR, ArrayProfile::offsetOfLastSeenStructureID()));
+    }
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> ALWAYS_INLINE int32_t JIT::getOperandConstantInt(VirtualRegister src)
</span><span class="lines">@@ -401,7 +380,7 @@
</span><span class="cx"> inline void JIT::emitLoadDouble(VirtualRegister reg, FPRegisterID value)
</span><span class="cx"> {
</span><span class="cx">     if (reg.isConstant()) {
</span><del>-        WriteBarrier<Unknown>& inConstantPool = m_unlinkedCodeBlock->constantRegister(reg);
</del><ins>+        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(reg);
</ins><span class="cx">         loadDouble(TrustedImmPtr(&inConstantPool), value);
</span><span class="cx">     } else
</span><span class="cx">         loadDouble(addressFor(reg), value);
</span><span class="lines">@@ -517,7 +496,7 @@
</span><span class="cx"> 
</span><span class="cx"> inline void JIT::emitJumpSlowCaseIfNotJSCell(VirtualRegister reg)
</span><span class="cx"> {
</span><del>-    if (!isKnownCell(reg)) {
</del><ins>+    if (!m_codeBlock->isKnownCell(reg)) {
</ins><span class="cx">         if (reg.isConstant())
</span><span class="cx">             addSlowCase(jump());
</span><span class="cx">         else
</span><span class="lines">@@ -527,7 +506,7 @@
</span><span class="cx"> 
</span><span class="cx"> inline void JIT::emitJumpSlowCaseIfNotJSCell(VirtualRegister reg, RegisterID tag)
</span><span class="cx"> {
</span><del>-    if (!isKnownCell(reg)) {
</del><ins>+    if (!m_codeBlock->isKnownCell(reg)) {
</ins><span class="cx">         if (reg.isConstant())
</span><span class="cx">             addSlowCase(jump());
</span><span class="cx">         else
</span><span class="lines">@@ -535,6 +514,11 @@
</span><span class="cx">     }
</span><span class="cx"> }
</span><span class="cx"> 
</span><ins>+ALWAYS_INLINE bool JIT::isOperandConstantInt(VirtualRegister src)
+{
+    return src.isConstant() && getConstantOperand(src).isInt32();
+}
+
</ins><span class="cx"> ALWAYS_INLINE bool JIT::getOperandConstantInt(VirtualRegister op1, VirtualRegister op2, VirtualRegister& op, int32_t& constant)
</span><span class="cx"> {
</span><span class="cx">     if (isOperandConstantInt(op1)) {
</span><span class="lines">@@ -560,11 +544,11 @@
</span><span class="cx">     ASSERT(m_bytecodeIndex); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
</span><span class="cx"> 
</span><span class="cx">     if (src.isConstant()) {
</span><del>-        if (m_profiledCodeBlock->isConstantOwnedByUnlinkedCodeBlock(src)) {
-            JSValue value = m_unlinkedCodeBlock->getConstant(src);
</del><ins>+        JSValue value = m_codeBlock->getConstant(src);
+        if (!value.isNumber())
+            move(TrustedImm64(JSValue::encode(value)), dst);
+        else
</ins><span class="cx">             move(Imm64(JSValue::encode(value)), dst);
</span><del>-        } else
-            loadCodeBlockConstant(src, dst);
</del><span class="cx">         return;
</span><span class="cx">     }
</span><span class="cx"> 
</span><span class="lines">@@ -582,6 +566,11 @@
</span><span class="cx">     emitGetVirtualRegister(src2, dst2);
</span><span class="cx"> }
</span><span class="cx"> 
</span><ins>+ALWAYS_INLINE bool JIT::isOperandConstantInt(VirtualRegister src)
+{
+    return src.isConstant() && getConstantOperand(src).isInt32();
+}
+
</ins><span class="cx"> ALWAYS_INLINE void JIT::emitPutVirtualRegister(VirtualRegister dst, RegisterID from)
</span><span class="cx"> {
</span><span class="cx">     store64(from, addressFor(dst));
</span><span class="lines">@@ -606,7 +595,7 @@
</span><span class="cx"> 
</span><span class="cx"> ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, VirtualRegister vReg)
</span><span class="cx"> {
</span><del>-    if (!isKnownCell(vReg))
</del><ins>+    if (!m_codeBlock->isKnownCell(vReg))
</ins><span class="cx">         emitJumpSlowCaseIfNotJSCell(reg);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -648,9 +637,32 @@
</span><span class="cx"> {
</span><span class="cx">     if (target)
</span><span class="cx">         return target;
</span><del>-    return m_unlinkedCodeBlock->outOfLineJumpOffset(instruction);
</del><ins>+    return m_codeBlock->outOfLineJumpOffset(instruction);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><ins>+ALWAYS_INLINE GetPutInfo JIT::copiedGetPutInfo(OpPutToScope bytecode)
+{
+    unsigned key = bytecode.m_metadataID + 1; // HashMap doesn't like 0 as a key
+    auto iterator = m_copiedGetPutInfos.find(key);
+    if (iterator != m_copiedGetPutInfos.end())
+        return GetPutInfo(iterator->value);
+    GetPutInfo getPutInfo = bytecode.metadata(m_codeBlock).m_getPutInfo;
+    m_copiedGetPutInfos.add(key, getPutInfo.operand());
+    return getPutInfo;
+}
+
+template<typename BinaryOp>
+ALWAYS_INLINE BinaryArithProfile JIT::copiedArithProfile(BinaryOp bytecode)
+{
+    uint64_t key = (static_cast<uint64_t>(BinaryOp::opcodeID) + 1) << 32 | static_cast<uint64_t>(bytecode.m_metadataID);
+    auto iterator = m_copiedArithProfiles.find(key);
+    if (iterator != m_copiedArithProfiles.end())
+        return iterator->value;
+    BinaryArithProfile arithProfile = bytecode.metadata(m_codeBlock).m_arithProfile;
+    m_copiedArithProfiles.add(key, arithProfile);
+    return arithProfile;
+}
+
</ins><span class="cx"> template<typename Op>
</span><span class="cx"> ALWAYS_INLINE ECMAMode JIT::ecmaMode(Op op)
</span><span class="cx"> {
</span><span class="lines">@@ -669,60 +681,6 @@
</span><span class="cx">     return ECMAMode::strict();
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-template <typename Bytecode>
-ALWAYS_INLINE void JIT::loadPtrFromMetadata(const Bytecode& bytecode, size_t offset, GPRReg result)
-{
-    loadPtr(Address(s_metadataGPR, m_unlinkedCodeBlock->metadata().offsetInMetadataTable(bytecode) + offset), result);
-}
-
-template <typename Bytecode>
-ALWAYS_INLINE void JIT::load32FromMetadata(const Bytecode& bytecode, size_t offset, GPRReg result)
-{
-    load32(Address(s_metadataGPR, m_unlinkedCodeBlock->metadata().offsetInMetadataTable(bytecode) + offset), result);
-}
-
-template <typename Bytecode>
-ALWAYS_INLINE void JIT::load8FromMetadata(const Bytecode& bytecode, size_t offset, GPRReg result)
-{
-    load8(Address(s_metadataGPR, m_unlinkedCodeBlock->metadata().offsetInMetadataTable(bytecode) + offset), result);
-}
-
-template <typename ValueType, typename Bytecode>
-ALWAYS_INLINE void JIT::store8ToMetadata(ValueType value, const Bytecode& bytecode, size_t offset)
-{
-    store8(value, Address(s_metadataGPR, m_unlinkedCodeBlock->metadata().offsetInMetadataTable(bytecode) + offset));
-}
-
-template <typename Bytecode>
-ALWAYS_INLINE void JIT::store32ToMetadata(GPRReg value, const Bytecode& bytecode, size_t offset)
-{
-    store32(value, Address(s_metadataGPR, m_unlinkedCodeBlock->metadata().offsetInMetadataTable(bytecode) + offset));
-}
-
-template <typename Bytecode>
-ALWAYS_INLINE void JIT::materializePointerIntoMetadata(const Bytecode& bytecode, size_t offset, GPRReg result)
-{
-    addPtr(TrustedImm32(m_unlinkedCodeBlock->metadata().offsetInMetadataTable(bytecode) + offset), s_metadataGPR, result);
-}
-
-ALWAYS_INLINE void JIT::loadConstant(JITConstantPool::Constant constantIndex, GPRReg result)
-{
-    loadPtr(Address(s_constantsGPR, static_cast<uintptr_t>(constantIndex) * 8), result);
-}
-
-ALWAYS_INLINE void JIT::loadGlobalObject(GPRReg result)
-{
-    loadConstant(m_globalObjectConstant, result);
-}
-
-ALWAYS_INLINE void JIT::loadCodeBlockConstant(VirtualRegister constant, GPRReg result)
-{
-    RELEASE_ASSERT(constant.isConstant());
-    loadPtr(addressFor(CallFrameSlot::codeBlock), result);
-    loadPtr(Address(result, CodeBlock::offsetOfConstantsVectorBuffer()), result);
-    loadPtr(Address(result, constant.toConstantIndex() * sizeof(void*)), result);
-}
-
</del><span class="cx"> } // namespace JSC
</span><span class="cx"> 
</span><span class="cx"> #endif // ENABLE(JIT)
</span></span></pre></div>
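<p>The JITInlines.h hunk above restores the direct profiling stores: a value-profiling site writes the last seen value into a single bucket ("only one bucket, so we can just do a direct store"), and an array-profiling site stores the last seen structure ID. A small stand-alone sketch of what such one-bucket profiles record (illustrative names only, not the JSC types):</p>
<pre>
// Hypothetical one-bucket profiles mirroring the "direct store" comment in the
// diff above: each profiling site simply overwrites its bucket with the most
// recently observed value or structure ID. Names are invented for the example.
#include &lt;cstdint&gt;
#include &lt;cstdio&gt;

struct ValueProfileBucket {
    uint64_t lastSeenValue = 0;       // an encoded JSValue in the real thing
};

struct ArrayProfileBucket {
    uint32_t lastSeenStructureID = 0;
};

// The emitted fast-path code amounts to one store into the bucket.
inline void recordValue(ValueProfileBucket&amp; p, uint64_t encodedValue)
{
    p.lastSeenValue = encodedValue;          // cf. store64(value, m_buckets)
}

inline void recordArray(ArrayProfileBucket&amp; p, uint32_t structureID)
{
    p.lastSeenStructureID = structureID;     // cf. store32 to lastSeenStructureID
}

int main()
{
    ValueProfileBucket vp;
    ArrayProfileBucket ap;
    recordValue(vp, 0x7ffc000000000003ull);  // some boxed value, for example
    recordArray(ap, 42);
    std::printf("%llu %u\n", (unsigned long long)vp.lastSeenValue, ap.lastSeenStructureID);
    return 0;
}
</pre>
<p>The tier-up machinery later reads these buckets to guess types and array shapes; the baseline JIT itself only ever performs the cheap overwrite shown here.</p>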
<a id="trunkSourceJavaScriptCorejitJITOpcodescpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/JITOpcodes.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/JITOpcodes.cpp   2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/JITOpcodes.cpp      2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -54,14 +54,11 @@
</span><span class="cx">     VirtualRegister src = bytecode.m_src;
</span><span class="cx"> 
</span><span class="cx">     if (src.isConstant()) {
</span><del>-        if (m_profiledCodeBlock->isConstantOwnedByUnlinkedCodeBlock(src)) {
-            JSValue value = m_unlinkedCodeBlock->getConstant(src);
</del><ins>+        JSValue value = m_codeBlock->getConstant(src);
+        if (!value.isNumber())
+            store64(TrustedImm64(JSValue::encode(value)), addressFor(dst));
+        else
</ins><span class="cx">             store64(Imm64(JSValue::encode(value)), addressFor(dst));
</span><del>-        } else {
-            loadCodeBlockConstant(src, regT0);
-            store64(regT0, addressFor(dst));
-        }
-
</del><span class="cx">         return;
</span><span class="cx">     }
</span><span class="cx"> 
</span><span class="lines">@@ -90,24 +87,26 @@
</span><span class="cx"> void JIT::emit_op_new_object(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><span class="cx">     auto bytecode = currentInstruction->as<OpNewObject>();
</span><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
+    Structure* structure = metadata.m_objectAllocationProfile.structure();
+    size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
+    Allocator allocator = allocatorForNonVirtualConcurrently<JSFinalObject>(*m_vm, allocationSize, AllocatorForMode::AllocatorIfExists);
</ins><span class="cx"> 
</span><span class="cx">     RegisterID resultReg = regT0;
</span><span class="cx">     RegisterID allocatorReg = regT1;
</span><span class="cx">     RegisterID scratchReg = regT2;
</span><del>-    RegisterID structureReg = regT3;
</del><span class="cx"> 
</span><del>-    loadPtrFromMetadata(bytecode, OpNewObject::Metadata::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator(), allocatorReg);
-    loadPtrFromMetadata(bytecode, OpNewObject::Metadata::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfStructure(), structureReg);
-
-    JumpList slowCases;
-    auto butterfly = TrustedImmPtr(nullptr);
-    emitAllocateJSObject(resultReg, JITAllocator::variable(), allocatorReg, structureReg, butterfly, scratchReg, slowCases);
-    load8(Address(structureReg, Structure::inlineCapacityOffset()), scratchReg);
-    emitInitializeInlineStorage(resultReg, scratchReg);
-    mutatorFence(*m_vm);
-    emitPutVirtualRegister(bytecode.m_dst);
-
-    addSlowCase(slowCases);
</del><ins>+    if (!allocator)
+        addSlowCase(jump());
+    else {
+        JumpList slowCases;
+        auto butterfly = TrustedImmPtr(nullptr);
+        emitAllocateJSObject(resultReg, JITAllocator::constant(allocator), allocatorReg, TrustedImmPtr(structure), butterfly, scratchReg, slowCases);
+        emitInitializeInlineStorage(resultReg, structure->inlineCapacity());
+        mutatorFence(*m_vm);
+        addSlowCase(slowCases);
+        emitPutVirtualRegister(bytecode.m_dst);
+    }
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void JIT::emitSlow_op_new_object(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
</span><span class="lines">@@ -114,11 +113,11 @@
</span><span class="cx"> {
</span><span class="cx">     linkAllSlowCases(iter);
</span><span class="cx"> 
</span><del>-    RegisterID structureReg = regT3;
-
</del><span class="cx">     auto bytecode = currentInstruction->as<OpNewObject>();
</span><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
</ins><span class="cx">     VirtualRegister dst = bytecode.m_dst;
</span><del>-    callOperationNoExceptionCheck(operationNewObject, &vm(), structureReg);
</del><ins>+    Structure* structure = metadata.m_objectAllocationProfile.structure();
+    callOperationNoExceptionCheck(operationNewObject, &vm(), structure);
</ins><span class="cx">     emitStoreCell(dst, returnValueGPR);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -132,8 +131,7 @@
</span><span class="cx">     emitGetVirtualRegister(hasInstanceValue, regT0);
</span><span class="cx"> 
</span><span class="cx">     // We don't jump if we know what Symbol.hasInstance would do.
</span><del>-    loadGlobalObject(regT1);
-    Jump customhasInstanceValue = branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSGlobalObject, m_functionProtoHasInstanceSymbolFunction)));
</del><ins>+    Jump customhasInstanceValue = branchPtr(NotEqual, regT0, TrustedImmPtr(m_codeBlock->globalObject()->functionProtoHasInstanceSymbolFunction()));
</ins><span class="cx"> 
</span><span class="cx">     emitGetVirtualRegister(constructor, regT0);
</span><span class="cx"> 
</span><span class="lines">@@ -156,38 +154,30 @@
</span><span class="cx">     VirtualRegister value = bytecode.m_value;
</span><span class="cx">     VirtualRegister proto = bytecode.m_prototype;
</span><span class="cx"> 
</span><del>-    constexpr GPRReg valueGPR = BaselineInstanceofRegisters::value;
-    constexpr GPRReg protoGPR = BaselineInstanceofRegisters::proto;
-    constexpr GPRReg resultGPR = BaselineInstanceofRegisters::result;
-    constexpr GPRReg stubInfoGPR = BaselineInstanceofRegisters::stubInfo;
-
-    emitGetVirtualRegister(value, valueGPR);
-    emitGetVirtualRegister(proto, protoGPR);
</del><ins>+    // Load the operands (baseVal, proto, and value respectively) into registers.
+    // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
+    emitGetVirtualRegister(value, regT2);
+    emitGetVirtualRegister(proto, regT1);
</ins><span class="cx">     
</span><span class="cx">     // Check that proto are cells. baseVal must be a cell - this is checked by the get_by_id for Symbol.hasInstance.
</span><del>-    emitJumpSlowCaseIfNotJSCell(valueGPR, value);
-    emitJumpSlowCaseIfNotJSCell(protoGPR, proto);
</del><ins>+    emitJumpSlowCaseIfNotJSCell(regT2, value);
+    emitJumpSlowCaseIfNotJSCell(regT1, proto);
</ins><span class="cx"> 
</span><span class="cx">     JITInstanceOfGenerator gen(
</span><del>-        nullptr, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex),
</del><ins>+        m_codeBlock, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex),
</ins><span class="cx">         RegisterSet::stubUnavailableRegisters(),
</span><del>-        resultGPR,
-        valueGPR,
-        protoGPR,
-        stubInfoGPR,
-        BaselineInstanceofRegisters::scratch1, BaselineInstanceofRegisters::scratch2);
-
-    UnlinkedStructureStubInfo* stubInfo = m_unlinkedStubInfos.add();
-    stubInfo->accessType = AccessType::InstanceOf;
-    stubInfo->bytecodeIndex = m_bytecodeIndex;
-    JITConstantPool::Constant stubInfoIndex = m_constantPool.add(JITConstantPool::Type::StructureStubInfo, stubInfo);
-    gen.m_unlinkedStubInfoConstantIndex = stubInfoIndex;
-    gen.m_unlinkedStubInfo = stubInfo;
-
-    gen.generateBaselineDataICFastPath(*this, stubInfoIndex, stubInfoGPR);
-    addSlowCase();
</del><ins>+        regT0, // result
+        regT2, // value
+        regT1, // proto
+        regT5,
+        regT3, regT4); // scratch
+    gen.generateFastPath(*this);
+    if (!JITCode::useDataIC(JITType::BaselineJIT))
+        addSlowCase(gen.slowPathJump());
+    else
+        addSlowCase();
</ins><span class="cx">     m_instanceOfs.append(gen);
</span><del>-
</del><ins>+    
</ins><span class="cx">     emitPutVirtualRegister(dst);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -202,13 +192,14 @@
</span><span class="cx">     
</span><span class="cx">     Label coldPathBegin = label();
</span><span class="cx"> 
</span><del>-    static_assert(BaselineInstanceofRegisters::stubInfo == argumentGPR1);
-    static_assert(BaselineInstanceofRegisters::value == argumentGPR2);
-    static_assert(BaselineInstanceofRegisters::proto == argumentGPR3);
-    loadGlobalObject(argumentGPR0);
-    loadConstant(gen.m_unlinkedStubInfoConstantIndex, argumentGPR1);
-    callOperation<decltype(operationInstanceOfOptimize)>(Address(argumentGPR1, StructureStubInfo::offsetOfSlowOperation()), resultVReg, argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3);
-    gen.reportSlowPathCall(coldPathBegin, Call());
</del><ins>+    Call call;
+    if (JITCode::useDataIC(JITType::BaselineJIT)) {
+        gen.stubInfo()->m_slowOperation = operationInstanceOfOptimize;
+        move(TrustedImmPtr(gen.stubInfo()), GPRInfo::nonArgGPR0);
+        callOperation<decltype(operationInstanceOfOptimize)>(Address(GPRInfo::nonArgGPR0, StructureStubInfo::offsetOfSlowOperation()), resultVReg, TrustedImmPtr(m_codeBlock->globalObject()), GPRInfo::nonArgGPR0, regT2, regT1);
+    } else
+        call = callOperation(operationInstanceOfOptimize, resultVReg, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), regT2, regT1);
+    gen.reportSlowPathCall(coldPathBegin, call);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_instanceof_custom(const Instruction*)
</span><span class="lines">@@ -249,7 +240,7 @@
</span><span class="cx"> 
</span><span class="cx">     isMasqueradesAsUndefined.link(this);
</span><span class="cx">     emitLoadStructure(vm(), regT0, regT1, regT2);
</span><del>-    loadGlobalObject(regT0);
</del><ins>+    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
</ins><span class="cx">     loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1);
</span><span class="cx">     comparePtr(Equal, regT0, regT1, regT0);
</span><span class="cx"> 
</span><span class="lines">@@ -392,9 +383,9 @@
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> #if ENABLE(EXTRA_CTI_THUNKS)
</span><del>-MacroAssemblerCodeRef<JITThunkPtrTag> JIT::op_ret_handlerGenerator(VM&)
</del><ins>+MacroAssemblerCodeRef<JITThunkPtrTag> JIT::op_ret_handlerGenerator(VM& vm)
</ins><span class="cx"> {
</span><del>-    CCallHelpers jit;
</del><ins>+    JIT jit(vm);
</ins><span class="cx"> 
</span><span class="cx">     jit.checkStackPointerAlignment();
</span><span class="cx">     jit.emitRestoreCalleeSavesFor(&RegisterAtOffsetList::llintBaselineCalleeSaveRegisters());
</span><span class="lines">@@ -420,6 +411,7 @@
</span><span class="cx"> 
</span><span class="cx">     if (dst != src)
</span><span class="cx">         emitPutVirtualRegister(dst);
</span><ins>+
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_to_property_key(const Instruction* currentInstruction)
</span><span class="lines">@@ -442,10 +434,9 @@
</span><span class="cx"> void JIT::emit_op_set_function_name(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><span class="cx">     auto bytecode = currentInstruction->as<OpSetFunctionName>();
</span><del>-    emitGetVirtualRegister(bytecode.m_function, argumentGPR1);
-    emitGetVirtualRegister(bytecode.m_name, argumentGPR2);
-    loadGlobalObject(argumentGPR0);
-    callOperation(operationSetFunctionName, argumentGPR0, argumentGPR1, argumentGPR2);
</del><ins>+    emitGetVirtualRegister(bytecode.m_function, regT0);
+    emitGetVirtualRegister(bytecode.m_name, regT1);
+    callOperation(operationSetFunctionName, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_not(const Instruction* currentInstruction)
</span><span class="lines">@@ -474,10 +465,8 @@
</span><span class="cx"> #if !ENABLE(EXTRA_CTI_THUNKS)
</span><span class="cx">     constexpr GPRReg scratch1 = regT1;
</span><span class="cx">     constexpr GPRReg scratch2 = regT2;
</span><del>-    constexpr GPRReg globalObjectGPR = regT3;
</del><span class="cx">     constexpr bool shouldCheckMasqueradesAsUndefined = true;
</span><del>-    loadGlobalObject(globalObjectGPR);
-    addJump(branchIfFalsey(vm(), JSValueRegs(value), scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, globalObjectGPR), target);
</del><ins>+    addJump(branchIfFalsey(vm(), JSValueRegs(value), scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()), target);
</ins><span class="cx"> #else
</span><span class="cx">     emitNakedNearCall(vm().getCTIStub(valueIsFalseyGenerator).retaggedCode<NoPtrTag>());
</span><span class="cx">     addJump(branchTest32(NonZero, regT0), target);
</span><span class="lines">@@ -491,7 +480,7 @@
</span><span class="cx">     // it makes assumptions about the right globalObject being available from CallFrame::codeBlock().
</span><span class="cx">     // DFG/FTL may inline functions belonging to other globalObjects, which may not match
</span><span class="cx">     // CallFrame::codeBlock().
</span><del>-    CCallHelpers jit;
</del><ins>+    JIT jit(vm);
</ins><span class="cx"> 
</span><span class="cx">     constexpr GPRReg value = regT0;
</span><span class="cx">     constexpr GPRReg scratch1 = regT1;
</span><span class="lines">@@ -530,7 +519,7 @@
</span><span class="cx">     // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
</span><span class="cx">     Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
</span><span class="cx">     emitLoadStructure(vm(), regT0, regT2, regT1);
</span><del>-    loadGlobalObject(regT0);
</del><ins>+    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
</ins><span class="cx">     addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target);
</span><span class="cx">     Jump masqueradesGlobalObjectIsForeign = jump();
</span><span class="cx"> 
</span><span class="lines">@@ -554,7 +543,7 @@
</span><span class="cx">     // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
</span><span class="cx">     addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
</span><span class="cx">     emitLoadStructure(vm(), regT0, regT2, regT1);
</span><del>-    loadGlobalObject(regT0);
</del><ins>+    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
</ins><span class="cx">     addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target);
</span><span class="cx">     Jump wasNotImmediate = jump();
</span><span class="cx"> 
</span><span class="lines">@@ -593,13 +582,15 @@
</span><span class="cx"> void JIT::emit_op_jneq_ptr(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><span class="cx">     auto bytecode = currentInstruction->as<OpJneqPtr>();
</span><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
</ins><span class="cx">     VirtualRegister src = bytecode.m_value;
</span><ins>+    JSValue specialPointer = getConstantOperand(bytecode.m_specialPointer);
+    ASSERT(specialPointer.isCell());
</ins><span class="cx">     unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
</span><span class="cx">     
</span><span class="cx">     emitGetVirtualRegister(src, regT0);
</span><del>-    loadCodeBlockConstant(bytecode.m_specialPointer, regT1);
-    CCallHelpers::Jump equal = branchPtr(Equal, regT0, regT1);
-    store8ToMetadata(TrustedImm32(1), bytecode, OpJneqPtr::Metadata::offsetOfHasJumped());
</del><ins>+    CCallHelpers::Jump equal = branchPtr(Equal, regT0, TrustedImmPtr(specialPointer.asCell()));
+    store8(TrustedImm32(1), &metadata.m_hasJumped);
</ins><span class="cx">     addJump(jump(), target);
</span><span class="cx">     equal.link(this);
</span><span class="cx"> }
</span><span class="lines">@@ -634,10 +625,8 @@
</span><span class="cx"> #if !ENABLE(EXTRA_CTI_THUNKS)
</span><span class="cx">     constexpr GPRReg scratch1 = regT1;
</span><span class="cx">     constexpr GPRReg scratch2 = regT2;
</span><del>-    constexpr GPRReg globalObjectGPR = regT3;
</del><span class="cx">     constexpr bool shouldCheckMasqueradesAsUndefined = true;
</span><del>-    loadGlobalObject(globalObjectGPR);
-    addJump(branchIfTruthy(vm(), JSValueRegs(value), scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, globalObjectGPR), target);
</del><ins>+    addJump(branchIfTruthy(vm(), JSValueRegs(value), scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()), target);
</ins><span class="cx"> #else
</span><span class="cx">     emitNakedNearCall(vm().getCTIStub(valueIsTruthyGenerator).retaggedCode<NoPtrTag>());
</span><span class="cx">     addJump(branchTest32(NonZero, regT0), target);
</span><span class="lines">@@ -651,7 +640,7 @@
</span><span class="cx">     // it makes assumptions about the right globalObject being available from CallFrame::codeBlock().
</span><span class="cx">     // DFG/FTL may inline functions belonging to other globalObjects, which may not match
</span><span class="cx">     // CallFrame::codeBlock().
</span><del>-    CCallHelpers jit;
</del><ins>+    JIT jit(vm);
</ins><span class="cx"> 
</span><span class="cx">     constexpr GPRReg value = regT0;
</span><span class="cx">     constexpr GPRReg scratch1 = regT1;
</span><span class="lines">@@ -706,8 +695,7 @@
</span><span class="cx"> #if !ENABLE(EXTRA_CTI_THUNKS)
</span><span class="cx">     copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm().topEntryFrame);
</span><span class="cx">     emitGetVirtualRegister(bytecode.m_value, regT0);
</span><del>-    loadGlobalObject(regT1);
-    callOperationNoExceptionCheck(operationThrow, regT1, regT0);
</del><ins>+    callOperationNoExceptionCheck(operationThrow, TrustedImmPtr(m_codeBlock->globalObject()), regT0);
</ins><span class="cx">     jumpToExceptionHandler(vm());
</span><span class="cx"> #else
</span><span class="cx">     constexpr GPRReg bytecodeOffsetGPR = argumentGPR2;
</span><span class="lines">@@ -723,7 +711,7 @@
</span><span class="cx"> #if ENABLE(EXTRA_CTI_THUNKS)
</span><span class="cx"> MacroAssemblerCodeRef<JITThunkPtrTag> JIT::op_throw_handlerGenerator(VM& vm)
</span><span class="cx"> {
</span><del>-    CCallHelpers jit;
</del><ins>+    JIT jit(vm);
</ins><span class="cx"> 
</span><span class="cx">     constexpr GPRReg bytecodeOffsetGPR = argumentGPR2;
</span><span class="cx">     constexpr GPRReg thrownValueGPR = argumentGPR1;
</span><span class="lines">@@ -732,7 +720,7 @@
</span><span class="cx"> 
</span><span class="cx"> #if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
</span><span class="cx">     jit.loadPtr(&vm.topEntryFrame, argumentGPR0);
</span><del>-    jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(argumentGPR0);
</del><ins>+    jit.copyCalleeSavesToEntryFrameCalleeSavesBufferImpl(argumentGPR0);
</ins><span class="cx"> #endif
</span><span class="cx"> 
</span><span class="cx">     constexpr GPRReg globalObjectGPR = argumentGPR0;
</span><span class="lines">@@ -920,8 +908,7 @@
</span><span class="cx"> 
</span><span class="cx">     auto bytecode = currentInstruction->as<OpJstricteq>();
</span><span class="cx">     unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
</span><del>-    loadGlobalObject(regT2);
-    callOperation(operationCompareStrictEq, regT2, regT0, regT1);
</del><ins>+    callOperation(operationCompareStrictEq, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1);
</ins><span class="cx">     emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), target);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -931,8 +918,7 @@
</span><span class="cx"> 
</span><span class="cx">     auto bytecode = currentInstruction->as<OpJnstricteq>();
</span><span class="cx">     unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
</span><del>-    loadGlobalObject(regT2);
-    callOperation(operationCompareStrictEq, regT2, regT0, regT1);
</del><ins>+    callOperation(operationCompareStrictEq, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1);
</ins><span class="cx">     emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), target);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -945,7 +931,7 @@
</span><span class="cx">     
</span><span class="cx">     addSlowCase(branchIfNotNumber(regT0));
</span><span class="cx"> 
</span><del>-    emitValueProfilingSite(bytecode, regT0);
</del><ins>+    emitValueProfilingSite(bytecode.metadata(m_codeBlock), regT0);
</ins><span class="cx">     if (srcVReg != dstVReg)
</span><span class="cx">         emitPutVirtualRegister(dstVReg);
</span><span class="cx"> }
</span><span class="lines">@@ -965,7 +951,7 @@
</span><span class="cx">     addSlowCase(branchIfNotNumber(regT0));
</span><span class="cx">     isBigInt.link(this);
</span><span class="cx"> 
</span><del>-    emitValueProfilingSite(bytecode, regT0);
</del><ins>+    emitValueProfilingSite(bytecode.metadata(m_codeBlock), regT0);
</ins><span class="cx">     if (srcVReg != dstVReg)
</span><span class="cx">         emitPutVirtualRegister(dstVReg);
</span><span class="cx"> }
</span><span class="lines">@@ -992,7 +978,7 @@
</span><span class="cx">     addSlowCase(branchIfNotCell(regT0));
</span><span class="cx">     addSlowCase(branchIfNotObject(regT0));
</span><span class="cx"> 
</span><del>-    emitValueProfilingSite(bytecode, regT0);
</del><ins>+    emitValueProfilingSite(bytecode.metadata(m_codeBlock), regT0);
</ins><span class="cx">     if (srcVReg != dstVReg)
</span><span class="cx">         emitPutVirtualRegister(dstVReg);
</span><span class="cx"> }
</span><span class="lines">@@ -1007,19 +993,8 @@
</span><span class="cx">     load64(Address(regT3, VM::callFrameForCatchOffset()), callFrameRegister);
</span><span class="cx">     storePtr(TrustedImmPtr(nullptr), Address(regT3, VM::callFrameForCatchOffset()));
</span><span class="cx"> 
</span><del>-    addPtr(TrustedImm32(stackPointerOffsetFor(m_unlinkedCodeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
</del><ins>+    addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister);
</ins><span class="cx"> 
</span><del>-    // When the LLInt throws an exception, there is a chance that we've already tiered up
-    // the same CodeBlock to baseline, and we'll catch the exception in the baseline JIT (because
-    // we updated the exception handlers to point here). Because the LLInt uses a different value
-    // inside s_constantsGPR, the callee saves we restore above may not contain the correct register.
-    // So we replenish it here.
-    {
-        loadPtr(addressFor(CallFrameSlot::codeBlock), regT0);
-        loadPtr(Address(regT0, CodeBlock::offsetOfJITData()), regT0);
-        loadPtr(Address(regT0, CodeBlock::JITData::offsetOfJITConstantPool()), s_constantsGPR);
-    }
-
</del><span class="cx">     callOperationNoExceptionCheck(operationRetrieveAndClearExceptionIfCatchable, &vm());
</span><span class="cx">     Jump isCatchableException = branchTest32(NonZero, returnValueGPR);
</span><span class="cx">     jumpToExceptionHandler(vm());
</span><span class="lines">@@ -1036,11 +1011,23 @@
</span><span class="cx">     // argument type proofs, storing locals to the buffer, etc
</span><span class="cx">     // https://bugs.webkit.org/show_bug.cgi?id=175598
</span><span class="cx"> 
</span><del>-    callOperationNoExceptionCheck(operationTryOSREnterAtCatchAndValueProfile, &vm(), m_bytecodeIndex.asBits());
</del><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
+    ValueProfileAndVirtualRegisterBuffer* buffer = metadata.m_buffer;
+    if (buffer || !shouldEmitProfiling())
+        callOperationNoExceptionCheck(operationTryOSREnterAtCatch, &vm(), m_bytecodeIndex.asBits());
+    else
+        callOperationNoExceptionCheck(operationTryOSREnterAtCatchAndValueProfile, &vm(), m_bytecodeIndex.asBits());
</ins><span class="cx">     auto skipOSREntry = branchTestPtr(Zero, returnValueGPR);
</span><span class="cx">     emitRestoreCalleeSaves();
</span><span class="cx">     farJump(returnValueGPR, ExceptionHandlerPtrTag);
</span><span class="cx">     skipOSREntry.link(this);
</span><ins>+    if (buffer && shouldEmitProfiling()) {
+        buffer->forEach([&] (ValueProfileAndVirtualRegister& profile) {
+            JSValueRegs regs(regT0);
+            emitGetVirtualRegister(profile.m_operand, regs);
+            emitValueProfilingSite(static_cast<ValueProfile&>(profile), regs);
+        });
+    }
</ins><span class="cx"> #endif // ENABLE(DFG_JIT)
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -1066,8 +1053,8 @@
</span><span class="cx">     VirtualRegister scrutinee = bytecode.m_scrutinee;
</span><span class="cx"> 
</span><span class="cx">     // create jump table for switch destinations, track this switch statement.
</span><del>-    const UnlinkedSimpleJumpTable& unlinkedTable = m_unlinkedCodeBlock->unlinkedSwitchJumpTable(tableIndex);
-    SimpleJumpTable& linkedTable = m_switchJumpTables[tableIndex];
</del><ins>+    const UnlinkedSimpleJumpTable& unlinkedTable = m_codeBlock->unlinkedSwitchJumpTable(tableIndex);
+    SimpleJumpTable& linkedTable = m_codeBlock->switchJumpTable(tableIndex);
</ins><span class="cx">     m_switches.append(SwitchRecord(tableIndex, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
</span><span class="cx">     linkedTable.ensureCTITable(unlinkedTable);
</span><span class="cx"> 
</span><span class="lines">@@ -1094,14 +1081,13 @@
</span><span class="cx">     VirtualRegister scrutinee = bytecode.m_scrutinee;
</span><span class="cx"> 
</span><span class="cx">     // create jump table for switch destinations, track this switch statement.
</span><del>-    const UnlinkedSimpleJumpTable& unlinkedTable = m_unlinkedCodeBlock->unlinkedSwitchJumpTable(tableIndex);
-    SimpleJumpTable& linkedTable = m_switchJumpTables[tableIndex];
</del><ins>+    const UnlinkedSimpleJumpTable& unlinkedTable = m_codeBlock->unlinkedSwitchJumpTable(tableIndex);
+    SimpleJumpTable& linkedTable = m_codeBlock->switchJumpTable(tableIndex);
</ins><span class="cx">     m_switches.append(SwitchRecord(tableIndex, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
</span><span class="cx">     linkedTable.ensureCTITable(unlinkedTable);
</span><span class="cx"> 
</span><del>-    emitGetVirtualRegister(scrutinee, argumentGPR1);
-    loadGlobalObject(argumentGPR0);
-    callOperation(operationSwitchCharWithUnknownKeyType, argumentGPR0, argumentGPR1, tableIndex, unlinkedTable.m_min);
</del><ins>+    emitGetVirtualRegister(scrutinee, regT0);
+    callOperation(operationSwitchCharWithUnknownKeyType, TrustedImmPtr(m_codeBlock->globalObject()), regT0, tableIndex, unlinkedTable.m_min);
</ins><span class="cx">     farJump(returnValueGPR, JSSwitchPtrTag);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -1113,14 +1099,13 @@
</span><span class="cx">     VirtualRegister scrutinee = bytecode.m_scrutinee;
</span><span class="cx"> 
</span><span class="cx">     // create jump table for switch destinations, track this switch statement.
</span><del>-    const UnlinkedStringJumpTable& unlinkedTable = m_unlinkedCodeBlock->unlinkedStringSwitchJumpTable(tableIndex);
-    StringJumpTable& linkedTable = m_stringSwitchJumpTables[tableIndex];
</del><ins>+    const UnlinkedStringJumpTable& unlinkedTable = m_codeBlock->unlinkedStringSwitchJumpTable(tableIndex);
+    StringJumpTable& linkedTable = m_codeBlock->stringSwitchJumpTable(tableIndex);
</ins><span class="cx">     m_switches.append(SwitchRecord(tableIndex, m_bytecodeIndex, defaultOffset, SwitchRecord::String));
</span><span class="cx">     linkedTable.ensureCTITable(unlinkedTable);
</span><span class="cx"> 
</span><del>-    emitGetVirtualRegister(scrutinee, argumentGPR1);
-    loadGlobalObject(argumentGPR0);
-    callOperation(operationSwitchStringWithUnknownKeyType, argumentGPR0, argumentGPR1, tableIndex);
</del><ins>+    emitGetVirtualRegister(scrutinee, regT0);
+    callOperation(operationSwitchStringWithUnknownKeyType, TrustedImmPtr(m_codeBlock->globalObject()), regT0, tableIndex);
</ins><span class="cx">     farJump(returnValueGPR, JSSwitchPtrTag);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -1127,8 +1112,7 @@
</span><span class="cx"> void JIT::emit_op_debug(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><span class="cx">     auto bytecode = currentInstruction->as<OpDebug>();
</span><del>-    loadPtr(addressFor(CallFrameSlot::codeBlock), regT0);
-    load32(Address(regT0, CodeBlock::offsetOfDebuggerRequests()), regT0);
</del><ins>+    load32(codeBlock()->debuggerRequestsAddress(), regT0);
</ins><span class="cx">     Jump noDebuggerRequests = branchTest32(Zero, regT0);
</span><span class="cx">     callOperation(operationDebug, &vm(), static_cast<int>(bytecode.m_debugHookType));
</span><span class="cx">     noDebuggerRequests.link(this);
</span><span class="lines">@@ -1149,7 +1133,7 @@
</span><span class="cx"> 
</span><span class="cx">     isMasqueradesAsUndefined.link(this);
</span><span class="cx">     emitLoadStructure(vm(), regT0, regT2, regT1);
</span><del>-    loadGlobalObject(regT0);
</del><ins>+    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
</ins><span class="cx">     loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
</span><span class="cx">     comparePtr(Equal, regT0, regT2, regT0);
</span><span class="cx">     Jump wasNotImmediate = jump();
</span><span class="lines">@@ -1164,6 +1148,7 @@
</span><span class="cx"> 
</span><span class="cx">     boxBoolean(regT0, JSValueRegs { regT0 });
</span><span class="cx">     emitPutVirtualRegister(dst);
</span><ins>+
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_neq_null(const Instruction* currentInstruction)
</span><span class="lines">@@ -1181,7 +1166,7 @@
</span><span class="cx"> 
</span><span class="cx">     isMasqueradesAsUndefined.link(this);
</span><span class="cx">     emitLoadStructure(vm(), regT0, regT2, regT1);
</span><del>-    loadGlobalObject(regT0);
</del><ins>+    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
</ins><span class="cx">     loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
</span><span class="cx">     comparePtr(NotEqual, regT0, regT2, regT0);
</span><span class="cx">     Jump wasNotImmediate = jump();
</span><span class="lines">@@ -1203,14 +1188,12 @@
</span><span class="cx">     // Even though CTI doesn't use them, we initialize our constant
</span><span class="cx">     // registers to zap stale pointers, to avoid unnecessarily prolonging
</span><span class="cx">     // object lifetime and increasing GC pressure.
</span><del>-    size_t count = m_unlinkedCodeBlock->numVars();
</del><ins>+    size_t count = m_codeBlock->numVars();
</ins><span class="cx"> #if !ENABLE(EXTRA_CTI_THUNKS)
</span><span class="cx">     for (size_t j = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters(); j < count; ++j)
</span><span class="cx">         emitInitRegister(virtualRegisterForLocal(j));
</span><span class="cx"> 
</span><del>-    
-    loadPtr(addressFor(CallFrameSlot::codeBlock), regT0);
-    emitWriteBarrier(regT0);
</del><ins>+    emitWriteBarrier(m_codeBlock);
</ins><span class="cx"> 
</span><span class="cx">     emitEnterOptimizationCheck();
</span><span class="cx"> #else
</span><span class="lines">@@ -1229,7 +1212,7 @@
</span><span class="cx"> #if ENABLE(EXTRA_CTI_THUNKS)
</span><span class="cx"> MacroAssemblerCodeRef<JITThunkPtrTag> JIT::op_enter_handlerGenerator(VM& vm)
</span><span class="cx"> {
</span><del>-    CCallHelpers jit;
</del><ins>+    JIT jit(vm);
</ins><span class="cx"> 
</span><span class="cx"> #if CPU(X86_64)
</span><span class="cx">     jit.push(X86Registers::ebp);
</span><span class="lines">@@ -1327,12 +1310,14 @@
</span><span class="cx"> void JIT::emit_op_to_this(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><span class="cx">     auto bytecode = currentInstruction->as<OpToThis>();
</span><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
+    StructureID* cachedStructureID = &metadata.m_cachedStructureID;
</ins><span class="cx">     emitGetVirtualRegister(bytecode.m_srcDst, regT1);
</span><span class="cx"> 
</span><span class="cx">     emitJumpSlowCaseIfNotJSCell(regT1);
</span><span class="cx"> 
</span><span class="cx">     addSlowCase(branchIfNotType(regT1, FinalObjectType));
</span><del>-    load32FromMetadata(bytecode, OpToThis::Metadata::offsetOfCachedStructureID(), regT2);
</del><ins>+    load32(cachedStructureID, regT2);
</ins><span class="cx">     addSlowCase(branch32(NotEqual, Address(regT1, JSCell::structureIDOffset()), regT2));
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -1339,7 +1324,9 @@
</span><span class="cx"> void JIT::emit_op_create_this(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><span class="cx">     auto bytecode = currentInstruction->as<OpCreateThis>();
</span><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
</ins><span class="cx">     VirtualRegister callee = bytecode.m_callee;
</span><ins>+    WriteBarrierBase<JSCell>* cachedFunction = &metadata.m_cachedCallee;
</ins><span class="cx">     RegisterID calleeReg = regT0;
</span><span class="cx">     RegisterID rareDataReg = regT4;
</span><span class="cx">     RegisterID resultReg = regT0;
</span><span class="lines">@@ -1355,7 +1342,7 @@
</span><span class="cx">     loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfileWithPrototype::offsetOfAllocator() - JSFunction::rareDataTag), allocatorReg);
</span><span class="cx">     loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfileWithPrototype::offsetOfStructure() - JSFunction::rareDataTag), structureReg);
</span><span class="cx"> 
</span><del>-    loadPtrFromMetadata(bytecode, OpCreateThis::Metadata::offsetOfCachedCallee(), cachedFunctionReg);
</del><ins>+    loadPtr(cachedFunction, cachedFunctionReg);
</ins><span class="cx">     Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects()));
</span><span class="cx">     addSlowCase(branchPtr(NotEqual, calleeReg, cachedFunctionReg));
</span><span class="cx">     hasSeenMultipleCallees.link(this);
</span><span class="lines">@@ -1385,8 +1372,7 @@
</span><span class="cx">     linkAllSlowCases(iter);
</span><span class="cx"> 
</span><span class="cx">     auto bytecode = currentInstruction->as<OpEq>();
</span><del>-    loadGlobalObject(regT2);
-    callOperation(operationCompareEq, regT2, regT0, regT1);
</del><ins>+    callOperation(operationCompareEq, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1);
</ins><span class="cx">     boxBoolean(returnValueGPR, JSValueRegs { returnValueGPR });
</span><span class="cx">     emitPutVirtualRegister(bytecode.m_dst, returnValueGPR);
</span><span class="cx"> }
</span><span class="lines">@@ -1396,8 +1382,7 @@
</span><span class="cx">     linkAllSlowCases(iter);
</span><span class="cx"> 
</span><span class="cx">     auto bytecode = currentInstruction->as<OpNeq>();
</span><del>-    loadGlobalObject(regT2);
-    callOperation(operationCompareEq, regT2, regT0, regT1);
</del><ins>+    callOperation(operationCompareEq, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1);
</ins><span class="cx">     xor32(TrustedImm32(0x1), regT0);
</span><span class="cx">     boxBoolean(returnValueGPR, JSValueRegs { returnValueGPR });
</span><span class="cx">     emitPutVirtualRegister(bytecode.m_dst, returnValueGPR);
</span><span class="lines">@@ -1409,8 +1394,7 @@
</span><span class="cx"> 
</span><span class="cx">     auto bytecode = currentInstruction->as<OpJeq>();
</span><span class="cx">     unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
</span><del>-    loadGlobalObject(regT2);
-    callOperation(operationCompareEq, regT2, regT0, regT1);
</del><ins>+    callOperation(operationCompareEq, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1);
</ins><span class="cx">     emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), target);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -1420,8 +1404,7 @@
</span><span class="cx"> 
</span><span class="cx">     auto bytecode = currentInstruction->as<OpJneq>();
</span><span class="cx">     unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
</span><del>-    loadGlobalObject(regT2);
-    callOperation(operationCompareEq, regT2, regT0, regT1);
</del><ins>+    callOperation(operationCompareEq, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1);
</ins><span class="cx">     emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), target);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -1435,11 +1418,10 @@
</span><span class="cx">     VirtualRegister constructor = bytecode.m_constructor;
</span><span class="cx">     VirtualRegister hasInstanceValue = bytecode.m_hasInstanceValue;
</span><span class="cx"> 
</span><del>-    emitGetVirtualRegister(value, GPRInfo::argumentGPR1);
-    emitGetVirtualRegister(constructor, GPRInfo::argumentGPR2);
-    emitGetVirtualRegister(hasInstanceValue, GPRInfo::argumentGPR3);
-    loadGlobalObject(GPRInfo::argumentGPR0);
-    callOperation(operationInstanceOfCustom, GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3);
</del><ins>+    emitGetVirtualRegister(value, regT0);
+    emitGetVirtualRegister(constructor, regT1);
+    emitGetVirtualRegister(hasInstanceValue, regT2);
+    callOperation(operationInstanceOfCustom, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1, regT2);
</ins><span class="cx">     boxBoolean(returnValueGPR, JSValueRegs { returnValueGPR });
</span><span class="cx">     emitPutVirtualRegister(dst, returnValueGPR);
</span><span class="cx"> }
</span><span class="lines">@@ -1448,7 +1430,7 @@
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_loop_hint(const Instruction* instruction)
</span><span class="cx"> {
</span><del>-    if (UNLIKELY(Options::returnEarlyFromInfiniteLoopsForFuzzing() && m_unlinkedCodeBlock->loopHintsAreEligibleForFuzzingEarlyReturn())) {
</del><ins>+    if (UNLIKELY(Options::returnEarlyFromInfiniteLoopsForFuzzing() && m_codeBlock->loopHintsAreEligibleForFuzzingEarlyReturn())) {
</ins><span class="cx">         uintptr_t* ptr = vm().getLoopHintExecutionCounter(instruction);
</span><span class="cx">         loadPtr(ptr, regT0);
</span><span class="cx">         auto skipEarlyReturn = branchPtr(Below, regT0, TrustedImmPtr(Options::earlyReturnFromInfiniteLoopsLimit()));
</span><span class="lines">@@ -1458,7 +1440,7 @@
</span><span class="cx"> #else
</span><span class="cx">         JSValueRegs resultRegs(GPRInfo::returnValueGPR2, GPRInfo::returnValueGPR);
</span><span class="cx"> #endif
</span><del>-        loadGlobalObject(resultRegs.gpr());
</del><ins>+        moveValue(m_codeBlock->globalObject(), resultRegs);
</ins><span class="cx">         checkStackPointerAlignment();
</span><span class="cx">         emitRestoreCalleeSaves();
</span><span class="cx">         emitFunctionEpilogue();
</span><span class="lines">@@ -1471,9 +1453,8 @@
</span><span class="cx"> 
</span><span class="cx">     // Emit the JIT optimization check: 
</span><span class="cx">     if (canBeOptimized()) {
</span><del>-        loadPtr(addressFor(CallFrameSlot::codeBlock), regT0);
</del><span class="cx">         addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
</span><del>-            Address(regT0, CodeBlock::offsetOfJITExecuteCounter())));
</del><ins>+            AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
</ins><span class="cx">     }
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -1528,8 +1509,7 @@
</span><span class="cx">     linkAllSlowCases(iter);
</span><span class="cx"> 
</span><span class="cx"> #if !ENABLE(EXTRA_CTI_THUNKS)
</span><del>-    loadGlobalObject(argumentGPR0);
-    callOperation(operationHandleTraps, argumentGPR0);
</del><ins>+    callOperation(operationHandleTraps, TrustedImmPtr(m_codeBlock->globalObject()));
</ins><span class="cx"> #else
</span><span class="cx">     constexpr GPRReg bytecodeOffsetGPR = argumentGPR3;
</span><span class="cx">     uint32_t bytecodeOffset = m_bytecodeIndex.offset();
</span><span class="lines">@@ -1542,7 +1522,7 @@
</span><span class="cx"> #if ENABLE(EXTRA_CTI_THUNKS)
</span><span class="cx"> MacroAssemblerCodeRef<JITThunkPtrTag> JIT::op_check_traps_handlerGenerator(VM& vm)
</span><span class="cx"> {
</span><del>-    CCallHelpers jit;
</del><ins>+    JIT jit(vm);
</ins><span class="cx"> 
</span><span class="cx"> #if CPU(X86_64)
</span><span class="cx">     jit.push(X86Registers::ebp);
</span><span class="lines">@@ -1584,9 +1564,7 @@
</span><span class="cx">     auto bytecode = currentInstruction->as<OpNewRegexp>();
</span><span class="cx">     VirtualRegister dst = bytecode.m_dst;
</span><span class="cx">     VirtualRegister regexp = bytecode.m_regexp;
</span><del>-    GPRReg globalGPR = argumentGPR0;
-    loadGlobalObject(globalGPR);
-    callOperation(operationNewRegexp, globalGPR, jsCast<RegExp*>(m_unlinkedCodeBlock->getConstant(regexp)));
</del><ins>+    callOperation(operationNewRegexp, TrustedImmPtr(m_codeBlock->globalObject()), jsCast<RegExp*>(m_codeBlock->getConstant(regexp)));
</ins><span class="cx">     emitStoreCell(dst, returnValueGPR);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -1593,27 +1571,27 @@
</span><span class="cx"> template<typename Op>
</span><span class="cx"> void JIT::emitNewFuncCommon(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><ins>+    Jump lazyJump;
</ins><span class="cx">     auto bytecode = currentInstruction->as<Op>();
</span><span class="cx">     VirtualRegister dst = bytecode.m_dst;
</span><span class="cx"> 
</span><span class="cx"> #if USE(JSVALUE64)
</span><del>-    emitGetVirtualRegister(bytecode.m_scope, argumentGPR1);
</del><ins>+    emitGetVirtualRegister(bytecode.m_scope, regT0);
</ins><span class="cx"> #else
</span><del>-    emitLoadPayload(bytecode.m_scope, argumentGPR1);
</del><ins>+    emitLoadPayload(bytecode.m_scope, regT0);
</ins><span class="cx"> #endif
</span><del>-    auto constant = m_constantPool.add(JITConstantPool::Type::FunctionDecl, bitwise_cast<void*>(static_cast<uintptr_t>(bytecode.m_functionDecl)));
-    loadConstant(constant, argumentGPR2);
</del><ins>+    FunctionExecutable* funcExec = m_codeBlock->functionDecl(bytecode.m_functionDecl);
</ins><span class="cx"> 
</span><span class="cx">     OpcodeID opcodeID = Op::opcodeID;
</span><span class="cx">     if (opcodeID == op_new_func)
</span><del>-        callOperation(operationNewFunction, dst, &vm(), argumentGPR1, argumentGPR2);
</del><ins>+        callOperation(operationNewFunction, dst, &vm(), regT0, funcExec);
</ins><span class="cx">     else if (opcodeID == op_new_generator_func)
</span><del>-        callOperation(operationNewGeneratorFunction, dst, &vm(), argumentGPR1, argumentGPR2);
</del><ins>+        callOperation(operationNewGeneratorFunction, dst, &vm(), regT0, funcExec);
</ins><span class="cx">     else if (opcodeID == op_new_async_func)
</span><del>-        callOperation(operationNewAsyncFunction, dst, &vm(), argumentGPR1, argumentGPR2);
</del><ins>+        callOperation(operationNewAsyncFunction, dst, &vm(), regT0, funcExec);
</ins><span class="cx">     else {
</span><span class="cx">         ASSERT(opcodeID == op_new_async_generator_func);
</span><del>-        callOperation(operationNewAsyncGeneratorFunction, dst, &vm(), argumentGPR1, argumentGPR2);
</del><ins>+        callOperation(operationNewAsyncGeneratorFunction, dst, &vm(), regT0, funcExec);
</ins><span class="cx">     }
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -1643,24 +1621,23 @@
</span><span class="cx">     auto bytecode = currentInstruction->as<Op>();
</span><span class="cx">     VirtualRegister dst = bytecode.m_dst;
</span><span class="cx"> #if USE(JSVALUE64)
</span><del>-    emitGetVirtualRegister(bytecode.m_scope, argumentGPR1);
</del><ins>+    emitGetVirtualRegister(bytecode.m_scope, regT0);
</ins><span class="cx"> #else
</span><del>-    emitLoadPayload(bytecode.m_scope, argumentGPR1);
</del><ins>+    emitLoadPayload(bytecode.m_scope, regT0);
</ins><span class="cx"> #endif
</span><span class="cx"> 
</span><del>-    auto constant = m_constantPool.add(JITConstantPool::Type::FunctionExpr, bitwise_cast<void*>(static_cast<uintptr_t>(bytecode.m_functionDecl)));
-    loadConstant(constant, argumentGPR2);
</del><ins>+    FunctionExecutable* function = m_codeBlock->functionExpr(bytecode.m_functionDecl);
</ins><span class="cx">     OpcodeID opcodeID = Op::opcodeID;
</span><span class="cx"> 
</span><span class="cx">     if (opcodeID == op_new_func_exp)
</span><del>-        callOperation(operationNewFunction, dst, &vm(), argumentGPR1, argumentGPR2);
</del><ins>+        callOperation(operationNewFunction, dst, &vm(), regT0, function);
</ins><span class="cx">     else if (opcodeID == op_new_generator_func_exp)
</span><del>-        callOperation(operationNewGeneratorFunction, dst, &vm(), argumentGPR1, argumentGPR2);
</del><ins>+        callOperation(operationNewGeneratorFunction, dst, &vm(), regT0, function);
</ins><span class="cx">     else if (opcodeID == op_new_async_func_exp)
</span><del>-        callOperation(operationNewAsyncFunction, dst, &vm(), argumentGPR1, argumentGPR2);
</del><ins>+        callOperation(operationNewAsyncFunction, dst, &vm(), regT0, function);
</ins><span class="cx">     else {
</span><span class="cx">         ASSERT(opcodeID == op_new_async_generator_func_exp);
</span><del>-        callOperation(operationNewAsyncGeneratorFunction, dst, &vm(), argumentGPR1, argumentGPR2);
</del><ins>+        callOperation(operationNewAsyncGeneratorFunction, dst, &vm(), regT0, function);
</ins><span class="cx">     }
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -1687,30 +1664,29 @@
</span><span class="cx"> void JIT::emit_op_new_array(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><span class="cx">     auto bytecode = currentInstruction->as<OpNewArray>();
</span><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
</ins><span class="cx">     VirtualRegister dst = bytecode.m_dst;
</span><span class="cx">     VirtualRegister valuesStart = bytecode.m_argv;
</span><span class="cx">     int size = bytecode.m_argc;
</span><del>-    addPtr(TrustedImm32(valuesStart.offset() * sizeof(Register)), callFrameRegister, argumentGPR2);
-    materializePointerIntoMetadata(bytecode, OpNewArray::Metadata::offsetOfArrayAllocationProfile(), argumentGPR1);
-    loadGlobalObject(argumentGPR0);
-    callOperation(operationNewArrayWithProfile, dst, argumentGPR0, argumentGPR1, argumentGPR2, size);
</del><ins>+    addPtr(TrustedImm32(valuesStart.offset() * sizeof(Register)), callFrameRegister, regT0);
+    callOperation(operationNewArrayWithProfile, dst, TrustedImmPtr(m_codeBlock->globalObject()),
+        &metadata.m_arrayAllocationProfile, regT0, size);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_new_array_with_size(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><span class="cx">     auto bytecode = currentInstruction->as<OpNewArrayWithSize>();
</span><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
</ins><span class="cx">     VirtualRegister dst = bytecode.m_dst;
</span><span class="cx">     VirtualRegister sizeIndex = bytecode.m_length;
</span><span class="cx"> #if USE(JSVALUE64)
</span><del>-    materializePointerIntoMetadata(bytecode, OpNewArrayWithSize::Metadata::offsetOfArrayAllocationProfile(), argumentGPR1);
-    emitGetVirtualRegister(sizeIndex, argumentGPR2);
-    loadGlobalObject(argumentGPR0);
-    callOperation(operationNewArrayWithSizeAndProfile, dst, argumentGPR0, argumentGPR1, argumentGPR2);
</del><ins>+    emitGetVirtualRegister(sizeIndex, regT0);
+    callOperation(operationNewArrayWithSizeAndProfile, dst, TrustedImmPtr(m_codeBlock->globalObject()),
+        &metadata.m_arrayAllocationProfile, regT0);
</ins><span class="cx"> #else
</span><del>-    materializePointerIntoMetadata(bytecode, OpNewArrayWithSize::Metadata::offsetOfArrayAllocationProfile(), regT2);
</del><span class="cx">     emitLoad(sizeIndex, regT1, regT0);
</span><del>-    loadGlobalObject(regT3);
-    callOperation(operationNewArrayWithSizeAndProfile, dst, regT3, regT2, JSValueRegs(regT1, regT0));
</del><ins>+    callOperation(operationNewArrayWithSizeAndProfile, dst, TrustedImmPtr(m_codeBlock->globalObject()),
+        &metadata.m_arrayAllocationProfile, JSValueRegs(regT1, regT0));
</ins><span class="cx"> #endif
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -1718,10 +1694,8 @@
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_profile_type(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><del>-    m_isShareable = false;
-
</del><span class="cx">     auto bytecode = currentInstruction->as<OpProfileType>();
</span><del>-    auto& metadata = bytecode.metadata(m_profiledCodeBlock);
</del><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
</ins><span class="cx">     TypeLocation* cachedTypeLocation = metadata.m_typeLocation;
</span><span class="cx">     VirtualRegister valueToProfile = bytecode.m_targetVirtualRegister;
</span><span class="cx"> 
</span><span class="lines">@@ -1803,15 +1777,12 @@
</span><span class="cx">     static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "we will have problems if this is true.");
</span><span class="cx">     auto bytecode = currentInstruction->as<OpLogShadowChickenTail>();
</span><span class="cx">     GPRReg shadowPacketReg = regT0;
</span><del>-    {
-        GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
-        GPRReg scratch2Reg = regT2;
-        ensureShadowChickenPacket(vm(), shadowPacketReg, scratch1Reg, scratch2Reg);
-    }
</del><ins>+    GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
+    GPRReg scratch2Reg = regT2;
+    ensureShadowChickenPacket(vm(), shadowPacketReg, scratch1Reg, scratch2Reg);
</ins><span class="cx">     emitGetVirtualRegister(bytecode.m_thisValue, regT2);
</span><span class="cx">     emitGetVirtualRegister(bytecode.m_scope, regT3);
</span><del>-    loadPtr(addressFor(CallFrameSlot::codeBlock), regT1);
-    logShadowChickenTailPacket(shadowPacketReg, JSValueRegs(regT2), regT3, regT1, CallSiteIndex(m_bytecodeIndex));
</del><ins>+    logShadowChickenTailPacket(shadowPacketReg, JSValueRegs(regT2), regT3, m_codeBlock, CallSiteIndex(m_bytecodeIndex));
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> #endif // USE(JSVALUE64)
</span><span class="lines">@@ -1818,10 +1789,8 @@
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_profile_control_flow(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><del>-    m_isShareable = false;
-
</del><span class="cx">     auto bytecode = currentInstruction->as<OpProfileControlFlow>();
</span><del>-    auto& metadata = bytecode.metadata(m_profiledCodeBlock);
</del><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
</ins><span class="cx">     BasicBlockLocation* basicBlockLocation = metadata.m_basicBlockLocation;
</span><span class="cx"> #if USE(JSVALUE64)
</span><span class="cx">     basicBlockLocation->emitExecuteCode(*this);
</span><span class="lines">@@ -1891,7 +1860,7 @@
</span><span class="cx">     moveValue(jsUndefined(), resultRegs);
</span><span class="cx"> 
</span><span class="cx">     done.link(this);
</span><del>-    emitValueProfilingSite(bytecode, resultRegs);
</del><ins>+    emitValueProfilingSite(bytecode.metadata(m_codeBlock), resultRegs);
</ins><span class="cx">     emitPutVirtualRegister(dst, resultRegs);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -1917,7 +1886,7 @@
</span><span class="cx">     emitLoadPrototype(vm(), valueRegs.payloadGPR(), resultRegs, scratchGPR, slowCases);
</span><span class="cx">     addSlowCase(slowCases);
</span><span class="cx"> 
</span><del>-    emitValueProfilingSite(bytecode, resultRegs);
</del><ins>+    emitValueProfilingSite(bytecode.metadata(m_codeBlock), resultRegs);
</ins><span class="cx">     emitPutVirtualRegister(bytecode.m_dst, resultRegs);
</span><span class="cx"> }
</span><span class="cx"> 
</span></span></pre></div>
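<p>The hunks in this file follow one pattern: loads that went through the call frame's CodeBlock slot or a constant pool (loadGlobalObject(...), loadPtr(addressFor(CallFrameSlot::codeBlock), ...), loadConstant(...)) are replaced by values resolved while the code is being emitted (TrustedImmPtr(m_codeBlock-&gt;globalObject()), AbsoluteAddress(m_codeBlock-&gt;addressOfJITExecuteCounter()), direct metadata pointers). The self-contained C++ sketch below uses made-up stand-in types, not the JSC assembler API, to illustrate the trade-off: code specialized to one CodeBlock can embed such a pointer as an immediate, while shareable code has to re-fetch it from the frame on every execution.</p>
<pre>
#include &lt;cassert&gt;

// Hypothetical stand-ins for CodeBlock / JSGlobalObject; these are not the JSC types.
struct GlobalObjectLike { int id; };
struct CodeBlockLike   { GlobalObjectLike* globalObject; };
struct CallFrameLike   { CodeBlockLike* codeBlock; };

// "Specialized" code: the pointer is resolved once, when the code is emitted,
// and kept as an immediate (cf. move(TrustedImmPtr(m_codeBlock-&gt;globalObject()), reg)).
struct SpecializedThunk {
    GlobalObjectLike* baked; // burned in at "JIT time"
    GlobalObjectLike* run(CallFrameLike&amp;) const { return baked; }
};

// "Shareable" code: nothing CodeBlock-specific is embedded, so every execution
// re-loads the pointer through the frame's CodeBlock slot (cf. loadGlobalObject(reg)).
struct ShareableThunk {
    GlobalObjectLike* run(CallFrameLike&amp; frame) const { return frame.codeBlock-&gt;globalObject; }
};

int main()
{
    GlobalObjectLike global { 42 };
    CodeBlockLike codeBlock { &amp;global };
    CallFrameLike frame { &amp;codeBlock };

    SpecializedThunk specialized { codeBlock.globalObject };
    ShareableThunk shareable;
    assert(specialized.run(frame) == shareable.run(frame)); // both resolve the same global
    return 0;
}
</pre>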
<a id="trunkSourceJavaScriptCorejitJITOpcodes32_64cpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp      2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp 2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -84,7 +84,7 @@
</span><span class="cx"> void JIT::emit_op_new_object(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><span class="cx">     auto bytecode = currentInstruction->as<OpNewObject>();
</span><del>-    auto& metadata = bytecode.metadata(m_profiledCodeBlock);
</del><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
</ins><span class="cx">     Structure* structure = metadata.m_objectAllocationProfile.structure();
</span><span class="cx">     size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
</span><span class="cx">     Allocator allocator = allocatorForNonVirtualConcurrently<JSFinalObject>(*m_vm, allocationSize, AllocatorForMode::AllocatorIfExists);
</span><span class="lines">@@ -110,7 +110,7 @@
</span><span class="cx">     linkAllSlowCases(iter);
</span><span class="cx"> 
</span><span class="cx">     auto bytecode = currentInstruction->as<OpNewObject>();
</span><del>-    auto& metadata = bytecode.metadata(m_profiledCodeBlock);
</del><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
</ins><span class="cx">     VirtualRegister dst = bytecode.m_dst;
</span><span class="cx">     Structure* structure = metadata.m_objectAllocationProfile.structure();
</span><span class="cx">     callOperationNoExceptionCheck(operationNewObject, &vm(), structure);
</span><span class="lines">@@ -127,7 +127,7 @@
</span><span class="cx">     emitLoadPayload(hasInstanceValue, regT0);
</span><span class="cx">     // We don't jump if we know what Symbol.hasInstance would do.
</span><span class="cx">     Jump hasInstanceValueNotCell = emitJumpIfNotJSCell(hasInstanceValue);
</span><del>-    Jump customhasInstanceValue = branchPtr(NotEqual, regT0, TrustedImmPtr(m_profiledCodeBlock->globalObject()->functionProtoHasInstanceSymbolFunction()));
</del><ins>+    Jump customhasInstanceValue = branchPtr(NotEqual, regT0, TrustedImmPtr(m_codeBlock->globalObject()->functionProtoHasInstanceSymbolFunction()));
</ins><span class="cx"> 
</span><span class="cx">     // We know that constructor is an object from the way bytecode is emitted for instanceof expressions.
</span><span class="cx">     emitLoadPayload(constructor, regT0);
</span><span class="lines">@@ -162,7 +162,7 @@
</span><span class="cx">     emitJumpSlowCaseIfNotJSCell(proto);
</span><span class="cx">     
</span><span class="cx">     JITInstanceOfGenerator gen(
</span><del>-        m_profiledCodeBlock, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex),
</del><ins>+        m_codeBlock, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex),
</ins><span class="cx">         RegisterSet::stubUnavailableRegisters(),
</span><span class="cx">         regT0, // result
</span><span class="cx">         regT2, // value
</span><span class="lines">@@ -196,7 +196,7 @@
</span><span class="cx">     Label coldPathBegin = label();
</span><span class="cx">     emitLoadTag(value, regT0);
</span><span class="cx">     emitLoadTag(proto, regT3);
</span><del>-    Call call = callOperation(operationInstanceOfOptimize, dst, m_profiledCodeBlock->globalObject(), gen.stubInfo(), JSValueRegs(regT0, regT2), JSValueRegs(regT3, regT1));
</del><ins>+    Call call = callOperation(operationInstanceOfOptimize, dst, m_codeBlock->globalObject(), gen.stubInfo(), JSValueRegs(regT0, regT2), JSValueRegs(regT3, regT1));
</ins><span class="cx">     gen.reportSlowPathCall(coldPathBegin, call);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -213,7 +213,7 @@
</span><span class="cx">     emitLoad(value, regT1, regT0);
</span><span class="cx">     emitLoadPayload(constructor, regT2);
</span><span class="cx">     emitLoad(hasInstanceValue, regT4, regT3);
</span><del>-    callOperation(operationInstanceOfCustom, m_profiledCodeBlock->globalObject(), JSValueRegs(regT1, regT0), regT2, JSValueRegs(regT4, regT3));
</del><ins>+    callOperation(operationInstanceOfCustom, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0), regT2, JSValueRegs(regT4, regT3));
</ins><span class="cx">     emitStoreBool(dst, returnValueGPR);
</span><span class="cx"> }
</span><span class="cx">     
</span><span class="lines">@@ -248,7 +248,7 @@
</span><span class="cx">     
</span><span class="cx">     isMasqueradesAsUndefined.link(this);
</span><span class="cx">     loadPtr(Address(regT0, JSCell::structureIDOffset()), regT1);
</span><del>-    move(TrustedImmPtr(m_profiledCodeBlock->globalObject()), regT0);
</del><ins>+    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
</ins><span class="cx">     loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1);
</span><span class="cx">     compare32(Equal, regT0, regT1, regT0);
</span><span class="cx"> 
</span><span class="lines">@@ -378,7 +378,7 @@
</span><span class="cx">     VirtualRegister name = bytecode.m_name;
</span><span class="cx">     emitLoadPayload(func, regT1);
</span><span class="cx">     emitLoad(name, regT3, regT2);
</span><del>-    callOperation(operationSetFunctionName, m_profiledCodeBlock->globalObject(), regT1, JSValueRegs(regT3, regT2));
</del><ins>+    callOperation(operationSetFunctionName, m_codeBlock->globalObject(), regT1, JSValueRegs(regT3, regT2));
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_not(const Instruction* currentInstruction)
</span><span class="lines">@@ -408,7 +408,7 @@
</span><span class="cx">     GPRReg scratch1 = regT2;
</span><span class="cx">     GPRReg scratch2 = regT3;
</span><span class="cx">     bool shouldCheckMasqueradesAsUndefined = true;
</span><del>-    addJump(branchIfFalsey(vm(), value, scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_profiledCodeBlock->globalObject()), target);
</del><ins>+    addJump(branchIfFalsey(vm(), value, scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()), target);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_jtrue(const Instruction* currentInstruction)
</span><span class="lines">@@ -422,7 +422,7 @@
</span><span class="cx">     JSValueRegs value(regT1, regT0);
</span><span class="cx">     GPRReg scratch1 = regT2;
</span><span class="cx">     GPRReg scratch2 = regT3;
</span><del>-    addJump(branchIfTruthy(vm(), value, scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_profiledCodeBlock->globalObject()), target);
</del><ins>+    addJump(branchIfTruthy(vm(), value, scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()), target);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_jeq_null(const Instruction* currentInstruction)
</span><span class="lines">@@ -437,7 +437,7 @@
</span><span class="cx"> 
</span><span class="cx">     Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
</span><span class="cx">     loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
</span><del>-    move(TrustedImmPtr(m_profiledCodeBlock->globalObject()), regT0);
</del><ins>+    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
</ins><span class="cx">     addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target);
</span><span class="cx">     Jump masqueradesGlobalObjectIsForeign = jump();
</span><span class="cx"> 
</span><span class="lines">@@ -463,7 +463,7 @@
</span><span class="cx"> 
</span><span class="cx">     addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
</span><span class="cx">     loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
</span><del>-    move(TrustedImmPtr(m_profiledCodeBlock->globalObject()), regT0);
</del><ins>+    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
</ins><span class="cx">     addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target);
</span><span class="cx">     Jump wasNotImmediate = jump();
</span><span class="cx"> 
</span><span class="lines">@@ -504,7 +504,7 @@
</span><span class="cx"> void JIT::emit_op_jneq_ptr(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><span class="cx">     auto bytecode = currentInstruction->as<OpJneqPtr>();
</span><del>-    auto& metadata = bytecode.metadata(m_profiledCodeBlock);
</del><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
</ins><span class="cx">     VirtualRegister src = bytecode.m_value;
</span><span class="cx">     JSValue specialPointer = getConstantOperand(bytecode.m_specialPointer);
</span><span class="cx">     ASSERT(specialPointer.isCell());
</span><span class="lines">@@ -552,13 +552,13 @@
</span><span class="cx">     genericCase.append(branchIfNotString(regT2));
</span><span class="cx"> 
</span><span class="cx">     // String case.
</span><del>-    callOperation(operationCompareStringEq, m_profiledCodeBlock->globalObject(), regT0, regT2);
</del><ins>+    callOperation(operationCompareStringEq, m_codeBlock->globalObject(), regT0, regT2);
</ins><span class="cx">     storeResult.append(jump());
</span><span class="cx"> 
</span><span class="cx">     // Generic case.
</span><span class="cx">     genericCase.append(getSlowCase(iter)); // doubles
</span><span class="cx">     genericCase.link(this);
</span><del>-    callOperation(operationCompareEq, m_profiledCodeBlock->globalObject(), JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2));
</del><ins>+    callOperation(operationCompareEq, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2));
</ins><span class="cx"> 
</span><span class="cx">     storeResult.link(this);
</span><span class="cx">     emitStoreBool(dst, returnValueGPR);
</span><span class="lines">@@ -591,7 +591,7 @@
</span><span class="cx">     genericCase.append(branchIfNotString(regT2));
</span><span class="cx"> 
</span><span class="cx">     // String case.
</span><del>-    callOperation(operationCompareStringEq, m_profiledCodeBlock->globalObject(), regT0, regT2);
</del><ins>+    callOperation(operationCompareStringEq, m_codeBlock->globalObject(), regT0, regT2);
</ins><span class="cx">     emitJumpSlowToHot(branchTest32(type == CompileOpEqType::Eq ? NonZero : Zero, returnValueGPR), jumpTarget);
</span><span class="cx">     done.append(jump());
</span><span class="cx"> 
</span><span class="lines">@@ -598,7 +598,7 @@
</span><span class="cx">     // Generic case.
</span><span class="cx">     genericCase.append(getSlowCase(iter)); // doubles
</span><span class="cx">     genericCase.link(this);
</span><del>-    callOperation(operationCompareEq, m_profiledCodeBlock->globalObject(), JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2));
</del><ins>+    callOperation(operationCompareEq, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2));
</ins><span class="cx">     emitJumpSlowToHot(branchTest32(type == CompileOpEqType::Eq ? NonZero : Zero, returnValueGPR), jumpTarget);
</span><span class="cx"> 
</span><span class="cx">     done.link(this);
</span><span class="lines">@@ -643,13 +643,13 @@
</span><span class="cx">     genericCase.append(branchIfNotString(regT2));
</span><span class="cx"> 
</span><span class="cx">     // String case.
</span><del>-    callOperation(operationCompareStringEq, m_profiledCodeBlock->globalObject(), regT0, regT2);
</del><ins>+    callOperation(operationCompareStringEq, m_codeBlock->globalObject(), regT0, regT2);
</ins><span class="cx">     storeResult.append(jump());
</span><span class="cx"> 
</span><span class="cx">     // Generic case.
</span><span class="cx">     genericCase.append(getSlowCase(iter)); // doubles
</span><span class="cx">     genericCase.link(this);
</span><del>-    callOperation(operationCompareEq, m_profiledCodeBlock->globalObject(), JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2));
</del><ins>+    callOperation(operationCompareEq, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2));
</ins><span class="cx"> 
</span><span class="cx">     storeResult.link(this);
</span><span class="cx">     xor32(TrustedImm32(0x1), returnValueGPR);
</span><span class="lines">@@ -762,7 +762,7 @@
</span><span class="cx"> 
</span><span class="cx">     auto bytecode = currentInstruction->as<OpJstricteq>();
</span><span class="cx">     unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
</span><del>-    callOperation(operationCompareStrictEq, m_profiledCodeBlock->globalObject(), JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2));
</del><ins>+    callOperation(operationCompareStrictEq, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2));
</ins><span class="cx">     emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), target);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -772,7 +772,7 @@
</span><span class="cx"> 
</span><span class="cx">     auto bytecode = currentInstruction->as<OpJnstricteq>();
</span><span class="cx">     unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
</span><del>-    callOperation(operationCompareStrictEq, m_profiledCodeBlock->globalObject(), JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2));
</del><ins>+    callOperation(operationCompareStrictEq, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2));
</ins><span class="cx">     emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), target);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -791,7 +791,7 @@
</span><span class="cx"> 
</span><span class="cx">     isMasqueradesAsUndefined.link(this);
</span><span class="cx">     loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
</span><del>-    move(TrustedImmPtr(m_profiledCodeBlock->globalObject()), regT0);
</del><ins>+    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
</ins><span class="cx">     loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
</span><span class="cx">     compare32(Equal, regT0, regT2, regT1);
</span><span class="cx">     Jump wasNotImmediate = jump();
</span><span class="lines">@@ -823,7 +823,7 @@
</span><span class="cx"> 
</span><span class="cx">     isMasqueradesAsUndefined.link(this);
</span><span class="cx">     loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
</span><del>-    move(TrustedImmPtr(m_profiledCodeBlock->globalObject()), regT0);
</del><ins>+    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
</ins><span class="cx">     loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
</span><span class="cx">     compare32(NotEqual, regT0, regT2, regT1);
</span><span class="cx">     Jump wasNotImmediate = jump();
</span><span class="lines">@@ -846,7 +846,7 @@
</span><span class="cx">     ASSERT(regT0 == returnValueGPR);
</span><span class="cx">     copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm().topEntryFrame);
</span><span class="cx">     emitLoad(bytecode.m_value, regT1, regT0);
</span><del>-    callOperationNoExceptionCheck(operationThrow, m_profiledCodeBlock->globalObject(), JSValueRegs(regT1, regT0));
</del><ins>+    callOperationNoExceptionCheck(operationThrow, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0));
</ins><span class="cx">     jumpToExceptionHandler(vm());
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -862,7 +862,7 @@
</span><span class="cx">     addSlowCase(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
</span><span class="cx">     isInt32.link(this);
</span><span class="cx"> 
</span><del>-    emitValueProfilingSite(bytecode.metadata(m_profiledCodeBlock), JSValueRegs(regT1, regT0));
</del><ins>+    emitValueProfilingSite(bytecode.metadata(m_codeBlock), JSValueRegs(regT1, regT0));
</ins><span class="cx">     if (src != dst)
</span><span class="cx">         emitStore(dst, regT1, regT0);
</span><span class="cx"> }
</span><span class="lines">@@ -884,7 +884,7 @@
</span><span class="cx">     addSlowCase(branchIfNotNumber(argumentValueRegs, regT2));
</span><span class="cx">     isBigInt.link(this);
</span><span class="cx"> 
</span><del>-    emitValueProfilingSite(bytecode.metadata(m_profiledCodeBlock), JSValueRegs(regT1, regT0));
</del><ins>+    emitValueProfilingSite(bytecode.metadata(m_codeBlock), JSValueRegs(regT1, regT0));
</ins><span class="cx">     if (src != dst)
</span><span class="cx">         emitStore(dst, regT1, regT0);
</span><span class="cx"> }
</span><span class="lines">@@ -915,7 +915,7 @@
</span><span class="cx">     addSlowCase(branchIfNotCell(regT1));
</span><span class="cx">     addSlowCase(branchIfNotObject(regT0));
</span><span class="cx"> 
</span><del>-    emitValueProfilingSite(bytecode.metadata(m_profiledCodeBlock), JSValueRegs(regT1, regT0));
</del><ins>+    emitValueProfilingSite(bytecode.metadata(m_codeBlock), JSValueRegs(regT1, regT0));
</ins><span class="cx">     if (src != dst)
</span><span class="cx">         emitStore(dst, regT1, regT0);
</span><span class="cx"> }
</span><span class="lines">@@ -931,19 +931,8 @@
</span><span class="cx">     load32(Address(regT3, VM::callFrameForCatchOffset()), callFrameRegister);
</span><span class="cx">     storePtr(TrustedImmPtr(nullptr), Address(regT3, VM::callFrameForCatchOffset()));
</span><span class="cx"> 
</span><del>-    addPtr(TrustedImm32(stackPointerOffsetFor(m_unlinkedCodeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
</del><ins>+    addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister);
</ins><span class="cx"> 
</span><del>-    // When the LLInt throws an exception, there is a chance that we've already tiered up
-    // the same CodeBlock to baseline, and we'll catch the exception in the baseline JIT (because
-    // we updated the exception handlers to point here). Because the LLInt uses a different value
-    // inside s_constantsGPR, the callee saves we restore above may not contain the correct register.
-    // So we replenish it here.
-    {
-        loadPtr(addressFor(CallFrameSlot::codeBlock), regT0);
-        loadPtr(Address(regT0, CodeBlock::offsetOfJITData()), regT0);
-        loadPtr(Address(regT0, CodeBlock::JITData::offsetOfJITConstantPool()), s_constantsGPR);
-    }
-
</del><span class="cx">     callOperationNoExceptionCheck(operationRetrieveAndClearExceptionIfCatchable, &vm());
</span><span class="cx">     Jump isCatchableException = branchTest32(NonZero, returnValueGPR);
</span><span class="cx">     jumpToExceptionHandler(vm());
</span><span class="lines">@@ -965,7 +954,7 @@
</span><span class="cx">     // argument type proofs, storing locals to the buffer, etc
</span><span class="cx">     // https://bugs.webkit.org/show_bug.cgi?id=175598
</span><span class="cx"> 
</span><del>-    auto& metadata = bytecode.metadata(m_profiledCodeBlock);
</del><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
</ins><span class="cx">     ValueProfileAndVirtualRegisterBuffer* buffer = metadata.m_buffer;
</span><span class="cx">     if (buffer || !shouldEmitProfiling())
</span><span class="cx">         callOperationNoExceptionCheck(operationTryOSREnterAtCatch, &vm(), m_bytecodeIndex.asBits());
</span><span class="lines">@@ -1007,8 +996,8 @@
</span><span class="cx">     VirtualRegister scrutinee = bytecode.m_scrutinee;
</span><span class="cx"> 
</span><span class="cx">     // create jump table for switch destinations, track this switch statement.
</span><del>-    const UnlinkedSimpleJumpTable& unlinkedTable = m_profiledCodeBlock->unlinkedSwitchJumpTable(tableIndex);
-    SimpleJumpTable& linkedTable = m_switchJumpTables[tableIndex];
</del><ins>+    const UnlinkedSimpleJumpTable& unlinkedTable = m_codeBlock->unlinkedSwitchJumpTable(tableIndex);
+    SimpleJumpTable& linkedTable = m_codeBlock->switchJumpTable(tableIndex);
</ins><span class="cx">     m_switches.append(SwitchRecord(tableIndex, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
</span><span class="cx">     linkedTable.ensureCTITable(unlinkedTable);
</span><span class="cx"> 
</span><span class="lines">@@ -1034,13 +1023,13 @@
</span><span class="cx">     VirtualRegister scrutinee = bytecode.m_scrutinee;
</span><span class="cx"> 
</span><span class="cx">     // create jump table for switch destinations, track this switch statement.
</span><del>-    const UnlinkedSimpleJumpTable& unlinkedTable = m_profiledCodeBlock->unlinkedSwitchJumpTable(tableIndex);
-    SimpleJumpTable& linkedTable = m_switchJumpTables[tableIndex];
</del><ins>+    const UnlinkedSimpleJumpTable& unlinkedTable = m_codeBlock->unlinkedSwitchJumpTable(tableIndex);
+    SimpleJumpTable& linkedTable = m_codeBlock->switchJumpTable(tableIndex);
</ins><span class="cx">     m_switches.append(SwitchRecord(tableIndex, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
</span><span class="cx">     linkedTable.ensureCTITable(unlinkedTable);
</span><span class="cx"> 
</span><span class="cx">     emitLoad(scrutinee, regT1, regT0);
</span><del>-    callOperation(operationSwitchCharWithUnknownKeyType, m_profiledCodeBlock->globalObject(), JSValueRegs(regT1, regT0), tableIndex, unlinkedTable.m_min);
</del><ins>+    callOperation(operationSwitchCharWithUnknownKeyType, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0), tableIndex, unlinkedTable.m_min);
</ins><span class="cx">     farJump(returnValueGPR, NoPtrTag);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -1052,13 +1041,13 @@
</span><span class="cx">     VirtualRegister scrutinee = bytecode.m_scrutinee;
</span><span class="cx"> 
</span><span class="cx">     // create jump table for switch destinations, track this switch statement.
</span><del>-    const UnlinkedStringJumpTable& unlinkedTable = m_profiledCodeBlock->unlinkedStringSwitchJumpTable(tableIndex);
-    StringJumpTable& linkedTable = m_stringSwitchJumpTables[tableIndex];
</del><ins>+    const UnlinkedStringJumpTable& unlinkedTable = m_codeBlock->unlinkedStringSwitchJumpTable(tableIndex);
+    StringJumpTable& linkedTable = m_codeBlock->stringSwitchJumpTable(tableIndex);
</ins><span class="cx">     m_switches.append(SwitchRecord(tableIndex, m_bytecodeIndex, defaultOffset, SwitchRecord::String));
</span><span class="cx">     linkedTable.ensureCTITable(unlinkedTable);
</span><span class="cx"> 
</span><span class="cx">     emitLoad(scrutinee, regT1, regT0);
</span><del>-    callOperation(operationSwitchStringWithUnknownKeyType, m_profiledCodeBlock->globalObject(), JSValueRegs(regT1, regT0), tableIndex);
</del><ins>+    callOperation(operationSwitchStringWithUnknownKeyType, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0), tableIndex);
</ins><span class="cx">     farJump(returnValueGPR, NoPtrTag);
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="lines">@@ -1079,7 +1068,7 @@
</span><span class="cx">     // Even though JIT code doesn't use them, we initialize our constant
</span><span class="cx">     // registers to zap stale pointers, to avoid unnecessarily prolonging
</span><span class="cx">     // object lifetime and increasing GC pressure.
</span><del>-    for (int i = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters(); i < m_profiledCodeBlock->numVars(); ++i)
</del><ins>+    for (int i = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters(); i < m_codeBlock->numVars(); ++i)
</ins><span class="cx">         emitStore(virtualRegisterForLocal(i), jsUndefined());
</span><span class="cx"> 
</span><span class="cx">     JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_enter);
</span><span class="lines">@@ -1098,7 +1087,7 @@
</span><span class="cx"> void JIT::emit_op_create_this(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><span class="cx">     auto bytecode = currentInstruction->as<OpCreateThis>();
</span><del>-    auto& metadata = bytecode.metadata(m_profiledCodeBlock);
</del><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
</ins><span class="cx">     VirtualRegister callee = bytecode.m_callee;
</span><span class="cx">     WriteBarrierBase<JSCell>* cachedFunction = &metadata.m_cachedCallee;
</span><span class="cx">     RegisterID calleeReg = regT0;
</span><span class="lines">@@ -1133,7 +1122,7 @@
</span><span class="cx"> void JIT::emit_op_to_this(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><span class="cx">     auto bytecode = currentInstruction->as<OpToThis>();
</span><del>-    auto& metadata = bytecode.metadata(m_profiledCodeBlock);
</del><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
</ins><span class="cx">     StructureID* cachedStructureID = &metadata.m_cachedStructureID;
</span><span class="cx">     VirtualRegister thisRegister = bytecode.m_srcDst;
</span><span class="cx"> 
</span><span class="lines">@@ -1156,7 +1145,7 @@
</span><span class="cx"> void JIT::emit_op_profile_type(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><span class="cx">     auto bytecode = currentInstruction->as<OpProfileType>();
</span><del>-    auto& metadata = bytecode.metadata(m_profiledCodeBlock);
</del><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
</ins><span class="cx">     TypeLocation* cachedTypeLocation = metadata.m_typeLocation;
</span><span class="cx">     VirtualRegister valueToProfile = bytecode.m_targetVirtualRegister;
</span><span class="cx"> 
</span><span class="lines">@@ -1250,7 +1239,7 @@
</span><span class="cx">     emitLoadTag(bytecode.m_thisValue, regT1);
</span><span class="cx">     JSValueRegs thisRegs(regT1, regT2);
</span><span class="cx">     emitLoadPayload(bytecode.m_scope, regT3);
</span><del>-    logShadowChickenTailPacket(shadowPacketReg, thisRegs, regT3, TrustedImmPtr(m_profiledCodeBlock), CallSiteIndex(m_bytecodeIndex));
</del><ins>+    logShadowChickenTailPacket(shadowPacketReg, thisRegs, regT3, m_codeBlock, CallSiteIndex(m_bytecodeIndex));
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> } // namespace JSC
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorejitJITOperationscpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/JITOperations.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/JITOperations.cpp        2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/JITOperations.cpp   2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -31,7 +31,6 @@
</span><span class="cx"> #include "ArithProfile.h"
</span><span class="cx"> #include "ArrayConstructor.h"
</span><span class="cx"> #include "CacheableIdentifierInlines.h"
</span><del>-#include "CodeBlockInlines.h"
</del><span class="cx"> #include "CommonSlowPathsInlines.h"
</span><span class="cx"> #include "DFGDriver.h"
</span><span class="cx"> #include "DFGOSREntry.h"
</span><span class="lines">@@ -2856,7 +2855,7 @@
</span><span class="cx">     JSValue key = JSValue::decode(encodedKey);
</span><span class="cx">     CodeBlock* codeBlock = callFrame->codeBlock();
</span><span class="cx"> 
</span><del>-    const SimpleJumpTable& linkedTable = codeBlock->baselineSwitchJumpTable(tableIndex);
</del><ins>+    const SimpleJumpTable& linkedTable = codeBlock->switchJumpTable(tableIndex);
</ins><span class="cx">     ASSERT(codeBlock->unlinkedSwitchJumpTable(tableIndex).m_min == min);
</span><span class="cx">     void* result = linkedTable.m_ctiDefault.executableAddress();
</span><span class="cx"> 
</span><span class="lines">@@ -2881,7 +2880,7 @@
</span><span class="cx">     JSValue key = JSValue::decode(encodedKey);
</span><span class="cx">     CodeBlock* codeBlock = callFrame->codeBlock();
</span><span class="cx"> 
</span><del>-    const SimpleJumpTable& linkedTable = codeBlock->baselineSwitchJumpTable(tableIndex);
</del><ins>+    const SimpleJumpTable& linkedTable = codeBlock->switchJumpTable(tableIndex);
</ins><span class="cx">     ASSERT(codeBlock->unlinkedSwitchJumpTable(tableIndex).m_min == min);
</span><span class="cx">     void* result;
</span><span class="cx">     if (key.isInt32())
</span><span class="lines">@@ -2904,7 +2903,7 @@
</span><span class="cx">     auto throwScope = DECLARE_THROW_SCOPE(vm);
</span><span class="cx"> 
</span><span class="cx">     void* result;
</span><del>-    const StringJumpTable& linkedTable = codeBlock->baselineStringSwitchJumpTable(tableIndex);
</del><ins>+    const StringJumpTable& linkedTable = codeBlock->stringSwitchJumpTable(tableIndex);
</ins><span class="cx"> 
</span><span class="cx">     if (key.isString()) {
</span><span class="cx">         StringImpl* value = asString(key)->value(globalObject).impl();
</span><span class="lines">@@ -2920,6 +2919,7 @@
</span><span class="cx">     return reinterpret_cast<char*>(result);
</span><span class="cx"> }
</span><span class="cx"> 
</span><ins>+#if ENABLE(EXTRA_CTI_THUNKS)
</ins><span class="cx"> JSC_DEFINE_JIT_OPERATION(operationResolveScopeForBaseline, EncodedJSValue, (JSGlobalObject* globalObject, const Instruction* pc))
</span><span class="cx"> {
</span><span class="cx">     VM& vm = globalObject->vm();
</span><span class="lines">@@ -2971,6 +2971,7 @@
</span><span class="cx"> 
</span><span class="cx">     return JSValue::encode(resolvedScope);
</span><span class="cx"> }
</span><ins>+#endif
</ins><span class="cx"> 
</span><span class="cx"> JSC_DEFINE_JIT_OPERATION(operationGetFromScope, EncodedJSValue, (JSGlobalObject* globalObject, const Instruction* pc))
</span><span class="cx"> {
</span></span></pre></div>
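<p>In the switch-related operations above, lookups go back to the per-CodeBlock jump tables (codeBlock-&gt;switchJumpTable(tableIndex) / stringSwitchJumpTable(tableIndex)), with unlinkedTable.m_min as the lowest covered key and m_ctiDefault as the fallback target. As a rough, self-contained illustration of that shape (hypothetical types, not the JSC ones), a dense jump table maps keys in [min, min + size) to code addresses and sends every other key to the default:</p>
<pre>
#include &lt;cstdint&gt;
#include &lt;cstdio&gt;
#include &lt;vector&gt;

// Illustrative dense jump table with a default fallback, in the spirit of
// SimpleJumpTable + operationSwitchImmWithUnknownKeyType. Not the JSC types.
struct JumpTableSketch {
    int32_t min = 0;                  // smallest case value covered by the table
    std::vector&lt;const void*&gt; targets; // one slot per value in [min, min + size)
    const void* defaultTarget = nullptr;

    const void* lookup(int32_t key) const
    {
        int64_t index = int64_t(key) - min;
        if (index &lt; 0 || index &gt;= int64_t(targets.size()) || !targets[index])
            return defaultTarget;     // out-of-range keys and holes fall back to default
        return targets[index];
    }
};

int main()
{
    static const char caseA[] = "case 10", caseB[] = "case 12", fallback[] = "default";
    JumpTableSketch table { 10, { caseA, nullptr, caseB }, fallback };

    std::printf("%s\n", static_cast&lt;const char*&gt;(table.lookup(10))); // case 10
    std::printf("%s\n", static_cast&lt;const char*&gt;(table.lookup(11))); // default (hole)
    std::printf("%s\n", static_cast&lt;const char*&gt;(table.lookup(99))); // default
    return 0;
}
</pre>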
<a id="trunkSourceJavaScriptCorejitJITOperationsh"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/JITOperations.h (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/JITOperations.h  2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/JITOperations.h     2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -290,7 +290,9 @@
</span><span class="cx"> JSC_DECLARE_JIT_OPERATION(operationSwitchCharWithUnknownKeyType, char*, (JSGlobalObject*, EncodedJSValue key, size_t tableIndex, int32_t min));
</span><span class="cx"> JSC_DECLARE_JIT_OPERATION(operationSwitchImmWithUnknownKeyType, char*, (VM*, EncodedJSValue key, size_t tableIndex, int32_t min));
</span><span class="cx"> JSC_DECLARE_JIT_OPERATION(operationSwitchStringWithUnknownKeyType, char*, (JSGlobalObject*, EncodedJSValue key, size_t tableIndex));
</span><ins>+#if ENABLE(EXTRA_CTI_THUNKS)
</ins><span class="cx"> JSC_DECLARE_JIT_OPERATION(operationResolveScopeForBaseline, EncodedJSValue, (JSGlobalObject*, const Instruction* bytecodePC));
</span><ins>+#endif
</ins><span class="cx"> JSC_DECLARE_JIT_OPERATION(operationGetFromScope, EncodedJSValue, (JSGlobalObject*, const Instruction* bytecodePC));
</span><span class="cx"> JSC_DECLARE_JIT_OPERATION(operationPutToScope, void, (JSGlobalObject*, const Instruction* bytecodePC));
</span><span class="cx"> 
</span></span></pre></div>
<a id="trunkSourceJavaScriptCorejitJITPlancpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/JITPlan.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/JITPlan.cpp      2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/JITPlan.cpp 2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -87,12 +87,7 @@
</span><span class="cx"> 
</span><span class="cx"> JITCompilationKey JITPlan::key()
</span><span class="cx"> {
</span><del>-    JSCell* codeBlock;
-    if (m_mode == JITCompilationMode::Baseline)
-        codeBlock = m_codeBlock->unlinkedCodeBlock();
-    else
-        codeBlock = m_codeBlock->baselineAlternative();
-    return JITCompilationKey(codeBlock, m_mode);
</del><ins>+    return JITCompilationKey(m_codeBlock->baselineAlternative(), m_mode);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> bool JITPlan::isKnownToBeLiveAfterGC()
</span></span></pre></div>
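<p>The JITPlan::key() change keys every plan, baseline included, on the code block's baseline alternative again. The key exists to deduplicate work on the concurrent compilation worklist: a (code block, compilation mode) pair identifies a plan, so the same compilation is not queued twice. Below is a toy, self-contained sketch of that idea; the names are hypothetical and this is not the JSC worklist API.</p>
<pre>
#include &lt;cstdio&gt;
#include &lt;map&gt;
#include &lt;utility&gt;

// Toy model of compilation-plan deduplication keyed by (code block, mode).
// Names are hypothetical; this is not the JSC worklist.
enum class Mode { Baseline, DFG, FTL };
using CompilationKey = std::pair&lt;const void*, Mode&gt;;

struct Worklist {
    std::map&lt;CompilationKey, int&gt; plans; // key -&gt; plan id

    bool enqueue(const void* baselineCodeBlock, Mode mode, int planId)
    {
        // emplace returns false if a plan with the same key is already queued
        return plans.emplace(CompilationKey { baselineCodeBlock, mode }, planId).second;
    }
};

int main()
{
    int codeBlockToken = 0; // stands in for the baseline CodeBlock*
    Worklist worklist;

    std::printf("%d\n", worklist.enqueue(&amp;codeBlockToken, Mode::DFG, 1)); // 1: accepted
    std::printf("%d\n", worklist.enqueue(&amp;codeBlockToken, Mode::DFG, 2)); // 0: duplicate key
    std::printf("%d\n", worklist.enqueue(&amp;codeBlockToken, Mode::FTL, 3)); // 1: different mode
    return 0;
}
</pre>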
<a id="trunkSourceJavaScriptCorejitJITPropertyAccesscpp"></a>
<div class="modfile"><h4>Modified: trunk/Source/JavaScriptCore/jit/JITPropertyAccess.cpp (283088 => 283089)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/JavaScriptCore/jit/JITPropertyAccess.cpp    2021-09-26 17:23:15 UTC (rev 283088)
+++ trunk/Source/JavaScriptCore/jit/JITPropertyAccess.cpp       2021-09-26 21:20:52 UTC (rev 283089)
</span><span class="lines">@@ -49,113 +49,92 @@
</span><span class="cx"> void JIT::emit_op_get_by_val(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><span class="cx">     auto bytecode = currentInstruction->as<OpGetByVal>();
</span><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
</ins><span class="cx">     VirtualRegister dst = bytecode.m_dst;
</span><span class="cx">     VirtualRegister base = bytecode.m_base;
</span><span class="cx">     VirtualRegister property = bytecode.m_property;
</span><ins>+    ArrayProfile* profile = &metadata.m_arrayProfile;
</ins><span class="cx"> 
</span><del>-    constexpr GPRReg baseGPR = BaselineGetByValRegisters::base;
-    constexpr GPRReg propertyGPR = BaselineGetByValRegisters::property;
-    constexpr GPRReg scratchGPR = BaselineGetByValRegisters::scratch;
-    constexpr GPRReg stubInfoGPR = BaselineGetByValRegisters::stubInfo;
</del><ins>+    emitGetVirtualRegister(base, regT0);
+    emitGetVirtualRegister(property, regT1);
</ins><span class="cx"> 
</span><del>-    emitGetVirtualRegister(base, baseGPR);
-    emitGetVirtualRegister(property, propertyGPR);
-
-    if (bytecode.metadata(m_profiledCodeBlock).m_seenIdentifiers.count() > Options::getByValICMaxNumberOfIdentifiers()) {
-        auto notCell = branchIfNotCell(baseGPR);
-        emitArrayProfilingSiteWithCell(bytecode, baseGPR, scratchGPR);
</del><ins>+    if (metadata.m_seenIdentifiers.count() > Options::getByValICMaxNumberOfIdentifiers()) {
+        auto notCell = branchIfNotCell(regT0);
+        emitArrayProfilingSiteWithCell(regT0, profile, regT2);
</ins><span class="cx">         notCell.link(this);
</span><del>-        loadGlobalObject(scratchGPR);
-        callOperationWithProfile(bytecode, operationGetByVal, dst, scratchGPR, baseGPR, propertyGPR);
</del><ins>+        callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByVal, dst, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1);
</ins><span class="cx">     } else {
</span><del>-        emitJumpSlowCaseIfNotJSCell(baseGPR, base);
-        emitArrayProfilingSiteWithCell(bytecode, baseGPR, scratchGPR);
</del><ins>+        emitJumpSlowCaseIfNotJSCell(regT0, base);
+        emitArrayProfilingSiteWithCell(regT0, profile, regT2);
</ins><span class="cx"> 
</span><del>-        JSValueRegs resultRegs = JSValueRegs(BaselineGetByValRegisters::result);
</del><ins>+        JSValueRegs resultRegs = JSValueRegs(regT0);
</ins><span class="cx"> 
</span><span class="cx">         JITGetByValGenerator gen(
</span><del>-            nullptr, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), AccessType::GetByVal, RegisterSet::stubUnavailableRegisters(),
-            JSValueRegs(baseGPR), JSValueRegs(propertyGPR), resultRegs, stubInfoGPR);
-
</del><ins>+            m_codeBlock, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), AccessType::GetByVal, RegisterSet::stubUnavailableRegisters(),
+            JSValueRegs(regT0), JSValueRegs(regT1), resultRegs, regT2);
</ins><span class="cx">         if (isOperandConstantInt(property))
</span><span class="cx">             gen.stubInfo()->propertyIsInt32 = true;
</span><del>-
-        UnlinkedStructureStubInfo* stubInfo = m_unlinkedStubInfos.add();
-        stubInfo->accessType = AccessType::GetByVal;
-        stubInfo->bytecodeIndex = m_bytecodeIndex;
-        JITConstantPool::Constant stubInfoIndex = m_constantPool.add(JITConstantPool::Type::StructureStubInfo, stubInfo);
-        gen.m_unlinkedStubInfoConstantIndex = stubInfoIndex;
-        gen.m_unlinkedStubInfo = stubInfo;
-
-        gen.generateBaselineDataICFastPath(*this, stubInfoIndex, stubInfoGPR);
-        resetSP(); // We might OSR exit here, so we need to conservatively reset SP
-
-        addSlowCase();
</del><ins>+        gen.generateFastPath(*this);
+        if (!JITCode::useDataIC(JITType::BaselineJIT))
+            addSlowCase(gen.slowPathJump());
+        else
+            addSlowCase();
</ins><span class="cx">         m_getByVals.append(gen);
</span><span class="cx"> 
</span><del>-        emitValueProfilingSite(bytecode, resultRegs);
</del><ins>+        emitValueProfilingSite(bytecode.metadata(m_codeBlock), resultRegs);
</ins><span class="cx">         emitPutVirtualRegister(dst);
</span><span class="cx">     }
</span><ins>+
</ins><span class="cx"> }
</span><span class="cx"> 
</span><del>-#if !OS(WINDOWS)
-static constexpr GPRReg viableArgumentGPR4 = GPRInfo::argumentGPR4;
-static constexpr GPRReg viableArgumentGPR5 = GPRInfo::argumentGPR5;
-#else
-static constexpr GPRReg viableArgumentGPR4 = GPRInfo::nonArgGPR0;
-static constexpr GPRReg viableArgumentGPR5 = GPRInfo::nonArgGPR1;
-#endif
-
</del><span class="cx"> template<typename OpcodeType>
</span><span class="cx"> void JIT::generateGetByValSlowCase(const OpcodeType& bytecode, Vector<SlowCaseEntry>::iterator& iter)
</span><span class="cx"> {
</span><del>-    if (!hasAnySlowCases(iter))
-        return;
</del><ins>+    if (hasAnySlowCases(iter)) {
+        VirtualRegister dst = bytecode.m_dst;
+        auto& metadata = bytecode.metadata(m_codeBlock);
+        ArrayProfile* profile = &metadata.m_arrayProfile;
</ins><span class="cx"> 
</span><del>-    VirtualRegister dst = bytecode.m_dst;
</del><ins>+        linkAllSlowCases(iter);
</ins><span class="cx"> 
</span><del>-    linkAllSlowCases(iter);
</del><ins>+        JITGetByValGenerator& gen = m_getByVals[m_getByValIndex++];
</ins><span class="cx"> 
</span><del>-    JITGetByValGenerator& gen = m_getByVals[m_getByValIndex++];
</del><ins>+        Label coldPathBegin = label();
</ins><span class="cx"> 
</span><del>-    Label coldPathBegin = label();
-
</del><span class="cx"> #if !ENABLE(EXTRA_CTI_THUNKS)
</span><del>-    static_assert(argumentGPR3 != BaselineGetByValRegisters::property);
-    move(BaselineGetByValRegisters::base, argumentGPR3);
-    move(BaselineGetByValRegisters::property, viableArgumentGPR4);
-    loadGlobalObject(argumentGPR0);
-    loadConstant(gen.m_unlinkedStubInfoConstantIndex, argumentGPR1);
-    materializePointerIntoMetadata(bytecode, OpcodeType::Metadata::offsetOfArrayProfile(), argumentGPR2);
-    callOperationWithProfile<decltype(operationGetByValOptimize)>(bytecode, Address(argumentGPR1, StructureStubInfo::offsetOfSlowOperation()), dst, argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3, viableArgumentGPR4);
</del><ins>+        Call call = callOperationWithProfile(metadata, operationGetByValOptimize, dst, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), profile, regT0, regT1);
</ins><span class="cx"> #else
</span><del>-    VM& vm = this->vm();
-    uint32_t bytecodeOffset = m_bytecodeIndex.offset();
-    ASSERT(BytecodeIndex(bytecodeOffset) == m_bytecodeIndex);
</del><ins>+        VM& vm = this->vm();
+        uint32_t bytecodeOffset = m_bytecodeIndex.offset();
+        ASSERT(BytecodeIndex(bytecodeOffset) == m_bytecodeIndex);
</ins><span class="cx"> 
</span><del>-    constexpr GPRReg bytecodeOffsetGPR = argumentGPR4;
-    move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
</del><ins>+        constexpr GPRReg bytecodeOffsetGPR = argumentGPR4;
+        move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
</ins><span class="cx"> 
</span><del>-    constexpr GPRReg stubInfoGPR = argumentGPR3; // arg1 arg1 already used.
-    constexpr GPRReg profileGPR = argumentGPR2;
-    constexpr GPRReg baseGPR = regT0;
-    constexpr GPRReg propertyGPR = regT1;
-    static_assert(baseGPR == argumentGPR0 || !isARM64());
-    static_assert(propertyGPR == argumentGPR1);
-    static_assert(BaselineGetByValRegisters::base == regT0);
-    static_assert(BaselineGetByValRegisters::property == regT1);
</del><ins>+        constexpr GPRReg stubInfoGPR = argumentGPR3; // arg1 arg1 already used.
+        constexpr GPRReg profileGPR = argumentGPR2;
+        constexpr GPRReg baseGPR = regT0;
+        constexpr GPRReg propertyGPR = regT1;
+        static_assert(baseGPR == argumentGPR0 || !isARM64());
+        static_assert(propertyGPR == argumentGPR1);
</ins><span class="cx"> 
</span><del>-    loadConstant(gen.m_unlinkedStubInfoConstantIndex, stubInfoGPR);
-    materializePointerIntoMetadata(bytecode, OpcodeType::Metadata::offsetOfArrayProfile(), profileGPR);
-    emitNakedNearCall(vm.getCTIStub(slow_op_get_by_val_prepareCallGenerator).retaggedCode<NoPtrTag>());
</del><ins>+        move(TrustedImmPtr(gen.stubInfo()), stubInfoGPR);
+        move(TrustedImmPtr(profile), profileGPR);
+        emitNakedNearCall(vm.getCTIStub(slow_op_get_by_val_prepareCallGenerator).retaggedCode<NoPtrTag>());
</ins><span class="cx"> 
</span><del>-    emitNakedNearCall(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>());
</del><ins>+        Call call;
+        if (JITCode::useDataIC(JITType::BaselineJIT))
+            gen.stubInfo()->m_slowOperation = operationGetByValOptimize;
+        else
+            call = appendCall(operationGetByValOptimize);
+        emitNakedNearCall(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>());
</ins><span class="cx"> 
</span><del>-    emitValueProfilingSite(bytecode, returnValueGPR);
-    emitPutVirtualRegister(dst, returnValueGPR);
</del><ins>+        emitValueProfilingSite(metadata, returnValueGPR);
+        emitPutVirtualRegister(dst, returnValueGPR);
</ins><span class="cx"> #endif // ENABLE(EXTRA_CTI_THUNKS)
</span><span class="cx"> 
</span><del>-    gen.reportSlowPathCall(coldPathBegin, Call());
</del><ins>+        gen.reportSlowPathCall(coldPathBegin, call);
+    }
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void JIT::emitSlow_op_get_by_val(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
</span><span class="lines">@@ -170,7 +149,7 @@
</span><span class="cx">     // it makes assumptions about the right globalObject being available from CallFrame::codeBlock().
</span><span class="cx">     // DFG/FTL may inline functions belonging to other globalObjects, which may not match
</span><span class="cx">     // CallFrame::codeBlock().
</span><del>-    CCallHelpers jit;
</del><ins>+    JIT jit(vm);
</ins><span class="cx"> 
</span><span class="cx">     if (!JITCode::useDataIC(JITType::BaselineJIT))
</span><span class="cx">         jit.tagReturnAddress();
</span><span class="lines">@@ -181,8 +160,8 @@
</span><span class="cx">     constexpr GPRReg globalObjectGPR = argumentGPR5;
</span><span class="cx">     constexpr GPRReg stubInfoGPR = argumentGPR3;
</span><span class="cx">     constexpr GPRReg profileGPR = argumentGPR2;
</span><del>-    constexpr GPRReg baseGPR = BaselineGetByValRegisters::base;
-    constexpr GPRReg propertyGPR = BaselineGetByValRegisters::property;
</del><ins>+    constexpr GPRReg baseGPR = regT0;
+    constexpr GPRReg propertyGPR = regT1;
</ins><span class="cx">     static_assert(baseGPR == argumentGPR0 || !isARM64());
</span><span class="cx">     static_assert(propertyGPR == argumentGPR1);
</span><span class="cx"> 
</span><span class="lines">@@ -208,34 +187,27 @@
</span><span class="cx">     VirtualRegister dst = bytecode.m_dst;
</span><span class="cx">     VirtualRegister base = bytecode.m_base;
</span><span class="cx">     VirtualRegister property = bytecode.m_property;
</span><del>-
-    constexpr GPRReg baseGPR = BaselineGetByValRegisters::base;
-    constexpr GPRReg propertyGPR = BaselineGetByValRegisters::property;
-    constexpr GPRReg stubInfoGPR = BaselineGetByValRegisters::stubInfo;
-    JSValueRegs resultRegs = JSValueRegs(BaselineGetByValRegisters::result);
-
</del><ins>+    GPRReg baseGPR = regT0;
+    GPRReg propertyGPR = regT1;
</ins><span class="cx">     emitGetVirtualRegister(base, baseGPR);
</span><span class="cx">     emitGetVirtualRegister(property, propertyGPR);
</span><span class="cx"> 
</span><del>-    emitJumpSlowCaseIfNotJSCell(baseGPR, base);
</del><ins>+    emitJumpSlowCaseIfNotJSCell(regT0, base);
</ins><span class="cx"> 
</span><ins>+    JSValueRegs resultRegs = JSValueRegs(regT0);
+
</ins><span class="cx">     JITGetByValGenerator gen(
</span><del>-        nullptr, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), AccessType::GetPrivateName,
-        RegisterSet::stubUnavailableRegisters(), JSValueRegs(baseGPR), JSValueRegs(propertyGPR), resultRegs, stubInfoGPR);
-
-    UnlinkedStructureStubInfo* stubInfo = m_unlinkedStubInfos.add();
-    stubInfo->accessType = AccessType::GetPrivateName;
-    stubInfo->bytecodeIndex = m_bytecodeIndex;
-    JITConstantPool::Constant stubInfoIndex = m_constantPool.add(JITConstantPool::Type::StructureStubInfo, stubInfo);
-    gen.m_unlinkedStubInfoConstantIndex = stubInfoIndex;
-    gen.m_unlinkedStubInfo = stubInfo;
-
-    gen.generateBaselineDataICFastPath(*this, stubInfoIndex, stubInfoGPR);
-    addSlowCase();
</del><ins>+        m_codeBlock, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), AccessType::GetPrivateName,
+        RegisterSet::stubUnavailableRegisters(), JSValueRegs(baseGPR), JSValueRegs(propertyGPR), resultRegs, regT2);
+    gen.generateFastPath(*this);
+    if (!JITCode::useDataIC(JITType::BaselineJIT))
+        addSlowCase(gen.slowPathJump());
+    else
+        addSlowCase();
</ins><span class="cx">     m_getByVals.append(gen);
</span><span class="cx"> 
</span><del>-    emitValueProfilingSite(bytecode, resultRegs);
-    emitPutVirtualRegister(dst, resultRegs);
</del><ins>+    emitValueProfilingSite(bytecode.metadata(m_codeBlock), resultRegs);
+    emitPutVirtualRegister(dst);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void JIT::emitSlow_op_get_private_name(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
</span><span class="lines">@@ -242,6 +214,7 @@
</span><span class="cx"> {
</span><span class="cx">     ASSERT(hasAnySlowCases(iter));
</span><span class="cx">     auto bytecode = currentInstruction->as<OpGetPrivateName>();
</span><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
</ins><span class="cx">     VirtualRegister dst = bytecode.m_dst;
</span><span class="cx"> 
</span><span class="cx">     linkAllSlowCases(iter);
</span><span class="lines">@@ -250,11 +223,9 @@
</span><span class="cx">     Label coldPathBegin = label();
</span><span class="cx"> 
</span><span class="cx"> #if !ENABLE(EXTRA_CTI_THUNKS)
</span><del>-    loadGlobalObject(argumentGPR0);
-    loadConstant(gen.m_unlinkedStubInfoConstantIndex, argumentGPR1);
-    emitGetVirtualRegister(bytecode.m_base, argumentGPR2);
-    emitGetVirtualRegister(bytecode.m_property, argumentGPR3);
-    callOperationWithProfile<decltype(operationGetPrivateNameOptimize)>(bytecode, Address(argumentGPR1, StructureStubInfo::offsetOfSlowOperation()), dst, argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3);
</del><ins>+    constexpr GPRReg baseGPR = regT0;
+    constexpr GPRReg propertyGPR = regT1;
+    Call call = callOperationWithProfile(metadata, operationGetPrivateNameOptimize, dst, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), baseGPR, propertyGPR);
</ins><span class="cx"> #else
</span><span class="cx">     VM& vm = this->vm();
</span><span class="cx">     uint32_t bytecodeOffset = m_bytecodeIndex.offset();
</span><span class="lines">@@ -264,20 +235,26 @@
</span><span class="cx">     move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
</span><span class="cx"> 
</span><span class="cx">     constexpr GPRReg stubInfoGPR = argumentGPR2; // arg1 already used.
</span><del>-    constexpr GPRReg baseGPR = BaselineGetByValRegisters::base;
-    constexpr GPRReg propertyGPR = BaselineGetByValRegisters::property;
</del><ins>+    constexpr GPRReg baseGPR = regT0;
+    constexpr GPRReg propertyGPR = regT1;
</ins><span class="cx">     static_assert(baseGPR == argumentGPR0 || !isARM64());
</span><span class="cx">     static_assert(propertyGPR == argumentGPR1);
</span><span class="cx"> 
</span><del>-    loadConstant(gen.m_unlinkedStubInfoConstantIndex, stubInfoGPR);
</del><ins>+    move(TrustedImmPtr(gen.stubInfo()), stubInfoGPR);
</ins><span class="cx">     emitNakedNearCall(vm.getCTIStub(slow_op_get_private_name_prepareCallGenerator).retaggedCode<NoPtrTag>());
</span><ins>+
+    Call call;
+    if (JITCode::useDataIC(JITType::BaselineJIT))
+        gen.stubInfo()->m_slowOperation = operationGetPrivateNameOptimize;
+    else
+        call = appendCall(operationGetPrivateNameOptimize);
</ins><span class="cx">     emitNakedNearCall(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>());
</span><span class="cx"> 
</span><del>-    emitValueProfilingSite(bytecode, returnValueGPR);
</del><ins>+    emitValueProfilingSite(metadata, returnValueGPR);
</ins><span class="cx">     emitPutVirtualRegister(dst, returnValueGPR);
</span><span class="cx"> #endif // ENABLE(EXTRA_CTI_THUNKS)
</span><span class="cx"> 
</span><del>-    gen.reportSlowPathCall(coldPathBegin, Call());
</del><ins>+    gen.reportSlowPathCall(coldPathBegin, call);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> #if ENABLE(EXTRA_CTI_THUNKS)
</span><span class="lines">@@ -287,7 +264,7 @@
</span><span class="cx">     // it makes assumptions about the right globalObject being available from CallFrame::codeBlock().
</span><span class="cx">     // DFG/FTL may inline functions belonging to other globalObjects, which may not match
</span><span class="cx">     // CallFrame::codeBlock().
</span><del>-    CCallHelpers jit;
</del><ins>+    JIT jit(vm);
</ins><span class="cx"> 
</span><span class="cx">     if (!JITCode::useDataIC(JITType::BaselineJIT))
</span><span class="cx">         jit.tagReturnAddress();
</span><span class="lines">@@ -323,28 +300,21 @@
</span><span class="cx">     auto bytecode = currentInstruction->as<OpSetPrivateBrand>();
</span><span class="cx">     VirtualRegister base = bytecode.m_base;
</span><span class="cx">     VirtualRegister brand = bytecode.m_brand;
</span><del>-
-    constexpr GPRReg baseGPR = BaselinePrivateBrandRegisters::base;
-    constexpr GPRReg brandGPR = BaselinePrivateBrandRegisters::brand;
-    constexpr GPRReg stubInfoGPR = BaselinePrivateBrandRegisters::stubInfo;
-
</del><ins>+    GPRReg baseGPR = regT0;
+    GPRReg brandGPR = regT1;
</ins><span class="cx">     emitGetVirtualRegister(base, baseGPR);
</span><span class="cx">     emitGetVirtualRegister(brand, brandGPR);
</span><ins>+
</ins><span class="cx">     emitJumpSlowCaseIfNotJSCell(baseGPR, base);
</span><span class="cx"> 
</span><span class="cx">     JITPrivateBrandAccessGenerator gen(
</span><del>-        nullptr, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), AccessType::SetPrivateBrand, RegisterSet::stubUnavailableRegisters(),
-        JSValueRegs(baseGPR), JSValueRegs(brandGPR), stubInfoGPR);
-
-    UnlinkedStructureStubInfo* stubInfo = m_unlinkedStubInfos.add();
-    stubInfo->accessType = AccessType::SetPrivateBrand;
-    stubInfo->bytecodeIndex = m_bytecodeIndex;
-    JITConstantPool::Constant stubInfoIndex = m_constantPool.add(JITConstantPool::Type::StructureStubInfo, stubInfo);
-    gen.m_unlinkedStubInfoConstantIndex = stubInfoIndex;
-    gen.m_unlinkedStubInfo = stubInfo;
-
-    gen.generateBaselineDataICFastPath(*this, stubInfoIndex, stubInfoGPR);
-    addSlowCase();
</del><ins>+        m_codeBlock, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), AccessType::SetPrivateBrand, RegisterSet::stubUnavailableRegisters(),
+        JSValueRegs(baseGPR), JSValueRegs(brandGPR), regT2);
+    gen.generateFastPath(*this);
+    if (!JITCode::useDataIC(JITType::BaselineJIT))
+        addSlowCase(gen.slowPathJump());
+    else
+        addSlowCase();
</ins><span class="cx">     m_privateBrandAccesses.append(gen);
</span><span class="cx"> 
</span><span class="cx">     // We should emit write-barrier at the end of sequence since write-barrier clobbers registers.
</span><span class="lines">@@ -354,10 +324,8 @@
</span><span class="cx">     emitWriteBarrier(base, ShouldFilterBase);
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-void JIT::emitSlow_op_set_private_brand(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
</del><ins>+void JIT::emitSlow_op_set_private_brand(const Instruction*, Vector<SlowCaseEntry>::iterator& iter)
</ins><span class="cx"> {
</span><del>-    UNUSED_PARAM(currentInstruction);
-
</del><span class="cx">     linkAllSlowCases(iter);
</span><span class="cx"> 
</span><span class="cx">     JITPrivateBrandAccessGenerator& gen = m_privateBrandAccesses[m_privateBrandAccessIndex++];
</span><span class="lines">@@ -364,12 +332,9 @@
</span><span class="cx">     Label coldPathBegin = label();
</span><span class="cx"> 
</span><span class="cx"> #if !ENABLE(EXTRA_CTI_THUNKS)
</span><del>-    auto bytecode = currentInstruction->as<OpSetPrivateBrand>();
-    loadGlobalObject(argumentGPR0);
-    loadConstant(gen.m_unlinkedStubInfoConstantIndex, argumentGPR1);
-    emitGetVirtualRegister(bytecode.m_base, argumentGPR2);
-    emitGetVirtualRegister(bytecode.m_brand, argumentGPR3);
-    callOperation<decltype(operationSetPrivateBrandOptimize)>(Address(argumentGPR1, StructureStubInfo::offsetOfSlowOperation()), argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3);
</del><ins>+    constexpr GPRReg baseGPR = regT0;
+    constexpr GPRReg brandGPR = regT1;
+    Call call = callOperation(operationSetPrivateBrandOptimize, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), baseGPR, brandGPR);
</ins><span class="cx"> #else
</span><span class="cx">     VM& vm = this->vm();
</span><span class="cx">     uint32_t bytecodeOffset = m_bytecodeIndex.offset();
</span><span class="lines">@@ -379,18 +344,24 @@
</span><span class="cx">     move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
</span><span class="cx"> 
</span><span class="cx">     constexpr GPRReg stubInfoGPR = argumentGPR2; // arg1 already used.
</span><del>-    constexpr GPRReg baseGPR = BaselinePrivateBrandRegisters::base;
-    constexpr GPRReg propertyGPR = BaselinePrivateBrandRegisters::brand;
</del><ins>+    constexpr GPRReg baseGPR = regT0;
+    constexpr GPRReg propertyGPR = regT1;
</ins><span class="cx">     static_assert(baseGPR == argumentGPR0 || !isARM64());
</span><span class="cx">     static_assert(propertyGPR == argumentGPR1);
</span><span class="cx"> 
</span><del>-    loadConstant(gen.m_unlinkedStubInfoConstantIndex, stubInfoGPR);
</del><ins>+    move(TrustedImmPtr(gen.stubInfo()), stubInfoGPR);
</ins><span class="cx">     static_assert(std::is_same<FunctionTraits<decltype(operationSetPrivateBrandOptimize)>::ArgumentTypes, FunctionTraits<decltype(operationGetPrivateNameOptimize)>::ArgumentTypes>::value);
</span><span class="cx">     emitNakedNearCall(vm.getCTIStub(slow_op_get_private_name_prepareCallGenerator).retaggedCode<NoPtrTag>());
</span><ins>+
+    Call call;
+    if (JITCode::useDataIC(JITType::BaselineJIT))
+        gen.stubInfo()->m_slowOperation = operationSetPrivateBrandOptimize;
+    else
+        call = appendCall(operationSetPrivateBrandOptimize);
</ins><span class="cx">     emitNakedNearCall(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>());
</span><span class="cx"> #endif
</span><span class="cx"> 
</span><del>-    gen.reportSlowPathCall(coldPathBegin, Call());
</del><ins>+    gen.reportSlowPathCall(coldPathBegin, call);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_check_private_brand(const Instruction* currentInstruction)
</span><span class="lines">@@ -399,47 +370,33 @@
</span><span class="cx">     VirtualRegister base = bytecode.m_base;
</span><span class="cx">     VirtualRegister brand = bytecode.m_brand;
</span><span class="cx"> 
</span><del>-    constexpr GPRReg baseGPR = BaselinePrivateBrandRegisters::base;
-    constexpr GPRReg brandGPR = BaselinePrivateBrandRegisters::brand;
-    constexpr GPRReg stubInfoGPR = BaselinePrivateBrandRegisters::stubInfo;
</del><ins>+    emitGetVirtualRegister(base, regT0);
+    emitGetVirtualRegister(brand, regT1);
</ins><span class="cx"> 
</span><del>-    emitGetVirtualRegister(base, baseGPR);
-    emitGetVirtualRegister(brand, brandGPR);
</del><ins>+    emitJumpSlowCaseIfNotJSCell(regT0, base);
</ins><span class="cx"> 
</span><del>-    emitJumpSlowCaseIfNotJSCell(baseGPR, base);
-
</del><span class="cx">     JITPrivateBrandAccessGenerator gen(
</span><del>-        nullptr, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), AccessType::CheckPrivateBrand, RegisterSet::stubUnavailableRegisters(),
-        JSValueRegs(baseGPR), JSValueRegs(brandGPR), stubInfoGPR);
-
-    UnlinkedStructureStubInfo* stubInfo = m_unlinkedStubInfos.add();
-    stubInfo->accessType = AccessType::CheckPrivateBrand;
-    stubInfo->bytecodeIndex = m_bytecodeIndex;
-    JITConstantPool::Constant stubInfoIndex = m_constantPool.add(JITConstantPool::Type::StructureStubInfo, stubInfo);
-    gen.m_unlinkedStubInfoConstantIndex = stubInfoIndex;
-    gen.m_unlinkedStubInfo = stubInfo;
-
-    gen.generateBaselineDataICFastPath(*this, stubInfoIndex, stubInfoGPR);
-    addSlowCase();
</del><ins>+        m_codeBlock, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), AccessType::CheckPrivateBrand, RegisterSet::stubUnavailableRegisters(),
+        JSValueRegs(regT0), JSValueRegs(regT1), regT2);
+    gen.generateFastPath(*this);
+    if (!JITCode::useDataIC(JITType::BaselineJIT))
+        addSlowCase(gen.slowPathJump());
+    else
+        addSlowCase();
</ins><span class="cx">     m_privateBrandAccesses.append(gen);
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-void JIT::emitSlow_op_check_private_brand(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
</del><ins>+void JIT::emitSlow_op_check_private_brand(const Instruction*, Vector<SlowCaseEntry>::iterator& iter)
</ins><span class="cx"> {
</span><span class="cx">     linkAllSlowCases(iter);
</span><span class="cx"> 
</span><del>-    auto bytecode = currentInstruction->as<OpCheckPrivateBrand>();
-    UNUSED_PARAM(bytecode);
-
</del><span class="cx">     JITPrivateBrandAccessGenerator& gen = m_privateBrandAccesses[m_privateBrandAccessIndex++];
</span><span class="cx">     Label coldPathBegin = label();
</span><span class="cx"> 
</span><span class="cx"> #if !ENABLE(EXTRA_CTI_THUNKS)
</span><del>-    loadGlobalObject(argumentGPR0);
-    loadConstant(gen.m_unlinkedStubInfoConstantIndex, argumentGPR1);
-    emitGetVirtualRegister(bytecode.m_base, argumentGPR2);
-    emitGetVirtualRegister(bytecode.m_brand, argumentGPR3);
-    callOperation<decltype(operationCheckPrivateBrandOptimize)>(Address(argumentGPR1, StructureStubInfo::offsetOfSlowOperation()), argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3);
</del><ins>+    constexpr GPRReg baseGPR = regT0;
+    constexpr GPRReg brandGPR = regT1;
+    Call call = callOperation(operationCheckPrivateBrandOptimize, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), baseGPR, brandGPR);
</ins><span class="cx"> #else
</span><span class="cx">     VM& vm = this->vm();
</span><span class="cx">     uint32_t bytecodeOffset = m_bytecodeIndex.offset();
</span><span class="lines">@@ -449,18 +406,24 @@
</span><span class="cx">     move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
</span><span class="cx"> 
</span><span class="cx">     constexpr GPRReg stubInfoGPR = argumentGPR2; // arg1 already used.
</span><del>-    constexpr GPRReg baseGPR = BaselinePrivateBrandRegisters::base;
-    constexpr GPRReg propertyGPR = BaselinePrivateBrandRegisters::brand;
</del><ins>+    constexpr GPRReg baseGPR = regT0;
+    constexpr GPRReg propertyGPR = regT1;
</ins><span class="cx">     static_assert(baseGPR == argumentGPR0 || !isARM64());
</span><span class="cx">     static_assert(propertyGPR == argumentGPR1);
</span><span class="cx"> 
</span><del>-    loadConstant(gen.m_unlinkedStubInfoConstantIndex, stubInfoGPR);
</del><ins>+    move(TrustedImmPtr(gen.stubInfo()), stubInfoGPR);
</ins><span class="cx">     static_assert(std::is_same<FunctionTraits<decltype(operationCheckPrivateBrandOptimize)>::ArgumentTypes, FunctionTraits<decltype(operationGetPrivateNameOptimize)>::ArgumentTypes>::value);
</span><span class="cx">     emitNakedNearCall(vm.getCTIStub(slow_op_get_private_name_prepareCallGenerator).retaggedCode<NoPtrTag>());
</span><ins>+
+    Call call;
+    if (JITCode::useDataIC(JITType::BaselineJIT))
+        gen.stubInfo()->m_slowOperation = operationCheckPrivateBrandOptimize;
+    else
+        call = appendCall(operationCheckPrivateBrandOptimize);
</ins><span class="cx">     emitNakedNearCall(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>());
</span><span class="cx"> #endif // ENABLE(EXTRA_CTI_THUNKS)
</span><span class="cx"> 
</span><del>-    gen.reportSlowPathCall(coldPathBegin, Call());
</del><ins>+    gen.reportSlowPathCall(coldPathBegin, call);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_put_by_val_direct(const Instruction* currentInstruction)
</span><span class="lines">@@ -472,43 +435,30 @@
</span><span class="cx"> void JIT::emit_op_put_by_val(const Instruction* currentInstruction)
</span><span class="cx"> {
</span><span class="cx">     auto bytecode = currentInstruction->as<Op>();
</span><ins>+    auto& metadata = bytecode.metadata(m_codeBlock);
</ins><span class="cx">     VirtualRegister base = bytecode.m_base;
</span><span class="cx">     VirtualRegister property = bytecode.m_property;
</span><span class="cx">     VirtualRegister value = bytecode.m_value;
</span><ins>+    ArrayProfile* profile = &metadata.m_arrayProfile;
</ins><span class="cx"> 
</span><del>-    constexpr GPRReg baseGPR = BaselinePutByValRegisters::base;
-    constexpr GPRReg propertyGPR = BaselinePutByValRegisters::property;
-    constexpr GPRReg valueGPR = BaselinePutByValRegisters::value;
-    constexpr GPRReg profileGPR = BaselinePutByValRegisters::profile;
-    constexpr GPRReg stubInfoGPR = BaselinePutByValRegisters::stubInfo;
</del><ins>+    emitGetVirtualRegister(base, regT0);
+    emitGetVirtualRegister(property, regT1);
+    emitGetVirtualRegister(value, regT2);
+    move(TrustedImmPtr(profile), regT3);
</ins><span class="cx"> 
</span><del>-    emitGetVirtualRegister(base, baseGPR);
-    emitGetVirtualRegister(property, propertyGPR);
-    emitGetVirtualRegister(value, valueGPR);
</del><ins>+    emitJumpSlowCaseIfNotJSCell(regT0, base);
+    emitArrayProfilingSiteWithCell(regT0, regT3, regT4);
</ins><span class="cx"> 
</span><del>-    emitJumpSlowCaseIfNotJSCell(baseGPR, base);
-    emitArrayProfilingSiteWithCell(bytecode, baseGPR, profileGPR);
-    materializePointerIntoMetadata(bytecode, Op::Metadata::offsetOfArrayProfile(), profileGPR);
-
</del><span class="cx">     JITPutByValGenerator gen(
</span><del>-        nullptr, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), AccessType::PutByVal, RegisterSet::stubUnavailableRegisters(),
-        JSValueRegs(baseGPR), JSValueRegs(propertyGPR), JSValueRegs(valueGPR), profileGPR, stubInfoGPR);
-
</del><ins>+        m_codeBlock, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), AccessType::PutByVal, RegisterSet::stubUnavailableRegisters(),
+        JSValueRegs(regT0), JSValueRegs(regT1), JSValueRegs(regT2), regT3, regT4);
</ins><span class="cx">     if (isOperandConstantInt(property))
</span><span class="cx">         gen.stubInfo()->propertyIsInt32 = true;
</span><del>-
-    UnlinkedStructureStubInfo* stubInfo = m_unlinkedStubInfos.add();
-    stubInfo->accessType = AccessType::PutByVal;
-    stubInfo->putKind = std::is_same_v<Op, OpPutByValDirect> ? PutKind::Direct : PutKind::NotDirect;
-    stubInfo->ecmaMode = ecmaMode(bytecode);
-    stubInfo->bytecodeIndex = m_bytecodeIndex;
-    JITConstantPool::Constant stubInfoIndex = m_constantPool.add(JITConstantPool::Type::StructureStubInfo, stubInfo);
-    gen.m_unlinkedStubInfoConstantIndex = stubInfoIndex;
-    gen.m_unlinkedStubInfo = stubInfo;
-
-    gen.generateBaselineDataICFastPath(*this, stubInfoIndex, stubInfoGPR);
-    resetSP(); // We might OSR exit here, so we need to conservatively reset SP
-    addSlowCase();
</del><ins>+    gen.generateFastPath(*this);
+    if (!JITCode::useDataIC(JITType::BaselineJIT))
+        addSlowCase(gen.slowPathJump());
+    else
+        addSlowCase();
</ins><span class="cx">     m_putByVals.append(gen);
</span><span class="cx"> 
</span><span class="cx">     // IC can write new Structure without write-barrier if a base is cell.
</span><span class="lines">@@ -524,6 +474,7 @@
</span><span class="cx">     VirtualRegister property;
</span><span class="cx">     VirtualRegister value;
</span><span class="cx">     ECMAMode ecmaMode = ECMAMode::strict();
</span><ins>+    ArrayProfile* profile = nullptr;
</ins><span class="cx"> 
</span><span class="cx">     auto load = [&](auto bytecode) {
</span><span class="cx">         base = bytecode.m_base;
</span><span class="lines">@@ -530,6 +481,8 @@
</span><span class="cx">         property = bytecode.m_property;
</span><span class="cx">         value = bytecode.m_value;
</span><span class="cx">         ecmaMode = bytecode.m_ecmaMode;
</span><ins>+        auto& metadata = bytecode.metadata(m_codeBlock);
+        profile = &metadata.m_arrayProfile;
</ins><span class="cx">     };
</span><span class="cx"> 
</span><span class="cx">     if (isDirect)
</span><span class="lines">@@ -545,16 +498,7 @@
</span><span class="cx"> 
</span><span class="cx"> #if !ENABLE(EXTRA_CTI_THUNKS)
</span><span class="cx">     // They are configured in the fast path and not clobbered.
</span><del>-    loadGlobalObject(argumentGPR0);
-    emitGetVirtualRegister(base, argumentGPR1);
-    emitGetVirtualRegister(property, argumentGPR2);
-    emitGetVirtualRegister(value, argumentGPR3);
-    loadConstant(gen.m_unlinkedStubInfoConstantIndex, viableArgumentGPR4);
-    if (isDirect)
-        materializePointerIntoMetadata(currentInstruction->as<OpPutByValDirect>(), OpPutByValDirect::Metadata::offsetOfArrayProfile(), viableArgumentGPR5);
-    else
-        materializePointerIntoMetadata(currentInstruction->as<OpPutByVal>(), OpPutByVal::Metadata::offsetOfArrayProfile(), viableArgumentGPR5);
-    callOperation<decltype(operationPutByValStrictOptimize)>(Address(viableArgumentGPR4, StructureStubInfo::offsetOfSlowOperation()), argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3, viableArgumentGPR4, viableArgumentGPR5);
</del><ins>+    Call call = callOperation(isDirect ? (ecmaMode.isStrict() ? operationDirectPutByValStrictOptimize : operationDirectPutByValNonStrictOptimize) : (ecmaMode.isStrict() ? operationPutByValStrictOptimize : operationPutByValNonStrictOptimize), TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1, regT2, gen.stubInfo(), regT3);
</ins><span class="cx"> #else
</span><span class="cx">     VM& vm = this->vm();
</span><span class="cx">     uint32_t bytecodeOffset = m_bytecodeIndex.offset();
</span><span class="lines">@@ -568,7 +512,7 @@
</span><span class="cx">     constexpr GPRReg stubInfoGPR = regT4;
</span><span class="cx">     constexpr GPRReg bytecodeOffsetGPR = regT5;
</span><span class="cx">     move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
</span><del>-    loadConstant(gen.m_unlinkedStubInfoConstantIndex, stubInfoGPR);
</del><ins>+    move(TrustedImmPtr(gen.stubInfo()), stubInfoGPR);
</ins><span class="cx">     emitNakedNearCall(vm.getCTIStub(slow_op_put_by_val_prepareCallGenerator).retaggedCode<NoPtrTag>());
</span><span class="cx">     Call call;
</span><span class="cx">     auto operation = isDirect ? (ecmaMode.isStrict() ? operationDirectPutByValStrictOptimize : operationDirectPutByValNonStrictOptimize) : (ecmaMode.isStrict() ? operationPutByValStrictOptimize : operationPutByValNonStrictOptimize);
</span><span class="lines">@@ -579,7 +523,7 @@
</span><span class="cx">     emitNakedNearCall(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>());
</span><span class="cx"> #endif // ENABLE(EXTRA_CTI_THUNKS)
</span><span class="cx"> 
</span><del>-    gen.reportSlowPathCall(coldPathBegin, Call());
</del><ins>+    gen.reportSlowPathCall(coldPathBegin, call);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> #if ENABLE(EXTRA_CTI_THUNKS)
</span><span class="lines">@@ -589,25 +533,19 @@
</span><span class="cx">     // it makes assumptions about the right globalObject being available from CallFrame::codeBlock().
</span><span class="cx">     // DFG/FTL may inline functions belonging to other globalObjects, which may not match
</span><span class="cx">     // CallFrame::codeBlock().
</span><del>-    CCallHelpers jit;
</del><ins>+    JIT jit(vm);
</ins><span class="cx"> 
</span><span class="cx">     if (!JITCode::useDataIC(JITType::BaselineJIT))
</span><span class="cx">         jit.tagReturnAddress();
</span><span class="cx"> 
</span><span class="cx">     constexpr GPRReg globalObjectGPR = regT5;
</span><del>-    constexpr GPRReg baseGPR = BaselinePutByValRegisters::base;
-    constexpr GPRReg propertyGPR = BaselinePutByValRegisters::property;
-    constexpr GPRReg valueGPR = BaselinePutByValRegisters::value;
</del><ins>+    constexpr GPRReg baseGPR = regT0;
+    constexpr GPRReg propertyGPR = regT1;
+    constexpr GPRReg valueGPR = regT2;
</ins><span class="cx">     constexpr GPRReg stubInfoGPR = regT4;
</span><del>-    constexpr GPRReg profileGPR = BaselinePutByValRegisters::profile;
</del><ins>+    constexpr GPRReg profileGPR = regT3;
</ins><span class="cx">     constexpr GPRReg bytecodeOffsetGPR = regT5;
</span><del>-    {
-        RegisterSet used(BaselinePutByValRegisters::base, BaselinePutByValRegisters::property, BaselinePutByValRegisters::value, BaselinePutByValRegisters::profile);
-        ASSERT(!used.contains(regT4));
-        ASSERT(!used.contains(regT5));
-    }
</del><span class="cx"> 
</span><del>-
</del><span class="cx">     jit.store32(bytecodeOffsetGPR, tagFor(CallFrameSlot::argumentCountIncludingThis));
</span><span class="cx">     jit.loadPtr(addressFor(CallFrameSlot::codeBlock), globalObjectGPR);
</span><span class="cx">     jit.loadPtr(Address(globalObjectGPR, CodeBlock::offsetOfGlobalObject()), globalObjectGPR);
</span><span class="lines">@@ -632,31 +570,20 @@
</span><span class="cx">     VirtualRegister property = bytecode.m_property;
</span><span class="cx">     VirtualRegister value = bytecode.m_value;
</span><span class="cx"> 
</span><del>-    constexpr GPRReg baseGPR = BaselinePutByValRegisters::base;
-    constexpr GPRReg propertyGPR = BaselinePutByValRegisters::property;
-    constexpr GPRReg valueGPR = BaselinePutByValRegisters::value;
-    constexpr GPRReg stubInfoGPR = BaselinePutByValRegisters::stubInfo;
</del><ins>+    emitGetVirtualRegister(base, regT0);
+    emitGetVirtualRegister(property, regT1);
+    emitGetVirtualRegister(value, regT2);
</ins><span class="cx"> 
</span><del>-    emitGetVirtualRegister(base, baseGPR);
-    emitGetVirtualRegister(property, propertyGPR);
-    emitGetVirtualRegister(value, valueGPR);
</del><ins>+    emitJumpSlowCaseIfNotJSCell(regT0, base);
</ins><span class="cx"> 
</span><del>-    emitJumpSlowCaseIfNotJSCell(baseGPR, base);
-
</del><span class="cx">     JITPutByValGenerator gen(
</span><del>-        nullptr, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), AccessType::PutPrivateName, RegisterSet::stubUnavailableRegisters(),
-        JSValueRegs(baseGPR), JSValueRegs(propertyGPR), JSValueRegs(valueGPR), InvalidGPRReg, stubInfoGPR);
-
-    UnlinkedStructureStubInfo* stubInfo = m_unlinkedStubInfos.add();
-    stubInfo->accessType = AccessType::PutPrivateName;
-    stubInfo->privateFieldPutKind = bytecode.m_putKind;
-    stubInfo->bytecodeIndex = m_bytecodeIndex;
-    JITConstantPool::Constant stubInfoIndex = m_constantPool.add(JITConstantPool::Type::StructureStubInfo, stubInfo);
-    gen.m_unlinkedStubInfoConstantIndex = stubInfoIndex;
-    gen.m_unlinkedStubInfo = stubInfo;
-
-    gen.generateBaselineDataICFastPath(*this, stubInfoIndex, stubInfoGPR);
-    addSlowCase();
</del><ins>+        m_codeBlock, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), AccessType::PutByVal, RegisterSet::stubUnavailableRegisters(),
+        JSValueRegs(regT0), JSValueRegs(regT1), JSValueRegs(regT2), InvalidGPRReg, regT4);
+    gen.generateFastPath(*this);
+    if (!JITCode::useDataIC(JITType::BaselineJIT))
+        addSlowCase(gen.slowPathJump());
+    else
+        addSlowCase();
</ins><span class="cx">     m_putByVals.append(gen);
</span><span class="cx"> 
</span><span class="cx">     // IC can write new Structure without write-barrier if a base is cell.
</span><span class="lines">@@ -667,7 +594,8 @@
</span><span class="cx"> 
</span><span class="cx"> void JIT::emitSlow_op_put_private_name(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
</span><span class="cx"> {
</span><del>-    UNUSED_PARAM(currentInstruction);
</del><ins>+    auto bytecode = currentInstruction->as<OpPutPrivateName>();
+    PrivateFieldPutKind putKind = bytecode.m_putKind;
</ins><span class="cx"> 
</span><span class="cx">     JITPutByValGenerator& gen = m_putByVals[m_putByValIndex++];
</span><span class="cx"> 
</span><span class="lines">@@ -675,15 +603,13 @@
</span><span class="cx"> 
</span><span class="cx">     Label coldPathBegin = label();
</span><span class="cx"> 
</span><ins>+    auto operation = putKind.isDefine() ? operationPutByValDefinePrivateFieldOptimize : operationPutByValSetPrivateFieldOptimize;
</ins><span class="cx"> #if !ENABLE(EXTRA_CTI_THUNKS)
</span><del>-    auto bytecode = currentInstruction->as<OpPutPrivateName>();
-
-    loadGlobalObject(argumentGPR0);
-    emitGetVirtualRegister(bytecode.m_base, argumentGPR1);
-    emitGetVirtualRegister(bytecode.m_property, argumentGPR2);
-    emitGetVirtualRegister(bytecode.m_value, argumentGPR3);
-    loadConstant(gen.m_unlinkedStubInfoConstantIndex, viableArgumentGPR4);
-    callOperation<decltype(operationPutByValDefinePrivateFieldOptimize)>(Address(viableArgumentGPR4, StructureStubInfo::offsetOfSlowOperation()), argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3, viableArgumentGPR4, TrustedImmPtr(nullptr));
</del><ins>+    // They are configured in the fast path and not clobbered.
+    constexpr GPRReg baseGPR = regT0;
+    constexpr GPRReg propertyGPR = regT1;
+    constexpr GPRReg valueGPR = regT2;
+    Call call = callOperation(operation, TrustedImmPtr(m_codeBlock->globalObject()), baseGPR, propertyGPR, valueGPR, gen.stubInfo(), TrustedImmPtr(nullptr));
</ins><span class="cx"> #else
</span><span class="cx">     VM& vm = this->vm();
</span><span class="cx">     uint32_t bytecodeOffset = m_bytecodeIndex.offset();
</span><span class="lines">@@ -696,12 +622,18 @@
</span><span class="cx">     constexpr GPRReg bytecodeOffsetGPR = regT4;
</span><span class="cx"> 
</span><span class="cx">     move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
</span><del>-    loadConstant(gen.m_unlinkedStubInfoConstantIndex, stubInfoGPR);
</del><ins>+    move(TrustedImmPtr(gen.stubInfo()), stubInfoGPR);
</ins><span class="cx">     emitNakedNearCall(vm.getCTIStub(slow_op_put_private_name_prepareCallGenerator).retaggedCode<NoPtrTag>());
</span><ins>+
+    Call call;
+    if (JITCode::useDataIC(JITType::BaselineJIT))
+        gen.stubInfo()->m_slowOperation = operation;
+    else
+        call = appendCall(operation);
</ins><span class="cx">     emitNakedNearCall(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>());
</span><span class="cx"> #endif // ENABLE(EXTRA_CTI_THUNKS)
</span><span class="cx"> 
</span><del>-    gen.reportSlowPathCall(coldPathBegin, Call());
</del><ins>+    gen.reportSlowPathCall(coldPathBegin, call);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> #if ENABLE(EXTRA_CTI_THUNKS)
</span><span class="lines">@@ -711,14 +643,14 @@
</span><span class="cx">     // it makes assumptions about the right globalObject being available from CallFrame::codeBlock().
</span><span class="cx">     // DFG/FTL may inline functions belonging to other globalObjects, which may not match
</span><span class="cx">     // CallFrame::codeBlock().
</span><del>-    CCallHelpers jit;
</del><ins>+    JIT jit(vm);
</ins><span class="cx"> 
</span><span class="cx">     if (!JITCode::useDataIC(JITType::BaselineJIT))
</span><span class="cx">         jit.tagReturnAddress();
</span><span class="cx"> 
</span><del>-    constexpr GPRReg baseGPR = BaselinePutByValRegisters::base;
-    constexpr GPRReg propertyGPR = BaselinePutByValRegisters::property;
-    constexpr GPRReg valueGPR = BaselinePutByValRegisters::value;
</del><ins>+    constexpr GPRReg baseGPR = regT0;
+    constexpr GPRReg propertyGPR = regT1;
+    constexpr GPRReg valueGPR = regT2;
</ins><span class="cx">     constexpr GPRReg stubInfoGPR = regT3;
</span><span class="cx">     constexpr GPRReg bytecodeOffsetGPR = regT4;
</span><span class="cx"> 
</span><span class="lines">@@ -748,8 +680,7 @@
</span><span class="cx">     emitGetVirtualRegister(bytecode.m_base, regT0);
</span><span class="cx">     int32_t options = bytecode.m_attributes;
</span><span class="cx">     emitGetVirtualRegister(bytecode.m_accessor, regT1);
</span><del>-    loadGlobalObject(regT2);
-    callOperation(operationPutGetterById, regT2, regT0, m_unlinkedCodeBlock->identifier(bytecode.m_property).impl(), options, regT1);
</del><ins>+    callOperation(operationPutGetterById, TrustedImmPtr(m_codeBlock->globalObject()), regT0, m_codeBlock->identifier(bytecode.m_property).impl(), options, regT1);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_put_setter_by_id(const Instruction* currentInstruction)
</span><span class="lines">@@ -758,8 +689,7 @@
</span><span class="cx">     emitGetVirtualRegister(bytecode.m_base, regT0);
</span><span class="cx">     int32_t options = bytecode.m_attributes;
</span><span class="cx">     emitGetVirtualRegister(bytecode.m_accessor, regT1);
</span><del>-    loadGlobalObject(regT2);
-    callOperation(operationPutSetterById, regT2, regT0, m_unlinkedCodeBlock->identifier(bytecode.m_property).impl(), options, regT1);
</del><ins>+    callOperation(operationPutSetterById, TrustedImmPtr(m_codeBlock->globalObject()), regT0, m_codeBlock->identifier(bytecode.m_property).impl(), options, regT1);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_put_getter_setter_by_id(const Instruction* currentInstruction)
</span><span class="lines">@@ -769,8 +699,7 @@
</span><span class="cx">     int32_t attribute = bytecode.m_attributes;
</span><span class="cx">     emitGetVirtualRegister(bytecode.m_getter, regT1);
</span><span class="cx">     emitGetVirtualRegister(bytecode.m_setter, regT2);
</span><del>-    loadGlobalObject(regT3);
-    callOperation(operationPutGetterSetter, regT3, regT0, m_unlinkedCodeBlock->identifier(bytecode.m_property).impl(), attribute, regT1, regT2);
</del><ins>+    callOperation(operationPutGetterSetter, TrustedImmPtr(m_codeBlock->globalObject()), regT0, m_codeBlock->identifier(bytecode.m_property).impl(), attribute, regT1, regT2);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_put_getter_by_val(const Instruction* currentInstruction)
</span><span class="lines">@@ -780,8 +709,7 @@
</span><span class="cx">     emitGetVirtualRegister(bytecode.m_property, regT1);
</span><span class="cx">     int32_t attributes = bytecode.m_attributes;
</span><span class="cx">     emitGetVirtualRegister(bytecode.m_accessor, regT2);
</span><del>-    loadGlobalObject(regT3);
-    callOperation(operationPutGetterByVal, regT3, regT0, regT1, attributes, regT2);
</del><ins>+    callOperation(operationPutGetterByVal, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1, attributes, regT2);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_put_setter_by_val(const Instruction* currentInstruction)
</span><span class="lines">@@ -791,8 +719,7 @@
</span><span class="cx">     emitGetVirtualRegister(bytecode.m_property, regT1);
</span><span class="cx">     int32_t attributes = bytecode.m_attributes;
</span><span class="cx">     emitGetVirtualRegister(bytecode.m_accessor, regT2);
</span><del>-    loadGlobalObject(regT3);
-    callOperation(operationPutSetterByVal, regT3, regT0, regT1, attributes, regT2);
</del><ins>+    callOperation(operationPutSetterByVal, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1, attributes, regT2);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void JIT::emit_op_del_by_id(const Instruction* currentInstruction)
</span><span class="lines">@@ -800,33 +727,23 @@
</span><span class="cx">     auto bytecode = currentInstruction->as<OpDelById>();
</span><span class="cx">     VirtualRegister dst = bytecode.m_dst;
</span><span class="cx">     VirtualRegister base = bytecode.m_base;
</span><del>-    const Identifier* ident = &(m_unlinkedCodeBlock->identifier(bytecode.m_property));
</del><ins>+    const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property));
</ins><span class="cx"> 
</span><del>-    constexpr GPRReg baseGPR = BaselineDelByIdRegisters::base;
-    constexpr GPRReg resultGPR = BaselineDelByIdRegisters::result;
-    constexpr GPRReg stubInfoGPR = BaselineDelByIdRegisters::stubInfo;
-    constexpr GPRReg scratchGPR = BaselineDelByIdRegisters::scratch;
-
-    emitGetVirtualRegister(base, baseGPR);
-  &nb