WebKit Bugzilla
Attachment 349473 Details for
Bug 187373
: New bytecode format for JSC
Home
|
New
|
Browse
|
Search
|
[?]
|
Reports
|
Requests
|
Help
|
New Account
|
Log In
Remember
[x]
|
Forgot Password
Login:
[x]
[patch]
Patch
bug-187373-20180912002215.patch (text/plain), 940.42 KB, created by
Tadeu Zagallo
on 2018-09-11 15:22:18 PDT
(
hide
)
Description:
Patch
Filename:
MIME Type:
Creator:
Tadeu Zagallo
Created:
2018-09-11 15:22:18 PDT
Size:
940.42 KB
patch
obsolete
>Subversion Revision: 234092 >diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog >index ef79ffda4221f29db15ccadf6d983a72b0d87a86..ec2e4fef865dc31a635711d09568bbe54b4caddd 100644 >--- a/Source/JavaScriptCore/ChangeLog >+++ b/Source/JavaScriptCore/ChangeLog >@@ -1,3 +1,25 @@ >+2018-07-05 Tadeu Zagallo <tzagallo@apple.com> >+ >+ New bytecode format for JSC >+ https://bugs.webkit.org/show_bug.cgi?id=187373 >+ >+ Reviewed by NOBODY (OOPS!). >+ >+ Work in progress for the new bytecode format. For now, there's just a >+ handful of docs that I've experimenting with as to how should we >+ declare the opcodes, how should we generate the code and what the >+ generated code should look like. >+ >+ * wip_bytecode/README.md: Briefly documents the goals of for the new >+ bytecode and how it's going work. Still missing a lot of info though. >+ * wip_bytecode/bytecode_generator.rb: Some hacky ruby that I'm >+ considering using for the generating the C++ code for the opcodes >+ * wip_bytecode/bytecode_structs.cpp: Some hacky C++ experiments of >+ what could/should the API for the generated opcodes look like. >+ * wip_bytecode/opcodes.yaml: A list of all the opcodes, with names and >+ types for its arguments and metadata. No idea why it ended up being a >+ yaml file, but if all is well I'll migrate it to the ruby syntax above. 
>+ > 2018-07-22 Yusuke Suzuki <utatane.tea@gmail.com> > > [JSC] GetByIdVariant and InByIdVariant do not need slot base if they are not "hit" variants >diff --git a/Source/JavaScriptCore/CMakeLists.txt b/Source/JavaScriptCore/CMakeLists.txt >index 3691cf274ed190e3a2f7763bd807162f736c7bda..a39528c5a20ea6f255a18d71bc3a03533102ad82 100644 >--- a/Source/JavaScriptCore/CMakeLists.txt >+++ b/Source/JavaScriptCore/CMakeLists.txt >@@ -200,11 +200,29 @@ set(OFFLINE_ASM > offlineasm/x86.rb > ) > >+set(GENERATOR >+ generator/Argument.rb >+ generator/Assertion.rb >+ generator/DSL.rb >+ generator/Fits.rb >+ generator/GeneratedFile.rb >+ generator/Implementation.rb >+ generator/Interface.rb >+ generator/Metadata.rb >+ generator/Opcode.rb >+ generator/OpcodeGroup.rb >+ generator/Options.rb >+ generator/Section.rb >+ generator/Template.rb >+ generator/Type.rb >+ generator/main.rb >+) >+ > add_custom_command( > OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/Bytecodes.h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InitBytecodes.asm ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/BytecodeStructs.h >- MAIN_DEPENDENCY ${JAVASCRIPTCORE_DIR}/generate-bytecode-files >- DEPENDS ${JAVASCRIPTCORE_DIR}/generate-bytecode-files bytecode/BytecodeList.json >- COMMAND ${PYTHON_EXECUTABLE} ${JAVASCRIPTCORE_DIR}/generate-bytecode-files --bytecodes_h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/Bytecodes.h --init_bytecodes_asm ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InitBytecodes.asm --bytecode_structs_h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/BytecodeStructs.h ${JAVASCRIPTCORE_DIR}/bytecode/BytecodeList.json >+ MAIN_DEPENDENCY ${JAVASCRIPTCORE_DIR}/generator/main.rb >+ DEPENDS ${GENERATOR} bytecode/BytecodeList.rb >+ COMMAND ${RUBY_EXECUTABLE} ${JAVASCRIPTCORE_DIR}/generator/main.rb --bytecodes_h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/Bytecodes.h --init_bytecodes_asm ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InitBytecodes.asm --bytecode_structs_h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/BytecodeStructs.h 
${JAVASCRIPTCORE_DIR}/bytecode/BytecodeList.rb > VERBATIM) > > list(APPEND JavaScriptCore_HEADERS >diff --git a/Source/JavaScriptCore/DerivedSources.make b/Source/JavaScriptCore/DerivedSources.make >index d95cac50b5d6f567a8aeb87d5e390c8d88ff910f..1b161fe56f3ba78767772a9731aea8025e3b2055 100644 >--- a/Source/JavaScriptCore/DerivedSources.make >+++ b/Source/JavaScriptCore/DerivedSources.make >@@ -215,14 +215,8 @@ udis86_itab.h: $(JavaScriptCore)/disassembler/udis86/ud_itab.py $(JavaScriptCore > > # Bytecode files > >-Bytecodes.h: $(JavaScriptCore)/generate-bytecode-files $(JavaScriptCore)/bytecode/BytecodeList.json >- $(PYTHON) $(JavaScriptCore)/generate-bytecode-files --bytecodes_h Bytecodes.h $(JavaScriptCore)/bytecode/BytecodeList.json >- >-BytecodeStructs.h: $(JavaScriptCore)/generate-bytecode-files $(JavaScriptCore)/bytecode/BytecodeList.json >- $(PYTHON) $(JavaScriptCore)/generate-bytecode-files --bytecode_structs_h BytecodeStructs.h $(JavaScriptCore)/bytecode/BytecodeList.json >- >-InitBytecodes.asm: $(JavaScriptCore)/generate-bytecode-files $(JavaScriptCore)/bytecode/BytecodeList.json >- $(PYTHON) $(JavaScriptCore)/generate-bytecode-files --init_bytecodes_asm InitBytecodes.asm $(JavaScriptCore)/bytecode/BytecodeList.json >+Bytecodes.h BytecodeStructs.h InitBytecodes.asm: $(wildcard $(JavaScriptCore)/generator/*.rb) $(JavaScriptCore)/bytecode/BytecodeList.rb >+ $(RUBY) $(JavaScriptCore)/generator/main.rb $(JavaScriptCore)/bytecode/BytecodeList.rb --bytecode_structs_h BytecodeStructs.h --init_bytecodes_asm InitBytecodes.asm --bytecodes_h Bytecodes.h > > # Inspector interfaces > >diff --git a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj b/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj >index 325df6e9eba0c8e84d216e81058c9810d5b310ca..bf9fa87e75b19d1727cb6c61d3e57d79e2415a7c 100644 >--- a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj >+++ b/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj >@@ -14,11 
+14,23 @@ > 0F4680AA14BA7FD900BFE272 /* Generate Derived Sources */, > ); > dependencies = ( >- 65442D5018EBB744007AF92F /* PBXTargetDependency */, >+ 14CC67C4213F0402009B26F0 /* PBXTargetDependency */, > ); > name = "LLInt Offsets"; > productName = "Derived Sources"; > }; >+ 14CC67A5213ECFE2009B26F0 /* LLInt Settings */ = { >+ isa = PBXAggregateTarget; >+ buildConfigurationList = 14CC67A9213ECFE2009B26F0 /* Build configuration list for PBXAggregateTarget "LLInt Settings" */; >+ buildPhases = ( >+ 14CC67A8213ECFE2009B26F0 /* Generate Derived Sources */, >+ ); >+ dependencies = ( >+ 14CC67A6213ECFE2009B26F0 /* PBXTargetDependency */, >+ ); >+ name = "LLInt Settings"; >+ productName = "Derived Sources"; >+ }; > 53B4BD041F68AF8900D2BEA3 /* Generate Unified Sources */ = { > isa = PBXAggregateTarget; > buildConfigurationList = 53B4BD0A1F68AF8900D2BEA3 /* Build configuration list for PBXAggregateTarget "Generate Unified Sources" */; >@@ -804,6 +816,7 @@ > 14BFCE6910CDB1FC00364CCE /* WeakGCMap.h in Headers */ = {isa = PBXBuildFile; fileRef = 14BFCE6810CDB1FC00364CCE /* WeakGCMap.h */; settings = {ATTRIBUTES = (Private, ); }; }; > 14CA958B16AB50DE00938A06 /* StaticPropertyAnalyzer.h in Headers */ = {isa = PBXBuildFile; fileRef = 14CA958A16AB50DE00938A06 /* StaticPropertyAnalyzer.h */; settings = {ATTRIBUTES = (Private, ); }; }; > 14CA958D16AB50FA00938A06 /* ObjectAllocationProfile.h in Headers */ = {isa = PBXBuildFile; fileRef = 14CA958C16AB50FA00938A06 /* ObjectAllocationProfile.h */; settings = {ATTRIBUTES = (Private, ); }; }; >+ 14CC67C2213ED58F009B26F0 /* LLIntSettingsExtractor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 14CC67C0213ED588009B26F0 /* LLIntSettingsExtractor.cpp */; }; > 14D2F3DB139F4BE200491031 /* MarkedSpace.h in Headers */ = {isa = PBXBuildFile; fileRef = 14D2F3D9139F4BE200491031 /* MarkedSpace.h */; settings = {ATTRIBUTES = (Private, ); }; }; > 14DF04DA16B3996D0016A513 /* StaticPropertyAnalysis.h in Headers */ = {isa = PBXBuildFile; fileRef = 
14DF04D916B3996D0016A513 /* StaticPropertyAnalysis.h */; settings = {ATTRIBUTES = (Private, ); }; }; > 14E84F9F14EE1ACC00D6D5D4 /* WeakBlock.h in Headers */ = {isa = PBXBuildFile; fileRef = 14E84F9A14EE1ACC00D6D5D4 /* WeakBlock.h */; settings = {ATTRIBUTES = (Private, ); }; }; >@@ -1235,7 +1248,6 @@ > 969A072B0ED1CE6900F1F681 /* RegisterID.h in Headers */ = {isa = PBXBuildFile; fileRef = 969A07280ED1CE6900F1F681 /* RegisterID.h */; }; > 969A07970ED1D3AE00F1F681 /* CodeBlock.h in Headers */ = {isa = PBXBuildFile; fileRef = 969A07910ED1D3AE00F1F681 /* CodeBlock.h */; settings = {ATTRIBUTES = (Private, ); }; }; > 969A07980ED1D3AE00F1F681 /* DirectEvalCodeCache.h in Headers */ = {isa = PBXBuildFile; fileRef = 969A07920ED1D3AE00F1F681 /* DirectEvalCodeCache.h */; settings = {ATTRIBUTES = (Private, ); }; }; >- 969A07990ED1D3AE00F1F681 /* Instruction.h in Headers */ = {isa = PBXBuildFile; fileRef = 969A07930ED1D3AE00F1F681 /* Instruction.h */; settings = {ATTRIBUTES = (Private, ); }; }; > 969A079B0ED1D3AE00F1F681 /* Opcode.h in Headers */ = {isa = PBXBuildFile; fileRef = 969A07950ED1D3AE00F1F681 /* Opcode.h */; }; > 978801411471AD920041B016 /* JSDateMath.h in Headers */ = {isa = PBXBuildFile; fileRef = 9788FC231471AD0C0068CE2D /* JSDateMath.h */; settings = {ATTRIBUTES = (Private, ); }; }; > 981ED82328234D91BAECCADE /* MachineContext.h in Headers */ = {isa = PBXBuildFile; fileRef = 28806E21155E478A93FA7B02 /* MachineContext.h */; settings = {ATTRIBUTES = (Private, ); }; }; >@@ -1835,6 +1847,27 @@ > remoteGlobalIDString = 0F4680A914BA7FD900BFE272; > remoteInfo = "LLInt Offsets"; > }; >+ 14CC67A7213ECFE2009B26F0 /* PBXContainerItemProxy */ = { >+ isa = PBXContainerItemProxy; >+ containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; >+ proxyType = 1; >+ remoteGlobalIDString = 65FB3F6609D11E9100F49DEB; >+ remoteInfo = "Derived Sources"; >+ }; >+ 14CC67BE213ED459009B26F0 /* PBXContainerItemProxy */ = { >+ isa = PBXContainerItemProxy; >+ containerPortal = 
0867D690FE84028FC02AAC07 /* Project object */; >+ proxyType = 1; >+ remoteGlobalIDString = 14CC67A5213ECFE2009B26F0; >+ remoteInfo = "LLInt Settings"; >+ }; >+ 14CC67C3213F0402009B26F0 /* PBXContainerItemProxy */ = { >+ isa = PBXContainerItemProxy; >+ containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; >+ proxyType = 1; >+ remoteGlobalIDString = 14CC67B0213ED20C009B26F0; >+ remoteInfo = JSCLLIntSettingsExtractor; >+ }; > 53B4BD131F68C2AA00D2BEA3 /* PBXContainerItemProxy */ = { > isa = PBXContainerItemProxy; > containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; >@@ -1884,13 +1917,6 @@ > remoteGlobalIDString = 65FB3F6609D11E9100F49DEB; > remoteInfo = "Derived Sources"; > }; >- 65442D4F18EBB744007AF92F /* PBXContainerItemProxy */ = { >- isa = PBXContainerItemProxy; >- containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; >- proxyType = 1; >- remoteGlobalIDString = 65FB3F6609D11E9100F49DEB; >- remoteInfo = "Derived Sources"; >- }; > 65788A9F18B409EB00C189FF /* PBXContainerItemProxy */ = { > isa = PBXContainerItemProxy; > containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; >@@ -2260,7 +2286,7 @@ > 0F37308E1C0CD68500052BFA /* DisallowMacroScratchRegisterUsage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DisallowMacroScratchRegisterUsage.h; sourceTree = "<group>"; }; > 0F3730901C0CD70C00052BFA /* AllowMacroScratchRegisterUsage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AllowMacroScratchRegisterUsage.h; sourceTree = "<group>"; }; > 0F3730921C0D67EE00052BFA /* AirUseCounts.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = AirUseCounts.h; path = b3/air/AirUseCounts.h; sourceTree = "<group>"; }; >- 0F38B00F17CF077F00B144D3 /* LLIntEntrypoint.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = LLIntEntrypoint.cpp; path = llint/LLIntEntrypoint.cpp; sourceTree = 
"<group>"; }; >+ 0F38B00F17CF077F00B144D3 /* LLIntDesiredSettings.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = LLIntDesiredSettings.h; path = LLIntOffsets/LLIntDesiredSettings.h; sourceTree = BUILT_PRODUCTS_DIR; }; > 0F38B01017CF077F00B144D3 /* LLIntEntrypoint.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = LLIntEntrypoint.h; path = llint/LLIntEntrypoint.h; sourceTree = "<group>"; }; > 0F38B01317CFE75500B144D3 /* DFGCompilationKey.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGCompilationKey.cpp; path = dfg/DFGCompilationKey.cpp; sourceTree = "<group>"; }; > 0F38B01417CFE75500B144D3 /* DFGCompilationKey.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGCompilationKey.h; path = dfg/DFGCompilationKey.h; sourceTree = "<group>"; }; >@@ -3133,6 +3159,7 @@ > 148A7BEE1B82975A002D9157 /* InlineCallFrame.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InlineCallFrame.h; sourceTree = "<group>"; }; > 148CD1D7108CF902008163C6 /* JSContextRefPrivate.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSContextRefPrivate.h; sourceTree = "<group>"; }; > 149559ED0DDCDDF700648087 /* DebuggerCallFrame.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = DebuggerCallFrame.cpp; sourceTree = "<group>"; }; >+ 1498CAD3214656C400710879 /* libWTF.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; path = libWTF.a; sourceTree = BUILT_PRODUCTS_DIR; }; > 149B24FF0D8AF6D1009CB8C7 /* Register.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Register.h; sourceTree = "<group>"; }; > 149DAAF212EB559D0083B12B /* ConservativeRoots.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ConservativeRoots.h; sourceTree = 
"<group>"; }; > 14A1563010966365006FA260 /* DateInstanceCache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DateInstanceCache.h; sourceTree = "<group>"; }; >@@ -3163,6 +3190,10 @@ > 14AD912B1DCAAAB00014F9FE /* UnlinkedFunctionCodeBlock.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = UnlinkedFunctionCodeBlock.cpp; sourceTree = "<group>"; }; > 14B7233F12D7D0DA003BD5ED /* MachineStackMarker.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MachineStackMarker.cpp; sourceTree = "<group>"; }; > 14B7234012D7D0DA003BD5ED /* MachineStackMarker.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MachineStackMarker.h; sourceTree = "<group>"; }; >+ 14BA774F211085F0008D0B05 /* Fits.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Fits.h; sourceTree = "<group>"; }; >+ 14BA7750211085F0008D0B05 /* Instruction.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Instruction.h; sourceTree = "<group>"; }; >+ 14BA7751211086A0008D0B05 /* BytecodeList.rb */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.ruby; path = BytecodeList.rb; sourceTree = "<group>"; }; >+ 14BA7752211A8E5F008D0B05 /* ProfileTypeBytecodeFlag.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ProfileTypeBytecodeFlag.h; sourceTree = "<group>"; }; > 14BA78F013AAB88F005B7C2C /* SlotVisitor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SlotVisitor.h; sourceTree = "<group>"; }; > 14BA7A9513AADFF8005B7C2C /* Heap.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Heap.cpp; sourceTree = "<group>"; }; > 14BA7A9613AADFF8005B7C2C /* Heap.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; path = Heap.h; sourceTree = "<group>"; }; >@@ -3175,6 +3206,11 @@ > 14BFCE6810CDB1FC00364CCE /* WeakGCMap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WeakGCMap.h; sourceTree = "<group>"; }; > 14CA958A16AB50DE00938A06 /* StaticPropertyAnalyzer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = StaticPropertyAnalyzer.h; sourceTree = "<group>"; }; > 14CA958C16AB50FA00938A06 /* ObjectAllocationProfile.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ObjectAllocationProfile.h; sourceTree = "<group>"; }; >+ 14CC3BA0213756B0002D58B6 /* DumpValue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DumpValue.h; sourceTree = "<group>"; }; >+ 14CC3BA12138A238002D58B6 /* InstructionStream.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = InstructionStream.cpp; sourceTree = "<group>"; }; >+ 14CC3BA22138A238002D58B6 /* InstructionStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InstructionStream.h; sourceTree = "<group>"; }; >+ 14CC67BD213ED20C009B26F0 /* JSCLLIntSettingsExtractor */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = JSCLLIntSettingsExtractor; sourceTree = BUILT_PRODUCTS_DIR; }; >+ 14CC67C0213ED588009B26F0 /* LLIntSettingsExtractor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = LLIntSettingsExtractor.cpp; path = llint/LLIntSettingsExtractor.cpp; sourceTree = "<group>"; }; > 14D2F3D8139F4BE200491031 /* MarkedSpace.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MarkedSpace.cpp; sourceTree = "<group>"; }; > 14D2F3D9139F4BE200491031 /* MarkedSpace.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; path = MarkedSpace.h; sourceTree = "<group>"; }; > 14D792640DAA03FB001A9F05 /* CLoopStack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CLoopStack.h; sourceTree = "<group>"; }; >@@ -3542,8 +3578,6 @@ > 6511230514046A4C002B101D /* testRegExp */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = testRegExp; sourceTree = BUILT_PRODUCTS_DIR; }; > 6514F21718B3E1670098FF8B /* Bytecodes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Bytecodes.h; sourceTree = "<group>"; }; > 6514F21818B3E1670098FF8B /* InitBytecodes.asm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm.asm; path = InitBytecodes.asm; sourceTree = "<group>"; }; >- 6529FB3018B2D63900C61102 /* generate-bytecode-files */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.python; path = "generate-bytecode-files"; sourceTree = "<group>"; }; >- 6529FB3118B2D99900C61102 /* BytecodeList.json */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = BytecodeList.json; sourceTree = "<group>"; }; > 652A3A201651C66100A80AFE /* ARM64Disassembler.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = ARM64Disassembler.cpp; path = disassembler/ARM64Disassembler.cpp; sourceTree = "<group>"; }; > 652A3A221651C69700A80AFE /* A64DOpcode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = A64DOpcode.cpp; path = disassembler/ARM64/A64DOpcode.cpp; sourceTree = "<group>"; }; > 652A3A231651C69700A80AFE /* A64DOpcode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = A64DOpcode.h; path = disassembler/ARM64/A64DOpcode.h; sourceTree = "<group>"; }; >@@ -3891,7 +3925,6 @@ > 969A07900ED1D3AE00F1F681 /* CodeBlock.cpp */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.cpp.cpp; path = CodeBlock.cpp; sourceTree = "<group>"; }; > 969A07910ED1D3AE00F1F681 /* CodeBlock.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CodeBlock.h; sourceTree = "<group>"; }; > 969A07920ED1D3AE00F1F681 /* DirectEvalCodeCache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DirectEvalCodeCache.h; sourceTree = "<group>"; }; >- 969A07930ED1D3AE00F1F681 /* Instruction.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Instruction.h; sourceTree = "<group>"; }; > 969A07940ED1D3AE00F1F681 /* Opcode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Opcode.cpp; sourceTree = "<group>"; }; > 969A07950ED1D3AE00F1F681 /* Opcode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Opcode.h; sourceTree = "<group>"; }; > 969A09220ED1E09C00F1F681 /* Completion.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Completion.cpp; sourceTree = "<group>"; }; >@@ -4374,8 +4407,6 @@ > ADE802961E08F1C90058DE78 /* WebAssemblyLinkErrorPrototype.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = WebAssemblyLinkErrorPrototype.cpp; path = js/WebAssemblyLinkErrorPrototype.cpp; sourceTree = "<group>"; }; > ADE802971E08F1C90058DE78 /* WebAssemblyLinkErrorPrototype.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = WebAssemblyLinkErrorPrototype.h; path = js/WebAssemblyLinkErrorPrototype.h; sourceTree = "<group>"; }; > ADE8029D1E08F2260058DE78 /* WebAssemblyLinkErrorConstructor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = WebAssemblyLinkErrorConstructor.cpp; path = js/WebAssemblyLinkErrorConstructor.cpp; sourceTree = "<group>"; }; >- 
B59F89371891AD3300D5CCDC /* UnlinkedInstructionStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = UnlinkedInstructionStream.h; sourceTree = "<group>"; }; >- B59F89381891ADB500D5CCDC /* UnlinkedInstructionStream.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = UnlinkedInstructionStream.cpp; sourceTree = "<group>"; }; > BC021BF2136900C300FC5467 /* ToolExecutable.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = ToolExecutable.xcconfig; sourceTree = "<group>"; }; > BC02E9040E1839DB000F9297 /* ErrorConstructor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ErrorConstructor.cpp; sourceTree = "<group>"; }; > BC02E9050E1839DB000F9297 /* ErrorConstructor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ErrorConstructor.h; sourceTree = "<group>"; }; >@@ -4848,6 +4879,13 @@ > ); > runOnlyForDeploymentPostprocessing = 0; > }; >+ 14CC67B7213ED20C009B26F0 /* Frameworks */ = { >+ isa = PBXFrameworksBuildPhase; >+ buildActionMask = 2147483647; >+ files = ( >+ ); >+ runOnlyForDeploymentPostprocessing = 0; >+ }; > 651122FC14046A4C002B101D /* Frameworks */ = { > isa = PBXFrameworksBuildPhase; > buildActionMask = 2147483647; >@@ -4907,6 +4945,7 @@ > 0F9327591C20BCBA00CF6564 /* dynbench */, > 932F5BE10822A1C700736975 /* jsc */, > 0FF922CF14F46B130041A24E /* JSCLLIntOffsetsExtractor */, >+ 14CC67BD213ED20C009B26F0 /* JSCLLIntSettingsExtractor */, > 141211200A48793C00480255 /* minidom */, > 0F6183431C45F62A0072450B /* testair */, > 14BD59BF0A3E8F9000BAF59C /* testapi */, >@@ -4931,7 +4970,6 @@ > F692A8540255597D01FF60F7 /* create_hash_table */, > 937B63CC09E766D200A671DD /* DerivedSources.make */, > 0F93275A1C20BCDF00CF6564 /* dynbench.cpp */, >- 6529FB3018B2D63900C61102 /* generate-bytecode-files */, > F5C290E60284F98E018635CA /* 
JavaScriptCorePrefix.h */, > 45E12D8806A49B0F00E9DF84 /* jsc.cpp */, > A7C225CC139981F100FF1662 /* KeywordLookupGenerator.py */, >@@ -4950,6 +4988,7 @@ > E3FF752D1D9CE9EA00C7E16D /* domjit */, > 0867D69AFE84028FC02AAC07 /* Frameworks */, > 0FEA09FC1705137F00BB722C /* ftl */, >+ 14BA774C211085A0008D0B05 /* generator */, > 142E312A134FF0A600AFADB5 /* heap */, > A5BA15DF1823409200A82E69 /* inspector */, > 1429D77A0ED20D7300B89619 /* interpreter */, >@@ -4979,6 +5018,7 @@ > 5D5D8AD00E0D0EBE00F9C692 /* libedit.dylib */, > 9322A00306C341D3009067BB /* libicucore.dylib */, > 51F0EC0705C86C9A00E6DF1B /* libobjc.dylib */, >+ 1498CAD3214656C400710879 /* libWTF.a */, > A8A4748D151A8306004123FF /* libWTF.a */, > 371D842C17C98B6E00ECF994 /* libz.dylib */, > A5098B031C16AA0200087797 /* Security.framework */, >@@ -4997,7 +5037,7 @@ > 0F4680CE14BBB3D100BFE272 /* LLIntData.cpp */, > 0F4680CF14BBB3D100BFE272 /* LLIntData.h */, > 5DDDF44614FEE72200B4FB4D /* LLIntDesiredOffsets.h */, >- 0F38B00F17CF077F00B144D3 /* LLIntEntrypoint.cpp */, >+ 0F38B00F17CF077F00B144D3 /* LLIntDesiredSettings.h */, > 0F38B01017CF077F00B144D3 /* LLIntEntrypoint.h */, > 0F46809D14BA7F8200BFE272 /* LLIntExceptions.cpp */, > 0F46809E14BA7F8200BFE272 /* LLIntExceptions.h */, >@@ -5005,6 +5045,7 @@ > 0F4680A114BA7F8200BFE272 /* LLIntOffsetsExtractor.cpp */, > FED287B115EC9A5700DA8161 /* LLIntOpcode.h */, > 79CFC6EF1C33B10000C768EA /* LLIntPCRanges.h */, >+ 14CC67C0213ED588009B26F0 /* LLIntSettingsExtractor.cpp */, > 0F46809F14BA7F8200BFE272 /* LLIntSlowPaths.cpp */, > 0F4680A014BA7F8200BFE272 /* LLIntSlowPaths.h */, > 0F0B839714BCF45A00885B4F /* LLIntThunks.cpp */, >@@ -5951,6 +5992,24 @@ > path = debugger; > sourceTree = "<group>"; > }; >+ 14BA774C211085A0008D0B05 /* generator */ = { >+ isa = PBXGroup; >+ children = ( >+ 14BA774D211085DE008D0B05 /* runtime */, >+ ); >+ path = generator; >+ sourceTree = "<group>"; >+ }; >+ 14BA774D211085DE008D0B05 /* runtime */ = { >+ isa = PBXGroup; >+ children = ( >+ 
14CC3BA0213756B0002D58B6 /* DumpValue.h */, >+ 14BA774F211085F0008D0B05 /* Fits.h */, >+ 14BA7750211085F0008D0B05 /* Instruction.h */, >+ ); >+ path = runtime; >+ sourceTree = "<group>"; >+ }; > 1C90513E0BA9E8830081E9D0 /* Configurations */ = { > isa = PBXGroup; > children = ( >@@ -6357,6 +6416,7 @@ > 969A07270ED1CE6900F1F681 /* Label.h */, > 960097A50EBABB58007A7297 /* LabelScope.h */, > 655EB29A10CE2581001A990E /* NodesCodegen.cpp */, >+ 14BA7752211A8E5F008D0B05 /* ProfileTypeBytecodeFlag.h */, > 969A07280ED1CE6900F1F681 /* RegisterID.h */, > 14DF04D916B3996D0016A513 /* StaticPropertyAnalysis.h */, > 14CA958A16AB50DE00938A06 /* StaticPropertyAnalyzer.h */, >@@ -7580,7 +7640,7 @@ > 7094C4DC1AE439530041A2EE /* BytecodeIntrinsicRegistry.cpp */, > 7094C4DD1AE439530041A2EE /* BytecodeIntrinsicRegistry.h */, > 0F2DD80A1AB3D85800BBB8E8 /* BytecodeKills.h */, >- 6529FB3118B2D99900C61102 /* BytecodeList.json */, >+ 14BA7751211086A0008D0B05 /* BytecodeList.rb */, > C2FCAE0E17A9C24E0034C735 /* BytecodeLivenessAnalysis.cpp */, > C2FCAE0F17A9C24E0034C735 /* BytecodeLivenessAnalysis.h */, > 0F666EBE183566F900D017F1 /* BytecodeLivenessAnalysisInlines.h */, >@@ -7667,7 +7727,8 @@ > 0FB399BB20AF6B2A0017E213 /* InstanceOfStatus.h */, > 0FB399BC20AF6B2A0017E213 /* InstanceOfVariant.cpp */, > 0FB399B920AF6B2A0017E213 /* InstanceOfVariant.h */, >- 969A07930ED1D3AE00F1F681 /* Instruction.h */, >+ 14CC3BA12138A238002D58B6 /* InstructionStream.cpp */, >+ 14CC3BA22138A238002D58B6 /* InstructionStream.h */, > 53F6BF6C1C3F060A00F41E5D /* InternalFunctionAllocationProfile.h */, > BCFD8C900EEB2EE700283848 /* JumpTable.cpp */, > BCFD8C910EEB2EE700283848 /* JumpTable.h */, >@@ -7746,8 +7807,6 @@ > 14AD91211DCA9FA40014F9FE /* UnlinkedFunctionExecutable.h */, > 14142E501B796ECE00F4BF4B /* UnlinkedFunctionExecutable.h */, > 14AD911C1DCA9FA40014F9FE /* UnlinkedGlobalCodeBlock.h */, >- B59F89381891ADB500D5CCDC /* UnlinkedInstructionStream.cpp */, >- B59F89371891AD3300D5CCDC /* 
UnlinkedInstructionStream.h */, > 14AD912A1DCAAAB00014F9FE /* UnlinkedModuleProgramCodeBlock.cpp */, > 14AD911F1DCA9FA40014F9FE /* UnlinkedModuleProgramCodeBlock.h */, > 14AD91291DCAAAB00014F9FE /* UnlinkedProgramCodeBlock.cpp */, >@@ -8387,7 +8446,6 @@ > 53D444DC1DAF08AB00B92784 /* B3WasmAddressValue.h in Headers */, > 5341FC721DAC343C00E7E4D7 /* B3WasmBoundsCheckValue.h in Headers */, > 0F2C63B21E60AE4700C13839 /* B3Width.h in Headers */, >- 0F44A7B220BF68CE0022B171 /* ICStatusMap.h in Headers */, > 52678F8F1A031009006A306D /* BasicBlockLocation.h in Headers */, > 147B83AC0E6DB8C9004775A4 /* BatchedTransitionOptimizer.h in Headers */, > 86976E5F1FA3E8BC00E7C4E1 /* BigIntConstructor.h in Headers */, >@@ -8608,7 +8666,6 @@ > 86EC9DC61328DF82002B2AD7 /* DFGGenerationInfo.h in Headers */, > 86EC9DC81328DF82002B2AD7 /* DFGGraph.h in Headers */, > 0F2FCCFA18A60070001A27F8 /* DFGGraphSafepoint.h in Headers */, >- 0F44A7B120BF68C90022B171 /* ExitingInlineKind.h in Headers */, > 0FB17661196B8F9E0091052A /* DFGHeapLocation.h in Headers */, > 0FC841691BA8C3210061837D /* DFGInferredTypeCheck.h in Headers */, > 0FB14E211812570B009B6B4D /* DFGInlineCacheWrapper.h in Headers */, >@@ -8752,6 +8809,8 @@ > 14142E531B796EDD00F4BF4B /* ExecutableInfo.h in Headers */, > 0F60FE901FFC37020003320A /* ExecutableToCodeBlockEdge.h in Headers */, > 0F56A1D315000F35002992B1 /* ExecutionCounter.h in Headers */, >+ 0F44A7B020BF68620022B171 /* ExitFlag.h in Headers */, >+ 0F44A7B120BF68C90022B171 /* ExitingInlineKind.h in Headers */, > 0F3AC754188E5EC80032029F /* ExitingJITType.h in Headers */, > 0FB105861675481200F8AB6E /* ExitKind.h in Headers */, > 0F0B83AB14BCF5BB00885B4F /* ExpressionRangeInfo.h in Headers */, >@@ -8759,7 +8818,6 @@ > A7A8AF3817ADB5F3005AB174 /* Float32Array.h in Headers */, > A7A8AF3917ADB5F3005AB174 /* Float64Array.h in Headers */, > 0F24E54317EA9F5900ABB217 /* FPRInfo.h in Headers */, >- 0F44A7B320BF68D10022B171 /* RecordedStatuses.h in Headers */, > 
E34EDBF71DB5FFC900DC87A5 /* FrameTracers.h in Headers */, > 0F5513A61D5A682C00C32BD8 /* FreeList.h in Headers */, > 0F6585E11EE0805A0095176D /* FreeListInlines.h in Headers */, >@@ -8899,6 +8957,7 @@ > FE1BD0251E72053800134BC9 /* HeapVerifier.h in Headers */, > 0F4680D514BBD24B00BFE272 /* HostCallReturnValue.h in Headers */, > DC2143071CA32E55000A8869 /* ICStats.h in Headers */, >+ 0F44A7B220BF68CE0022B171 /* ICStatusMap.h in Headers */, > 0FB399BE20AF6B3D0017E213 /* ICStatusUtils.h in Headers */, > BC18C40F0E16F5CD00B34460 /* Identifier.h in Headers */, > 8606DDEA18DA44AB00A383D0 /* IdentifierInlines.h in Headers */, >@@ -8949,7 +9008,6 @@ > 0F49E9AA20AB4D00001CA0AA /* InstanceOfAccessCase.h in Headers */, > 0FB399BF20AF6B3F0017E213 /* InstanceOfStatus.h in Headers */, > 0FB399C020AF6B430017E213 /* InstanceOfVariant.h in Headers */, >- 969A07990ED1D3AE00F1F681 /* Instruction.h in Headers */, > A7A8AF3B17ADB5F3005AB174 /* Int16Array.h in Headers */, > A7A8AF3C17ADB5F3005AB174 /* Int32Array.h in Headers */, > A7A8AF3A17ADB5F3005AB174 /* Int8Array.h in Headers */, >@@ -9109,7 +9167,6 @@ > 7013CA8C1B491A9400CAE613 /* JSJob.h in Headers */, > BC18C4160E16F5CD00B34460 /* JSLexicalEnvironment.h in Headers */, > BC18C4230E16F5CD00B34460 /* JSLock.h in Headers */, >- 0F44A7B020BF68620022B171 /* ExitFlag.h in Headers */, > C25D709C16DE99F400FCA6BC /* JSManagedValue.h in Headers */, > 2A4BB7F318A41179008A0FCD /* JSManagedValueInternal.h in Headers */, > A700874217CBE8EB00C3E643 /* JSMap.h in Headers */, >@@ -9366,6 +9423,7 @@ > 0F0CD4C215F1A6070032F1C0 /* PutDirectIndexMode.h in Headers */, > 0F9FC8C514E1B60400D52AE0 /* PutKind.h in Headers */, > 147B84630E6DE6B1004775A4 /* PutPropertySlot.h in Headers */, >+ 0F44A7B320BF68D10022B171 /* RecordedStatuses.h in Headers */, > 0FF60AC216740F8300029779 /* ReduceWhitespace.h in Headers */, > E33637A61B63220200EE0840 /* ReflectObject.h in Headers */, > 996B73231BDA08EF00331B84 /* ReflectObject.lut.h in Headers */, >@@ -9425,7 
+9483,6 @@ > A7299DA217D12848005F5FF9 /* SetPrototype.h in Headers */, > 0FEE98411A8865B700754E93 /* SetupVarargsFrame.h in Headers */, > DC17E8181C9C91D9008A6AB3 /* ShadowChicken.h in Headers */, >- 0F44A7B420BF68D90022B171 /* TerminatedCodeOrigin.h in Headers */, > DC17E8191C9C91DB008A6AB3 /* ShadowChickenInlines.h in Headers */, > FE3022D31E3D73A500BAC493 /* SigillCrashAnalyzer.h in Headers */, > 0F4D8C781FCA3CFA001D32AC /* SimpleMarkingConstraint.h in Headers */, >@@ -9480,6 +9537,7 @@ > 0F766D3915AE4A1F008F363E /* StructureStubClearingWatchpoint.h in Headers */, > BCCF0D080EF0AAB900413C8F /* StructureStubInfo.h in Headers */, > BC9041480EB9250900FE26FA /* StructureTransitionTable.h in Headers */, >+ 0F44767020C5E2B4008B2C36 /* StubInfoSummary.h in Headers */, > 0F7DF1371E2970E10095951B /* Subspace.h in Headers */, > 0F7DF1381E2970E40095951B /* SubspaceInlines.h in Headers */, > 0F4A38FA1C8E13DF00190318 /* SuperSampler.h in Headers */, >@@ -9498,6 +9556,7 @@ > DC7997831CDE9FA0004D4A09 /* TagRegistersMode.h in Headers */, > 70ECA6091AFDBEA200449739 /* TemplateObjectDescriptor.h in Headers */, > 0F24E54F17EE274900ABB217 /* TempRegisterSet.h in Headers */, >+ 0F44A7B420BF68D90022B171 /* TerminatedCodeOrigin.h in Headers */, > 0FA2C17C17D7CF84009D015F /* TestRunnerUtils.h in Headers */, > FE3422121D6B81C30032BE88 /* ThrowScope.h in Headers */, > 0F572D4F16879FDD00E57FBD /* ThunkGenerator.h in Headers */, >@@ -9581,7 +9640,6 @@ > AD5B416F1EBAFB77008EFA43 /* WasmName.h in Headers */, > AD7B4B2E1FA3E29800C9DF79 /* WasmNameSection.h in Headers */, > ADD8FA461EB3079700DF542F /* WasmNameSectionParser.h in Headers */, >- 0F44767020C5E2B4008B2C36 /* StubInfoSummary.h in Headers */, > 5311BD4B1EA581E500525281 /* WasmOMGPlan.h in Headers */, > 53C6FEEF1E8ADFA900B18425 /* WasmOpcodeOrigin.h in Headers */, > 53B4BD121F68B32500D2BEA3 /* WasmOps.h in Headers */, >@@ -9768,6 +9826,24 @@ > productReference = 14BD59BF0A3E8F9000BAF59C /* testapi */; > productType = 
"com.apple.product-type.tool"; > }; >+ 14CC67B0213ED20C009B26F0 /* JSCLLIntSettingsExtractor */ = { >+ isa = PBXNativeTarget; >+ buildConfigurationList = 14CC67B8213ED20C009B26F0 /* Build configuration list for PBXNativeTarget "JSCLLIntSettingsExtractor" */; >+ buildPhases = ( >+ 14CC67B5213ED20C009B26F0 /* Sources */, >+ 14CC67B7213ED20C009B26F0 /* Frameworks */, >+ ); >+ buildRules = ( >+ ); >+ dependencies = ( >+ 14CC67BF213ED459009B26F0 /* PBXTargetDependency */, >+ ); >+ name = JSCLLIntSettingsExtractor; >+ productInstallPath = /usr/local/bin; >+ productName = jsc; >+ productReference = 14CC67BD213ED20C009B26F0 /* JSCLLIntSettingsExtractor */; >+ productType = "com.apple.product-type.tool"; >+ }; > 651122F714046A4C002B101D /* testRegExp */ = { > isa = PBXNativeTarget; > buildConfigurationList = 6511230014046A4C002B101D /* Build configuration list for PBXNativeTarget "testRegExp" */; >@@ -9888,10 +9964,12 @@ > projectDirPath = ""; > projectRoot = ""; > targets = ( >- 932F5BE30822A1C700736975 /* All */, > 932F5B3E0822A1C700736975 /* JavaScriptCore */, >- 0F4680A914BA7FD900BFE272 /* LLInt Offsets */, >+ 932F5BE30822A1C700736975 /* All */, > 65FB3F6609D11E9100F49DEB /* Derived Sources */, >+ 14CC67A5213ECFE2009B26F0 /* LLInt Settings */, >+ 14CC67B0213ED20C009B26F0 /* JSCLLIntSettingsExtractor */, >+ 0F4680A914BA7FD900BFE272 /* LLInt Offsets */, > 0FF922C314F46B130041A24E /* JSCLLIntOffsetsExtractor */, > 65788A9D18B409EB00C189FF /* Offline Assembler */, > 53B4BD041F68AF8900D2BEA3 /* Generate Unified Sources */, >@@ -9933,7 +10011,21 @@ > ); > runOnlyForDeploymentPostprocessing = 0; > shellPath = /bin/sh; >- shellScript = "set -e\n\nmkdir -p \"${BUILT_PRODUCTS_DIR}/LLIntOffsets/\"\n\n/usr/bin/env ruby \"${SRCROOT}/offlineasm/generate_offset_extractor.rb\" \"-I${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\" \"${SRCROOT}/llint/LowLevelInterpreter.asm\" \"${BUILT_PRODUCTS_DIR}/LLIntOffsets/LLIntDesiredOffsets.h\" \"${ARCHS} C_LOOP\"\n"; >+ shellScript = "set 
-e\n\nmkdir -p \"${BUILT_PRODUCTS_DIR}/LLIntOffsets/\"\n\n/usr/bin/env ruby \"${SRCROOT}/offlineasm/generate_offset_extractor.rb\" \"-I${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\" \"${SRCROOT}/llint/LowLevelInterpreter.asm\" \"${BUILT_PRODUCTS_DIR}/JSCLLIntSettingsExtractor\" \"${BUILT_PRODUCTS_DIR}/LLIntOffsets/LLIntDesiredOffsets.h\" \"${ARCHS} C_LOOP\"\n"; >+ }; >+ 14CC67A8213ECFE2009B26F0 /* Generate Derived Sources */ = { >+ isa = PBXShellScriptBuildPhase; >+ buildActionMask = 2147483647; >+ files = ( >+ ); >+ inputPaths = ( >+ ); >+ name = "Generate Derived Sources"; >+ outputPaths = ( >+ ); >+ runOnlyForDeploymentPostprocessing = 0; >+ shellPath = /bin/sh; >+ shellScript = "set -e\n\nmkdir -p \"${BUILT_PRODUCTS_DIR}/LLIntOffsets/\"\n\n/usr/bin/env ruby \"${SRCROOT}/offlineasm/generate_settings_extractor.rb\" \"-I${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\" \"${SRCROOT}/llint/LowLevelInterpreter.asm\" \"${BUILT_PRODUCTS_DIR}/LLIntOffsets/LLIntDesiredSettings.h\" \"${ARCHS} C_LOOP\"\n"; > }; > 1A02D9A81B34A882000D1522 /* Add Symlink in /System/Library/PrivateFrameworks */ = { > isa = PBXShellScriptBuildPhase; >@@ -9982,7 +10074,7 @@ > ); > runOnlyForDeploymentPostprocessing = 0; > shellPath = /bin/sh; >- shellScript = "exec ${SRCROOT}/postprocess-headers.sh"; >+ shellScript = "exec ${SRCROOT}/postprocess-headers.sh\n"; > }; > 374F95C9205F9975002BF68F /* Make libWTF.a Symbolic Link */ = { > isa = PBXShellScriptBuildPhase; >@@ -10103,7 +10195,7 @@ > ); > runOnlyForDeploymentPostprocessing = 0; > shellPath = /bin/sh; >- shellScript = "if [[ \"${ACTION}\" == \"installhdrs\" ]]; then\n exit 0\nfi\n\ncd \"${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\"\n\n/usr/bin/env ruby JavaScriptCore/offlineasm/asm.rb \"-I${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\" JavaScriptCore/llint/LowLevelInterpreter.asm \"${BUILT_PRODUCTS_DIR}/JSCLLIntOffsetsExtractor\" LLIntAssembly.h || exit 1"; >+ shellScript = "if [[ \"${ACTION}\" == \"installhdrs\" 
]]; then\n exit 0\nfi\n\ncd \"${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\"\n\n/usr/bin/env ruby JavaScriptCore/offlineasm/asm.rb \"-I${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\" JavaScriptCore/llint/LowLevelInterpreter.asm \"${BUILT_PRODUCTS_DIR}/JSCLLIntOffsetsExtractor\" LLIntAssembly.h || exit 1\n"; > }; > 65FB3F6509D11E9100F49DEB /* Generate Derived Sources */ = { > isa = PBXShellScriptBuildPhase; >@@ -10206,6 +10298,14 @@ > ); > runOnlyForDeploymentPostprocessing = 0; > }; >+ 14CC67B5213ED20C009B26F0 /* Sources */ = { >+ isa = PBXSourcesBuildPhase; >+ buildActionMask = 2147483647; >+ files = ( >+ 14CC67C2213ED58F009B26F0 /* LLIntSettingsExtractor.cpp in Sources */, >+ ); >+ runOnlyForDeploymentPostprocessing = 0; >+ }; > 651122FA14046A4C002B101D /* Sources */ = { > isa = PBXSourcesBuildPhase; > buildActionMask = 2147483647; >@@ -10426,6 +10526,21 @@ > target = 0F4680A914BA7FD900BFE272 /* LLInt Offsets */; > targetProxy = 0FF922D214F46B2F0041A24E /* PBXContainerItemProxy */; > }; >+ 14CC67A6213ECFE2009B26F0 /* PBXTargetDependency */ = { >+ isa = PBXTargetDependency; >+ target = 65FB3F6609D11E9100F49DEB /* Derived Sources */; >+ targetProxy = 14CC67A7213ECFE2009B26F0 /* PBXContainerItemProxy */; >+ }; >+ 14CC67BF213ED459009B26F0 /* PBXTargetDependency */ = { >+ isa = PBXTargetDependency; >+ target = 14CC67A5213ECFE2009B26F0 /* LLInt Settings */; >+ targetProxy = 14CC67BE213ED459009B26F0 /* PBXContainerItemProxy */; >+ }; >+ 14CC67C4213F0402009B26F0 /* PBXTargetDependency */ = { >+ isa = PBXTargetDependency; >+ target = 14CC67B0213ED20C009B26F0 /* JSCLLIntSettingsExtractor */; >+ targetProxy = 14CC67C3213F0402009B26F0 /* PBXContainerItemProxy */; >+ }; > 53B4BD141F68C2AA00D2BEA3 /* PBXTargetDependency */ = { > isa = PBXTargetDependency; > target = 53B4BD041F68AF8900D2BEA3 /* Generate Unified Sources */; >@@ -10461,11 +10576,6 @@ > target = 65FB3F6609D11E9100F49DEB /* Derived Sources */; > targetProxy = 65244BD218ECB5000010B708 /* 
PBXContainerItemProxy */; > }; >- 65442D5018EBB744007AF92F /* PBXTargetDependency */ = { >- isa = PBXTargetDependency; >- target = 65FB3F6609D11E9100F49DEB /* Derived Sources */; >- targetProxy = 65442D4F18EBB744007AF92F /* PBXContainerItemProxy */; >- }; > 65788A9E18B409EB00C189FF /* PBXTargetDependency */ = { > isa = PBXTargetDependency; > target = 0FF922C314F46B130041A24E /* JSCLLIntOffsetsExtractor */; >@@ -10796,6 +10906,86 @@ > }; > name = Production; > }; >+ 14CC67AA213ECFE2009B26F0 /* Debug */ = { >+ isa = XCBuildConfiguration; >+ buildSettings = { >+ PRODUCT_NAME = "$(TARGET_NAME)"; >+ }; >+ name = Debug; >+ }; >+ 14CC67AB213ECFE2009B26F0 /* Release */ = { >+ isa = XCBuildConfiguration; >+ buildSettings = { >+ PRODUCT_NAME = "$(TARGET_NAME)"; >+ }; >+ name = Release; >+ }; >+ 14CC67AC213ECFE2009B26F0 /* Profiling */ = { >+ isa = XCBuildConfiguration; >+ buildSettings = { >+ PRODUCT_NAME = "$(TARGET_NAME)"; >+ }; >+ name = Profiling; >+ }; >+ 14CC67AD213ECFE2009B26F0 /* Production */ = { >+ isa = XCBuildConfiguration; >+ buildSettings = { >+ PRODUCT_NAME = "$(TARGET_NAME)"; >+ }; >+ name = Production; >+ }; >+ 14CC67B9213ED20C009B26F0 /* Debug */ = { >+ isa = XCBuildConfiguration; >+ baseConfigurationReference = BC021BF2136900C300FC5467 /* ToolExecutable.xcconfig */; >+ buildSettings = { >+ HEADER_SEARCH_PATHS = ( >+ "\"${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\"", >+ "\"$(JAVASCRIPTCORE_FRAMEWORKS_DIR)/JavaScriptCore.framework/PrivateHeaders\"", >+ "$(inherited)", >+ ); >+ PRODUCT_NAME = "$(TARGET_NAME)"; >+ }; >+ name = Debug; >+ }; >+ 14CC67BA213ED20C009B26F0 /* Release */ = { >+ isa = XCBuildConfiguration; >+ baseConfigurationReference = BC021BF2136900C300FC5467 /* ToolExecutable.xcconfig */; >+ buildSettings = { >+ HEADER_SEARCH_PATHS = ( >+ "\"${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\"", >+ "\"$(JAVASCRIPTCORE_FRAMEWORKS_DIR)/JavaScriptCore.framework/PrivateHeaders\"", >+ "$(inherited)", >+ ); >+ PRODUCT_NAME = "$(TARGET_NAME)"; 
>+ }; >+ name = Release; >+ }; >+ 14CC67BB213ED20C009B26F0 /* Profiling */ = { >+ isa = XCBuildConfiguration; >+ baseConfigurationReference = BC021BF2136900C300FC5467 /* ToolExecutable.xcconfig */; >+ buildSettings = { >+ HEADER_SEARCH_PATHS = ( >+ "\"${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\"", >+ "\"$(JAVASCRIPTCORE_FRAMEWORKS_DIR)/JavaScriptCore.framework/PrivateHeaders\"", >+ "$(inherited)", >+ ); >+ PRODUCT_NAME = "$(TARGET_NAME)"; >+ }; >+ name = Profiling; >+ }; >+ 14CC67BC213ED20C009B26F0 /* Production */ = { >+ isa = XCBuildConfiguration; >+ baseConfigurationReference = BC021BF2136900C300FC5467 /* ToolExecutable.xcconfig */; >+ buildSettings = { >+ HEADER_SEARCH_PATHS = ( >+ "\"${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\"", >+ "\"$(JAVASCRIPTCORE_FRAMEWORKS_DIR)/JavaScriptCore.framework/PrivateHeaders\"", >+ "$(inherited)", >+ ); >+ PRODUCT_NAME = "$(TARGET_NAME)"; >+ }; >+ name = Production; >+ }; > 53B4BD0B1F68AF8900D2BEA3 /* Debug */ = { > isa = XCBuildConfiguration; > buildSettings = { >@@ -11179,6 +11369,28 @@ > defaultConfigurationIsVisible = 0; > defaultConfigurationName = Production; > }; >+ 14CC67A9213ECFE2009B26F0 /* Build configuration list for PBXAggregateTarget "LLInt Settings" */ = { >+ isa = XCConfigurationList; >+ buildConfigurations = ( >+ 14CC67AA213ECFE2009B26F0 /* Debug */, >+ 14CC67AB213ECFE2009B26F0 /* Release */, >+ 14CC67AC213ECFE2009B26F0 /* Profiling */, >+ 14CC67AD213ECFE2009B26F0 /* Production */, >+ ); >+ defaultConfigurationIsVisible = 0; >+ defaultConfigurationName = Production; >+ }; >+ 14CC67B8213ED20C009B26F0 /* Build configuration list for PBXNativeTarget "JSCLLIntSettingsExtractor" */ = { >+ isa = XCConfigurationList; >+ buildConfigurations = ( >+ 14CC67B9213ED20C009B26F0 /* Debug */, >+ 14CC67BA213ED20C009B26F0 /* Release */, >+ 14CC67BB213ED20C009B26F0 /* Profiling */, >+ 14CC67BC213ED20C009B26F0 /* Production */, >+ ); >+ defaultConfigurationIsVisible = 0; >+ defaultConfigurationName = Production; 
>+ }; > 53B4BD0A1F68AF8900D2BEA3 /* Build configuration list for PBXAggregateTarget "Generate Unified Sources" */ = { > isa = XCConfigurationList; > buildConfigurations = ( >diff --git a/Source/JavaScriptCore/Sources.txt b/Source/JavaScriptCore/Sources.txt >index 76dfee251b23da282f380d522ebe2a60c4cc81eb..7f76ca9df33344535f412d212beb035e85c2f652 100644 >--- a/Source/JavaScriptCore/Sources.txt >+++ b/Source/JavaScriptCore/Sources.txt >@@ -231,6 +231,7 @@ bytecode/InlineCallFrameSet.cpp > bytecode/InstanceOfAccessCase.cpp > bytecode/InstanceOfStatus.cpp > bytecode/InstanceOfVariant.cpp >+bytecode/InstructionStream.cpp > bytecode/IntrinsicGetterAccessCase.cpp > bytecode/JumpTable.cpp > bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp >@@ -266,7 +267,6 @@ bytecode/UnlinkedCodeBlock.cpp > bytecode/UnlinkedEvalCodeBlock.cpp > bytecode/UnlinkedFunctionCodeBlock.cpp > bytecode/UnlinkedFunctionExecutable.cpp >-bytecode/UnlinkedInstructionStream.cpp > bytecode/UnlinkedModuleProgramCodeBlock.cpp > bytecode/UnlinkedProgramCodeBlock.cpp > bytecode/ValueRecovery.cpp >diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h >index fac8b86bc1bc92645eb03e718890e8180cb11c11..d1caee463647541c16659abe569a3c515f112040 100644 >--- a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h >+++ b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h >@@ -176,7 +176,7 @@ class ReturnAddressPtr { > public: > ReturnAddressPtr() { } > >- explicit ReturnAddressPtr(void* value) >+ explicit ReturnAddressPtr(const void* value) > : m_value(value) > { > PoisonedMasmPtr::assertIsNotPoisoned(m_value); >@@ -191,7 +191,7 @@ public: > ASSERT_VALID_CODE_POINTER(m_value); > } > >- void* value() const >+ const void* value() const > { > PoisonedMasmPtr::assertIsNotPoisoned(m_value); > return m_value; >@@ -203,7 +203,7 @@ public: > } > > private: >- void* m_value { nullptr }; >+ const void* m_value { nullptr }; > }; > > // 
MacroAssemblerCodePtr: >diff --git a/Source/JavaScriptCore/bytecode/ArithProfile.h b/Source/JavaScriptCore/bytecode/ArithProfile.h >index 40fad1be3baa43486e98a03cd04d554876ed4dc7..011f8b9b225131893b72b173432f7a3701fa2ce5 100644 >--- a/Source/JavaScriptCore/bytecode/ArithProfile.h >+++ b/Source/JavaScriptCore/bytecode/ArithProfile.h >@@ -102,6 +102,11 @@ public: > ASSERT(lhsObservedType().isEmpty()); > ASSERT(rhsObservedType().isEmpty()); > } >+ >+ ArithProfile(OperandTypes types) >+ : ArithProfile(types.first(), types.second()) >+ { } >+ > ArithProfile() = default; > > static ArithProfile fromInt(uint32_t bits) >diff --git a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h >index c233deb7a24114553257a9ab8fc874e1e8709a6f..fef936e257888ed22ea20f8b18f9d9c7b13265f3 100644 >--- a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h >+++ b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h >@@ -32,6 +32,13 @@ namespace JSC { > > class ArrayAllocationProfile { > public: >+ ArrayAllocationProfile() = default; >+ >+ ArrayAllocationProfile(IndexingType recommendedIndexingMode) >+ { >+ initializeIndexingMode(recommendedIndexingMode); >+ } >+ > IndexingType selectIndexingType() > { > JSArray* lastArray = m_lastArray; >diff --git a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp >index a4397b5441f1c2acc6b62a6f0271a2292847e365..b29ad35afa2a4fd050ab2e3e9a6e2f0553a64848 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp >+++ b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp >@@ -39,7 +39,7 @@ void BytecodeBasicBlock::shrinkToFit() > m_successors.shrinkToFit(); > } > >-static bool isJumpTarget(OpcodeID opcodeID, const Vector<unsigned, 32>& jumpTargets, unsigned bytecodeOffset) >+static bool isJumpTarget(OpcodeID opcodeID, const Vector<InstructionStream::Offset, 32>& jumpTargets, unsigned bytecodeOffset) > { > if (opcodeID == 
op_catch) > return true; >@@ -47,11 +47,11 @@ static bool isJumpTarget(OpcodeID opcodeID, const Vector<unsigned, 32>& jumpTarg > return std::binary_search(jumpTargets.begin(), jumpTargets.end(), bytecodeOffset); > } > >-template<typename Block, typename Instruction> >-void BytecodeBasicBlock::computeImpl(Block* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks) >+template<typename Block> >+void BytecodeBasicBlock::computeImpl(Block* codeBlock, const InstructionStream& instructions, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks) > { >- Vector<unsigned, 32> jumpTargets; >- computePreciseJumpTargets(codeBlock, instructionsBegin, instructionCount, jumpTargets); >+ Vector<InstructionStream::Offset, 32> jumpTargets; >+ computePreciseJumpTargets(codeBlock, instructions, jumpTargets); > > auto appendBlock = [&] (std::unique_ptr<BytecodeBasicBlock>&& block) { > block->m_index = basicBlocks.size(); >@@ -66,7 +66,7 @@ void BytecodeBasicBlock::computeImpl(Block* codeBlock, Instruction* instructions > basicBlocks.reserveCapacity(jumpTargets.size() + 2); > > auto entry = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::EntryBlock); >- auto firstBlock = std::make_unique<BytecodeBasicBlock>(0, 0); >+ auto firstBlock = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::EntryBlock); > linkBlocks(entry.get(), firstBlock.get()); > > appendBlock(WTFMove(entry)); >@@ -77,19 +77,18 @@ void BytecodeBasicBlock::computeImpl(Block* codeBlock, Instruction* instructions > > bool nextInstructionIsLeader = false; > >- for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount;) { >- OpcodeID opcodeID = Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset]); >- unsigned opcodeLength = opcodeLengths[opcodeID]; >+ for (const auto& instruction : instructions) { >+ auto bytecodeOffset = instruction.offset(); >+ OpcodeID opcodeID = instruction->opcodeID(); > > bool createdBlock = false; 
> // If the current bytecode is a jump target, then it's the leader of its own basic block. > if (isJumpTarget(opcodeID, jumpTargets, bytecodeOffset) || nextInstructionIsLeader) { >- auto newBlock = std::make_unique<BytecodeBasicBlock>(bytecodeOffset, opcodeLength); >+ auto newBlock = std::make_unique<BytecodeBasicBlock>(instruction); > current = newBlock.get(); > appendBlock(WTFMove(newBlock)); > createdBlock = true; > nextInstructionIsLeader = false; >- bytecodeOffset += opcodeLength; > } > > // If the current bytecode is a branch or a return, then the next instruction is the leader of its own basic block. >@@ -100,8 +99,7 @@ void BytecodeBasicBlock::computeImpl(Block* codeBlock, Instruction* instructions > continue; > > // Otherwise, just add to the length of the current block. >- current->addLength(opcodeLength); >- bytecodeOffset += opcodeLength; >+ current->addLength(instruction->size()); > } > > // Link basic blocks together. >@@ -111,24 +109,27 @@ void BytecodeBasicBlock::computeImpl(Block* codeBlock, Instruction* instructions > if (block->isEntryBlock() || block->isExitBlock()) > continue; > >- bool fallsThrough = true; >- for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) { >- OpcodeID opcodeID = Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset]); >- unsigned opcodeLength = opcodeLengths[opcodeID]; >+ bool fallsThrough = true; >+ for (auto bytecodeOffset : block->offsets()) { >+ auto instruction = instructions.at(bytecodeOffset); >+ OpcodeID opcodeID = instruction->opcodeID(); >+ > // If we found a terminal bytecode, link to the exit block. 
> if (isTerminal(opcodeID)) { >- ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength()); >+ // TODO >+ // ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength()); > linkBlocks(block, exit.get()); > fallsThrough = false; > break; > } > >- // If we found a throw, get the HandlerInfo for this instruction to see where we will jump. >+ // If we found a throw, get the HandlerInfo for this instruction to see where we will jump. > // If there isn't one, treat this throw as a terminal. This is true even if we have a finally > // block because the finally block will create its own catch, which will generate a HandlerInfo. > if (isThrow(opcodeID)) { >- ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength()); >- auto* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset); >+ // TODO >+ // ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength()); >+ auto* handler = codeBlock->handlerForBytecodeOffset(instruction.offset()); > fallsThrough = false; > if (!handler) { > linkBlocks(block, exit.get()); >@@ -146,9 +147,10 @@ void BytecodeBasicBlock::computeImpl(Block* codeBlock, Instruction* instructions > > // If we found a branch, link to the block(s) that we jump to. 
> if (isBranch(opcodeID)) { >- ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength()); >- Vector<unsigned, 1> bytecodeOffsetsJumpedTo; >- findJumpTargetsForBytecodeOffset(codeBlock, instructionsBegin, bytecodeOffset, bytecodeOffsetsJumpedTo); >+ // TODO >+ // ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength()); >+ Vector<InstructionStream::Offset, 1> bytecodeOffsetsJumpedTo; >+ findJumpTargetsForInstruction(codeBlock, instruction, bytecodeOffsetsJumpedTo); > > size_t numberOfJumpTargets = bytecodeOffsetsJumpedTo.size(); > ASSERT(numberOfJumpTargets); >@@ -172,7 +174,6 @@ void BytecodeBasicBlock::computeImpl(Block* codeBlock, Instruction* instructions > > break; > } >- bytecodeOffset += opcodeLength; > } > > // If we fall through then link to the next block in program order. >@@ -184,19 +185,19 @@ void BytecodeBasicBlock::computeImpl(Block* codeBlock, Instruction* instructions > } > > appendBlock(WTFMove(exit)); >- >+ > for (auto& basicBlock : basicBlocks) > basicBlock->shrinkToFit(); > } > >-void BytecodeBasicBlock::compute(CodeBlock* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks) >+void BytecodeBasicBlock::compute(CodeBlock* codeBlock, const InstructionStream& instructions, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks) > { >- computeImpl(codeBlock, instructionsBegin, instructionCount, basicBlocks); >+ computeImpl(codeBlock, instructions, basicBlocks); > } > >-void BytecodeBasicBlock::compute(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks) >+void BytecodeBasicBlock::compute(UnlinkedCodeBlock* codeBlock, const InstructionStream& instructions, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks) > { >- BytecodeBasicBlock::computeImpl(codeBlock, instructionsBegin, instructionCount, basicBlocks); >+ 
computeImpl(codeBlock, instructions, basicBlocks); > } > > } // namespace JSC >diff --git a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h >index fb81650ca1f6516e9b61bb0f782f2c23b66b8be9..3697934ca230fe2957cd134a1d5d6a858edb9225 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h >+++ b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h >@@ -25,6 +25,7 @@ > > #pragma once > >+#include "InstructionStream.h" > #include <limits.h> > #include <wtf/FastBitVector.h> > #include <wtf/Vector.h> >@@ -34,23 +35,22 @@ namespace JSC { > class CodeBlock; > class UnlinkedCodeBlock; > struct Instruction; >-struct UnlinkedInstruction; > > class BytecodeBasicBlock { > WTF_MAKE_FAST_ALLOCATED; > public: > enum SpecialBlockType { EntryBlock, ExitBlock }; >- BytecodeBasicBlock(unsigned start, unsigned length); >+ BytecodeBasicBlock(const InstructionStream::Ref&); > BytecodeBasicBlock(SpecialBlockType); > void shrinkToFit(); > > bool isEntryBlock() { return !m_leaderOffset && !m_totalLength; } > bool isExitBlock() { return m_leaderOffset == UINT_MAX && m_totalLength == UINT_MAX; } > >- unsigned leaderOffset() { return m_leaderOffset; } >- unsigned totalLength() { return m_totalLength; } >+ unsigned leaderOffset() const { return m_leaderOffset; } >+ unsigned totalLength() const { return m_totalLength; } > >- const Vector<unsigned>& offsets() const { return m_offsets; } >+ const Vector<InstructionStream::Offset>& offsets() const { return m_offsets; } > > const Vector<BytecodeBasicBlock*>& successors() const { return m_successors; } > >@@ -59,30 +59,30 @@ public: > > unsigned index() const { return m_index; } > >- static void compute(CodeBlock*, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>&); >- static void compute(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>&); >+ static 
void compute(CodeBlock*, const InstructionStream& instructions, Vector<std::unique_ptr<BytecodeBasicBlock>>&); >+ static void compute(UnlinkedCodeBlock*, const InstructionStream& instructions, Vector<std::unique_ptr<BytecodeBasicBlock>>&); > > private: >- template<typename Block, typename Instruction> static void computeImpl(Block* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks); >+ template<typename Block> static void computeImpl(Block* codeBlock, const InstructionStream& instructions, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks); > > void addSuccessor(BytecodeBasicBlock* block) { m_successors.append(block); } > >- void addLength(unsigned); >+ void addLength(size_t); > >- unsigned m_leaderOffset; >- unsigned m_totalLength; >+ InstructionStream::Offset m_leaderOffset; >+ size_t m_totalLength; > unsigned m_index; > >- Vector<unsigned> m_offsets; >+ Vector<InstructionStream::Offset> m_offsets; > Vector<BytecodeBasicBlock*> m_successors; > > FastBitVector m_in; > FastBitVector m_out; > }; > >-inline BytecodeBasicBlock::BytecodeBasicBlock(unsigned start, unsigned length) >- : m_leaderOffset(start) >- , m_totalLength(length) >+inline BytecodeBasicBlock::BytecodeBasicBlock(const InstructionStream::Ref& instruction) >+ : m_leaderOffset(instruction.offset()) >+ , m_totalLength(instruction->size()) > { > m_offsets.append(m_leaderOffset); > } >@@ -93,7 +93,7 @@ inline BytecodeBasicBlock::BytecodeBasicBlock(BytecodeBasicBlock::SpecialBlockTy > { > } > >-inline void BytecodeBasicBlock::addLength(unsigned bytecodeLength) >+inline void BytecodeBasicBlock::addLength(size_t bytecodeLength) > { > m_offsets.append(m_leaderOffset + m_totalLength); > m_totalLength += bytecodeLength; >diff --git a/Source/JavaScriptCore/bytecode/BytecodeDumper.cpp b/Source/JavaScriptCore/bytecode/BytecodeDumper.cpp >index 1eddbf361f94c865a80124848b9a3235b96c1bab..a7ce3ebf025cd7ae5b89a1b6d8b5f176db840f81 100644 
>--- a/Source/JavaScriptCore/bytecode/BytecodeDumper.cpp >+++ b/Source/JavaScriptCore/bytecode/BytecodeDumper.cpp >@@ -28,6 +28,7 @@ > #include "BytecodeDumper.h" > > #include "ArithProfile.h" >+#include "BytecodeStructs.h" > #include "CallLinkStatus.h" > #include "CodeBlock.h" > #include "Error.h" >@@ -41,203 +42,6 @@ > > namespace JSC { > >-static StructureID getStructureID(const Instruction& instruction) >-{ >- return instruction.u.structureID; >-} >- >-static StructureID getStructureID(const UnlinkedInstruction&) >-{ >- return 0; >-} >- >-static Special::Pointer getSpecialPointer(const Instruction& instruction) >-{ >- return instruction.u.specialPointer; >-} >- >-static Special::Pointer getSpecialPointer(const UnlinkedInstruction& instruction) >-{ >- return static_cast<Special::Pointer>(instruction.u.operand); >-} >- >-static PutByIdFlags getPutByIdFlags(const Instruction& instruction) >-{ >- return instruction.u.putByIdFlags; >-} >- >-static PutByIdFlags getPutByIdFlags(const UnlinkedInstruction& instruction) >-{ >- return static_cast<PutByIdFlags>(instruction.u.operand); >-} >- >-static ToThisStatus getToThisStatus(const Instruction& instruction) >-{ >- return instruction.u.toThisStatus; >-} >- >-static ToThisStatus getToThisStatus(const UnlinkedInstruction& instruction) >-{ >- return static_cast<ToThisStatus>(instruction.u.operand); >-} >- >-static void* getPointer(const Instruction& instruction) >-{ >- return instruction.u.pointer; >-} >- >-static void* getPointer(const UnlinkedInstruction&) >-{ >- return nullptr; >-} >- >-static StructureChain* getStructureChain(const Instruction& instruction) >-{ >- return instruction.u.structureChain.get(); >-} >- >-static StructureChain* getStructureChain(const UnlinkedInstruction&) >-{ >- return nullptr; >-} >- >-static Structure* getStructure(const Instruction& instruction) >-{ >- return instruction.u.structure.get(); >-} >- >-static Structure* getStructure(const UnlinkedInstruction&) >-{ >- return nullptr; >-} >- 
>-static LLIntCallLinkInfo* getCallLinkInfo(const Instruction& instruction) >-{ >- return instruction.u.callLinkInfo; >-} >- >-static LLIntCallLinkInfo* getCallLinkInfo(const UnlinkedInstruction&) >-{ >- return nullptr; >-} >- >-static BasicBlockLocation* getBasicBlockLocation(const Instruction& instruction) >-{ >- return instruction.u.basicBlockLocation; >-} >- >-static BasicBlockLocation* getBasicBlockLocation(const UnlinkedInstruction&) >-{ >- return nullptr; >-} >- >-template<class Block> >-void* BytecodeDumper<Block>::actualPointerFor(Special::Pointer) const >-{ >- return nullptr; >-} >- >-template<> >-void* BytecodeDumper<CodeBlock>::actualPointerFor(Special::Pointer pointer) const >-{ >- return block()->globalObject()->actualPointerFor(pointer); >-} >- >-static void beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling) >-{ >- if (hasPrintedProfiling) { >- out.print("; "); >- return; >- } >- >- out.print(" "); >- hasPrintedProfiling = true; >-} >- >-template<class Block> >-void BytecodeDumper<Block>::dumpValueProfiling(PrintStream&, const typename Block::Instruction*& it, bool&) >-{ >- ++it; >-} >- >-template<> >-void BytecodeDumper<CodeBlock>::dumpValueProfiling(PrintStream& out, const typename CodeBlock::Instruction*& it, bool& hasPrintedProfiling) >-{ >- ConcurrentJSLocker locker(block()->m_lock); >- >- ++it; >- CString description = it->u.profile->briefDescription(locker); >- if (!description.length()) >- return; >- beginDumpProfiling(out, hasPrintedProfiling); >- out.print(description); >-} >- >-template<class Block> >-void BytecodeDumper<Block>::dumpArrayProfiling(PrintStream&, const typename Block::Instruction*& it, bool&) >-{ >- ++it; >-} >- >-template<> >-void BytecodeDumper<CodeBlock>::dumpArrayProfiling(PrintStream& out, const typename CodeBlock::Instruction*& it, bool& hasPrintedProfiling) >-{ >- ConcurrentJSLocker locker(block()->m_lock); >- >- ++it; >- if (!it->u.arrayProfile) >- return; >- CString description = 
it->u.arrayProfile->briefDescription(locker, block()); >- if (!description.length()) >- return; >- beginDumpProfiling(out, hasPrintedProfiling); >- out.print(description); >-} >- >-template<class Block> >-void BytecodeDumper<Block>::dumpProfilesForBytecodeOffset(PrintStream&, unsigned, bool&) >-{ >-} >- >-static void dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling) >-{ >- if (!profile || !profile->m_counter) >- return; >- >- beginDumpProfiling(out, hasPrintedProfiling); >- out.print(name, profile->m_counter); >-} >- >-static void dumpArithProfile(PrintStream& out, ArithProfile* profile, bool& hasPrintedProfiling) >-{ >- if (!profile) >- return; >- >- beginDumpProfiling(out, hasPrintedProfiling); >- out.print("results: ", *profile); >-} >- >-template<> >-void BytecodeDumper<CodeBlock>::dumpProfilesForBytecodeOffset(PrintStream& out, unsigned location, bool& hasPrintedProfiling) >-{ >- dumpRareCaseProfile(out, "rare case: ", block()->rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling); >- { >- dumpArithProfile(out, block()->arithProfileForBytecodeOffset(location), hasPrintedProfiling); >- } >- >-#if ENABLE(DFG_JIT) >- Vector<DFG::FrequentExitSite> exitSites = block()->unlinkedCodeBlock()->exitProfile().exitSitesFor(location); >- if (!exitSites.isEmpty()) { >- out.print(" !! 
frequent exits: "); >- CommaPrinter comma; >- for (auto& exitSite : exitSites) >- out.print(comma, exitSite.kind(), " ", exitSite.jitType()); >- } >-#else // ENABLE(DFG_JIT) >- UNUSED_PARAM(location); >-#endif // ENABLE(DFG_JIT) >-} >- > template<class Block> > VM* BytecodeDumper<Block>::vm() const > { >@@ -250,17 +54,12 @@ const Identifier& BytecodeDumper<Block>::identifier(int index) const > return block()->identifier(index); > } > >-template<class Instruction> >-static void printLocationAndOp(PrintStream& out, int location, const Instruction*&, const char* op) >-{ >- out.printf("[%4d] %-17s ", location, op); >-} >- > static ALWAYS_INLINE bool isConstantRegisterIndex(int index) > { > return index >= FirstConstantRegisterIndex; > } > >+/* > NEVER_INLINE static const char* debugHookName(int debugHookType) > { > switch (static_cast<DebugHookType>(debugHookType)) { >@@ -283,6 +82,7 @@ NEVER_INLINE static const char* debugHookName(int debugHookType) > RELEASE_ASSERT_NOT_REACHED(); > return ""; > } >+*/ > > template<class Block> > CString BytecodeDumper<Block>::registerName(int r) const >@@ -293,10 +93,12 @@ CString BytecodeDumper<Block>::registerName(int r) const > return toCString(VirtualRegister(r)); > } > >+/* > static CString idName(int id0, const Identifier& ident) > { > return toCString(ident.impl(), "(@id", id0, ")"); > } >+*/ > > template<class Block> > CString BytecodeDumper<Block>::constantName(int index) const >@@ -306,1473 +108,42 @@ CString BytecodeDumper<Block>::constantName(int index) const > } > > template<class Block> >-void BytecodeDumper<Block>::printUnaryOp(PrintStream& out, int location, const typename Block::Instruction*& it, const char* op) >-{ >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- >- printLocationAndOp(out, location, it, op); >- out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); >-} >- >-template<class Block> >-void BytecodeDumper<Block>::printBinaryOp(PrintStream& out, int location, const 
typename Block::Instruction*& it, const char* op) >+void BytecodeDumper<Block>::printLocationAndOp(InstructionStream::Offset location, const char* op) > { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- printLocationAndOp(out, location, it, op); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); >+ m_out.printf("[%4lu] %-17s ", location, op); > } > > template<class Block> >-void BytecodeDumper<Block>::printConditionalJump(PrintStream& out, const typename Block::Instruction*, const typename Block::Instruction*& it, int location, const char* op) >+void BytecodeDumper<Block>::dumpBytecode(const InstructionStream::Ref& it, const ICStatusMap&) > { >- int r0 = (++it)->u.operand; >- int offset = (++it)->u.operand; >- printLocationAndOp(out, location, it, op); >- out.printf("%s, %d(->%d)", registerName(r0).data(), offset, location + offset); >+ ::JSC::dumpBytecode(this, it.offset(), it.ptr()); > } > > template<class Block> >-void BytecodeDumper<Block>::printCompareJump(PrintStream& out, const typename Block::Instruction*, const typename Block::Instruction*& it, int location, const char* op) >+void BytecodeDumper<Block>::dumpBytecode(Block* block, PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap) > { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int offset = (++it)->u.operand; >- printLocationAndOp(out, location, it, op); >- out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset); >+ BytecodeDumper dumper(block, out); >+ dumper.dumpBytecode(it, statusMap); > } > > template<class Block> >-void BytecodeDumper<Block>::printGetByIdOp(PrintStream& out, int location, const typename Block::Instruction*& it) >-{ >- const char* op; >- switch (Interpreter::getOpcodeID(*it)) { >- case op_get_by_id: >- op = "get_by_id"; >- break; >- case op_get_by_id_proto_load: >- op = 
"get_by_id_proto_load"; >- break; >- case op_get_by_id_unset: >- op = "get_by_id_unset"; >- break; >- case op_get_array_length: >- op = "array_length"; >- break; >- default: >- RELEASE_ASSERT_NOT_REACHED(); >-#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE) >- op = 0; >-#endif >- } >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, op); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data()); >- it += 4; // Increment up to the value profiler. >-} >- >-static void dumpStructure(PrintStream& out, const char* name, Structure* structure, const Identifier& ident) >-{ >- if (!structure) >- return; >- >- out.printf("%s = %p", name, structure); >- >- PropertyOffset offset = structure->getConcurrently(ident.impl()); >- if (offset != invalidOffset) >- out.printf(" (offset = %d)", offset); >-} >- >-static void dumpChain(PrintStream& out, StructureChain* chain, const Identifier& ident) >-{ >- out.printf("chain = %p: [", chain); >- bool first = true; >- for (WriteBarrier<Structure>* currentStructure = chain->head(); *currentStructure; ++currentStructure) { >- if (first) >- first = false; >- else >- out.printf(", "); >- dumpStructure(out, "struct", currentStructure->get(), ident); >- } >- out.printf("]"); >-} >- >-template<class Block> >-void BytecodeDumper<Block>::printGetByIdCacheStatus(PrintStream& out, int location, const ICStatusMap& statusMap) >-{ >- const auto* instruction = instructionsBegin() + location; >- >- const Identifier& ident = identifier(instruction[3].u.operand); >- >- UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations. 
>- >- if (Interpreter::getOpcodeID(instruction[0]) == op_get_array_length) >- out.printf(" llint(array_length)"); >- else if (StructureID structureID = getStructureID(instruction[4])) { >- Structure* structure = vm()->heap.structureIDTable().get(structureID); >- out.printf(" llint("); >- dumpStructure(out, "struct", structure, ident); >- out.printf(")"); >- if (Interpreter::getOpcodeID(instruction[0]) == op_get_by_id_proto_load) >- out.printf(" proto(%p)", getPointer(instruction[6])); >- } >- >-#if ENABLE(JIT) >- if (StructureStubInfo* stubPtr = statusMap.get(CodeOrigin(location)).stubInfo) { >- StructureStubInfo& stubInfo = *stubPtr; >- if (stubInfo.resetByGC) >- out.print(" (Reset By GC)"); >- >- out.printf(" jit("); >- >- Structure* baseStructure = nullptr; >- PolymorphicAccess* stub = nullptr; >- >- switch (stubInfo.cacheType) { >- case CacheType::GetByIdSelf: >- out.printf("self"); >- baseStructure = stubInfo.u.byIdSelf.baseObjectStructure.get(); >- break; >- case CacheType::Stub: >- out.printf("stub"); >- stub = stubInfo.u.stub; >- break; >- case CacheType::Unset: >- out.printf("unset"); >- break; >- case CacheType::ArrayLength: >- out.printf("ArrayLength"); >- break; >- default: >- RELEASE_ASSERT_NOT_REACHED(); >- break; >- } >- >- if (baseStructure) { >- out.printf(", "); >- dumpStructure(out, "struct", baseStructure, ident); >- } >- >- if (stub) >- out.print(", ", *stub); >- >- out.printf(")"); >- } >-#else >- UNUSED_PARAM(statusMap); >-#endif >-} >- >-template<class Block> >-void BytecodeDumper<Block>::printPutByIdCacheStatus(PrintStream& out, int location, const ICStatusMap& statusMap) >-{ >- const auto* instruction = instructionsBegin() + location; >- >- const Identifier& ident = identifier(instruction[2].u.operand); >- >- UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations. 
>- >- out.print(", ", getPutByIdFlags(instruction[8])); >- >- if (StructureID structureID = getStructureID(instruction[4])) { >- Structure* structure = vm()->heap.structureIDTable().get(structureID); >- out.print(" llint("); >- if (StructureID newStructureID = getStructureID(instruction[6])) { >- Structure* newStructure = vm()->heap.structureIDTable().get(newStructureID); >- dumpStructure(out, "prev", structure, ident); >- out.print(", "); >- dumpStructure(out, "next", newStructure, ident); >- if (StructureChain* chain = getStructureChain(instruction[7])) { >- out.print(", "); >- dumpChain(out, chain, ident); >- } >- } else >- dumpStructure(out, "struct", structure, ident); >- out.print(")"); >- } >- >-#if ENABLE(JIT) >- if (StructureStubInfo* stubPtr = statusMap.get(CodeOrigin(location)).stubInfo) { >- StructureStubInfo& stubInfo = *stubPtr; >- if (stubInfo.resetByGC) >- out.print(" (Reset By GC)"); >- >- out.printf(" jit("); >- >- switch (stubInfo.cacheType) { >- case CacheType::PutByIdReplace: >- out.print("replace, "); >- dumpStructure(out, "struct", stubInfo.u.byIdSelf.baseObjectStructure.get(), ident); >- break; >- case CacheType::Stub: { >- out.print("stub, ", *stubInfo.u.stub); >- break; >- } >- case CacheType::Unset: >- out.printf("unset"); >- break; >- default: >- RELEASE_ASSERT_NOT_REACHED(); >- break; >- } >- out.printf(")"); >- } >-#else >- UNUSED_PARAM(statusMap); >-#endif >-} >- >-template<class Block> >-void BytecodeDumper<Block>::printInByIdCacheStatus(PrintStream& out, int location, const ICStatusMap& statusMap) >-{ >- const auto* instruction = instructionsBegin() + location; >- >- const Identifier& ident = identifier(instruction[3].u.operand); >- >- UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations. 
>- >-#if ENABLE(JIT) >- if (StructureStubInfo* stubPtr = statusMap.get(CodeOrigin(location)).stubInfo) { >- StructureStubInfo& stubInfo = *stubPtr; >- if (stubInfo.resetByGC) >- out.print(" (Reset By GC)"); >- >- out.printf(" jit("); >- >- Structure* baseStructure = nullptr; >- PolymorphicAccess* stub = nullptr; >- >- switch (stubInfo.cacheType) { >- case CacheType::InByIdSelf: >- out.printf("self"); >- baseStructure = stubInfo.u.byIdSelf.baseObjectStructure.get(); >- break; >- case CacheType::Stub: >- out.printf("stub"); >- stub = stubInfo.u.stub; >- break; >- case CacheType::Unset: >- out.printf("unset"); >- break; >- default: >- RELEASE_ASSERT_NOT_REACHED(); >- break; >- } >- >- if (baseStructure) { >- out.printf(", "); >- dumpStructure(out, "struct", baseStructure, ident); >- } >- >- if (stub) >- out.print(", ", *stub); >- >- out.printf(")"); >- } >-#else >- UNUSED_PARAM(out); >- UNUSED_PARAM(statusMap); >-#endif >-} >- >-#if ENABLE(JIT) >-template<typename Block> >-void BytecodeDumper<Block>::dumpCallLinkStatus(PrintStream&, unsigned, const ICStatusMap&) >-{ >-} >- >-template<> >-void BytecodeDumper<CodeBlock>::dumpCallLinkStatus(PrintStream& out, unsigned location, const ICStatusMap& statusMap) >-{ >- if (block()->jitType() != JITCode::FTLJIT) >- out.print(" status(", CallLinkStatus::computeFor(block(), location, statusMap), ")"); >-} >-#endif >- >-template<class Block> >-void BytecodeDumper<Block>::printCallOp(PrintStream& out, int location, const typename Block::Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling, const ICStatusMap& statusMap) >-{ >- int dst = (++it)->u.operand; >- int func = (++it)->u.operand; >- int argCount = (++it)->u.operand; >- int registerOffset = (++it)->u.operand; >- printLocationAndOp(out, location, it, op); >- out.print(registerName(dst), ", ", registerName(func), ", ", argCount, ", ", registerOffset); >- out.print(" (this at ", virtualRegisterForArgument(0, -registerOffset), ")"); >- if 
(cacheDumpMode == DumpCaches) { >- LLIntCallLinkInfo* callLinkInfo = getCallLinkInfo(it[1]); >- if (callLinkInfo->lastSeenCallee) { >- JSObject* object = callLinkInfo->lastSeenCallee.get(); >- if (auto* function = jsDynamicCast<JSFunction*>(*vm(), object)) >- out.printf(" llint(%p, exec %p)", function, function->executable()); >- else >- out.printf(" llint(%p)", object); >- } >-#if ENABLE(JIT) >- if (CallLinkInfo* info = statusMap.get(CodeOrigin(location)).callLinkInfo) { >- if (info->haveLastSeenCallee()) { >- JSObject* object = info->lastSeenCallee(); >- if (auto* function = jsDynamicCast<JSFunction*>(*vm(), object)) >- out.printf(" jit(%p, exec %p)", function, function->executable()); >- else >- out.printf(" jit(%p)", object); >- } >- } >- >- dumpCallLinkStatus(out, location, statusMap); >-#else >- UNUSED_PARAM(statusMap); >-#endif >- } >- ++it; >- ++it; >- dumpArrayProfiling(out, it, hasPrintedProfiling); >- dumpValueProfiling(out, it, hasPrintedProfiling); >-} >- >-template<class Block> >-void BytecodeDumper<Block>::printPutByIdOp(PrintStream& out, int location, const typename Block::Instruction*& it, const char* op) >-{ >- int r0 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- printLocationAndOp(out, location, it, op); >- out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data()); >- it += 5; >-} >- >-template<class Block> >-void BytecodeDumper<Block>::printLocationOpAndRegisterOperand(PrintStream& out, int location, const typename Block::Instruction*& it, const char* op, int operand) >-{ >- printLocationAndOp(out, location, it, op); >- out.printf("%s", registerName(operand).data()); >-} >- >-template<class Block> >-void BytecodeDumper<Block>::dumpBytecode(PrintStream& out, const typename Block::Instruction* begin, const typename Block::Instruction*& it, const ICStatusMap& statusMap) >-{ >- int location = it - begin; >- bool hasPrintedProfiling = false; >- OpcodeID 
opcode = Interpreter::getOpcodeID(*it); >- switch (opcode) { >- case op_enter: { >- printLocationAndOp(out, location, it, "enter"); >- break; >- } >- case op_get_scope: { >- int r0 = (++it)->u.operand; >- printLocationOpAndRegisterOperand(out, location, it, "get_scope", r0); >- break; >- } >- case op_create_direct_arguments: { >- int r0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "create_direct_arguments"); >- out.printf("%s", registerName(r0).data()); >- break; >- } >- case op_create_scoped_arguments: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "create_scoped_arguments"); >- out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); >- break; >- } >- case op_create_cloned_arguments: { >- int r0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "create_cloned_arguments"); >- out.printf("%s", registerName(r0).data()); >- break; >- } >- case op_argument_count: { >- int r0 = (++it)->u.operand; >- printLocationOpAndRegisterOperand(out, location, it, "argument_count", r0); >- break; >- } >- case op_get_argument: { >- int r0 = (++it)->u.operand; >- int index = (++it)->u.operand; >- printLocationOpAndRegisterOperand(out, location, it, "argument", r0); >- out.printf(", %d", index); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_create_rest: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- unsigned argumentOffset = (++it)->u.unsignedValue; >- printLocationAndOp(out, location, it, "create_rest"); >- out.printf("%s, %s, ", registerName(r0).data(), registerName(r1).data()); >- out.printf("ArgumentsOffset: %u", argumentOffset); >- break; >- } >- case op_get_rest_length: { >- int r0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "get_rest_length"); >- out.printf("%s, ", registerName(r0).data()); >- unsigned argumentOffset = (++it)->u.unsignedValue; >- out.printf("ArgumentsOffset: %u", argumentOffset); >- break; 
>- } >- case op_create_this: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- unsigned inferredInlineCapacity = (++it)->u.operand; >- unsigned cachedFunction = (++it)->u.operand; >- printLocationAndOp(out, location, it, "create_this"); >- out.printf("%s, %s, %u, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity, cachedFunction); >- break; >- } >- case op_to_this: { >- int r0 = (++it)->u.operand; >- printLocationOpAndRegisterOperand(out, location, it, "to_this", r0); >- Structure* structure = getStructure(*(++it)); >- if (structure) >- out.print(", cache(struct = ", RawPointer(structure), ")"); >- out.print(", ", getToThisStatus(*(++it))); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_check_tdz: { >- int r0 = (++it)->u.operand; >- printLocationOpAndRegisterOperand(out, location, it, "op_check_tdz", r0); >- break; >- } >- case op_new_object: { >- int r0 = (++it)->u.operand; >- unsigned inferredInlineCapacity = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_object"); >- out.printf("%s, %u", registerName(r0).data(), inferredInlineCapacity); >- ++it; // Skip object allocation profile. >- break; >- } >- case op_new_array: { >- int dst = (++it)->u.operand; >- int argv = (++it)->u.operand; >- int argc = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_array"); >- out.printf("%s, %s, %d", registerName(dst).data(), registerName(argv).data(), argc); >- ++it; // Skip array allocation profile. 
>- break; >- } >- case op_new_array_with_spread: { >- int dst = (++it)->u.operand; >- int argv = (++it)->u.operand; >- int argc = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_array_with_spread"); >- out.printf("%s, %s, %d, ", registerName(dst).data(), registerName(argv).data(), argc); >- unsigned bitVectorIndex = (++it)->u.unsignedValue; >- const BitVector& bitVector = block()->bitVector(bitVectorIndex); >- out.print("BitVector:", bitVectorIndex, ":"); >- for (unsigned i = 0; i < static_cast<unsigned>(argc); i++) { >- if (bitVector.get(i)) >- out.print("1"); >- else >- out.print("0"); >- } >- break; >- } >- case op_spread: { >- int dst = (++it)->u.operand; >- int arg = (++it)->u.operand; >- printLocationAndOp(out, location, it, "spread"); >- out.printf("%s, %s", registerName(dst).data(), registerName(arg).data()); >- break; >- } >- case op_new_array_with_size: { >- int dst = (++it)->u.operand; >- int length = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_array_with_size"); >- out.printf("%s, %s", registerName(dst).data(), registerName(length).data()); >- ++it; // Skip array allocation profile. >- break; >- } >- case op_new_array_buffer: { >- int dst = (++it)->u.operand; >- int array = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_array_buffer"); >- out.printf("%s, %s", registerName(dst).data(), registerName(array).data()); >- ++it; // Skip array allocation profile. 
>- break; >- } >- case op_new_regexp: { >- int r0 = (++it)->u.operand; >- int re0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_regexp"); >- out.printf("%s, %s", registerName(r0).data(), registerName(re0).data()); >- break; >- } >- case op_mov: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "mov"); >- out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); >- break; >- } >- case op_profile_type: { >- int r0 = (++it)->u.operand; >- ++it; >- ++it; >- ++it; >- ++it; >- printLocationAndOp(out, location, it, "op_profile_type"); >- out.printf("%s", registerName(r0).data()); >- break; >- } >- case op_profile_control_flow: { >- BasicBlockLocation* basicBlockLocation = getBasicBlockLocation(*(++it)); >- printLocationAndOp(out, location, it, "profile_control_flow"); >- if (basicBlockLocation) >- out.printf("[%d, %d]", basicBlockLocation->startOffset(), basicBlockLocation->endOffset()); >- break; >- } >- case op_not: { >- printUnaryOp(out, location, it, "not"); >- break; >- } >- case op_eq: { >- printBinaryOp(out, location, it, "eq"); >- break; >- } >- case op_eq_null: { >- printUnaryOp(out, location, it, "eq_null"); >- break; >- } >- case op_neq: { >- printBinaryOp(out, location, it, "neq"); >- break; >- } >- case op_neq_null: { >- printUnaryOp(out, location, it, "neq_null"); >- break; >- } >- case op_stricteq: { >- printBinaryOp(out, location, it, "stricteq"); >- break; >- } >- case op_nstricteq: { >- printBinaryOp(out, location, it, "nstricteq"); >- break; >- } >- case op_less: { >- printBinaryOp(out, location, it, "less"); >- break; >- } >- case op_lesseq: { >- printBinaryOp(out, location, it, "lesseq"); >- break; >- } >- case op_greater: { >- printBinaryOp(out, location, it, "greater"); >- break; >- } >- case op_greatereq: { >- printBinaryOp(out, location, it, "greatereq"); >- break; >- } >- case op_below: { >- printBinaryOp(out, location, it, "below"); >- break; >- } >- case 
op_beloweq: { >- printBinaryOp(out, location, it, "beloweq"); >- break; >- } >- case op_inc: { >- int r0 = (++it)->u.operand; >- printLocationOpAndRegisterOperand(out, location, it, "inc", r0); >- break; >- } >- case op_dec: { >- int r0 = (++it)->u.operand; >- printLocationOpAndRegisterOperand(out, location, it, "dec", r0); >- break; >- } >- case op_to_number: { >- printUnaryOp(out, location, it, "to_number"); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_to_string: { >- printUnaryOp(out, location, it, "to_string"); >- break; >- } >- case op_to_object: { >- printUnaryOp(out, location, it, "to_object"); >- int id0 = (++it)->u.operand; >- out.printf(" %s", idName(id0, identifier(id0)).data()); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_negate: { >- printUnaryOp(out, location, it, "negate"); >- ++it; // op_negate has an extra operand for the ArithProfile. >- break; >- } >- case op_add: { >- printBinaryOp(out, location, it, "add"); >- ++it; >- break; >- } >- case op_mul: { >- printBinaryOp(out, location, it, "mul"); >- ++it; >- break; >- } >- case op_div: { >- printBinaryOp(out, location, it, "div"); >- ++it; >- break; >- } >- case op_mod: { >- printBinaryOp(out, location, it, "mod"); >- break; >- } >- case op_pow: { >- printBinaryOp(out, location, it, "pow"); >- break; >- } >- case op_sub: { >- printBinaryOp(out, location, it, "sub"); >- ++it; >- break; >- } >- case op_lshift: { >- printBinaryOp(out, location, it, "lshift"); >- break; >- } >- case op_rshift: { >- printBinaryOp(out, location, it, "rshift"); >- break; >- } >- case op_urshift: { >- printBinaryOp(out, location, it, "urshift"); >- break; >- } >- case op_bitand: { >- printBinaryOp(out, location, it, "bitand"); >- ++it; >- break; >- } >- case op_bitxor: { >- printBinaryOp(out, location, it, "bitxor"); >- ++it; >- break; >- } >- case op_bitor: { >- printBinaryOp(out, location, it, "bitor"); >- ++it; >- break; >- } >- case 
op_overrides_has_instance: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "overrides_has_instance"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); >- break; >- } >- case op_instanceof: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "instanceof"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); >- break; >- } >- case op_instanceof_custom: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- int r3 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "instanceof_custom"); >- out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data()); >- break; >- } >- case op_unsigned: { >- printUnaryOp(out, location, it, "unsigned"); >- break; >- } >- case op_typeof: { >- printUnaryOp(out, location, it, "typeof"); >- break; >- } >- case op_is_empty: { >- printUnaryOp(out, location, it, "is_empty"); >- break; >- } >- case op_is_undefined: { >- printUnaryOp(out, location, it, "is_undefined"); >- break; >- } >- case op_is_boolean: { >- printUnaryOp(out, location, it, "is_boolean"); >- break; >- } >- case op_is_number: { >- printUnaryOp(out, location, it, "is_number"); >- break; >- } >- case op_is_cell_with_type: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int type = (++it)->u.operand; >- printLocationAndOp(out, location, it, "is_cell_with_type"); >- out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), type); >- break; >- } >- case op_is_object: { >- printUnaryOp(out, location, it, "is_object"); >- break; >- } >- case op_is_object_or_null: { >- printUnaryOp(out, location, it, "is_object_or_null"); >- break; >- } >- case 
op_is_function: { >- printUnaryOp(out, location, it, "is_function"); >- break; >- } >- case op_in_by_id: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "in_by_id"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data()); >- printInByIdCacheStatus(out, location, statusMap); >- break; >- } >- case op_in_by_val: { >- printBinaryOp(out, location, it, "in_by_val"); >- dumpArrayProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_try_get_by_id: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "try_get_by_id"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data()); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_get_by_id_direct: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "get_by_id_direct"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data()); >- it += 2; // Increment up to the value profiler. 
>- printGetByIdCacheStatus(out, location, statusMap); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_get_by_id: >- case op_get_by_id_proto_load: >- case op_get_by_id_unset: >- case op_get_array_length: { >- printGetByIdOp(out, location, it); >- printGetByIdCacheStatus(out, location, statusMap); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_get_by_id_with_this: { >- printLocationAndOp(out, location, it, "get_by_id_with_this"); >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), idName(id0, identifier(id0)).data()); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_get_by_val_with_this: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- int r3 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "get_by_val_with_this"); >- out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data()); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_put_by_id: { >- printPutByIdOp(out, location, it, "put_by_id"); >- printPutByIdCacheStatus(out, location, statusMap); >- break; >- } >- case op_put_by_id_with_this: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "put_by_id_with_this"); >- out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data(), registerName(r2).data()); >- break; >- } >- case op_put_by_val_with_this: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- int r3 = (++it)->u.operand; >- printLocationAndOp(out, location, it, 
"put_by_val_with_this"); >- out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data()); >- break; >- } >- case op_put_getter_by_id: { >- int r0 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- int n0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "put_getter_by_id"); >- out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data()); >- break; >- } >- case op_put_setter_by_id: { >- int r0 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- int n0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "put_setter_by_id"); >- out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data()); >- break; >- } >- case op_put_getter_setter_by_id: { >- int r0 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- int n0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "put_getter_setter_by_id"); >- out.printf("%s, %s, %d, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data(), registerName(r2).data()); >- break; >- } >- case op_put_getter_by_val: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int n0 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "put_getter_by_val"); >- out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data()); >- break; >- } >- case op_put_setter_by_val: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int n0 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "put_setter_by_val"); >- out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data()); >- 
break; >- } >- case op_define_data_property: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- int r3 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "define_data_property"); >- out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data()); >- break; >- } >- case op_define_accessor_property: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- int r3 = (++it)->u.operand; >- int r4 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "define_accessor_property"); >- out.printf("%s, %s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), registerName(r4).data()); >- break; >- } >- case op_del_by_id: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "del_by_id"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data()); >- break; >- } >- case op_get_by_val: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "get_by_val"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); >- dumpArrayProfiling(out, it, hasPrintedProfiling); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_put_by_val: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "put_by_val"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); >- dumpArrayProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_put_by_val_direct: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; 
>- printLocationAndOp(out, location, it, "put_by_val_direct"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); >- dumpArrayProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_del_by_val: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "del_by_val"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); >- break; >- } >- case op_jmp: { >- int offset = (++it)->u.operand; >- printLocationAndOp(out, location, it, "jmp"); >- out.printf("%d(->%d)", offset, location + offset); >- break; >- } >- case op_jtrue: { >- printConditionalJump(out, begin, it, location, "jtrue"); >- break; >- } >- case op_jfalse: { >- printConditionalJump(out, begin, it, location, "jfalse"); >- break; >- } >- case op_jeq_null: { >- printConditionalJump(out, begin, it, location, "jeq_null"); >- break; >- } >- case op_jneq_null: { >- printConditionalJump(out, begin, it, location, "jneq_null"); >- break; >- } >- case op_jneq_ptr: { >- int r0 = (++it)->u.operand; >- Special::Pointer pointer = getSpecialPointer(*(++it)); >- int offset = (++it)->u.operand; >- printLocationAndOp(out, location, it, "jneq_ptr"); >- out.printf("%s, %d (%p), %d(->%d)", registerName(r0).data(), pointer, actualPointerFor(pointer), offset, location + offset); >- ++it; >- break; >- } >- case op_jless: { >- printCompareJump(out, begin, it, location, "jless"); >- break; >- } >- case op_jlesseq: { >- printCompareJump(out, begin, it, location, "jlesseq"); >- break; >- } >- case op_jgreater: { >- printCompareJump(out, begin, it, location, "jgreater"); >- break; >- } >- case op_jgreatereq: { >- printCompareJump(out, begin, it, location, "jgreatereq"); >- break; >- } >- case op_jnless: { >- printCompareJump(out, begin, it, location, "jnless"); >- break; >- } >- case op_jnlesseq: { >- printCompareJump(out, begin, it, location, 
"jnlesseq"); >- break; >- } >- case op_jngreater: { >- printCompareJump(out, begin, it, location, "jngreater"); >- break; >- } >- case op_jngreatereq: { >- printCompareJump(out, begin, it, location, "jngreatereq"); >- break; >- } >- case op_jeq: { >- printCompareJump(out, begin, it, location, "jeq"); >- break; >- } >- case op_jneq: { >- printCompareJump(out, begin, it, location, "jneq"); >- break; >- } >- case op_jstricteq: { >- printCompareJump(out, begin, it, location, "jstricteq"); >- break; >- } >- case op_jnstricteq: { >- printCompareJump(out, begin, it, location, "jnstricteq"); >- break; >- } >- case op_jbelow: { >- printCompareJump(out, begin, it, location, "jbelow"); >- break; >- } >- case op_jbeloweq: { >- printCompareJump(out, begin, it, location, "jbeloweq"); >- break; >- } >- case op_loop_hint: { >- printLocationAndOp(out, location, it, "loop_hint"); >- break; >- } >- case op_check_traps: { >- printLocationAndOp(out, location, it, "check_traps"); >- break; >- } >- case op_nop: { >- printLocationAndOp(out, location, it, "nop"); >- break; >- } >- case op_super_sampler_begin: { >- printLocationAndOp(out, location, it, "super_sampler_begin"); >- break; >- } >- case op_super_sampler_end: { >- printLocationAndOp(out, location, it, "super_sampler_end"); >- break; >- } >- case op_log_shadow_chicken_prologue: { >- int r0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "log_shadow_chicken_prologue"); >- out.printf("%s", registerName(r0).data()); >- break; >- } >- case op_log_shadow_chicken_tail: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "log_shadow_chicken_tail"); >- out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); >- break; >- } >- case op_switch_imm: { >- int tableIndex = (++it)->u.operand; >- int defaultTarget = (++it)->u.operand; >- int scrutineeRegister = (++it)->u.operand; >- printLocationAndOp(out, location, it, "switch_imm"); >- out.printf("%d, %d(->%d), 
%s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data()); >- break; >- } >- case op_switch_char: { >- int tableIndex = (++it)->u.operand; >- int defaultTarget = (++it)->u.operand; >- int scrutineeRegister = (++it)->u.operand; >- printLocationAndOp(out, location, it, "switch_char"); >- out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data()); >- break; >- } >- case op_switch_string: { >- int tableIndex = (++it)->u.operand; >- int defaultTarget = (++it)->u.operand; >- int scrutineeRegister = (++it)->u.operand; >- printLocationAndOp(out, location, it, "switch_string"); >- out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data()); >- break; >- } >- case op_new_func: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int f0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_func"); >- out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); >- break; >- } >- case op_new_generator_func: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int f0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_generator_func"); >- out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); >- break; >- } >- case op_new_async_func: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int f0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_async_func"); >- out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); >- break; >- } >- case op_new_async_generator_func: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int f0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_async_generator_func"); >- out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); >- break; >- } >- case op_new_func_exp: { >- int 
r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int f0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_func_exp"); >- out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); >- break; >- } >- case op_new_generator_func_exp: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int f0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_generator_func_exp"); >- out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); >- break; >- } >- case op_new_async_func_exp: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int f0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_async_func_exp"); >- out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); >- break; >- } >- case op_new_async_generator_func_exp: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int f0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "op_new_async_generator_func_exp"); >- out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); >- break; >- } >- case op_set_function_name: { >- int funcReg = (++it)->u.operand; >- int nameReg = (++it)->u.operand; >- printLocationAndOp(out, location, it, "set_function_name"); >- out.printf("%s, %s", registerName(funcReg).data(), registerName(nameReg).data()); >- break; >- } >- case op_call: { >- printCallOp(out, location, it, "call", DumpCaches, hasPrintedProfiling, statusMap); >- break; >- } >- case op_tail_call: { >- printCallOp(out, location, it, "tail_call", DumpCaches, hasPrintedProfiling, statusMap); >- break; >- } >- case op_call_eval: { >- printCallOp(out, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, statusMap); >- break; >- } >- >- case op_construct_varargs: >- case op_call_varargs: >- case op_tail_call_varargs: >- case op_tail_call_forward_arguments: { >- int result = (++it)->u.operand; >- int callee = (++it)->u.operand; >- 
int thisValue = (++it)->u.operand; >- int arguments = (++it)->u.operand; >- int firstFreeRegister = (++it)->u.operand; >- int varArgOffset = (++it)->u.operand; >- ++it; >- const char* opName; >- if (opcode == op_call_varargs) >- opName = "call_varargs"; >- else if (opcode == op_construct_varargs) >- opName = "construct_varargs"; >- else if (opcode == op_tail_call_varargs) >- opName = "tail_call_varargs"; >- else if (opcode == op_tail_call_forward_arguments) >- opName = "tail_call_forward_arguments"; >- else >- RELEASE_ASSERT_NOT_REACHED(); >- >- printLocationAndOp(out, location, it, opName); >- out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- >- case op_ret: { >- int r0 = (++it)->u.operand; >- printLocationOpAndRegisterOperand(out, location, it, "ret", r0); >- break; >- } >- case op_construct: { >- printCallOp(out, location, it, "construct", DumpCaches, hasPrintedProfiling, statusMap); >- break; >- } >- case op_strcat: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int count = (++it)->u.operand; >- printLocationAndOp(out, location, it, "strcat"); >- out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), count); >- break; >- } >- case op_to_primitive: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "to_primitive"); >- out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); >- break; >- } >- case op_get_enumerable_length: { >- int dst = it[1].u.operand; >- int base = it[2].u.operand; >- printLocationAndOp(out, location, it, "op_get_enumerable_length"); >- out.printf("%s, %s", registerName(dst).data(), registerName(base).data()); >- it += OPCODE_LENGTH(op_get_enumerable_length) - 1; >- break; >- } >- case op_has_indexed_property: { >- int dst = 
(++it)->u.operand; >- int base = (++it)->u.operand; >- int propertyName = (++it)->u.operand; >- printLocationAndOp(out, location, it, "op_has_indexed_property"); >- out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data()); >- dumpArrayProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_has_structure_property: { >- int dst = it[1].u.operand; >- int base = it[2].u.operand; >- int propertyName = it[3].u.operand; >- int enumerator = it[4].u.operand; >- printLocationAndOp(out, location, it, "op_has_structure_property"); >- out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(enumerator).data()); >- it += OPCODE_LENGTH(op_has_structure_property) - 1; >- break; >- } >- case op_has_generic_property: { >- int dst = it[1].u.operand; >- int base = it[2].u.operand; >- int propertyName = it[3].u.operand; >- printLocationAndOp(out, location, it, "op_has_generic_property"); >- out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data()); >- it += OPCODE_LENGTH(op_has_generic_property) - 1; >- break; >- } >- case op_get_direct_pname: { >- int dst = (++it)->u.operand; >- int base = (++it)->u.operand; >- int propertyName = (++it)->u.operand; >- int index = (++it)->u.operand; >- int enumerator = (++it)->u.operand; >- printLocationAndOp(out, location, it, "op_get_direct_pname"); >- out.printf("%s, %s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(index).data(), registerName(enumerator).data()); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- >- } >- case op_get_property_enumerator: { >- int dst = it[1].u.operand; >- int base = it[2].u.operand; >- printLocationAndOp(out, location, it, "op_get_property_enumerator"); >- out.printf("%s, %s", registerName(dst).data(), registerName(base).data()); >- it += 
OPCODE_LENGTH(op_get_property_enumerator) - 1; >- break; >- } >- case op_enumerator_structure_pname: { >- int dst = it[1].u.operand; >- int enumerator = it[2].u.operand; >- int index = it[3].u.operand; >- printLocationAndOp(out, location, it, "op_enumerator_structure_pname"); >- out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data()); >- it += OPCODE_LENGTH(op_enumerator_structure_pname) - 1; >- break; >- } >- case op_enumerator_generic_pname: { >- int dst = it[1].u.operand; >- int enumerator = it[2].u.operand; >- int index = it[3].u.operand; >- printLocationAndOp(out, location, it, "op_enumerator_generic_pname"); >- out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data()); >- it += OPCODE_LENGTH(op_enumerator_generic_pname) - 1; >- break; >- } >- case op_to_index_string: { >- int dst = it[1].u.operand; >- int index = it[2].u.operand; >- printLocationAndOp(out, location, it, "op_to_index_string"); >- out.printf("%s, %s", registerName(dst).data(), registerName(index).data()); >- it += OPCODE_LENGTH(op_to_index_string) - 1; >- break; >- } >- case op_push_with_scope: { >- int dst = (++it)->u.operand; >- int newScope = (++it)->u.operand; >- int currentScope = (++it)->u.operand; >- printLocationAndOp(out, location, it, "push_with_scope"); >- out.printf("%s, %s, %s", registerName(dst).data(), registerName(newScope).data(), registerName(currentScope).data()); >- break; >- } >- case op_get_parent_scope: { >- int dst = (++it)->u.operand; >- int parentScope = (++it)->u.operand; >- printLocationAndOp(out, location, it, "get_parent_scope"); >- out.printf("%s, %s", registerName(dst).data(), registerName(parentScope).data()); >- break; >- } >- case op_create_lexical_environment: { >- int dst = (++it)->u.operand; >- int scope = (++it)->u.operand; >- int symbolTable = (++it)->u.operand; >- int initialValue = (++it)->u.operand; >- printLocationAndOp(out, location, it, 
"create_lexical_environment"); >- out.printf("%s, %s, %s, %s", >- registerName(dst).data(), registerName(scope).data(), registerName(symbolTable).data(), registerName(initialValue).data()); >- break; >- } >- case op_catch: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- void* pointer = getPointer(*(++it)); >- printLocationAndOp(out, location, it, "catch"); >- out.printf("%s, %s, %p", registerName(r0).data(), registerName(r1).data(), pointer); >- break; >- } >- case op_throw: { >- int r0 = (++it)->u.operand; >- printLocationOpAndRegisterOperand(out, location, it, "throw", r0); >- break; >- } >- case op_throw_static_error: { >- int r0 = (++it)->u.operand; >- ErrorType k1 = static_cast<ErrorType>((++it)->u.unsignedValue); >- printLocationAndOp(out, location, it, "throw_static_error"); >- out.printf("%s, ", registerName(r0).data()); >- out.print(k1); >- break; >- } >- case op_debug: { >- int debugHookType = (++it)->u.operand; >- int hasBreakpointFlag = (++it)->u.operand; >- printLocationAndOp(out, location, it, "debug"); >- out.printf("%s, %d", debugHookName(debugHookType), hasBreakpointFlag); >- break; >- } >- case op_identity_with_profile: { >- int r0 = (++it)->u.operand; >- ++it; // Profile top half >- ++it; // Profile bottom half >- printLocationAndOp(out, location, it, "identity_with_profile"); >- out.printf("%s", registerName(r0).data()); >- break; >- } >- case op_unreachable: { >- printLocationAndOp(out, location, it, "unreachable"); >- break; >- } >- case op_end: { >- int r0 = (++it)->u.operand; >- printLocationOpAndRegisterOperand(out, location, it, "end", r0); >- break; >- } >- case op_resolve_scope_for_hoisting_func_decl_in_eval: { >- int r0 = (++it)->u.operand; >- int scope = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "resolve_scope_for_hoisting_func_decl_in_eval"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data()); >- 
break; >- } >- case op_resolve_scope: { >- int r0 = (++it)->u.operand; >- int scope = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- ResolveType resolveType = static_cast<ResolveType>((++it)->u.operand); >- int depth = (++it)->u.operand; >- void* pointer = getPointer(*(++it)); >- printLocationAndOp(out, location, it, "resolve_scope"); >- out.printf("%s, %s, %s, <%s>, %d, %p", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data(), resolveTypeName(resolveType), depth, pointer); >- break; >- } >- case op_get_from_scope: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand); >- ++it; // Structure >- int operand = (++it)->u.operand; // Operand >- printLocationAndOp(out, location, it, "get_from_scope"); >- out.print(registerName(r0), ", ", registerName(r1)); >- if (static_cast<unsigned>(id0) == UINT_MAX) >- out.print(", anonymous"); >- else >- out.print(", ", idName(id0, identifier(id0))); >- out.print(", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, ", operand); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_put_to_scope: { >- int r0 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand); >- ++it; // Structure >- int operand = (++it)->u.operand; // Operand >- printLocationAndOp(out, location, it, "put_to_scope"); >- out.print(registerName(r0)); >- if (static_cast<unsigned>(id0) == UINT_MAX) >- out.print(", anonymous"); >- else >- out.print(", ", idName(id0, identifier(id0))); >- out.print(", ", registerName(r1), ", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", 
initializationModeName(getPutInfo.initializationMode()), ">, <structure>, ", operand); >- break; >- } >- case op_get_from_arguments: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int offset = (++it)->u.operand; >- printLocationAndOp(out, location, it, "get_from_arguments"); >- out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), offset); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_put_to_arguments: { >- int r0 = (++it)->u.operand; >- int offset = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "put_to_arguments"); >- out.printf("%s, %d, %s", registerName(r0).data(), offset, registerName(r1).data()); >- break; >- } >- case op_yield: { >- int r0 = (++it)->u.operand; >- unsigned yieldPoint = (++it)->u.unsignedValue; >- int r1 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "yield"); >- out.printf("%s, %u, %s", registerName(r0).data(), yieldPoint, registerName(r1).data()); >- break; >- } >- default: >- RELEASE_ASSERT_NOT_REACHED(); >- } >- dumpProfilesForBytecodeOffset(out, location, hasPrintedProfiling); >- out.print("\n"); >-} >- >-template<class Block> >-void BytecodeDumper<Block>::dumpBytecode(Block* block, PrintStream& out, const typename Block::Instruction* begin, const typename Block::Instruction*& it, const ICStatusMap& statusMap) >-{ >- BytecodeDumper dumper(block, begin); >- dumper.dumpBytecode(out, begin, it, statusMap); >-} >- >-template<class Block> >-void BytecodeDumper<Block>::dumpIdentifiers(PrintStream& out) >+void BytecodeDumper<Block>::dumpIdentifiers() > { > if (size_t count = block()->numberOfIdentifiers()) { >- out.printf("\nIdentifiers:\n"); >+ m_out.printf("\nIdentifiers:\n"); > size_t i = 0; > do { >- out.printf(" id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data()); >+ m_out.printf(" id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data()); > ++i; > } while (i != 
count); > } > } > > template<class Block> >-void BytecodeDumper<Block>::dumpConstants(PrintStream& out) >+void BytecodeDumper<Block>::dumpConstants() > { > if (!block()->constantRegisters().isEmpty()) { >- out.printf("\nConstants:\n"); >+ m_out.printf("\nConstants:\n"); > size_t i = 0; > for (const auto& constant : block()->constantRegisters()) { > const char* sourceCodeRepresentationDescription = nullptr; >@@ -1787,94 +158,94 @@ void BytecodeDumper<Block>::dumpConstants(PrintStream& out) > sourceCodeRepresentationDescription = ""; > break; > } >- out.printf(" k%u = %s%s\n", static_cast<unsigned>(i), toCString(constant.get()).data(), sourceCodeRepresentationDescription); >+ m_out.printf(" k%u = %s%s\n", static_cast<unsigned>(i), toCString(constant.get()).data(), sourceCodeRepresentationDescription); > ++i; > } > } > } > > template<class Block> >-void BytecodeDumper<Block>::dumpExceptionHandlers(PrintStream& out) >+void BytecodeDumper<Block>::dumpExceptionHandlers() > { > if (unsigned count = block()->numberOfExceptionHandlers()) { >- out.printf("\nException Handlers:\n"); >+ m_out.printf("\nException Handlers:\n"); > unsigned i = 0; > do { > const auto& handler = block()->exceptionHandler(i); >- out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] } %s\n", i + 1, handler.start, handler.end, handler.target, handler.typeName()); >+ m_out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] } %s\n", i + 1, handler.start, handler.end, handler.target, handler.typeName()); > ++i; > } while (i < count); > } > } > > template<class Block> >-void BytecodeDumper<Block>::dumpSwitchJumpTables(PrintStream& out) >+void BytecodeDumper<Block>::dumpSwitchJumpTables() > { > if (unsigned count = block()->numberOfSwitchJumpTables()) { >- out.printf("Switch Jump Tables:\n"); >+ m_out.printf("Switch Jump Tables:\n"); > unsigned i = 0; > do { >- out.printf(" %1d = {\n", i); >+ m_out.printf(" %1d = {\n", i); > const auto& switchJumpTable = block()->switchJumpTable(i); > int entry = 
0; > auto end = switchJumpTable.branchOffsets.end(); > for (auto iter = switchJumpTable.branchOffsets.begin(); iter != end; ++iter, ++entry) { > if (!*iter) > continue; >- out.printf("\t\t%4d => %04d\n", entry + switchJumpTable.min, *iter); >+ m_out.printf("\t\t%4d => %04d\n", entry + switchJumpTable.min, *iter); > } >- out.printf(" }\n"); >+ m_out.printf(" }\n"); > ++i; > } while (i < count); > } > } > > template<class Block> >-void BytecodeDumper<Block>::dumpStringSwitchJumpTables(PrintStream& out) >+void BytecodeDumper<Block>::dumpStringSwitchJumpTables() > { > if (unsigned count = block()->numberOfStringSwitchJumpTables()) { >- out.printf("\nString Switch Jump Tables:\n"); >+ m_out.printf("\nString Switch Jump Tables:\n"); > unsigned i = 0; > do { >- out.printf(" %1d = {\n", i); >+ m_out.printf(" %1d = {\n", i); > const auto& stringSwitchJumpTable = block()->stringSwitchJumpTable(i); > auto end = stringSwitchJumpTable.offsetTable.end(); > for (auto iter = stringSwitchJumpTable.offsetTable.begin(); iter != end; ++iter) >- out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset); >- out.printf(" }\n"); >+ m_out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset); >+ m_out.printf(" }\n"); > ++i; > } while (i < count); > } > } > > template<class Block> >-void BytecodeDumper<Block>::dumpBlock(Block* block, const typename Block::UnpackedInstructions& instructions, PrintStream& out, const ICStatusMap& statusMap) >+void BytecodeDumper<Block>::dumpBlock(Block* block, const InstructionStream& instructions, PrintStream& out, const ICStatusMap& statusMap) > { > size_t instructionCount = 0; > >- for (size_t i = 0; i < instructions.size(); i += opcodeLengths[Interpreter::getOpcodeID(instructions[i])]) >+ for (const auto& instruction : instructions) { >+ UNUSED_PARAM(instruction); > ++instructionCount; >+ } > > out.print(*block); > out.printf( >- ": %lu m_instructions; %lu bytes; %d parameter(s); %d callee 
register(s); %d variable(s)", >- static_cast<unsigned long>(instructions.size()), >- static_cast<unsigned long>(instructions.size() * sizeof(Instruction)), >+ ": %lu instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)", >+ static_cast<unsigned long>(instructionCount), >+ static_cast<unsigned long>(instructions.sizeInBytes()), > block->numParameters(), block->numCalleeLocals(), block->numVars()); > out.print("; scope at ", block->scopeRegister()); > out.printf("\n"); > >- const auto* begin = instructions.begin(); >- const auto* end = instructions.end(); >- BytecodeDumper<Block> dumper(block, begin); >- for (const auto* it = begin; it != end; ++it) >- dumper.dumpBytecode(out, begin, it, statusMap); >+ BytecodeDumper<Block> dumper(block, out); >+ for (const auto& it : instructions) >+ dumper.dumpBytecode(it, statusMap); > >- dumper.dumpIdentifiers(out); >- dumper.dumpConstants(out); >- dumper.dumpExceptionHandlers(out); >- dumper.dumpSwitchJumpTables(out); >- dumper.dumpStringSwitchJumpTables(out); >+ dumper.dumpIdentifiers(); >+ dumper.dumpConstants(); >+ dumper.dumpExceptionHandlers(); >+ dumper.dumpSwitchJumpTables(); >+ dumper.dumpStringSwitchJumpTables(); > > out.printf("\n"); > } >diff --git a/Source/JavaScriptCore/bytecode/BytecodeDumper.h b/Source/JavaScriptCore/bytecode/BytecodeDumper.h >index d811a8d7267cb33ab0a574ca684e29d95c4513b7..2d9bfecb26964c6eeb98fa72f806a218da04678f 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeDumper.h >+++ b/Source/JavaScriptCore/bytecode/BytecodeDumper.h >@@ -28,6 +28,7 @@ > > #include "CallLinkInfo.h" > #include "ICStatusMap.h" >+#include "InstructionStream.h" > #include "StructureStubInfo.h" > > namespace JSC { >@@ -37,20 +38,29 @@ struct Instruction; > template<class Block> > class BytecodeDumper { > public: >- typedef typename Block::Instruction Instruction; >+ static void dumpBytecode(Block*, PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap = ICStatusMap()); 
>+ static void dumpBlock(Block*, const InstructionStream&, PrintStream& out, const ICStatusMap& statusMap = ICStatusMap()); > >- static void dumpBytecode(Block*, PrintStream& out, const Instruction* begin, const Instruction*& it, const ICStatusMap& statusMap = ICStatusMap()); >- static void dumpBlock(Block*, const typename Block::UnpackedInstructions&, PrintStream& out, const ICStatusMap& statusMap = ICStatusMap()); >+ void printLocationAndOp(InstructionStream::Offset location, const char* op); >+ >+ template<typename T> >+ void dumpOperand(T&& operand) >+ { >+ m_out.print(", "); >+ dumpValue(std::forward<T>(operand)); >+ } >+ >+ template<typename T> >+ void dumpValue(T&&); > > private: >- BytecodeDumper(Block* block, const Instruction* instructionsBegin) >+ BytecodeDumper(Block* block, PrintStream& out) > : m_block(block) >- , m_instructionsBegin(instructionsBegin) >+ , m_out(out) > { > } > > Block* block() const { return m_block; } >- const Instruction* instructionsBegin() const { return m_instructionsBegin; } > > ALWAYS_INLINE VM* vm() const; > >@@ -59,25 +69,13 @@ private: > > const Identifier& identifier(int index) const; > >- void dumpIdentifiers(PrintStream& out); >- void dumpConstants(PrintStream& out); >- void dumpExceptionHandlers(PrintStream& out); >- void dumpSwitchJumpTables(PrintStream& out); >- void dumpStringSwitchJumpTables(PrintStream& out); >- >- void printUnaryOp(PrintStream& out, int location, const Instruction*& it, const char* op); >- void printBinaryOp(PrintStream& out, int location, const Instruction*& it, const char* op); >- void printConditionalJump(PrintStream& out, const Instruction*, const Instruction*& it, int location, const char* op); >- void printCompareJump(PrintStream& out, const Instruction*, const Instruction*& it, int location, const char* op); >- void printGetByIdOp(PrintStream& out, int location, const Instruction*& it); >- void printGetByIdCacheStatus(PrintStream& out, int location, const ICStatusMap&); >- void 
printPutByIdCacheStatus(PrintStream& out, int location, const ICStatusMap&); >- void printInByIdCacheStatus(PrintStream& out, int location, const ICStatusMap&); >- enum CacheDumpMode { DumpCaches, DontDumpCaches }; >- void printCallOp(PrintStream& out, int location, const Instruction*& it, const char* op, CacheDumpMode, bool& hasPrintedProfiling, const ICStatusMap&); >- void printPutByIdOp(PrintStream& out, int location, const Instruction*& it, const char* op); >- void printLocationOpAndRegisterOperand(PrintStream& out, int location, const Instruction*& it, const char* op, int operand); >- void dumpBytecode(PrintStream& out, const Instruction* begin, const Instruction*& it, const ICStatusMap&); >+ void dumpIdentifiers(); >+ void dumpConstants(); >+ void dumpExceptionHandlers(); >+ void dumpSwitchJumpTables(); >+ void dumpStringSwitchJumpTables(); >+ >+ void dumpBytecode(const InstructionStream::Ref& it, const ICStatusMap&); > > void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling); > void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling); >@@ -90,7 +88,7 @@ private: > #endif > > Block* m_block; >- const Instruction* m_instructionsBegin; >+ PrintStream& m_out; > }; > > } >diff --git a/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp b/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp >index e91b5b5d8356118136796276004e204828eb27c3..aa35d25a4a94c15a7abe13d5aad40a9edd4ffa4d 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp >+++ b/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp >@@ -30,6 +30,7 @@ > #include "BytecodeDumper.h" > #include "BytecodeLivenessAnalysisInlines.h" > #include "BytecodeRewriter.h" >+#include "BytecodeStructs.h" > #include "BytecodeUseDef.h" > #include "IdentifierInlines.h" > #include "InterpreterInlines.h" >@@ -43,8 +44,8 @@ > namespace JSC { > > struct YieldData { >- size_t point { 0 }; >- int argument { 0 }; >+ 
InstructionStream::Offset point { 0 }; >+ VirtualRegister argument { 0 }; > FastBitVector liveness; > }; > >@@ -52,29 +53,31 @@ class BytecodeGeneratorification { > public: > typedef Vector<YieldData> Yields; > >- BytecodeGeneratorification(UnlinkedCodeBlock* codeBlock, UnlinkedCodeBlock::UnpackedInstructions& instructions, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex) >- : m_codeBlock(codeBlock) >+ BytecodeGeneratorification(BytecodeGenerator& bytecodeGenerator, UnlinkedCodeBlock* codeBlock, InstructionStreamWriter& instructions, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex) >+ : m_bytecodeGenerator(bytecodeGenerator) >+ , m_codeBlock(codeBlock) > , m_instructions(instructions) > , m_graph(m_codeBlock, m_instructions) > , m_generatorFrameSymbolTable(*codeBlock->vm(), generatorFrameSymbolTable) > , m_generatorFrameSymbolTableIndex(generatorFrameSymbolTableIndex) > { > for (BytecodeBasicBlock* block : m_graph) { >- for (unsigned bytecodeOffset : block->offsets()) { >- const UnlinkedInstruction* pc = &instructions[bytecodeOffset]; >- switch (pc->u.opcode) { >+ for (const auto offset : block->offsets()) { >+ const auto instruction = m_instructions.at(offset); >+ switch (instruction->opcodeID()) { > case op_enter: { >- m_enterPoint = bytecodeOffset; >+ m_enterPoint = instruction.offset(); > break; > } > > case op_yield: { >- unsigned liveCalleeLocalsIndex = pc[2].u.unsignedValue; >+ auto bytecode = instruction->as<OpYield>(); >+ unsigned liveCalleeLocalsIndex = bytecode.yieldPoint; > if (liveCalleeLocalsIndex >= m_yields.size()) > m_yields.resize(liveCalleeLocalsIndex + 1); > YieldData& data = m_yields[liveCalleeLocalsIndex]; >- data.point = bytecodeOffset; >- data.argument = pc[3].u.operand; >+ data.point = instruction.offset(); >+ data.argument = bytecode.argument; > break; > } > >@@ -105,9 +108,14 @@ public: > return m_yields; > } > >- unsigned enterPoint() const >+ InstructionStream::Ref enterPoint() const 
> { >- return m_enterPoint; >+ return m_instructions.at(m_enterPoint); >+ } >+ >+ const InstructionStream& instructions() const >+ { >+ return m_instructions; > } > > private: >@@ -138,9 +146,10 @@ private: > return storage; > } > >- unsigned m_enterPoint { 0 }; >+ BytecodeGenerator& m_bytecodeGenerator; >+ InstructionStream::Offset m_enterPoint; > UnlinkedCodeBlock* m_codeBlock; >- UnlinkedCodeBlock::UnpackedInstructions& m_instructions; >+ InstructionStreamWriter& m_instructions; > BytecodeGraph m_graph; > Vector<std::optional<Storage>> m_storages; > Yields m_yields; >@@ -155,7 +164,7 @@ public: > { > } > >- void run(UnlinkedCodeBlock* codeBlock, UnlinkedCodeBlock::UnpackedInstructions& instructions) >+ void run(UnlinkedCodeBlock* codeBlock, InstructionStreamWriter& instructions) > { > // Perform modified liveness analysis to determine which locals are live at the merge points. > // This produces the conservative results for the question, "which variables should be saved and resumed?". >@@ -163,7 +172,7 @@ public: > runLivenessFixpoint(codeBlock, instructions, m_generatorification.graph()); > > for (YieldData& data : m_generatorification.yields()) >- data.liveness = getLivenessInfoAtBytecodeOffset(codeBlock, instructions, m_generatorification.graph(), data.point + opcodeLength(op_yield)); >+ data.liveness = getLivenessInfoAtBytecodeOffset(codeBlock, instructions, m_generatorification.graph(), m_generatorification.instructions().at(data.point).next().offset()); > } > > private: >@@ -179,85 +188,78 @@ void BytecodeGeneratorification::run() > pass.run(m_codeBlock, m_instructions); > } > >- BytecodeRewriter rewriter(m_graph, m_codeBlock, m_instructions); >+ BytecodeRewriter rewriter(m_bytecodeGenerator, m_graph, m_codeBlock, m_instructions); > > // Setup the global switch for the generator. 
> { >- unsigned nextToEnterPoint = enterPoint() + opcodeLength(op_enter); >+ auto nextToEnterPoint = enterPoint().next(); > unsigned switchTableIndex = m_codeBlock->numberOfSwitchJumpTables(); > VirtualRegister state = virtualRegisterForArgument(static_cast<int32_t>(JSGeneratorFunction::GeneratorArgument::State)); > auto& jumpTable = m_codeBlock->addSwitchJumpTable(); > jumpTable.min = 0; > jumpTable.branchOffsets.resize(m_yields.size() + 1); > jumpTable.branchOffsets.fill(0); >- jumpTable.add(0, nextToEnterPoint); >+ jumpTable.add(0, nextToEnterPoint.offset()); > for (unsigned i = 0; i < m_yields.size(); ++i) > jumpTable.add(i + 1, m_yields[i].point); > > rewriter.insertFragmentBefore(nextToEnterPoint, [&](BytecodeRewriter::Fragment& fragment) { >- fragment.appendInstruction(op_switch_imm, switchTableIndex, nextToEnterPoint, state.offset()); >+ fragment.appendInstruction<OpSwitchImm>(switchTableIndex, nextToEnterPoint.offset(), state); > }); > } > > for (const YieldData& data : m_yields) { > VirtualRegister scope = virtualRegisterForArgument(static_cast<int32_t>(JSGeneratorFunction::GeneratorArgument::Frame)); > >+ auto instruction = m_instructions.at(data.point); > // Emit save sequence. 
>- rewriter.insertFragmentBefore(data.point, [&](BytecodeRewriter::Fragment& fragment) { >+ rewriter.insertFragmentBefore(instruction, [&](BytecodeRewriter::Fragment& fragment) { > data.liveness.forEachSetBit([&](size_t index) { > VirtualRegister operand = virtualRegisterForLocal(index); > Storage storage = storageForGeneratorLocal(index); > >- fragment.appendInstruction( >- op_put_to_scope, >- scope.offset(), // scope >+ fragment.appendInstruction<OpPutToScope>( >+ scope, // scope > storage.identifierIndex, // identifier >- operand.offset(), // value >- GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand(), // info >- m_generatorFrameSymbolTableIndex, // symbol table constant index >- storage.scopeOffset.offset() // scope offset >+ operand, // value >+ GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization), // info >+ m_generatorFrameSymbolTableIndex // symbol table constant index > ); > }); > > // Insert op_ret just after save sequence. >- fragment.appendInstruction(op_ret, data.argument); >+ fragment.appendInstruction<OpRet>(data.argument); > }); > > // Emit resume sequence. >- rewriter.insertFragmentAfter(data.point, [&](BytecodeRewriter::Fragment& fragment) { >+ rewriter.insertFragmentAfter(instruction, [&](BytecodeRewriter::Fragment& fragment) { > data.liveness.forEachSetBit([&](size_t index) { > VirtualRegister operand = virtualRegisterForLocal(index); > Storage storage = storageForGeneratorLocal(index); > >- UnlinkedValueProfile profile = m_codeBlock->vm()->canUseJIT() >- ? 
m_codeBlock->addValueProfile() >- : static_cast<UnlinkedValueProfile>(-1); >- fragment.appendInstruction( >- op_get_from_scope, >- operand.offset(), // dst >- scope.offset(), // scope >+ fragment.appendInstruction<OpGetFromScope>( >+ operand, // dst >+ scope, // scope > storage.identifierIndex, // identifier >- GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand(), // info >- 0, // local scope depth >- storage.scopeOffset.offset(), // scope offset >- profile // profile >+ GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization), // info >+ 0 // local scope depth > ); > }); > }); > > // Clip the unnecessary bytecodes. >- rewriter.removeBytecode(data.point); >+ rewriter.removeBytecode(instruction); > } > > rewriter.execute(); > } > >-void performGeneratorification(UnlinkedCodeBlock* codeBlock, UnlinkedCodeBlock::UnpackedInstructions& instructions, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex) >+void performGeneratorification(BytecodeGenerator &bytecodeGenerator, UnlinkedCodeBlock* codeBlock, InstructionStreamWriter& instructions, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex) > { > if (Options::dumpBytecodesBeforeGeneratorification()) > BytecodeDumper<UnlinkedCodeBlock>::dumpBlock(codeBlock, instructions, WTF::dataFile()); > >- BytecodeGeneratorification pass(codeBlock, instructions, generatorFrameSymbolTable, generatorFrameSymbolTableIndex); >+ BytecodeGeneratorification pass(bytecodeGenerator, codeBlock, instructions, generatorFrameSymbolTable, generatorFrameSymbolTableIndex); > pass.run(); > } > >diff --git a/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.h b/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.h >index c7b613746086ae34c4c0a22bf4e67c76e6cfd4e9..01f096e041bfdb25f6fae5ddc0614ef7a6f06306 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.h >+++ 
b/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.h >@@ -26,12 +26,14 @@ > > #pragma once > >-#include "UnlinkedCodeBlock.h" >- > namespace JSC { > >+class BytecodeGenerator; >+class InstructionStreamWriter; >+class SymbolTable; >+class UnlinkedCodeBlock; > class SymbolTable; > >-void performGeneratorification(UnlinkedCodeBlock*, UnlinkedCodeBlock::UnpackedInstructions&, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex); >+void performGeneratorification(BytecodeGenerator&, UnlinkedCodeBlock*, InstructionStreamWriter&, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex); > > } // namespace JSC >diff --git a/Source/JavaScriptCore/bytecode/BytecodeGraph.h b/Source/JavaScriptCore/bytecode/BytecodeGraph.h >index c204b41f329718e72ce7db97fae64315e3c73009..ee2da185c3519e555d5ec706e65421158cada5b6 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeGraph.h >+++ b/Source/JavaScriptCore/bytecode/BytecodeGraph.h >@@ -44,20 +44,20 @@ public: > typedef WTF::IndexedContainerIterator<BytecodeGraph> iterator; > > template <typename CodeBlockType> >- inline BytecodeGraph(CodeBlockType*, typename CodeBlockType::UnpackedInstructions&); >+ inline BytecodeGraph(CodeBlockType*, const InstructionStream&); > > WTF::IteratorRange<BasicBlocksVector::reverse_iterator> basicBlocksInReverseOrder() > { > return WTF::makeIteratorRange(m_basicBlocks.rbegin(), m_basicBlocks.rend()); > } > >- static bool blockContainsBytecodeOffset(BytecodeBasicBlock* block, unsigned bytecodeOffset) >+ static bool blockContainsBytecodeOffset(BytecodeBasicBlock* block, InstructionStream::Offset bytecodeOffset) > { > unsigned leaderOffset = block->leaderOffset(); > return bytecodeOffset >= leaderOffset && bytecodeOffset < leaderOffset + block->totalLength(); > } > >- BytecodeBasicBlock* findBasicBlockForBytecodeOffset(unsigned bytecodeOffset) >+ BytecodeBasicBlock* findBasicBlockForBytecodeOffset(InstructionStream::Offset bytecodeOffset) > { > /* > for 
(unsigned i = 0; i < m_basicBlocks.size(); i++) { >@@ -85,7 +85,7 @@ public: > return basicBlock[1].get(); > } > >- BytecodeBasicBlock* findBasicBlockWithLeaderOffset(unsigned leaderOffset) >+ BytecodeBasicBlock* findBasicBlockWithLeaderOffset(InstructionStream::Offset leaderOffset) > { > return (*tryBinarySearch<std::unique_ptr<BytecodeBasicBlock>, unsigned>(m_basicBlocks, m_basicBlocks.size(), leaderOffset, [] (std::unique_ptr<BytecodeBasicBlock>* basicBlock) { return (*basicBlock)->leaderOffset(); })).get(); > } >@@ -105,9 +105,9 @@ private: > > > template<typename CodeBlockType> >-BytecodeGraph::BytecodeGraph(CodeBlockType* codeBlock, typename CodeBlockType::UnpackedInstructions& instructions) >+BytecodeGraph::BytecodeGraph(CodeBlockType* codeBlock, const InstructionStream& instructions) > { >- BytecodeBasicBlock::compute(codeBlock, instructions.begin(), instructions.size(), m_basicBlocks); >+ BytecodeBasicBlock::compute(codeBlock, instructions, m_basicBlocks); > ASSERT(m_basicBlocks.size()); > } > >diff --git a/Source/JavaScriptCore/bytecode/BytecodeKills.h b/Source/JavaScriptCore/bytecode/BytecodeKills.h >index feab026863afccd9c0156aee017f514336c37fa2..491f47e4bc2d2468a8034489df6e5e192b0bb7f1 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeKills.h >+++ b/Source/JavaScriptCore/bytecode/BytecodeKills.h >@@ -40,36 +40,37 @@ public: > { > } > >+ // TODO: is this dead? > // By convention, we say that non-local operands are never killed. 
>- bool operandIsKilled(unsigned bytecodeIndex, int operand) const >- { >- ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size()); >- VirtualRegister reg(operand); >- if (reg.isLocal()) >- return m_killSets[bytecodeIndex].contains(operand); >- return false; >- } >+ //bool operandIsKilled(unsigned bytecodeIndex, int operand) const >+ //{ >+ //ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size()); >+ //VirtualRegister reg(operand); >+ //if (reg.isLocal()) >+ //return m_killSets[bytecodeIndex].contains(operand); >+ //return false; >+ //} > >- bool operandIsKilled(Instruction* instruction, int operand) const >- { >- return operandIsKilled(m_codeBlock->bytecodeOffset(instruction), operand); >- } >+ //bool operandIsKilled(Instruction* instruction, int operand) const >+ //{ >+ //return operandIsKilled(m_codeBlock->bytecodeOffset(instruction), operand); >+ //} > >- template<typename Functor> >- void forEachOperandKilledAt(unsigned bytecodeIndex, const Functor& functor) const >- { >- ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size()); >- m_killSets[bytecodeIndex].forEachLocal( >- [&] (unsigned local) { >- functor(virtualRegisterForLocal(local)); >- }); >- } >+ //template<typename Functor> >+ //void forEachOperandKilledAt(unsigned bytecodeIndex, const Functor& functor) const >+ //{ >+ //ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size()); >+ //m_killSets[bytecodeIndex].forEachLocal( >+ //[&] (unsigned local) { >+ //functor(virtualRegisterForLocal(local)); >+ //}); >+ //} > >- template<typename Functor> >- void forEachOperandKilledAt(Instruction* pc, const Functor& functor) const >- { >- forEachOperandKilledAt(m_codeBlock->bytecodeOffset(pc), functor); >- } >+ //template<typename Functor> >+ //void forEachOperandKilledAt(Instruction* pc, const Functor& functor) const >+ //{ >+ //forEachOperandKilledAt(m_codeBlock->bytecodeOffset(pc), functor); >+ //} 
> > private: > friend class BytecodeLivenessAnalysis; >diff --git a/Source/JavaScriptCore/bytecode/BytecodeList.json b/Source/JavaScriptCore/bytecode/BytecodeList.json >deleted file mode 100644 >index f5bdc49a7a671b8de9cb348b7ebba936748b2f42..0000000000000000000000000000000000000000 >--- a/Source/JavaScriptCore/bytecode/BytecodeList.json >+++ /dev/null >@@ -1,236 +0,0 @@ >-[ >- { >- "section" : "Bytecodes", "emitInHFile" : true, "emitInStructsFile" : true, "emitInASMFile" : true, >- "emitOpcodeIDStringValuesInHFile" : true, "macroNameComponent" : "BYTECODE", "asmPrefix" : "llint_", >- "bytecodes" : [ >- { "name" : "op_enter", "length" : 1 }, >- { "name" : "op_get_scope", "length" : 2 }, >- { "name" : "op_create_direct_arguments", "length" : 2 }, >- { "name" : "op_create_scoped_arguments", "length" : 3 }, >- { "name" : "op_create_cloned_arguments", "length" : 2 }, >- { "name" : "op_create_this", "offsets" : >- [{"dst" : "int"}, >- {"callee" : "int"}, >- {"inlineCapacity" : "int"}, >- {"cachedCallee" : "WriteBarrier<JSCell>"}]}, >- { "name" : "op_get_argument", "length" : 4 }, >- { "name" : "op_argument_count", "length" : 2 }, >- { "name" : "op_to_this", "length" : 5 }, >- { "name" : "op_check_tdz", "length" : 2 }, >- { "name" : "op_new_object", "length" : 4 }, >- { "name" : "op_new_array", "length" : 5 }, >- { "name" : "op_new_array_with_size", "length" : 4 }, >- { "name" : "op_new_array_buffer", "offsets" : >- [{"dst" : "int"}, >- {"immutableButterfly" : "int"}, >- {"profile" : "ArrayAllocationProfile*"}]}, >- { "name" : "op_new_array_with_spread", "length" : 5 }, >- { "name" : "op_spread", "length" : 3 }, >- { "name" : "op_new_regexp", "length" : 3 }, >- { "name" : "op_mov", "length" : 3 }, >- { "name" : "op_not", "length" : 3 }, >- { "name" : "op_eq", "length" : 4 }, >- { "name" : "op_eq_null", "length" : 3 }, >- { "name" : "op_neq", "length" : 4 }, >- { "name" : "op_neq_null", "length" : 3 }, >- { "name" : "op_stricteq", "length" : 4 }, >- { "name" : 
"op_nstricteq", "length" : 4 }, >- { "name" : "op_less", "length" : 4 }, >- { "name" : "op_lesseq", "length" : 4 }, >- { "name" : "op_greater", "length" : 4 }, >- { "name" : "op_greatereq", "length" : 4 }, >- { "name" : "op_below", "length" : 4 }, >- { "name" : "op_beloweq", "length" : 4 }, >- { "name" : "op_inc", "length" : 2 }, >- { "name" : "op_dec", "length" : 2 }, >- { "name" : "op_to_number", "length" : 4 }, >- { "name" : "op_to_string", "length" : 3 }, >- { "name" : "op_to_object", "length" : 5 }, >- { "name" : "op_negate", "length" : 4 }, >- { "name" : "op_add", "length" : 5 }, >- { "name" : "op_mul", "length" : 5 }, >- { "name" : "op_div", "length" : 5 }, >- { "name" : "op_mod", "length" : 4 }, >- { "name" : "op_sub", "length" : 5 }, >- { "name" : "op_pow", "length" : 4 }, >- { "name" : "op_lshift", "length" : 4 }, >- { "name" : "op_rshift", "length" : 4 }, >- { "name" : "op_urshift", "length" : 4 }, >- { "name" : "op_unsigned", "length" : 3 }, >- { "name" : "op_bitand", "length" : 5 }, >- { "name" : "op_bitxor", "length" : 5 }, >- { "name" : "op_bitor", "length" : 5 }, >- { "name" : "op_identity_with_profile", "length" : 4 }, >- { "name" : "op_overrides_has_instance", "offsets" : >- [{"dst" : "int"}, >- {"constructor" : "int"}, >- {"hasInstanceValue" : "int"}] }, >- { "name" : "op_instanceof", "offsets" : >- [{"dst" : "int"}, >- {"value" : "int"}, >- {"prototype" : "int"}] }, >- { "name" : "op_instanceof_custom", "offsets" : >- [{"dst" : "int"}, >- {"value" : "int"}, >- {"constructor" : "int"}, >- {"hasInstanceValue" : "int"}] }, >- { "name" : "op_typeof", "length" : 3 }, >- { "name" : "op_is_empty", "length" : 3 }, >- { "name" : "op_is_undefined", "length" : 3 }, >- { "name" : "op_is_boolean", "length" : 3 }, >- { "name" : "op_is_number", "length" : 3 }, >- { "name" : "op_is_object", "length" : 3 }, >- { "name" : "op_is_object_or_null", "length" : 3 }, >- { "name" : "op_is_function", "length" : 3 }, >- { "name" : "op_is_cell_with_type", "length" : 4 }, 
>- { "name" : "op_in_by_val", "length" : 5 }, >- { "name" : "op_in_by_id", "length" : 4 }, >- { "name" : "op_get_array_length", "length" : 9 }, >- { "name" : "op_get_by_id", "length" : 9 }, >- { "name" : "op_get_by_id_proto_load", "length" : 9 }, >- { "name" : "op_get_by_id_unset", "length" : 9 }, >- { "name" : "op_get_by_id_with_this", "length" : 6 }, >- { "name" : "op_get_by_val_with_this", "length" : 6 }, >- { "name" : "op_get_by_id_direct", "length" : 7 }, >- { "name" : "op_try_get_by_id", "length" : 5 }, >- { "name" : "op_put_by_id", "length" : 9 }, >- { "name" : "op_put_by_id_with_this", "length" : 5 }, >- { "name" : "op_del_by_id", "length" : 4 }, >- { "name" : "op_get_by_val", "length" : 6 }, >- { "name" : "op_put_by_val", "length" : 5 }, >- { "name" : "op_put_by_val_with_this", "length" : 5 }, >- { "name" : "op_put_by_val_direct", "length" : 5 }, >- { "name" : "op_del_by_val", "length" : 4 }, >- { "name" : "op_put_getter_by_id", "length" : 5 }, >- { "name" : "op_put_setter_by_id", "length" : 5 }, >- { "name" : "op_put_getter_setter_by_id", "length" : 6 }, >- { "name" : "op_put_getter_by_val", "length" : 5 }, >- { "name" : "op_put_setter_by_val", "length" : 5 }, >- { "name" : "op_define_data_property", "length" : 5 }, >- { "name" : "op_define_accessor_property", "length" : 6 }, >- { "name" : "op_jmp", "length" : 2 }, >- { "name" : "op_jtrue", "length" : 3 }, >- { "name" : "op_jfalse", "length" : 3 }, >- { "name" : "op_jeq_null", "length" : 3 }, >- { "name" : "op_jneq_null", "length" : 3 }, >- { "name" : "op_jneq_ptr", "length" : 5 }, >- { "name" : "op_jeq", "length" : 4 }, >- { "name" : "op_jstricteq", "length" : 4 }, >- { "name" : "op_jneq", "length" : 4 }, >- { "name" : "op_jnstricteq", "length" : 4 }, >- { "name" : "op_jless", "length" : 4 }, >- { "name" : "op_jlesseq", "length" : 4 }, >- { "name" : "op_jgreater", "length" : 4 }, >- { "name" : "op_jgreatereq", "length" : 4 }, >- { "name" : "op_jnless", "length" : 4 }, >- { "name" : "op_jnlesseq", 
"length" : 4 }, >- { "name" : "op_jngreater", "length" : 4 }, >- { "name" : "op_jngreatereq", "length" : 4 }, >- { "name" : "op_jbelow", "length" : 4 }, >- { "name" : "op_jbeloweq", "length" : 4 }, >- { "name" : "op_loop_hint", "length" : 1 }, >- { "name" : "op_switch_imm", "length" : 4 }, >- { "name" : "op_switch_char", "length" : 4 }, >- { "name" : "op_switch_string", "length" : 4 }, >- { "name" : "op_new_func", "length" : 4 }, >- { "name" : "op_new_func_exp", "length" : 4 }, >- { "name" : "op_new_generator_func", "length" : 4 }, >- { "name" : "op_new_generator_func_exp", "length" : 4 }, >- { "name" : "op_new_async_func", "length" : 4 }, >- { "name" : "op_new_async_func_exp", "length" : 4 }, >- { "name" : "op_new_async_generator_func", "length" : 4 }, >- { "name" : "op_new_async_generator_func_exp", "length" : 4 }, >- { "name" : "op_set_function_name", "length" : 3 }, >- { "name" : "op_call", "length" : 9 }, >- { "name" : "op_tail_call", "length" : 9 }, >- { "name" : "op_call_eval", "length" : 9 }, >- { "name" : "op_call_varargs", "length" : 9 }, >- { "name" : "op_tail_call_varargs", "length" : 9 }, >- { "name" : "op_tail_call_forward_arguments", "length" : 9 }, >- { "name" : "op_ret", "length" : 2 }, >- { "name" : "op_construct", "length" : 9 }, >- { "name" : "op_construct_varargs", "length" : 9 }, >- { "name" : "op_strcat", "length" : 4 }, >- { "name" : "op_to_primitive", "length" : 3 }, >- { "name" : "op_resolve_scope", "length" : 7 }, >- { "name" : "op_get_from_scope", "length" : 8 }, >- { "name" : "op_put_to_scope", "length" : 7 }, >- { "name" : "op_get_from_arguments", "length" : 5 }, >- { "name" : "op_put_to_arguments", "length" : 4 }, >- { "name" : "op_push_with_scope", "length" : 4 }, >- { "name" : "op_create_lexical_environment", "length" : 5 }, >- { "name" : "op_get_parent_scope", "length" : 3 }, >- { "name" : "op_catch", "length" : 4 }, >- { "name" : "op_throw", "length" : 2 }, >- { "name" : "op_throw_static_error", "length" : 3 }, >- { "name" : 
"op_debug", "length" : 3 }, >- { "name" : "op_end", "length" : 2 }, >- { "name" : "op_profile_type", "length" : 6 }, >- { "name" : "op_profile_control_flow", "length" : 2 }, >- { "name" : "op_get_enumerable_length", "length" : 3 }, >- { "name" : "op_has_indexed_property", "length" : 5 }, >- { "name" : "op_has_structure_property", "length" : 5 }, >- { "name" : "op_has_generic_property", "length" : 4 }, >- { "name" : "op_get_direct_pname", "length" : 7 }, >- { "name" : "op_get_property_enumerator", "length" : 3 }, >- { "name" : "op_enumerator_structure_pname", "length" : 4 }, >- { "name" : "op_enumerator_generic_pname", "length" : 4 }, >- { "name" : "op_to_index_string", "length" : 3 }, >- { "name" : "op_unreachable", "length" : 1 }, >- { "name" : "op_create_rest", "length": 4 }, >- { "name" : "op_get_rest_length", "length": 3 }, >- { "name" : "op_yield", "length" : 4 }, >- { "name" : "op_check_traps", "length" : 1 }, >- { "name" : "op_log_shadow_chicken_prologue", "length" : 2}, >- { "name" : "op_log_shadow_chicken_tail", "length" : 3}, >- { "name" : "op_resolve_scope_for_hoisting_func_decl_in_eval", "length" : 4 }, >- { "name" : "op_nop", "length" : 1 }, >- { "name" : "op_super_sampler_begin", "length" : 1 }, >- { "name" : "op_super_sampler_end", "length" : 1 } >- ] >- }, >- { >- "section" : "CLoopHelpers", "emitInHFile" : true, "emitInStructsFile" : false, "emitInASMFile" : false, >- "emitOpcodeIDStringValuesInHFile" : false, "defaultLength" : 1, "macroNameComponent" : "CLOOP_BYTECODE_HELPER", >- "bytecodes" : [ >- { "name" : "llint_entry" }, >- { "name" : "getHostCallReturnValue" }, >- { "name" : "llint_return_to_host" }, >- { "name" : "llint_vm_entry_to_javascript" }, >- { "name" : "llint_vm_entry_to_native" }, >- { "name" : "llint_cloop_did_return_from_js_1" }, >- { "name" : "llint_cloop_did_return_from_js_2" }, >- { "name" : "llint_cloop_did_return_from_js_3" }, >- { "name" : "llint_cloop_did_return_from_js_4" }, >- { "name" : 
"llint_cloop_did_return_from_js_5" }, >- { "name" : "llint_cloop_did_return_from_js_6" }, >- { "name" : "llint_cloop_did_return_from_js_7" }, >- { "name" : "llint_cloop_did_return_from_js_8" }, >- { "name" : "llint_cloop_did_return_from_js_9" }, >- { "name" : "llint_cloop_did_return_from_js_10" }, >- { "name" : "llint_cloop_did_return_from_js_11" }, >- { "name" : "llint_cloop_did_return_from_js_12" } >- ] >- }, >- { >- "section" : "NativeHelpers", "emitInHFile" : true, "emitInStructsFile" : false, "emitInASMFile" : true, >- "emitOpcodeIDStringValuesInHFile" : false, "defaultLength" : 1, "macroNameComponent" : "BYTECODE_HELPER", >- "bytecodes" : [ >- { "name" : "llint_program_prologue" }, >- { "name" : "llint_eval_prologue" }, >- { "name" : "llint_module_program_prologue" }, >- { "name" : "llint_function_for_call_prologue" }, >- { "name" : "llint_function_for_construct_prologue" }, >- { "name" : "llint_function_for_call_arity_check" }, >- { "name" : "llint_function_for_construct_arity_check" }, >- { "name" : "llint_generic_return_point" }, >- { "name" : "llint_throw_from_slow_path_trampoline" }, >- { "name" : "llint_throw_during_call_trampoline" }, >- { "name" : "llint_native_call_trampoline" }, >- { "name" : "llint_native_construct_trampoline" }, >- { "name" : "llint_internal_function_call_trampoline" }, >- { "name" : "llint_internal_function_construct_trampoline" }, >- { "name" : "handleUncaughtException" } >- ] >- } >-] >diff --git a/Source/JavaScriptCore/bytecode/BytecodeList.rb b/Source/JavaScriptCore/bytecode/BytecodeList.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..3a768aa9e5f7d1bad71c977e3888215bec562296 >--- /dev/null >+++ b/Source/JavaScriptCore/bytecode/BytecodeList.rb >@@ -0,0 +1,1134 @@ >+types [ >+ :VirtualRegister, >+ >+ :BasicBlockLocation, >+ :DebugHookType, >+ :ErrorType, >+ :GetPutInfo, >+ :IndexingType, >+ :JSCell, >+ :JSGlobalLexicalEnvironment, >+ :JSGlobalObject, >+ :JSModuleEnvironment, >+ :JSObject, >+ :JSScope, 
>+ :JSType, >+ :JSValue, >+ :LLIntCallLinkInfo, >+ :OperandTypes, >+ :ProfileTypeBytecodeFlag, >+ :PutByIdFlags, >+ :ResolveType, >+ :ScopeOffset, >+ :Structure, >+ :StructureID, >+ :StructureChain, >+ :SymbolTable, >+ :ToThisStatus, >+ :TypeLocation, >+ :WatchpointSet, >+ >+ :ValueProfile, >+ :ValueProfileAndOperandBuffer, >+ :ArithProfile, >+ :ArrayProfile, >+ :ArrayAllocationProfile, >+ :ObjectAllocationProfile, >+] >+ >+namespace :Special do >+ types [ :Pointer ] >+end >+ >+templates [ >+ :WriteBarrierBase, >+] >+ >+begin_section :Bytecodes, >+ emit_in_h_file: true, >+ emit_in_structs_file: true, >+ emit_in_asm_file: true, >+ emit_opcode_id_string_values_in_h_file: true, >+ macro_name_component: :BYTECODE, >+ asm_prefix: "llint_", >+ op_prefix: "op_" >+ >+op :wide >+ >+op :enter >+ >+op :get_scope, >+ args: { >+ dst: VirtualRegister >+ } >+ >+op :create_direct_arguments, >+ args: { >+ dst: VirtualRegister, >+ } >+ >+op :create_scoped_arguments, >+ args: { >+ dst: VirtualRegister, >+ scope: VirtualRegister, >+ } >+ >+op :create_cloned_arguments, >+ args: { >+ dst: VirtualRegister, >+ } >+ >+op :create_this, >+ args: { >+ dst: VirtualRegister, >+ callee: VirtualRegister, >+ inlineCapacity: unsigned, >+ }, >+ metadata: { >+ cachedCallee: WriteBarrierBase[JSCell] >+ } >+ >+op :get_argument, >+ args: { >+ dst: VirtualRegister, >+ index: int, >+ }, >+ metadata: { >+ profile: ValueProfile, >+ } >+ >+op :argument_count, >+ args: { >+ dst: VirtualRegister, >+ } >+ >+op :to_this, >+ args: { >+ srcDst: VirtualRegister, >+ }, >+ metadata: { >+ cachedStructure: WriteBarrierBase[Structure], >+ toThisStatus: ToThisStatus, >+ profile: ValueProfile, >+ } >+ >+op :check_tdz, >+ args: { >+ target: VirtualRegister, >+ } >+ >+op :new_object, >+ args: { >+ dst: VirtualRegister, >+ inlineCapacity: unsigned, >+ }, >+ metadata: { >+ allocationProfile: ObjectAllocationProfile, >+ } >+ >+op :new_array, >+ args: { >+ dst: VirtualRegister, >+ argv: VirtualRegister, >+ argc: unsigned, >+ 
recommendedIndexingType: IndexingType, >+ }, >+ metadata: { >+ allocationProfile: ArrayAllocationProfile, >+ }, >+ metadata_initializers: { >+ allocationProfile: :recommendedIndexingType, >+ } >+ >+op :new_array_with_size, >+ args: { >+ dst: VirtualRegister, >+ length: VirtualRegister, >+ }, >+ metadata: { >+ allocationProfile: ArrayAllocationProfile, >+ } >+ >+op :new_array_buffer, >+ args: { >+ dst: VirtualRegister, >+ immutableButterfly: VirtualRegister, >+ recommendedIndexingType: IndexingType >+ }, >+ metadata: { >+ allocationProfile: ArrayAllocationProfile, >+ }, >+ metadata_initializers: { >+ allocationProfile: :recommendedIndexingType, >+ } >+ >+op :new_array_with_spread, >+ args: { >+ dst: VirtualRegister, >+ argv: VirtualRegister, >+ argc: unsigned, >+ bitVector: unsigned, # this could have type BitVector& if the instruction has a reference to the codeblock >+ } >+ >+op :spread, >+ args: { >+ dst: VirtualRegister, >+ argument: VirtualRegister, >+ } >+ >+op :new_regexp, >+ args: { >+ dst: VirtualRegister, >+ regexp: VirtualRegister, # this could have type RegExp the instruction has a reference to the codeblock >+ } >+ >+op :mov, # damnit this is in reverse order to llint >+ args: { >+ dst: VirtualRegister, >+ src: VirtualRegister, >+ } >+ >+op_group :BinaryOp, >+ [ >+ :eq, >+ :neq, >+ :stricteq, >+ :nstricteq, >+ :less, >+ :lesseq, >+ :greater, >+ :greatereq, >+ :below, >+ :beloweq, >+ :mod, >+ :pow, >+ :lshift, >+ :rshift, >+ :urshift, >+ ], >+ args: { >+ dst: VirtualRegister, >+ lhs: VirtualRegister, >+ rhs: VirtualRegister, >+ } >+ >+op_group :ProfiledBinaryOp, >+ [ >+ :add, >+ :mul, >+ :div, >+ :sub, >+ :bitand, >+ :bitxor, >+ :bitor, >+ ], >+ args: { >+ dst: VirtualRegister, >+ lhs: VirtualRegister, >+ rhs: VirtualRegister, >+ operandTypes: OperandTypes, >+ }, >+ metadata: { >+ arithProfile: ArithProfile >+ }, >+ metadata_initializers: { >+ arithProfile: :operandTypes >+ } >+ >+op_group :UnaryOp, >+ [ >+ :eq_null, >+ :neq_null, >+ :to_string, >+ 
:unsigned, >+ :is_empty, >+ :is_undefined, >+ :is_boolean, >+ :is_number, >+ :is_object, >+ :is_object_or_null, >+ :is_function, >+ ], >+ args: { >+ dst: VirtualRegister, >+ operand: VirtualRegister, >+ } >+ >+op :inc, >+ args: { >+ srcDst: VirtualRegister, >+ } >+ >+op :dec, >+ args: { >+ srcDst: VirtualRegister, >+ } >+ >+op :to_object, >+ args: { >+ dst: VirtualRegister, >+ operand: VirtualRegister, >+ message: unsigned, >+ }, >+ metadata: { >+ profile: ValueProfile, >+ } >+ >+op :to_number, >+ args: { >+ dst: VirtualRegister, >+ operand: VirtualRegister, >+ }, >+ metadata: { >+ profile: ValueProfile, >+ } >+ >+op :negate, >+ args: { >+ dst: VirtualRegister, >+ operand: VirtualRegister, >+ operandTypes: OperandTypes, >+ }, >+ metadata: { >+ arithProfile: ArithProfile, >+ }, >+ metadata_initializers: { >+ arithProfile: :operandTypes >+ } >+ >+op :not, >+ args: { >+ dst: VirtualRegister, >+ operand: VirtualRegister, >+ } >+ >+ >+op :identity_with_profile, >+ args: { >+ srcDst: VirtualRegister, >+ topProfile: unsigned, >+ bottomProfile: unsigned, >+ } >+ >+op :overrides_has_instance, >+ args: { >+ dst: VirtualRegister, >+ constructor: VirtualRegister, >+ hasInstanceValue: VirtualRegister, >+ } >+ >+op :instanceof, >+ args: { >+ dst: VirtualRegister, >+ value: VirtualRegister, >+ prototype: VirtualRegister, >+ } >+ >+op :instanceof_custom, >+ args: { >+ dst: VirtualRegister, >+ value: VirtualRegister, >+ constructor: VirtualRegister, >+ hasInstanceValue: VirtualRegister, >+ } >+ >+op :typeof, >+ args: { >+ dst: VirtualRegister, >+ value: VirtualRegister, >+ } >+ >+op :is_cell_with_type, >+ args: { >+ dst: VirtualRegister, >+ operand: VirtualRegister, >+ type: JSType, >+ } >+ >+op :in_by_val, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: VirtualRegister, >+ }, >+ metadata: { >+ arrayProfile: ArrayProfile, >+ } >+ >+op :in_by_id, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: unsigned, >+ } >+ >+# NOTE: 
get_by_id variants >+# they all used to have to share the same size, in order to store all the metadata >+# for all the variants - this should no longer be necessary, since the metadata is >+# stored out-of-line, but has to be confirmed later on >+# we should also consider whether we want to keep modifying the bytecode stream >+# throughout execution, because otherwise we'll need an alternative way of specializing >+# get_by_id >+op :get_array_length, # special - never emitted >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, # must be a JSArray >+ property: unsigned, # always "length" >+ }, >+ metadata: { >+ arrayProfile: ArrayProfile, >+ } >+ >+op :get_by_id, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: unsigned, >+ }, >+ metadata: { >+ profile: ValueProfile, >+ structure: StructureID, >+ hitCountForLLIntCaching: unsigned, >+ } >+ >+op :get_by_id_proto_load, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: unsigned, >+ }, >+ metadata: { >+ structure: StructureID, >+ slot: JSObject.*, >+ } >+ >+op :get_by_id_unset, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: unsigned, >+ }, >+ metadata: { >+ structure: StructureID, >+ } >+ >+op :get_by_id_with_this, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ thisValue: VirtualRegister, >+ property: int, >+ }, >+ metadata: { >+ profile: ValueProfile, >+ } >+ >+op :get_by_val_with_this, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ thisValue: VirtualRegister, >+ property: VirtualRegister, >+ }, >+ metadata: { >+ profile: ValueProfile, >+ } >+ >+op :get_by_id_direct, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: unsigned, >+ }, >+ metadata: { >+ profile: ValueProfile, >+ structure: StructureID, >+ offset: unsigned, >+ } >+ >+op :try_get_by_id, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: unsigned, >+ }, >+ metadata: { >+ profile: 
ValueProfile, >+ } >+ >+op :put_by_id, >+ args: { >+ base: VirtualRegister, >+ property: unsigned, >+ value: VirtualRegister, >+ flags: PutByIdFlags, >+ }, >+ metadata: { >+ oldStructure: StructureID, >+ offset: unsigned, >+ newStructure: StructureID, >+ structureChain: WriteBarrierBase[StructureChain], >+ flags: PutByIdFlags, >+ } >+ >+op :put_by_id_with_this, >+ args: { >+ base: VirtualRegister, >+ thisValue: VirtualRegister, >+ property: unsigned, >+ value: VirtualRegister, >+ } >+ >+op :del_by_id, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: unsigned, >+ } >+ >+op :get_by_val, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: VirtualRegister, >+ }, >+ metadata: { >+ profile: ValueProfile, >+ arrayProfile: ArrayProfile, >+ } >+ >+op :put_by_val, >+ args: { >+ base: VirtualRegister, >+ property: VirtualRegister, >+ value: VirtualRegister, >+ }, >+ metadata: { >+ arrayProfile: ArrayProfile, >+ } >+ >+op :put_by_val_with_this, >+ args: { >+ base: VirtualRegister, >+ thisValue: VirtualRegister, >+ property: VirtualRegister, >+ value: VirtualRegister, >+ } >+ >+op :put_by_val_direct, >+ args: { >+ base: VirtualRegister, >+ property: VirtualRegister, >+ value: VirtualRegister, >+ }, >+ metadata: { >+ arrayProfile: ArrayProfile, >+ } >+ >+op :del_by_val, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: VirtualRegister, >+ } >+ >+op :put_getter_by_id, >+ args: { >+ base: VirtualRegister, >+ property: int, >+ attributes: unsigned, >+ accessor: VirtualRegister, >+ } >+ >+op :put_setter_by_id, >+ args: { >+ base: VirtualRegister, >+ property: int, >+ attributes: unsigned, >+ accessor: VirtualRegister, >+ } >+ >+op :put_getter_setter_by_id, >+ args: { >+ base: VirtualRegister, >+ property: unsigned, >+ attributes: unsigned, >+ getter: VirtualRegister, >+ setter: VirtualRegister, >+ } >+ >+op :put_getter_by_val, >+ args: { >+ base: VirtualRegister, >+ property: VirtualRegister, >+ attributes: 
unsigned, >+ accessor: VirtualRegister, >+ } >+ >+op :put_setter_by_val, >+ args: { >+ base: VirtualRegister, >+ property: VirtualRegister, >+ attributes: unsigned, >+ accessor: VirtualRegister, >+ } >+ >+op :define_data_property, >+ args: { >+ base: VirtualRegister, >+ property: VirtualRegister, >+ value: VirtualRegister, >+ attributes: VirtualRegister, >+ } >+ >+op :define_accessor_property, >+ args: { >+ base: VirtualRegister, >+ property: VirtualRegister, >+ getter: VirtualRegister, >+ setter: VirtualRegister, >+ attributes: VirtualRegister, >+ } >+ >+op :jmp, >+ args: { >+ target: int, >+ } >+ >+op :jtrue, >+ args: { >+ condition: VirtualRegister, >+ target: int, >+ } >+ >+op :jfalse, >+ args: { >+ condition: VirtualRegister, >+ target: int, >+ } >+ >+op :jeq_null, >+ args: { >+ condition: VirtualRegister, >+ target: int, >+ } >+ >+op :jneq_null, >+ args: { >+ condition: VirtualRegister, >+ target: int, >+ } >+ >+op :jneq_ptr, >+ args: { >+ condition: VirtualRegister, >+ specialPointer: Special::Pointer, >+ target: int, >+ }, >+ metadata: { >+ hasJumped: bool, >+ } >+ >+op_group :BinaryJmp, >+ [ >+ :jeq, >+ :jstricteq, >+ :jneq, >+ :jnstricteq, >+ :jless, >+ :jlesseq, >+ :jgreater, >+ :jgreatereq, >+ :jnless, >+ :jnlesseq, >+ :jngreater, >+ :jngreatereq, >+ :jbelow, >+ :jbeloweq, >+ ], >+ args: { >+ lhs: VirtualRegister, >+ rhs: VirtualRegister, >+ target: int, >+ } >+ >+op :loop_hint >+ >+op_group :SwitchValue, >+ [ >+ :switch_imm, >+ :switch_char, >+ :switch_string, >+ ], >+ args: { >+ tableIndex: int, >+ defaultOffset: int, >+ scrutinee: VirtualRegister, >+ } >+ >+op_group :NewFunction, >+ [ >+ :new_func, >+ :new_func_exp, >+ :new_generator_func, >+ :new_generator_func_exp, >+ :new_async_func, >+ :new_async_func_exp, >+ :new_async_generator_func, >+ :new_async_generator_func_exp, >+ ], >+ args: { >+ dst: VirtualRegister, >+ scope: VirtualRegister, >+ functionDecl: int, >+ } >+ >+op :set_function_name, >+ args: { >+ function: VirtualRegister, >+ name: 
VirtualRegister, >+ } >+ >+# op_call variations >+op :call, >+ args: { >+ dst: VirtualRegister, >+ callee: VirtualRegister, >+ argc: unsigned, >+ argv: unsigned, >+ }, >+ metadata: { >+ callLinkInfo: LLIntCallLinkInfo, >+ # ? there was an extra slot here >+ arrayProfile: ArrayProfile, >+ profile: ValueProfile, >+ } >+ >+op :tail_call, >+ args: { >+ dst: VirtualRegister, >+ callee: VirtualRegister, >+ argc: unsigned, >+ argv: unsigned, >+ }, >+ metadata: { >+ callLinkInfo: LLIntCallLinkInfo, >+ # ? there was an extra slot here >+ arrayProfile: ArrayProfile, >+ profile: ValueProfile, >+ } >+ >+op :call_eval, >+ args: { >+ dst: VirtualRegister, >+ callee: VirtualRegister, >+ argc: unsigned, >+ argv: unsigned, >+ }, >+ metadata: { >+ callLinkInfo: LLIntCallLinkInfo, >+ # ? there was an extra slot here >+ arrayProfile: ArrayProfile, >+ profile: ValueProfile, >+ } >+ >+op :call_varargs, >+ args: { >+ dst: VirtualRegister, >+ callee: VirtualRegister, >+ thisValue?: VirtualRegister, >+ arguments?: VirtualRegister, >+ firstFree: VirtualRegister, >+ firstVarArg: int, >+ }, >+ metadata: { >+ arrayProfile: ArrayProfile, >+ profile: ValueProfile, >+ } >+ >+op :tail_call_varargs, >+ args: { >+ dst: VirtualRegister, >+ callee: VirtualRegister, >+ thisValue?: VirtualRegister, >+ arguments?: VirtualRegister, >+ firstFree: VirtualRegister, >+ firstVarArg: int, >+ }, >+ metadata: { >+ arrayProfile: ArrayProfile, >+ profile: ValueProfile, >+ } >+ >+op :tail_call_forward_arguments, >+ args: { >+ dst: VirtualRegister, >+ callee: VirtualRegister, >+ thisValue?: VirtualRegister, >+ arguments?: VirtualRegister, >+ firstFree: VirtualRegister, >+ firstVarArg: int, >+ }, >+ metadata: { >+ arrayProfile: ArrayProfile, >+ profile: ValueProfile, >+ } >+ >+op :construct, >+ args: { >+ dst: VirtualRegister, >+ callee: VirtualRegister, >+ argc: unsigned, >+ argv: unsigned, >+ }, >+ metadata: { >+ callLinkInfo: LLIntCallLinkInfo, >+ # ? there was an extra slot here >+ # ? 
empty slot here >+ profile: ValueProfile, >+ } >+ >+op :construct_varargs, >+ args: { >+ dst: VirtualRegister, >+ callee: VirtualRegister, >+ thisValue?: VirtualRegister, >+ arguments?: VirtualRegister, >+ firstFree: VirtualRegister, >+ firstVarArg: int, >+ }, >+ metadata: { >+ arrayProfile: ArrayProfile, >+ profile: ValueProfile, >+ } >+ >+op :ret, >+ args: { >+ value: VirtualRegister, >+ } >+ >+op :strcat, >+ args: { >+ dst: VirtualRegister, >+ src: VirtualRegister, >+ count: int, >+ } >+ >+op :to_primitive, >+ args: { >+ dst: VirtualRegister, >+ src: VirtualRegister, >+ } >+ >+op :resolve_scope, >+ args: { >+ dst: VirtualRegister, >+ scope: VirtualRegister, >+ var: unsigned, >+ resolveType: ResolveType, >+ localScopeDepth: unsigned, >+ }, >+ metadata: { >+ resolveType: ResolveType, >+ localScopeDepth: unsigned, >+ symbolTable: WriteBarrierBase[SymbolTable], >+ globalObject: JSGlobalObject.*, >+ globalLexicalEnvironment: JSGlobalLexicalEnvironment.*, >+ scope: WriteBarrierBase[JSScope], >+ moduleEnvironment: WriteBarrierBase[JSModuleEnvironment], >+ } >+ >+op :get_from_scope, >+ args: { >+ dst: VirtualRegister, >+ scope: VirtualRegister, >+ var: unsigned, >+ getPutInfo: GetPutInfo, >+ localScopeDepth: int, >+ }, >+ metadata: { >+ getPutInfo: GetPutInfo, >+ profile: ValueProfile, >+ scopeOffset: JSValue.*, >+ watchpointSet: WatchpointSet.*, >+ structure: WriteBarrierBase[Structure], >+ varOffset: unsigned, >+ }, >+ metadata_initializers: { >+ getPutInfo: :getPutInfo >+ } >+ >+op :put_to_scope, >+ args: { >+ scope: VirtualRegister, >+ var: unsigned, >+ value: VirtualRegister, >+ getPutInfo: GetPutInfo, >+ localScopeDepth: int, >+ }, >+ metadata: { >+ getPutInfo: GetPutInfo, >+ profile: ValueProfile, >+ scopeOffset: JSValue.*, >+ watchpointSet: WatchpointSet.*, >+ structure: WriteBarrierBase[Structure], >+ varOffset: unsigned, >+ }, >+ metadata_initializers: { >+ getPutInfo: :getPutInfo >+ } >+ >+op :get_from_arguments, >+ args: { >+ dst: VirtualRegister, >+ scope: 
VirtualRegister, >+ offset: unsigned, >+ }, >+ metadata: { >+ profile: ValueProfile, >+ } >+ >+op :put_to_arguments, >+ args: { >+ scope: VirtualRegister, >+ offset: unsigned, >+ value: VirtualRegister, >+ } >+ >+op :push_with_scope, >+ args: { >+ dst: VirtualRegister, >+ currentScope: VirtualRegister, >+ newScope: VirtualRegister, >+ } >+ >+op :create_lexical_environment, >+ args: { >+ dst: VirtualRegister, >+ scope: VirtualRegister, >+ symbolTableIndex: int, >+ initialValue: VirtualRegister, >+ } >+ >+op :get_parent_scope, >+ args: { >+ dst: VirtualRegister, >+ scope: VirtualRegister, >+ } >+ >+op :catch, >+ args: { >+ exception: VirtualRegister, >+ thrownValue: VirtualRegister, >+ }, >+ metadata: { >+ buffer: ValueProfileAndOperandBuffer.*, >+ } >+ >+op :throw, >+ args: { >+ value: VirtualRegister, >+ } >+ >+op :throw_static_error, >+ args: { >+ message: VirtualRegister, >+ errorType: ErrorType, >+ } >+ >+op :debug, >+ args: { >+ debugHookType: DebugHookType, >+ hasBreakpoint: bool, >+ } >+ >+op :end, >+ args: { >+ value: VirtualRegister, >+ } >+ >+op :profile_type, >+ args: { >+ target: VirtualRegister, >+ symbolTableOrScopeDepth: int, >+ flag: ProfileTypeBytecodeFlag, >+ identifier?: unsigned, >+ resolveType: ResolveType, >+ }, >+ metadata: { >+ typeLocation: TypeLocation.*, >+ } >+ >+op :profile_control_flow, >+ args: { >+ textOffset: int, >+ }, >+ metadata: { >+ basicBlockLocation: BasicBlockLocation.*, >+ } >+ >+op :get_enumerable_length, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ } >+ >+op :has_indexed_property, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: VirtualRegister, >+ }, >+ metadata: { >+ arrayProfile: ArrayProfile, >+ } >+ >+op :has_structure_property, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: VirtualRegister, >+ enumerator: VirtualRegister, >+ } >+ >+op :has_generic_property, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: 
VirtualRegister, >+ } >+ >+op :get_direct_pname, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: VirtualRegister, >+ index: VirtualRegister, >+ enumerator: VirtualRegister, >+ }, >+ metadata: { >+ profile: ValueProfile, >+ } >+ >+op :get_property_enumerator, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ } >+ >+op :enumerator_structure_pname, >+ args: { >+ dst: VirtualRegister, >+ enumerator: VirtualRegister, >+ index: VirtualRegister, >+ } >+ >+op :enumerator_generic_pname, >+ args: { >+ dst: VirtualRegister, >+ enumerator: VirtualRegister, >+ index: VirtualRegister, >+ } >+ >+op :to_index_string, >+ args: { >+ dst: VirtualRegister, >+ index: VirtualRegister, >+ } >+ >+op :unreachable >+ >+op :create_rest, >+ args: { >+ dst: VirtualRegister, >+ arraySize: VirtualRegister, >+ numParametersToSkip: unsigned, >+ } >+ >+op :get_rest_length, >+ args: { >+ dst: VirtualRegister, >+ numParametersToSkip: unsigned, >+ } >+ >+op :yield, >+ args: { >+ generator: VirtualRegister, >+ yieldPoint: unsigned, >+ argument: VirtualRegister, >+ } >+ >+op :check_traps >+ >+op :log_shadow_chicken_prologue, >+ args: { >+ scope: VirtualRegister, >+ } >+ >+op :log_shadow_chicken_tail, >+ args: { >+ thisValue: VirtualRegister, >+ scope: VirtualRegister, >+ } >+ >+op :resolve_scope_for_hoisting_func_decl_in_eval, >+ args: { >+ dst: VirtualRegister, >+ scope: VirtualRegister, >+ property: unsigned, >+ } >+ >+op :nop >+ >+op :super_sampler_begin >+ >+op :super_sampler_end >+ >+end_section :Bytecodes >+ >+begin_section :CLoopHelpers, >+ emit_in_h_file: true, >+ macro_name_component: :CLOOP_BYTECODE_HELPER >+ >+op :llint_entry >+op :getHostCallReturnValue >+op :llint_return_to_host >+op :llint_vm_entry_to_javascript >+op :llint_vm_entry_to_native >+op :llint_cloop_did_return_from_js_1 >+op :llint_cloop_did_return_from_js_2 >+op :llint_cloop_did_return_from_js_3 >+op :llint_cloop_did_return_from_js_4 >+op :llint_cloop_did_return_from_js_5 >+op 
:llint_cloop_did_return_from_js_6 >+op :llint_cloop_did_return_from_js_7 >+op :llint_cloop_did_return_from_js_8 >+op :llint_cloop_did_return_from_js_9 >+op :llint_cloop_did_return_from_js_10 >+op :llint_cloop_did_return_from_js_11 >+op :llint_cloop_did_return_from_js_12 >+ >+end_section :CLoopHelpers >+ >+begin_section :NativeHelpers, >+ emit_in_h_file: true, >+ emit_in_asm_file: true, >+ macro_name_component: :BYTECODE_HELPER >+ >+op :llint_program_prologue >+op :llint_eval_prologue >+op :llint_module_program_prologue >+op :llint_function_for_call_prologue >+op :llint_function_for_construct_prologue >+op :llint_function_for_call_arity_check >+op :llint_function_for_construct_arity_check >+op :llint_generic_return_point >+op :llint_throw_from_slow_path_trampoline >+op :llint_throw_during_call_trampoline >+op :llint_native_call_trampoline >+op :llint_native_construct_trampoline >+op :llint_internal_function_call_trampoline >+op :llint_internal_function_construct_trampoline >+op :handleUncaughtException >+ >+end_section :NativeHelpers >diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp >index e0169dfb498ee644d610aaf4df90b4845fabae2f..330c7c7a98dee70b1961ced0e1c5c24277b25e8b 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp >+++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp >@@ -119,7 +119,7 @@ void BytecodeLivenessAnalysis::computeKills(CodeBlock* codeBlock, BytecodeKills& > void BytecodeLivenessAnalysis::dumpResults(CodeBlock* codeBlock) > { > dataLog("\nDumping bytecode liveness for ", *codeBlock, ":\n"); >- Instruction* instructionsBegin = codeBlock->instructions().begin(); >+ const auto& instructions = codeBlock->instructions(); > unsigned i = 0; > > unsigned numberOfBlocks = m_graph.size(); >@@ -167,17 +167,15 @@ void BytecodeLivenessAnalysis::dumpResults(CodeBlock* codeBlock) > continue; > } > for (unsigned bytecodeOffset = 
block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) { >- const Instruction* currentInstruction = &instructionsBegin[bytecodeOffset]; >+ const auto currentInstruction = instructions.at(bytecodeOffset); > > dataLogF("Live variables:"); > FastBitVector liveBefore = getLivenessInfoAtBytecodeOffset(codeBlock, bytecodeOffset); > dumpBitVector(liveBefore); > dataLogF("\n"); >- codeBlock->dumpBytecode(WTF::dataFile(), instructionsBegin, currentInstruction); >+ codeBlock->dumpBytecode(WTF::dataFile(), currentInstruction); > >- OpcodeID opcodeID = Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode); >- unsigned opcodeLength = opcodeLengths[opcodeID]; >- bytecodeOffset += opcodeLength; >+ bytecodeOffset += currentInstruction->size(); > } > > dataLogF("Live variables:"); >diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h >index 64b175625b88258c014a0d0eaae56c7cc3e718a0..7bef4cf1dce2a1a7fc183afeae21dd5914f8f949 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h >+++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h >@@ -37,9 +37,9 @@ class FullBytecodeLiveness; > > class BytecodeLivenessPropagation { > protected: >- template<typename CodeBlockType, typename Instructions, typename UseFunctor, typename DefFunctor> void stepOverInstruction(CodeBlockType*, const Instructions&, BytecodeGraph&, unsigned bytecodeOffset, const UseFunctor&, const DefFunctor&); >+ template<typename CodeBlockType, typename UseFunctor, typename DefFunctor> void stepOverInstruction(CodeBlockType*, const InstructionStream&, BytecodeGraph&, InstructionStream::Offset bytecodeOffset, const UseFunctor&, const DefFunctor&); > >- template<typename CodeBlockType, typename Instructions> void stepOverInstruction(CodeBlockType*, const Instructions&, BytecodeGraph&, unsigned bytecodeOffset, FastBitVector& out); >+ template<typename CodeBlockType> void 
stepOverInstruction(CodeBlockType*, const InstructionStream&, BytecodeGraph&, InstructionStream::Offset bytecodeOffset, FastBitVector& out); > > template<typename CodeBlockType, typename Instructions> bool computeLocalLivenessForBytecodeOffset(CodeBlockType*, const Instructions&, BytecodeGraph&, BytecodeBasicBlock*, unsigned targetOffset, FastBitVector& result); > >diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h >index 15e847fc3c50a2ee82b0600e7ee9d988565ada59..71ff56f7ea68a2f455cf678b3c79a2d7bab9b39d 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h >+++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h >@@ -51,18 +51,17 @@ inline bool operandIsLive(const FastBitVector& out, int operand) > return operandIsAlwaysLive(operand) || operandThatIsNotAlwaysLiveIsLive(out, operand); > } > >-inline bool isValidRegisterForLiveness(int operand) >+inline bool isValidRegisterForLiveness(VirtualRegister operand) > { >- VirtualRegister virtualReg(operand); >- if (virtualReg.isConstant()) >+ if (operand.isConstant()) > return false; >- return virtualReg.isLocal(); >+ return operand.isLocal(); > } > > // Simplified interface to bytecode use/def, which determines defs first and then uses, and includes > // exception handlers in the uses. 
>-template<typename CodeBlockType, typename Instructions, typename UseFunctor, typename DefFunctor> >-inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* codeBlock, const Instructions& instructions, BytecodeGraph& graph, unsigned bytecodeOffset, const UseFunctor& use, const DefFunctor& def) >+template<typename CodeBlockType, typename UseFunctor, typename DefFunctor> >+inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* codeBlock, const InstructionStream& instructions, BytecodeGraph& graph, InstructionStream::Offset bytecodeOffset, const UseFunctor& use, const DefFunctor& def) > { > // This abstractly execute the instruction in reverse. Instructions logically first use operands and > // then define operands. This logical ordering is necessary for operations that use and def the same >@@ -79,22 +78,21 @@ inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* code > // uses before defs, then the add operation above would appear to not have loc1 live, since we'd > // first add it to the out set (the use), and then we'd remove it (the def). 
> >- auto* instructionsBegin = instructions.begin(); >- auto* instruction = &instructionsBegin[bytecodeOffset]; >- OpcodeID opcodeID = Interpreter::getOpcodeID(*instruction); >+ auto* instruction = instructions.at(bytecodeOffset).ptr(); >+ OpcodeID opcodeID = instruction->opcodeID(); > > computeDefsForBytecodeOffset( > codeBlock, opcodeID, instruction, >- [&] (CodeBlockType*, const typename CodeBlockType::Instruction*, OpcodeID, int operand) { >+ [&] (VirtualRegister operand) { > if (isValidRegisterForLiveness(operand)) >- def(VirtualRegister(operand).toLocal()); >+ def(operand.toLocal()); > }); > > computeUsesForBytecodeOffset( > codeBlock, opcodeID, instruction, >- [&] (CodeBlockType*, const typename CodeBlockType::Instruction*, OpcodeID, int operand) { >+ [&] (VirtualRegister operand) { > if (isValidRegisterForLiveness(operand)) >- use(VirtualRegister(operand).toLocal()); >+ use(operand.toLocal()); > }); > > // If we have an exception handler, we want the live-in variables of the >@@ -106,8 +104,8 @@ inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* code > } > } > >-template<typename CodeBlockType, typename Instructions> >-inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* codeBlock, const Instructions& instructions, BytecodeGraph& graph, unsigned bytecodeOffset, FastBitVector& out) >+template<typename CodeBlockType> >+inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* codeBlock, const InstructionStream& instructions, BytecodeGraph& graph, InstructionStream::Offset bytecodeOffset, FastBitVector& out) > { > stepOverInstruction( > codeBlock, instructions, graph, bytecodeOffset, >diff --git a/Source/JavaScriptCore/bytecode/BytecodeRewriter.cpp b/Source/JavaScriptCore/bytecode/BytecodeRewriter.cpp >index cb278cf10519eb61f17c69011bfb35701e645c76..40bba2e270a54ea2e96631f35fe01030283de546 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeRewriter.cpp >+++ 
b/Source/JavaScriptCore/bytecode/BytecodeRewriter.cpp >@@ -38,13 +38,13 @@ void BytecodeRewriter::applyModification() > for (size_t insertionIndex = m_insertions.size(); insertionIndex--;) { > Insertion& insertion = m_insertions[insertionIndex]; > if (insertion.type == Insertion::Type::Remove) >- m_instructions.remove(insertion.index.bytecodeOffset, insertion.length()); >+ m_writer.m_instructions.remove(insertion.index.bytecodeOffset, insertion.length()); > else { > if (insertion.includeBranch == IncludeBranch::Yes) { > int finalOffset = insertion.index.bytecodeOffset + calculateDifference(m_insertions.begin(), m_insertions.begin() + insertionIndex); > adjustJumpTargetsInFragment(finalOffset, insertion); > } >- m_instructions.insertVector(insertion.index.bytecodeOffset, insertion.instructions); >+ m_writer.m_instructions.insertVector(insertion.index.bytecodeOffset, insertion.instructions.m_instructions); > } > } > m_insertions.clear(); >@@ -56,28 +56,23 @@ void BytecodeRewriter::execute() > return lhs.index < rhs.index; > }); > >- m_codeBlock->applyModification(*this, m_instructions); >+ m_codeBlock->applyModification(*this, m_writer); > } > > void BytecodeRewriter::adjustJumpTargetsInFragment(unsigned finalOffset, Insertion& insertion) > { >- auto& fragment = insertion.instructions; >- UnlinkedInstruction* instructionsBegin = fragment.data(); >- for (unsigned fragmentOffset = 0, fragmentCount = fragment.size(); fragmentOffset < fragmentCount;) { >- UnlinkedInstruction& instruction = fragment[fragmentOffset]; >- OpcodeID opcodeID = instruction.u.opcode; >- if (isBranch(opcodeID)) { >- unsigned bytecodeOffset = finalOffset + fragmentOffset; >- extractStoredJumpTargetsForBytecodeOffset(m_codeBlock, instructionsBegin, fragmentOffset, [&](int32_t& label) { >+ for (auto& instruction : insertion.instructions) { >+ if (isBranch(instruction->opcodeID())) { >+ unsigned bytecodeOffset = finalOffset + instruction.offset(); >+ updateStoredJumpTargetsForInstruction(m_codeBlock, 
instruction, [&](int32_t label) { > int absoluteOffset = adjustAbsoluteOffset(label); >- label = absoluteOffset - static_cast<int>(bytecodeOffset); >+ return absoluteOffset - static_cast<int>(bytecodeOffset); > }); > } >- fragmentOffset += opcodeLength(opcodeID); > } > } > >-void BytecodeRewriter::insertImpl(InsertionPoint insertionPoint, IncludeBranch includeBranch, Vector<UnlinkedInstruction>&& fragment) >+void BytecodeRewriter::insertImpl(InsertionPoint insertionPoint, IncludeBranch includeBranch, InstructionStreamWriter&& writer) > { > ASSERT(insertionPoint.position == Position::Before || insertionPoint.position == Position::After); > m_insertions.append(Insertion { >@@ -85,7 +80,7 @@ void BytecodeRewriter::insertImpl(InsertionPoint insertionPoint, IncludeBranch i > Insertion::Type::Insert, > includeBranch, > 0, >- WTFMove(fragment) >+ WTFMove(writer) > }); > } > >diff --git a/Source/JavaScriptCore/bytecode/BytecodeRewriter.h b/Source/JavaScriptCore/bytecode/BytecodeRewriter.h >index a4723867ad0ce23692a3e5644ec94d1a4b4c2b54..0d5f352ac94ab2999adf3a9a6b331edd2c324cd6 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeRewriter.h >+++ b/Source/JavaScriptCore/bytecode/BytecodeRewriter.h >@@ -26,6 +26,7 @@ > > #pragma once > >+#include "BytecodeGenerator.h" > #include "BytecodeGraph.h" > #include "Bytecodes.h" > #include "Opcode.h" >@@ -93,10 +94,10 @@ public: > }; > > struct InsertionPoint { >- int bytecodeOffset; >+ InstructionStream::Offset bytecodeOffset; > Position position; > >- InsertionPoint(int offset, Position pos) >+ InsertionPoint(InstructionStream::Offset offset, Position pos) > : bytecodeOffset(offset) > , position(pos) > { >@@ -130,85 +131,86 @@ private: > Type type; > IncludeBranch includeBranch; > size_t removeLength; >- Vector<UnlinkedInstruction> instructions; >+ InstructionStreamWriter instructions; > }; > > public: > class Fragment { > WTF_MAKE_NONCOPYABLE(Fragment); > public: >- Fragment(Vector<UnlinkedInstruction>& fragment, IncludeBranch& 
includeBranch) >- : m_fragment(fragment) >+ Fragment(BytecodeGenerator& bytecodeGenerator, InstructionStreamWriter& writer, IncludeBranch& includeBranch) >+ : m_bytecodeGenerator(bytecodeGenerator) >+ , m_writer(writer) > , m_includeBranch(includeBranch) > { > } > >- template<class... Args> >- void appendInstruction(OpcodeID opcodeID, Args... args) >+ template<class Op, class... Args> >+ void appendInstruction(Args... args) > { >- if (isBranch(opcodeID)) >+ if (isBranch(Op::opcodeID())) > m_includeBranch = IncludeBranch::Yes; > >- UnlinkedInstruction instructions[sizeof...(args) + 1] = { >- UnlinkedInstruction(opcodeID), >- UnlinkedInstruction(args)... >- }; >- m_fragment.append(instructions, sizeof...(args) + 1); >+ m_bytecodeGenerator.withWriter(m_writer, [&] { >+ Op::emit(&m_bytecodeGenerator, std::forward<Args>(args)...); >+ }); > } > > private: >- Vector<UnlinkedInstruction>& m_fragment; >+ BytecodeGenerator& m_bytecodeGenerator; >+ InstructionStreamWriter& m_writer; > IncludeBranch& m_includeBranch; > }; > >- BytecodeRewriter(BytecodeGraph& graph, UnlinkedCodeBlock* codeBlock, UnlinkedCodeBlock::UnpackedInstructions& instructions) >- : m_graph(graph) >+ BytecodeRewriter(BytecodeGenerator& bytecodeGenerator, BytecodeGraph& graph, UnlinkedCodeBlock* codeBlock, InstructionStreamWriter& writer) >+ : m_bytecodeGenerator(bytecodeGenerator) >+ , m_graph(graph) > , m_codeBlock(codeBlock) >- , m_instructions(instructions) >+ , m_writer(writer) > { > } > > template<class Function> >- void insertFragmentBefore(unsigned bytecodeOffset, Function function) >+ void insertFragmentBefore(const InstructionStream::Ref& instruction, Function function) > { > IncludeBranch includeBranch = IncludeBranch::No; >- Vector<UnlinkedInstruction> instructions; >- Fragment fragment(instructions, includeBranch); >+ InstructionStreamWriter writer; >+ Fragment fragment(m_bytecodeGenerator, writer, includeBranch); > function(fragment); >- insertImpl(InsertionPoint(bytecodeOffset, 
Position::Before), includeBranch, WTFMove(instructions)); >+ insertImpl(InsertionPoint(instruction.offset(), Position::Before), includeBranch, WTFMove(writer)); > } > > template<class Function> >- void insertFragmentAfter(unsigned bytecodeOffset, Function function) >+ void insertFragmentAfter(const InstructionStream::Ref& instruction, Function function) > { > IncludeBranch includeBranch = IncludeBranch::No; >- Vector<UnlinkedInstruction> instructions; >- Fragment fragment(instructions, includeBranch); >+ InstructionStreamWriter writer; >+ Fragment fragment(m_bytecodeGenerator, writer, includeBranch); > function(fragment); >- insertImpl(InsertionPoint(bytecodeOffset, Position::After), includeBranch, WTFMove(instructions)); >+ insertImpl(InsertionPoint(instruction.offset(), Position::After), includeBranch, WTFMove(writer)); > } > >- void removeBytecode(unsigned bytecodeOffset) >+ void removeBytecode(const InstructionStream::Ref& instruction) > { >- m_insertions.append(Insertion { InsertionPoint(bytecodeOffset, Position::OriginalBytecodePoint), Insertion::Type::Remove, IncludeBranch::No, opcodeLength(m_instructions[bytecodeOffset].u.opcode), { } }); >+ m_insertions.append(Insertion { InsertionPoint(instruction.offset(), Position::OriginalBytecodePoint), Insertion::Type::Remove, IncludeBranch::No, instruction->size(), { } }); > } > > void execute(); > > BytecodeGraph& graph() { return m_graph; } > >- int adjustAbsoluteOffset(int absoluteOffset) >+ int adjustAbsoluteOffset(InstructionStream::Offset absoluteOffset) > { > return adjustJumpTarget(InsertionPoint(0, Position::EntryPoint), InsertionPoint(absoluteOffset, Position::LabelPoint)); > } > >- int adjustJumpTarget(int originalBytecodeOffset, int originalJumpTarget) >+ InstructionStream::Offset adjustJumpTarget(InstructionStream::Offset originalBytecodeOffset, InstructionStream::Offset originalJumpTarget) > { > return adjustJumpTarget(InsertionPoint(originalBytecodeOffset, Position::LabelPoint), 
InsertionPoint(originalJumpTarget, Position::LabelPoint)); > } > > private: >- void insertImpl(InsertionPoint, IncludeBranch, Vector<UnlinkedInstruction>&& fragment); >+ void insertImpl(InsertionPoint, IncludeBranch, InstructionStreamWriter&& fragment); > > friend class UnlinkedCodeBlock; > void applyModification(); >@@ -217,9 +219,10 @@ private: > int adjustJumpTarget(InsertionPoint startPoint, InsertionPoint jumpTargetPoint); > template<typename Iterator> int calculateDifference(Iterator begin, Iterator end); > >+ BytecodeGenerator& m_bytecodeGenerator; > BytecodeGraph& m_graph; > UnlinkedCodeBlock* m_codeBlock; >- UnlinkedCodeBlock::UnpackedInstructions& m_instructions; >+ InstructionStreamWriter& m_writer; > Vector<Insertion, 8> m_insertions; > }; > >diff --git a/Source/JavaScriptCore/bytecode/BytecodeUseDef.h b/Source/JavaScriptCore/bytecode/BytecodeUseDef.h >index 3e3771f5b773ca3c3837dddb3d2c1470b2324f32..0c5b39c81d458a0fd8c55f24de4a01f72f3b4ca7 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeUseDef.h >+++ b/Source/JavaScriptCore/bytecode/BytecodeUseDef.h >@@ -26,17 +26,50 @@ > #pragma once > > #include "CodeBlock.h" >+#include "Instruction.h" >+#include <wtf/Forward.h> > > namespace JSC { > >-template<typename Block, typename Functor, typename Instruction> >-void computeUsesForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, Instruction* instruction, const Functor& functor) >+#define CALL_FUNCTOR(__arg) \ >+ functor(__bytecode.__arg); >+ >+#define USES_OR_DEFS(__opcode, __args...) 
\ >+ case __opcode::opcodeID(): { \ >+ auto __bytecode = instruction->as<__opcode>(); \ >+ WTF_LAZY_FOR_EACH_TERM(CALL_FUNCTOR, __args) \ >+ return; \ >+ } >+ >+#define USES USES_OR_DEFS >+#define DEFS USES_OR_DEFS >+ >+template<typename Block, typename Functor> >+void computeUsesForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, const Instruction* instruction, const Functor& functor) > { > if (opcodeID != op_enter && (codeBlock->wasCompiledWithDebuggingOpcodes() || codeBlock->usesEval()) && codeBlock->scopeRegister().isValid()) >- functor(codeBlock, instruction, opcodeID, codeBlock->scopeRegister().offset()); >+ functor(codeBlock->scopeRegister()); >+ >+ auto handleNewArrayLike = [&](auto op) { >+ int base = op.argv.offset(); >+ for (int i = 0; i < static_cast<int>(op.argc); i++) >+ functor(VirtualRegister { base - i }); >+ }; >+ >+ auto handleOpCallLike = [&](auto op) { >+ functor(op.callee); >+ int lastArg = -static_cast<int>(op.argv) + CallFrame::thisArgumentOffset(); >+ for (int i = 0; i < static_cast<int>(op.argc); i++) >+ functor(VirtualRegister { lastArg + i }); >+ if (opcodeID == op_call_eval) >+ functor(codeBlock->scopeRegister()); >+ return; >+ }; > > switch (opcodeID) { > // No uses. 
>+ case op_wide: >+ ASSERT_NOT_REACHED(); > case op_new_regexp: > case op_debug: > case op_jneq_ptr: >@@ -57,282 +90,212 @@ void computeUsesForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, Instructi > case op_super_sampler_begin: > case op_super_sampler_end: > return; >- case op_get_scope: >- case op_to_this: >- case op_check_tdz: >- case op_identity_with_profile: >- case op_profile_type: >- case op_throw: >- case op_throw_static_error: >- case op_end: >- case op_ret: >- case op_jtrue: >- case op_jfalse: >- case op_jeq_null: >- case op_jneq_null: >- case op_dec: >- case op_inc: >- case op_log_shadow_chicken_prologue: { >- ASSERT(opcodeLengths[opcodeID] > 1); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- return; >- } >- case op_jlesseq: >- case op_jgreater: >- case op_jgreatereq: >- case op_jnless: >- case op_jnlesseq: >- case op_jngreater: >- case op_jngreatereq: >- case op_jless: >- case op_jeq: >- case op_jneq: >- case op_jstricteq: >- case op_jnstricteq: >- case op_jbelow: >- case op_jbeloweq: >- case op_set_function_name: >- case op_log_shadow_chicken_tail: { >- ASSERT(opcodeLengths[opcodeID] > 2); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- return; >- } >- case op_put_by_val_direct: >- case op_put_by_val: { >- ASSERT(opcodeLengths[opcodeID] > 3); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >- return; >- } >- case op_put_by_id: >- case op_put_to_scope: >- case op_put_to_arguments: { >- ASSERT(opcodeLengths[opcodeID] > 3); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >- return; >- } >- case op_put_by_id_with_this: { >- ASSERT(opcodeLengths[opcodeID] > 4); >- 
functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); >- return; >- } >- case op_put_by_val_with_this: { >- ASSERT(opcodeLengths[opcodeID] > 4); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); >- return; >- } >- case op_put_getter_by_id: >- case op_put_setter_by_id: { >- ASSERT(opcodeLengths[opcodeID] > 4); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); >- return; >- } >- case op_put_getter_setter_by_id: { >- ASSERT(opcodeLengths[opcodeID] > 5); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[5].u.operand); >- return; >- } >- case op_put_getter_by_val: >- case op_put_setter_by_val: { >- ASSERT(opcodeLengths[opcodeID] > 4); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); >- return; >- } >- case op_define_data_property: { >- ASSERT(opcodeLengths[opcodeID] > 4); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); >- return; >- } >- case op_define_accessor_property: { >- ASSERT(opcodeLengths[opcodeID] > 5); >- functor(codeBlock, 
instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[5].u.operand); >- return; >- } >- case op_spread: >- case op_get_property_enumerator: >- case op_get_enumerable_length: >- case op_new_func_exp: >- case op_new_generator_func_exp: >- case op_new_async_func_exp: >- case op_to_index_string: >- case op_create_lexical_environment: >- case op_resolve_scope: >- case op_resolve_scope_for_hoisting_func_decl_in_eval: >- case op_get_from_scope: >- case op_to_primitive: >- case op_try_get_by_id: >- case op_get_by_id: >- case op_get_by_id_proto_load: >- case op_get_by_id_unset: >- case op_get_by_id_direct: >- case op_get_array_length: >- case op_in_by_id: >- case op_typeof: >- case op_is_empty: >- case op_is_undefined: >- case op_is_boolean: >- case op_is_number: >- case op_is_object: >- case op_is_object_or_null: >- case op_is_cell_with_type: >- case op_is_function: >- case op_to_number: >- case op_to_string: >- case op_to_object: >- case op_negate: >- case op_neq_null: >- case op_eq_null: >- case op_not: >- case op_mov: >- case op_new_array_with_size: >- case op_create_this: >- case op_del_by_id: >- case op_unsigned: >- case op_new_func: >- case op_new_async_generator_func: >- case op_new_async_generator_func_exp: >- case op_new_generator_func: >- case op_new_async_func: >- case op_get_parent_scope: >- case op_create_scoped_arguments: >- case op_create_rest: >- case op_get_from_arguments: >- case op_new_array_buffer: { >- ASSERT(opcodeLengths[opcodeID] > 2); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- return; >- } >- case op_has_generic_property: >- case op_has_indexed_property: >- case op_enumerator_structure_pname: >- case op_enumerator_generic_pname: >- case op_get_by_val: 
>- case op_in_by_val: >- case op_overrides_has_instance: >- case op_instanceof: >- case op_add: >- case op_mul: >- case op_div: >- case op_mod: >- case op_sub: >- case op_pow: >- case op_lshift: >- case op_rshift: >- case op_urshift: >- case op_bitand: >- case op_bitxor: >- case op_bitor: >- case op_less: >- case op_lesseq: >- case op_greater: >- case op_greatereq: >- case op_below: >- case op_beloweq: >- case op_nstricteq: >- case op_stricteq: >- case op_neq: >- case op_eq: >- case op_push_with_scope: >- case op_get_by_id_with_this: >- case op_del_by_val: >- case op_tail_call_forward_arguments: { >- ASSERT(opcodeLengths[opcodeID] > 3); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >- return; >- } >- case op_get_by_val_with_this: { >- ASSERT(opcodeLengths[opcodeID] > 4); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); >- return; >- } >- case op_instanceof_custom: >- case op_has_structure_property: >- case op_construct_varargs: >- case op_call_varargs: >- case op_tail_call_varargs: { >- ASSERT(opcodeLengths[opcodeID] > 4); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); >- return; >- } >- case op_get_direct_pname: { >- ASSERT(opcodeLengths[opcodeID] > 5); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[5].u.operand); >- return; >- } >- case op_switch_string: >- case op_switch_char: >- case op_switch_imm: { >- 
ASSERT(opcodeLengths[opcodeID] > 3); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >- return; >- } >+ // functor(instruction[1].u.operand); >+ USES(OpGetScope, dst) >+ USES(OpToThis, srcDst) >+ USES(OpCheckTdz, target) >+ USES(OpIdentityWithProfile, srcDst) >+ USES(OpProfileType, target); >+ USES(OpThrow, value) >+ USES(OpThrowStaticError, message) >+ USES(OpEnd, value) >+ USES(OpRet, value) >+ USES(OpJtrue, condition) >+ USES(OpJfalse, condition) >+ USES(OpJeqNull, condition) >+ USES(OpJneqNull, condition) >+ USES(OpDec, srcDst) >+ USES(OpInc, srcDst) >+ USES(OpLogShadowChickenPrologue, scope) >+ >+ // functor(instruction[1].u.operand); >+ // functor(instruction[2].u.operand); >+ USES(OpJless, lhs, rhs) >+ USES(OpJlesseq, lhs, rhs) >+ USES(OpJgreater, lhs, rhs) >+ USES(OpJgreatereq, lhs, rhs) >+ USES(OpJnless, lhs, rhs) >+ USES(OpJnlesseq, lhs, rhs) >+ USES(OpJngreater, lhs, rhs) >+ USES(OpJngreatereq, lhs, rhs) >+ USES(OpJeq, lhs, rhs) >+ USES(OpJneq, lhs, rhs) >+ USES(OpJstricteq, lhs, rhs) >+ USES(OpJnstricteq, lhs, rhs) >+ USES(OpJbelow, lhs, rhs) >+ USES(OpJbeloweq, lhs, rhs) >+ USES(OpSetFunctionName, function, name) >+ USES(OpLogShadowChickenTail, thisValue, scope) >+ >+ // functor(instruction[1].u.operand); >+ // functor(instruction[2].u.operand); >+ // functor(instruction[3].u.operand); >+ USES(OpPutByVal, base, property, value) >+ USES(OpPutByValDirect, base, property, value) >+ >+ USES(OpPutById, base, value) >+ USES(OpPutToScope, scope, value) >+ USES(OpPutToArguments, scope, value) >+ >+ USES(OpPutByIdWithThis, base, thisValue, value) >+ >+ USES(OpPutByValWithThis, base, thisValue, property, value) >+ >+ USES(OpPutGetterById, base, accessor) >+ USES(OpPutSetterById, base, accessor) >+ >+ USES(OpPutGetterSetterById, base, getter, setter) >+ >+ USES(OpPutGetterByVal, base, property, accessor) >+ USES(OpPutSetterByVal, base, property, accessor) >+ >+ USES(OpDefineDataProperty, base, property, value, attributes) >+ >+ 
USES(OpDefineAccessorProperty, base, property, getter, setter, attributes) >+ >+ // functor(instruction[2].u.operand); >+ USES(OpSpread, argument) >+ USES(OpGetPropertyEnumerator, base) >+ USES(OpGetEnumerableLength, base) >+ USES(OpNewFuncExp, scope) >+ USES(OpNewGeneratorFuncExp, scope) >+ USES(OpNewAsyncFuncExp, scope) >+ USES(OpToIndexString, index) >+ USES(OpCreateLexicalEnvironment, scope) >+ USES(OpResolveScope, scope) >+ USES(OpResolveScopeForHoistingFuncDeclInEval, scope) >+ USES(OpGetFromScope, scope) >+ USES(OpToPrimitive, src) >+ USES(OpTryGetById, base) >+ USES(OpGetById, base) >+ USES(OpGetByIdProtoLoad, base) >+ USES(OpGetByIdUnset, base) >+ USES(OpGetByIdDirect, base) >+ USES(OpGetArrayLength, base) >+ USES(OpInById, base) >+ USES(OpTypeof, value) >+ USES(OpIsEmpty, operand) >+ USES(OpIsUndefined, operand) >+ USES(OpIsBoolean, operand) >+ USES(OpIsNumber, operand) >+ USES(OpIsObject, operand) >+ USES(OpIsObjectOrNull, operand) >+ USES(OpIsCellWithType, operand) >+ USES(OpIsFunction, operand) >+ USES(OpToNumber, operand) >+ USES(OpToString, operand) >+ USES(OpToObject, operand) >+ USES(OpNegate, operand) >+ USES(OpEqNull, operand) >+ USES(OpNeqNull, operand) >+ USES(OpNot, operand) >+ USES(OpUnsigned, operand) >+ USES(OpMov, src) >+ USES(OpNewArrayWithSize, length) >+ USES(OpCreateThis, callee) >+ USES(OpDelById, base) >+ USES(OpNewFunc, scope) >+ USES(OpNewAsyncGeneratorFunc, scope) >+ USES(OpNewAsyncGeneratorFuncExp, scope) >+ USES(OpNewGeneratorFunc, scope) >+ USES(OpNewAsyncFunc, scope) >+ USES(OpGetParentScope, scope) >+ USES(OpCreateScopedArguments, scope) >+ USES(OpCreateRest, arraySize) >+ USES(OpGetFromArguments, scope) >+ USES(OpNewArrayBuffer, immutableButterfly) >+ >+ // functor(instruction[2].u.operand); >+ // functor(instruction[3].u.operand); >+ USES(OpHasGenericProperty, base, property) >+ USES(OpHasIndexedProperty, base, property) >+ USES(OpEnumeratorStructurePname, enumerator, index) >+ USES(OpEnumeratorGenericPname, enumerator, 
index) >+ USES(OpGetByVal, base, property) >+ USES(OpInByVal, base, property) >+ USES(OpOverridesHasInstance, constructor, hasInstanceValue) >+ USES(OpInstanceof, value, prototype) >+ USES(OpAdd, lhs, rhs) >+ USES(OpMul, lhs, rhs) >+ USES(OpDiv, lhs, rhs) >+ USES(OpMod, lhs, rhs) >+ USES(OpSub, lhs, rhs) >+ USES(OpPow, lhs, rhs) >+ USES(OpLshift, lhs, rhs) >+ USES(OpRshift, lhs, rhs) >+ USES(OpUrshift, lhs, rhs) >+ USES(OpBitand, lhs, rhs) >+ USES(OpBitxor, lhs, rhs) >+ USES(OpBitor, lhs, rhs) >+ USES(OpLess, lhs, rhs) >+ USES(OpLesseq, lhs, rhs) >+ USES(OpGreater, lhs, rhs) >+ USES(OpGreatereq, lhs, rhs) >+ USES(OpBelow, lhs, rhs) >+ USES(OpBeloweq, lhs, rhs) >+ USES(OpNstricteq, lhs, rhs) >+ USES(OpStricteq, lhs, rhs) >+ USES(OpNeq, lhs, rhs) >+ USES(OpEq, lhs, rhs) >+ USES(OpPushWithScope, currentScope, newScope) >+ USES(OpGetByIdWithThis, base, thisValue) >+ USES(OpDelByVal, base, property) >+ USES(OpTailCallForwardArguments, callee, thisValue) >+ >+ // functor(instruction[2].u.operand); >+ // functor(instruction[3].u.operand); >+ // functor(instruction[4].u.operand); >+ USES(OpGetByValWithThis, base, thisValue, property) >+ USES(OpInstanceofCustom, value, constructor, hasInstanceValue) >+ USES(OpHasStructureProperty, base, property, enumerator) >+ USES(OpConstructVarargs, callee, thisValue, arguments) >+ USES(OpCallVarargs, callee, thisValue, arguments) >+ USES(OpTailCallVarargs, callee, thisValue, arguments) >+ >+ USES(OpGetDirectPname, base, property, index, enumerator) >+ >+ USES(OpSwitchString, scrutinee) >+ USES(OpSwitchChar, scrutinee) >+ USES(OpSwitchImm, scrutinee) >+ >+ USES(OpYield, generator, argument) >+ > case op_new_array_with_spread: >+ handleNewArrayLike(instruction->as<OpNewArrayWithSpread>()); >+ return; > case op_new_array: >- case op_strcat: { >- int base = instruction[2].u.operand; >- int count = instruction[3].u.operand; >- for (int i = 0; i < count; i++) >- functor(codeBlock, instruction, opcodeID, base - i); >+ 
handleNewArrayLike(instruction->as<OpNewArray>()); > return; >- } >+ case op_strcat: >+ handleNewArrayLike(instruction->as<OpNewArray>()); >+ return; >+ > case op_construct: >+ handleOpCallLike(instruction->as<OpConstruct>()); >+ return; > case op_call_eval: >+ handleOpCallLike(instruction->as<OpCallEval>()); >+ return; > case op_call: >- case op_tail_call: { >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- int argCount = instruction[3].u.operand; >- int registerOffset = -instruction[4].u.operand; >- int lastArg = registerOffset + CallFrame::thisArgumentOffset(); >- for (int i = 0; i < argCount; i++) >- functor(codeBlock, instruction, opcodeID, lastArg + i); >- if (opcodeID == op_call_eval) >- functor(codeBlock, instruction, opcodeID, codeBlock->scopeRegister().offset()); >+ handleOpCallLike(instruction->as<OpCall>()); > return; >- } >- case op_yield: { >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >+ case op_tail_call: >+ handleOpCallLike(instruction->as<OpTailCall>()); > return; >- } >+ > default: > RELEASE_ASSERT_NOT_REACHED(); > break; > } > } > >-template<typename Block, typename Instruction, typename Functor> >-void computeDefsForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, Instruction* instruction, const Functor& functor) >+template<typename Block, typename Functor> >+void computeDefsForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, const Instruction* instruction, const Functor& functor) > { > switch (opcodeID) { > // These don't define anything. >+ case op_wide: >+ ASSERT_NOT_REACHED(); > case op_put_to_scope: > case op_end: > case op_throw: >@@ -392,133 +355,128 @@ void computeDefsForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, Instructi > #undef LLINT_HELPER_OPCODES > return; > // These all have a single destination for the first argument. 
>- case op_argument_count: >- case op_to_index_string: >- case op_get_enumerable_length: >- case op_has_indexed_property: >- case op_has_structure_property: >- case op_has_generic_property: >- case op_get_direct_pname: >- case op_get_property_enumerator: >- case op_enumerator_structure_pname: >- case op_enumerator_generic_pname: >- case op_get_parent_scope: >- case op_push_with_scope: >- case op_create_lexical_environment: >- case op_resolve_scope: >- case op_resolve_scope_for_hoisting_func_decl_in_eval: >- case op_strcat: >- case op_to_primitive: >- case op_create_this: >- case op_new_array: >- case op_new_array_with_spread: >- case op_spread: >- case op_new_array_buffer: >- case op_new_array_with_size: >- case op_new_regexp: >- case op_new_func: >- case op_new_func_exp: >- case op_new_generator_func: >- case op_new_generator_func_exp: >- case op_new_async_generator_func: >- case op_new_async_generator_func_exp: >- case op_new_async_func: >- case op_new_async_func_exp: >- case op_call_varargs: >- case op_tail_call_varargs: >- case op_tail_call_forward_arguments: >- case op_construct_varargs: >- case op_get_from_scope: >- case op_call: >- case op_tail_call: >- case op_call_eval: >- case op_construct: >- case op_try_get_by_id: >- case op_get_by_id: >- case op_get_by_id_proto_load: >- case op_get_by_id_unset: >- case op_get_by_id_direct: >- case op_get_by_id_with_this: >- case op_get_by_val_with_this: >- case op_get_array_length: >- case op_overrides_has_instance: >- case op_instanceof: >- case op_instanceof_custom: >- case op_get_by_val: >- case op_typeof: >- case op_identity_with_profile: >- case op_is_empty: >- case op_is_undefined: >- case op_is_boolean: >- case op_is_number: >- case op_is_object: >- case op_is_object_or_null: >- case op_is_cell_with_type: >- case op_is_function: >- case op_in_by_id: >- case op_in_by_val: >- case op_to_number: >- case op_to_string: >- case op_to_object: >- case op_negate: >- case op_add: >- case op_mul: >- case op_div: >- case 
op_mod: >- case op_sub: >- case op_pow: >- case op_lshift: >- case op_rshift: >- case op_urshift: >- case op_bitand: >- case op_bitxor: >- case op_bitor: >- case op_inc: >- case op_dec: >- case op_eq: >- case op_neq: >- case op_stricteq: >- case op_nstricteq: >- case op_less: >- case op_lesseq: >- case op_greater: >- case op_greatereq: >- case op_below: >- case op_beloweq: >- case op_neq_null: >- case op_eq_null: >- case op_not: >- case op_mov: >- case op_new_object: >- case op_to_this: >- case op_check_tdz: >- case op_get_scope: >- case op_create_direct_arguments: >- case op_create_scoped_arguments: >- case op_create_cloned_arguments: >- case op_del_by_id: >- case op_del_by_val: >- case op_unsigned: >- case op_get_from_arguments: >- case op_get_argument: >- case op_create_rest: >- case op_get_rest_length: { >- ASSERT(opcodeLengths[opcodeID] > 1); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- return; >- } >- case op_catch: { >- ASSERT(opcodeLengths[opcodeID] > 2); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- return; >- } >+ DEFS(OpArgumentCount, dst) >+ DEFS(OpToIndexString, dst) >+ DEFS(OpGetEnumerableLength, dst) >+ DEFS(OpHasIndexedProperty, dst) >+ DEFS(OpHasStructureProperty, dst) >+ DEFS(OpHasGenericProperty, dst) >+ DEFS(OpGetDirectPname, dst) >+ DEFS(OpGetPropertyEnumerator, dst) >+ DEFS(OpEnumeratorStructurePname, dst) >+ DEFS(OpEnumeratorGenericPname, dst) >+ DEFS(OpGetParentScope, dst) >+ DEFS(OpPushWithScope, dst) >+ DEFS(OpCreateLexicalEnvironment, dst) >+ DEFS(OpResolveScope, dst) >+ DEFS(OpResolveScopeForHoistingFuncDeclInEval, dst) >+ DEFS(OpStrcat, dst) >+ DEFS(OpToPrimitive, dst) >+ DEFS(OpCreateThis, dst) >+ DEFS(OpNewArray, dst) >+ DEFS(OpNewArrayWithSpread, dst) >+ DEFS(OpSpread, dst) >+ DEFS(OpNewArrayBuffer, dst) >+ DEFS(OpNewArrayWithSize, dst) >+ DEFS(OpNewRegexp, dst) >+ DEFS(OpNewFunc, dst) >+ 
DEFS(OpNewFuncExp, dst) >+ DEFS(OpNewGeneratorFunc, dst) >+ DEFS(OpNewGeneratorFuncExp, dst) >+ DEFS(OpNewAsyncGeneratorFunc, dst) >+ DEFS(OpNewAsyncGeneratorFuncExp, dst) >+ DEFS(OpNewAsyncFunc, dst) >+ DEFS(OpNewAsyncFuncExp, dst) >+ DEFS(OpCallVarargs, dst) >+ DEFS(OpTailCallVarargs, dst) >+ DEFS(OpTailCallForwardArguments, dst) >+ DEFS(OpConstructVarargs, dst) >+ DEFS(OpGetFromScope, dst) >+ DEFS(OpCall, dst) >+ DEFS(OpTailCall, dst) >+ DEFS(OpCallEval, dst) >+ DEFS(OpConstruct, dst) >+ DEFS(OpTryGetById, dst) >+ DEFS(OpGetById, dst) >+ DEFS(OpGetByIdProtoLoad, dst) >+ DEFS(OpGetByIdUnset, dst) >+ DEFS(OpGetByIdDirect, dst) >+ DEFS(OpGetByIdWithThis, dst) >+ DEFS(OpGetByValWithThis, dst) >+ DEFS(OpGetArrayLength, dst) >+ DEFS(OpOverridesHasInstance, dst) >+ DEFS(OpInstanceof, dst) >+ DEFS(OpInstanceofCustom, dst) >+ DEFS(OpGetByVal, dst) >+ DEFS(OpTypeof, dst) >+ DEFS(OpIdentityWithProfile, srcDst) >+ DEFS(OpIsEmpty, dst) >+ DEFS(OpIsUndefined, dst) >+ DEFS(OpIsBoolean, dst) >+ DEFS(OpIsNumber, dst) >+ DEFS(OpIsObject, dst) >+ DEFS(OpIsObjectOrNull, dst) >+ DEFS(OpIsCellWithType, dst) >+ DEFS(OpIsFunction, dst) >+ DEFS(OpInById, dst) >+ DEFS(OpInByVal, dst) >+ DEFS(OpToNumber, dst) >+ DEFS(OpToString, dst) >+ DEFS(OpToObject, dst) >+ DEFS(OpNegate, dst) >+ DEFS(OpAdd, dst) >+ DEFS(OpMul, dst) >+ DEFS(OpDiv, dst) >+ DEFS(OpMod, dst) >+ DEFS(OpSub, dst) >+ DEFS(OpPow, dst) >+ DEFS(OpLshift, dst) >+ DEFS(OpRshift, dst) >+ DEFS(OpUrshift, dst) >+ DEFS(OpBitand, dst) >+ DEFS(OpBitxor, dst) >+ DEFS(OpBitor, dst) >+ DEFS(OpInc, srcDst) >+ DEFS(OpDec, srcDst) >+ DEFS(OpEq, dst) >+ DEFS(OpNeq, dst) >+ DEFS(OpStricteq, dst) >+ DEFS(OpNstricteq, dst) >+ DEFS(OpLess, dst) >+ DEFS(OpLesseq, dst) >+ DEFS(OpGreater, dst) >+ DEFS(OpGreatereq, dst) >+ DEFS(OpBelow, dst) >+ DEFS(OpBeloweq, dst) >+ DEFS(OpNeqNull, dst) >+ DEFS(OpEqNull, dst) >+ DEFS(OpNot, dst) >+ DEFS(OpMov, dst) >+ DEFS(OpNewObject, dst) >+ DEFS(OpToThis, srcDst) >+ DEFS(OpCheckTdz, target) >+ DEFS(OpGetScope, 
dst) >+ DEFS(OpCreateDirectArguments, dst) >+ DEFS(OpCreateScopedArguments, dst) >+ DEFS(OpCreateClonedArguments, dst) >+ DEFS(OpDelById, dst) >+ DEFS(OpDelByVal, dst) >+ DEFS(OpUnsigned, dst) >+ DEFS(OpGetFromArguments, dst) >+ DEFS(OpGetArgument, dst) >+ DEFS(OpCreateRest, dst) >+ DEFS(OpGetRestLength, dst) >+ DEFS(OpCatch, exception, thrownValue) > case op_enter: { > for (unsigned i = codeBlock->numVars(); i--;) >- functor(codeBlock, instruction, opcodeID, virtualRegisterForLocal(i).offset()); >+ functor(virtualRegisterForLocal(i)); > return; > } > } > } > >+#undef CALL_FUNCTOR >+#undef USES_OR_DEFS >+#undef USES >+#undef DEFS > } // namespace JSC >diff --git a/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp >index aadf3ea32ed158b243076061021dbb9b37343d34..986ec24f0c7c085ac904b58cbc3ced36215d7fd9 100644 >--- a/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp >+++ b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp >@@ -26,6 +26,7 @@ > #include "config.h" > #include "CallLinkStatus.h" > >+#include "BytecodeStructs.h" > #include "CallLinkInfo.h" > #include "CodeBlock.h" > #include "DFGJITCode.h" >@@ -66,12 +67,24 @@ CallLinkStatus CallLinkStatus::computeFromLLInt(const ConcurrentJSLocker&, CodeB > } > #endif > >- Instruction* instruction = &profiledBlock->instructions()[bytecodeIndex]; >- OpcodeID op = Interpreter::getOpcodeID(instruction[0].u.opcode); >- if (op != op_call && op != op_construct && op != op_tail_call) >+ auto instruction = profiledBlock->instructions().at(bytecodeIndex); >+ OpcodeID op = instruction->opcodeID(); >+ >+ LLIntCallLinkInfo* callLinkInfo; >+ switch (op) { >+ case op_call: >+ callLinkInfo = &instruction->as<OpCall>().metadata(profiledBlock).callLinkInfo; >+ break; >+ case op_construct: >+ callLinkInfo = &instruction->as<OpConstruct>().metadata(profiledBlock).callLinkInfo; >+ break; >+ case op_tail_call: >+ callLinkInfo = 
&instruction->as<OpTailCall>().metadata(profiledBlock).callLinkInfo; >+ break; >+ default: > return CallLinkStatus(); >+ } > >- LLIntCallLinkInfo* callLinkInfo = instruction[5].u.callLinkInfo; > > return CallLinkStatus(callLinkInfo->lastSeenCallee.get()); > } >diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp >index d051ab37da10f70fde0fff97f37d201033ab7310..55fdf1167b77e375fd4d966bfa4500a85aff6981 100644 >--- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp >+++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp >@@ -51,6 +51,7 @@ > #include "FunctionExecutableDump.h" > #include "GetPutInfo.h" > #include "InlineCallFrame.h" >+#include "InstructionStream.h" > #include "InterpreterInlines.h" > #include "IsoCellSetInlines.h" > #include "JIT.h" >@@ -81,7 +82,6 @@ > #include "StructureStubInfo.h" > #include "TypeLocationCache.h" > #include "TypeProfiler.h" >-#include "UnlinkedInstructionStream.h" > #include "VMInlines.h" > #include <wtf/BagToHashMap.h> > #include <wtf/CommaPrinter.h> >@@ -243,15 +243,15 @@ void CodeBlock::dumpBytecode(PrintStream& out) > BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, statusMap); > } > >-void CodeBlock::dumpBytecode(PrintStream& out, const Instruction* begin, const Instruction*& it, const ICStatusMap& statusMap) >+void CodeBlock::dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap) > { >- BytecodeDumper<CodeBlock>::dumpBytecode(this, out, begin, it, statusMap); >+ BytecodeDumper<CodeBlock>::dumpBytecode(this, out, it, statusMap); > } > > void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& statusMap) > { >- const Instruction* it = &instructions()[bytecodeOffset]; >- dumpBytecode(out, instructions().begin(), it, statusMap); >+ const auto it = instructions().at(bytecodeOffset); >+ dumpBytecode(out, it, statusMap); > } > > #define FOR_EACH_MEMBER_VECTOR(macro) \ >@@ -375,6 +375,7 @@ 
CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecut > , m_unlinkedCode(*vm, this, unlinkedCodeBlock) > , m_ownerExecutable(*vm, this, ownerExecutable) > , m_poisonedVM(vm) >+ , m_instructions(&unlinkedCodeBlock->instructions()) > , m_thisRegister(unlinkedCodeBlock->thisRegister()) > , m_scopeRegister(unlinkedCodeBlock->scopeRegister()) > , m_source(WTFMove(sourceProvider)) >@@ -403,7 +404,7 @@ CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecut > // all the CodeBlocks of an UnlinkedCodeBlock. We rely on this fact by caching the liveness analysis > // inside UnlinkedCodeBlock. > bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, >- JSScope* scope) >+ JSScope*) > { > Base::finishCreation(vm); > finishCreationCommon(vm); >@@ -513,47 +514,32 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink > setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters()); > #endif > >- // Copy and translate the UnlinkedInstructions >- unsigned instructionCount = unlinkedCodeBlock->instructions().count(); >- UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions()); >- > // Bookkeep the strongly referenced module environments. 
> HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments; > >- RefCountedArray<Instruction> instructions(instructionCount); >- >- unsigned valueProfileCount = 0; >- auto linkValueProfile = [&](unsigned bytecodeOffset, unsigned opLength) { >- if (!vm.canUseJIT()) { >- ASSERT(vm.noJITValueProfileSingleton); >- instructions[bytecodeOffset + opLength - 1] = vm.noJITValueProfileSingleton.get(); >- return; >- } >- >- unsigned valueProfileIndex = valueProfileCount++; >- ValueProfile* profile = &m_valueProfiles[valueProfileIndex]; >- ASSERT(profile->m_bytecodeOffset == -1); >- profile->m_bytecodeOffset = bytecodeOffset; >- instructions[bytecodeOffset + opLength - 1] = profile; >- }; >- >- for (unsigned i = 0; !instructionReader.atEnd(); ) { >- const UnlinkedInstruction* pc = instructionReader.next(); >- >- unsigned opLength = opcodeLength(pc[0].u.opcode); >- >- instructions[i] = Interpreter::getOpcode(pc[0].u.opcode); >- for (size_t j = 1; j < opLength; ++j) { >- if (sizeof(int32_t) != sizeof(intptr_t)) >- instructions[i + j].u.pointer = 0; >- instructions[i + j].u.operand = pc[j].u.operand; >- } >- switch (pc[0].u.opcode) { >+ //unsigned valueProfileCount = 0; >+ //auto linkValueProfile = [&](unsigned bytecodeOffset, unsigned opLength) { >+ //if (!vm.canUseJIT()) { >+ //ASSERT(vm.noJITValueProfileSingleton); >+ ////instructions[bytecodeOffset + opLength - 1] = vm.noJITValueProfileSingleton.get(); >+ //return; >+ //} >+ >+ //unsigned valueProfileIndex = valueProfileCount++; >+ //ValueProfile* profile = &m_valueProfiles[valueProfileIndex]; >+ //ASSERT(profile->m_bytecodeOffset == -1); >+ //profile->m_bytecodeOffset = bytecodeOffset; >+ //instructions[bytecodeOffset + opLength - 1] = profile; >+ //}; >+ >+ for (const auto& instruction : *m_instructions) { >+ switch (instruction->opcodeID()) { > case op_has_indexed_property: { >- int arrayProfileIndex = pc[opLength - 1].u.operand; >- m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); >+ // TODO: link array 
profile >+ //int arrayProfileIndex = pc[opLength - 1].u.operand; >+ //m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); > >- instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex]; >+ //instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex]; > break; > } > case op_call_varargs: >@@ -561,10 +547,11 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink > case op_tail_call_forward_arguments: > case op_construct_varargs: > case op_get_by_val: { >- int arrayProfileIndex = pc[opLength - 2].u.operand; >- m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); >+ // TODO: link array profile >+ //int arrayProfileIndex = pc[opLength - 2].u.operand; >+ //m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); > >- instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex]; >+ //instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex]; > FALLTHROUGH; > } > case op_get_direct_pname: >@@ -577,157 +564,157 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink > case op_to_number: > case op_to_object: > case op_get_argument: { >- linkValueProfile(i, opLength); >+ //linkValueProfile(i, opLength); > break; > } > > case op_to_this: { >- linkValueProfile(i, opLength); >+ //linkValueProfile(i, opLength); > break; > } > > case op_in_by_val: > case op_put_by_val: > case op_put_by_val_direct: { >- int arrayProfileIndex = pc[opLength - 1].u.operand; >- m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); >- instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex]; >+ //int arrayProfileIndex = pc[opLength - 1].u.operand; >+ //m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); >+ //instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex]; > break; > } > > case op_new_array: > case op_new_array_with_size: > case op_new_array_buffer: { >- unsigned arrayAllocationProfileIndex; >- IndexingType recommendedIndexingType; >- std::tie(arrayAllocationProfileIndex, 
recommendedIndexingType) = UnlinkedCodeBlock::decompressArrayAllocationProfile(pc[opLength - 1].u.operand); >- >- ArrayAllocationProfile* profile = &m_arrayAllocationProfiles[arrayAllocationProfileIndex]; >- if (pc[0].u.opcode == op_new_array_buffer) >- profile->initializeIndexingMode(recommendedIndexingType); >- instructions[i + opLength - 1] = profile; >+ //unsigned arrayAllocationProfileIndex; >+ //IndexingType recommendedIndexingType; >+ //std::tie(arrayAllocationProfileIndex, recommendedIndexingType) = UnlinkedCodeBlock::decompressArrayAllocationProfile(pc[opLength - 1].u.operand); >+ >+ //ArrayAllocationProfile* profile = &m_arrayAllocationProfiles[arrayAllocationProfileIndex]; >+ //if (pc[0].u.opcode == op_new_array_buffer) >+ //profile->initializeIndexingMode(recommendedIndexingType); >+ //instructions[i + opLength - 1] = profile; > break; > } > > case op_new_object: { >- int objectAllocationProfileIndex = pc[opLength - 1].u.operand; >- ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex]; >- int inferredInlineCapacity = pc[opLength - 2].u.operand; >+ //int objectAllocationProfileIndex = pc[opLength - 1].u.operand; >+ //ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex]; >+ //int inferredInlineCapacity = pc[opLength - 2].u.operand; > >- instructions[i + opLength - 1] = objectAllocationProfile; >- objectAllocationProfile->initializeProfile(vm, >- m_globalObject.get(), this, m_globalObject->objectPrototype(), inferredInlineCapacity); >+ //instructions[i + opLength - 1] = objectAllocationProfile; >+ //objectAllocationProfile->initializeProfile(vm, >+ //m_globalObject.get(), this, m_globalObject->objectPrototype(), inferredInlineCapacity); > break; > } > > case op_call: > case op_tail_call: > case op_call_eval: { >- linkValueProfile(i, opLength); >- int arrayProfileIndex = pc[opLength - 2].u.operand; >- m_arrayProfiles[arrayProfileIndex] = 
ArrayProfile(i); >- instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex]; >- instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand]; >+ //linkValueProfile(i, opLength); >+ //int arrayProfileIndex = pc[opLength - 2].u.operand; >+ //m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); >+ //instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex]; >+ //instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand]; > break; > } > case op_construct: { >- instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand]; >- linkValueProfile(i, opLength); >+ //instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand]; >+ //linkValueProfile(i, opLength); > break; > } > case op_get_array_length: > CRASH(); > > case op_resolve_scope: { >- const Identifier& ident = identifier(pc[3].u.operand); >- ResolveType type = static_cast<ResolveType>(pc[4].u.operand); >- RELEASE_ASSERT(type != LocalClosureVar); >- int localScopeDepth = pc[5].u.operand; >- >- ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization); >- RETURN_IF_EXCEPTION(throwScope, false); >- >- instructions[i + 4].u.operand = op.type; >- instructions[i + 5].u.operand = op.depth; >- if (op.lexicalEnvironment) { >- if (op.type == ModuleVar) { >- // Keep the linked module environment strongly referenced. 
>- if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry) >- addConstant(op.lexicalEnvironment); >- instructions[i + 6].u.jsCell.set(vm, this, op.lexicalEnvironment); >- } else >- instructions[i + 6].u.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable()); >- } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this)) >- instructions[i + 6].u.jsCell.set(vm, this, constantScope); >- else >- instructions[i + 6].u.pointer = nullptr; >+ //const Identifier& ident = identifier(pc[3].u.operand); >+ //ResolveType type = static_cast<ResolveType>(pc[4].u.operand); >+ //RELEASE_ASSERT(type != LocalClosureVar); >+ //int localScopeDepth = pc[5].u.operand; >+ >+ //ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization); >+ //RETURN_IF_EXCEPTION(throwScope, false); >+ >+ //instructions[i + 4].u.operand = op.type; >+ //instructions[i + 5].u.operand = op.depth; >+ //if (op.lexicalEnvironment) { >+ //if (op.type == ModuleVar) { >+ //// Keep the linked module environment strongly referenced. 
>+ //if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry) >+ //addConstant(op.lexicalEnvironment); >+ //instructions[i + 6].u.jsCell.set(vm, this, op.lexicalEnvironment); >+ //} else >+ //instructions[i + 6].u.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable()); >+ //} else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this)) >+ //instructions[i + 6].u.jsCell.set(vm, this, constantScope); >+ //else >+ //instructions[i + 6].u.pointer = nullptr; > break; > } > > case op_get_from_scope: { >- linkValueProfile(i, opLength); >+ //linkValueProfile(i, opLength); > > // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand > >- int localScopeDepth = pc[5].u.operand; >- instructions[i + 5].u.pointer = nullptr; >- >- GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand); >- ASSERT(!isInitialization(getPutInfo.initializationMode())); >- if (getPutInfo.resolveType() == LocalClosureVar) { >- instructions[i + 4] = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand(); >- break; >- } >- >- const Identifier& ident = identifier(pc[3].u.operand); >- ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, getPutInfo.resolveType(), InitializationMode::NotInitialization); >- RETURN_IF_EXCEPTION(throwScope, false); >- >- instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand(); >- if (op.type == ModuleVar) >- instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand(); >- if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks) >- instructions[i + 5].u.watchpointSet = op.watchpointSet; >- else if (op.structure) >- instructions[i + 5].u.structure.set(vm, this, op.structure); >- 
instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand); >+ //int localScopeDepth = pc[5].u.operand; >+ //instructions[i + 5].u.pointer = nullptr; >+ >+ //GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand); >+ //ASSERT(!isInitialization(getPutInfo.initializationMode())); >+ //if (getPutInfo.resolveType() == LocalClosureVar) { >+ //instructions[i + 4] = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand(); >+ //break; >+ //} >+ >+ //const Identifier& ident = identifier(pc[3].u.operand); >+ //ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, getPutInfo.resolveType(), InitializationMode::NotInitialization); >+ //RETURN_IF_EXCEPTION(throwScope, false); >+ >+ //instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand(); >+ //if (op.type == ModuleVar) >+ //instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand(); >+ //if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks) >+ //instructions[i + 5].u.watchpointSet = op.watchpointSet; >+ //else if (op.structure) >+ //instructions[i + 5].u.structure.set(vm, this, op.structure); >+ //instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand); > break; > } > > case op_put_to_scope: { > // put_to_scope scope, id, value, GetPutInfo, Structure, Operand >- GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand); >- if (getPutInfo.resolveType() == LocalClosureVar) { >- // Only do watching if the property we're putting to is not anonymous. 
>- if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) { >- int symbolTableIndex = pc[5].u.operand; >- SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex)); >- const Identifier& ident = identifier(pc[2].u.operand); >- ConcurrentJSLocker locker(symbolTable->m_lock); >- auto iter = symbolTable->find(locker, ident.impl()); >- ASSERT(iter != symbolTable->end(locker)); >- iter->value.prepareToWatch(); >- instructions[i + 5].u.watchpointSet = iter->value.watchpointSet(); >- } else >- instructions[i + 5].u.watchpointSet = nullptr; >- break; >- } >- >- const Identifier& ident = identifier(pc[2].u.operand); >- int localScopeDepth = pc[5].u.operand; >- instructions[i + 5].u.pointer = nullptr; >- ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, getPutInfo.resolveType(), getPutInfo.initializationMode()); >- RETURN_IF_EXCEPTION(throwScope, false); >- >- instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand(); >- if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks) >- instructions[i + 5].u.watchpointSet = op.watchpointSet; >- else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) { >- if (op.watchpointSet) >- op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident)); >- } else if (op.structure) >- instructions[i + 5].u.structure.set(vm, this, op.structure); >- instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand); >+ //GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand); >+ //if (getPutInfo.resolveType() == LocalClosureVar) { >+ //// Only do watching if the property we're putting to is not anonymous. 
>+ //if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) { >+ //int symbolTableIndex = pc[5].u.operand; >+ //SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex)); >+ //const Identifier& ident = identifier(pc[2].u.operand); >+ //ConcurrentJSLocker locker(symbolTable->m_lock); >+ //auto iter = symbolTable->find(locker, ident.impl()); >+ //ASSERT(iter != symbolTable->end(locker)); >+ //iter->value.prepareToWatch(); >+ //instructions[i + 5].u.watchpointSet = iter->value.watchpointSet(); >+ //} else >+ //instructions[i + 5].u.watchpointSet = nullptr; >+ //break; >+ //} >+ >+ //const Identifier& ident = identifier(pc[2].u.operand); >+ //int localScopeDepth = pc[5].u.operand; >+ //instructions[i + 5].u.pointer = nullptr; >+ //ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, getPutInfo.resolveType(), getPutInfo.initializationMode()); >+ //RETURN_IF_EXCEPTION(throwScope, false); >+ >+ //instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand(); >+ //if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks) >+ //instructions[i + 5].u.watchpointSet = op.watchpointSet; >+ //else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) { >+ //if (op.watchpointSet) >+ //op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident)); >+ //} else if (op.structure) >+ //instructions[i + 5].u.structure.set(vm, this, op.structure); >+ //instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand); > > break; > } >@@ -735,98 +722,98 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink > case op_profile_type: { > RELEASE_ASSERT(vm.typeProfiler()); > // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType? 
>- size_t instructionOffset = i + opLength - 1; >- unsigned divotStart, divotEnd; >- GlobalVariableID globalVariableID = 0; >- RefPtr<TypeSet> globalTypeSet; >- bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd); >- VirtualRegister profileRegister(pc[1].u.operand); >- ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand); >- SymbolTable* symbolTable = nullptr; >- >- switch (flag) { >- case ProfileTypeBytecodeClosureVar: { >- const Identifier& ident = identifier(pc[4].u.operand); >- int localScopeDepth = pc[2].u.operand; >- ResolveType type = static_cast<ResolveType>(pc[5].u.operand); >- // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because >- // we're abstractly "read"ing from a JSScope. >- ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization); >- RETURN_IF_EXCEPTION(throwScope, false); >- >- if (op.type == ClosureVar || op.type == ModuleVar) >- symbolTable = op.lexicalEnvironment->symbolTable(); >- else if (op.type == GlobalVar) >- symbolTable = m_globalObject.get()->symbolTable(); >- >- UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl(); >- if (symbolTable) { >- ConcurrentJSLocker locker(symbolTable->m_lock); >- // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet. 
>- symbolTable->prepareForTypeProfiling(locker); >- globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm); >- globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm); >- } else >- globalVariableID = TypeProfilerNoGlobalIDExists; >- >- break; >- } >- case ProfileTypeBytecodeLocallyResolved: { >- int symbolTableIndex = pc[2].u.operand; >- SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex)); >- const Identifier& ident = identifier(pc[4].u.operand); >- ConcurrentJSLocker locker(symbolTable->m_lock); >- // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet. >- globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm); >- globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm); >- >- break; >- } >- case ProfileTypeBytecodeDoesNotHaveGlobalID: >- case ProfileTypeBytecodeFunctionArgument: { >- globalVariableID = TypeProfilerNoGlobalIDExists; >- break; >- } >- case ProfileTypeBytecodeFunctionReturnStatement: { >- RELEASE_ASSERT(ownerExecutable->isFunctionExecutable()); >- globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet(); >- globalVariableID = TypeProfilerReturnStatement; >- if (!shouldAnalyze) { >- // Because a return statement can be added implicitly to return undefined at the end of a function, >- // and these nodes don't emit expression ranges because they aren't in the actual source text of >- // the user's program, give the type profiler some range to identify these return statements. >- // Currently, the text offset that is used as identification is "f" in the function keyword >- // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable. 
>- divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset(); >- shouldAnalyze = true; >- } >- break; >- } >- } >- >- std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID, >- ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm); >- TypeLocation* location = locationPair.first; >- bool isNewLocation = locationPair.second; >- >- if (flag == ProfileTypeBytecodeFunctionReturnStatement) >- location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset(); >- >- if (shouldAnalyze && isNewLocation) >- vm.typeProfiler()->insertNewLocation(location); >- >- instructions[i + 2].u.location = location; >+ //size_t instructionOffset = i + opLength - 1; >+ //unsigned divotStart, divotEnd; >+ //GlobalVariableID globalVariableID = 0; >+ //RefPtr<TypeSet> globalTypeSet; >+ //bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd); >+ //VirtualRegister profileRegister(pc[1].u.operand); >+ //ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand); >+ //SymbolTable* symbolTable = nullptr; >+ >+ //switch (flag) { >+ //case ProfileTypeBytecodeClosureVar: { >+ //const Identifier& ident = identifier(pc[4].u.operand); >+ //int localScopeDepth = pc[2].u.operand; >+ //ResolveType type = static_cast<ResolveType>(pc[5].u.operand); >+ //// Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because >+ //// we're abstractly "read"ing from a JSScope. 
>+ //ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization); >+ //RETURN_IF_EXCEPTION(throwScope, false); >+ >+ //if (op.type == ClosureVar || op.type == ModuleVar) >+ //symbolTable = op.lexicalEnvironment->symbolTable(); >+ //else if (op.type == GlobalVar) >+ //symbolTable = m_globalObject.get()->symbolTable(); >+ >+ //UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl(); >+ //if (symbolTable) { >+ //ConcurrentJSLocker locker(symbolTable->m_lock); >+ //// If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet. >+ //symbolTable->prepareForTypeProfiling(locker); >+ //globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm); >+ //globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm); >+ //} else >+ //globalVariableID = TypeProfilerNoGlobalIDExists; >+ >+ //break; >+ //} >+ //case ProfileTypeBytecodeLocallyResolved: { >+ //int symbolTableIndex = pc[2].u.operand; >+ //SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex)); >+ //const Identifier& ident = identifier(pc[4].u.operand); >+ //ConcurrentJSLocker locker(symbolTable->m_lock); >+ //// If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet. 
>+ //globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm); >+ //globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm); >+ >+ //break; >+ //} >+ //case ProfileTypeBytecodeDoesNotHaveGlobalID: >+ //case ProfileTypeBytecodeFunctionArgument: { >+ //globalVariableID = TypeProfilerNoGlobalIDExists; >+ //break; >+ //} >+ //case ProfileTypeBytecodeFunctionReturnStatement: { >+ //RELEASE_ASSERT(ownerExecutable->isFunctionExecutable()); >+ //globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet(); >+ //globalVariableID = TypeProfilerReturnStatement; >+ //if (!shouldAnalyze) { >+ //// Because a return statement can be added implicitly to return undefined at the end of a function, >+ //// and these nodes don't emit expression ranges because they aren't in the actual source text of >+ //// the user's program, give the type profiler some range to identify these return statements. >+ //// Currently, the text offset that is used as identification is "f" in the function keyword >+ //// and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable. 
>+ //divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset(); >+ //shouldAnalyze = true; >+ //} >+ //break; >+ //} >+ //} >+ >+ //std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID, >+ //ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm); >+ //TypeLocation* location = locationPair.first; >+ //bool isNewLocation = locationPair.second; >+ >+ //if (flag == ProfileTypeBytecodeFunctionReturnStatement) >+ //location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset(); >+ >+ //if (shouldAnalyze && isNewLocation) >+ //vm.typeProfiler()->insertNewLocation(location); >+ >+ //instructions[i + 2].u.location = location; > break; > } > > case op_debug: { >- if (pc[1].u.unsignedValue == DidReachBreakpoint) >+ if (instruction->as<OpDebug>().debugHookType == DidReachBreakpoint) > m_hasDebuggerStatement = true; > break; > } > > case op_create_rest: { >- int numberOfArgumentsToSkip = instructions[i + 3].u.operand; >+ int numberOfArgumentsToSkip = instruction->as<OpCreateRest>().numParametersToSkip; > ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0); > // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT."); > m_numberOfArgumentsToSkip = numberOfArgumentsToSkip; >@@ -836,14 +823,10 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink > default: > break; > } >- >- i += opLength; > } > > if (vm.controlFlowProfiler()) >- insertBasicBlockBoundariesForControlFlowProfiler(instructions); >- >- m_instructions = WTFMove(instructions); >+ insertBasicBlockBoundariesForControlFlowProfiler(); > > // Set optimization thresholds only after m_instructions is initialized, since these > // rely on the instruction count (and are in theory permitted to also inspect the >@@ -859,7 +842,7 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink > if 
(Options::dumpGeneratedBytecodes()) > dumpBytecode(); > >- heap()->reportExtraMemoryAllocated(m_instructions.size() * sizeof(Instruction)); >+ heap()->reportExtraMemoryAllocated(m_instructions->sizeInBytes()); > > return true; > } >@@ -998,7 +981,7 @@ CodeBlock* CodeBlock::specialOSREntryBlockOrNull() > size_t CodeBlock::estimatedSize(JSCell* cell, VM& vm) > { > CodeBlock* thisObject = jsCast<CodeBlock*>(cell); >- size_t extraMemoryAllocated = thisObject->m_instructions.size() * sizeof(Instruction); >+ size_t extraMemoryAllocated = thisObject->m_instructions->sizeInBytes(); > if (thisObject->m_jitCode) > extraMemoryAllocated += thisObject->m_jitCode->size(); > return Base::estimatedSize(cell, vm) + extraMemoryAllocated; >@@ -1021,15 +1004,8 @@ void CodeBlock::visitChildren(SlotVisitor& visitor) > > if (m_jitCode) > visitor.reportExtraMemoryVisited(m_jitCode->size()); >- if (m_instructions.size()) { >- unsigned refCount = m_instructions.refCount(); >- if (!refCount) { >- dataLog("CodeBlock: ", RawPointer(this), "\n"); >- dataLog("m_instructions.data(): ", RawPointer(m_instructions.data()), "\n"); >- dataLog("refCount: ", refCount, "\n"); >- RELEASE_ASSERT_NOT_REACHED(); >- } >- visitor.reportExtraMemoryVisited(m_instructions.size() * sizeof(Instruction) / refCount); >+ if (m_instructions->sizeInBytes()) { >+ visitor.reportExtraMemoryVisited(m_instructions->sizeInBytes()); > } > > stronglyVisitStrongReferences(locker, visitor); >@@ -1129,13 +1105,13 @@ void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& vis > VM& vm = *m_poisonedVM; > > if (jitType() == JITCode::InterpreterThunk) { >- const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions(); >+ const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions(); > for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) { >- Instruction* instruction = &instructions()[propertyAccessInstructions[i]]; 
>- switch (Interpreter::getOpcodeID(instruction[0])) { >- case op_put_by_id: { >- StructureID oldStructureID = instruction[4].u.structureID; >- StructureID newStructureID = instruction[6].u.structureID; >+ auto instruction = m_instructions->at(propertyAccessInstructions[i]); >+ if (instruction->is<OpPutById>()) { >+ auto& metadata = instruction->as<OpPutById>().metadata(this); >+ StructureID oldStructureID = metadata.oldStructure; >+ StructureID newStructureID = metadata.newStructure; > if (!oldStructureID || !newStructureID) > break; > Structure* oldStructure = >@@ -1146,9 +1122,6 @@ void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& vis > visitor.appendUnbarriered(newStructure); > break; > } >- default: >- break; >- } > } > } > >@@ -1243,54 +1216,52 @@ void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visito > #endif // ENABLE(DFG_JIT) > } > >-void CodeBlock::clearLLIntGetByIdCache(Instruction* instruction) >-{ >- instruction[0].u.opcode = LLInt::getOpcode(op_get_by_id); >- instruction[4].u.pointer = nullptr; >- instruction[5].u.pointer = nullptr; >- instruction[6].u.pointer = nullptr; >-} >- > void CodeBlock::finalizeLLIntInlineCaches() > { > VM& vm = *m_poisonedVM; >- const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions(); >+ const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions(); >+ >+ auto handleGetPutFromScope = [](auto& metadata) { >+ GetPutInfo getPutInfo = metadata.getPutInfo; >+ if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks >+ || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks) >+ return; >+ WriteBarrierBase<Structure>& structure = metadata.structure; >+ if (!structure || Heap::isMarked(structure.get())) >+ return; >+ if 
(Options::verboseOSR()) >+ dataLogF("Clearing scope access with structure %p.\n", structure.get()); >+ structure.clear(); >+ }; >+ > for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) { >- Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]]; >- switch (Interpreter::getOpcodeID(curInstruction[0])) { >+ const auto curInstruction = m_instructions->at(propertyAccessInstructions[i]); >+ switch (curInstruction->opcodeID()) { > case op_get_by_id: { >- StructureID oldStructureID = curInstruction[4].u.structureID; >+ auto& metadata = curInstruction->as<OpGetById>().metadata(this); >+ StructureID oldStructureID = metadata.structure; > if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID))) > break; > if (Options::verboseOSR()) > dataLogF("Clearing LLInt property access.\n"); >- clearLLIntGetByIdCache(curInstruction); >- break; >- } >- case op_get_by_id_direct: { >- StructureID oldStructureID = curInstruction[4].u.structureID; >- if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID))) >- break; >- if (Options::verboseOSR()) >- dataLogF("Clearing LLInt property access.\n"); >- curInstruction[4].u.pointer = nullptr; >- curInstruction[5].u.pointer = nullptr; >+ LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(metadata); > break; > } > case op_put_by_id: { >- StructureID oldStructureID = curInstruction[4].u.structureID; >- StructureID newStructureID = curInstruction[6].u.structureID; >- StructureChain* chain = curInstruction[7].u.structureChain.get(); >+ auto& metadata = curInstruction->as<OpPutById>().metadata(this); >+ StructureID oldStructureID = metadata.oldStructure; >+ StructureID newStructureID = metadata.newStructure; >+ StructureChain* chain = metadata.structureChain.get(); > if ((!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID))) > && (!newStructureID || 
Heap::isMarked(vm.heap.structureIDTable().get(newStructureID))) > && (!chain || Heap::isMarked(chain))) > break; > if (Options::verboseOSR()) > dataLogF("Clearing LLInt put transition.\n"); >- curInstruction[4].u.structureID = 0; >- curInstruction[5].u.operand = 0; >- curInstruction[6].u.structureID = 0; >- curInstruction[7].u.structureChain.clear(); >+ metadata.oldStructure = 0; >+ metadata.offset = 0; >+ metadata.newStructure = 0; >+ metadata.structureChain.clear(); > break; > } > // FIXME: https://bugs.webkit.org/show_bug.cgi?id=166418 >@@ -1301,17 +1272,19 @@ void CodeBlock::finalizeLLIntInlineCaches() > case op_get_by_id_unset: > case op_get_array_length: > break; >- case op_to_this: >- if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get())) >+ case op_to_this: { >+ auto& metadata = curInstruction->as<OpToThis>().metadata(this); >+ if (!metadata.cachedStructure || Heap::isMarked(metadata.cachedStructure.get())) > break; > if (Options::verboseOSR()) >- dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get()); >- curInstruction[2].u.structure.clear(); >- curInstruction[3].u.toThisStatus = merge( >- curInstruction[3].u.toThisStatus, ToThisClearedByGC); >+ dataLogF("Clearing LLInt to_this with structure %p.\n", metadata.cachedStructure.get()); >+ metadata.cachedStructure.clear(); >+ metadata.toThisStatus = merge(metadata.toThisStatus, ToThisClearedByGC); > break; >+ } > case op_create_this: { >- auto& cacheWriteBarrier = curInstruction[4].u.jsCell; >+ auto& metadata = curInstruction->as<OpCreateThis>().metadata(this); >+ auto& cacheWriteBarrier = metadata.cachedCallee; > if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects()) > break; > JSCell* cachedFunction = cacheWriteBarrier.get(); >@@ -1326,7 +1299,8 @@ void CodeBlock::finalizeLLIntInlineCaches() > // Right now this isn't strictly necessary. 
Any symbol tables that this will refer to > // are for outer functions, and we refer to those functions strongly, and they refer > // to the symbol table strongly. But it's nice to be on the safe side. >- WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable; >+ auto metadata = curInstruction->as<OpResolveScope>().metadata(this); >+ WriteBarrierBase<SymbolTable>& symbolTable = metadata.symbolTable; > if (!symbolTable || Heap::isMarked(symbolTable.get())) > break; > if (Options::verboseOSR()) >@@ -1335,22 +1309,14 @@ void CodeBlock::finalizeLLIntInlineCaches() > break; > } > case op_get_from_scope: >- case op_put_to_scope: { >- GetPutInfo getPutInfo = GetPutInfo(curInstruction[4].u.operand); >- if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks >- || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks) >- continue; >- WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure; >- if (!structure || Heap::isMarked(structure.get())) >- break; >- if (Options::verboseOSR()) >- dataLogF("Clearing scope access with structure %p.\n", structure.get()); >- structure.clear(); >+ handleGetPutFromScope(curInstruction->as<OpGetFromScope>().metadata(this)); >+ break; >+ case op_put_to_scope: >+ handleGetPutFromScope(curInstruction->as<OpPutToScope>().metadata(this)); > break; >- } > default: >- OpcodeID opcodeID = Interpreter::getOpcodeID(curInstruction[0]); >- ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]); >+ OpcodeID opcodeID = curInstruction->opcodeID(); >+ ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %lu", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]); > } > } > 
>@@ -1358,12 +1324,12 @@ void CodeBlock::finalizeLLIntInlineCaches() > // then cleared the cache without GCing in between. > m_llintGetByIdWatchpointMap.removeIf([&] (const StructureWatchpointMap::KeyValuePairType& pair) -> bool { > auto clear = [&] () { >- Instruction* instruction = std::get<1>(pair.key); >- OpcodeID opcode = Interpreter::getOpcodeID(*instruction); >+ const Instruction* instruction = std::get<1>(pair.key); >+ OpcodeID opcode = instruction->opcodeID(); > if (opcode == op_get_by_id_proto_load || opcode == op_get_by_id_unset) { > if (Options::verboseOSR()) > dataLogF("Clearing LLInt property access.\n"); >- clearLLIntGetByIdCache(instruction); >+ LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(instruction->as<OpGetById>().metadata(this)); > } > return true; > }; >@@ -1463,22 +1429,22 @@ StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType) > return m_stubInfos.add(accessType); > } > >-JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile, Instruction* instruction) >+JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile, const Instruction* instruction) > { > return m_addICs.add(arithProfile, instruction); > } > >-JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile, Instruction* instruction) >+JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile, const Instruction* instruction) > { > return m_mulICs.add(arithProfile, instruction); > } > >-JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile, Instruction* instruction) >+JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile, const Instruction* instruction) > { > return m_subICs.add(arithProfile, instruction); > } > >-JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile, Instruction* instruction) >+JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile, const Instruction* instruction) > { > return m_negICs.add(arithProfile, instruction); > } >@@ -1693,9 +1659,33 @@ CallSiteIndex 
CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex origina > #endif > } > >-void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(unsigned bytecodeOffset) >+ >+ >+void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset) >+{ >+ auto instruction = m_instructions->at(bytecodeOffset); >+ OpCatch op = instruction->as<OpCatch>(); >+ auto& metadata = op.metadata(this); >+ if (!!metadata.buffer) { >+#if !ASSERT_DISABLED >+ ConcurrentJSLocker locker(m_lock); >+ bool found = false; >+ for (auto& profile : m_catchProfiles) { >+ if (profile.get() == metadata.buffer) { >+ found = true; >+ break; >+ } >+ } >+ ASSERT(found); >+#endif >+ return; >+ } >+ >+ ensureCatchLivenessIsComputedForBytecodeOffsetSlow(op, bytecodeOffset); >+} >+ >+void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch& op, InstructionStream::Offset bytecodeOffset) > { >- ASSERT(Interpreter::getOpcodeID(m_instructions[bytecodeOffset]) == op_catch); > BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis(); > > // We get the live-out set of variables at op_catch, not the live-in. This >@@ -1722,7 +1712,7 @@ void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(unsigned byte > // the compiler thread reads fully initialized data. 
> WTF::storeStoreFence(); > >- m_instructions[bytecodeOffset + 3].u.pointer = profiles.get(); >+ op.metadata(this).buffer = profiles.get(); > > { > ConcurrentJSLocker locker(m_lock); >@@ -1773,20 +1763,15 @@ void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& d > > bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column) > { >- const Instruction* begin = instructions().begin(); >- const Instruction* end = instructions().end(); >- for (const Instruction* it = begin; it != end;) { >- OpcodeID opcodeID = Interpreter::getOpcodeID(*it); >- if (opcodeID == op_debug) { >- unsigned bytecodeOffset = it - begin; >+ for (const auto& it : *m_instructions) { >+ if (it->is<OpDebug>()) { > int unused; > unsigned opDebugLine; > unsigned opDebugColumn; >- expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn); >+ expressionRangeForBytecodeOffset(it.offset(), unused, unused, unused, opDebugLine, opDebugColumn); > if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn)) > return true; > } >- it += opcodeLengths[opcodeID]; > } > return false; > } >@@ -2764,7 +2749,7 @@ size_t CodeBlock::predictedMachineCodeSize() > if (multiplier < 0 || multiplier > 1000) > return 0; > >- double doubleResult = multiplier * m_instructions.size(); >+ double doubleResult = multiplier * m_instructions->size(); > > // Be even more paranoid: silently reject values that won't fit into a size_t. 
If > // the function is so huge that we can't even fit it into virtual memory then we >@@ -2808,14 +2793,6 @@ ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset) > getValueProfileBytecodeOffset<ValueProfile>); > } > >-ValueProfile& CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset) >-{ >- OpcodeID opcodeID = Interpreter::getOpcodeID(instructions()[bytecodeOffset]); >- unsigned length = opcodeLength(opcodeID); >- ASSERT(!!tryGetValueProfileForBytecodeOffset(bytecodeOffset)); >- return *instructions()[bytecodeOffset + length - 1].u.profile; >-} >- > void CodeBlock::validate() > { > BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't effect CodeBlock footprint. >@@ -2849,9 +2826,9 @@ void CodeBlock::validate() > } > } > >- for (unsigned bytecodeOffset = 0; bytecodeOffset < m_instructions.size(); ) { >- OpcodeID opcode = Interpreter::getOpcodeID(m_instructions[bytecodeOffset]); >- if (!!baselineAlternative()->handlerForBytecodeOffset(bytecodeOffset)) { >+ for (const auto& instruction : *m_instructions) { >+ OpcodeID opcode = instruction->opcodeID(); >+ if (!!baselineAlternative()->handlerForBytecodeOffset(instruction.offset())) { > if (opcode == op_catch || opcode == op_enter) { > // op_catch/op_enter logically represent an entrypoint. Entrypoints are not allowed to be > // inside of a try block because they are responsible for bootstrapping state. 
And they >@@ -2863,7 +2840,6 @@ void CodeBlock::validate() > endValidationDidFail(); > } > } >- bytecodeOffset += opcodeLength(opcode); > } > } > >@@ -2918,25 +2894,30 @@ unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(int bytecodeOffset) > return 0; > } > >-ArithProfile* CodeBlock::arithProfileForBytecodeOffset(int bytecodeOffset) >+ArithProfile* CodeBlock::arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset) > { >- return arithProfileForPC(&instructions()[bytecodeOffset]); >+ return arithProfileForPC(m_instructions->at(bytecodeOffset).ptr()); > } > >-ArithProfile* CodeBlock::arithProfileForPC(Instruction* pc) >+ArithProfile* CodeBlock::arithProfileForPC(const Instruction* pc) > { >- auto opcodeID = Interpreter::getOpcodeID(pc[0]); >- switch (opcodeID) { >+ switch (pc->opcodeID()) { > case op_negate: >- return bitwise_cast<ArithProfile*>(&pc[3].u.operand); >+ return &pc->as<OpNegate>().metadata(this).arithProfile; > case op_bitor: >+ return &pc->as<OpBitor>().metadata(this).arithProfile; > case op_bitand: >+ return &pc->as<OpBitand>().metadata(this).arithProfile; > case op_bitxor: >+ return &pc->as<OpBitxor>().metadata(this).arithProfile; > case op_add: >+ return &pc->as<OpAdd>().metadata(this).arithProfile; > case op_mul: >+ return &pc->as<OpMul>().metadata(this).arithProfile; > case op_sub: >+ return &pc->as<OpSub>().metadata(this).arithProfile; > case op_div: >- return bitwise_cast<ArithProfile*>(&pc[4].u.operand); >+ return &pc->as<OpDiv>().metadata(this).arithProfile; > default: > break; > } >@@ -2944,7 +2925,7 @@ ArithProfile* CodeBlock::arithProfileForPC(Instruction* pc) > return nullptr; > } > >-bool CodeBlock::couldTakeSpecialFastCase(int bytecodeOffset) >+bool CodeBlock::couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset) > { > if (!hasBaselineJITProfiling()) > return false; >@@ -2963,22 +2944,26 @@ DFG::CapabilityLevel CodeBlock::capabilityLevel() > } > #endif > >-void 
CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>& instructions) >+void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler() > { > if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets()) > return; >- const Vector<size_t>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets(); >+ const Vector<InstructionStream::Offset>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets(); > for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) { > // Because op_profile_control_flow is emitted at the beginning of every basic block, finding > // the next op_profile_control_flow will give us the text range of a single basic block. > size_t startIdx = bytecodeOffsets[i]; >- RELEASE_ASSERT(Interpreter::getOpcodeID(instructions[startIdx]) == op_profile_control_flow); >- int basicBlockStartOffset = instructions[startIdx + 1].u.operand; >+ auto instruction = m_instructions->at(startIdx); >+ RELEASE_ASSERT(instruction->opcodeID() == op_profile_control_flow); >+ auto bytecode = instruction->as<OpProfileControlFlow>(); >+ auto& metadata = bytecode.metadata(this); >+ int basicBlockStartOffset = bytecode.textOffset; > int basicBlockEndOffset; > if (i + 1 < offsetsLength) { > size_t endIdx = bytecodeOffsets[i + 1]; >- RELEASE_ASSERT(Interpreter::getOpcodeID(instructions[endIdx]) == op_profile_control_flow); >- basicBlockEndOffset = instructions[endIdx + 1].u.operand - 1; >+ auto endInstruction = m_instructions->at(endIdx); >+ RELEASE_ASSERT(endInstruction->opcodeID() == op_profile_control_flow); >+ basicBlockEndOffset = endInstruction->as<OpProfileControlFlow>().textOffset - 1; > } else { > basicBlockEndOffset = m_sourceOffset + ownerScriptExecutable()->source().length() - 1; // Offset before the closing brace. 
> basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace, ensure it is the offset before. >@@ -3004,7 +2989,7 @@ void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray > // m: op_profile_control_flow > if (basicBlockEndOffset < basicBlockStartOffset) { > RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock. >- instructions[startIdx + 1].u.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock(); >+ metadata.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock(); > continue; > } > >@@ -3028,7 +3013,7 @@ void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray > for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs) > insertFunctionGaps(executable); > >- instructions[startIdx + 1].u.basicBlockLocation = basicBlockLocation; >+ metadata.basicBlockLocation = basicBlockLocation; > } > } > >@@ -3067,7 +3052,7 @@ std::optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex > bytecodeOffset = callSiteIndex.bits(); > #else > Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits()); >- bytecodeOffset = this->bytecodeOffset(instruction); >+ bytecodeOffset = 0; // this->bytecodeOffset(instruction); > #endif > } else if (jitType == JITCode::DFGJIT || jitType == JITCode::FTLJIT) { > #if ENABLE(DFG_JIT) >diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h >index a3a3d263900d3122c09e204f32b57b3e101a32b2..ce5bd8694e18c1b5a6495ccd66cbfde66e60df7b 100644 >--- a/Source/JavaScriptCore/bytecode/CodeBlock.h >+++ b/Source/JavaScriptCore/bytecode/CodeBlock.h >@@ -47,6 +47,7 @@ > #include "HandlerInfo.h" > #include "ICStatusMap.h" > #include "Instruction.h" >+#include "InstructionStream.h" > #include "JITCode.h" > #include "JITCodeMap.h" > #include "JITMathICForwards.h" >@@ -55,7 +56,6 @@ 
> #include "JSGlobalObject.h" > #include "JumpTable.h" > #include "LLIntCallLinkInfo.h" >-#include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h" > #include "LazyOperandValueProfile.h" > #include "ModuleProgramExecutable.h" > #include "ObjectAllocationProfile.h" >@@ -85,10 +85,10 @@ struct OSRExitState; > > class BytecodeLivenessAnalysis; > class CodeBlockSet; >-class ExecState; > class ExecutableToCodeBlockEdge; > class JSModuleEnvironment; > class LLIntOffsetsExtractor; >+class LLIntPrototypeLoadAdaptiveStructureWatchpoint; > class PCToCodeOriginMap; > class RegisterAtOffsetList; > class StructureStubInfo; >@@ -96,6 +96,7 @@ class StructureStubInfo; > enum class AccessType : int8_t; > > struct ArithProfile; >+struct OpCatch; > > enum ReoptimizationMode { DontCountReoptimization, CountReoptimization }; > >@@ -197,7 +198,7 @@ public: > > void dumpBytecode(); > void dumpBytecode(PrintStream&); >- void dumpBytecode(PrintStream& out, const Instruction* begin, const Instruction*& it, const ICStatusMap& = ICStatusMap()); >+ void dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& = ICStatusMap()); > void dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& = ICStatusMap()); > > void dumpExceptionHandlers(PrintStream&); >@@ -242,22 +243,22 @@ public: > void getICStatusMap(ICStatusMap& result); > > #if ENABLE(JIT) >- JITAddIC* addJITAddIC(ArithProfile*, Instruction*); >- JITMulIC* addJITMulIC(ArithProfile*, Instruction*); >- JITNegIC* addJITNegIC(ArithProfile*, Instruction*); >- JITSubIC* addJITSubIC(ArithProfile*, Instruction*); >+ JITAddIC* addJITAddIC(ArithProfile*, const Instruction*); >+ JITMulIC* addJITMulIC(ArithProfile*, const Instruction*); >+ JITNegIC* addJITNegIC(ArithProfile*, const Instruction*); >+ JITSubIC* addJITSubIC(ArithProfile*, const Instruction*); > > template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITAddGenerator>::value>::type> >- JITAddIC* 
addMathIC(ArithProfile* profile, Instruction* instruction) { return addJITAddIC(profile, instruction); } >+ JITAddIC* addMathIC(ArithProfile* profile, const Instruction* instruction) { return addJITAddIC(profile, instruction); } > > template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITMulGenerator>::value>::type> >- JITMulIC* addMathIC(ArithProfile* profile, Instruction* instruction) { return addJITMulIC(profile, instruction); } >+ JITMulIC* addMathIC(ArithProfile* profile, const Instruction* instruction) { return addJITMulIC(profile, instruction); } > > template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITNegGenerator>::value>::type> >- JITNegIC* addMathIC(ArithProfile* profile, Instruction* instruction) { return addJITNegIC(profile, instruction); } >+ JITNegIC* addMathIC(ArithProfile* profile, const Instruction* instruction) { return addJITNegIC(profile, instruction); } > > template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITSubGenerator>::value>::type> >- JITSubIC* addMathIC(ArithProfile* profile, Instruction* instruction) { return addJITSubIC(profile, instruction); } >+ JITSubIC* addMathIC(ArithProfile* profile, const Instruction* instruction) { return addJITSubIC(profile, instruction); } > > StructureStubInfo* addStubInfo(AccessType); > auto stubInfoBegin() { return m_stubInfos.begin(); } >@@ -306,24 +307,20 @@ public: > } > #endif > >- typedef JSC::Instruction Instruction; >- typedef PoisonedRefCountedArray<CodeBlockPoison, Instruction>& UnpackedInstructions; >- >- static void clearLLIntGetByIdCache(Instruction*); >- >- unsigned bytecodeOffset(Instruction* returnAddress) >+ unsigned bytecodeOffset(const Instruction* returnAddress) > { >- RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end()); >- return static_cast<Instruction*>(returnAddress) - instructions().begin(); >+ const auto* instructionsBegin = 
instructions().at(0).ptr(); >+ const auto* instructionsEnd = reinterpret_cast<const Instruction*>(reinterpret_cast<uintptr_t>(instructionsBegin) + instructions().size()); >+ RELEASE_ASSERT(returnAddress >= instructionsBegin && returnAddress < instructionsEnd); >+ return returnAddress - instructionsBegin;; > } > >- unsigned numberOfInstructions() const { return m_instructions.size(); } >- PoisonedRefCountedArray<CodeBlockPoison, Instruction>& instructions() { return m_instructions; } >- const PoisonedRefCountedArray<CodeBlockPoison, Instruction>& instructions() const { return m_instructions; } >+ unsigned numberOfInstructions() const { return m_instructions->size(); } >+ const InstructionStream& instructions() const { return *m_instructions; } > > size_t predictedMachineCodeSize(); > >- unsigned instructionCount() const { return m_instructions.size(); } >+ unsigned instructionCount() const { return m_instructions->size(); } > > // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind()) > CodeBlock* newReplacement(); >@@ -425,7 +422,6 @@ public: > > unsigned numberOfValueProfiles() { return m_valueProfiles.size(); } > ValueProfile& valueProfile(int index) { return m_valueProfiles[index]; } >- ValueProfile& valueProfileForBytecodeOffset(int bytecodeOffset); > ValueProfile* tryGetValueProfileForBytecodeOffset(int bytecodeOffset); > SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset) > { >@@ -445,6 +441,12 @@ public: > return valueProfile(index - numberOfArgumentValueProfiles()); > } > >+ template<typename Metadata> >+ Metadata*& metadata(OpcodeID opcodeID, unsigned metadataID) >+ { >+ return *reinterpret_cast<Metadata**>(&m_metadata[opcodeID][metadataID]); >+ } >+ > RareCaseProfile* addRareCaseProfile(int bytecodeOffset); > unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); } > RareCaseProfile* rareCaseProfileForBytecodeOffset(int 
bytecodeOffset); >@@ -466,10 +468,10 @@ public: > return value >= Options::couldTakeSlowCaseMinimumCount(); > } > >- ArithProfile* arithProfileForBytecodeOffset(int bytecodeOffset); >- ArithProfile* arithProfileForPC(Instruction*); >+ ArithProfile* arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset); >+ ArithProfile* arithProfileForPC(const Instruction*); > >- bool couldTakeSpecialFastCase(int bytecodeOffset); >+ bool couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset); > > unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); } > const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; } >@@ -478,6 +480,7 @@ public: > ArrayProfile* getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset); > ArrayProfile* getArrayProfile(unsigned bytecodeOffset); > ArrayProfile* getOrAddArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset); >+ > ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset); > > // Exception handling support >@@ -619,7 +622,7 @@ public: > return m_llintExecuteCounter; > } > >- typedef HashMap<std::tuple<Structure*, Instruction*>, Bag<LLIntPrototypeLoadAdaptiveStructureWatchpoint>> StructureWatchpointMap; >+ typedef HashMap<std::tuple<Structure*, const Instruction*>, Bag<LLIntPrototypeLoadAdaptiveStructureWatchpoint>> StructureWatchpointMap; > StructureWatchpointMap& llintGetByIdWatchpointMap() { return m_llintGetByIdWatchpointMap; } > > // Functions for controlling when tiered compilation kicks in. 
This >@@ -849,25 +852,7 @@ public: > > CallSiteIndex newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite); > >- void ensureCatchLivenessIsComputedForBytecodeOffset(unsigned bytecodeOffset) >- { >- if (!!m_instructions[bytecodeOffset + 3].u.pointer) { >-#if !ASSERT_DISABLED >- ConcurrentJSLocker locker(m_lock); >- bool found = false; >- for (auto& profile : m_catchProfiles) { >- if (profile.get() == m_instructions[bytecodeOffset + 3].u.pointer) { >- found = true; >- break; >- } >- } >- ASSERT(found); >-#endif >- return; >- } >- >- ensureCatchLivenessIsComputedForBytecodeOffsetSlow(bytecodeOffset); >- } >+ void ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset); > > #if ENABLE(JIT) > void setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&&); >@@ -932,8 +917,8 @@ private: > m_rareData = std::make_unique<RareData>(); > } > >- void insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>&); >- void ensureCatchLivenessIsComputedForBytecodeOffsetSlow(unsigned); >+ void insertBasicBlockBoundariesForControlFlowProfiler(); >+ void ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch&, InstructionStream::Offset); > > int m_numCalleeLocals; > int m_numVars; >@@ -952,7 +937,7 @@ private: > WriteBarrier<ExecutableToCodeBlockEdge> m_ownerEdge; > Poisoned<CodeBlockPoison, VM*> m_poisonedVM; > >- PoisonedRefCountedArray<CodeBlockPoison, Instruction> m_instructions; >+ const InstructionStream* m_instructions; > VirtualRegister m_thisRegister; > VirtualRegister m_scopeRegister; > mutable CodeBlockHash m_hash; >@@ -987,6 +972,7 @@ private: > RefCountedArray<ValueProfile> m_argumentValueProfiles; > RefCountedArray<ValueProfile> m_valueProfiles; > Vector<std::unique_ptr<ValueProfileAndOperandBuffer>> m_catchProfiles; >+ SegmentedVector<Vector<void*>, 8> m_metadata; > SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles; > RefCountedArray<ArrayAllocationProfile> m_arrayAllocationProfiles; > 
ArrayProfileVector m_arrayProfiles; >diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp >index b0946c4e097f58e14d9fdcb7bab9c42ed129ba14..a8e7cdd16e7274ba97e5240634b31b37466ad155 100644 >--- a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp >+++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp >@@ -26,6 +26,7 @@ > #include "config.h" > #include "GetByIdStatus.h" > >+#include "BytecodeStructs.h" > #include "CodeBlock.h" > #include "ComplexGetStatus.h" > #include "GetterSetterAccessCase.h" >@@ -55,30 +56,16 @@ GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned > { > VM& vm = *profiledBlock->vm(); > >- Instruction* instruction = &profiledBlock->instructions()[bytecodeIndex]; >+ auto instruction = profiledBlock->instructions().at(bytecodeIndex); > >- switch (Interpreter::getOpcodeID(instruction[0].u.opcode)) { >+ StructureID structureID; >+ switch (instruction->opcodeID()) { > case op_get_by_id: >- case op_get_by_id_direct: { >- StructureID structureID = instruction[4].u.structureID; >- if (!structureID) >- return GetByIdStatus(NoInformation, false); >- >- Structure* structure = vm.heap.structureIDTable().get(structureID); >- >- if (structure->takesSlowPathInDFGForImpureProperty()) >- return GetByIdStatus(NoInformation, false); >- >- unsigned attributes; >- PropertyOffset offset = structure->getConcurrently(uid, attributes); >- if (!isValidOffset(offset)) >- return GetByIdStatus(NoInformation, false); >- if (attributes & PropertyAttribute::CustomAccessor) >- return GetByIdStatus(NoInformation, false); >- >- return GetByIdStatus(Simple, false, GetByIdVariant(StructureSet(structure), offset)); >- } >- >+ structureID = instruction->as<OpGetById>().metadata(profiledBlock).structure; >+ break; >+ case op_get_by_id_direct: >+ structureID = instruction->as<OpGetByIdDirect>().metadata(profiledBlock).structure; >+ break; > case op_get_array_length: > case op_try_get_by_id: > case 
op_get_by_id_proto_load: >@@ -93,6 +80,23 @@ GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned > return GetByIdStatus(NoInformation, false); > } > } >+ >+ if (!structureID) >+ return GetByIdStatus(NoInformation, false); >+ >+ Structure* structure = vm.heap.structureIDTable().get(structureID); >+ >+ if (structure->takesSlowPathInDFGForImpureProperty()) >+ return GetByIdStatus(NoInformation, false); >+ >+ unsigned attributes; >+ PropertyOffset offset = structure->getConcurrently(uid, attributes); >+ if (!isValidOffset(offset)) >+ return GetByIdStatus(NoInformation, false); >+ if (attributes & PropertyAttribute::CustomAccessor) >+ return GetByIdStatus(NoInformation, false); >+ >+ return GetByIdStatus(Simple, false, GetByIdVariant(StructureSet(structure), offset)); > } > > GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, ICStatusMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid, ExitFlag didExit, CallLinkStatus::ExitSiteData callExitSiteData) >diff --git a/Source/JavaScriptCore/bytecode/Instruction.h b/Source/JavaScriptCore/bytecode/Instruction.h >deleted file mode 100644 >index c133578b3263d3029845e48379a35960704a6efd..0000000000000000000000000000000000000000 >--- a/Source/JavaScriptCore/bytecode/Instruction.h >+++ /dev/null >@@ -1,160 +0,0 @@ >-/* >- * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved. >- * >- * Redistribution and use in source and binary forms, with or without >- * modification, are permitted provided that the following conditions >- * are met: >- * >- * 1. Redistributions of source code must retain the above copyright >- * notice, this list of conditions and the following disclaimer. >- * 2. Redistributions in binary form must reproduce the above copyright >- * notice, this list of conditions and the following disclaimer in the >- * documentation and/or other materials provided with the distribution. >- * 3. Neither the name of Apple Inc. 
("Apple") nor the names of >- * its contributors may be used to endorse or promote products derived >- * from this software without specific prior written permission. >- * >- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY >- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED >- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE >- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY >- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES >- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; >- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND >- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF >- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. >- */ >- >-#pragma once >- >-#include "BasicBlockLocation.h" >-#include "PutByIdFlags.h" >-#include "SymbolTable.h" >-#include "TypeLocation.h" >-#include "PropertySlot.h" >-#include "SpecialPointer.h" >-#include "Structure.h" >-#include "StructureChain.h" >-#include "ToThisStatus.h" >-#include <wtf/VectorTraits.h> >- >-namespace JSC { >- >-class ArrayAllocationProfile; >-class ArrayProfile; >-class ObjectAllocationProfile; >-class WatchpointSet; >-struct LLIntCallLinkInfo; >-struct ValueProfile; >- >-#if ENABLE(COMPUTED_GOTO_OPCODES) >-typedef void* Opcode; >-#else >-typedef OpcodeID Opcode; >-#endif >- >-struct Instruction { >- constexpr Instruction() >- : u({ nullptr }) >- { >- } >- >- Instruction(Opcode opcode) >- { >-#if !ENABLE(COMPUTED_GOTO_OPCODES) >- // We have to initialize one of the pointer members to ensure that >- // the entire struct is initialized, when opcode is not a pointer. 
>- u.jsCell.clear(); >-#endif >- u.opcode = opcode; >- } >- >- Instruction(int operand) >- { >- // We have to initialize one of the pointer members to ensure that >- // the entire struct is initialized in 64-bit. >- u.jsCell.clear(); >- u.operand = operand; >- } >- Instruction(unsigned unsignedValue) >- { >- // We have to initialize one of the pointer members to ensure that >- // the entire struct is initialized in 64-bit. >- u.jsCell.clear(); >- u.unsignedValue = unsignedValue; >- } >- >- Instruction(PutByIdFlags flags) >- { >- u.putByIdFlags = flags; >- } >- >- Instruction(VM& vm, JSCell* owner, Structure* structure) >- { >- u.structure.clear(); >- u.structure.set(vm, owner, structure); >- } >- Instruction(VM& vm, JSCell* owner, StructureChain* structureChain) >- { >- u.structureChain.clear(); >- u.structureChain.set(vm, owner, structureChain); >- } >- Instruction(VM& vm, JSCell* owner, JSCell* jsCell) >- { >- u.jsCell.clear(); >- u.jsCell.set(vm, owner, jsCell); >- } >- >- Instruction(PropertySlot::GetValueFunc getterFunc) { u.getterFunc = getterFunc; } >- >- Instruction(LLIntCallLinkInfo* callLinkInfo) { u.callLinkInfo = callLinkInfo; } >- Instruction(ValueProfile* profile) { u.profile = profile; } >- Instruction(ArrayProfile* profile) { u.arrayProfile = profile; } >- Instruction(ArrayAllocationProfile* profile) { u.arrayAllocationProfile = profile; } >- Instruction(ObjectAllocationProfile* profile) { u.objectAllocationProfile = profile; } >- Instruction(WriteBarrier<Unknown>* variablePointer) { u.variablePointer = variablePointer; } >- Instruction(Special::Pointer pointer) { u.specialPointer = pointer; } >- Instruction(UniquedStringImpl* uid) { u.uid = uid; } >- Instruction(bool* predicatePointer) { u.predicatePointer = predicatePointer; } >- >- union { >- void* pointer; >- Opcode opcode; >- int operand; >- unsigned unsignedValue; >- WriteBarrierBase<Structure> structure; >- StructureID structureID; >- WriteBarrierBase<SymbolTable> symbolTable; >- 
WriteBarrierBase<StructureChain> structureChain; >- WriteBarrierBase<JSCell> jsCell; >- WriteBarrier<Unknown>* variablePointer; >- Special::Pointer specialPointer; >- PropertySlot::GetValueFunc getterFunc; >- LLIntCallLinkInfo* callLinkInfo; >- UniquedStringImpl* uid; >- ValueProfile* profile; >- ArrayProfile* arrayProfile; >- ArrayAllocationProfile* arrayAllocationProfile; >- ObjectAllocationProfile* objectAllocationProfile; >- WatchpointSet* watchpointSet; >- bool* predicatePointer; >- ToThisStatus toThisStatus; >- TypeLocation* location; >- BasicBlockLocation* basicBlockLocation; >- PutByIdFlags putByIdFlags; >- } u; >- >-private: >- Instruction(StructureChain*); >- Instruction(Structure*); >-}; >-static_assert(sizeof(Instruction) == sizeof(void*), ""); >- >-} // namespace JSC >- >-namespace WTF { >- >-template<> struct VectorTraits<JSC::Instruction> : VectorTraitsBase<true, JSC::Instruction> { }; >- >-} // namespace WTF >diff --git a/Source/JavaScriptCore/bytecode/InstructionStream.cpp b/Source/JavaScriptCore/bytecode/InstructionStream.cpp >new file mode 100644 >index 0000000000000000000000000000000000000000..d2816015809c1cf67cd58e5c5974427217ad0be7 >--- /dev/null >+++ b/Source/JavaScriptCore/bytecode/InstructionStream.cpp >@@ -0,0 +1,42 @@ >+/* >+ * Copyright (C) 2014 Apple Inc. All Rights Reserved. >+ * >+ * Redistribution and use in source and binary forms, with or without >+ * modification, are permitted provided that the following conditions >+ * are met: >+ * 1. Redistributions of source code must retain the above copyright >+ * notice, this list of conditions and the following disclaimer. >+ * 2. Redistributions in binary form must reproduce the above copyright >+ * notice, this list of conditions and the following disclaimer in the >+ * documentation and/or other materials provided with the distribution. >+ * >+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY >+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE >+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR >+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, >+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, >+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR >+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY >+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE >+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. >+ */ >+ >+#include "config.h" >+#include "InstructionStream.h" >+ >+#include "Opcode.h" >+ >+namespace JSC { >+ >+InstructionStream::InstructionStream(InstructionBuffer&& instructions) >+ : m_instructions(WTFMove(instructions)) >+{ } >+ >+size_t InstructionStream::sizeInBytes() const >+{ >+ return m_instructions.size(); >+} >+ >+} >diff --git a/Source/JavaScriptCore/bytecode/InstructionStream.h b/Source/JavaScriptCore/bytecode/InstructionStream.h >new file mode 100644 >index 0000000000000000000000000000000000000000..d3ac9622d07405521c8b39defdb75f8ffa3a5821 >--- /dev/null >+++ b/Source/JavaScriptCore/bytecode/InstructionStream.h >@@ -0,0 +1,245 @@ >+/* >+ * Copyright (C) 2014 Apple Inc. All Rights Reserved. >+ * >+ * Redistribution and use in source and binary forms, with or without >+ * modification, are permitted provided that the following conditions >+ * are met: >+ * 1. Redistributions of source code must retain the above copyright >+ * notice, this list of conditions and the following disclaimer. >+ * 2. Redistributions in binary form must reproduce the above copyright >+ * notice, this list of conditions and the following disclaimer in the >+ * documentation and/or other materials provided with the distribution. 
>+ * >+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY >+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE >+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR >+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, >+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, >+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR >+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY >+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE >+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. >+ */ >+ >+ >+#pragma once >+ >+#include "Instruction.h" >+#include <wtf/Vector.h> >+ >+namespace JSC { >+ >+class InstructionStream { >+ WTF_MAKE_FAST_ALLOCATED; >+ // WTF_MAKE_NONCOPYABLE(InstructionStream); >+ >+ using InstructionBuffer = Vector<uint8_t, 0, UnsafeVectorOverflow>; >+ >+ friend class InstructionStreamWriter; >+public: >+ size_t sizeInBytes() const; >+ >+ using Offset = size_t; >+ >+private: >+ template<class InstructionBuffer> >+ class BaseRef { >+ WTF_MAKE_FAST_ALLOCATED; >+ >+ friend class InstructionStream; >+ >+ public: >+ const Instruction* operator->() const { return unwrap(); } >+ const Instruction* ptr() const { return unwrap(); } >+ >+ bool operator!=(const BaseRef<InstructionBuffer>& other) const >+ { >+ return &m_instructions != &other.m_instructions && m_index != other.m_index; >+ } >+ >+ BaseRef next() const >+ { >+ return BaseRef { m_instructions, m_index + ptr()->size() }; >+ } >+ >+ Offset offset() const >+ { >+ return m_index; >+ } >+ >+ bool isValid() const >+ { >+ return m_index < m_instructions.size(); >+ } >+ >+ private: >+ BaseRef(InstructionBuffer& instructions, size_t index) >+ : m_instructions(instructions) >+ , m_index(index) >+ { } >+ >+ >+ const 
Instruction* unwrap() const { return reinterpret_cast<const Instruction*>(&m_instructions[m_index]); } >+ >+ InstructionBuffer& m_instructions; >+ protected: >+ Offset m_index; >+ }; >+ >+public: >+ using Ref = BaseRef<const InstructionBuffer>; >+ >+ class MutableRef : public BaseRef<InstructionBuffer> { >+ using BaseRef<InstructionBuffer>::BaseRef; >+ >+ friend class InstructionStreamWriter; >+ public: >+ Ref freeze() const { return Ref { m_instructions, m_index }; } >+ Instruction* operator->() { return unwrap(); } >+ Instruction* ptr() { return unwrap(); } >+ operator Ref() { >+ return Ref { m_instructions, m_index }; >+ } >+ private: >+ Instruction* unwrap() { return reinterpret_cast<Instruction*>(&m_instructions[m_index]); } >+ }; >+ >+private: >+ class iterator : public Ref { >+ friend class InstructionStream; >+ >+ public: >+ using Ref::Ref; >+ >+ Ref& operator*() >+ { >+ return *this; >+ } >+ >+ iterator operator++() >+ { >+ m_index += ptr()->size(); >+ return *this; >+ } >+ }; >+ >+public: >+ iterator begin() const >+ { >+ return iterator { m_instructions, 0 }; >+ } >+ >+ iterator end() const >+ { >+ return iterator { m_instructions, m_instructions.size() }; >+ } >+ >+ const Ref at(Offset offset) const >+ { >+ ASSERT(offset < m_instructions.size()); >+ return Ref { m_instructions, offset }; >+ } >+ >+ size_t size() const >+ { >+ return m_instructions.size(); >+ } >+ >+private: >+ explicit InstructionStream(InstructionBuffer&&); >+ >+protected: >+ InstructionBuffer m_instructions; >+}; >+ >+class InstructionStreamWriter : public InstructionStream { >+ friend class BytecodeRewriter; >+public: >+ InstructionStreamWriter() >+ : InstructionStream({ }) >+ { } >+ >+ MutableRef ref(Offset offset) >+ { >+ ASSERT(offset < m_instructions.size()); >+ return MutableRef { m_instructions, offset }; >+ } >+ >+ >+ void write(uint8_t byte) { ASSERT(!m_finalized); m_instructions.append(byte); } >+ void write(uint32_t i) >+ { >+ ASSERT(!m_finalized); >+ union { >+ uint32_t i; 
>+ uint8_t bytes[4];
>+ } u { i };
>+#if CPU(BIG_ENDIAN)
>+ write(u.bytes[3]);
>+ write(u.bytes[2]);
>+ write(u.bytes[1]);
>+ write(u.bytes[0]);
>+#else // !CPU(BIG_ENDIAN)
>+ write(u.bytes[0]);
>+ write(u.bytes[1]);
>+ write(u.bytes[2]);
>+ write(u.bytes[3]);
>+#endif // !CPU(BIG_ENDIAN)
>+ }
>+
>+ void rewind(MutableRef& ref)
>+ {
>+ ASSERT(ref.offset() < m_instructions.size());
>+ m_instructions.shrink(ref.offset());
>+ }
>+
>+ std::unique_ptr<InstructionStream> finalize()
>+ {
>+ m_finalized = true;
>+ m_instructions.shrinkToFit();
>+ return std::unique_ptr<InstructionStream> { new InstructionStream(WTFMove(m_instructions)) };
>+ }
>+
>+ MutableRef ref()
>+ {
>+ return MutableRef { m_instructions, m_instructions.size() };
>+ }
>+
>+private:
>+ class iterator : public MutableRef {
>+ friend class InstructionStreamWriter;
>+
>+ public:
>+ using MutableRef::MutableRef;
>+
>+ MutableRef& operator*()
>+ {
>+ return *this;
>+ }
>+
>+ iterator operator++()
>+ {
>+ m_index += ptr()->size();
>+ return *this;
>+ }
>+ };
>+
>+public:
>+ iterator begin()
>+ {
>+ return iterator { m_instructions, 0 };
>+ }
>+
>+ iterator end()
>+ {
>+ return iterator { m_instructions, m_instructions.size() };
>+ }
>+
>+private:
>+ bool m_finalized { false };
>+};
>+
>+
>+} // namespace JSC
>diff --git a/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp b/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp
>index eecd8fbc59a37eac92af8ab8088797e8f10fb309..d7e515cec12ff765ee2fc3862f61047205163666 100644
>--- a/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp
>+++ b/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp
>@@ -32,9 +32,9 @@
> 
> namespace JSC {
> 
>-LLIntPrototypeLoadAdaptiveStructureWatchpoint::LLIntPrototypeLoadAdaptiveStructureWatchpoint(const ObjectPropertyCondition& key, Instruction* getByIdInstruction)
>+LLIntPrototypeLoadAdaptiveStructureWatchpoint::LLIntPrototypeLoadAdaptiveStructureWatchpoint(const ObjectPropertyCondition& key, OpGetById::Metadata& getByIdMetadata) > : m_key(key) >- , m_getByIdInstruction(getByIdInstruction) >+ , m_getByIdMetadata(getByIdMetadata) > { > RELEASE_ASSERT(key.watchingRequiresStructureTransitionWatchpoint()); > RELEASE_ASSERT(!key.watchingRequiresReplacementWatchpoint()); >@@ -54,7 +54,16 @@ void LLIntPrototypeLoadAdaptiveStructureWatchpoint::fireInternal(VM& vm, const F > return; > } > >- CodeBlock::clearLLIntGetByIdCache(m_getByIdInstruction); >+ clearLLIntGetByIdCache(m_getByIdMetadata); > } > >+void LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(OpGetById::Metadata&) >+{ >+ //instruction[0].u.opcode = op_get_by_id; >+ //instruction[4].u.pointer = nullptr; >+ //instruction[5].u.pointer = nullptr; >+ //instruction[6].u.pointer = nullptr; >+} >+ >+ > } // namespace JSC >diff --git a/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h b/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h >index e0e1be8d07057bd6b611f8a5391d5d87b2a32d54..27a8e317f463a67d5d3e2686aa8d4433d8aab447 100644 >--- a/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h >+++ b/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h >@@ -25,7 +25,7 @@ > > #pragma once > >-#include "Instruction.h" >+#include "BytecodeStructs.h" > #include "ObjectPropertyCondition.h" > #include "Watchpoint.h" > >@@ -34,10 +34,12 @@ namespace JSC { > class LLIntPrototypeLoadAdaptiveStructureWatchpoint : public Watchpoint { > public: > LLIntPrototypeLoadAdaptiveStructureWatchpoint() = default; >- LLIntPrototypeLoadAdaptiveStructureWatchpoint(const ObjectPropertyCondition&, Instruction*); >+ LLIntPrototypeLoadAdaptiveStructureWatchpoint(const ObjectPropertyCondition&, OpGetById::Metadata&); > > void install(VM&); > >+ static void 
clearLLIntGetByIdCache(OpGetById::Metadata&); >+ > const ObjectPropertyCondition& key() const { return m_key; } > > protected: >@@ -45,7 +47,7 @@ protected: > > private: > ObjectPropertyCondition m_key; >- Instruction* m_getByIdInstruction { nullptr }; >+ OpGetById::Metadata& m_getByIdMetadata; > }; > > } // namespace JSC >diff --git a/Source/JavaScriptCore/bytecode/Opcode.h b/Source/JavaScriptCore/bytecode/Opcode.h >index 07d9a7314eeb109c731237d4be327131b11c94ee..74fc2e68c6bb4763fb777bc6c70bb15049b40f47 100644 >--- a/Source/JavaScriptCore/bytecode/Opcode.h >+++ b/Source/JavaScriptCore/bytecode/Opcode.h >@@ -68,6 +68,10 @@ const int numOpcodeIDs = NUMBER_OF_BYTECODE_IDS + NUMBER_OF_BYTECODE_HELPER_IDS; > FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTHS); > #undef OPCODE_ID_LENGTHS > >+#define OPCODE_ID_WIDE_LENGTHS(id, length) const int id##_wide_length = length * 4; >+ FOR_EACH_OPCODE_ID(OPCODE_ID_WIDE_LENGTHS); >+#undef OPCODE_ID_WIDE_LENGTHS >+ > #define OPCODE_LENGTH(opcode) opcode##_length > > #define OPCODE_ID_LENGTH_MAP(opcode, length) length, >diff --git a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp >index 56306fd7ce8bc1367e6b1eef9c98feeb4d6573c7..1e7122c75003333a733b1af5886aea1582f10947 100644 >--- a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp >+++ b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp >@@ -32,20 +32,20 @@ > > namespace JSC { > >-template <size_t vectorSize, typename Block, typename Instruction> >-static void getJumpTargetsForBytecodeOffset(Block* codeBlock, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, vectorSize>& out) >+template <size_t vectorSize, typename Block> >+static void getJumpTargetsForInstruction(Block* codeBlock, const InstructionStream::Ref& instruction, Vector<InstructionStream::Offset, vectorSize>& out) > { >- OpcodeID opcodeID = Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset]); >- 
extractStoredJumpTargetsForBytecodeOffset(codeBlock, instructionsBegin, bytecodeOffset, [&](int32_t& relativeOffset) { >- out.append(bytecodeOffset + relativeOffset); >+ extractStoredJumpTargetsForInstruction(codeBlock, instruction, [&](int32_t relativeOffset) { >+ out.append(instruction.offset() + relativeOffset); > }); >+ OpcodeID opcodeID = instruction->opcodeID(); > // op_loop_hint does not have jump target stored in bytecode instructions. > if (opcodeID == op_loop_hint) >- out.append(bytecodeOffset); >+ out.append(instruction.offset()); > else if (opcodeID == op_enter && codeBlock->hasTailCalls() && Options::optimizeRecursiveTailCalls()) { > // We need to insert a jump after op_enter, so recursive tail calls have somewhere to jump to. > // But we only want to pay that price for functions that have at least one tail call. >- out.append(bytecodeOffset + opcodeLengths[op_enter]); >+ out.append(instruction.next().offset()); > } > } > >@@ -54,8 +54,8 @@ enum class ComputePreciseJumpTargetsMode { > ForceCompute, > }; > >-template<ComputePreciseJumpTargetsMode Mode, typename Block, typename Instruction, size_t vectorSize> >-void computePreciseJumpTargetsInternal(Block* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, vectorSize>& out) >+template<ComputePreciseJumpTargetsMode Mode, typename Block, size_t vectorSize> >+void computePreciseJumpTargetsInternal(Block* codeBlock, const InstructionStream& instructions, Vector<InstructionStream::Offset, vectorSize>& out) > { > ASSERT(out.isEmpty()); > >@@ -69,10 +69,8 @@ void computePreciseJumpTargetsInternal(Block* codeBlock, Instruction* instructio > out.append(codeBlock->exceptionHandler(i).end); > } > >- for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount;) { >- OpcodeID opcodeID = Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset]); >- getJumpTargetsForBytecodeOffset(codeBlock, instructionsBegin, bytecodeOffset, out); >- bytecodeOffset += 
opcodeLengths[opcodeID]; >+ for (const auto& instruction : instructions) { >+ getJumpTargetsForInstruction(codeBlock, instruction, out); > } > > std::sort(out.begin(), out.end()); >@@ -91,34 +89,34 @@ void computePreciseJumpTargetsInternal(Block* codeBlock, Instruction* instructio > out.shrinkCapacity(toIndex); > } > >-void computePreciseJumpTargets(CodeBlock* codeBlock, Vector<unsigned, 32>& out) >+void computePreciseJumpTargets(CodeBlock* codeBlock, Vector<InstructionStream::Offset, 32>& out) > { >- computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, codeBlock->instructions().begin(), codeBlock->instructions().size(), out); >+ computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, codeBlock->instructions(), out); > } > >-void computePreciseJumpTargets(CodeBlock* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out) >+void computePreciseJumpTargets(CodeBlock* codeBlock, const InstructionStream& instructions, Vector<InstructionStream::Offset, 32>& out) > { >- computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, instructionsBegin, instructionCount, out); >+ computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, instructions, out); > } > >-void computePreciseJumpTargets(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out) >+void computePreciseJumpTargets(UnlinkedCodeBlock* codeBlock, const InstructionStream& instructions, Vector<InstructionStream::Offset, 32>& out) > { >- computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, instructionsBegin, instructionCount, out); >+ computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, instructions, out); > } > >-void 
recomputePreciseJumpTargets(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<unsigned>& out) >+void recomputePreciseJumpTargets(UnlinkedCodeBlock* codeBlock, const InstructionStream& instructions, Vector<InstructionStream::Offset>& out) > { >- computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::ForceCompute>(codeBlock, instructionsBegin, instructionCount, out); >+ computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::ForceCompute>(codeBlock, instructions, out); > } > >-void findJumpTargetsForBytecodeOffset(CodeBlock* codeBlock, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, 1>& out) >+void findJumpTargetsForInstruction(CodeBlock* codeBlock, const InstructionStream::Ref& instruction, Vector<InstructionStream::Offset, 1>& out) > { >- getJumpTargetsForBytecodeOffset(codeBlock, instructionsBegin, bytecodeOffset, out); >+ getJumpTargetsForInstruction(codeBlock, instruction, out); > } > >-void findJumpTargetsForBytecodeOffset(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, 1>& out) >+void findJumpTargetsForInstruction(UnlinkedCodeBlock* codeBlock, const InstructionStream::Ref& instruction, Vector<InstructionStream::Offset, 1>& out) > { >- getJumpTargetsForBytecodeOffset(codeBlock, instructionsBegin, bytecodeOffset, out); >+ getJumpTargetsForInstruction(codeBlock, instruction, out); > } > > } // namespace JSC >diff --git a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h >index bcc9346cd5d7020465def09a5b259cf4872d9b93..023de86c1ea7de57cd85a4ac4286259cafb0df2b 100644 >--- a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h >+++ b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h >@@ -30,16 +30,15 @@ > namespace JSC { > > class UnlinkedCodeBlock; >-struct UnlinkedInstruction; > > // Return a sorted list of bytecode index that are the destination 
of a jump. >-void computePreciseJumpTargets(CodeBlock*, Vector<unsigned, 32>& out); >-void computePreciseJumpTargets(CodeBlock*, Instruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out); >-void computePreciseJumpTargets(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out); >+void computePreciseJumpTargets(CodeBlock*, Vector<InstructionStream::Offset, 32>& out); >+void computePreciseJumpTargets(CodeBlock*, const InstructionStream& instructions, Vector<InstructionStream::Offset, 32>& out); >+void computePreciseJumpTargets(UnlinkedCodeBlock*, const InstructionStream& instructions, Vector<InstructionStream::Offset, 32>& out); > >-void recomputePreciseJumpTargets(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<unsigned>& out); >+void recomputePreciseJumpTargets(UnlinkedCodeBlock*, const InstructionStream& instructions, Vector<InstructionStream::Offset>& out); > >-void findJumpTargetsForBytecodeOffset(CodeBlock*, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, 1>& out); >-void findJumpTargetsForBytecodeOffset(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, 1>& out); >+void findJumpTargetsForInstruction(CodeBlock*, const InstructionStream::Ref& instruction, Vector<InstructionStream::Offset, 1>& out); >+void findJumpTargetsForInstruction(UnlinkedCodeBlock*, const InstructionStream::Ref& instruction, Vector<InstructionStream::Offset, 1>& out); > > } // namespace JSC >diff --git a/Source/JavaScriptCore/bytecode/PreciseJumpTargetsInlines.h b/Source/JavaScriptCore/bytecode/PreciseJumpTargetsInlines.h >index 070fde9a0b3d6afdc3d4b5fb98694041f1637d01..7e93f7b42fe7c48477af431e0db012d7b9eb5c56 100644 >--- a/Source/JavaScriptCore/bytecode/PreciseJumpTargetsInlines.h >+++ b/Source/JavaScriptCore/bytecode/PreciseJumpTargetsInlines.h >@@ -25,64 +25,108 @@ > > #pragma once > 
>+#include "BytecodeStructs.h"
> #include "InterpreterInlines.h"
> #include "Opcode.h"
> #include "PreciseJumpTargets.h"
> 
> namespace JSC {
> 
>-template<typename Block, typename Instruction, typename Function>
>-inline void extractStoredJumpTargetsForBytecodeOffset(Block* codeBlock, Instruction* instructionsBegin, unsigned bytecodeOffset, Function function)
>+#define SWITCH_JMP(CASE_OP, JMP_TARGET) \
>+ switch (instruction->opcodeID()) { \
>+ CASE_OP(OpJmp) \
>+ /* TODO: unify as instruction->as<UnaryJmp>() */ \
>+ CASE_OP(OpJtrue) \
>+ CASE_OP(OpJfalse) \
>+ CASE_OP(OpJeqNull) \
>+ CASE_OP(OpJneqNull) \
>+ CASE_OP(OpJneqPtr) \
>+ /* TODO: unify as instruction->as<BinaryJmp>() */ \
>+ CASE_OP(OpJless) \
>+ CASE_OP(OpJlesseq) \
>+ CASE_OP(OpJgreater) \
>+ CASE_OP(OpJgreatereq) \
>+ CASE_OP(OpJnless) \
>+ CASE_OP(OpJnlesseq) \
>+ CASE_OP(OpJngreater) \
>+ CASE_OP(OpJngreatereq) \
>+ CASE_OP(OpJeq) \
>+ CASE_OP(OpJneq) \
>+ CASE_OP(OpJstricteq) \
>+ CASE_OP(OpJnstricteq) \
>+ CASE_OP(OpJbelow) \
>+ CASE_OP(OpJbeloweq) \
>+ case op_switch_imm: { \
>+ auto bytecode = instruction->as<OpSwitchImm>(); \
>+ auto& table = codeBlock->switchJumpTable(bytecode.tableIndex); \
>+ for (unsigned i = table.branchOffsets.size(); i--;) \
>+ JMP_TARGET(table.branchOffsets[i]); \
>+ JMP_TARGET(bytecode.defaultOffset); \
>+ break; \
>+ } \
>+ case op_switch_char: { \
>+ auto bytecode = instruction->as<OpSwitchChar>(); \
>+ auto& table = codeBlock->switchJumpTable(bytecode.tableIndex); \
>+ for (unsigned i = table.branchOffsets.size(); i--;) \
>+ JMP_TARGET(table.branchOffsets[i]); \
>+ JMP_TARGET(bytecode.defaultOffset); \
>+ break; \
>+ } \
>+ case op_switch_string: { \
>+ auto bytecode = instruction->as<OpSwitchString>(); \
>+ auto& table = codeBlock->stringSwitchJumpTable(bytecode.tableIndex); \
>+ auto iter = table.offsetTable.begin(); \
>+ auto end = table.offsetTable.end(); \
>+ for (; iter != end; ++iter) \
>+ JMP_TARGET(iter->value.branchOffset); \
>+ 
JMP_TARGET(bytecode.defaultOffset); \ >+ break; \ >+ } \ >+ default: \ >+ break; \ >+ } \ >+ >+template<typename Block, typename Function> >+inline void extractStoredJumpTargetsForInstruction(Block* codeBlock, const InstructionStream::Ref& instruction, Function function) > { >- OpcodeID opcodeID = Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset]); >- Instruction* current = instructionsBegin + bytecodeOffset; >- switch (opcodeID) { >- case op_jmp: >- function(current[1].u.operand); >- break; >- case op_jtrue: >- case op_jfalse: >- case op_jeq_null: >- case op_jneq_null: >- function(current[2].u.operand); >- break; >- case op_jneq_ptr: >- case op_jless: >- case op_jlesseq: >- case op_jgreater: >- case op_jgreatereq: >- case op_jnless: >- case op_jnlesseq: >- case op_jngreater: >- case op_jngreatereq: >- case op_jeq: >- case op_jneq: >- case op_jstricteq: >- case op_jnstricteq: >- case op_jbelow: >- case op_jbeloweq: >- function(current[3].u.operand); >- break; >- case op_switch_imm: >- case op_switch_char: { >- auto& table = codeBlock->switchJumpTable(current[1].u.operand); >- for (unsigned i = table.branchOffsets.size(); i--;) >- function(table.branchOffsets[i]); >- function(current[2].u.operand); >+#define CASE_OP(__op) \ >+ case __op::opcodeID(): \ >+ function(instruction->as<__op>().target); \ > break; >- } >- case op_switch_string: { >- auto& table = codeBlock->stringSwitchJumpTable(current[1].u.operand); >- auto iter = table.offsetTable.begin(); >- auto end = table.offsetTable.end(); >- for (; iter != end; ++iter) >- function(iter->value.branchOffset); >- function(current[2].u.operand); >- break; >- } >- default: >- break; >- } >+ >+#define JMP_TARGET(__target) \ >+ function(__target) >+ >+SWITCH_JMP(CASE_OP, JMP_TARGET) >+ >+#undef CASE_OP >+#undef JMP_TARGET >+} >+ >+template<typename Block, typename Function> >+inline void updateStoredJumpTargetsForInstruction(Block* codeBlock, InstructionStream::MutableRef instruction, Function function) >+{ 
>+#define CASE_OP(__op) \ >+ case __op::opcodeID(): { \ >+ int32_t target = instruction->as<__op>().target; \ >+ int32_t newTarget = function(target); \ >+ if (newTarget != target) \ >+ instruction->cast<__op>()->setTarget(newTarget); \ >+ break; \ >+ } >+ >+#define JMP_TARGET(__target) \ >+ do { \ >+ int32_t target = __target; \ >+ int32_t newTarget = function(target); \ >+ if (newTarget != target) \ >+ __target = newTarget; \ >+ } while(false) >+ >+SWITCH_JMP(CASE_OP, JMP_TARGET) >+ >+#undef CASE_OP >+#undef JMP_TARGET > } > > } // namespace JSC >diff --git a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp >index aac974cea948218478c6c885e8f4d7b9ac3b64e3..03abdf49719b95088772a9129d552d50e5c85fe3 100644 >--- a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp >+++ b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp >@@ -26,6 +26,7 @@ > #include "config.h" > #include "PutByIdStatus.h" > >+#include "BytecodeStructs.h" > #include "CodeBlock.h" > #include "ComplexGetStatus.h" > #include "GetterSetterAccessCase.h" >@@ -55,21 +56,18 @@ ExitFlag PutByIdStatus::hasExitSite(CodeBlock* profiledBlock, unsigned bytecodeI > > PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid) > { >- UNUSED_PARAM(profiledBlock); >- UNUSED_PARAM(bytecodeIndex); >- UNUSED_PARAM(uid); >- > VM& vm = *profiledBlock->vm(); > >- Instruction* instruction = &profiledBlock->instructions()[bytecodeIndex]; >+ auto instruction = profiledBlock->instructions().at(bytecodeIndex); >+ auto& metadata = instruction->as<OpPutById>().metadata(profiledBlock); > >- StructureID structureID = instruction[4].u.structureID; >+ StructureID structureID = metadata.oldStructure; > if (!structureID) > return PutByIdStatus(NoInformation); > > Structure* structure = vm.heap.structureIDTable().get(structureID); > >- StructureID newStructureID = instruction[6].u.structureID; >+ StructureID newStructureID = 
metadata.newStructure; > if (!newStructureID) { > PropertyOffset offset = structure->getConcurrently(uid); > if (!isValidOffset(offset)) >@@ -87,7 +85,7 @@ PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned > return PutByIdStatus(NoInformation); > > ObjectPropertyConditionSet conditionSet; >- if (!(instruction[8].u.putByIdFlags & PutByIdIsDirect)) { >+ if (!(metadata.flags & PutByIdIsDirect)) { > conditionSet = > generateConditionsForPropertySetterMissConcurrently( > vm, profiledBlock->globalObject(), structure, uid); >diff --git a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp >index 2e2d64f06f97fa1da5625ba6b584dcfd8d911d0b..ba94ce774b545fcafade2afe7d778129f63ebf6d 100644 >--- a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp >+++ b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp >@@ -34,6 +34,7 @@ > #include "CodeCache.h" > #include "ExecutableInfo.h" > #include "FunctionOverrides.h" >+#include "InstructionStream.h" > #include "JSCInlines.h" > #include "JSString.h" > #include "Parser.h" >@@ -43,7 +44,6 @@ > #include "SymbolTable.h" > #include "UnlinkedEvalCodeBlock.h" > #include "UnlinkedFunctionCodeBlock.h" >-#include "UnlinkedInstructionStream.h" > #include "UnlinkedModuleProgramCodeBlock.h" > #include "UnlinkedProgramCodeBlock.h" > #include <wtf/DataLog.h> >@@ -95,20 +95,20 @@ void UnlinkedCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor) > for (FunctionExpressionVector::iterator ptr = thisObject->m_functionExprs.begin(), end = thisObject->m_functionExprs.end(); ptr != end; ++ptr) > visitor.append(*ptr); > visitor.appendValues(thisObject->m_constantRegisters.data(), thisObject->m_constantRegisters.size()); >- if (thisObject->m_unlinkedInstructions) >- visitor.reportExtraMemoryVisited(thisObject->m_unlinkedInstructions->sizeInBytes()); >+ if (thisObject->m_instructions) >+ visitor.reportExtraMemoryVisited(thisObject->m_instructions->sizeInBytes()); > 
} > > size_t UnlinkedCodeBlock::estimatedSize(JSCell* cell, VM& vm) > { > UnlinkedCodeBlock* thisObject = jsCast<UnlinkedCodeBlock*>(cell); >- size_t extraSize = thisObject->m_unlinkedInstructions ? thisObject->m_unlinkedInstructions->sizeInBytes() : 0; >+ size_t extraSize = thisObject->m_instructions ? thisObject->m_instructions->sizeInBytes() : 0; > return Base::estimatedSize(cell, vm) + extraSize; > } > > int UnlinkedCodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset) > { >- ASSERT(bytecodeOffset < instructions().count()); >+ ASSERT(bytecodeOffset < instructions().size()); > int divot { 0 }; > int startOffset { 0 }; > int endOffset { 0 }; >@@ -139,13 +139,12 @@ inline void UnlinkedCodeBlock::getLineAndColumn(const ExpressionRangeInfo& info, > } > > #ifndef NDEBUG >-static void dumpLineColumnEntry(size_t index, const UnlinkedInstructionStream& instructionStream, unsigned instructionOffset, unsigned line, unsigned column) >+static void dumpLineColumnEntry(size_t index, const InstructionStream& instructionStream, unsigned instructionOffset, unsigned line, unsigned column) > { >- const auto& instructions = instructionStream.unpackForDebugging(); >- OpcodeID opcode = instructions[instructionOffset].u.opcode; >+ const auto instruction = instructionStream.at(instructionOffset); > const char* event = ""; >- if (opcode == op_debug) { >- switch (instructions[instructionOffset + 1].u.operand) { >+ if (instruction->is<OpDebug>()) { >+ switch (instruction->as<OpDebug>().debugHookType) { > case WillExecuteProgram: event = " WillExecuteProgram"; break; > case DidExecuteProgram: event = " DidExecuteProgram"; break; > case DidEnterCallFrame: event = " DidEnterCallFrame"; break; >@@ -155,7 +154,7 @@ static void dumpLineColumnEntry(size_t index, const UnlinkedInstructionStream& i > case WillExecuteExpression: event = " WillExecuteExpression"; break; > } > } >- dataLogF(" [%zu] pc %u @ line %u col %u : %s%s\n", index, instructionOffset, line, column, 
opcodeNames[opcode], event); >+ dataLogF(" [%zu] pc %u @ line %u col %u : %s%s\n", index, instructionOffset, line, column, instruction->name(), event); > } > > void UnlinkedCodeBlock::dumpExpressionRangeInfo() >@@ -178,7 +177,7 @@ void UnlinkedCodeBlock::dumpExpressionRangeInfo() > void UnlinkedCodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, > int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const > { >- ASSERT(bytecodeOffset < instructions().count()); >+ ASSERT(bytecodeOffset < instructions().size()); > > if (!m_expressionInfo.size()) { > startOffset = 0; >@@ -304,20 +303,20 @@ UnlinkedCodeBlock::~UnlinkedCodeBlock() > { > } > >-void UnlinkedCodeBlock::setInstructions(std::unique_ptr<UnlinkedInstructionStream> instructions) >+void UnlinkedCodeBlock::setInstructions(std::unique_ptr<InstructionStream> instructions) > { > ASSERT(instructions); > { > auto locker = holdLock(cellLock()); >- m_unlinkedInstructions = WTFMove(instructions); >+ m_instructions = WTFMove(instructions); > } >- Heap::heap(this)->reportExtraMemoryAllocated(m_unlinkedInstructions->sizeInBytes()); >+ Heap::heap(this)->reportExtraMemoryAllocated(m_instructions->sizeInBytes()); > } > >-const UnlinkedInstructionStream& UnlinkedCodeBlock::instructions() const >+const InstructionStream& UnlinkedCodeBlock::instructions() const > { >- ASSERT(m_unlinkedInstructions.get()); >- return *m_unlinkedInstructions; >+ ASSERT(m_instructions.get()); >+ return *m_instructions; > } > > UnlinkedHandlerInfo* UnlinkedCodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler) >@@ -332,20 +331,15 @@ UnlinkedHandlerInfo* UnlinkedCodeBlock::handlerForIndex(unsigned index, Required > return UnlinkedHandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler); > } > >-void UnlinkedCodeBlock::applyModification(BytecodeRewriter& rewriter, UnpackedInstructions& instructions) >+void 
UnlinkedCodeBlock::applyModification(BytecodeRewriter& rewriter, InstructionStreamWriter& instructions) > { > // Before applying the changes, we adjust the jumps based on the original bytecode offset, the offset to the jump target, and > // the insertion information. > >- UnlinkedInstruction* instructionsBegin = instructions.begin(); // OOPS: make this an accessor on rewriter. >- >- for (int bytecodeOffset = 0, instructionCount = instructions.size(); bytecodeOffset < instructionCount;) { >- UnlinkedInstruction* current = instructionsBegin + bytecodeOffset; >- OpcodeID opcodeID = current[0].u.opcode; >- extractStoredJumpTargetsForBytecodeOffset(this, instructionsBegin, bytecodeOffset, [&](int32_t& relativeOffset) { >- relativeOffset = rewriter.adjustJumpTarget(bytecodeOffset, bytecodeOffset + relativeOffset); >+ for (const auto& instruction : instructions) { >+ updateStoredJumpTargetsForInstruction(this, instruction, [&](int32_t relativeOffset) { >+ return rewriter.adjustJumpTarget(instruction.offset(), instruction.offset() + relativeOffset); > }); >- bytecodeOffset += opcodeLength(opcodeID); > } > > // Then, exception handlers should be adjusted. >@@ -378,7 +372,7 @@ void UnlinkedCodeBlock::applyModification(BytecodeRewriter& rewriter, UnpackedIn > > // And recompute the jump target based on the modified unlinked instructions. 
> m_jumpTargets.clear(); >- recomputePreciseJumpTargets(this, instructions.begin(), instructions.size(), m_jumpTargets); >+ recomputePreciseJumpTargets(this, instructions, m_jumpTargets); > } > > void UnlinkedCodeBlock::shrinkToFit() >diff --git a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h >index da77bc9379dc522445989498b7ea98776d92d162..edf848afe1532b9507ee423bb932ee6e81260a3d 100644 >--- a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h >+++ b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h >@@ -31,6 +31,7 @@ > #include "ExpressionRangeInfo.h" > #include "HandlerInfo.h" > #include "Identifier.h" >+#include "InstructionStream.h" > #include "JSCast.h" > #include "LockDuringMarking.h" > #include "ParserModes.h" >@@ -60,7 +61,6 @@ class SourceProvider; > class UnlinkedCodeBlock; > class UnlinkedFunctionCodeBlock; > class UnlinkedFunctionExecutable; >-class UnlinkedInstructionStream; > struct ExecutableInfo; > > typedef unsigned UnlinkedValueProfile; >@@ -101,17 +101,6 @@ struct UnlinkedSimpleJumpTable { > } > }; > >-struct UnlinkedInstruction { >- UnlinkedInstruction() { u.operand = 0; } >- UnlinkedInstruction(OpcodeID opcode) { u.opcode = opcode; } >- UnlinkedInstruction(int operand) { u.operand = operand; } >- union { >- OpcodeID opcode; >- int32_t operand; >- unsigned unsignedValue; >- } u; >-}; >- > class UnlinkedCodeBlock : public JSCell { > public: > typedef JSCell Base; >@@ -121,9 +110,6 @@ public: > > enum { CallFunction, ApplyFunction }; > >- typedef UnlinkedInstruction Instruction; >- typedef Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow> UnpackedInstructions; >- > bool isConstructor() const { return m_isConstructor; } > bool isStrictMode() const { return m_isStrictMode; } > bool usesEval() const { return m_usesEval; } >@@ -237,8 +223,8 @@ public: > > void shrinkToFit(); > >- void setInstructions(std::unique_ptr<UnlinkedInstructionStream>); >- const UnlinkedInstructionStream& 
instructions() const; >+ void setInstructions(std::unique_ptr<InstructionStream>); >+ const InstructionStream& instructions() const; > > int numCalleeLocals() const { return m_numCalleeLocals; } > int numVars() const { return m_numVars; } >@@ -276,6 +262,16 @@ public: > UnlinkedFunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); } > size_t numberOfFunctionExprs() { return m_functionExprs.size(); } > >+ unsigned addMetadataFor(OpcodeID opcodeID) >+ { >+ auto it = m_metadataCount.find(opcodeID); >+ if (it != m_metadataCount.end()) >+ return it->value++; >+ >+ m_metadataCount.add(opcodeID, 1); >+ return 0; >+ } >+ > // Exception handling support > size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; } > void addExceptionHandler(const UnlinkedHandlerInfo& handler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(handler); } >@@ -305,13 +301,13 @@ public: > VirtualRegister thisRegister() const { return m_thisRegister; } > VirtualRegister scopeRegister() const { return m_scopeRegister; } > >- void addPropertyAccessInstruction(unsigned propertyAccessInstruction) >+ void addPropertyAccessInstruction(InstructionStream::Offset propertyAccessInstruction) > { > m_propertyAccessInstructions.append(propertyAccessInstruction); > } > > size_t numberOfPropertyAccessInstructions() const { return m_propertyAccessInstructions.size(); } >- const Vector<unsigned>& propertyAccessInstructions() const { return m_propertyAccessInstructions; } >+ const Vector<InstructionStream::Offset>& propertyAccessInstructions() const { return m_propertyAccessInstructions; } > > bool hasRareData() const { return m_rareData.get(); } > >@@ -401,7 +397,7 @@ private: > friend class BytecodeRewriter; > friend class BytecodeGenerator; > >- void applyModification(BytecodeRewriter&, UnpackedInstructions&); >+ void applyModification(BytecodeRewriter&, InstructionStreamWriter&); > > void 
createRareDataIfNecessary() > { >@@ -414,7 +410,7 @@ private: > void getLineAndColumn(const ExpressionRangeInfo&, unsigned& line, unsigned& column) const; > BytecodeLivenessAnalysis& livenessAnalysisSlow(CodeBlock*); > >- std::unique_ptr<UnlinkedInstructionStream> m_unlinkedInstructions; >+ std::unique_ptr<InstructionStream> m_instructions; > std::unique_ptr<BytecodeLivenessAnalysis> m_liveness; > > VirtualRegister m_thisRegister; >@@ -458,9 +454,9 @@ private: > SourceParseMode m_parseMode; > CodeType m_codeType; > >- Vector<unsigned> m_jumpTargets; >+ Vector<InstructionStream::Offset> m_jumpTargets; > >- Vector<unsigned> m_propertyAccessInstructions; >+ Vector<InstructionStream::Offset> m_propertyAccessInstructions; > > // Constant Pools > Vector<Identifier> m_identifiers; >@@ -473,6 +469,7 @@ private: > FunctionExpressionVector m_functionExprs; > std::array<unsigned, LinkTimeConstantCount> m_linkTimeConstants; > >+ HashMap<unsigned, unsigned> m_metadataCount; > unsigned m_arrayProfileCount { 0 }; > unsigned m_arrayAllocationProfileCount { 0 }; > unsigned m_objectAllocationProfileCount { 0 }; >diff --git a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp >deleted file mode 100644 >index 48c816a149b1bf406075f03572eb95200ed7862d..0000000000000000000000000000000000000000 >--- a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp >+++ /dev/null >@@ -1,132 +0,0 @@ >-/* >- * Copyright (C) 2014 Apple Inc. All Rights Reserved. >- * >- * Redistribution and use in source and binary forms, with or without >- * modification, are permitted provided that the following conditions >- * are met: >- * 1. Redistributions of source code must retain the above copyright >- * notice, this list of conditions and the following disclaimer. >- * 2. 
Redistributions in binary form must reproduce the above copyright >- * notice, this list of conditions and the following disclaimer in the >- * documentation and/or other materials provided with the distribution. >- * >- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY >- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE >- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR >- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, >- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, >- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR >- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY >- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE >- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. >- */ >- >-#include "config.h" >-#include "UnlinkedInstructionStream.h" >- >-#include "Opcode.h" >- >-namespace JSC { >- >-static void append8(unsigned char*& ptr, unsigned char value) >-{ >- *(ptr++) = value; >-} >- >-static void append32(unsigned char*& ptr, unsigned value) >-{ >- if (!(value & 0xffffffe0)) { >- *(ptr++) = value; >- return; >- } >- >- if ((value & 0xffffffe0) == 0xffffffe0) { >- *(ptr++) = (Negative5Bit << 5) | (value & 0x1f); >- return; >- } >- >- if ((value & 0xffffffe0) == 0x40000000) { >- *(ptr++) = (ConstantRegister5Bit << 5) | (value & 0x1f); >- return; >- } >- >- if (!(value & 0xffffe000)) { >- *(ptr++) = (Positive13Bit << 5) | ((value >> 8) & 0x1f); >- *(ptr++) = value & 0xff; >- return; >- } >- >- if ((value & 0xffffe000) == 0xffffe000) { >- *(ptr++) = (Negative13Bit << 5) | ((value >> 8) & 0x1f); >- *(ptr++) = value & 0xff; >- return; >- } >- >- if ((value & 0xffffe000) == 0x40000000) { >- *(ptr++) = (ConstantRegister13Bit << 5) | ((value >> 8) & 0x1f); >- 
*(ptr++) = value & 0xff; >- return; >- } >- >- *(ptr++) = Full32Bit << 5; >- *(ptr++) = value & 0xff; >- *(ptr++) = (value >> 8) & 0xff; >- *(ptr++) = (value >> 16) & 0xff; >- *(ptr++) = (value >> 24) & 0xff; >-} >- >-UnlinkedInstructionStream::UnlinkedInstructionStream(const Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>& instructions) >- : m_instructionCount(instructions.size()) >-{ >- Vector<unsigned char> buffer; >- >- // Reserve enough space up front so we never have to reallocate when appending. >- buffer.resizeToFit(m_instructionCount * 5); >- unsigned char* ptr = buffer.data(); >- >- const UnlinkedInstruction* instructionsData = instructions.data(); >- for (unsigned i = 0; i < m_instructionCount;) { >- const UnlinkedInstruction* pc = &instructionsData[i]; >- OpcodeID opcode = pc[0].u.opcode; >- append8(ptr, opcode); >- >- unsigned opLength = opcodeLength(opcode); >- >- for (unsigned j = 1; j < opLength; ++j) >- append32(ptr, pc[j].u.unsignedValue); >- >- i += opLength; >- } >- >- buffer.shrink(ptr - buffer.data()); >- m_data = RefCountedArray<unsigned char>(buffer); >-} >- >-size_t UnlinkedInstructionStream::sizeInBytes() const >-{ >- return m_data.size() * sizeof(unsigned char); >-} >- >-#ifndef NDEBUG >-const RefCountedArray<UnlinkedInstruction>& UnlinkedInstructionStream::unpackForDebugging() const >-{ >- if (!m_unpackedInstructionsForDebugging.size()) { >- m_unpackedInstructionsForDebugging = RefCountedArray<UnlinkedInstruction>(m_instructionCount); >- >- Reader instructionReader(*this); >- for (unsigned i = 0; !instructionReader.atEnd(); ) { >- const UnlinkedInstruction* pc = instructionReader.next(); >- unsigned opLength = opcodeLength(pc[0].u.opcode); >- for (unsigned j = 0; j < opLength; ++j) >- m_unpackedInstructionsForDebugging[i++] = pc[j]; >- } >- } >- >- return m_unpackedInstructionsForDebugging; >-} >-#endif >- >-} >- >diff --git a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h 
b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h >deleted file mode 100644 >index 8c0bf5742dbfdd52bc6ea822c8b77efa5021886f..0000000000000000000000000000000000000000 >--- a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h >+++ /dev/null >@@ -1,149 +0,0 @@ >-/* >- * Copyright (C) 2014 Apple Inc. All Rights Reserved. >- * >- * Redistribution and use in source and binary forms, with or without >- * modification, are permitted provided that the following conditions >- * are met: >- * 1. Redistributions of source code must retain the above copyright >- * notice, this list of conditions and the following disclaimer. >- * 2. Redistributions in binary form must reproduce the above copyright >- * notice, this list of conditions and the following disclaimer in the >- * documentation and/or other materials provided with the distribution. >- * >- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY >- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE >- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR >- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, >- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, >- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR >- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY >- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE >- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
>- */ >- >- >-#pragma once >- >-#include "Opcode.h" >-#include "UnlinkedCodeBlock.h" >-#include <wtf/RefCountedArray.h> >- >-namespace JSC { >- >-class UnlinkedInstructionStream { >- WTF_MAKE_FAST_ALLOCATED; >-public: >- explicit UnlinkedInstructionStream(const Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>&); >- >- unsigned count() const { return m_instructionCount; } >- size_t sizeInBytes() const; >- >- class Reader { >- public: >- explicit Reader(const UnlinkedInstructionStream&); >- >- const UnlinkedInstruction* next(); >- bool atEnd() const { return m_index == m_stream.m_data.size(); } >- >- private: >- unsigned char read8(); >- unsigned read32(); >- >- const UnlinkedInstructionStream& m_stream; >- UnlinkedInstruction m_unpackedBuffer[16]; >- unsigned m_index; >- }; >- >-#ifndef NDEBUG >- const RefCountedArray<UnlinkedInstruction>& unpackForDebugging() const; >-#endif >- >-private: >- friend class Reader; >- >-#ifndef NDEBUG >- mutable RefCountedArray<UnlinkedInstruction> m_unpackedInstructionsForDebugging; >-#endif >- >- RefCountedArray<unsigned char> m_data; >- unsigned m_instructionCount; >-}; >- >-// Unlinked instructions are packed in a simple stream format. >-// >-// The first byte is always the opcode. >-// It's followed by an opcode-dependent number of argument values. 
>-// The first 3 bits of each value determines the format: >-// >-// 5-bit positive integer (1 byte total) >-// 5-bit negative integer (1 byte total) >-// 13-bit positive integer (2 bytes total) >-// 13-bit negative integer (2 bytes total) >-// 5-bit constant register index, based at 0x40000000 (1 byte total) >-// 13-bit constant register index, based at 0x40000000 (2 bytes total) >-// 32-bit raw value (5 bytes total) >- >-enum PackedValueType { >- Positive5Bit = 0, >- Negative5Bit, >- Positive13Bit, >- Negative13Bit, >- ConstantRegister5Bit, >- ConstantRegister13Bit, >- Full32Bit >-}; >- >-ALWAYS_INLINE UnlinkedInstructionStream::Reader::Reader(const UnlinkedInstructionStream& stream) >- : m_stream(stream) >- , m_index(0) >-{ >-} >- >-ALWAYS_INLINE unsigned char UnlinkedInstructionStream::Reader::read8() >-{ >- return m_stream.m_data.data()[m_index++]; >-} >- >-ALWAYS_INLINE unsigned UnlinkedInstructionStream::Reader::read32() >-{ >- const unsigned char* data = &m_stream.m_data.data()[m_index]; >- unsigned char type = data[0] >> 5; >- >- switch (type) { >- case Positive5Bit: >- m_index++; >- return data[0]; >- case Negative5Bit: >- m_index++; >- return 0xffffffe0 | data[0]; >- case Positive13Bit: >- m_index += 2; >- return ((data[0] & 0x1F) << 8) | data[1]; >- case Negative13Bit: >- m_index += 2; >- return 0xffffe000 | ((data[0] & 0x1F) << 8) | data[1]; >- case ConstantRegister5Bit: >- m_index++; >- return 0x40000000 | (data[0] & 0x1F); >- case ConstantRegister13Bit: >- m_index += 2; >- return 0x40000000 | ((data[0] & 0x1F) << 8) | data[1]; >- default: >- ASSERT(type == Full32Bit); >- m_index += 5; >- return data[1] | data[2] << 8 | data[3] << 16 | data[4] << 24; >- } >-} >- >-ALWAYS_INLINE const UnlinkedInstruction* UnlinkedInstructionStream::Reader::next() >-{ >- m_unpackedBuffer[0].u.opcode = static_cast<OpcodeID>(read8()); >- unsigned opLength = opcodeLength(m_unpackedBuffer[0].u.opcode); >- for (unsigned i = 1; i < opLength; ++i) >- 
m_unpackedBuffer[i].u.unsignedValue = read32(); >- return m_unpackedBuffer; >-} >- >-} // namespace JSC >diff --git a/Source/JavaScriptCore/bytecode/VirtualRegister.h b/Source/JavaScriptCore/bytecode/VirtualRegister.h >index f32e8d24f6d4dc1366f2f1efb7796401b0f68bb9..16aa68358960158c05a65a1d41e6933f275537a9 100644 >--- a/Source/JavaScriptCore/bytecode/VirtualRegister.h >+++ b/Source/JavaScriptCore/bytecode/VirtualRegister.h >@@ -42,11 +42,15 @@ inline bool operandIsArgument(int operand) > } > > >+class RegisterID; >+ > class VirtualRegister { > public: > friend VirtualRegister virtualRegisterForLocal(int); > friend VirtualRegister virtualRegisterForArgument(int, int); > >+ VirtualRegister(RegisterID*); >+ > VirtualRegister() > : m_virtualRegister(s_invalidVirtualRegister) > { } >diff --git a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp >index 00afc9f96c2a95c17735f1634cbe70576cac3d17..a26d84d55919438a74055856c5ba35fac019fbcc 100644 >--- a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp >+++ b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp >@@ -36,6 +36,7 @@ > #include "BuiltinNames.h" > #include "BytecodeGeneratorification.h" > #include "BytecodeLivenessAnalysis.h" >+#include "BytecodeStructs.h" > #include "CatchScope.h" > #include "DefinePropertyAttributes.h" > #include "Interpreter.h" >@@ -56,7 +57,6 @@ > #include "UnlinkedCodeBlock.h" > #include "UnlinkedEvalCodeBlock.h" > #include "UnlinkedFunctionCodeBlock.h" >-#include "UnlinkedInstructionStream.h" > #include "UnlinkedModuleProgramCodeBlock.h" > #include "UnlinkedProgramCodeBlock.h" > #include <wtf/BitVector.h> >@@ -67,6 +67,21 @@ > > namespace JSC { > >+template<typename CallOp, typename = std::true_type> >+struct VarArgsOp; >+ >+template<typename CallOp> >+struct VarArgsOp<CallOp, std::enable_if_t<std::is_same<CallOp, OpTailCall>::value, std::true_type>> { >+ using type = OpTailCallVarargs; >+}; >+ >+ >+template<typename 
CallOp> >+struct VarArgsOp<CallOp, std::enable_if_t<!std::is_same<CallOp, OpTailCall>::value, std::true_type>> { >+ using type = OpCallVarargs; >+}; >+ >+ > template<typename T> > static inline void shrinkToFit(T& segmentedVector) > { >@@ -78,9 +93,45 @@ void Label::setLocation(BytecodeGenerator& generator, unsigned location) > { > m_location = location; > >- unsigned size = m_unresolvedJumps.size(); >- for (unsigned i = 0; i < size; ++i) >- generator.instructions()[m_unresolvedJumps[i].second].u.operand = m_location - m_unresolvedJumps[i].first; >+ for (auto offset : m_unresolvedJumps) { >+ auto instruction = generator.m_writer.ref(offset); >+ int target = m_location - offset; >+ >+#define CASE(__op) \ >+ case __op::opcodeID(): \ >+ instruction->cast<__op>()->setTarget(target); \ >+ return; >+ >+ switch (instruction->opcodeID()) { >+ CASE(OpJmp) >+ CASE(OpJtrue) >+ CASE(OpJfalse) >+ CASE(OpJeqNull) >+ CASE(OpJneqNull) >+ CASE(OpJeq) >+ CASE(OpJstricteq) >+ CASE(OpJneq) >+ CASE(OpJnstricteq) >+ CASE(OpJless) >+ CASE(OpJlesseq) >+ CASE(OpJgreater) >+ CASE(OpJgreatereq) >+ CASE(OpJnless) >+ CASE(OpJnlesseq) >+ CASE(OpJngreater) >+ CASE(OpJngreatereq) >+ CASE(OpJbelow) >+ CASE(OpJbeloweq) >+ default: >+ ASSERT_NOT_REACHED(); >+ } >+#undef CASE >+ } >+} >+ >+int Label::bind(BytecodeGenerator* generator) >+{ >+ return bind(generator->instructions().size()); > } > > void Variable::dump(PrintStream& out) const >@@ -159,10 +210,7 @@ ParserError BytecodeGenerator::generate() > > for (auto& tuple : m_catchesToEmit) { > Ref<Label> realCatchTarget = newEmittedLabel(); >- emitOpcode(op_catch); >- instructions().append(std::get<1>(tuple)); >- instructions().append(std::get<2>(tuple)); >- instructions().append(0); >+ OpCatch::emit(this, std::get<1>(tuple), std::get<2>(tuple)); > > TryData* tryData = std::get<0>(tuple); > emitJump(tryData->target.get()); >@@ -207,10 +255,10 @@ ParserError BytecodeGenerator::generate() > > > if 
(isGeneratorOrAsyncFunctionBodyParseMode(m_codeBlock->parseMode())) >- performGeneratorification(m_codeBlock.get(), m_instructions, m_generatorFrameSymbolTable.get(), m_generatorFrameSymbolTableIndex); >+ performGeneratorification(*this, m_codeBlock.get(), m_writer, m_generatorFrameSymbolTable.get(), m_generatorFrameSymbolTableIndex); > > RELEASE_ASSERT(static_cast<unsigned>(m_codeBlock->numCalleeLocals()) < static_cast<unsigned>(FirstConstantRegisterIndex)); >- m_codeBlock->setInstructions(std::make_unique<UnlinkedInstructionStream>(m_instructions)); >+ m_codeBlock->setInstructions(m_writer.finalize()); > > m_codeBlock->shrinkToFit(); > >@@ -448,20 +496,12 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionNode* functionNode, Unlinke > entry.disableWatching(*m_vm); > functionSymbolTable->set(NoLockingNecessary, name, entry); > } >- emitOpcode(op_put_to_scope); >- instructions().append(m_lexicalEnvironmentRegister->index()); >- instructions().append(UINT_MAX); >- instructions().append(virtualRegisterForArgument(1 + i).offset()); >- instructions().append(GetPutInfo(ThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand()); >- instructions().append(symbolTableConstantIndex); >- instructions().append(offset.offset()); >+ OpPutToScope::emit(this, m_lexicalEnvironmentRegister, UINT_MAX, virtualRegisterForArgument(1 + i), GetPutInfo(ThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization), symbolTableConstantIndex); > } > > // This creates a scoped arguments object and copies the overflow arguments into the > // scope. It's the equivalent of calling ScopedArguments::createByCopying(). >- emitOpcode(op_create_scoped_arguments); >- instructions().append(m_argumentsRegister->index()); >- instructions().append(m_lexicalEnvironmentRegister->index()); >+ OpCreateScopedArguments::emit(this, m_argumentsRegister, m_lexicalEnvironmentRegister); > } else { > // We're going to put all parameters into the DirectArguments object. 
First ensure > // that the symbol table knows that this is happening. >@@ -470,8 +510,7 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionNode* functionNode, Unlinke > functionSymbolTable->set(NoLockingNecessary, name, SymbolTableEntry(VarOffset(DirectArgumentsOffset(i)))); > } > >- emitOpcode(op_create_direct_arguments); >- instructions().append(m_argumentsRegister->index()); >+ OpCreateDirectArguments::emit(this, m_argumentsRegister); > } > } else if (isSimpleParameterList) { > // Create the formal parameters the normal way. Any of them could be captured, or not. If >@@ -495,20 +534,13 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionNode* functionNode, Unlinke > static_cast<const BindingNode*>(parameters.at(i).first)->boundProperty(); > functionSymbolTable->set(NoLockingNecessary, name, SymbolTableEntry(VarOffset(offset))); > >- emitOpcode(op_put_to_scope); >- instructions().append(m_lexicalEnvironmentRegister->index()); >- instructions().append(addConstant(ident)); >- instructions().append(virtualRegisterForArgument(1 + i).offset()); >- instructions().append(GetPutInfo(ThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand()); >- instructions().append(symbolTableConstantIndex); >- instructions().append(offset.offset()); >+ OpPutToScope::emit(this, m_lexicalEnvironmentRegister, addConstant(ident), virtualRegisterForArgument(1 + i), GetPutInfo(ThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization), symbolTableConstantIndex); > } > } > > if (needsArguments && (codeBlock->isStrictMode() || !isSimpleParameterList)) { > // Allocate a cloned arguments object. 
>- emitOpcode(op_create_cloned_arguments); >- instructions().append(m_argumentsRegister->index()); >+ OpCreateClonedArguments::emit(this, m_argumentsRegister); > } > > // There are some variables that need to be preinitialized to something other than Undefined: >@@ -1165,15 +1197,9 @@ void BytecodeGenerator::initializeVarLexicalEnvironment(int symbolTableConstantI > { > if (hasCapturedVariables) { > RELEASE_ASSERT(m_lexicalEnvironmentRegister); >- emitOpcode(op_create_lexical_environment); >- instructions().append(m_lexicalEnvironmentRegister->index()); >- instructions().append(scopeRegister()->index()); >- instructions().append(symbolTableConstantIndex); >- instructions().append(addConstantValue(jsUndefined())->index()); >+ OpCreateLexicalEnvironment::emit(this, m_lexicalEnvironmentRegister, scopeRegister(), symbolTableConstantIndex, addConstantValue(jsUndefined())); > >- emitOpcode(op_mov); >- instructions().append(scopeRegister()->index()); >- instructions().append(m_lexicalEnvironmentRegister->index()); >+ OpMov::emit(this, scopeRegister(), m_lexicalEnvironmentRegister); > > pushLocalControlFlowScope(); > } >@@ -1267,17 +1293,6 @@ void BytecodeGenerator::emitLabel(Label& l0) > m_lastOpcodeID = op_end; > } > >-void BytecodeGenerator::emitOpcode(OpcodeID opcodeID) >-{ >-#ifndef NDEBUG >- size_t opcodePosition = instructions().size(); >- ASSERT(opcodePosition - m_lastOpcodePosition == opcodeLength(m_lastOpcodeID) || m_lastOpcodeID == op_end); >- m_lastOpcodePosition = opcodePosition; >-#endif >- instructions().append(opcodeID); >- m_lastOpcodeID = opcodeID; >-} >- > UnlinkedArrayProfile BytecodeGenerator::newArrayProfile() > { > return m_codeBlock->addArrayProfile(); >@@ -1293,18 +1308,9 @@ UnlinkedObjectAllocationProfile BytecodeGenerator::newObjectAllocationProfile() > return m_codeBlock->addObjectAllocationProfile(); > } > >-UnlinkedValueProfile BytecodeGenerator::emitProfiledOpcode(OpcodeID opcodeID) >-{ >- emitOpcode(opcodeID); >- if (!m_vm->canUseJIT()) >- 
return static_cast<UnlinkedValueProfile>(-1); >- UnlinkedValueProfile result = m_codeBlock->addValueProfile(); >- return result; >-} >- > void BytecodeGenerator::emitEnter() > { >- emitOpcode(op_enter); >+ OpEnter::emit(this); > > if (LIKELY(Options::optimizeRecursiveTailCalls())) { > // We must add the end of op_enter as a potential jump target, because the bytecode parser may decide to split its basic block >@@ -1317,272 +1323,149 @@ void BytecodeGenerator::emitEnter() > > void BytecodeGenerator::emitLoopHint() > { >- emitOpcode(op_loop_hint); >+ OpLoopHint::emit(this); > emitCheckTraps(); > } > > void BytecodeGenerator::emitCheckTraps() > { >- emitOpcode(op_check_traps); >+ OpCheckTraps::emit(this); > } > >-void BytecodeGenerator::retrieveLastBinaryOp(int& dstIndex, int& src1Index, int& src2Index) >+void ALWAYS_INLINE BytecodeGenerator::rewind() > { >- ASSERT(instructions().size() >= 4); >- size_t size = instructions().size(); >- dstIndex = instructions().at(size - 3).u.operand; >- src1Index = instructions().at(size - 2).u.operand; >- src2Index = instructions().at(size - 1).u.operand; >+ ASSERT(m_lastInstruction.isValid()); >+ m_lastOpcodeID = m_lastInstruction->opcodeID(); >+ m_writer.rewind(m_lastInstruction); > } > >-void BytecodeGenerator::retrieveLastUnaryOp(int& dstIndex, int& srcIndex) >+template<typename BinOp, typename JmpOp> >+bool BytecodeGenerator::fuseCompareAndJump(RegisterID* cond, Label& target, bool swapOperands) > { >- ASSERT(instructions().size() >= 3); >- size_t size = instructions().size(); >- dstIndex = instructions().at(size - 2).u.operand; >- srcIndex = instructions().at(size - 1).u.operand; >-} >+ auto binop = m_lastInstruction->as<BinOp>(); >+ if (cond->index() == binop.dst.offset() && cond->isTemporary() && !cond->refCount()) { >+ rewind(); > >-void ALWAYS_INLINE BytecodeGenerator::rewindBinaryOp() >-{ >- ASSERT(instructions().size() >= 4); >- instructions().shrink(instructions().size() - 4); >- m_lastOpcodeID = op_end; >-} >+ if 
(swapOperands) >+ std::swap(binop.lhs, binop.rhs); > >-void ALWAYS_INLINE BytecodeGenerator::rewindUnaryOp() >-{ >- ASSERT(instructions().size() >= 3); >- instructions().shrink(instructions().size() - 3); >- m_lastOpcodeID = op_end; >+ JmpOp::emit(this, binop.lhs, binop.rhs, target.bind(this)); >+ return true; >+ } >+ return false; > } > >-void BytecodeGenerator::emitJump(Label& target) >+template<typename UnaryOp, typename JmpOp> >+bool BytecodeGenerator::fuseTestAndJmp(RegisterID* cond, Label& target) > { >- size_t begin = instructions().size(); >- emitOpcode(op_jmp); >- instructions().append(target.bind(begin, instructions().size())); >+ auto unop = m_lastInstruction->as<UnaryOp>(); >+ if (cond->index() == unop.dst.offset() && cond->isTemporary() && !cond->refCount()) { >+ rewind(); >+ >+ JmpOp::emit(this, unop.operand, target.bind(this)); >+ return true; >+ } >+ return false; > } > > void BytecodeGenerator::emitJumpIfTrue(RegisterID* cond, Label& target) > { >- auto fuseCompareAndJump = [&] (OpcodeID jumpID) { >- int dstIndex; >- int src1Index; >- int src2Index; >- >- retrieveLastBinaryOp(dstIndex, src1Index, src2Index); >- >- if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) { >- rewindBinaryOp(); >- >- size_t begin = instructions().size(); >- emitOpcode(jumpID); >- instructions().append(src1Index); >- instructions().append(src2Index); >- instructions().append(target.bind(begin, instructions().size())); >- return true; >- } >- return false; >- }; > > if (m_lastOpcodeID == op_less) { >- if (fuseCompareAndJump(op_jless)) >+ if (fuseCompareAndJump<OpLess, OpJless>(cond, target)) > return; > } else if (m_lastOpcodeID == op_lesseq) { >- if (fuseCompareAndJump(op_jlesseq)) >+ if (fuseCompareAndJump<OpLesseq, OpJlesseq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_greater) { >- if (fuseCompareAndJump(op_jgreater)) >+ if (fuseCompareAndJump<OpGreater, OpJgreater>(cond, target)) > return; > } else if (m_lastOpcodeID == 
op_greatereq) { >- if (fuseCompareAndJump(op_jgreatereq)) >+ if(fuseCompareAndJump<OpGreatereq, OpJgreatereq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_eq) { >- if (fuseCompareAndJump(op_jeq)) >+ if(fuseCompareAndJump<OpEq, OpJeq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_stricteq) { >- if (fuseCompareAndJump(op_jstricteq)) >+ if (fuseCompareAndJump<OpStricteq, OpJstricteq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_neq) { >- if (fuseCompareAndJump(op_jneq)) >+ if (fuseCompareAndJump<OpNeq, OpJneq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_nstricteq) { >- if (fuseCompareAndJump(op_jnstricteq)) >+ if (fuseCompareAndJump<OpNstricteq, OpJnstricteq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_below) { >- if (fuseCompareAndJump(op_jbelow)) >+ if (fuseCompareAndJump<OpBelow, OpJbelow>(cond, target)) > return; > } else if (m_lastOpcodeID == op_beloweq) { >- if (fuseCompareAndJump(op_jbeloweq)) >+ if (fuseCompareAndJump<OpBeloweq, OpJbeloweq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_eq_null && target.isForward()) { >- int dstIndex; >- int srcIndex; >- >- retrieveLastUnaryOp(dstIndex, srcIndex); >- >- if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) { >- rewindUnaryOp(); >- >- size_t begin = instructions().size(); >- emitOpcode(op_jeq_null); >- instructions().append(srcIndex); >- instructions().append(target.bind(begin, instructions().size())); >+ if (fuseTestAndJmp<OpEqNull, OpJeqNull>(cond, target)) > return; >- } > } else if (m_lastOpcodeID == op_neq_null && target.isForward()) { >- int dstIndex; >- int srcIndex; >- >- retrieveLastUnaryOp(dstIndex, srcIndex); >- >- if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) { >- rewindUnaryOp(); >- >- size_t begin = instructions().size(); >- emitOpcode(op_jneq_null); >- instructions().append(srcIndex); >- instructions().append(target.bind(begin, instructions().size())); >+ if 
(fuseTestAndJmp<OpNeqNull, OpJneqNull>(cond, target)) > return; >- } > } > >- size_t begin = instructions().size(); >- >- emitOpcode(op_jtrue); >- instructions().append(cond->index()); >- instructions().append(target.bind(begin, instructions().size())); >+ OpJtrue::emit(this, cond, target.bind(this)); > } > > void BytecodeGenerator::emitJumpIfFalse(RegisterID* cond, Label& target) > { >- auto fuseCompareAndJump = [&] (OpcodeID jumpID, bool replaceOperands) { >- int dstIndex; >- int src1Index; >- int src2Index; >- >- retrieveLastBinaryOp(dstIndex, src1Index, src2Index); >- >- if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) { >- rewindBinaryOp(); >- >- size_t begin = instructions().size(); >- emitOpcode(jumpID); >- // Since op_below and op_beloweq only accepts Int32, replacing operands is not observable to users. >- if (replaceOperands) >- std::swap(src1Index, src2Index); >- instructions().append(src1Index); >- instructions().append(src2Index); >- instructions().append(target.bind(begin, instructions().size())); >- return true; >- } >- return false; >- }; >- > if (m_lastOpcodeID == op_less && target.isForward()) { >- if (fuseCompareAndJump(op_jnless, false)) >+ if (fuseCompareAndJump<OpLess, OpJnless>(cond, target)) > return; > } else if (m_lastOpcodeID == op_lesseq && target.isForward()) { >- if (fuseCompareAndJump(op_jnlesseq, false)) >+ if (fuseCompareAndJump<OpLesseq, OpJnlesseq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_greater && target.isForward()) { >- if (fuseCompareAndJump(op_jngreater, false)) >+ if (fuseCompareAndJump<OpGreater, OpJngreater>(cond, target)) > return; > } else if (m_lastOpcodeID == op_greatereq && target.isForward()) { >- if (fuseCompareAndJump(op_jngreatereq, false)) >+ if (fuseCompareAndJump<OpGreatereq, OpJngreatereq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_eq && target.isForward()) { >- if (fuseCompareAndJump(op_jneq, false)) >+ if (fuseCompareAndJump<OpEq, OpJneq>(cond, 
target)) > return; > } else if (m_lastOpcodeID == op_stricteq && target.isForward()) { >- if (fuseCompareAndJump(op_jnstricteq, false)) >+ if (fuseCompareAndJump<OpStricteq, OpJnstricteq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_neq && target.isForward()) { >- if (fuseCompareAndJump(op_jeq, false)) >+ if (fuseCompareAndJump<OpNeq, OpJeq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_nstricteq && target.isForward()) { >- if (fuseCompareAndJump(op_jstricteq, false)) >+ if (fuseCompareAndJump<OpNstricteq, OpJstricteq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_below && target.isForward()) { >- if (fuseCompareAndJump(op_jbeloweq, true)) >+ if (fuseCompareAndJump<OpBelow, OpJbeloweq>(cond, target, true)) > return; > } else if (m_lastOpcodeID == op_beloweq && target.isForward()) { >- if (fuseCompareAndJump(op_jbelow, true)) >+ if (fuseCompareAndJump<OpBeloweq, OpJbelow>(cond, target, true)) > return; > } else if (m_lastOpcodeID == op_not) { >- int dstIndex; >- int srcIndex; >- >- retrieveLastUnaryOp(dstIndex, srcIndex); >- >- if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) { >- rewindUnaryOp(); >- >- size_t begin = instructions().size(); >- emitOpcode(op_jtrue); >- instructions().append(srcIndex); >- instructions().append(target.bind(begin, instructions().size())); >+ if (fuseTestAndJmp<OpNot, OpJtrue>(cond, target)) > return; >- } > } else if (m_lastOpcodeID == op_eq_null && target.isForward()) { >- int dstIndex; >- int srcIndex; >- >- retrieveLastUnaryOp(dstIndex, srcIndex); >- >- if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) { >- rewindUnaryOp(); >- >- size_t begin = instructions().size(); >- emitOpcode(op_jneq_null); >- instructions().append(srcIndex); >- instructions().append(target.bind(begin, instructions().size())); >+ if (fuseTestAndJmp<OpEqNull, OpJneqNull>(cond, target)) > return; >- } > } else if (m_lastOpcodeID == op_neq_null && target.isForward()) { >- int 
dstIndex; >- int srcIndex; >- >- retrieveLastUnaryOp(dstIndex, srcIndex); >- >- if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) { >- rewindUnaryOp(); >- >- size_t begin = instructions().size(); >- emitOpcode(op_jeq_null); >- instructions().append(srcIndex); >- instructions().append(target.bind(begin, instructions().size())); >+ if (fuseTestAndJmp<OpNeqNull, OpJeqNull>(cond, target)) > return; >- } > } > >- size_t begin = instructions().size(); >- emitOpcode(op_jfalse); >- instructions().append(cond->index()); >- instructions().append(target.bind(begin, instructions().size())); >+ OpJfalse::emit(this, cond, target.bind(this)); > } > > void BytecodeGenerator::emitJumpIfNotFunctionCall(RegisterID* cond, Label& target) > { >- size_t begin = instructions().size(); >- >- emitOpcode(op_jneq_ptr); >- instructions().append(cond->index()); >- instructions().append(Special::CallFunction); >- instructions().append(target.bind(begin, instructions().size())); >- instructions().append(0); >+ OpJneqPtr::emit(this, cond, Special::CallFunction, target.bind(this)); > } > > void BytecodeGenerator::emitJumpIfNotFunctionApply(RegisterID* cond, Label& target) > { >- size_t begin = instructions().size(); >- >- emitOpcode(op_jneq_ptr); >- instructions().append(cond->index()); >- instructions().append(Special::ApplyFunction); >- instructions().append(target.bind(begin, instructions().size())); >- instructions().append(0); >+ OpJneqPtr::emit(this, cond, Special::ApplyFunction, target.bind(this)); > } > > bool BytecodeGenerator::hasConstant(const Identifier& ident) const >@@ -1644,9 +1527,7 @@ RegisterID* BytecodeGenerator::moveLinkTimeConstant(RegisterID* dst, LinkTimeCon > if (!dst) > return m_linkTimeConstantRegisters[constantIndex]; > >- emitOpcode(op_mov); >- instructions().append(dst->index()); >- instructions().append(m_linkTimeConstantRegisters[constantIndex]->index()); >+ OpMov::emit(this, dst, m_linkTimeConstantRegisters[constantIndex]); > > return dst; > 
} >@@ -1655,9 +1536,8 @@ RegisterID* BytecodeGenerator::moveEmptyValue(RegisterID* dst) > { > RefPtr<RegisterID> emptyValue = addConstantEmptyValue(); > >- emitOpcode(op_mov); >- instructions().append(dst->index()); >- instructions().append(emptyValue->index()); >+ OpMov::emit(this, dst, emptyValue.get()); >+ > return dst; > } > >@@ -1665,163 +1545,169 @@ RegisterID* BytecodeGenerator::emitMove(RegisterID* dst, RegisterID* src) > { > ASSERT(src != m_emptyValueRegister); > >- m_staticPropertyAnalyzer.mov(dst->index(), src->index()); >- emitOpcode(op_mov); >- instructions().append(dst->index()); >- instructions().append(src->index()); >+ m_staticPropertyAnalyzer.mov(dst, src); >+ OpMov::emit(this, dst, src); > > return dst; > } > >-RegisterID* BytecodeGenerator::emitUnaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src) >+RegisterID* BytecodeGenerator::emitUnaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src, OperandTypes types) > { >- ASSERT_WITH_MESSAGE(op_to_number != opcodeID, "op_to_number has a Value Profile."); >- ASSERT_WITH_MESSAGE(op_negate != opcodeID, "op_negate has an Arith Profile."); >- emitOpcode(opcodeID); >- instructions().append(dst->index()); >- instructions().append(src->index()); >- >+ switch (opcodeID) { >+ case op_not: >+ emitUnaryOp<OpNot>(dst, src); >+ break; >+ case op_negate: >+ OpNegate::emit(this, dst, src, types); >+ break; >+ case op_to_number: >+ emitUnaryOp<OpToNumber>(dst, src); >+ break; >+ default: >+ ASSERT_NOT_REACHED(); >+ } > return dst; > } > >-RegisterID* BytecodeGenerator::emitUnaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src, OperandTypes types) >+RegisterID* BytecodeGenerator::emitBinaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes types) > { >- ASSERT_WITH_MESSAGE(op_to_number != opcodeID, "op_to_number has a Value Profile."); >- emitOpcode(opcodeID); >- instructions().append(dst->index()); >- instructions().append(src->index()); >+ switch (opcodeID) { >+ 
case op_eq: >+ return emitBinaryOp<OpEq>(dst, src1, src2, types); >+ case op_neq: >+ return emitBinaryOp<OpNeq>(dst, src1, src2, types); >+ case op_stricteq: >+ return emitBinaryOp<OpStricteq>(dst, src1, src2, types); >+ case op_nstricteq: >+ return emitBinaryOp<OpNstricteq>(dst, src1, src2, types); >+ case op_less: >+ return emitBinaryOp<OpLess>(dst, src1, src2, types); >+ case op_lesseq: >+ return emitBinaryOp<OpLesseq>(dst, src1, src2, types); >+ case op_greater: >+ return emitBinaryOp<OpGreater>(dst, src1, src2, types); >+ case op_greatereq: >+ return emitBinaryOp<OpGreatereq>(dst, src1, src2, types); >+ case op_below: >+ return emitBinaryOp<OpBelow>(dst, src1, src2, types); >+ case op_beloweq: >+ return emitBinaryOp<OpBeloweq>(dst, src1, src2, types); >+ case op_mod: >+ return emitBinaryOp<OpMod>(dst, src1, src2, types); >+ case op_pow: >+ return emitBinaryOp<OpPow>(dst, src1, src2, types); >+ case op_lshift: >+ return emitBinaryOp<OpLshift>(dst, src1, src2, types); >+ case op_rshift: >+ return emitBinaryOp<OpRshift>(dst, src1, src2, types); >+ case op_urshift: >+ return emitBinaryOp<OpUrshift>(dst, src1, src2, types); >+ case op_add: >+ return emitBinaryOp<OpAdd>(dst, src1, src2, types); >+ case op_mul: >+ return emitBinaryOp<OpMul>(dst, src1, src2, types); >+ case op_div: >+ return emitBinaryOp<OpDiv>(dst, src1, src2, types); >+ case op_sub: >+ return emitBinaryOp<OpSub>(dst, src1, src2, types); >+ case op_bitand: >+ return emitBinaryOp<OpBitand>(dst, src1, src2, types); >+ case op_bitxor: >+ return emitBinaryOp<OpBitxor>(dst, src1, src2, types); >+ case op_bitor: >+ return emitBinaryOp<OpBitor>(dst, src1, src2, types); >+ default: >+ ASSERT_NOT_REACHED(); >+ } >+} > >- if (opcodeID == op_negate) >- instructions().append(ArithProfile(types.first()).bits()); >+RegisterID* BytecodeGenerator::emitToObject(RegisterID* dst, RegisterID* src, const Identifier& message) >+{ >+ OpToObject::emit(this, dst, src, addConstant(message)); > return dst; > } > >-RegisterID* 
BytecodeGenerator::emitUnaryOpProfiled(OpcodeID opcodeID, RegisterID* dst, RegisterID* src) >+RegisterID* BytecodeGenerator::emitToNumber(RegisterID* dst, RegisterID* src) > { >- UnlinkedValueProfile profile = emitProfiledOpcode(opcodeID); >- instructions().append(dst->index()); >- instructions().append(src->index()); >- instructions().append(profile); >- return dst; >+ return emitUnaryOp<OpToNumber>(dst, src); > } > >-RegisterID* BytecodeGenerator::emitToObject(RegisterID* dst, RegisterID* src, const Identifier& message) >+RegisterID* BytecodeGenerator::emitToString(RegisterID* dst, RegisterID* src) > { >- UnlinkedValueProfile profile = emitProfiledOpcode(op_to_object); >- instructions().append(dst->index()); >- instructions().append(src->index()); >- instructions().append(addConstant(message)); >- instructions().append(profile); >- return dst; >+ return emitUnaryOp<OpToString>(dst, src); > } > >-RegisterID* BytecodeGenerator::emitInc(RegisterID* srcDst) >+RegisterID* BytecodeGenerator::emitTypeOf(RegisterID* dst, RegisterID* src) > { >- emitOpcode(op_inc); >- instructions().append(srcDst->index()); >- return srcDst; >+ return emitUnaryOp<OpTypeof>(dst, src); > } > >-RegisterID* BytecodeGenerator::emitDec(RegisterID* srcDst) >+RegisterID* BytecodeGenerator::emitInc(RegisterID* srcDst) > { >- emitOpcode(op_dec); >- instructions().append(srcDst->index()); >+ OpInc::emit(this, srcDst); > return srcDst; > } > >-RegisterID* BytecodeGenerator::emitBinaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes types) >+RegisterID* BytecodeGenerator::emitDec(RegisterID* srcDst) > { >- emitOpcode(opcodeID); >- instructions().append(dst->index()); >- instructions().append(src1->index()); >- instructions().append(src2->index()); >- >- if (opcodeID == op_bitor || opcodeID == op_bitand || opcodeID == op_bitxor || >- opcodeID == op_add || opcodeID == op_mul || opcodeID == op_sub || opcodeID == op_div) >- 
instructions().append(ArithProfile(types.first(), types.second()).bits()); >- >- return dst; >+ OpDec::emit(this, srcDst); >+ return srcDst; > } > >-RegisterID* BytecodeGenerator::emitEqualityOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2) >+template<typename EqOp> >+RegisterID* BytecodeGenerator::emitEqualityOp(RegisterID* dst, RegisterID* src1, RegisterID* src2) > { >- if (m_lastOpcodeID == op_typeof) { >- int dstIndex; >- int srcIndex; >- >- retrieveLastUnaryOp(dstIndex, srcIndex); >- >- if (src1->index() == dstIndex >+ if (m_lastInstruction->is<OpTypeof>()) { >+ auto op = m_lastInstruction->as<OpTypeof>(); >+ if (src1->index() == op.dst.offset() > && src1->isTemporary() > && m_codeBlock->isConstantRegisterIndex(src2->index()) > && m_codeBlock->constantRegister(src2->index()).get().isString()) { > const String& value = asString(m_codeBlock->constantRegister(src2->index()).get())->tryGetValue(); > if (value == "undefined") { >- rewindUnaryOp(); >- emitOpcode(op_is_undefined); >- instructions().append(dst->index()); >- instructions().append(srcIndex); >+ rewind(); >+ OpIsUndefined::emit(this, dst, op.value); > return dst; > } > if (value == "boolean") { >- rewindUnaryOp(); >- emitOpcode(op_is_boolean); >- instructions().append(dst->index()); >- instructions().append(srcIndex); >+ rewind(); >+ OpIsBoolean::emit(this, dst, op.value); > return dst; > } > if (value == "number") { >- rewindUnaryOp(); >- emitOpcode(op_is_number); >- instructions().append(dst->index()); >- instructions().append(srcIndex); >+ rewind(); >+ OpIsNumber::emit(this, dst, op.value); > return dst; > } > if (value == "string") { >- rewindUnaryOp(); >- emitOpcode(op_is_cell_with_type); >- instructions().append(dst->index()); >- instructions().append(srcIndex); >- instructions().append(StringType); >+ rewind(); >+ OpIsCellWithType::emit(this, dst, op.value, StringType); > return dst; > } > if (value == "symbol") { >- rewindUnaryOp(); >- emitOpcode(op_is_cell_with_type); 
>- instructions().append(dst->index()); >- instructions().append(srcIndex); >- instructions().append(SymbolType); >+ rewind(); >+ OpIsCellWithType::emit(this, dst, op.value, SymbolType); > return dst; > } > if (Options::useBigInt() && value == "bigint") { >- rewindUnaryOp(); >- emitOpcode(op_is_cell_with_type); >- instructions().append(dst->index()); >- instructions().append(srcIndex); >- instructions().append(BigIntType); >+ rewind(); >+ OpIsCellWithType::emit(this, dst, op.value, BigIntType); > return dst; > } > if (value == "object") { >- rewindUnaryOp(); >- emitOpcode(op_is_object_or_null); >- instructions().append(dst->index()); >- instructions().append(srcIndex); >+ rewind(); >+ OpIsObjectOrNull::emit(this, dst, op.value); > return dst; > } > if (value == "function") { >- rewindUnaryOp(); >- emitOpcode(op_is_function); >- instructions().append(dst->index()); >- instructions().append(srcIndex); >+ rewind(); >+ OpIsFunction::emit(this, dst, op.value); > return dst; > } > } > } > >- emitOpcode(opcodeID); >- instructions().append(dst->index()); >- instructions().append(src1->index()); >- instructions().append(src2->index()); >+ EqOp::emit(this, dst, src1, src2); > return dst; > } > >@@ -1843,12 +1729,7 @@ void BytecodeGenerator::emitProfileType(RegisterID* registerToProfile, ProfileTy > if (!registerToProfile) > return; > >- emitOpcode(op_profile_type); >- instructions().append(registerToProfile->index()); >- instructions().append(0); >- instructions().append(flag); >- instructions().append(0); >- instructions().append(resolveType()); >+ OpProfileType::emit(this, registerToProfile, 0, flag, {}, resolveType()); > > // Don't emit expression info for this version of profile type. 
This generally means > // we're profiling information for something that isn't in the actual text of a JavaScript >@@ -1869,13 +1750,7 @@ void BytecodeGenerator::emitProfileType(RegisterID* registerToProfile, ProfileTy > return; > > // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType? >- emitOpcode(op_profile_type); >- instructions().append(registerToProfile->index()); >- instructions().append(0); >- instructions().append(flag); >- instructions().append(0); >- instructions().append(resolveType()); >- >+ OpProfileType::emit(this, registerToProfile, 0, flag, {}, resolveType()); > emitTypeProfilerExpressionInfo(startDivot, endDivot); > } > >@@ -1899,12 +1774,7 @@ void BytecodeGenerator::emitProfileType(RegisterID* registerToProfile, const Var > } > > // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType? >- emitOpcode(op_profile_type); >- instructions().append(registerToProfile->index()); >- instructions().append(symbolTableOrScopeDepth); >- instructions().append(flag); >- instructions().append(addConstant(var.ident())); >- instructions().append(resolveType()); >+ OpProfileType::emit(this, registerToProfile, symbolTableOrScopeDepth, flag, addConstant(var.ident()), resolveType()); > > emitTypeProfilerExpressionInfo(startDivot, endDivot); > } >@@ -1916,8 +1786,7 @@ void BytecodeGenerator::emitProfileControlFlow(int textOffset) > size_t bytecodeOffset = instructions().size(); > m_codeBlock->addOpProfileControlFlowBytecodeOffset(bytecodeOffset); > >- emitOpcode(op_profile_control_flow); >- instructions().append(textOffset); >+ OpProfileControlFlow::emit(this, textOffset); > } > } > >@@ -2116,11 +1985,7 @@ void BytecodeGenerator::pushLexicalScopeInternal(VariableEnvironment& environmen > if (constantSymbolTableResult) > *constantSymbolTableResult = constantSymbolTable; > >- emitOpcode(op_create_lexical_environment); >- 
instructions().append(newScope->index()); >- instructions().append(scopeRegister()->index()); >- instructions().append(constantSymbolTable->index()); >- instructions().append(addConstantValue(tdzRequirement == TDZRequirement::UnderTDZ ? jsTDZValue() : jsUndefined())->index()); >+ OpCreateLexicalEnvironment::emit(this, newScope, scopeRegister(), symbolTableConstantIndex, addConstantValue(tdzRequirement == TDZRequirement::UnderTDZ ? jsTDZValue() : jsUndefined())); > > move(scopeRegister(), newScope); > >@@ -2251,10 +2116,7 @@ RegisterID* BytecodeGenerator::emitResolveScopeForHoistingFuncDeclInEval(Registe > ASSERT(m_codeType == EvalCode); > > dst = finalDestination(dst); >- emitOpcode(op_resolve_scope_for_hoisting_func_decl_in_eval); >- instructions().append(kill(dst)); >- instructions().append(m_topMostScope->index()); >- instructions().append(addConstant(property)); >+ OpResolveScopeForHoistingFuncDeclInEval::emit(this, kill(dst), m_topMostScope, addConstant(property)); > return dst; > } > >@@ -2352,11 +2214,7 @@ void BytecodeGenerator::prepareLexicalScopeForNextForLoopIteration(VariableEnvir > RefPtr<RegisterID> parentScope = emitGetParentScope(newTemporary(), loopScope); > move(scopeRegister(), parentScope.get()); > >- emitOpcode(op_create_lexical_environment); >- instructions().append(loopScope->index()); >- instructions().append(scopeRegister()->index()); >- instructions().append(loopSymbolTable->index()); >- instructions().append(addConstantValue(jsTDZValue())->index()); >+ OpCreateLexicalEnvironment::emit(this, loopScope, scopeRegister(), loopSymbolTable->index(), addConstantValue(jsTDZValue())); > > move(scopeRegister(), loopScope); > >@@ -2481,10 +2339,7 @@ void BytecodeGenerator::createVariable( > > RegisterID* BytecodeGenerator::emitOverridesHasInstance(RegisterID* dst, RegisterID* constructor, RegisterID* hasInstanceValue) > { >- emitOpcode(op_overrides_has_instance); >- instructions().append(dst->index()); >- instructions().append(constructor->index()); 
>- instructions().append(hasInstanceValue->index()); >+ OpOverridesHasInstance::emit(this, dst, constructor, hasInstanceValue); > return dst; > } > >@@ -2549,13 +2404,7 @@ RegisterID* BytecodeGenerator::emitResolveScope(RegisterID* dst, const Variable& > > // resolve_scope dst, id, ResolveType, depth > dst = tempDestination(dst); >- emitOpcode(op_resolve_scope); >- instructions().append(kill(dst)); >- instructions().append(scopeRegister()->index()); >- instructions().append(addConstant(variable.ident())); >- instructions().append(resolveType()); >- instructions().append(localScopeDepth()); >- instructions().append(0); >+ OpResolveScope::emit(this, kill(dst), scopeRegister(), addConstant(variable.ident()), resolveType(), localScopeDepth()); > return dst; > } > >@@ -2570,11 +2419,7 @@ RegisterID* BytecodeGenerator::emitGetFromScope(RegisterID* dst, RegisterID* sco > return move(dst, variable.local()); > > case VarKind::DirectArgument: { >- UnlinkedValueProfile profile = emitProfiledOpcode(op_get_from_arguments); >- instructions().append(kill(dst)); >- instructions().append(scope->index()); >- instructions().append(variable.offset().capturedArgumentsOffset().offset()); >- instructions().append(profile); >+ OpGetFromArguments::emit(this, kill(dst), scope, variable.offset().capturedArgumentsOffset().offset()); > return dst; > } > >@@ -2583,14 +2428,13 @@ RegisterID* BytecodeGenerator::emitGetFromScope(RegisterID* dst, RegisterID* sco > m_codeBlock->addPropertyAccessInstruction(instructions().size()); > > // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand >- UnlinkedValueProfile profile = emitProfiledOpcode(op_get_from_scope); >- instructions().append(kill(dst)); >- instructions().append(scope->index()); >- instructions().append(addConstant(variable.ident())); >- instructions().append(GetPutInfo(resolveMode, variable.offset().isScope() ? 
LocalClosureVar : resolveType(), InitializationMode::NotInitialization).operand()); >- instructions().append(localScopeDepth()); >- instructions().append(variable.offset().isScope() ? variable.offset().scopeOffset().offset() : 0); >- instructions().append(profile); >+ OpGetFromScope::emit( >+ this, >+ kill(dst), >+ scope, >+ addConstant(variable.ident()), >+ GetPutInfo(resolveMode, variable.offset().isScope() ? LocalClosureVar : resolveType(), InitializationMode::NotInitialization), >+ localScopeDepth()); > return dst; > } } > >@@ -2605,10 +2449,7 @@ RegisterID* BytecodeGenerator::emitPutToScope(RegisterID* scope, const Variable& > return value; > > case VarKind::DirectArgument: >- emitOpcode(op_put_to_arguments); >- instructions().append(scope->index()); >- instructions().append(variable.offset().capturedArgumentsOffset().offset()); >- instructions().append(value->index()); >+ OpPutToArguments::emit(this, scope, variable.offset().capturedArgumentsOffset().offset(), value); > return value; > > case VarKind::Scope: >@@ -2616,21 +2457,17 @@ RegisterID* BytecodeGenerator::emitPutToScope(RegisterID* scope, const Variable& > m_codeBlock->addPropertyAccessInstruction(instructions().size()); > > // put_to_scope scope, id, value, GetPutInfo, Structure, Operand >- emitOpcode(op_put_to_scope); >- instructions().append(scope->index()); >- instructions().append(addConstant(variable.ident())); >- instructions().append(value->index()); >- ScopeOffset offset; >+ GetPutInfo getPutInfo(0); >+ int scopeDepth; > if (variable.offset().isScope()) { >- offset = variable.offset().scopeOffset(); >- instructions().append(GetPutInfo(resolveMode, LocalClosureVar, initializationMode).operand()); >- instructions().append(variable.symbolTableConstantIndex()); >+ getPutInfo = GetPutInfo(resolveMode, LocalClosureVar, initializationMode); >+ scopeDepth = variable.symbolTableConstantIndex(); > } else { > ASSERT(resolveType() != LocalClosureVar); >- instructions().append(GetPutInfo(resolveMode, 
resolveType(), initializationMode).operand()); >- instructions().append(localScopeDepth()); >+ getPutInfo = GetPutInfo(resolveMode, resolveType(), initializationMode); >+ scopeDepth = localScopeDepth(); > } >- instructions().append(!!offset ? offset.offset() : 0); >+ OpPutToScope::emit(this, scope, addConstant(variable.ident()), value, getPutInfo, scopeDepth); > return value; > } } > >@@ -2646,40 +2483,25 @@ RegisterID* BytecodeGenerator::initializeVariable(const Variable& variable, Regi > > RegisterID* BytecodeGenerator::emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* basePrototype) > { >- emitOpcode(op_instanceof); >- instructions().append(dst->index()); >- instructions().append(value->index()); >- instructions().append(basePrototype->index()); >+ OpInstanceof::emit(this, dst, value, basePrototype); > return dst; > } > > RegisterID* BytecodeGenerator::emitInstanceOfCustom(RegisterID* dst, RegisterID* value, RegisterID* constructor, RegisterID* hasInstanceValue) > { >- emitOpcode(op_instanceof_custom); >- instructions().append(dst->index()); >- instructions().append(value->index()); >- instructions().append(constructor->index()); >- instructions().append(hasInstanceValue->index()); >+ OpInstanceofCustom::emit(this, dst, value, constructor, hasInstanceValue); > return dst; > } > > RegisterID* BytecodeGenerator::emitInByVal(RegisterID* dst, RegisterID* property, RegisterID* base) > { >- UnlinkedArrayProfile arrayProfile = newArrayProfile(); >- emitOpcode(op_in_by_val); >- instructions().append(dst->index()); >- instructions().append(base->index()); >- instructions().append(property->index()); >- instructions().append(arrayProfile); >+ OpInByVal::emit(this, dst, base, property); > return dst; > } > > RegisterID* BytecodeGenerator::emitInById(RegisterID* dst, RegisterID* base, const Identifier& property) > { >- emitOpcode(op_in_by_id); >- instructions().append(dst->index()); >- instructions().append(base->index()); >- 
instructions().append(addConstant(property)); >+ OpInById::emit(this, dst, base, addConstant(property)); > return dst; > } > >@@ -2687,11 +2509,7 @@ RegisterID* BytecodeGenerator::emitTryGetById(RegisterID* dst, RegisterID* base, > { > ASSERT_WITH_MESSAGE(!parseIndex(property), "Indexed properties are not supported with tryGetById."); > >- UnlinkedValueProfile profile = emitProfiledOpcode(op_try_get_by_id); >- instructions().append(kill(dst)); >- instructions().append(base->index()); >- instructions().append(addConstant(property)); >- instructions().append(profile); >+ OpTryGetById::emit(this, kill(dst), base, addConstant(property)); > return dst; > } > >@@ -2701,15 +2519,8 @@ RegisterID* BytecodeGenerator::emitGetById(RegisterID* dst, RegisterID* base, co > > m_codeBlock->addPropertyAccessInstruction(instructions().size()); > >- UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_id); >- instructions().append(kill(dst)); >- instructions().append(base->index()); >- instructions().append(addConstant(property)); >- instructions().append(0); >- instructions().append(0); >- instructions().append(0); >- instructions().append(Options::prototypeHitCountForLLIntCaching()); >- instructions().append(profile); >+ OpGetById::emit(this, kill(dst), base, addConstant(property)); >+ // TODO: instructions().append(Options::prototypeHitCountForLLIntCaching()); > return dst; > } > >@@ -2717,12 +2528,7 @@ RegisterID* BytecodeGenerator::emitGetById(RegisterID* dst, RegisterID* base, Re > { > ASSERT_WITH_MESSAGE(!parseIndex(property), "Indexed properties should be handled with get_by_val."); > >- UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_id_with_this); >- instructions().append(kill(dst)); >- instructions().append(base->index()); >- instructions().append(thisVal->index()); >- instructions().append(addConstant(property)); >- instructions().append(profile); >+ OpGetByIdWithThis::emit(this, kill(dst), base, thisVal, addConstant(property)); > return dst; > } > >@@ 
-2732,13 +2538,7 @@ RegisterID* BytecodeGenerator::emitDirectGetById(RegisterID* dst, RegisterID* ba > > m_codeBlock->addPropertyAccessInstruction(instructions().size()); > >- UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_id_direct); >- instructions().append(kill(dst)); >- instructions().append(base->index()); >- instructions().append(addConstant(property)); >- instructions().append(0); >- instructions().append(0); >- instructions().append(profile); >+ OpGetByIdDirect::emit(this, kill(dst), base, addConstant(property)); > return dst; > } > >@@ -2748,19 +2548,11 @@ RegisterID* BytecodeGenerator::emitPutById(RegisterID* base, const Identifier& p > > unsigned propertyIndex = addConstant(property); > >- m_staticPropertyAnalyzer.putById(base->index(), propertyIndex); >+ m_staticPropertyAnalyzer.putById(base, propertyIndex); > >- m_codeBlock->addPropertyAccessInstruction(instructions().size()); >+ // TODO: m_codeBlock->addPropertyAccessInstruction(m_writer.ref()); > >- emitOpcode(op_put_by_id); >- instructions().append(base->index()); >- instructions().append(propertyIndex); >- instructions().append(value->index()); >- instructions().append(0); // old structure >- instructions().append(0); // offset >- instructions().append(0); // new structure >- instructions().append(0); // structure chain >- instructions().append(static_cast<int>(PutByIdNone)); // is not direct >+ OpPutById::emit(this, base, propertyIndex, value, PutByIdNone); // is not direct > > return value; > } >@@ -2771,11 +2563,7 @@ RegisterID* BytecodeGenerator::emitPutById(RegisterID* base, RegisterID* thisVal > > unsigned propertyIndex = addConstant(property); > >- emitOpcode(op_put_by_id_with_this); >- instructions().append(base->index()); >- instructions().append(thisValue->index()); >- instructions().append(propertyIndex); >- instructions().append(value->index()); >+ OpPutByIdWithThis::emit(this, base, thisValue, propertyIndex, value); > > return value; > } >@@ -2786,76 +2574,48 @@ 
RegisterID* BytecodeGenerator::emitDirectPutById(RegisterID* base, const Identif > > unsigned propertyIndex = addConstant(property); > >- m_staticPropertyAnalyzer.putById(base->index(), propertyIndex); >+ m_staticPropertyAnalyzer.putById(base, propertyIndex); > > m_codeBlock->addPropertyAccessInstruction(instructions().size()); > >- emitOpcode(op_put_by_id); >- instructions().append(base->index()); >- instructions().append(propertyIndex); >- instructions().append(value->index()); >- instructions().append(0); // old structure >- instructions().append(0); // offset >- instructions().append(0); // new structure >- instructions().append(0); // structure chain (unused if direct) >- instructions().append(static_cast<int>((putType == PropertyNode::KnownDirect || property != m_vm->propertyNames->underscoreProto) ? PutByIdIsDirect : PutByIdNone)); >+ PutByIdFlags type = (putType == PropertyNode::KnownDirect || property != m_vm->propertyNames->underscoreProto) ? PutByIdIsDirect : PutByIdNone; >+ OpPutById::emit(this, base, propertyIndex, value, type); > return value; > } > > void BytecodeGenerator::emitPutGetterById(RegisterID* base, const Identifier& property, unsigned attributes, RegisterID* getter) > { > unsigned propertyIndex = addConstant(property); >- m_staticPropertyAnalyzer.putById(base->index(), propertyIndex); >+ m_staticPropertyAnalyzer.putById(base, propertyIndex); > >- emitOpcode(op_put_getter_by_id); >- instructions().append(base->index()); >- instructions().append(propertyIndex); >- instructions().append(attributes); >- instructions().append(getter->index()); >+ OpPutGetterById::emit(this, base, propertyIndex, attributes, getter); > } > > void BytecodeGenerator::emitPutSetterById(RegisterID* base, const Identifier& property, unsigned attributes, RegisterID* setter) > { > unsigned propertyIndex = addConstant(property); >- m_staticPropertyAnalyzer.putById(base->index(), propertyIndex); >+ m_staticPropertyAnalyzer.putById(base, propertyIndex); > >- 
emitOpcode(op_put_setter_by_id); >- instructions().append(base->index()); >- instructions().append(propertyIndex); >- instructions().append(attributes); >- instructions().append(setter->index()); >+ OpPutSetterById::emit(this, base, propertyIndex, attributes, setter); > } > > void BytecodeGenerator::emitPutGetterSetter(RegisterID* base, const Identifier& property, unsigned attributes, RegisterID* getter, RegisterID* setter) > { > unsigned propertyIndex = addConstant(property); > >- m_staticPropertyAnalyzer.putById(base->index(), propertyIndex); >+ m_staticPropertyAnalyzer.putById(base, propertyIndex); > >- emitOpcode(op_put_getter_setter_by_id); >- instructions().append(base->index()); >- instructions().append(propertyIndex); >- instructions().append(attributes); >- instructions().append(getter->index()); >- instructions().append(setter->index()); >+ OpPutGetterSetterById::emit(this, base, propertyIndex, attributes, getter, setter); > } > > void BytecodeGenerator::emitPutGetterByVal(RegisterID* base, RegisterID* property, unsigned attributes, RegisterID* getter) > { >- emitOpcode(op_put_getter_by_val); >- instructions().append(base->index()); >- instructions().append(property->index()); >- instructions().append(attributes); >- instructions().append(getter->index()); >+ OpPutGetterByVal::emit(this, base, property, attributes, getter); > } > > void BytecodeGenerator::emitPutSetterByVal(RegisterID* base, RegisterID* property, unsigned attributes, RegisterID* setter) > { >- emitOpcode(op_put_setter_by_val); >- instructions().append(base->index()); >- instructions().append(property->index()); >- instructions().append(attributes); >- instructions().append(setter->index()); >+ OpPutSetterByVal::emit(this, base, property, attributes, setter); > } > > void BytecodeGenerator::emitPutGeneratorFields(RegisterID* nextFunction) >@@ -2896,10 +2656,7 @@ void BytecodeGenerator::emitPutAsyncGeneratorFields(RegisterID* nextFunction) > > RegisterID* 
BytecodeGenerator::emitDeleteById(RegisterID* dst, RegisterID* base, const Identifier& property) > { >- emitOpcode(op_del_by_id); >- instructions().append(dst->index()); >- instructions().append(base->index()); >- instructions().append(addConstant(property)); >+ OpDelById::emit(this, dst, base, addConstant(property)); > return dst; > } > >@@ -2920,133 +2677,85 @@ RegisterID* BytecodeGenerator::emitGetByVal(RegisterID* dst, RegisterID* base, R > > ASSERT(context.type() == ForInContext::StructureForInContextType); > StructureForInContext& structureContext = static_cast<StructureForInContext&>(context); >- UnlinkedValueProfile profile = emitProfiledOpcode(op_get_direct_pname); >- instructions().append(kill(dst)); >- instructions().append(base->index()); >- instructions().append(property->index()); >- instructions().append(structureContext.index()->index()); >- instructions().append(structureContext.enumerator()->index()); >- instructions().append(profile); >- >- structureContext.addGetInst(instIndex, property->index(), profile); >+ OpGetDirectPname::emit(this, kill(dst), base, property, structureContext.index(), structureContext.enumerator()); >+ >+ structureContext.addGetInst(instIndex, property->index()); > return dst; > } > >- UnlinkedArrayProfile arrayProfile = newArrayProfile(); >- UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_val); >- instructions().append(kill(dst)); >- instructions().append(base->index()); >- instructions().append(property->index()); >- instructions().append(arrayProfile); >- instructions().append(profile); >+ OpGetByVal::emit(this, kill(dst), base, property); > return dst; > } > > RegisterID* BytecodeGenerator::emitGetByVal(RegisterID* dst, RegisterID* base, RegisterID* thisValue, RegisterID* property) > { >- UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_val_with_this); >- instructions().append(kill(dst)); >- instructions().append(base->index()); >- instructions().append(thisValue->index()); >- 
instructions().append(property->index()); >- instructions().append(profile); >+ OpGetByValWithThis::emit(this, kill(dst), base, thisValue, property); > return dst; > } > > RegisterID* BytecodeGenerator::emitPutByVal(RegisterID* base, RegisterID* property, RegisterID* value) > { >- UnlinkedArrayProfile arrayProfile = newArrayProfile(); >- emitOpcode(op_put_by_val); >- instructions().append(base->index()); >- instructions().append(property->index()); >- instructions().append(value->index()); >- instructions().append(arrayProfile); >- >+ OpPutByVal::emit(this, base, property, value); > return value; > } > > RegisterID* BytecodeGenerator::emitPutByVal(RegisterID* base, RegisterID* thisValue, RegisterID* property, RegisterID* value) > { >- emitOpcode(op_put_by_val_with_this); >- instructions().append(base->index()); >- instructions().append(thisValue->index()); >- instructions().append(property->index()); >- instructions().append(value->index()); >- >+ OpPutByValWithThis::emit(this, base, thisValue, property, value); > return value; > } > > RegisterID* BytecodeGenerator::emitDirectPutByVal(RegisterID* base, RegisterID* property, RegisterID* value) > { >- UnlinkedArrayProfile arrayProfile = newArrayProfile(); >- emitOpcode(op_put_by_val_direct); >- instructions().append(base->index()); >- instructions().append(property->index()); >- instructions().append(value->index()); >- instructions().append(arrayProfile); >+ OpPutByValDirect::emit(this, base, property, value); > return value; > } > > RegisterID* BytecodeGenerator::emitDeleteByVal(RegisterID* dst, RegisterID* base, RegisterID* property) > { >- emitOpcode(op_del_by_val); >- instructions().append(dst->index()); >- instructions().append(base->index()); >- instructions().append(property->index()); >+ OpDelByVal::emit(this, dst, base, property); > return dst; > } > > void BytecodeGenerator::emitSuperSamplerBegin() > { >- emitOpcode(op_super_sampler_begin); >+ OpSuperSamplerBegin::emit(this); > } > > void 
BytecodeGenerator::emitSuperSamplerEnd() > { >- emitOpcode(op_super_sampler_end); >+ OpSuperSamplerEnd::emit(this); > } > > RegisterID* BytecodeGenerator::emitIdWithProfile(RegisterID* src, SpeculatedType profile) > { >- emitOpcode(op_identity_with_profile); >- instructions().append(src->index()); >- instructions().append(static_cast<uint32_t>(profile >> 32)); >- instructions().append(static_cast<uint32_t>(profile)); >+ OpIdentityWithProfile::emit(this, src, static_cast<uint32_t>(profile >> 32), static_cast<uint32_t>(profile)); > return src; > } > > void BytecodeGenerator::emitUnreachable() > { >- emitOpcode(op_unreachable); >+ OpUnreachable::emit(this); > } > > RegisterID* BytecodeGenerator::emitGetArgument(RegisterID* dst, int32_t index) > { >- UnlinkedValueProfile profile = emitProfiledOpcode(op_get_argument); >- instructions().append(dst->index()); >- instructions().append(index + 1); // Including |this|. >- instructions().append(profile); >+ OpGetArgument::emit(this, dst, index + 1 /* Including |this| */); > return dst; > } > > RegisterID* BytecodeGenerator::emitCreateThis(RegisterID* dst) > { >- size_t begin = instructions().size(); >- m_staticPropertyAnalyzer.createThis(dst->index(), begin + 3); >+ m_staticPropertyAnalyzer.createThis(dst, m_writer.ref()); > > m_codeBlock->addPropertyAccessInstruction(instructions().size()); >- emitOpcode(op_create_this); >- instructions().append(dst->index()); >- instructions().append(dst->index()); >- instructions().append(0); >- instructions().append(0); >+ OpCreateThis::emit(this, dst, dst, 0); > return dst; > } > > void BytecodeGenerator::emitTDZCheck(RegisterID* target) > { >- emitOpcode(op_check_tdz); >- instructions().append(target->index()); >+ OpCheckTdz::emit(this, target); > } > > bool BytecodeGenerator::needsTDZCheck(const Variable& variable) >@@ -3146,13 +2855,9 @@ void BytecodeGenerator::restoreTDZStack(const BytecodeGenerator::PreservedTDZSta > > RegisterID* BytecodeGenerator::emitNewObject(RegisterID* dst) > 
{ >- size_t begin = instructions().size(); >- m_staticPropertyAnalyzer.newObject(dst->index(), begin + 2); >+ m_staticPropertyAnalyzer.newObject(dst, m_writer.ref()); > >- emitOpcode(op_new_object); >- instructions().append(dst->index()); >- instructions().append(0); >- instructions().append(newObjectAllocationProfile()); >+ OpNewObject::emit(this, dst, 0); > return dst; > } > >@@ -3195,10 +2900,7 @@ RegisterID* BytecodeGenerator::addTemplateObjectConstant(Ref<TemplateObjectDescr > > RegisterID* BytecodeGenerator::emitNewArrayBuffer(RegisterID* dst, JSImmutableButterfly* array, IndexingType recommendedIndexingType) > { >- emitOpcode(op_new_array_buffer); >- instructions().append(dst->index()); >- instructions().append(addConstantValue(array)->index()); >- instructions().append(newArrayAllocationProfile(recommendedIndexingType)); >+ OpNewArrayBuffer::emit(this, dst, addConstantValue(array), recommendedIndexingType); > return dst; > } > >@@ -3216,11 +2918,7 @@ RegisterID* BytecodeGenerator::emitNewArray(RegisterID* dst, ElementNode* elemen > emitNode(argv.last().get(), n->value()); > } > ASSERT(!length); >- emitOpcode(op_new_array); >- instructions().append(dst->index()); >- instructions().append(argv.size() ? argv[0]->index() : 0); // argv >- instructions().append(argv.size()); // argc >- instructions().append(newArrayAllocationProfile(recommendedIndexingType)); >+ OpNewArray::emit(this, dst, argv.size() ? 
argv[0].get() : VirtualRegister {}, argv.size(), recommendedIndexingType); > return dst; > } > >@@ -3246,9 +2944,7 @@ RegisterID* BytecodeGenerator::emitNewArrayWithSpread(RegisterID* dst, ElementNo > RefPtr<RegisterID> tmp = newTemporary(); > emitNode(tmp.get(), expression); > >- emitOpcode(op_spread); >- instructions().append(argv[i].get()->index()); >- instructions().append(tmp.get()->index()); >+ OpSpread::emit(this, argv[i].get(), tmp.get()); > } else { > ExpressionNode* expression = node->value(); > emitNode(argv[i].get(), expression); >@@ -3258,30 +2954,19 @@ RegisterID* BytecodeGenerator::emitNewArrayWithSpread(RegisterID* dst, ElementNo > } > > unsigned bitVectorIndex = m_codeBlock->addBitVector(WTFMove(bitVector)); >- emitOpcode(op_new_array_with_spread); >- instructions().append(dst->index()); >- instructions().append(argv[0]->index()); // argv >- instructions().append(argv.size()); // argc >- instructions().append(bitVectorIndex); >- >+ OpNewArrayWithSpread::emit(this, dst, argv[0].get(), argv.size(), bitVectorIndex); > return dst; > } > > RegisterID* BytecodeGenerator::emitNewArrayWithSize(RegisterID* dst, RegisterID* length) > { >- emitOpcode(op_new_array_with_size); >- instructions().append(dst->index()); >- instructions().append(length->index()); >- instructions().append(newArrayAllocationProfile(ArrayWithUndecided)); >- >+ OpNewArrayWithSize::emit(this, dst, length); > return dst; > } > > RegisterID* BytecodeGenerator::emitNewRegExp(RegisterID* dst, RegExp* regExp) > { >- emitOpcode(op_new_regexp); >- instructions().append(dst->index()); >- instructions().append(addConstantValue(regExp)->index()); >+ OpNewRegexp::emit(this, dst, addConstantValue(regExp)); > return dst; > } > >@@ -3289,30 +2974,25 @@ void BytecodeGenerator::emitNewFunctionExpressionCommon(RegisterID* dst, Functio > { > unsigned index = m_codeBlock->addFunctionExpr(makeFunction(function)); > >- OpcodeID opcodeID = op_new_func_exp; > switch (function->parseMode()) { > case 
SourceParseMode::GeneratorWrapperFunctionMode: > case SourceParseMode::GeneratorWrapperMethodMode: >- opcodeID = op_new_generator_func_exp; >+ OpNewGeneratorFuncExp::emit(this, dst, scopeRegister(), index); > break; > case SourceParseMode::AsyncFunctionMode: > case SourceParseMode::AsyncMethodMode: > case SourceParseMode::AsyncArrowFunctionMode: >- opcodeID = op_new_async_func_exp; >+ OpNewAsyncFuncExp::emit(this, dst, scopeRegister(), index); > break; > case SourceParseMode::AsyncGeneratorWrapperFunctionMode: > case SourceParseMode::AsyncGeneratorWrapperMethodMode: > ASSERT(Options::useAsyncIterator()); >- opcodeID = op_new_async_generator_func_exp; >+ OpNewAsyncGeneratorFuncExp::emit(this, dst, scopeRegister(), index); > break; > default: >+ OpNewFuncExp::emit(this, dst, scopeRegister(), index); > break; > } >- >- emitOpcode(opcodeID); >- instructions().append(dst->index()); >- instructions().append(scopeRegister()->index()); >- instructions().append(index); > } > > RegisterID* BytecodeGenerator::emitNewFunctionExpression(RegisterID* dst, FuncExprNode* func) >@@ -3345,10 +3025,7 @@ RegisterID* BytecodeGenerator::emitNewDefaultConstructor(RegisterID* dst, Constr > > unsigned index = m_codeBlock->addFunctionExpr(executable); > >- emitOpcode(op_new_func_exp); >- instructions().append(dst->index()); >- instructions().append(scopeRegister()->index()); >- instructions().append(index); >+ OpNewFuncExp::emit(this, dst, scopeRegister(), index); > return dst; > } > >@@ -3356,17 +3033,14 @@ RegisterID* BytecodeGenerator::emitNewFunction(RegisterID* dst, FunctionMetadata > { > unsigned index = m_codeBlock->addFunctionDecl(makeFunction(function)); > if (isGeneratorWrapperParseMode(function->parseMode())) >- emitOpcode(op_new_generator_func); >+ OpNewGeneratorFunc::emit(this, dst, scopeRegister(), index); > else if (function->parseMode() == SourceParseMode::AsyncFunctionMode) >- emitOpcode(op_new_async_func); >+ OpNewAsyncFunc::emit(this, dst, scopeRegister(), index); > else 
if (isAsyncGeneratorWrapperParseMode(function->parseMode())) { > ASSERT(Options::useAsyncIterator()); >- emitOpcode(op_new_async_generator_func); >+ OpNewAsyncGeneratorFunc::emit(this, dst, scopeRegister(), index); > } else >- emitOpcode(op_new_func); >- instructions().append(dst->index()); >- instructions().append(scopeRegister()->index()); >- instructions().append(index); >+ OpNewFunc::emit(this, dst, scopeRegister(), index); > return dst; > } > >@@ -3387,28 +3061,26 @@ void BytecodeGenerator::emitSetFunctionNameIfNeeded(ExpressionNode* valueNode, R > > // FIXME: We should use an op_call to an internal function here instead. > // https://bugs.webkit.org/show_bug.cgi?id=155547 >- emitOpcode(op_set_function_name); >- instructions().append(value->index()); >- instructions().append(name->index()); >+ OpSetFunctionName::emit(this, value, name); > } > > RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) > { >- return emitCall(op_call, dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall); >+ return emitCall<OpCall>(dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall); > } > > RegisterID* BytecodeGenerator::emitCallInTailPosition(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) > { > if (m_inTailPosition) { > m_codeBlock->setHasTailCalls(); >- return emitCall(op_tail_call, dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall); >+ return emitCall<OpTailCall>(dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall); > } >- return emitCall(op_call, dst, 
func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall); >+ return emitCall<OpCall>(dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall); > } > > RegisterID* BytecodeGenerator::emitCallEval(RegisterID* dst, RegisterID* func, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) > { >- return emitCall(op_call_eval, dst, func, NoExpectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall); >+ return emitCall<OpCallEval>(dst, func, NoExpectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall); > } > > ExpectedFunction BytecodeGenerator::expectedFunctionForIdentifier(const Identifier& identifier) >@@ -3429,12 +3101,7 @@ ExpectedFunction BytecodeGenerator::emitExpectedFunctionSnippet(RegisterID* dst, > if (callArguments.argumentCountIncludingThis() >= 2) > return NoExpectedFunction; > >- size_t begin = instructions().size(); >- emitOpcode(op_jneq_ptr); >- instructions().append(func->index()); >- instructions().append(Special::ObjectConstructor); >- instructions().append(realCall->bind(begin, instructions().size())); >- instructions().append(0); >+ OpJneqPtr::emit(this, func, Special::ObjectConstructor, realCall->bind(this)); > > if (dst != ignoredResult()) > emitNewObject(dst); >@@ -3450,23 +3117,14 @@ ExpectedFunction BytecodeGenerator::emitExpectedFunctionSnippet(RegisterID* dst, > if (callArguments.argumentCountIncludingThis() > 2) > return NoExpectedFunction; > >- size_t begin = instructions().size(); >- emitOpcode(op_jneq_ptr); >- instructions().append(func->index()); >- instructions().append(Special::ArrayConstructor); >- instructions().append(realCall->bind(begin, instructions().size())); >- instructions().append(0); >+ OpJneqPtr::emit(this, func, Special::ArrayConstructor, realCall->bind(this)); > > if (dst != ignoredResult()) { > if 
(callArguments.argumentCountIncludingThis() == 2) > emitNewArrayWithSize(dst, callArguments.argumentRegister(0)); > else { > ASSERT(callArguments.argumentCountIncludingThis() == 1); >- emitOpcode(op_new_array); >- instructions().append(dst->index()); >- instructions().append(0); >- instructions().append(0); >- instructions().append(newArrayAllocationProfile(ArrayWithUndecided)); >+ OpNewArray::emit(this, dst, {}, 0, ArrayWithUndecided); > } > } > break; >@@ -3477,16 +3135,16 @@ ExpectedFunction BytecodeGenerator::emitExpectedFunctionSnippet(RegisterID* dst, > return NoExpectedFunction; > } > >- size_t begin = instructions().size(); >- emitOpcode(op_jmp); >- instructions().append(done.bind(begin, instructions().size())); >+ OpJmp::emit(this, done.bind(this)); > emitLabel(realCall.get()); > > return expectedFunction; > } > >-RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) >+template<typename CallOp> >+RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) > { >+ constexpr auto opcodeID = CallOp::opcodeID(); > ASSERT(opcodeID == op_call || opcodeID == op_call_eval || opcodeID == op_tail_call); > ASSERT(func->refCount()); > >@@ -3502,18 +3160,15 @@ RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, Regi > if (elements && !elements->next() && elements->value()->isSpreadExpression()) { > ExpressionNode* expression = static_cast<SpreadExpressionNode*>(elements->value())->expression(); > RefPtr<RegisterID> argumentRegister = emitNode(callArguments.argumentRegister(0), expression); >- emitOpcode(op_spread); >- 
instructions().append(argumentRegister.get()->index()); >- instructions().append(argumentRegister.get()->index()); >+ OpSpread::emit(this, argumentRegister.get(), argumentRegister.get()); > >- RefPtr<RegisterID> thisRegister = move(newTemporary(), callArguments.thisRegister()); >- return emitCallVarargs(opcodeID == op_tail_call ? op_tail_call_varargs : op_call_varargs, dst, func, callArguments.thisRegister(), argumentRegister.get(), newTemporary(), 0, divot, divotStart, divotEnd, debuggableCall); >+ return emitCallVarargs<typename VarArgsOp<CallOp>::type>(dst, func, callArguments.thisRegister(), argumentRegister.get(), newTemporary(), 0, divot, divotStart, divotEnd, debuggableCall); > } > } > RefPtr<RegisterID> argumentRegister; > argumentRegister = expression->emitBytecode(*this, callArguments.argumentRegister(0)); > RefPtr<RegisterID> thisRegister = move(newTemporary(), callArguments.thisRegister()); >- return emitCallVarargs(opcodeID == op_tail_call ? op_tail_call_varargs : op_call_varargs, dst, func, callArguments.thisRegister(), argumentRegister.get(), newTemporary(), 0, divot, divotStart, divotEnd, debuggableCall); >+ return emitCallVarargs<typename VarArgsOp<CallOp>::type>(dst, func, callArguments.thisRegister(), argumentRegister.get(), newTemporary(), 0, divot, divotStart, divotEnd, debuggableCall); > } > for (; n; n = n->m_next) > emitNode(callArguments.argumentRegister(argument++), n); >@@ -3536,18 +3191,9 @@ RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, Regi > emitLogShadowChickenTailIfNecessary(); > > // Emit call. 
>- UnlinkedArrayProfile arrayProfile = newArrayProfile(); >- UnlinkedValueProfile profile = emitProfiledOpcode(opcodeID); > ASSERT(dst); > ASSERT(dst != ignoredResult()); >- instructions().append(dst->index()); >- instructions().append(func->index()); >- instructions().append(callArguments.argumentCountIncludingThis()); >- instructions().append(callArguments.stackOffset()); >- instructions().append(m_codeBlock->addLLIntCallLinkInfo()); >- instructions().append(0); >- instructions().append(arrayProfile); >- instructions().append(profile); >+ CallOp::emit(this, dst, func, callArguments.argumentCountIncludingThis(), callArguments.stackOffset()); > > if (expectedFunction != NoExpectedFunction) > emitLabel(done.get()); >@@ -3557,47 +3203,41 @@ RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, Regi > > RegisterID* BytecodeGenerator::emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) > { >- return emitCallVarargs(op_call_varargs, dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); >+ return emitCallVarargs<OpCallVarargs>(dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); > } > > RegisterID* BytecodeGenerator::emitCallVarargsInTailPosition(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) > { >- return emitCallVarargs(m_inTailPosition ? 
op_tail_call_varargs : op_call_varargs, dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); >+ if (m_inTailPosition) >+ return emitCallVarargs<OpTailCallVarargs>(dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); >+ return emitCallVarargs<OpCallVarargs>(dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); > } > > RegisterID* BytecodeGenerator::emitConstructVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) > { >- return emitCallVarargs(op_construct_varargs, dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); >+ return emitCallVarargs<OpConstructVarargs>(dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); > } > > RegisterID* BytecodeGenerator::emitCallForwardArgumentsInTailPosition(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) > { > ASSERT(m_inTailPosition); >- return emitCallVarargs(op_tail_call_forward_arguments, dst, func, thisRegister, nullptr, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); >+ return emitCallVarargs<OpTailCallForwardArguments>(dst, func, thisRegister, nullptr, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); > } > >-RegisterID* BytecodeGenerator::emitCallVarargs(OpcodeID opcode, RegisterID* dst, RegisterID* func, RegisterID* 
thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) >+template<typename VarargsOp> >+RegisterID* BytecodeGenerator::emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) > { > if (m_shouldEmitDebugHooks && debuggableCall == DebuggableCall::Yes) > emitDebugHook(WillExecuteExpression, divotStart); > > emitExpressionInfo(divot, divotStart, divotEnd); > >- if (opcode == op_tail_call_varargs) >+ if (VarargsOp::opcodeID() == op_tail_call_varargs) > emitLogShadowChickenTailIfNecessary(); > > // Emit call. >- UnlinkedArrayProfile arrayProfile = newArrayProfile(); >- UnlinkedValueProfile profile = emitProfiledOpcode(opcode); > ASSERT(dst != ignoredResult()); >- instructions().append(dst->index()); >- instructions().append(func->index()); >- instructions().append(thisRegister ? thisRegister->index() : 0); >- instructions().append(arguments ? 
arguments->index() : 0); >- instructions().append(firstFreeRegister->index()); >- instructions().append(firstVarArgOffset); >- instructions().append(arrayProfile); >- instructions().append(profile); >+ VarargsOp::emit(this, dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset); > return dst; > } > >@@ -3605,17 +3245,14 @@ void BytecodeGenerator::emitLogShadowChickenPrologueIfNecessary() > { > if (!m_shouldEmitDebugHooks && !Options::alwaysUseShadowChicken()) > return; >- emitOpcode(op_log_shadow_chicken_prologue); >- instructions().append(scopeRegister()->index()); >+ OpLogShadowChickenPrologue::emit(this, scopeRegister()); > } > > void BytecodeGenerator::emitLogShadowChickenTailIfNecessary() > { > if (!m_shouldEmitDebugHooks && !Options::alwaysUseShadowChicken()) > return; >- emitOpcode(op_log_shadow_chicken_tail); >- instructions().append(thisRegister()->index()); >- instructions().append(scopeRegister()->index()); >+ OpLogShadowChickenTail::emit(this, thisRegister(), scopeRegister()); > } > > void BytecodeGenerator::emitCallDefineProperty(RegisterID* newObj, RegisterID* propertyNameRegister, >@@ -3661,18 +3298,9 @@ void BytecodeGenerator::emitCallDefineProperty(RegisterID* newObj, RegisterID* p > else > setter = throwTypeErrorFunction; > >- emitOpcode(op_define_accessor_property); >- instructions().append(newObj->index()); >- instructions().append(propertyNameRegister->index()); >- instructions().append(getter->index()); >- instructions().append(setter->index()); >- instructions().append(emitLoad(nullptr, jsNumber(attributes.rawRepresentation()))->index()); >+ OpDefineAccessorProperty::emit(this, newObj, propertyNameRegister, getter.get(), setter.get(), emitLoad(nullptr, jsNumber(attributes.rawRepresentation()))); > } else { >- emitOpcode(op_define_data_property); >- instructions().append(newObj->index()); >- instructions().append(propertyNameRegister->index()); >- instructions().append(valueRegister->index()); >- 
instructions().append(emitLoad(nullptr, jsNumber(attributes.rawRepresentation()))->index()); >+ OpDefineDataProperty::emit(this, newObj, propertyNameRegister, valueRegister, emitLoad(nullptr, jsNumber(attributes.rawRepresentation()))); > } > } > >@@ -3696,18 +3324,12 @@ RegisterID* BytecodeGenerator::emitReturn(RegisterID* src, ReturnFrom from) > emitLabel(isUndefinedLabel.get()); > emitTDZCheck(&m_thisRegister); > } >- emitUnaryNoDstOp(op_ret, &m_thisRegister); >+ OpRet::emit(this, &m_thisRegister); > emitLabel(isObjectLabel.get()); > } > } > >- return emitUnaryNoDstOp(op_ret, src); >-} >- >-RegisterID* BytecodeGenerator::emitUnaryNoDstOp(OpcodeID opcodeID, RegisterID* src) >-{ >- emitOpcode(opcodeID); >- instructions().append(src->index()); >+ OpRet::emit(this, src); > return src; > } > >@@ -3728,9 +3350,7 @@ RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, > if (elements && !elements->next() && elements->value()->isSpreadExpression()) { > ExpressionNode* expression = static_cast<SpreadExpressionNode*>(elements->value())->expression(); > RefPtr<RegisterID> argumentRegister = emitNode(callArguments.argumentRegister(0), expression); >- emitOpcode(op_spread); >- instructions().append(argumentRegister.get()->index()); >- instructions().append(argumentRegister.get()->index()); >+ OpSpread::emit(this, argumentRegister.get(), argumentRegister.get()); > > move(callArguments.thisRegister(), lazyThis); > RefPtr<RegisterID> thisRegister = move(newTemporary(), callArguments.thisRegister()); >@@ -3759,16 +3379,7 @@ RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, > Ref<Label> done = newLabel(); > expectedFunction = emitExpectedFunctionSnippet(dst, func, expectedFunction, callArguments, done.get()); > >- UnlinkedValueProfile profile = emitProfiledOpcode(op_construct); >- ASSERT(dst != ignoredResult()); >- instructions().append(dst->index()); >- instructions().append(func->index()); >- 
instructions().append(callArguments.argumentCountIncludingThis()); >- instructions().append(callArguments.stackOffset()); >- instructions().append(m_codeBlock->addLLIntCallLinkInfo()); >- instructions().append(0); >- instructions().append(0); >- instructions().append(profile); >+ OpConstruct::emit(this, dst, func, callArguments.argumentCountIncludingThis(), callArguments.stackOffset()); > > if (expectedFunction != NoExpectedFunction) > emitLabel(done.get()); >@@ -3778,25 +3389,18 @@ RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, > > RegisterID* BytecodeGenerator::emitStrcat(RegisterID* dst, RegisterID* src, int count) > { >- emitOpcode(op_strcat); >- instructions().append(dst->index()); >- instructions().append(src->index()); >- instructions().append(count); >- >+ OpStrcat::emit(this, dst, src, count); > return dst; > } > > void BytecodeGenerator::emitToPrimitive(RegisterID* dst, RegisterID* src) > { >- emitOpcode(op_to_primitive); >- instructions().append(dst->index()); >- instructions().append(src->index()); >+ OpToPrimitive::emit(this, dst, src); > } > > void BytecodeGenerator::emitGetScope() > { >- emitOpcode(op_get_scope); >- instructions().append(scopeRegister()->index()); >+ OpGetScope::emit(this, scopeRegister()); > } > > RegisterID* BytecodeGenerator::emitPushWithScope(RegisterID* objectScope) >@@ -3805,10 +3409,7 @@ RegisterID* BytecodeGenerator::emitPushWithScope(RegisterID* objectScope) > RegisterID* newScope = newBlockScopeVariable(); > newScope->ref(); > >- emitOpcode(op_push_with_scope); >- instructions().append(newScope->index()); >- instructions().append(scopeRegister()->index()); >- instructions().append(objectScope->index()); >+ OpPushWithScope::emit(this, newScope, scopeRegister(), objectScope); > > move(scopeRegister(), newScope); > m_lexicalScopeStack.append({ nullptr, newScope, true, 0 }); >@@ -3818,9 +3419,7 @@ RegisterID* BytecodeGenerator::emitPushWithScope(RegisterID* objectScope) > > RegisterID* 
BytecodeGenerator::emitGetParentScope(RegisterID* dst, RegisterID* scope) > { >- emitOpcode(op_get_parent_scope); >- instructions().append(dst->index()); >- instructions().append(scope->index()); >+ OpGetParentScope::emit(this, dst, scope); > return dst; > } > >@@ -3845,9 +3444,7 @@ void BytecodeGenerator::emitDebugHook(DebugHookType debugHookType, const JSTextP > return; > > emitExpressionInfo(divot, divot, divot); >- emitOpcode(op_debug); >- instructions().append(debugHookType); >- instructions().append(false); >+ OpDebug::emit(this, debugHookType, false); > } > > void BytecodeGenerator::emitDebugHook(DebugHookType debugHookType, unsigned line, unsigned charOffset, unsigned lineStart) >@@ -4006,7 +3603,7 @@ void BytecodeGenerator::popTry(TryData* tryData, Label& end) > > void BytecodeGenerator::emitCatch(RegisterID* exceptionRegister, RegisterID* thrownValueRegister, TryData* data) > { >- m_catchesToEmit.append(CatchEntry { data, exceptionRegister->index(), thrownValueRegister->index() }); >+ m_catchesToEmit.append(CatchEntry { data, exceptionRegister, thrownValueRegister }); > } > > void BytecodeGenerator::restoreScopeRegister(int lexicalScopeIndex) >@@ -4062,16 +3659,12 @@ void BytecodeGenerator::emitThrowStaticError(ErrorType errorType, RegisterID* ra > { > RefPtr<RegisterID> message = newTemporary(); > emitToString(message.get(), raw); >- emitOpcode(op_throw_static_error); >- instructions().append(message->index()); >- instructions().append(static_cast<unsigned>(errorType)); >+ OpThrowStaticError::emit(this, message.get(), errorType); > } > > void BytecodeGenerator::emitThrowStaticError(ErrorType errorType, const Identifier& message) > { >- emitOpcode(op_throw_static_error); >- instructions().append(addConstantValue(addStringConstant(message))->index()); >- instructions().append(static_cast<unsigned>(errorType)); >+ OpThrowStaticError::emit(this, addConstantValue(addStringConstant(message)), errorType); > } > > void 
BytecodeGenerator::emitThrowReferenceError(const String& message) >@@ -4153,21 +3746,18 @@ void BytecodeGenerator::beginSwitch(RegisterID* scrutineeRegister, SwitchInfo::S > SwitchInfo info = { static_cast<uint32_t>(instructions().size()), type }; > switch (type) { > case SwitchInfo::SwitchImmediate: >- emitOpcode(op_switch_imm); >+ OpSwitchImm::emit(this, 0, 0, scrutineeRegister); > break; > case SwitchInfo::SwitchCharacter: >- emitOpcode(op_switch_char); >+ OpSwitchChar::emit(this, 0, 0, scrutineeRegister); > break; > case SwitchInfo::SwitchString: >- emitOpcode(op_switch_string); >+ OpSwitchString::emit(this, 0, 0, scrutineeRegister); > break; > default: > RELEASE_ASSERT_NOT_REACHED(); > } > >- instructions().append(0); // place holder for table index >- instructions().append(0); // place holder for default target >- instructions().append(scrutineeRegister->index()); > m_switchContextStack.append(info); > } > >@@ -4208,7 +3798,7 @@ static void prepareJumpTableForSwitch( > // We're emitting this after the clause labels should have been fixed, so > // the labels should not be "forward" references > ASSERT(!labels[i]->isForward()); >- jumpTable.add(keyGetter(nodes[i], min, max), labels[i]->bind(switchAddress, switchAddress + 3)); >+ jumpTable.add(keyGetter(nodes[i], min, max), labels[i]->bind(switchAddress)); > } > } > >@@ -4221,7 +3811,7 @@ static void prepareJumpTableForStringSwitch(UnlinkedStringJumpTable& jumpTable, > > ASSERT(nodes[i]->isString()); > StringImpl* clause = static_cast<StringNode*>(nodes[i])->value().impl(); >- jumpTable.offsetTable.add(clause, UnlinkedStringJumpTable::OffsetLocation { labels[i]->bind(switchAddress, switchAddress + 3) }); >+ jumpTable.offsetTable.add(clause, UnlinkedStringJumpTable::OffsetLocation { labels[i]->bind(switchAddress) }); > } > } > >@@ -4229,12 +3819,10 @@ void BytecodeGenerator::endSwitch(uint32_t clauseCount, const Vector<Ref<Label>, > { > SwitchInfo switchInfo = m_switchContextStack.last(); > 
m_switchContextStack.removeLast(); >- >- switch (switchInfo.switchType) { >- case SwitchInfo::SwitchImmediate: >- case SwitchInfo::SwitchCharacter: { >- instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfSwitchJumpTables(); >- instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel.bind(switchInfo.bytecodeOffset, switchInfo.bytecodeOffset + 3); >+ >+ auto handleSwitch = [&](auto* op) { >+ op->setTableIndex(m_codeBlock->numberOfSwitchJumpTables()); >+ op->setDefaultOffset(defaultLabel.bind(switchInfo.bytecodeOffset)); > > UnlinkedSimpleJumpTable& jumpTable = m_codeBlock->addSwitchJumpTable(); > prepareJumpTableForSwitch( >@@ -4242,12 +3830,24 @@ void BytecodeGenerator::endSwitch(uint32_t clauseCount, const Vector<Ref<Label>, > switchInfo.switchType == SwitchInfo::SwitchImmediate > ? keyForImmediateSwitch > : keyForCharacterSwitch); >+ }; >+ >+ switch (switchInfo.switchType) { >+ case SwitchInfo::SwitchImmediate: { >+ auto* op = m_writer.ref(switchInfo.bytecodeOffset)->cast<OpSwitchImm>(); >+ handleSwitch(op); >+ break; >+ } >+ case SwitchInfo::SwitchCharacter: { >+ auto* op = m_writer.ref(switchInfo.bytecodeOffset)->cast<OpSwitchChar>(); >+ handleSwitch(op); > break; > } > > case SwitchInfo::SwitchString: { >- instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfStringSwitchJumpTables(); >- instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel.bind(switchInfo.bytecodeOffset, switchInfo.bytecodeOffset + 3); >+ auto* op = m_writer.ref(switchInfo.bytecodeOffset)->cast<OpSwitchString>(); >+ op->setTableIndex(m_codeBlock->numberOfSwitchJumpTables()); >+ op->setDefaultOffset(defaultLabel.bind(switchInfo.bytecodeOffset)); > > UnlinkedStringJumpTable& jumpTable = m_codeBlock->addStringSwitchJumpTable(); > prepareJumpTableForStringSwitch(jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes); >@@ -4459,114 +4059,79 @@ RegisterID* BytecodeGenerator::emitGetGlobalPrivate(RegisterID* dst, const Ident > > RegisterID* 
BytecodeGenerator::emitGetEnumerableLength(RegisterID* dst, RegisterID* base) > { >- emitOpcode(op_get_enumerable_length); >- instructions().append(dst->index()); >- instructions().append(base->index()); >+ OpGetEnumerableLength::emit(this, dst, base); > return dst; > } > > RegisterID* BytecodeGenerator::emitHasGenericProperty(RegisterID* dst, RegisterID* base, RegisterID* propertyName) > { >- emitOpcode(op_has_generic_property); >- instructions().append(dst->index()); >- instructions().append(base->index()); >- instructions().append(propertyName->index()); >+ OpHasGenericProperty::emit(this, dst, base, propertyName); > return dst; > } > > RegisterID* BytecodeGenerator::emitHasIndexedProperty(RegisterID* dst, RegisterID* base, RegisterID* propertyName) > { >- UnlinkedArrayProfile arrayProfile = newArrayProfile(); >- emitOpcode(op_has_indexed_property); >- instructions().append(dst->index()); >- instructions().append(base->index()); >- instructions().append(propertyName->index()); >- instructions().append(arrayProfile); >+ OpHasIndexedProperty::emit(this, dst, base, propertyName); > return dst; > } > > RegisterID* BytecodeGenerator::emitHasStructureProperty(RegisterID* dst, RegisterID* base, RegisterID* propertyName, RegisterID* enumerator) > { >- emitOpcode(op_has_structure_property); >- instructions().append(dst->index()); >- instructions().append(base->index()); >- instructions().append(propertyName->index()); >- instructions().append(enumerator->index()); >+ OpHasStructureProperty::emit(this, dst, base, propertyName, enumerator); > return dst; > } > > RegisterID* BytecodeGenerator::emitGetPropertyEnumerator(RegisterID* dst, RegisterID* base) > { >- emitOpcode(op_get_property_enumerator); >- instructions().append(dst->index()); >- instructions().append(base->index()); >+ OpGetPropertyEnumerator::emit(this, dst, base); > return dst; > } > > RegisterID* BytecodeGenerator::emitEnumeratorStructurePropertyName(RegisterID* dst, RegisterID* enumerator, RegisterID* 
index) > { >- emitOpcode(op_enumerator_structure_pname); >- instructions().append(dst->index()); >- instructions().append(enumerator->index()); >- instructions().append(index->index()); >+ OpEnumeratorStructurePname::emit(this, dst, enumerator, index); > return dst; > } > > RegisterID* BytecodeGenerator::emitEnumeratorGenericPropertyName(RegisterID* dst, RegisterID* enumerator, RegisterID* index) > { >- emitOpcode(op_enumerator_generic_pname); >- instructions().append(dst->index()); >- instructions().append(enumerator->index()); >- instructions().append(index->index()); >+ OpEnumeratorGenericPname::emit(this, dst, enumerator, index); > return dst; > } > > RegisterID* BytecodeGenerator::emitToIndexString(RegisterID* dst, RegisterID* index) > { >- emitOpcode(op_to_index_string); >- instructions().append(dst->index()); >- instructions().append(index->index()); >+ OpToIndexString::emit(this, dst, index); > return dst; > } > > RegisterID* BytecodeGenerator::emitIsCellWithType(RegisterID* dst, RegisterID* src, JSType type) > { >- emitOpcode(op_is_cell_with_type); >- instructions().append(dst->index()); >- instructions().append(src->index()); >- instructions().append(type); >+ OpIsCellWithType::emit(this, dst, src, type); > return dst; > } > > RegisterID* BytecodeGenerator::emitIsObject(RegisterID* dst, RegisterID* src) > { >- emitOpcode(op_is_object); >- instructions().append(dst->index()); >- instructions().append(src->index()); >+ OpIsObject::emit(this, dst, src); > return dst; > } > > RegisterID* BytecodeGenerator::emitIsNumber(RegisterID* dst, RegisterID* src) > { >- emitOpcode(op_is_number); >- instructions().append(dst->index()); >- instructions().append(src->index()); >+ OpIsNumber::emit(this, dst, src); > return dst; > } > > RegisterID* BytecodeGenerator::emitIsUndefined(RegisterID* dst, RegisterID* src) > { >- emitOpcode(op_is_undefined); >- instructions().append(dst->index()); >- instructions().append(src->index()); >+ OpIsUndefined::emit(this, dst, src); > 
return dst; > } > > RegisterID* BytecodeGenerator::emitIsEmpty(RegisterID* dst, RegisterID* src) > { >- emitOpcode(op_is_empty); >- instructions().append(dst->index()); >- instructions().append(src->index()); >+ OpIsEmpty::emit(this, dst, src); > return dst; > } > >@@ -4771,14 +4336,9 @@ void BytecodeGenerator::invalidateForInContextForLocal(RegisterID* localRegister > RegisterID* BytecodeGenerator::emitRestParameter(RegisterID* result, unsigned numParametersToSkip) > { > RefPtr<RegisterID> restArrayLength = newTemporary(); >- emitOpcode(op_get_rest_length); >- instructions().append(restArrayLength->index()); >- instructions().append(numParametersToSkip); >+ OpGetRestLength::emit(this, restArrayLength.get(), numParametersToSkip); > >- emitOpcode(op_create_rest); >- instructions().append(result->index()); >- instructions().append(restArrayLength->index()); >- instructions().append(numParametersToSkip); >+ OpCreateRest::emit(this, result, restArrayLength.get(), numParametersToSkip); > > return result; > } >@@ -4789,9 +4349,7 @@ void BytecodeGenerator::emitRequireObjectCoercible(RegisterID* value, const Stri > // thus incorrectly throws a TypeError for interfaces like HTMLAllCollection. 
> Ref<Label> target = newLabel(); > size_t begin = instructions().size(); >- emitOpcode(op_jneq_null); >- instructions().append(value->index()); >- instructions().append(target->bind(begin, instructions().size())); >+ OpJneqNull::emit(this, value, target->bind(begin)); > emitThrowTypeError(error); > emitLabel(target.get()); > } >@@ -4822,10 +4380,7 @@ void BytecodeGenerator::emitYieldPoint(RegisterID* argument, JSAsyncGeneratorFun > Vector<TryContext> savedTryContextStack; > m_tryContextStack.swap(savedTryContextStack); > >- emitOpcode(op_yield); >- instructions().append(generatorFrameRegister()->index()); >- instructions().append(yieldPointIndex); >- instructions().append(argument->index()); >+ OpYield::emit(this, generatorFrameRegister(), yieldPointIndex, argument); > > // Restore the try contexts, which start offset is updated to the merge point. > m_tryContextStack.swap(savedTryContextStack); >@@ -4838,11 +4393,11 @@ RegisterID* BytecodeGenerator::emitYield(RegisterID* argument, JSAsyncGeneratorF > > Ref<Label> normalLabel = newLabel(); > RefPtr<RegisterID> condition = newTemporary(); >- emitEqualityOp(op_stricteq, condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::NormalMode)))); >+ emitEqualityOp<OpStricteq>(condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::NormalMode)))); > emitJumpIfTrue(condition.get(), normalLabel.get()); > > Ref<Label> throwLabel = newLabel(); >- emitEqualityOp(op_stricteq, condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ThrowMode)))); >+ emitEqualityOp<OpStricteq>(condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ThrowMode)))); > emitJumpIfTrue(condition.get(), throwLabel.get()); > // 
Return. > { >@@ -4891,7 +4446,7 @@ RegisterID* BytecodeGenerator::emitGetAsyncIterator(RegisterID* argument, Throwa > Ref<Label> asyncIteratorFound = newLabel(); > Ref<Label> iteratorReceived = newLabel(); > >- emitJumpIfTrue(emitUnaryOp(op_eq_null, newTemporary(), iterator.get()), asyncIteratorNotFound.get()); >+ emitJumpIfTrue(emitUnaryOp<OpEqNull>(newTemporary(), iterator.get()), asyncIteratorNotFound.get()); > > emitJump(asyncIteratorFound.get()); > emitLabel(asyncIteratorNotFound.get()); >@@ -4950,10 +4505,10 @@ RegisterID* BytecodeGenerator::emitDelegateYield(RegisterID* argument, Throwable > Ref<Label> returnLabel = newLabel(); > { > RefPtr<RegisterID> condition = newTemporary(); >- emitEqualityOp(op_stricteq, condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::NormalMode)))); >+ emitEqualityOp<OpStricteq>(condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::NormalMode)))); > emitJumpIfTrue(condition.get(), normalLabel.get()); > >- emitEqualityOp(op_stricteq, condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ReturnMode)))); >+ emitEqualityOp<OpStricteq>(condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ReturnMode)))); > emitJumpIfTrue(condition.get(), returnLabel.get()); > > // Fallthrough to ThrowMode. 
>@@ -5120,7 +4675,7 @@ bool BytecodeGenerator::emitReturnViaFinallyIfNeeded(RegisterID* returnRegister) > void BytecodeGenerator::emitFinallyCompletion(FinallyContext& context, RegisterID* completionTypeRegister, Label& normalCompletionLabel) > { > if (context.numberOfBreaksOrContinues() || context.handlesReturns()) { >- emitJumpIf(op_stricteq, completionTypeRegister, CompletionType::Normal, normalCompletionLabel); >+ emitJumpIf<OpStricteq>(completionTypeRegister, CompletionType::Normal, normalCompletionLabel); > > FinallyContext* outerContext = context.outerContext(); > >@@ -5130,7 +4685,7 @@ void BytecodeGenerator::emitFinallyCompletion(FinallyContext& context, RegisterI > for (size_t i = 0; i < numberOfJumps; i++) { > Ref<Label> nextLabel = newLabel(); > auto& jump = context.jumps(i); >- emitJumpIf(op_nstricteq, completionTypeRegister, jump.jumpID, nextLabel.get()); >+ emitJumpIf<OpNstricteq>(completionTypeRegister, jump.jumpID, nextLabel.get()); > > restoreScopeRegister(jump.targetLexicalScopeIndex); > emitSetCompletionType(CompletionType::Normal); >@@ -5143,13 +4698,13 @@ void BytecodeGenerator::emitFinallyCompletion(FinallyContext& context, RegisterI > // We are not the outermost finally. > bool hasBreaksOrContinuesNotCoveredByJumps = context.numberOfBreaksOrContinues() > numberOfJumps; > if (hasBreaksOrContinuesNotCoveredByJumps || context.handlesReturns()) >- emitJumpIf(op_nstricteq, completionTypeRegister, CompletionType::Throw, *outerContext->finallyLabel()); >+ emitJumpIf<OpNstricteq>(completionTypeRegister, CompletionType::Throw, *outerContext->finallyLabel()); > > } else { > // We are the outermost finally. 
> if (context.handlesReturns()) { > Ref<Label> notReturnLabel = newLabel(); >- emitJumpIf(op_nstricteq, completionTypeRegister, CompletionType::Return, notReturnLabel.get()); >+ emitJumpIf<OpNstricteq>(completionTypeRegister, CompletionType::Return, notReturnLabel.get()); > > emitWillLeaveCallFrameDebugHook(); > emitReturn(completionValueRegister(), ReturnFrom::Finally); >@@ -5158,7 +4713,7 @@ void BytecodeGenerator::emitFinallyCompletion(FinallyContext& context, RegisterI > } > } > } >- emitJumpIf(op_nstricteq, completionTypeRegister, CompletionType::Throw, normalCompletionLabel); >+ emitJumpIf<OpNstricteq>(completionTypeRegister, CompletionType::Throw, normalCompletionLabel); > emitThrow(completionValueRegister()); > } > >@@ -5183,69 +4738,85 @@ void BytecodeGenerator::releaseCompletionRecordRegisters() > m_completionValueRegister = nullptr; > } > >-void BytecodeGenerator::emitJumpIf(OpcodeID compareOpcode, RegisterID* completionTypeRegister, CompletionType type, Label& jumpTarget) >+template<typename CompareOp> >+void BytecodeGenerator::emitJumpIf(RegisterID* completionTypeRegister, CompletionType type, Label& jumpTarget) > { > RefPtr<RegisterID> tempRegister = newTemporary(); > RegisterID* valueConstant = addConstantValue(jsNumber(static_cast<int>(type))); > OperandTypes operandTypes = OperandTypes(ResultType::numberTypeIsInt32(), ResultType::unknownType()); > >- auto equivalenceResult = emitBinaryOp(compareOpcode, tempRegister.get(), valueConstant, completionTypeRegister, operandTypes); >+ auto equivalenceResult = emitBinaryOp<CompareOp>(tempRegister.get(), valueConstant, completionTypeRegister, operandTypes); > emitJumpIfTrue(equivalenceResult, jumpTarget); > } > >-void StructureForInContext::finalize(BytecodeGenerator& generator) >+void StructureForInContext::finalize(BytecodeGenerator& /*generator*/) > { > if (isValid()) > return; > >- for (const auto& instTuple : m_getInsts) { >- unsigned instIndex = std::get<0>(instTuple); >- int propertyRegIndex = 
std::get<1>(instTuple); >- UnlinkedValueProfile valueProfile = std::get<2>(instTuple); >- OpcodeID op = generator.instructions()[instIndex].u.opcode; >- RELEASE_ASSERT(op == op_get_direct_pname); >- ASSERT(opcodeLength(op_get_direct_pname) == 7); >- ASSERT(opcodeLength(op_get_by_val) == 6); >- >- // 0. Change the opcode to get_by_val. >- generator.instructions()[instIndex].u.opcode = op_get_by_val; >- // 1. dst stays the same. >- // 2. base stays the same. >- // 3. property gets switched to the original property. >- generator.instructions()[instIndex + 3].u.operand = propertyRegIndex; >- // 4. add an array profile. >- generator.instructions()[instIndex + 4].u.unsignedValue = generator.newArrayProfile(); >- // 5. set the result value profile. >- generator.instructions()[instIndex + 5].u.unsignedValue = valueProfile; >- // 6. nop out the last instruction word. >- generator.instructions()[instIndex + 6].u.opcode = op_nop; >- } >-} >- >-void IndexedForInContext::finalize(BytecodeGenerator& generator) >+ // TODO >+ //for (const auto& instTuple : m_getInsts) { >+ //unsigned instIndex = std::get<0>(instTuple); >+ //int propertyRegIndex = std::get<1>(instTuple); >+ //OpcodeID op = generator.instructions()[instIndex].u.opcode; >+ //RELEASE_ASSERT(op == op_get_direct_pname); >+ //ASSERT(opcodeLength(op_get_direct_pname) == 7); >+ //ASSERT(opcodeLength(op_get_by_val) == 6); >+ >+ //// 0. Change the opcode to get_by_val. >+ //generator.instructions()[instIndex].u.opcode = op_get_by_val; >+ //// 1. dst stays the same. >+ //// 2. base stays the same. >+ //// 3. property gets switched to the original property. >+ //generator.instructions()[instIndex + 3].u.operand = propertyRegIndex; >+ //// 4. add an array profile. >+ //generator.instructions()[instIndex + 4].u.unsignedValue = generator.newArrayProfile(); >+ //// TODO: do we need this step? >+ //// 5. set the result value profile. >+ ////generator.instructions()[instIndex + 5].u.unsignedValue = valueProfile; >+ //// 6. 
nop out the last instruction word. >+ //generator.instructions()[instIndex + 6].u.opcode = op_nop; >+ //} >+} >+ >+void IndexedForInContext::finalize(BytecodeGenerator& /*generator*/) > { > if (isValid()) > return; > >- for (const auto& instPair : m_getInsts) { >- unsigned instIndex = instPair.first; >- int propertyRegIndex = instPair.second; >- OpcodeID op = generator.instructions()[instIndex].u.opcode; >- RELEASE_ASSERT(op == op_get_by_val); >- // We just need to perform the get_by_val with the original property here, >- // not the indexed one. >- generator.instructions()[instIndex + 3].u.operand = propertyRegIndex; >+ // TODO >+ //for (const auto& instPair : m_getInsts) { >+ //unsigned instIndex = instPair.first; >+ //int propertyRegIndex = instPair.second; >+ //OpcodeID op = generator.instructions()[instIndex].u.opcode; >+ //RELEASE_ASSERT(op == op_get_by_val); >+ //// We just need to perform the get_by_val with the original property here, >+ //// not the indexed one. >+ //generator.instructions()[instIndex + 3].u.operand = propertyRegIndex; >+ //} >+} >+ >+void StaticPropertyAnalysis::record() >+{ >+ auto* instruction = m_instructionRef.ptr(); >+ auto size = m_propertyIndexes.size(); >+ switch (instruction->opcodeID()) { >+ case OpNewObject::opcodeID(): >+ instruction->cast<OpNewObject>()->setInlineCapacity(size); >+ return; >+ case OpCreateThis::opcodeID(): >+ instruction->cast<OpCreateThis>()->setInlineCapacity(size); >+ return; >+ default: >+ ASSERT_NOT_REACHED(); > } > } > > void BytecodeGenerator::emitToThis() > { > m_codeBlock->addPropertyAccessInstruction(instructions().size()); >- UnlinkedValueProfile profile = emitProfiledOpcode(op_to_this); >- instructions().append(kill(&m_thisRegister)); >- instructions().append(0); >- instructions().append(0); >- instructions().append(profile); >+ >+ OpToThis::emit(this, kill(&m_thisRegister)); > } > > } // namespace JSC >diff --git a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h 
b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h >index 8ac6bc1e88ef9ec86d88461d1617515fd0e3ed59..990245015ba2679f1d526e5412bf73455b75276f 100644 >--- a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h >+++ b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h >@@ -41,6 +41,7 @@ > #include "LabelScope.h" > #include "Nodes.h" > #include "ParserError.h" >+#include "ProfileTypeBytecodeFlag.h" > #include "RegisterID.h" > #include "StaticPropertyAnalyzer.h" > #include "SymbolTable.h" >@@ -208,7 +209,7 @@ namespace JSC { > > class StructureForInContext : public ForInContext { > public: >- using GetInst = std::tuple<unsigned, int, UnlinkedValueProfile>; >+ using GetInst = std::tuple<unsigned, int>; > > StructureForInContext(RegisterID* localRegister, RegisterID* indexRegister, RegisterID* propertyRegister, RegisterID* enumeratorRegister) > : ForInContext(localRegister) >@@ -227,9 +228,9 @@ namespace JSC { > RegisterID* property() const { return m_propertyRegister.get(); } > RegisterID* enumerator() const { return m_enumeratorRegister.get(); } > >- void addGetInst(unsigned instIndex, int propertyRegIndex, UnlinkedValueProfile valueProfile) >+ void addGetInst(unsigned instIndex, int propertyRegIndex) > { >- m_getInsts.append(GetInst { instIndex, propertyRegIndex, valueProfile }); >+ m_getInsts.append(GetInst { instIndex, propertyRegIndex }); > } > > void finalize(BytecodeGenerator&); >@@ -344,17 +345,11 @@ namespace JSC { > TryData* tryData; > }; > >- enum ProfileTypeBytecodeFlag { >- ProfileTypeBytecodeClosureVar, >- ProfileTypeBytecodeLocallyResolved, >- ProfileTypeBytecodeDoesNotHaveGlobalID, >- ProfileTypeBytecodeFunctionArgument, >- ProfileTypeBytecodeFunctionReturnStatement >- }; >- > class BytecodeGenerator { > WTF_MAKE_FAST_ALLOCATED; > WTF_MAKE_NONCOPYABLE(BytecodeGenerator); >+ >+ friend class Label; > public: > typedef DeclarationStacks::FunctionStack FunctionStack; > >@@ -495,6 +490,22 @@ namespace JSC { > n->emitBytecode(*this, dst); > } > >+ 
void recordOpcode(OpcodeID opcodeID) >+ { >+#ifndef NDEBUG >+ // TODO >+ //ASSERT(opcodePosition - m_lastOpcodePosition == opcodeLength(m_lastOpcodeID) || m_lastOpcodeID == op_end); >+#endif >+ // TODO >+ //m_lastInstruction = m_writer.ref(); >+ m_lastOpcodeID = opcodeID; >+ }; >+ >+ unsigned addMetadataFor(OpcodeID opcodeID) >+ { >+ return m_codeBlock->addMetadataFor(opcodeID); >+ } >+ > void emitNode(StatementNode* n) > { > emitNode(nullptr, n); >@@ -570,31 +581,32 @@ namespace JSC { > ASSERT(divot.offset >= divotStart.offset); > ASSERT(divotEnd.offset >= divot.offset); > >- int sourceOffset = m_scopeNode->source().startOffset(); >- unsigned firstLine = m_scopeNode->source().firstLine().oneBasedInt(); >+ //int sourceOffset = m_scopeNode->source().startOffset(); >+ //unsigned firstLine = m_scopeNode->source().firstLine().oneBasedInt(); > >- int divotOffset = divot.offset - sourceOffset; >- int startOffset = divot.offset - divotStart.offset; >- int endOffset = divotEnd.offset - divot.offset; >+ //int divotOffset = divot.offset - sourceOffset; >+ //int startOffset = divot.offset - divotStart.offset; >+ //int endOffset = divotEnd.offset - divot.offset; > >- unsigned line = divot.line; >- ASSERT(line >= firstLine); >- line -= firstLine; >+ //unsigned line = divot.line; >+ //ASSERT(line >= firstLine); >+ //line -= firstLine; > >- int lineStart = divot.lineStartOffset; >- if (lineStart > sourceOffset) >- lineStart -= sourceOffset; >- else >- lineStart = 0; >+ //int lineStart = divot.lineStartOffset; >+ //if (lineStart > sourceOffset) >+ //lineStart -= sourceOffset; >+ //else >+ //lineStart = 0; > >- if (divotOffset < lineStart) >- return; >+ //if (divotOffset < lineStart) >+ //return; > >- unsigned column = divotOffset - lineStart; >+ //unsigned column = divotOffset - lineStart; > >- unsigned instructionOffset = instructions().size(); >- if (!m_isBuiltinFunction) >- m_codeBlock->addExpressionInfo(instructionOffset, divotOffset, startOffset, endOffset, line, column); >+ 
// TODO >+ //unsigned instructionOffset = instructions().size(); >+ //if (!m_isBuiltinFunction) >+ //m_codeBlock->addExpressionInfo(instructionOffset, divotOffset, startOffset, endOffset, line, column); > } > > >@@ -654,11 +666,46 @@ namespace JSC { > RegisterID* emitLoad(RegisterID* dst, IdentifierSet& excludedList); > RegisterID* emitLoadGlobalObject(RegisterID* dst); > >- RegisterID* emitUnaryOp(OpcodeID, RegisterID* dst, RegisterID* src); >+ template<typename UnaryOp, typename = std::enable_if_t<UnaryOp::opcodeID() != op_negate>> >+ RegisterID* emitUnaryOp(RegisterID* dst, RegisterID* src) >+ { >+ //ASSERT_WITH_MESSAGE(op_negate != UnaryOp::opcodeID(), "op_negate has an Arith Profile."); >+ UnaryOp::emit(this, dst, src); >+ return dst; >+ } >+ > RegisterID* emitUnaryOp(OpcodeID, RegisterID* dst, RegisterID* src, OperandTypes); >- RegisterID* emitUnaryOpProfiled(OpcodeID, RegisterID* dst, RegisterID* src); >- RegisterID* emitBinaryOp(OpcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes); >- RegisterID* emitEqualityOp(OpcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2); >+ >+ template<typename BinaryOp> >+ std::enable_if_t< >+ BinaryOp::opcodeID() != op_bitor && BinaryOp::opcodeID() != op_bitand && >+ BinaryOp::opcodeID() != op_bitxor && BinaryOp::opcodeID() != op_add && >+ BinaryOp::opcodeID() != op_mul && BinaryOp::opcodeID() != op_sub && >+ BinaryOp::opcodeID() != op_div >+ , RegisterID*> >+ emitBinaryOp(RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes) >+ { >+ BinaryOp::emit(this, dst, src1, src2); >+ return dst; >+ } >+ >+ template<typename BinaryOp> >+ std::enable_if_t< >+ BinaryOp::opcodeID() == op_bitor || BinaryOp::opcodeID() == op_bitand || >+ BinaryOp::opcodeID() == op_bitxor || BinaryOp::opcodeID() == op_add || >+ BinaryOp::opcodeID() == op_mul || BinaryOp::opcodeID() == op_sub || >+ BinaryOp::opcodeID() == op_div >+ , RegisterID*> >+ emitBinaryOp(RegisterID* dst, RegisterID* src1, RegisterID* src2, 
OperandTypes types) >+ { >+ BinaryOp::emit(this, dst, src1, src2, types); >+ return dst; >+ } >+ >+ RegisterID* emitBinaryOp(OpcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes types); >+ >+ template<typename EqOp> >+ RegisterID* emitEqualityOp(RegisterID* dst, RegisterID* src1, RegisterID* src2); > RegisterID* emitUnaryNoDstOp(OpcodeID, RegisterID* src); > > RegisterID* emitCreateThis(RegisterID* dst); >@@ -685,8 +732,8 @@ namespace JSC { > RegisterID* moveLinkTimeConstant(RegisterID* dst, LinkTimeConstant); > RegisterID* moveEmptyValue(RegisterID* dst); > >- RegisterID* emitToNumber(RegisterID* dst, RegisterID* src) { return emitUnaryOpProfiled(op_to_number, dst, src); } >- RegisterID* emitToString(RegisterID* dst, RegisterID* src) { return emitUnaryOp(op_to_string, dst, src); } >+ RegisterID* emitToNumber(RegisterID* dst, RegisterID* src); >+ RegisterID* emitToString(RegisterID* dst, RegisterID* src); > RegisterID* emitToObject(RegisterID* dst, RegisterID* src, const Identifier& message); > RegisterID* emitInc(RegisterID* srcDst); > RegisterID* emitDec(RegisterID* srcDst); >@@ -694,7 +741,7 @@ namespace JSC { > RegisterID* emitOverridesHasInstance(RegisterID* dst, RegisterID* constructor, RegisterID* hasInstanceValue); > RegisterID* emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* basePrototype); > RegisterID* emitInstanceOfCustom(RegisterID* dst, RegisterID* value, RegisterID* constructor, RegisterID* hasInstanceValue); >- RegisterID* emitTypeOf(RegisterID* dst, RegisterID* src) { return emitUnaryOp(op_typeof, dst, src); } >+ RegisterID* emitTypeOf(RegisterID* dst, RegisterID* src); > RegisterID* emitInByVal(RegisterID* dst, RegisterID* property, RegisterID* base); > RegisterID* emitInById(RegisterID* dst, RegisterID* base, const Identifier& property); > >@@ -779,6 +826,12 @@ namespace JSC { > void emitJumpIfNotFunctionCall(RegisterID* cond, Label& target); > void emitJumpIfNotFunctionApply(RegisterID* cond, Label& target); 
> >+ template<typename BinOp, typename JmpOp> >+ bool fuseCompareAndJump(RegisterID* cond, Label& target, bool swapOperands = false); >+ >+ template<typename UnaryOp, typename JmpOp> >+ bool fuseTestAndJmp(RegisterID* cond, Label& target); >+ > void emitEnter(); > void emitCheckTraps(); > >@@ -914,7 +967,8 @@ namespace JSC { > move(completionValueRegister(), reg); > } > >- void emitJumpIf(OpcodeID compareOpcode, RegisterID* completionTypeRegister, CompletionType, Label& jumpTarget); >+ template<typename CompareOp> >+ void emitJumpIf(RegisterID* completionTypeRegister, CompletionType, Label& jumpTarget); > > bool emitJumpViaFinallyIfNeeded(int targetLabelScopeDepth, Label& jumpTarget); > bool emitReturnViaFinallyIfNeeded(RegisterID* returnRegister); >@@ -1011,18 +1065,14 @@ namespace JSC { > void emitOpcode(OpcodeID); > UnlinkedArrayAllocationProfile newArrayAllocationProfile(IndexingType); > UnlinkedObjectAllocationProfile newObjectAllocationProfile(); >- UnlinkedValueProfile emitProfiledOpcode(OpcodeID); >- int kill(RegisterID* dst) >+ RegisterID* kill(RegisterID* dst) > { >- int index = dst->index(); >- m_staticPropertyAnalyzer.kill(index); >- return index; >+ m_staticPropertyAnalyzer.kill(dst); >+ return dst; > } > >- void retrieveLastBinaryOp(int& dstIndex, int& src1Index, int& src2Index); > void retrieveLastUnaryOp(int& dstIndex, int& srcIndex); >- ALWAYS_INLINE void rewindBinaryOp(); >- ALWAYS_INLINE void rewindUnaryOp(); >+ ALWAYS_INLINE void rewind(); > > void allocateCalleeSaveSpace(); > void allocateAndEmitScope(); >@@ -1039,7 +1089,8 @@ namespace JSC { > // (i.e. "Object()" is identical to "new Object()"). 
> ExpectedFunction emitExpectedFunctionSnippet(RegisterID* dst, RegisterID* func, ExpectedFunction, CallArguments&, Label& done); > >- RegisterID* emitCall(OpcodeID, RegisterID* dst, RegisterID* func, ExpectedFunction, CallArguments&, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall); >+ template<typename CallOp> >+ RegisterID* emitCall(RegisterID* dst, RegisterID* func, ExpectedFunction, CallArguments&, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall); > > RegisterID* emitCallIterator(RegisterID* iterator, RegisterID* argument, ThrowableExpressionData*); > RegisterID* newRegister(); >@@ -1102,7 +1153,8 @@ namespace JSC { > void getVariablesUnderTDZ(VariableEnvironment&); > > RegisterID* emitConstructVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall); >- RegisterID* emitCallVarargs(OpcodeID, RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall); >+ template<typename CallOp> >+ RegisterID* emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall); > > void emitLogShadowChickenPrologueIfNecessary(); > void emitLogShadowChickenTailIfNecessary(); >@@ -1125,10 +1177,13 @@ namespace JSC { > JSValue addBigIntConstant(const Identifier&, uint8_t radix, bool sign); > RegisterID* addTemplateObjectConstant(Ref<TemplateObjectDescriptor>&&); > >- 
Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>& instructions() { return m_instructions; } >+ const InstructionStream& instructions() const { return m_writer; } > > RegisterID* emitThrowExpressionTooDeepException(); > >+ void write(uint8_t byte) { m_writer.write(byte); } >+ void write(uint32_t i) { m_writer.write(i); } >+ > class PreservedTDZStack { > private: > Vector<TDZMap> m_preservedTDZStack; >@@ -1138,8 +1193,17 @@ namespace JSC { > void preserveTDZStack(PreservedTDZStack&); > void restoreTDZStack(const PreservedTDZStack&); > >+ template<typename Func> >+ void withWriter(InstructionStreamWriter& writer, Func fn) >+ { >+ auto tmp = m_writer; >+ m_writer = writer; >+ fn(); >+ m_writer = tmp; >+ } >+ > private: >- Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow> m_instructions; >+ InstructionStreamWriter m_writer; > > bool m_shouldEmitDebugHooks; > >@@ -1229,14 +1293,12 @@ namespace JSC { > IdentifierBigIntMap m_bigIntMap; > TemplateObjectDescriptorMap m_templateObjectDescriptorMap; > >- StaticPropertyAnalyzer m_staticPropertyAnalyzer { &m_instructions }; >+ StaticPropertyAnalyzer m_staticPropertyAnalyzer; > > VM* m_vm; > > OpcodeID m_lastOpcodeID = op_end; >-#ifndef NDEBUG >- size_t m_lastOpcodePosition { 0 }; >-#endif >+ InstructionStream::MutableRef m_lastInstruction { m_writer.ref() }; > > bool m_usesExceptions { false }; > bool m_expressionTooDeep { false }; >@@ -1246,10 +1308,11 @@ namespace JSC { > bool m_needsToUpdateArrowFunctionContext; > DerivedContextType m_derivedContextType { DerivedContextType::None }; > >- using CatchEntry = std::tuple<TryData*, int, int>; >+ using CatchEntry = std::tuple<TryData*, VirtualRegister, VirtualRegister>; > Vector<CatchEntry> m_catchesToEmit; > }; > >+ > } // namespace JSC > > namespace WTF { >diff --git a/Source/JavaScriptCore/bytecompiler/Label.h b/Source/JavaScriptCore/bytecompiler/Label.h >index 3e2d297f23d105c15984011a0f55a33574df053a..7e6bed65c2f66b1de785822c10148fb5bec415e9 100644 >--- 
a/Source/JavaScriptCore/bytecompiler/Label.h >+++ b/Source/JavaScriptCore/bytecompiler/Label.h >@@ -34,24 +34,38 @@ > #include <limits.h> > > namespace JSC { >- > class BytecodeGenerator; > > class Label { > WTF_MAKE_NONCOPYABLE(Label); > public: >+ class Bound { >+ >+ }; >+ > Label() = default; > >+ Label(unsigned location) >+ : m_location(location) >+ { } >+ > void setLocation(BytecodeGenerator&, unsigned); > >- int bind(int opcode, int offset) const >+ int bind(BytecodeGenerator*); >+ >+ int bind(unsigned offset) > { > m_bound = true; >- if (m_location == invalidLocation) { >- m_unresolvedJumps.append(std::make_pair(opcode, offset)); >- return 0; >- } >- return m_location - opcode; >+ if (!isForward()) >+ return m_location - offset; >+ m_unresolvedJumps.append(offset); >+ return 0; >+ } >+ >+ int bind() >+ { >+ ASSERT(!isForward()); >+ return bind(0u); > } > > void ref() { ++m_refCount; } >@@ -65,16 +79,10 @@ namespace JSC { > > bool isForward() const { return m_location == invalidLocation; } > >- int bind() >- { >- ASSERT(!isForward()); >- return bind(0, 0); >- } >- > bool isBound() const { return m_bound; } > > private: >- typedef Vector<std::pair<int, int>, 8> JumpVector; >+ typedef Vector<int, 8> JumpVector; > > static const unsigned invalidLocation = UINT_MAX; > >diff --git a/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp b/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp >index 366964f1d63a5de692592f9c4544e0872128dad9..24e2f60f77e43da5ed6eafb2b4f93b815e64f0bc 100644 >--- a/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp >+++ b/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp >@@ -464,7 +464,7 @@ handleSpread: > }); > for (; n; n = n->next()) { > if (n->elision()) >- generator.emitBinaryOp(op_add, index.get(), index.get(), generator.emitLoad(0, jsNumber(n->elision())), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32())); >+ generator.emitBinaryOp<OpAdd>(index.get(), index.get(), generator.emitLoad(0, 
jsNumber(n->elision())), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32())); > if (n->value()->isSpreadExpression()) { > SpreadExpressionNode* spread = static_cast<SpreadExpressionNode*>(n->value()); > generator.emitEnumeration(spread, spread->expression(), spreader); >@@ -475,7 +475,7 @@ handleSpread: > } > > if (m_elision) { >- generator.emitBinaryOp(op_add, index.get(), index.get(), generator.emitLoad(0, jsNumber(m_elision)), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32())); >+ generator.emitBinaryOp<OpAdd>(index.get(), index.get(), generator.emitLoad(0, jsNumber(m_elision)), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32())); > generator.emitPutById(array.get(), generator.propertyNames().length, index.get()); > } > return generator.move(dst, array.get()); >@@ -1460,13 +1460,13 @@ RegisterID* ApplyFunctionCallDotNode::emitBytecode(BytecodeGenerator& generator, > Ref<Label> haveThis = generator.newLabel(); > Ref<Label> end = generator.newLabel(); > RefPtr<RegisterID> compareResult = generator.newTemporary(); >- RefPtr<RegisterID> indexZeroCompareResult = generator.emitBinaryOp(op_eq, compareResult.get(), index.get(), generator.emitLoad(0, jsNumber(0)), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32())); >+ RefPtr<RegisterID> indexZeroCompareResult = generator.emitBinaryOp<OpEq>(compareResult.get(), index.get(), generator.emitLoad(0, jsNumber(0)), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32())); > generator.emitJumpIfFalse(indexZeroCompareResult.get(), haveThis.get()); > generator.move(thisRegister.get(), value); > generator.emitLoad(index.get(), jsNumber(1)); > generator.emitJump(end.get()); > generator.emitLabel(haveThis.get()); >- RefPtr<RegisterID> indexOneCompareResult = generator.emitBinaryOp(op_eq, compareResult.get(), index.get(), generator.emitLoad(0, jsNumber(1)), 
OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32())); >+ RefPtr<RegisterID> indexOneCompareResult = generator.emitBinaryOp<OpEq>(compareResult.get(), index.get(), generator.emitLoad(0, jsNumber(1)), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32())); > generator.emitJumpIfFalse(indexOneCompareResult.get(), end.get()); > generator.move(argumentsRegister.get(), value); > generator.emitLoad(index.get(), jsNumber(2)); >@@ -1898,7 +1898,7 @@ RegisterID* BitwiseNotNode::emitBytecode(BytecodeGenerator& generator, RegisterI > { > RefPtr<RegisterID> src2 = generator.emitLoad(nullptr, jsNumber(-1)); > RefPtr<RegisterID> src1 = generator.emitNode(m_expr); >- return generator.emitBinaryOp(op_bitxor, generator.finalDestination(dst, src1.get()), src1.get(), src2.get(), OperandTypes(m_expr->resultDescriptor(), ResultType::numberTypeIsInt32())); >+ return generator.emitBinaryOp<OpBitxor>(generator.finalDestination(dst, src1.get()), src1.get(), src2.get(), OperandTypes(m_expr->resultDescriptor(), ResultType::numberTypeIsInt32())); > } > > // ------------------------------ LogicalNotNode ----------------------------------- >@@ -2166,7 +2166,7 @@ RegisterID* BinaryOpNode::emitBytecode(BytecodeGenerator& generator, RegisterID* > if (m_expr1->isNull() || m_expr2->isNull()) { > RefPtr<RegisterID> src = generator.tempDestination(dst); > generator.emitNode(src.get(), m_expr1->isNull() ? 
m_expr2 : m_expr1); >- return generator.emitUnaryOp(op_neq_null, generator.finalDestination(dst, src.get()), src.get()); >+ return generator.emitUnaryOp<OpNeqNull>(generator.finalDestination(dst, src.get()), src.get()); > } > } > >@@ -2184,17 +2184,17 @@ RegisterID* BinaryOpNode::emitBytecode(BytecodeGenerator& generator, RegisterID* > if (wasTypeof && (opcodeID == op_neq || opcodeID == op_nstricteq)) { > RefPtr<RegisterID> tmp = generator.tempDestination(dst); > if (opcodeID == op_neq) >- generator.emitEqualityOp(op_eq, generator.finalDestination(tmp.get(), src1.get()), src1.get(), src2.get()); >+ generator.emitEqualityOp<OpEq>(generator.finalDestination(tmp.get(), src1.get()), src1.get(), src2.get()); > else if (opcodeID == op_nstricteq) >- generator.emitEqualityOp(op_stricteq, generator.finalDestination(tmp.get(), src1.get()), src1.get(), src2.get()); >+ generator.emitEqualityOp<OpStricteq>(generator.finalDestination(tmp.get(), src1.get()), src1.get(), src2.get()); > else > RELEASE_ASSERT_NOT_REACHED(); >- return generator.emitUnaryOp(op_not, generator.finalDestination(dst, tmp.get()), tmp.get()); >+ return generator.emitUnaryOp<OpNot>(generator.finalDestination(dst, tmp.get()), tmp.get()); > } > RegisterID* result = generator.emitBinaryOp(opcodeID, generator.finalDestination(dst, src1.get()), src1.get(), src2.get(), OperandTypes(left->resultDescriptor(), right->resultDescriptor())); > if (m_shouldToUnsignedResult) { > if (opcodeID == op_urshift && dst != generator.ignoredResult()) >- return generator.emitUnaryOp(op_unsigned, result, result); >+ return generator.emitUnaryOp<OpUnsigned>(result, result); > } > return result; > } >@@ -2204,7 +2204,7 @@ RegisterID* EqualNode::emitBytecode(BytecodeGenerator& generator, RegisterID* ds > if (m_expr1->isNull() || m_expr2->isNull()) { > RefPtr<RegisterID> src = generator.tempDestination(dst); > generator.emitNode(src.get(), m_expr1->isNull() ? 
m_expr2 : m_expr1); >- return generator.emitUnaryOp(op_eq_null, generator.finalDestination(dst, src.get()), src.get()); >+ return generator.emitUnaryOp<OpEqNull>(generator.finalDestination(dst, src.get()), src.get()); > } > > ExpressionNode* left = m_expr1; >@@ -2214,7 +2214,7 @@ RegisterID* EqualNode::emitBytecode(BytecodeGenerator& generator, RegisterID* ds > > RefPtr<RegisterID> src1 = generator.emitNodeForLeftHandSide(left, m_rightHasAssignments, m_expr2->isPure(generator)); > RefPtr<RegisterID> src2 = generator.emitNode(right); >- return generator.emitEqualityOp(op_eq, generator.finalDestination(dst, src1.get()), src1.get(), src2.get()); >+ return generator.emitEqualityOp<OpEq>(generator.finalDestination(dst, src1.get()), src1.get(), src2.get()); > } > > RegisterID* StrictEqualNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) >@@ -2226,7 +2226,7 @@ RegisterID* StrictEqualNode::emitBytecode(BytecodeGenerator& generator, Register > > RefPtr<RegisterID> src1 = generator.emitNodeForLeftHandSide(left, m_rightHasAssignments, m_expr2->isPure(generator)); > RefPtr<RegisterID> src2 = generator.emitNode(right); >- return generator.emitEqualityOp(op_stricteq, generator.finalDestination(dst, src1.get()), src1.get(), src2.get()); >+ return generator.emitEqualityOp<OpStricteq>(generator.finalDestination(dst, src1.get()), src1.get(), src2.get()); > } > > RegisterID* ThrowableBinaryOpNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) >@@ -2419,7 +2419,7 @@ static ALWAYS_INLINE RegisterID* emitReadModifyAssignment(BytecodeGenerator& gen > generator.emitExpressionInfo(emitExpressionInfoForMe->divot(), emitExpressionInfoForMe->divotStart(), emitExpressionInfoForMe->divotEnd()); > RegisterID* result = generator.emitBinaryOp(opcodeID, dst, src1, src2, types); > if (oper == OpURShift) >- return generator.emitUnaryOp(op_unsigned, result, result); >+ return generator.emitUnaryOp<OpUnsigned>(result, result); > return result; > } > >@@ -3092,7 +3092,7 @@ 
void ForInNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) > generator.emitLabel(loopStart.get()); > generator.emitLoopHint(); > >- RefPtr<RegisterID> result = generator.emitEqualityOp(op_less, generator.newTemporary(), i.get(), length.get()); >+ RefPtr<RegisterID> result = generator.emitEqualityOp<OpLess>(generator.newTemporary(), i.get(), length.get()); > generator.emitJumpIfFalse(result.get(), loopEnd.get()); > generator.emitHasIndexedProperty(result.get(), base.get(), i.get()); > generator.emitJumpIfFalse(result.get(), *scope->continueTarget()); >@@ -3133,7 +3133,7 @@ void ForInNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) > generator.emitLabel(loopStart.get()); > generator.emitLoopHint(); > >- RefPtr<RegisterID> result = generator.emitUnaryOp(op_eq_null, generator.newTemporary(), propertyName.get()); >+ RefPtr<RegisterID> result = generator.emitUnaryOp<OpEqNull>(generator.newTemporary(), propertyName.get()); > generator.emitJumpIfTrue(result.get(), loopEnd.get()); > generator.emitHasStructureProperty(result.get(), base.get(), propertyName.get(), enumerator.get()); > generator.emitJumpIfFalse(result.get(), *scope->continueTarget()); >@@ -3174,7 +3174,7 @@ void ForInNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) > generator.emitLabel(loopStart.get()); > generator.emitLoopHint(); > >- RefPtr<RegisterID> result = generator.emitUnaryOp(op_eq_null, generator.newTemporary(), propertyName.get()); >+ RefPtr<RegisterID> result = generator.emitUnaryOp<OpEqNull>(generator.newTemporary(), propertyName.get()); > generator.emitJumpIfTrue(result.get(), loopEnd.get()); > > generator.emitHasGenericProperty(result.get(), base.get(), propertyName.get()); >@@ -3499,7 +3499,7 @@ void CaseBlockNode::emitBytecodeForBlock(BytecodeGenerator& generator, RegisterI > for (ClauseListNode* list = m_list1; list; list = list->getNext()) { > RefPtr<RegisterID> clauseVal = generator.newTemporary(); > generator.emitNode(clauseVal.get(), 
list->getClause()->expr()); >- generator.emitBinaryOp(op_stricteq, clauseVal.get(), clauseVal.get(), switchExpression, OperandTypes()); >+ generator.emitBinaryOp<OpStricteq>(clauseVal.get(), clauseVal.get(), switchExpression, OperandTypes()); > labelVector.append(generator.newLabel()); > generator.emitJumpIfTrue(clauseVal.get(), labelVector[labelVector.size() - 1].get()); > } >@@ -3507,7 +3507,7 @@ void CaseBlockNode::emitBytecodeForBlock(BytecodeGenerator& generator, RegisterI > for (ClauseListNode* list = m_list2; list; list = list->getNext()) { > RefPtr<RegisterID> clauseVal = generator.newTemporary(); > generator.emitNode(clauseVal.get(), list->getClause()->expr()); >- generator.emitBinaryOp(op_stricteq, clauseVal.get(), clauseVal.get(), switchExpression, OperandTypes()); >+ generator.emitBinaryOp<OpStricteq>(clauseVal.get(), clauseVal.get(), switchExpression, OperandTypes()); > labelVector.append(generator.newLabel()); > generator.emitJumpIfTrue(clauseVal.get(), labelVector[labelVector.size() - 1].get()); > } >@@ -3855,11 +3855,11 @@ void FunctionNode::emitBytecode(BytecodeGenerator& generator, RegisterID*) > Ref<Label> generatorBodyLabel = generator.newLabel(); > { > RefPtr<RegisterID> condition = generator.newTemporary(); >- generator.emitEqualityOp(op_stricteq, condition.get(), generator.generatorResumeModeRegister(), generator.emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::NormalMode)))); >+ generator.emitEqualityOp<OpStricteq>(condition.get(), generator.generatorResumeModeRegister(), generator.emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::NormalMode)))); > generator.emitJumpIfTrue(condition.get(), generatorBodyLabel.get()); > > Ref<Label> throwLabel = generator.newLabel(); >- generator.emitEqualityOp(op_stricteq, condition.get(), generator.generatorResumeModeRegister(), generator.emitLoad(nullptr, 
jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ThrowMode)))); >+ generator.emitEqualityOp<OpStricteq>(condition.get(), generator.generatorResumeModeRegister(), generator.emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ThrowMode)))); > generator.emitJumpIfTrue(condition.get(), throwLabel.get()); > > generator.emitReturn(generator.generatorValueRegister()); >@@ -4019,7 +4019,7 @@ RegisterID* ClassExprNode::emitBytecode(BytecodeGenerator& generator, RegisterID > generator.emitJumpIfTrue(generator.emitIsUndefined(tempRegister.get(), superclass.get()), superclassIsUndefinedLabel.get()); > > Ref<Label> superclassIsNullLabel = generator.newLabel(); >- generator.emitJumpIfTrue(generator.emitUnaryOp(op_eq_null, tempRegister.get(), superclass.get()), superclassIsNullLabel.get()); >+ generator.emitJumpIfTrue(generator.emitUnaryOp<OpEqNull>(tempRegister.get(), superclass.get()), superclassIsNullLabel.get()); > > Ref<Label> superclassIsObjectLabel = generator.newLabel(); > generator.emitJumpIfTrue(generator.emitIsObject(tempRegister.get(), superclass.get()), superclassIsObjectLabel.get()); >@@ -4029,8 +4029,8 @@ RegisterID* ClassExprNode::emitBytecode(BytecodeGenerator& generator, RegisterID > generator.emitGetById(protoParent.get(), superclass.get(), generator.propertyNames().prototype); > > Ref<Label> protoParentIsObjectOrNullLabel = generator.newLabel(); >- generator.emitJumpIfTrue(generator.emitUnaryOp(op_is_object_or_null, tempRegister.get(), protoParent.get()), protoParentIsObjectOrNullLabel.get()); >- generator.emitJumpIfTrue(generator.emitUnaryOp(op_is_function, tempRegister.get(), protoParent.get()), protoParentIsObjectOrNullLabel.get()); >+ generator.emitJumpIfTrue(generator.emitUnaryOp<OpIsObjectOrNull>(tempRegister.get(), protoParent.get()), protoParentIsObjectOrNullLabel.get()); >+ generator.emitJumpIfTrue(generator.emitUnaryOp<OpIsFunction>(tempRegister.get(), protoParent.get()), 
protoParentIsObjectOrNullLabel.get()); > generator.emitThrowTypeError("The value of the superclass's prototype property is not an object."_s); > generator.emitLabel(protoParentIsObjectOrNullLabel.get()); > >diff --git a/Source/JavaScriptCore/bytecompiler/ProfileTypeBytecodeFlag.h b/Source/JavaScriptCore/bytecompiler/ProfileTypeBytecodeFlag.h >new file mode 100644 >index 0000000000000000000000000000000000000000..002f32c65f9b207a7e7d1dccbfdf4a3ccc6e1e8f >--- /dev/null >+++ b/Source/JavaScriptCore/bytecompiler/ProfileTypeBytecodeFlag.h >@@ -0,0 +1,38 @@ >+/* >+ * Copyright (C) 2018 Apple Inc. All rights reserved. >+ * >+ * Redistribution and use in source and binary forms, with or without >+ * modification, are permitted provided that the following conditions >+ * are met: >+ * 1. Redistributions of source code must retain the above copyright >+ * notice, this list of conditions and the following disclaimer. >+ * 2. Redistributions in binary form must reproduce the above copyright >+ * notice, this list of conditions and the following disclaimer in the >+ * documentation and/or other materials provided with the distribution. >+ * >+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' >+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE >+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS >+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR >+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF >+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS >+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN >+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) >+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF >+ * THE POSSIBILITY OF SUCH DAMAGE. 
>+ */ >+ >+#pragma once >+ >+namespace JSC { >+ >+enum ProfileTypeBytecodeFlag { >+ ProfileTypeBytecodeClosureVar, >+ ProfileTypeBytecodeLocallyResolved, >+ ProfileTypeBytecodeDoesNotHaveGlobalID, >+ ProfileTypeBytecodeFunctionArgument, >+ ProfileTypeBytecodeFunctionReturnStatement >+}; >+ >+} // namespace JSC >diff --git a/Source/JavaScriptCore/bytecompiler/RegisterID.h b/Source/JavaScriptCore/bytecompiler/RegisterID.h >index cc80f5eb8913562ad25cd6f7aad1c5bffd8a03b7..d9ba5598351b6db0c84930278e67931db511af5a 100644 >--- a/Source/JavaScriptCore/bytecompiler/RegisterID.h >+++ b/Source/JavaScriptCore/bytecompiler/RegisterID.h >@@ -37,6 +37,8 @@ namespace JSC { > > class RegisterID { > WTF_MAKE_NONCOPYABLE(RegisterID); >+ >+ friend class VirtualRegister; > public: > RegisterID() > : m_refCount(0) >@@ -123,6 +125,10 @@ namespace JSC { > #endif > }; > >+VirtualRegister::VirtualRegister(RegisterID* reg) >+ : VirtualRegister(reg->m_virtualRegister.m_virtualRegister) >+{ >+} > } // namespace JSC > > namespace WTF { >diff --git a/Source/JavaScriptCore/bytecompiler/StaticPropertyAnalysis.h b/Source/JavaScriptCore/bytecompiler/StaticPropertyAnalysis.h >index f23e8425a795f98a7c4dc61bf35c15d1b079b3ae..8757d3a0d54fc89fb0d97ac8b99e86a341eb6d74 100644 >--- a/Source/JavaScriptCore/bytecompiler/StaticPropertyAnalysis.h >+++ b/Source/JavaScriptCore/bytecompiler/StaticPropertyAnalysis.h >@@ -25,6 +25,7 @@ > > #pragma once > >+#include "InstructionStream.h" > #include <wtf/HashSet.h> > > namespace JSC { >@@ -32,29 +33,24 @@ namespace JSC { > // Reference count indicates number of live registers that alias this object. 
> class StaticPropertyAnalysis : public RefCounted<StaticPropertyAnalysis> { > public: >- static Ref<StaticPropertyAnalysis> create(Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>* instructions, unsigned target) >+ static Ref<StaticPropertyAnalysis> create(InstructionStream::MutableRef&& instructionRef) > { >- return adoptRef(*new StaticPropertyAnalysis(instructions, target)); >+ return adoptRef(*new StaticPropertyAnalysis(WTFMove(instructionRef))); > } > > void addPropertyIndex(unsigned propertyIndex) { m_propertyIndexes.add(propertyIndex); } > >- void record() >- { >- (*m_instructions)[m_target] = m_propertyIndexes.size(); >- } >+ void record(); > > int propertyIndexCount() { return m_propertyIndexes.size(); } > > private: >- StaticPropertyAnalysis(Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>* instructions, unsigned target) >- : m_instructions(instructions) >- , m_target(target) >+ StaticPropertyAnalysis(InstructionStream::MutableRef&& instructionRef) >+ : m_instructionRef(WTFMove(instructionRef)) > { > } > >- Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>* m_instructions; >- unsigned m_target; >+ InstructionStream::MutableRef m_instructionRef; > typedef HashSet<unsigned, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> PropertyIndexSet; > PropertyIndexSet m_propertyIndexes; > }; >diff --git a/Source/JavaScriptCore/bytecompiler/StaticPropertyAnalyzer.h b/Source/JavaScriptCore/bytecompiler/StaticPropertyAnalyzer.h >index cc3b1e4a983391501d3fdf3a67a9c4a34bd9a268..fc5166c965015d98c2410ec82f2aef01cdb059c4 100644 >--- a/Source/JavaScriptCore/bytecompiler/StaticPropertyAnalyzer.h >+++ b/Source/JavaScriptCore/bytecompiler/StaticPropertyAnalyzer.h >@@ -35,63 +35,55 @@ namespace JSC { > // is understood to be lossy, and it's OK if it turns out to be wrong sometimes. 
> class StaticPropertyAnalyzer { > public: >- StaticPropertyAnalyzer(Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>*); >- >- void createThis(int dst, unsigned offsetOfInlineCapacityOperand); >- void newObject(int dst, unsigned offsetOfInlineCapacityOperand); >- void putById(int dst, unsigned propertyIndex); // propertyIndex is an index into a uniqued set of strings. >- void mov(int dst, int src); >+ void createThis(RegisterID* dst, InstructionStream::MutableRef&& instructionRef); >+ void newObject(RegisterID* dst, InstructionStream::MutableRef&& instructionRef); >+ void putById(RegisterID* dst, unsigned propertyIndex); // propertyIndex is an index into a uniqued set of strings. >+ void mov(RegisterID* dst, RegisterID* src); > > void kill(); >- void kill(int dst); >+ void kill(RegisterID* dst); > > private: > void kill(StaticPropertyAnalysis*); > >- Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>* m_instructions; > typedef HashMap<int, RefPtr<StaticPropertyAnalysis>, WTF::IntHash<int>, WTF::UnsignedWithZeroKeyHashTraits<int>> AnalysisMap; > AnalysisMap m_analyses; > }; > >-inline StaticPropertyAnalyzer::StaticPropertyAnalyzer(Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>* instructions) >- : m_instructions(instructions) >-{ >-} >- >-inline void StaticPropertyAnalyzer::createThis(int dst, unsigned offsetOfInlineCapacityOperand) >+inline void StaticPropertyAnalyzer::createThis(RegisterID* dst, InstructionStream::MutableRef&& instructionRef) > { > AnalysisMap::AddResult addResult = m_analyses.add( >- dst, StaticPropertyAnalysis::create(m_instructions, offsetOfInlineCapacityOperand)); >+ dst->index(), StaticPropertyAnalysis::create(WTFMove(instructionRef))); > ASSERT_UNUSED(addResult, addResult.isNewEntry); // Can't have two 'this' in the same constructor. 
> } > >-inline void StaticPropertyAnalyzer::newObject(int dst, unsigned offsetOfInlineCapacityOperand) >+inline void StaticPropertyAnalyzer::newObject(RegisterID* dst, InstructionStream::MutableRef&& instructionRef) > { >- RefPtr<StaticPropertyAnalysis> analysis = StaticPropertyAnalysis::create(m_instructions, offsetOfInlineCapacityOperand); >- AnalysisMap::AddResult addResult = m_analyses.add(dst, analysis); >+ RefPtr<StaticPropertyAnalysis> analysis = StaticPropertyAnalysis::create(WTFMove(instructionRef)); >+ AnalysisMap::AddResult addResult = m_analyses.add(dst->index(), analysis); > if (!addResult.isNewEntry) { > kill(addResult.iterator->value.get()); > addResult.iterator->value = WTFMove(analysis); > } > } > >-inline void StaticPropertyAnalyzer::putById(int dst, unsigned propertyIndex) >+inline void StaticPropertyAnalyzer::putById(RegisterID* dst, unsigned propertyIndex) > { >- StaticPropertyAnalysis* analysis = m_analyses.get(dst); >+ StaticPropertyAnalysis* analysis = m_analyses.get(dst->index()); > if (!analysis) > return; > analysis->addPropertyIndex(propertyIndex); > } > >-inline void StaticPropertyAnalyzer::mov(int dst, int src) >+inline void StaticPropertyAnalyzer::mov(RegisterID* dst, RegisterID* src) > { >- RefPtr<StaticPropertyAnalysis> analysis = m_analyses.get(src); >+ RefPtr<StaticPropertyAnalysis> analysis = m_analyses.get(src->index()); > if (!analysis) { > kill(dst); > return; > } > >- AnalysisMap::AddResult addResult = m_analyses.add(dst, analysis); >+ AnalysisMap::AddResult addResult = m_analyses.add(dst->index(), analysis); > if (!addResult.isNewEntry) { > kill(addResult.iterator->value.get()); > addResult.iterator->value = WTFMove(analysis); >@@ -107,7 +99,7 @@ inline void StaticPropertyAnalyzer::kill(StaticPropertyAnalysis* analysis) > analysis->record(); > } > >-inline void StaticPropertyAnalyzer::kill(int dst) >+inline void StaticPropertyAnalyzer::kill(RegisterID* dst) > { > // We observe kills in order to avoid piling on properties to 
an object after > // its bytecode register has been recycled. >@@ -148,7 +140,7 @@ inline void StaticPropertyAnalyzer::kill(int dst) > // so we accept kills to any registers except for registers that have no inferred > // properties yet. > >- AnalysisMap::iterator it = m_analyses.find(dst); >+ AnalysisMap::iterator it = m_analyses.find(dst->index()); > if (it == m_analyses.end()) > return; > if (!it->value->propertyIndexCount()) >diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp >index e9c7fa5b04fa2ed7eb91195bfadb47e7b818f19c..a653e0c406f755cb010e7eb1a4aaf1d1ec9f08f7 100644 >--- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp >+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp >@@ -134,7 +134,7 @@ private: > > // Helper for min and max. > template<typename ChecksFunctor> >- bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks); >+ bool handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks); > > void refineStatically(CallLinkStatus&, Node* callTarget); > // Blocks can either be targetable (i.e. in the m_blockLinkingTargets of one InlineStackEntry) with a well-defined bytecodeBegin, >@@ -151,11 +151,13 @@ private: > // Handle calls. This resolves issues surrounding inlining and intrinsics. 
> enum Terminality { Terminal, NonTerminal }; > Terminality handleCall( >- int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize, >+ VirtualRegister result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize, > Node* callTarget, int argumentCountIncludingThis, int registerOffset, CallLinkStatus, > SpeculatedType prediction); >- Terminality handleCall(Instruction* pc, NodeType op, CallMode); >- Terminality handleVarargsCall(Instruction* pc, NodeType op, CallMode); >+ template<typename CallOp> >+ Terminality handleCall(const Instruction* pc, NodeType op, CallMode); >+ template<typename CallOp> >+ Terminality handleVarargsCall(const Instruction* pc, NodeType op, CallMode); > void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgumnt); > void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis); > Node* getArgumentCount(); >@@ -163,28 +165,39 @@ private: > bool handleRecursiveTailCall(Node* callTargetNode, CallVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded); > unsigned inliningCost(CallVariant, int argumentCountIncludingThis, InlineCallFrame::Kind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1. > // Handle inlining. Return true if it succeeded, false if we need to plant a call. 
>- bool handleVarargsInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, NodeType callOp, InlineCallFrame::Kind); >+ bool handleVarargsInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, NodeType callOp, InlineCallFrame::Kind); > unsigned getInliningBalance(const CallLinkStatus&, CodeSpecializationKind); > enum class CallOptimizationResult { OptimizedToJump, Inlined, DidNothing }; >- CallOptimizationResult handleCallVariant(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee); >- CallOptimizationResult handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction); >+ CallOptimizationResult handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee); >+ CallOptimizationResult handleInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction); > template<typename ChecksFunctor> >- void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int 
registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks); >+ void inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks); > // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call. > template<typename ChecksFunctor> >- bool handleIntrinsicCall(Node* callee, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks); >+ bool handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks); > template<typename ChecksFunctor> >- bool handleDOMJITCall(Node* callee, int resultOperand, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks); >+ bool handleDOMJITCall(Node* callee, VirtualRegister result, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks); > template<typename ChecksFunctor> >- bool handleIntrinsicGetter(int resultOperand, SpeculatedType prediction, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks); >+ bool handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks); > template<typename ChecksFunctor> >- bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks); >+ bool handleTypedArrayConstructor(VirtualRegister result, InternalFunction*, int registerOffset, int 
argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks); > template<typename ChecksFunctor> >- bool handleConstantInternalFunction(Node* callTargetNode, int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks); >+ bool handleConstantInternalFunction(Node* callTargetNode, VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks); > Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, const InferredType::Descriptor&, Node* value); > Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, const InferredType::Descriptor&, NodeType = GetByOffset); >- bool handleDOMJITGetter(int resultOperand, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction); >- bool handleModuleNamespaceLoad(int resultOperand, SpeculatedType, Node* base, GetByIdStatus); >+ bool handleDOMJITGetter(VirtualRegister result, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction); >+ bool handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType, Node* base, GetByIdStatus); >+ >+ template<typename Bytecode> >+ void handlePutByVal(Bytecode); >+ template <typename Bytecode> >+ void handlePutAccessorById(NodeType, Bytecode); >+ template <typename Bytecode> >+ void handlePutAccessorByVal(NodeType, Bytecode); >+ template <typename Bytecode> >+ void handleNewFunc(NodeType, Bytecode); >+ template <typename Bytecode> >+ void handleNewFuncExp(NodeType, Bytecode); > > // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not > // check the validity of the condition, but it may return a null one if it encounters a contradiction. 
>@@ -204,7 +217,7 @@ private: > Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value); > > void handleGetById( >- int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize); >+ VirtualRegister destination, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize); > void emitPutById( > Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect); > void handlePutById( >@@ -786,7 +799,7 @@ private: > } > > Node* addCall( >- int result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset, >+ VirtualRegister result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset, > SpeculatedType prediction) > { > if (op == TailCall) { >@@ -798,9 +811,8 @@ private: > > Node* call = addCallWithoutSettingResult( > op, OpInfo(signature), callee, argCount, registerOffset, OpInfo(prediction)); >- VirtualRegister resultReg(result); >- if (resultReg.isValid()) >- set(resultReg, call); >+ if (result.isValid()) >+ set(result, call); > return call; > } > >@@ -832,8 +844,8 @@ private: > // chain and use its prediction. If we only have > // inlined tail call frames, we use SpecFullTop > // to avoid a spurious OSR exit. 
>- Instruction* instruction = &m_inlineStackTop->m_profiledBlock->instructions()[bytecodeIndex]; >- OpcodeID opcodeID = Interpreter::getOpcodeID(instruction->u.opcode); >+ auto instruction = m_inlineStackTop->m_profiledBlock->instructions().at(bytecodeIndex); >+ OpcodeID opcodeID = instruction->opcodeID(); > > switch (opcodeID) { > case op_tail_call: >@@ -892,12 +904,12 @@ private: > return getPrediction(m_currentIndex); > } > >- ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action) >+ ArrayMode getArrayMode(ArrayProfile& profile, Array::Action action) > { > ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); >- profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock); >- bool makeSafe = profile->outOfBounds(locker); >- return ArrayMode::fromObserved(locker, profile, action, makeSafe); >+ profile.computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock); >+ bool makeSafe = profile.outOfBounds(locker); >+ return ArrayMode::fromObserved(locker, &profile, action, makeSafe); > } > > Node* makeSafe(Node* node) >@@ -1145,7 +1157,7 @@ private: > > Vector<DelayedSetLocal, 2> m_setLocalQueue; > >- Instruction* m_currentInstruction; >+ const Instruction* m_currentInstruction; > bool m_hasDebuggerEnabled; > bool m_hasAnyForceOSRExits { false }; > }; >@@ -1196,17 +1208,17 @@ void ByteCodeParser::addJumpTo(unsigned bytecodeIndex) > m_inlineStackTop->m_unlinkedBlocks.append(m_currentBlock); > } > >-ByteCodeParser::Terminality ByteCodeParser::handleCall(Instruction* pc, NodeType op, CallMode callMode) >+template<typename CallOp> >+ByteCodeParser::Terminality ByteCodeParser::handleCall(const Instruction* pc, NodeType op, CallMode callMode) > { > static_assert(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), > "op_call, op_tail_call and op_construct should always have the same length"); > static_assert(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call), > "op_call, op_tail_call and op_construct should always 
have the same length"); > >- int result = pc[1].u.operand; >- Node* callTarget = get(VirtualRegister(pc[2].u.operand)); >- int argumentCountIncludingThis = pc[3].u.operand; >- int registerOffset = -pc[4].u.operand; >+ auto bytecode = pc->as<CallOp>(); >+ Node* callTarget = get(bytecode.callee); >+ int registerOffset = -static_cast<int>(bytecode.argv); > > CallLinkStatus callLinkStatus = CallLinkStatus::computeFor( > m_inlineStackTop->m_profiledBlock, currentCodeOrigin(), >@@ -1214,8 +1226,8 @@ ByteCodeParser::Terminality ByteCodeParser::handleCall(Instruction* pc, NodeType > > InlineCallFrame::Kind kind = InlineCallFrame::kindFor(callMode); > >- return handleCall(result, op, kind, OPCODE_LENGTH(op_call), callTarget, >- argumentCountIncludingThis, registerOffset, callLinkStatus, getPrediction()); >+ return handleCall(bytecode.dst, op, kind, OPCODE_LENGTH(op_call), callTarget, >+ bytecode.argc, registerOffset, callLinkStatus, getPrediction()); > } > > void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* callTarget) >@@ -1225,7 +1237,7 @@ void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* call > } > > ByteCodeParser::Terminality ByteCodeParser::handleCall( >- int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize, >+ VirtualRegister result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize, > Node* callTarget, int argumentCountIncludingThis, int registerOffset, > CallLinkStatus callLinkStatus, SpeculatedType prediction) > { >@@ -1257,23 +1269,21 @@ ByteCodeParser::Terminality ByteCodeParser::handleCall( > return callNode->op() == TailCall ? 
Terminal : NonTerminal; > } > >-ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(Instruction* pc, NodeType op, CallMode callMode) >+template<typename CallOp> >+ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(const Instruction* pc, NodeType op, CallMode callMode) > { > static_assert(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_construct_varargs), > "op_call_varargs, op_tail_call_varargs and op_construct_varargs should always have the same length"); > static_assert(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_tail_call_varargs), > "op_call_varargs, op_tail_call_varargs and op_construct_varargs should always have the same length"); > >- int result = pc[1].u.operand; >- int callee = pc[2].u.operand; >- int thisReg = pc[3].u.operand; >- int arguments = pc[4].u.operand; >- int firstFreeReg = pc[5].u.operand; >- int firstVarArgOffset = pc[6].u.operand; >+ auto bytecode = pc->as<CallOp>(); >+ int firstFreeReg = bytecode.firstFree.offset(); >+ int firstVarArgOffset = bytecode.firstVarArg; > > SpeculatedType prediction = getPrediction(); > >- Node* callTarget = get(VirtualRegister(callee)); >+ Node* callTarget = get(bytecode.callee); > > CallLinkStatus callLinkStatus = CallLinkStatus::computeFor( > m_inlineStackTop->m_profiledBlock, currentCodeOrigin(), >@@ -1285,8 +1295,8 @@ ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(Instruction* pc, N > if (callLinkStatus.canOptimize()) { > addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses.addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget); > >- if (handleVarargsInlining(callTarget, result, >- callLinkStatus, firstFreeReg, VirtualRegister(thisReg), VirtualRegister(arguments), >+ if (handleVarargsInlining(callTarget, bytecode.dst, >+ callLinkStatus, firstFreeReg, bytecode.thisValue, bytecode.arguments, > firstVarArgOffset, op, > InlineCallFrame::varargsKindFor(callMode))) { > if (UNLIKELY(m_graph.compilation())) >@@ -1298,10 +1308,10 @@ 
ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(Instruction* pc, N > CallVarargsData* data = m_graph.m_callVarargsData.add(); > data->firstVarArgOffset = firstVarArgOffset; > >- Node* thisChild = get(VirtualRegister(thisReg)); >+ Node* thisChild = get(bytecode.thisValue); > Node* argumentsChild = nullptr; > if (op != TailCallForwardVarargs) >- argumentsChild = get(VirtualRegister(arguments)); >+ argumentsChild = get(bytecode.arguments); > > if (op == TailCallVarargs || op == TailCallForwardVarargs) { > if (allInlineFramesAreTailCalls()) { >@@ -1312,9 +1322,8 @@ ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(Instruction* pc, N > } > > Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, thisChild, argumentsChild); >- VirtualRegister resultReg(result); >- if (resultReg.isValid()) >- set(resultReg, call); >+ if (bytecode.dst.isValid()) >+ set(bytecode.dst, call); > return NonTerminal; > } > >@@ -1540,9 +1549,9 @@ unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountInclu > } > > template<typename ChecksFunctor> >-void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks) >+void ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks) > { >- Instruction* savedCurrentInstruction = m_currentInstruction; >+ const Instruction* savedCurrentInstruction = m_currentInstruction; > CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind); > > ASSERT(inliningCost(callee, argumentCountIncludingThis, kind) != UINT_MAX); >@@ -1573,9 +1582,8 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVar > 
> size_t argumentPositionStart = m_graph.m_argumentPositions.size(); > >- VirtualRegister resultReg(resultOperand); >- if (resultReg.isValid()) >- resultReg = m_inlineStackTop->remapOperand(resultReg); >+ if (result.isValid()) >+ result = m_inlineStackTop->remapOperand(result); > > VariableAccessData* calleeVariable = nullptr; > if (callee.isClosureCall()) { >@@ -1636,7 +1644,7 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVar > // our callee's frame. We emit an ExitOK below from the callee's CodeOrigin. > } > >- InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), resultReg, >+ InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), result, > (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind, continuationBlock); > > // This is where the actual inlining really happens. >@@ -1684,7 +1692,7 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVar > m_currentInstruction = savedCurrentInstruction; > } > >-ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee) >+ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee) > { > VERBOSE_LOG(" Considering callee ", callee, "\n"); > >@@ -1720,7 +1728,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* c > }; > > if 
(InternalFunction* function = callee.internalFunction()) { >- if (handleConstantInternalFunction(callTargetNode, resultOperand, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) { >+ if (handleConstantInternalFunction(callTargetNode, result, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) { > endSpecialCase(); > return CallOptimizationResult::Inlined; > } >@@ -1730,7 +1738,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* c > > Intrinsic intrinsic = callee.intrinsicFor(specializationKind); > if (intrinsic != NoIntrinsic) { >- if (handleIntrinsicCall(callTargetNode, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) { >+ if (handleIntrinsicCall(callTargetNode, result, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) { > endSpecialCase(); > return CallOptimizationResult::Inlined; > } >@@ -1740,7 +1748,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* c > > if (Options::useDOMJIT()) { > if (const DOMJIT::Signature* signature = callee.signatureFor(specializationKind)) { >- if (handleDOMJITCall(callTargetNode, resultOperand, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) { >+ if (handleDOMJITCall(callTargetNode, result, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) { > endSpecialCase(); > return CallOptimizationResult::Inlined; > } >@@ -1756,12 +1764,12 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* c > if (needsToCheckCallee) > emitFunctionChecks(callee, callTargetNode, thisArgument); > }; >- inlineCall(callTargetNode, resultOperand, callee, registerOffset, argumentCountIncludingThis, kind, continuationBlock, insertCheck); >+ 
inlineCall(callTargetNode, result, callee, registerOffset, argumentCountIncludingThis, kind, continuationBlock, insertCheck); > inliningBalance -= myInliningCost; > return CallOptimizationResult::Inlined; > } > >-bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, int resultOperand, >+bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister result, > const CallLinkStatus& callLinkStatus, int firstFreeReg, VirtualRegister thisArgument, > VirtualRegister argumentsArgument, unsigned argumentsOffset, > NodeType callOp, InlineCallFrame::Kind kind) >@@ -1873,7 +1881,7 @@ bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, int resultOpera > // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to > // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without > // calling LoadVarargs twice. >- inlineCall(callTargetNode, resultOperand, callVariant, registerOffset, maxNumArguments, kind, nullptr, insertChecks); >+ inlineCall(callTargetNode, result, callVariant, registerOffset, maxNumArguments, kind, nullptr, insertChecks); > > VERBOSE_LOG("Successful inlining (varargs, monomorphic).\nStack: ", currentCodeOrigin(), "\n"); > return true; >@@ -1890,7 +1898,7 @@ unsigned ByteCodeParser::getInliningBalance(const CallLinkStatus& callLinkStatus > } > > ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining( >- Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, >+ Node* callTargetNode, VirtualRegister result, const CallLinkStatus& callLinkStatus, > int registerOffset, VirtualRegister thisArgument, > int argumentCountIncludingThis, > unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction) >@@ -1905,7 +1913,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining( > // this in cases where we don't need control flow diamonds to check the callee. 
> if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) { > return handleCallVariant( >- callTargetNode, resultOperand, callLinkStatus[0], registerOffset, thisArgument, >+ callTargetNode, result, callLinkStatus[0], registerOffset, thisArgument, > argumentCountIncludingThis, nextOffset, kind, prediction, inliningBalance, nullptr, true); > } > >@@ -1995,7 +2003,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining( > Node* myCallTargetNode = getDirect(calleeReg); > > auto inliningResult = handleCallVariant( >- myCallTargetNode, resultOperand, callLinkStatus[i], registerOffset, >+ myCallTargetNode, result, callLinkStatus[i], registerOffset, > thisArgument, argumentCountIncludingThis, nextOffset, kind, prediction, > inliningBalance, continuationBlock, false); > >@@ -2032,7 +2040,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining( > Node* myCallTargetNode = getDirect(calleeReg); > if (couldTakeSlowPath) { > addCall( >- resultOperand, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis, >+ result, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis, > registerOffset, prediction); > VERBOSE_LOG("We added a call in the slow path\n"); > } else { >@@ -2040,7 +2048,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining( > addToGraph(Phantom, myCallTargetNode); > emitArgumentPhantoms(registerOffset, argumentCountIncludingThis); > >- set(VirtualRegister(resultOperand), addToGraph(BottomValue)); >+ set(result, addToGraph(BottomValue)); > VERBOSE_LOG("couldTakeSlowPath was false\n"); > } > >@@ -2065,28 +2073,28 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining( > } > > template<typename ChecksFunctor> >-bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks) >+bool ByteCodeParser::handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int 
argumentCountIncludingThis, const ChecksFunctor& insertChecks) > { > ASSERT(op == ArithMin || op == ArithMax); > > if (argumentCountIncludingThis == 1) { > insertChecks(); >- double result = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity(); >- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(result))))); >+ double limit = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity(); >+ set(result, addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(limit))))); > return true; > } > > if (argumentCountIncludingThis == 2) { > insertChecks(); >- Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset))); >- addToGraph(Phantom, Edge(result, NumberUse)); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset))); >+ addToGraph(Phantom, Edge(resultNode, NumberUse)); >+ set(result, resultNode); > return true; > } > > if (argumentCountIncludingThis == 3) { > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)))); >+ set(result, addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)))); > return true; > } > >@@ -2095,7 +2103,7 @@ bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOf > } > > template<typename ChecksFunctor> >-bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks) >+bool ByteCodeParser::handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const 
ChecksFunctor& insertChecks) > { > VERBOSE_LOG(" The intrinsic is ", intrinsic, "\n"); > >@@ -2105,7 +2113,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > // o.__defineSetter__("foo", Math.pow) > // > // Which is extremely amusing, but probably not worth optimizing. >- if (!VirtualRegister(resultOperand).isValid()) >+ if (!result.isValid()) > return false; > > switch (intrinsic) { >@@ -2115,7 +2123,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > case AbsIntrinsic: { > if (argumentCountIncludingThis == 1) { // Math.abs() > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN))); >+ set(result, addToGraph(JSConstant, OpInfo(m_constantNaN))); > return true; > } > >@@ -2126,15 +2134,15 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset))); > if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) > node->mergeFlags(NodeMayOverflowInt32InDFG); >- set(VirtualRegister(resultOperand), node); >+ set(result, node); > return true; > } > > case MinIntrinsic: >- return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis, insertChecks); >+ return handleMinMax(result, ArithMin, registerOffset, argumentCountIncludingThis, insertChecks); > > case MaxIntrinsic: >- return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis, insertChecks); >+ return handleMinMax(result, ArithMax, registerOffset, argumentCountIncludingThis, insertChecks); > > #define DFG_ARITH_UNARY(capitalizedName, lowerName) \ > case capitalizedName##Intrinsic: >@@ -2143,7 +2151,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > { > if (argumentCountIncludingThis == 1) { > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(JSConstant, 
OpInfo(m_constantNaN))); >+ set(result, addToGraph(JSConstant, OpInfo(m_constantNaN))); > return true; > } > Arith::UnaryType type = Arith::UnaryType::Sin; >@@ -2158,7 +2166,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > RELEASE_ASSERT_NOT_REACHED(); > } > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset)))); >+ set(result, addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset)))); > return true; > } > >@@ -2166,7 +2174,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > case SqrtIntrinsic: { > if (argumentCountIncludingThis == 1) { > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN))); >+ set(result, addToGraph(JSConstant, OpInfo(m_constantNaN))); > return true; > } > >@@ -2182,7 +2190,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > RELEASE_ASSERT_NOT_REACHED(); > } > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset)))); >+ set(result, addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset)))); > return true; > } > >@@ -2190,13 +2198,13 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > if (argumentCountIncludingThis < 3) { > // Math.pow() and Math.pow(x) return NaN. 
> insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN))); >+ set(result, addToGraph(JSConstant, OpInfo(m_constantNaN))); > return true; > } > insertChecks(); > VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset); > VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset); >- set(VirtualRegister(resultOperand), addToGraph(ArithPow, get(xOperand), get(yOperand))); >+ set(result, addToGraph(ArithPow, get(xOperand), get(yOperand))); > return true; > } > >@@ -2211,7 +2219,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > if (static_cast<unsigned>(argumentCountIncludingThis) >= MIN_SPARSE_ARRAY_INDEX) > return false; > >- ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile, Array::Write); >+ ArrayMode arrayMode = getArrayMode(m_currentInstruction->as<OpCall>().metadata(m_codeBlock).arrayProfile, Array::Write); > if (!arrayMode.isJSArray()) > return false; > switch (arrayMode.type()) { >@@ -2225,7 +2233,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > for (int i = 0; i < argumentCountIncludingThis; ++i) > addVarArgChild(get(virtualRegisterForArgument(i, registerOffset))); > Node* arrayPush = addToGraph(Node::VarArg, ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction)); >- set(VirtualRegister(resultOperand), arrayPush); >+ set(result, arrayPush); > > return true; > } >@@ -2249,7 +2257,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) > return false; > >- ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile, Array::Read); >+ ArrayMode arrayMode = getArrayMode(m_currentInstruction->as<OpCall>().metadata(m_codeBlock).arrayProfile, Array::Read); > if (!arrayMode.isJSArray()) > return false; > >@@ -2314,7 
+2322,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > addVarArgChild(addToGraph(GetButterfly, array)); > > Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo()); >- set(VirtualRegister(resultOperand), arraySlice); >+ set(result, arraySlice); > return true; > } > >@@ -2338,7 +2346,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)) > return false; > >- ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile, Array::Read); >+ ArrayMode arrayMode = getArrayMode(m_currentInstruction->as<OpCall>().metadata(m_codeBlock).arrayProfile, Array::Read); > if (!arrayMode.isJSArray()) > return false; > >@@ -2379,7 +2387,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > addVarArgChild(nullptr); > > Node* node = addToGraph(Node::VarArg, ArrayIndexOf, OpInfo(arrayMode.asWord()), OpInfo()); >- set(VirtualRegister(resultOperand), node); >+ set(result, node); > return true; > } > >@@ -2398,7 +2406,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > if (argumentCountIncludingThis != 1) > return false; > >- ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile, Array::Write); >+ ArrayMode arrayMode = getArrayMode(m_currentInstruction->as<OpCall>().metadata(m_codeBlock).arrayProfile, Array::Write); > if (!arrayMode.isJSArray()) > return false; > switch (arrayMode.type()) { >@@ -2408,7 +2416,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > case Array::ArrayStorage: { > insertChecks(); > Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset))); >- set(VirtualRegister(resultOperand), arrayPop); >+ set(result, arrayPop); > return true; > } > >@@ 
-2491,19 +2499,19 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > for (unsigned i = 0; i < numArgs; ++i) > args.append(get(virtualRegisterForArgument(1 + i, registerOffset))); > >- Node* result; >+ Node* resultNode; > if (numArgs + 1 <= 3) { > while (args.size() < 3) > args.append(nullptr); >- result = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]); >+ resultNode = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]); > } else { > for (Node* node : args) > addVarArgChild(node); > addVarArgChild(nullptr); >- result = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction)); >+ resultNode = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction)); > } > >- set(VirtualRegister(resultOperand), result); >+ set(result, resultNode); > return true; > } > >@@ -2524,7 +2532,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > VirtualRegister radixOperand = virtualRegisterForArgument(2, registerOffset); > parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand)); > } >- set(VirtualRegister(resultOperand), parseInt); >+ set(result, parseInt); > return true; > } > >@@ -2537,7 +2545,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset); > Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand)); > >- set(VirtualRegister(resultOperand), charCode); >+ set(result, charCode); > return true; > } > >@@ -2550,16 +2558,16 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > 
VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset); > Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand)); > >- set(VirtualRegister(resultOperand), charCode); >+ set(result, charCode); > return true; > } > case Clz32Intrinsic: { > insertChecks(); > if (argumentCountIncludingThis == 1) >- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32))))); >+ set(result, addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32))))); > else { > Node* operand = get(virtualRegisterForArgument(1, registerOffset)); >- set(VirtualRegister(resultOperand), addToGraph(ArithClz32, operand)); >+ set(result, addToGraph(ArithClz32, operand)); > } > return true; > } >@@ -2571,7 +2579,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset); > Node* charCode = addToGraph(StringFromCharCode, get(indexOperand)); > >- set(VirtualRegister(resultOperand), charCode); >+ set(result, charCode); > > return true; > } >@@ -2582,7 +2590,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > > insertChecks(); > Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset))); >- set(VirtualRegister(resultOperand), regExpExec); >+ set(result, regExpExec); > > return true; > } >@@ -2633,7 +2641,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > insertChecks(); > Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset)); > Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset))); >- 
set(VirtualRegister(resultOperand), regExpExec); >+ set(result, regExpExec); > > return true; > } >@@ -2643,7 +2651,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > > insertChecks(); > Node* regExpMatch = addToGraph(RegExpMatchFast, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset))); >- set(VirtualRegister(resultOperand), regExpMatch); >+ set(result, regExpMatch); > return true; > } > >@@ -2652,7 +2660,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > return false; > > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset)))); >+ set(result, addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset)))); > return true; > } > >@@ -2661,7 +2669,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > return false; > > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)))); >+ set(result, addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)))); > return true; > } > >@@ -2670,7 +2678,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > return false; > > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)))); >+ set(result, addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)))); > return true; > } > >@@ -2679,7 +2687,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > return false; > > insertChecks(); >- 
set(VirtualRegister(resultOperand), addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse))); >+ set(result, addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse))); > return true; > } > >@@ -2687,7 +2695,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > ASSERT(argumentCountIncludingThis == 2); > > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)))); >+ set(result, addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)))); > return true; > } > >@@ -2738,8 +2746,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > > insertChecks(); > >- Node* result = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))); >+ set(result, resultNode); > return true; > } > >@@ -2748,8 +2756,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > return false; > > insertChecks(); >- Node* result = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), 
get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))); >+ set(result, resultNode); > return true; > } > >@@ -2759,7 +2767,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > case TruncIntrinsic: { > if (argumentCountIncludingThis == 1) { > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN))); >+ set(result, addToGraph(JSConstant, OpInfo(m_constantNaN))); > return true; > } > insertChecks(); >@@ -2776,7 +2784,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > op = ArithTrunc; > } > Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand); >- set(VirtualRegister(resultOperand), roundNode); >+ set(result, roundNode); > return true; > } > case IMulIntrinsic: { >@@ -2787,7 +2795,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset); > Node* left = get(leftOperand); > Node* right = get(rightOperand); >- set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right)); >+ set(result, addToGraph(ArithIMul, left, right)); > return true; > } > >@@ -2795,32 +2803,32 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > if (argumentCountIncludingThis != 1) > return false; > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(ArithRandom)); >+ set(result, addToGraph(ArithRandom)); > return true; > } > > case DFGTrueIntrinsic: { > insertChecks(); >- set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true))); >+ set(result, jsConstant(jsBoolean(true))); > return true; > } > > case FTLTrueIntrinsic: { > insertChecks(); >- set(VirtualRegister(resultOperand), jsConstant(jsBoolean(isFTL(m_graph.m_plan.mode)))); >+ set(result, jsConstant(jsBoolean(isFTL(m_graph.m_plan.mode)))); > 
return true; > } > > case OSRExitIntrinsic: { > insertChecks(); > addToGraph(ForceOSRExit); >- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined))); >+ set(result, addToGraph(JSConstant, OpInfo(m_constantUndefined))); > return true; > } > > case IsFinalTierIntrinsic: { > insertChecks(); >- set(VirtualRegister(resultOperand), >+ set(result, > jsConstant(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true))); > return true; > } >@@ -2832,7 +2840,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > if (node->hasHeapPrediction()) > node->setHeapPrediction(SpecInt32Only); > } >- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined))); >+ set(result, addToGraph(JSConstant, OpInfo(m_constantUndefined))); > return true; > } > >@@ -2842,7 +2850,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > Node* node = get(virtualRegisterForArgument(i, registerOffset)); > addToGraph(Phantom, Edge(node, Int32Use)); > } >- set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true))); >+ set(result, jsConstant(jsBoolean(true))); > return true; > } > >@@ -2852,9 +2860,9 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > insertChecks(); > VirtualRegister operand = virtualRegisterForArgument(1, registerOffset); > if (enableInt52()) >- set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand))); >+ set(result, addToGraph(FiatInt52, get(operand))); > else >- set(VirtualRegister(resultOperand), get(operand)); >+ set(result, get(operand)); > return true; > } > >@@ -2868,8 +2876,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > Node* normalizedKey = addToGraph(NormalizeMapKey, key); > Node* hash = addToGraph(MapHash, normalizedKey); > Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(normalizedKey), Edge(hash)); >- Node* result = 
addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket); >+ set(result, resultNode); > return true; > } > >@@ -2893,8 +2901,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > > FrozenValue* frozenPointer = m_graph.freeze(sentinel); > Node* invertedResult = addToGraph(CompareEqPtr, OpInfo(frozenPointer), bucket); >- Node* result = addToGraph(LogicalNot, invertedResult); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(LogicalNot, invertedResult); >+ set(result, resultNode); > return true; > } > >@@ -2908,7 +2916,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > Node* normalizedKey = addToGraph(NormalizeMapKey, key); > Node* hash = addToGraph(MapHash, normalizedKey); > addToGraph(SetAdd, base, normalizedKey, hash); >- set(VirtualRegister(resultOperand), base); >+ set(result, base); > return true; > } > >@@ -2929,7 +2937,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > addVarArgChild(value); > addVarArgChild(hash); > addToGraph(Node::VarArg, MapSet, OpInfo(0), OpInfo(0)); >- set(VirtualRegister(resultOperand), base); >+ set(result, base); > return true; > } > >@@ -2940,8 +2948,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > insertChecks(); > Node* map = get(virtualRegisterForArgument(1, registerOffset)); > UseKind useKind = intrinsic == JSSetBucketHeadIntrinsic ? 
SetObjectUse : MapObjectUse; >- Node* result = addToGraph(GetMapBucketHead, Edge(map, useKind)); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(GetMapBucketHead, Edge(map, useKind)); >+ set(result, resultNode); > return true; > } > >@@ -2952,8 +2960,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > insertChecks(); > Node* bucket = get(virtualRegisterForArgument(1, registerOffset)); > BucketOwnerType type = intrinsic == JSSetBucketNextIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map; >- Node* result = addToGraph(GetMapBucketNext, OpInfo(type), bucket); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(GetMapBucketNext, OpInfo(type), bucket); >+ set(result, resultNode); > return true; > } > >@@ -2964,8 +2972,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > insertChecks(); > Node* bucket = get(virtualRegisterForArgument(1, registerOffset)); > BucketOwnerType type = intrinsic == JSSetBucketKeyIntrinsic ? 
BucketOwnerType::Set : BucketOwnerType::Map; >- Node* result = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket); >+ set(result, resultNode); > return true; > } > >@@ -2974,8 +2982,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > > insertChecks(); > Node* bucket = get(virtualRegisterForArgument(1, registerOffset)); >- Node* result = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket); >+ set(result, resultNode); > return true; > } > >@@ -2992,9 +3000,9 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > addToGraph(Check, Edge(key, ObjectUse)); > Node* hash = addToGraph(MapHash, key); > Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use)); >- Node* result = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder); >+ Node* resultNode = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder); > >- set(VirtualRegister(resultOperand), result); >+ set(result, resultNode); > return true; > } > >@@ -3012,9 +3020,9 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > Node* hash = addToGraph(MapHash, key); > Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use)); > Node* invertedResult = addToGraph(IsEmpty, holder); >- Node* result = addToGraph(LogicalNot, invertedResult); >+ Node* resultNode = addToGraph(LogicalNot, invertedResult); > >- set(VirtualRegister(resultOperand), result); >+ set(result, resultNode); > return true; > } > >@@ -3032,9 
+3040,9 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > Node* hash = addToGraph(MapHash, key); > Node* holder = addToGraph(WeakMapGet, Edge(map, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use)); > Node* invertedResult = addToGraph(IsEmpty, holder); >- Node* result = addToGraph(LogicalNot, invertedResult); >+ Node* resultNode = addToGraph(LogicalNot, invertedResult); > >- set(VirtualRegister(resultOperand), result); >+ set(result, resultNode); > return true; > } > >@@ -3051,7 +3059,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > addToGraph(Check, Edge(key, ObjectUse)); > Node* hash = addToGraph(MapHash, key); > addToGraph(WeakSetAdd, Edge(base, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use)); >- set(VirtualRegister(resultOperand), base); >+ set(result, base); > return true; > } > >@@ -3075,7 +3083,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > addVarArgChild(Edge(value)); > addVarArgChild(Edge(hash, Int32Use)); > addToGraph(Node::VarArg, WeakMapSet, OpInfo(0), OpInfo(0)); >- set(VirtualRegister(resultOperand), base); >+ set(result, base); > return true; > } > >@@ -3094,8 +3102,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > insertChecks(); > Node* object = get(virtualRegisterForArgument(0, registerOffset)); > Node* key = get(virtualRegisterForArgument(1, registerOffset)); >- Node* result = addToGraph(HasOwnProperty, object, key); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(HasOwnProperty, object, key); >+ set(result, resultNode); > return true; > } > >@@ -3112,8 +3120,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > Node* end = nullptr; > if (argumentCountIncludingThis > 2) > end = get(virtualRegisterForArgument(2, registerOffset)); >- Node* result = addToGraph(StringSlice, thisString, start, end); >- 
set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(StringSlice, thisString, start, end); >+ set(result, resultNode); > return true; > } > >@@ -3126,8 +3134,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > > insertChecks(); > Node* thisString = get(virtualRegisterForArgument(0, registerOffset)); >- Node* result = addToGraph(ToLowerCase, thisString); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(ToLowerCase, thisString); >+ set(result, resultNode); > return true; > } > >@@ -3141,12 +3149,12 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > insertChecks(); > Node* thisNumber = get(virtualRegisterForArgument(0, registerOffset)); > if (argumentCountIncludingThis == 1) { >- Node* result = addToGraph(ToString, thisNumber); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(ToString, thisNumber); >+ set(result, resultNode); > } else { > Node* radix = get(virtualRegisterForArgument(1, registerOffset)); >- Node* result = addToGraph(NumberToStringWithRadix, thisNumber, radix); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(NumberToStringWithRadix, thisNumber, radix); >+ set(result, resultNode); > } > return true; > } >@@ -3157,8 +3165,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > > insertChecks(); > Node* input = get(virtualRegisterForArgument(1, registerOffset)); >- Node* result = addToGraph(NumberIsInteger, input); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(NumberIsInteger, input); >+ set(result, resultNode); > return true; > } > >@@ -3170,7 +3178,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > if (!isFTL(m_graph.m_plan.mode)) > return false; > insertChecks(); >- set(VirtualRegister(resultOperand), >+ set(result, > addToGraph(CPUIntrinsic, OpInfo(intrinsic), 
OpInfo())); > return true; > #else >@@ -3185,7 +3193,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > } > > template<typename ChecksFunctor> >-bool ByteCodeParser::handleDOMJITCall(Node* callTarget, int resultOperand, const DOMJIT::Signature* signature, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks) >+bool ByteCodeParser::handleDOMJITCall(Node* callTarget, VirtualRegister result, const DOMJIT::Signature* signature, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks) > { > if (argumentCountIncludingThis != static_cast<int>(1 + signature->argumentCount)) > return false; >@@ -3198,13 +3206,13 @@ bool ByteCodeParser::handleDOMJITCall(Node* callTarget, int resultOperand, const > ASSERT_WITH_MESSAGE(argumentCountIncludingThis <= JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS, "Currently CallDOM does not support an arbitrary length arguments."); > > insertChecks(); >- addCall(resultOperand, Call, signature, callTarget, argumentCountIncludingThis, registerOffset, prediction); >+ addCall(result, Call, signature, callTarget, argumentCountIncludingThis, registerOffset, prediction); > return true; > } > > > template<typename ChecksFunctor> >-bool ByteCodeParser::handleIntrinsicGetter(int resultOperand, SpeculatedType prediction, const GetByIdVariant& variant, Node* thisNode, const ChecksFunctor& insertChecks) >+bool ByteCodeParser::handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& variant, Node* thisNode, const ChecksFunctor& insertChecks) > { > switch (variant.intrinsic()) { > case TypedArrayByteLengthIntrinsic: { >@@ -3224,14 +3232,14 @@ bool ByteCodeParser::handleIntrinsicGetter(int resultOperand, SpeculatedType pre > Node* lengthNode = addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode); > > if (!logSize) { >- 
set(VirtualRegister(resultOperand), lengthNode); >+ set(result, lengthNode); > return true; > } > > // We can use a BitLShift here because typed arrays will never have a byteLength > // that overflows int32. > Node* shiftNode = jsConstant(jsNumber(logSize)); >- set(VirtualRegister(resultOperand), addToGraph(BitLShift, lengthNode, shiftNode)); >+ set(result, addToGraph(BitLShift, lengthNode, shiftNode)); > > return true; > } >@@ -3248,7 +3256,7 @@ bool ByteCodeParser::handleIntrinsicGetter(int resultOperand, SpeculatedType pre > ASSERT(arrayType != Array::Generic); > }); > >- set(VirtualRegister(resultOperand), addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode)); >+ set(result, addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode)); > > return true; > >@@ -3266,7 +3274,7 @@ bool ByteCodeParser::handleIntrinsicGetter(int resultOperand, SpeculatedType pre > ASSERT(arrayType != Array::Generic); > }); > >- set(VirtualRegister(resultOperand), addToGraph(GetTypedArrayByteOffset, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode)); >+ set(result, addToGraph(GetTypedArrayByteOffset, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode)); > > return true; > } >@@ -3297,11 +3305,11 @@ bool ByteCodeParser::handleIntrinsicGetter(int resultOperand, SpeculatedType pre > // OK, only one prototype is found. We perform constant folding here. > // This information is important for super's constructor call to get new.target constant. 
> if (prototype && canFold) { >- set(VirtualRegister(resultOperand), weakJSConstant(prototype)); >+ set(result, weakJSConstant(prototype)); > return true; > } > >- set(VirtualRegister(resultOperand), addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), thisNode)); >+ set(result, addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), thisNode)); > return true; > } > >@@ -3318,7 +3326,7 @@ static void blessCallDOMGetter(Node* node) > node->clearFlags(NodeMustGenerate); > } > >-bool ByteCodeParser::handleDOMJITGetter(int resultOperand, const GetByIdVariant& variant, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction) >+bool ByteCodeParser::handleDOMJITGetter(VirtualRegister result, const GetByIdVariant& variant, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction) > { > if (!variant.domAttribute()) > return false; >@@ -3358,11 +3366,11 @@ bool ByteCodeParser::handleDOMJITGetter(int resultOperand, const GetByIdVariant& > } else > callDOMGetterNode = addToGraph(CallDOMGetter, OpInfo(callDOMGetterData), OpInfo(prediction), thisNode); > blessCallDOMGetter(callDOMGetterNode); >- set(VirtualRegister(resultOperand), callDOMGetterNode); >+ set(result, callDOMGetterNode); > return true; > } > >-bool ByteCodeParser::handleModuleNamespaceLoad(int resultOperand, SpeculatedType prediction, Node* base, GetByIdStatus getById) >+bool ByteCodeParser::handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType prediction, Node* base, GetByIdStatus getById) > { > if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) > return false; >@@ -3385,16 +3393,16 @@ bool ByteCodeParser::handleModuleNamespaceLoad(int resultOperand, SpeculatedType > // would recompile. But if we can fold it here, we avoid the exit. 
> m_graph.freeze(getById.moduleEnvironment()); > if (JSValue value = m_graph.tryGetConstantClosureVar(getById.moduleEnvironment(), getById.scopeOffset())) { >- set(VirtualRegister(resultOperand), weakJSConstant(value)); >+ set(result, weakJSConstant(value)); > return true; > } >- set(VirtualRegister(resultOperand), addToGraph(GetClosureVar, OpInfo(getById.scopeOffset().offset()), OpInfo(prediction), weakJSConstant(getById.moduleEnvironment()))); >+ set(result, addToGraph(GetClosureVar, OpInfo(getById.scopeOffset().offset()), OpInfo(prediction), weakJSConstant(getById.moduleEnvironment()))); > return true; > } > > template<typename ChecksFunctor> > bool ByteCodeParser::handleTypedArrayConstructor( >- int resultOperand, InternalFunction* function, int registerOffset, >+ VirtualRegister result, InternalFunction* function, int registerOffset, > int argumentCountIncludingThis, TypedArrayType type, const ChecksFunctor& insertChecks) > { > if (!isTypedView(type)) >@@ -3444,21 +3452,21 @@ bool ByteCodeParser::handleTypedArrayConstructor( > return false; > > insertChecks(); >- set(VirtualRegister(resultOperand), >+ set(result, > addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset)))); > return true; > } > > template<typename ChecksFunctor> > bool ByteCodeParser::handleConstantInternalFunction( >- Node* callTargetNode, int resultOperand, InternalFunction* function, int registerOffset, >+ Node* callTargetNode, VirtualRegister result, InternalFunction* function, int registerOffset, > int argumentCountIncludingThis, CodeSpecializationKind kind, SpeculatedType prediction, const ChecksFunctor& insertChecks) > { > VERBOSE_LOG(" Handling constant internal function ", JSValue(function), "\n"); > > // It so happens that the code below assumes that the result operand is valid. It's extremely > // unlikely that the result operand would be invalid - you'd have to call this via a setter call. 
>- if (!VirtualRegister(resultOperand).isValid()) >+ if (!result.isValid()) > return false; > > if (kind == CodeForConstruct) { >@@ -3476,14 +3484,14 @@ bool ByteCodeParser::handleConstantInternalFunction( > > insertChecks(); > if (argumentCountIncludingThis == 2) { >- set(VirtualRegister(resultOperand), >+ set(result, > addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset)))); > return true; > } > > for (int i = 1; i < argumentCountIncludingThis; ++i) > addVarArgChild(get(virtualRegisterForArgument(i, registerOffset))); >- set(VirtualRegister(resultOperand), >+ set(result, > addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(argumentCountIncludingThis - 1))); > return true; > } >@@ -3494,9 +3502,9 @@ bool ByteCodeParser::handleConstantInternalFunction( > > insertChecks(); > if (argumentCountIncludingThis <= 1) >- set(VirtualRegister(resultOperand), jsConstant(jsNumber(0))); >+ set(result, jsConstant(jsNumber(0))); > else >- set(VirtualRegister(resultOperand), addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)))); >+ set(result, addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)))); > > return true; > } >@@ -3504,17 +3512,17 @@ bool ByteCodeParser::handleConstantInternalFunction( > if (function->classInfo() == StringConstructor::info()) { > insertChecks(); > >- Node* result; >+ Node* resultNode; > > if (argumentCountIncludingThis <= 1) >- result = jsConstant(m_vm->smallStrings.emptyString()); >+ resultNode = jsConstant(m_vm->smallStrings.emptyString()); > else >- result = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset))); >+ resultNode = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset))); > > if (kind == CodeForConstruct) >- result = addToGraph(NewStringObject, 
OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->stringObjectStructure())), result); >+ resultNode = addToGraph(NewStringObject, OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->stringObjectStructure())), resultNode); > >- set(VirtualRegister(resultOperand), result); >+ set(result, resultNode); > return true; > } > >@@ -3522,20 +3530,20 @@ bool ByteCodeParser::handleConstantInternalFunction( > if (function->classInfo() == ObjectConstructor::info() && kind == CodeForCall) { > insertChecks(); > >- Node* result; >+ Node* resultNode; > if (argumentCountIncludingThis <= 1) >- result = addToGraph(NewObject, OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->objectStructureForObjectConstructor()))); >+ resultNode = addToGraph(NewObject, OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->objectStructureForObjectConstructor()))); > else >- result = addToGraph(CallObjectConstructor, OpInfo(m_graph.freeze(function->globalObject(*m_vm))), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))); >- set(VirtualRegister(resultOperand), result); >+ resultNode = addToGraph(CallObjectConstructor, OpInfo(m_graph.freeze(function->globalObject(*m_vm))), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))); >+ set(result, resultNode); > return true; > } > > for (unsigned typeIndex = 0; typeIndex < NumberOfTypedArrayTypes; ++typeIndex) { >- bool result = handleTypedArrayConstructor( >- resultOperand, function, registerOffset, argumentCountIncludingThis, >+ bool handled = handleTypedArrayConstructor( >+ result, function, registerOffset, argumentCountIncludingThis, > indexToTypedArrayType(typeIndex), insertChecks); >- if (result) >+ if (handled) > return true; > } > >@@ -3968,7 +3976,7 @@ Node* ByteCodeParser::store(Node* base, unsigned identifier, const PutByIdVarian > } > > void ByteCodeParser::handleGetById( >- int destinationOperand, SpeculatedType prediction, Node* base, unsigned 
identifierNumber, >+ VirtualRegister destination, SpeculatedType prediction, Node* base, unsigned identifierNumber, > GetByIdStatus getByIdStatus, AccessType type, unsigned instructionSize) > { > // Attempt to reduce the set of things in the GetByIdStatus. >@@ -3996,7 +4004,7 @@ void ByteCodeParser::handleGetById( > getById = getByIdStatus.makesCalls() ? GetByIdDirectFlush : GetByIdDirect; > > if (getById != TryGetById && getByIdStatus.isModuleNamespace()) { >- if (handleModuleNamespaceLoad(destinationOperand, prediction, base, getByIdStatus)) { >+ if (handleModuleNamespaceLoad(destination, prediction, base, getByIdStatus)) { > if (UNLIKELY(m_graph.compilation())) > m_graph.compilation()->noticeInlinedGetById(); > return; >@@ -4010,7 +4018,7 @@ void ByteCodeParser::handleGetById( > ASSERT(!getByIdStatus.makesCalls()); > GetByIdVariant variant = getByIdStatus[0]; > ASSERT(variant.domAttribute()); >- if (handleDOMJITGetter(destinationOperand, variant, base, identifierNumber, prediction)) { >+ if (handleDOMJITGetter(destination, variant, base, identifierNumber, prediction)) { > if (UNLIKELY(m_graph.compilation())) > m_graph.compilation()->noticeInlinedGetById(); > return; >@@ -4019,7 +4027,7 @@ void ByteCodeParser::handleGetById( > > ASSERT(type == AccessType::Get || type == AccessType::GetDirect || !getByIdStatus.makesCalls()); > if (!getByIdStatus.isSimple() || !getByIdStatus.numVariants() || !Options::useAccessInlining()) { >- set(VirtualRegister(destinationOperand), >+ set(destination, > addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); > return; > } >@@ -4032,7 +4040,7 @@ void ByteCodeParser::handleGetById( > if (getByIdStatus.makesCalls() || !isFTL(m_graph.m_plan.mode) > || !Options::usePolymorphicAccessInlining() > || getByIdStatus.numVariants() > Options::maxPolymorphicAccessInliningListSize()) { >- set(VirtualRegister(destinationOperand), >+ set(destination, > addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); > 
return; > } >@@ -4046,7 +4054,7 @@ void ByteCodeParser::handleGetById( > // of checks and those checks are not watchpointable. > for (const GetByIdVariant& variant : getByIdStatus.variants()) { > if (variant.intrinsic() != NoIntrinsic) { >- set(VirtualRegister(destinationOperand), >+ set(destination, > addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); > return; > } >@@ -4061,7 +4069,7 @@ void ByteCodeParser::handleGetById( > > GetByOffsetMethod method = planLoad(variant.conditionSet()); > if (!method) { >- set(VirtualRegister(destinationOperand), >+ set(destination, > addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); > return; > } >@@ -4076,7 +4084,7 @@ void ByteCodeParser::handleGetById( > MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add(); > data->cases = cases; > data->identifierNumber = identifierNumber; >- set(VirtualRegister(destinationOperand), >+ set(destination, > addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base)); > return; > } >@@ -4088,7 +4096,7 @@ void ByteCodeParser::handleGetById( > > Node* loadedValue = load(prediction, base, identifierNumber, variant); > if (!loadedValue) { >- set(VirtualRegister(destinationOperand), >+ set(destination, > addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); > return; > } >@@ -4098,13 +4106,13 @@ void ByteCodeParser::handleGetById( > > ASSERT(type == AccessType::Get || type == AccessType::GetDirect || !variant.callLinkStatus()); > if (!variant.callLinkStatus() && variant.intrinsic() == NoIntrinsic) { >- set(VirtualRegister(destinationOperand), loadedValue); >+ set(destination, loadedValue); > return; > } > > Node* getter = addToGraph(GetGetter, loadedValue); > >- if (handleIntrinsicGetter(destinationOperand, prediction, variant, base, >+ if (handleIntrinsicGetter(destination, prediction, variant, base, > [&] () { > addToGraph(CheckCell, OpInfo(m_graph.freeze(variant.intrinsicFunction())), getter); > })) { >@@ 
-4150,7 +4158,7 @@ void ByteCodeParser::handleGetById( > addToGraph(ExitOK); > > handleCall( >- destinationOperand, Call, InlineCallFrame::GetterCall, instructionSize, >+ destination, Call, InlineCallFrame::GetterCall, instructionSize, > getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction); > } > >@@ -4338,7 +4346,7 @@ void ByteCodeParser::handlePutById( > addToGraph(ExitOK); > > handleCall( >- VirtualRegister().offset(), Call, InlineCallFrame::SetterCall, >+ VirtualRegister(), Call, InlineCallFrame::SetterCall, > OPCODE_LENGTH(op_put_by_id), setter, numberOfParameters - 1, registerOffset, > *variant.callLinkStatus(), SpecOther); > return; >@@ -4376,7 +4384,7 @@ static uint64_t makeDynamicVarOpInfo(unsigned identifierNumber, unsigned getPutI > // Doesn't allow using `continue`. > #define NEXT_OPCODE(name) \ > if (true) { \ >- m_currentIndex += OPCODE_LENGTH(name); \ >+ m_currentIndex += currentInstruction->size(); \ > goto WTF_CONCAT(NEXT_OPCODE_, __LINE__); /* Need a unique label: usable more than once per function. */ \ > } else \ > WTF_CONCAT(NEXT_OPCODE_, __LINE__): \ >@@ -4406,7 +4414,7 @@ static uint64_t makeDynamicVarOpInfo(unsigned identifierNumber, unsigned getPutI > > void ByteCodeParser::parseBlock(unsigned limit) > { >- Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin(); >+ auto& instructions = m_inlineStackTop->m_codeBlock->instructions(); > unsigned blockBegin = m_currentIndex; > > // If we are the first basic block, introduce markers for arguments. This allows >@@ -4458,9 +4466,9 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > // Switch on the current bytecode opcode. >- Instruction* currentInstruction = instructionsBegin + m_currentIndex; >+ const Instruction* currentInstruction = instructions.at(m_currentIndex).ptr(); > m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls. 
>- OpcodeID opcodeID = Interpreter::getOpcodeID(currentInstruction->u.opcode); >+ OpcodeID opcodeID = currentInstruction->opcodeID(); > > VERBOSE_LOG(" parsing ", currentCodeOrigin(), ": ", opcodeID, "\n"); > >@@ -4485,8 +4493,9 @@ void ByteCodeParser::parseBlock(unsigned limit) > case op_to_this: { > Node* op1 = getThis(); > if (op1->op() != ToThis) { >- Structure* cachedStructure = currentInstruction[2].u.structure.get(); >- if (currentInstruction[3].u.toThisStatus != ToThisOK >+ auto metadata = currentInstruction->as<OpToThis>().metadata(m_codeBlock); >+ Structure* cachedStructure = metadata.cachedStructure.get(); >+ if (metadata.toThisStatus != ToThisOK > || !cachedStructure > || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis > || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex) >@@ -4504,12 +4513,12 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_create_this: { >- auto& bytecode = *reinterpret_cast<OpCreateThis*>(currentInstruction); >- Node* callee = get(VirtualRegister(bytecode.callee())); >+ auto bytecode = currentInstruction->as<OpCreateThis>(); >+ Node* callee = get(VirtualRegister(bytecode.callee)); > > JSFunction* function = callee->dynamicCastConstant<JSFunction*>(*m_vm); > if (!function) { >- JSCell* cachedFunction = bytecode.cachedCallee().unvalidatedGet(); >+ JSCell* cachedFunction = bytecode.metadata(m_codeBlock).cachedCallee.unvalidatedGet(); > if (cachedFunction > && cachedFunction != JSCell::seenMultipleCalleeObjects() > && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) { >@@ -4548,243 +4557,256 @@ void ByteCodeParser::parseBlock(unsigned limit) > ASSERT(isInlineOffset(knownPolyProtoOffset)); > addToGraph(PutByOffset, OpInfo(data), object, object, weakJSConstant(prototype)); > } >- set(VirtualRegister(bytecode.dst()), object); >+ set(VirtualRegister(bytecode.dst), object); > alreadyEmitted = true; > } > } > } > } > if (!alreadyEmitted) { >- 
set(VirtualRegister(bytecode.dst()), >- addToGraph(CreateThis, OpInfo(bytecode.inlineCapacity()), callee)); >+ set(VirtualRegister(bytecode.dst), >+ addToGraph(CreateThis, OpInfo(bytecode.inlineCapacity), callee)); > } > NEXT_OPCODE(op_create_this); > } > > case op_new_object: { >- set(VirtualRegister(currentInstruction[1].u.operand), >+ auto bytecode = currentInstruction->as<OpNewObject>(); >+ set(bytecode.dst, > addToGraph(NewObject, >- OpInfo(m_graph.registerStructure(currentInstruction[3].u.objectAllocationProfile->structure())))); >+ OpInfo(m_graph.registerStructure(bytecode.metadata(m_codeBlock).allocationProfile.structure())))); > NEXT_OPCODE(op_new_object); > } > > case op_new_array: { >- int startOperand = currentInstruction[2].u.operand; >- int numOperands = currentInstruction[3].u.operand; >- ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile; >+ auto bytecode = currentInstruction->as<OpNewArray>(); >+ int startOperand = bytecode.argv.offset(); >+ int numOperands = bytecode.argc; >+ ArrayAllocationProfile& profile = bytecode.metadata(m_codeBlock).allocationProfile; > for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx) > addVarArgChild(get(VirtualRegister(operandIdx))); >- unsigned vectorLengthHint = std::max<unsigned>(profile->vectorLengthHint(), numOperands); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(vectorLengthHint))); >+ unsigned vectorLengthHint = std::max<unsigned>(profile.vectorLengthHint(), numOperands); >+ set(bytecode.dst, addToGraph(Node::VarArg, NewArray, OpInfo(profile.selectIndexingType()), OpInfo(vectorLengthHint))); > NEXT_OPCODE(op_new_array); > } > > case op_new_array_with_spread: { >- int startOperand = currentInstruction[2].u.operand; >- int numOperands = currentInstruction[3].u.operand; >- const BitVector& bitVector = 
m_inlineStackTop->m_profiledBlock->unlinkedCodeBlock()->bitVector(currentInstruction[4].u.unsignedValue); >+ auto bytecode = currentInstruction->as<OpNewArrayWithSpread>(); >+ int startOperand = bytecode.argv.offset(); >+ int numOperands = bytecode.argc; >+ const BitVector& bitVector = m_inlineStackTop->m_profiledBlock->unlinkedCodeBlock()->bitVector(bytecode.bitVector); > for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx) > addVarArgChild(get(VirtualRegister(operandIdx))); > > BitVector* copy = m_graph.m_bitVectors.add(bitVector); > ASSERT(*copy == bitVector); > >- set(VirtualRegister(currentInstruction[1].u.operand), >+ set(bytecode.dst, > addToGraph(Node::VarArg, NewArrayWithSpread, OpInfo(copy))); > NEXT_OPCODE(op_new_array_with_spread); > } > > case op_spread: { >- set(VirtualRegister(currentInstruction[1].u.operand), >- addToGraph(Spread, get(VirtualRegister(currentInstruction[2].u.operand)))); >+ auto bytecode = currentInstruction->as<OpSpread>(); >+ set(bytecode.dst, >+ addToGraph(Spread, get(bytecode.argument))); > NEXT_OPCODE(op_spread); > } > > case op_new_array_with_size: { >- int lengthOperand = currentInstruction[2].u.operand; >- ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile; >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(VirtualRegister(lengthOperand)))); >+ auto bytecode = currentInstruction->as<OpNewArrayWithSize>(); >+ ArrayAllocationProfile& profile = bytecode.metadata(m_codeBlock).allocationProfile; >+ set(bytecode.dst, addToGraph(NewArrayWithSize, OpInfo(profile.selectIndexingType()), get(bytecode.length))); > NEXT_OPCODE(op_new_array_with_size); > } > > case op_new_array_buffer: { >- auto& bytecode = *reinterpret_cast<OpNewArrayBuffer*>(currentInstruction); >+ auto bytecode = currentInstruction->as<OpNewArrayBuffer>(); > // Unfortunately, we can't allocate a new JSImmutableButterfly if the 
profile tells us new information because we > // cannot allocate from compilation threads. > WTF::loadLoadFence(); >- FrozenValue* frozen = get(VirtualRegister(bytecode.immutableButterfly()))->constant(); >+ FrozenValue* frozen = get(VirtualRegister(bytecode.immutableButterfly))->constant(); > WTF::loadLoadFence(); > JSImmutableButterfly* immutableButterfly = frozen->cast<JSImmutableButterfly*>(); > NewArrayBufferData data { }; > data.indexingMode = immutableButterfly->indexingMode(); > data.vectorLengthHint = immutableButterfly->toButterfly()->vectorLength(); > >- set(VirtualRegister(bytecode.dst()), addToGraph(NewArrayBuffer, OpInfo(frozen), OpInfo(data.asQuadWord))); >+ set(VirtualRegister(bytecode.dst), addToGraph(NewArrayBuffer, OpInfo(frozen), OpInfo(data.asQuadWord))); > NEXT_OPCODE(op_new_array_buffer); > } > > case op_new_regexp: { >- VirtualRegister regExpRegister(currentInstruction[2].u.operand); >- ASSERT(regExpRegister.isConstant()); >- FrozenValue* frozenRegExp = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(regExpRegister.offset())); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(frozenRegExp), jsConstant(jsNumber(0)))); >+ auto bytecode = currentInstruction->as<OpNewRegexp>(); >+ ASSERT(bytecode.regexp.isConstant()); >+ FrozenValue* frozenRegExp = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.regexp.offset())); >+ set(bytecode.dst, addToGraph(NewRegexp, OpInfo(frozenRegExp), jsConstant(jsNumber(0)))); > NEXT_OPCODE(op_new_regexp); > } > > case op_get_rest_length: { >+ auto bytecode = currentInstruction->as<OpGetRestLength>(); > InlineCallFrame* inlineCallFrame = this->inlineCallFrame(); > Node* length; > if (inlineCallFrame && !inlineCallFrame->isVarargs()) { > unsigned argumentsLength = inlineCallFrame->argumentCountIncludingThis - 1; >- unsigned numParamsToSkip = currentInstruction[2].u.unsignedValue; > JSValue restLength; >- if (argumentsLength <= numParamsToSkip) 
>+ if (argumentsLength <= bytecode.numParametersToSkip) > restLength = jsNumber(0); > else >- restLength = jsNumber(argumentsLength - numParamsToSkip); >+ restLength = jsNumber(argumentsLength - bytecode.numParametersToSkip); > > length = jsConstant(restLength); > } else >- length = addToGraph(GetRestLength, OpInfo(currentInstruction[2].u.unsignedValue)); >- set(VirtualRegister(currentInstruction[1].u.operand), length); >+ length = addToGraph(GetRestLength, OpInfo(bytecode.numParametersToSkip)); >+ set(bytecode.dst, length); > NEXT_OPCODE(op_get_rest_length); > } > > case op_create_rest: { >+ auto bytecode = currentInstruction->as<OpCreateRest>(); > noticeArgumentsUse(); >- Node* arrayLength = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), >- addToGraph(CreateRest, OpInfo(currentInstruction[3].u.unsignedValue), arrayLength)); >+ Node* arrayLength = get(bytecode.arraySize); >+ set(bytecode.dst, >+ addToGraph(CreateRest, OpInfo(bytecode.numParametersToSkip), arrayLength)); > NEXT_OPCODE(op_create_rest); > } > > // === Bitwise operations === > > case op_bitand: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitAnd, op1, op2)); >+ auto bytecode = currentInstruction->as<OpBitand>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(BitAnd, op1, op2)); > NEXT_OPCODE(op_bitand); > } > > case op_bitor: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitOr, op1, op2)); >+ auto bytecode = currentInstruction->as<OpBitor>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(BitOr, op1, op2)); > 
NEXT_OPCODE(op_bitor); > } > > case op_bitxor: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitXor, op1, op2)); >+ auto bytecode = currentInstruction->as<OpBitxor>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(BitXor, op1, op2)); > NEXT_OPCODE(op_bitxor); > } > > case op_rshift: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), >- addToGraph(BitRShift, op1, op2)); >+ auto bytecode = currentInstruction->as<OpRshift>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(BitRShift, op1, op2)); > NEXT_OPCODE(op_rshift); > } > > case op_lshift: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), >- addToGraph(BitLShift, op1, op2)); >+ auto bytecode = currentInstruction->as<OpLshift>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(BitLShift, op1, op2)); > NEXT_OPCODE(op_lshift); > } > > case op_urshift: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), >- addToGraph(BitURShift, op1, op2)); >+ auto bytecode = currentInstruction->as<OpUrshift>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(BitURShift, op1, op2)); > NEXT_OPCODE(op_urshift); > } > > case op_unsigned: { >- set(VirtualRegister(currentInstruction[1].u.operand), >- makeSafe(addToGraph(UInt32ToNumber, 
get(VirtualRegister(currentInstruction[2].u.operand))))); >+ auto bytecode = currentInstruction->as<OpUnsigned>(); >+ set(bytecode.dst, makeSafe(addToGraph(UInt32ToNumber, get(bytecode.operand)))); > NEXT_OPCODE(op_unsigned); > } > > // === Increment/Decrement opcodes === > > case op_inc: { >- int srcDst = currentInstruction[1].u.operand; >- VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst); >- Node* op = get(srcDstVirtualRegister); >- set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne))))); >+ auto bytecode = currentInstruction->as<OpInc>(); >+ Node* op = get(bytecode.srcDst); >+ set(bytecode.srcDst, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne))))); > NEXT_OPCODE(op_inc); > } > > case op_dec: { >- int srcDst = currentInstruction[1].u.operand; >- VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst); >- Node* op = get(srcDstVirtualRegister); >- set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne))))); >+ auto bytecode = currentInstruction->as<OpDec>(); >+ Node* op = get(bytecode.srcDst); >+ set(bytecode.srcDst, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne))))); > NEXT_OPCODE(op_dec); > } > > // === Arithmetic operations === > > case op_add: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >+ auto bytecode = currentInstruction->as<OpAdd>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > if (op1->hasNumberResult() && op2->hasNumberResult()) >- set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithAdd, op1, op2))); >+ set(bytecode.dst, makeSafe(addToGraph(ArithAdd, op1, op2))); > else >- set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueAdd, op1, op2))); >+ set(bytecode.dst, makeSafe(addToGraph(ValueAdd, 
op1, op2))); > NEXT_OPCODE(op_add); > } > > case op_sub: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithSub, op1, op2))); >+ auto bytecode = currentInstruction->as<OpSub>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, makeSafe(addToGraph(ArithSub, op1, op2))); > NEXT_OPCODE(op_sub); > } > > case op_negate: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpNegate>(); >+ Node* op1 = get(VirtualRegister(bytecode.operand)); > if (op1->hasNumberResult()) >- set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithNegate, op1))); >+ set(bytecode.dst, makeSafe(addToGraph(ArithNegate, op1))); > else >- set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueNegate, op1))); >+ set(bytecode.dst, makeSafe(addToGraph(ValueNegate, op1))); > NEXT_OPCODE(op_negate); > } > > case op_mul: { > // Multiply requires that the inputs are not truncated, unfortunately. 
>- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMul, op1, op2))); >+ auto bytecode = currentInstruction->as<OpMul>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, makeSafe(addToGraph(ArithMul, op1, op2))); > NEXT_OPCODE(op_mul); > } > > case op_mod: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMod, op1, op2))); >+ auto bytecode = currentInstruction->as<OpMod>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, makeSafe(addToGraph(ArithMod, op1, op2))); > NEXT_OPCODE(op_mod); > } > > case op_pow: { > // FIXME: ArithPow(Untyped, Untyped) should be supported as the same to ArithMul, ArithSub etc. 
> // https://bugs.webkit.org/show_bug.cgi?id=160012 >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ArithPow, op1, op2)); >+ auto bytecode = currentInstruction->as<OpPow>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(ArithPow, op1, op2)); > NEXT_OPCODE(op_pow); > } > > case op_div: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), makeDivSafe(addToGraph(ArithDiv, op1, op2))); >+ auto bytecode = currentInstruction->as<OpDiv>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, makeDivSafe(addToGraph(ArithDiv, op1, op2))); > NEXT_OPCODE(op_div); > } > >@@ -4798,43 +4820,46 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_mov: { >- Node* op = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), op); >+ auto bytecode = currentInstruction->as<OpMov>(); >+ Node* op = get(bytecode.src); >+ set(bytecode.dst, op); > NEXT_OPCODE(op_mov); > } > > case op_check_tdz: { >- addToGraph(CheckNotEmpty, get(VirtualRegister(currentInstruction[1].u.operand))); >+ auto bytecode = currentInstruction->as<OpCheckTdz>(); >+ addToGraph(CheckNotEmpty, get(bytecode.target)); > NEXT_OPCODE(op_check_tdz); > } > > case op_overrides_has_instance: { >- auto& bytecode = *reinterpret_cast<OpOverridesHasInstance*>(currentInstruction); >+ auto bytecode = currentInstruction->as<OpOverridesHasInstance>(); > JSFunction* defaultHasInstanceSymbolFunction = m_inlineStackTop->m_codeBlock->globalObjectFor(currentCodeOrigin())->functionProtoHasInstanceSymbolFunction(); > >- Node* constructor = get(VirtualRegister(bytecode.constructor())); 
>- Node* hasInstanceValue = get(VirtualRegister(bytecode.hasInstanceValue())); >+ Node* constructor = get(VirtualRegister(bytecode.constructor)); >+ Node* hasInstanceValue = get(VirtualRegister(bytecode.hasInstanceValue)); > >- set(VirtualRegister(bytecode.dst()), addToGraph(OverridesHasInstance, OpInfo(m_graph.freeze(defaultHasInstanceSymbolFunction)), constructor, hasInstanceValue)); >+ set(VirtualRegister(bytecode.dst), addToGraph(OverridesHasInstance, OpInfo(m_graph.freeze(defaultHasInstanceSymbolFunction)), constructor, hasInstanceValue)); > NEXT_OPCODE(op_overrides_has_instance); > } > > case op_identity_with_profile: { >- Node* src = get(VirtualRegister(currentInstruction[1].u.operand)); >- SpeculatedType speculation = static_cast<SpeculatedType>(currentInstruction[2].u.operand) << 32 | static_cast<SpeculatedType>(currentInstruction[3].u.operand); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IdentityWithProfile, OpInfo(speculation), src)); >+ auto bytecode = currentInstruction->as<OpIdentityWithProfile>(); >+ Node* srcDst = get(bytecode.srcDst); >+ SpeculatedType speculation = static_cast<SpeculatedType>(bytecode.topProfile) << 32 | static_cast<SpeculatedType>(bytecode.bottomProfile); >+ set(bytecode.srcDst, addToGraph(IdentityWithProfile, OpInfo(speculation), srcDst)); > NEXT_OPCODE(op_identity_with_profile); > } > > case op_instanceof: { >- auto& bytecode = *reinterpret_cast<OpInstanceof*>(currentInstruction); >+ auto bytecode = currentInstruction->as<OpInstanceof>(); > > InstanceOfStatus status = InstanceOfStatus::computeFor( > m_inlineStackTop->m_profiledBlock, m_inlineStackTop->m_baselineMap, > m_currentIndex); > >- Node* value = get(VirtualRegister(bytecode.value())); >- Node* prototype = get(VirtualRegister(bytecode.prototype())); >+ Node* value = get(bytecode.value); >+ Node* prototype = get(bytecode.prototype); > > // Only inline it if it's Simple with a commonPrototype; bottom/top or variable > // prototypes both get handled 
by the IC. This makes sense for bottom (unprofiled) >@@ -4862,86 +4887,96 @@ void ByteCodeParser::parseBlock(unsigned limit) > > if (allOK) { > Node* match = addToGraph(MatchStructure, OpInfo(data), value); >- set(VirtualRegister(bytecode.dst()), match); >+ set(bytecode.dst, match); > NEXT_OPCODE(op_instanceof); > } > } > >- set(VirtualRegister(bytecode.dst()), addToGraph(InstanceOf, value, prototype)); >+ set(bytecode.dst, addToGraph(InstanceOf, value, prototype)); > NEXT_OPCODE(op_instanceof); > } > > case op_instanceof_custom: { >- auto& bytecode = *reinterpret_cast<OpInstanceofCustom*>(currentInstruction); >- Node* value = get(VirtualRegister(bytecode.value())); >- Node* constructor = get(VirtualRegister(bytecode.constructor())); >- Node* hasInstanceValue = get(VirtualRegister(bytecode.hasInstanceValue())); >- set(VirtualRegister(bytecode.dst()), addToGraph(InstanceOfCustom, value, constructor, hasInstanceValue)); >+ auto bytecode = currentInstruction->as<OpInstanceofCustom>(); >+ Node* value = get(bytecode.value); >+ Node* constructor = get(bytecode.constructor); >+ Node* hasInstanceValue = get(bytecode.hasInstanceValue); >+ set(bytecode.dst, addToGraph(InstanceOfCustom, value, constructor, hasInstanceValue)); > NEXT_OPCODE(op_instanceof_custom); > } > case op_is_empty: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsEmpty, value)); >+ auto bytecode = currentInstruction->as<OpIsEmpty>(); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(IsEmpty, value)); > NEXT_OPCODE(op_is_empty); > } > case op_is_undefined: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsUndefined, value)); >+ auto bytecode = currentInstruction->as<OpIsUndefined>(); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(IsUndefined, value)); > 
NEXT_OPCODE(op_is_undefined); > } > > case op_is_boolean: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsBoolean, value)); >+ auto bytecode = currentInstruction->as<OpIsBoolean>(); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(IsBoolean, value)); > NEXT_OPCODE(op_is_boolean); > } > > case op_is_number: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsNumber, value)); >+ auto bytecode = currentInstruction->as<OpIsNumber>(); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(IsNumber, value)); > NEXT_OPCODE(op_is_number); > } > > case op_is_cell_with_type: { >- JSType type = static_cast<JSType>(currentInstruction[3].u.operand); >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsCellWithType, OpInfo(type), value)); >+ auto bytecode = currentInstruction->as<OpIsCellWithType>(); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(IsCellWithType, OpInfo(bytecode.type), value)); > NEXT_OPCODE(op_is_cell_with_type); > } > > case op_is_object: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObject, value)); >+ auto bytecode = currentInstruction->as<OpIsObject>(); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(IsObject, value)); > NEXT_OPCODE(op_is_object); > } > > case op_is_object_or_null: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObjectOrNull, value)); >+ auto bytecode = currentInstruction->as<OpIsObjectOrNull>(); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(IsObjectOrNull, value)); 
> NEXT_OPCODE(op_is_object_or_null); > } > > case op_is_function: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value)); >+ auto bytecode = currentInstruction->as<OpIsFunction>(); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(IsFunction, value)); > NEXT_OPCODE(op_is_function); > } > > case op_not: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, value)); >+ auto bytecode = currentInstruction->as<OpNot>(); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(LogicalNot, value)); > NEXT_OPCODE(op_not); > } > > case op_to_primitive: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToPrimitive, value)); >+ auto bytecode = currentInstruction->as<OpToPrimitive>(); >+ Node* value = get(bytecode.src); >+ set(bytecode.dst, addToGraph(ToPrimitive, value)); > NEXT_OPCODE(op_to_primitive); > } > > case op_strcat: { >- int startOperand = currentInstruction[2].u.operand; >- int numOperands = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpStrcat>(); >+ int startOperand = bytecode.src.offset(); >+ int numOperands = bytecode.count; > #if CPU(X86) > // X86 doesn't have enough registers to compile MakeRope with three arguments. The > // StrCat we emit here may be turned into a MakeRope. 
Rather than try to be clever, >@@ -4966,104 +5001,116 @@ void ByteCodeParser::parseBlock(unsigned limit) > ASSERT(indexInOperands < maxArguments); > operands[indexInOperands++] = get(VirtualRegister(startOperand - operandIdx)); > } >- set(VirtualRegister(currentInstruction[1].u.operand), >- addToGraph(StrCat, operands[0], operands[1], operands[2])); >+ set(bytecode.dst, addToGraph(StrCat, operands[0], operands[1], operands[2])); > NEXT_OPCODE(op_strcat); > } > > case op_less: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLess, op1, op2)); >+ auto bytecode = currentInstruction->as<OpLess>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(CompareLess, op1, op2)); > NEXT_OPCODE(op_less); > } > > case op_lesseq: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLessEq, op1, op2)); >+ auto bytecode = currentInstruction->as<OpLesseq>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(CompareLessEq, op1, op2)); > NEXT_OPCODE(op_lesseq); > } > > case op_greater: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreater, op1, op2)); >+ auto bytecode = currentInstruction->as<OpGreater>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(CompareGreater, op1, op2)); > NEXT_OPCODE(op_greater); > } > > case op_greatereq: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = 
get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreaterEq, op1, op2)); >+ auto bytecode = currentInstruction->as<OpGreatereq>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(CompareGreaterEq, op1, op2)); > NEXT_OPCODE(op_greatereq); > } > > case op_below: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareBelow, op1, op2)); >+ auto bytecode = currentInstruction->as<OpBelow>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(CompareBelow, op1, op2)); > NEXT_OPCODE(op_below); > } > > case op_beloweq: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareBelowEq, op1, op2)); >+ auto bytecode = currentInstruction->as<OpBeloweq>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(CompareBelowEq, op1, op2)); > NEXT_OPCODE(op_beloweq); > } > > case op_eq: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, op1, op2)); >+ auto bytecode = currentInstruction->as<OpEq>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(CompareEq, op1, op2)); > NEXT_OPCODE(op_eq); > } > > case op_eq_null: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpEqNull>(); >+ Node* value = get(bytecode.operand); > Node* nullConstant = addToGraph(JSConstant, 
OpInfo(m_constantNull)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, value, nullConstant)); >+ set(bytecode.dst, addToGraph(CompareEq, value, nullConstant)); > NEXT_OPCODE(op_eq_null); > } > > case op_stricteq: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2)); >+ auto bytecode = currentInstruction->as<OpStricteq>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(CompareStrictEq, op1, op2)); > NEXT_OPCODE(op_stricteq); > } > > case op_neq: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2))); >+ auto bytecode = currentInstruction->as<OpNeq>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2))); > NEXT_OPCODE(op_neq); > } > > case op_neq_null: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpNeqNull>(); >+ Node* value = get(bytecode.operand); > Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, value, nullConstant))); >+ set(bytecode.dst, addToGraph(LogicalNot, addToGraph(CompareEq, value, nullConstant))); > NEXT_OPCODE(op_neq_null); > } > > case op_nstricteq: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >+ auto bytecode = currentInstruction->as<OpNstricteq>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = 
get(bytecode.rhs); > Node* invertedResult; > invertedResult = addToGraph(CompareStrictEq, op1, op2); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, invertedResult)); >+ set(bytecode.dst, addToGraph(LogicalNot, invertedResult)); > NEXT_OPCODE(op_nstricteq); > } > > // === Property access operations === > > case op_get_by_val: { >+ auto bytecode = currentInstruction->as<OpGetByVal>(); > SpeculatedType prediction = getPredictionWithoutOSRExit(); > >- Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* property = get(VirtualRegister(currentInstruction[3].u.operand)); >+ Node* base = get(bytecode.base); >+ Node* property = get(bytecode.property); > bool compiledAsGetById = false; > GetByIdStatus getByIdStatus; > unsigned identifierNumber = 0; >@@ -5097,9 +5144,9 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > if (compiledAsGetById) >- handleGetById(currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus, AccessType::Get, OPCODE_LENGTH(op_get_by_val)); >+ handleGetById(bytecode.dst, prediction, base, identifierNumber, getByIdStatus, AccessType::Get, OPCODE_LENGTH(op_get_by_val)); > else { >- ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Read); >+ ArrayMode arrayMode = getArrayMode(bytecode.metadata(m_codeBlock).arrayProfile, Array::Read); > // FIXME: We could consider making this not vararg, since it only uses three child > // slots. > // https://bugs.webkit.org/show_bug.cgi?id=184192 >@@ -5108,87 +5155,40 @@ void ByteCodeParser::parseBlock(unsigned limit) > addVarArgChild(0); // Leave room for property storage. > Node* getByVal = addToGraph(Node::VarArg, GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction)); > m_exitOK = false; // GetByVal must be treated as if it clobbers exit state, since FixupPhase may make it generic. 
>- set(VirtualRegister(currentInstruction[1].u.operand), getByVal); >+ set(bytecode.dst, getByVal); > } > > NEXT_OPCODE(op_get_by_val); > } > > case op_get_by_val_with_this: { >+ auto bytecode = currentInstruction->as<OpGetByValWithThis>(); > SpeculatedType prediction = getPrediction(); > >- Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* thisValue = get(VirtualRegister(currentInstruction[3].u.operand)); >- Node* property = get(VirtualRegister(currentInstruction[4].u.operand)); >+ Node* base = get(bytecode.base); >+ Node* thisValue = get(bytecode.thisValue); >+ Node* property = get(bytecode.property); > Node* getByValWithThis = addToGraph(GetByValWithThis, OpInfo(), OpInfo(prediction), base, thisValue, property); >- set(VirtualRegister(currentInstruction[1].u.operand), getByValWithThis); >+ set(bytecode.dst, getByValWithThis); > > NEXT_OPCODE(op_get_by_val_with_this); > } > > case op_put_by_val_direct: >- case op_put_by_val: { >- Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* property = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* value = get(VirtualRegister(currentInstruction[3].u.operand)); >- bool isDirect = opcodeID == op_put_by_val_direct; >- bool compiledAsPutById = false; >- { >- unsigned identifierNumber = std::numeric_limits<unsigned>::max(); >- PutByIdStatus putByIdStatus; >- { >- ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); >- ByValInfo* byValInfo = m_inlineStackTop->m_baselineMap.get(CodeOrigin(currentCodeOrigin().bytecodeIndex)).byValInfo; >- // FIXME: When the bytecode is not compiled in the baseline JIT, byValInfo becomes null. >- // At that time, there is no information. 
>- if (byValInfo >- && byValInfo->stubInfo >- && !byValInfo->tookSlowPath >- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent) >- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType) >- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) { >- compiledAsPutById = true; >- identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl()); >- UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; >- >- if (Symbol* symbol = byValInfo->cachedSymbol.get()) { >- FrozenValue* frozen = m_graph.freezeStrong(symbol); >- addToGraph(CheckCell, OpInfo(frozen), property); >- } else { >- ASSERT(!uid->isSymbol()); >- addToGraph(CheckStringIdent, OpInfo(uid), property); >- } >- >- putByIdStatus = PutByIdStatus::computeForStubInfo( >- locker, m_inlineStackTop->m_profiledBlock, >- byValInfo->stubInfo, currentCodeOrigin(), uid); >- >- } >- } >- >- if (compiledAsPutById) >- handlePutById(base, identifierNumber, value, putByIdStatus, isDirect); >- } >- >- if (!compiledAsPutById) { >- ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Write); >- >- addVarArgChild(base); >- addVarArgChild(property); >- addVarArgChild(value); >- addVarArgChild(0); // Leave room for property storage. >- addVarArgChild(0); // Leave room for length. >- addToGraph(Node::VarArg, isDirect ? 
PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0)); >- } >+ handlePutByVal(currentInstruction->as<OpPutByValDirect>()); >+ NEXT_OPCODE(op_put_by_val_direct); > >+ case op_put_by_val: { >+ handlePutByVal(currentInstruction->as<OpPutByVal>()); > NEXT_OPCODE(op_put_by_val); > } > > case op_put_by_val_with_this: { >- Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* thisValue = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* property = get(VirtualRegister(currentInstruction[3].u.operand)); >- Node* value = get(VirtualRegister(currentInstruction[4].u.operand)); >+ auto bytecode = currentInstruction->as<OpPutByValWithThis>(); >+ Node* base = get(bytecode.base); >+ Node* thisValue = get(bytecode.thisValue); >+ Node* property = get(bytecode.property); >+ Node* value = get(bytecode.value); > > addVarArgChild(base); > addVarArgChild(thisValue); >@@ -5200,10 +5200,11 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_define_data_property: { >- Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* property = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* value = get(VirtualRegister(currentInstruction[3].u.operand)); >- Node* attributes = get(VirtualRegister(currentInstruction[4].u.operand)); >+ auto bytecode = currentInstruction->as<OpDefineDataProperty>(); >+ Node* base = get(bytecode.base); >+ Node* property = get(bytecode.property); >+ Node* value = get(bytecode.value); >+ Node* attributes = get(bytecode.attributes); > > addVarArgChild(base); > addVarArgChild(property); >@@ -5215,11 +5216,12 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_define_accessor_property: { >- Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* property = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* getter = get(VirtualRegister(currentInstruction[3].u.operand)); >- Node* setter = 
get(VirtualRegister(currentInstruction[4].u.operand)); >- Node* attributes = get(VirtualRegister(currentInstruction[5].u.operand)); >+ auto bytecode = currentInstruction->as<OpDefineAccessorProperty>(); >+ Node* base = get(bytecode.base); >+ Node* property = get(bytecode.property); >+ Node* getter = get(bytecode.getter); >+ Node* setter = get(bytecode.setter); >+ Node* attributes = get(bytecode.attributes); > > addVarArgChild(base); > addVarArgChild(property); >@@ -5233,14 +5235,17 @@ void ByteCodeParser::parseBlock(unsigned limit) > > case op_get_by_id_direct: > case op_try_get_by_id: >- case op_get_by_id: > case op_get_by_id_proto_load: > case op_get_by_id_unset: >- case op_get_array_length: { >+ case op_get_array_length: >+ // TODO >+ ASSERT_NOT_REACHED(); >+ case op_get_by_id: { >+ auto bytecode = currentInstruction->as<OpGetById>(); > SpeculatedType prediction = getPrediction(); > >- Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; >+ Node* base = get(bytecode.base); >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; > > UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; > GetByIdStatus getByIdStatus = GetByIdStatus::computeFor( >@@ -5259,33 +5264,29 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > handleGetById( >- currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus, type, opcodeLength); >+ bytecode.dst, prediction, base, identifierNumber, getByIdStatus, type, opcodeLength); > >- // Opcode's length is different from others in try and direct cases. 
>- if (opcodeID == op_try_get_by_id) >- NEXT_OPCODE(op_try_get_by_id); >- else if (opcodeID == op_get_by_id_direct) >- NEXT_OPCODE(op_get_by_id_direct); >- else >- NEXT_OPCODE(op_get_by_id); >+ NEXT_OPCODE(op_get_by_id); > } > case op_get_by_id_with_this: { > SpeculatedType prediction = getPrediction(); > >- Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* thisValue = get(VirtualRegister(currentInstruction[3].u.operand)); >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[4].u.operand]; >+ auto bytecode = currentInstruction->as<OpGetByIdWithThis>(); >+ Node* base = get(bytecode.base); >+ Node* thisValue = get(bytecode.thisValue); >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; > >- set(VirtualRegister(currentInstruction[1].u.operand), >+ set(bytecode.dst, > addToGraph(GetByIdWithThis, OpInfo(identifierNumber), OpInfo(prediction), base, thisValue)); > > NEXT_OPCODE(op_get_by_id_with_this); > } > case op_put_by_id: { >- Node* value = get(VirtualRegister(currentInstruction[3].u.operand)); >- Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand]; >- bool direct = currentInstruction[8].u.putByIdFlags & PutByIdIsDirect; >+ auto bytecode = currentInstruction->as<OpPutById>(); >+ Node* value = get(bytecode.value); >+ Node* base = get(bytecode.base); >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; >+ bool direct = bytecode.metadata(m_codeBlock).flags & PutByIdIsDirect; > > PutByIdStatus putByIdStatus = PutByIdStatus::computeFor( > m_inlineStackTop->m_profiledBlock, >@@ -5297,71 +5298,68 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_put_by_id_with_this: { >- Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* thisValue = get(VirtualRegister(currentInstruction[2].u.operand)); >- 
Node* value = get(VirtualRegister(currentInstruction[4].u.operand)); >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; >+ auto bytecode = currentInstruction->as<OpPutByIdWithThis>(); >+ Node* base = get(bytecode.base); >+ Node* thisValue = get(bytecode.thisValue); >+ Node* value = get(bytecode.value); >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; > > addToGraph(PutByIdWithThis, OpInfo(identifierNumber), base, thisValue, value); > NEXT_OPCODE(op_put_by_id_with_this); > } > > case op_put_getter_by_id: >- case op_put_setter_by_id: { >- Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand]; >- unsigned attributes = currentInstruction[3].u.operand; >- Node* accessor = get(VirtualRegister(currentInstruction[4].u.operand)); >- NodeType op = (opcodeID == op_put_getter_by_id) ? PutGetterById : PutSetterById; >- addToGraph(op, OpInfo(identifierNumber), OpInfo(attributes), base, accessor); >+ handlePutAccessorById(PutGetterById, currentInstruction->as<OpPutGetterById>()); > NEXT_OPCODE(op_put_getter_by_id); >+ case op_put_setter_by_id: { >+ handlePutAccessorById(PutSetterById, currentInstruction->as<OpPutSetterById>()); >+ NEXT_OPCODE(op_put_setter_by_id); > } > > case op_put_getter_setter_by_id: { >- Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand]; >- unsigned attributes = currentInstruction[3].u.operand; >- Node* getter = get(VirtualRegister(currentInstruction[4].u.operand)); >- Node* setter = get(VirtualRegister(currentInstruction[5].u.operand)); >- addToGraph(PutGetterSetterById, OpInfo(identifierNumber), OpInfo(attributes), base, getter, setter); >+ auto bytecode = currentInstruction->as<OpPutGetterSetterById>(); >+ Node* base = get(bytecode.base); >+ 
unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; >+ Node* getter = get(bytecode.getter); >+ Node* setter = get(bytecode.setter); >+ addToGraph(PutGetterSetterById, OpInfo(identifierNumber), OpInfo(bytecode.attributes), base, getter, setter); > NEXT_OPCODE(op_put_getter_setter_by_id); > } > > case op_put_getter_by_val: >- case op_put_setter_by_val: { >- Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* subscript = get(VirtualRegister(currentInstruction[2].u.operand)); >- unsigned attributes = currentInstruction[3].u.operand; >- Node* accessor = get(VirtualRegister(currentInstruction[4].u.operand)); >- NodeType op = (opcodeID == op_put_getter_by_val) ? PutGetterByVal : PutSetterByVal; >- addToGraph(op, OpInfo(attributes), base, subscript, accessor); >+ handlePutAccessorByVal(PutGetterByVal, currentInstruction->as<OpPutGetterByVal>()); > NEXT_OPCODE(op_put_getter_by_val); >+ case op_put_setter_by_val: { >+ handlePutAccessorByVal(PutSetterByVal, currentInstruction->as<OpPutSetterByVal>()); >+ NEXT_OPCODE(op_put_setter_by_val); > } > > case op_del_by_id: { >- Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; >- set(VirtualRegister(currentInstruction[1].u.operand), >- addToGraph(DeleteById, OpInfo(identifierNumber), base)); >+ auto bytecode = currentInstruction->as<OpDelById>(); >+ Node* base = get(bytecode.base); >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; >+ set(bytecode.dst, addToGraph(DeleteById, OpInfo(identifierNumber), base)); > NEXT_OPCODE(op_del_by_id); > } > > case op_del_by_val: { >- int dst = currentInstruction[1].u.operand; >- Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* key = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(dst), addToGraph(DeleteByVal, base, key)); >+ auto 
bytecode = currentInstruction->as<OpDelByVal>(); >+ Node* base = get(bytecode.base); >+ Node* key = get(bytecode.property); >+ set(bytecode.dst, addToGraph(DeleteByVal, base, key)); > NEXT_OPCODE(op_del_by_val); > } > > case op_profile_type: { >- Node* valueToProfile = get(VirtualRegister(currentInstruction[1].u.operand)); >- addToGraph(ProfileType, OpInfo(currentInstruction[2].u.location), valueToProfile); >+ auto bytecode = currentInstruction->as<OpProfileType>(); >+ Node* valueToProfile = get(bytecode.target); >+ addToGraph(ProfileType, OpInfo(bytecode.flag), valueToProfile); > NEXT_OPCODE(op_profile_type); > } > > case op_profile_control_flow: { >- BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation; >+ auto bytecode = currentInstruction->as<OpProfileControlFlow>(); >+ BasicBlockLocation* basicBlockLocation = bytecode.metadata(m_codeBlock).basicBlockLocation; > addToGraph(ProfileControlFlow, OpInfo(basicBlockLocation)); > NEXT_OPCODE(op_profile_control_flow); > } >@@ -5370,7 +5368,8 @@ void ByteCodeParser::parseBlock(unsigned limit) > > case op_jmp: { > ASSERT(!m_currentBlock->terminal()); >- int relativeOffset = currentInstruction[1].u.operand; >+ auto bytecode = currentInstruction->as<OpJmp>(); >+ int relativeOffset = bytecode.target; > addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset)); > if (relativeOffset <= 0) > flushForTerminal(); >@@ -5378,168 +5377,205 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_jtrue: { >- unsigned relativeOffset = currentInstruction[2].u.operand; >- Node* condition = get(VirtualRegister(currentInstruction[1].u.operand)); >+ auto bytecode = currentInstruction->as<OpJtrue>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* condition = get(bytecode.condition); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jtrue))), condition); > LAST_OPCODE(op_jtrue); > } > > case 
op_jfalse: { >- unsigned relativeOffset = currentInstruction[2].u.operand; >- Node* condition = get(VirtualRegister(currentInstruction[1].u.operand)); >+ auto bytecode = currentInstruction->as<OpJfalse>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* condition = get(bytecode.condition); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jfalse), m_currentIndex + relativeOffset)), condition); > LAST_OPCODE(op_jfalse); > } > > case op_jeq_null: { >- unsigned relativeOffset = currentInstruction[2].u.operand; >- Node* value = get(VirtualRegister(currentInstruction[1].u.operand)); >+ auto bytecode = currentInstruction->as<OpJeqNull>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* value = get(bytecode.condition); > Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull)); > Node* condition = addToGraph(CompareEq, value, nullConstant); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq_null))), condition); > LAST_OPCODE(op_jeq_null); > } > > case op_jneq_null: { >- unsigned relativeOffset = currentInstruction[2].u.operand; >- Node* value = get(VirtualRegister(currentInstruction[1].u.operand)); >+ auto bytecode = currentInstruction->as<OpJneqNull>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* value = get(bytecode.condition); > Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull)); > Node* condition = addToGraph(CompareEq, value, nullConstant); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_null), m_currentIndex + relativeOffset)), condition); > LAST_OPCODE(op_jneq_null); > } > > case op_jless: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = 
get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJless>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareLess, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jless))), condition); > LAST_OPCODE(op_jless); > } > > case op_jlesseq: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJlesseq>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareLessEq, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jlesseq))), condition); > LAST_OPCODE(op_jlesseq); > } > > case op_jgreater: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJgreater>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareGreater, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreater))), condition); > LAST_OPCODE(op_jgreater); > } > > case op_jgreatereq: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = 
get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJgreatereq>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareGreaterEq, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreatereq))), condition); > LAST_OPCODE(op_jgreatereq); > } > > case op_jeq: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJeq>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareEq, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq))), condition); > LAST_OPCODE(op_jeq); > } > > case op_jstricteq: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJstricteq>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareStrictEq, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jstricteq))), condition); > LAST_OPCODE(op_jstricteq); > } > > case op_jnless: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = 
get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJnless>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareLess, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnless), m_currentIndex + relativeOffset)), condition); > LAST_OPCODE(op_jnless); > } > > case op_jnlesseq: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJnlesseq>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareLessEq, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnlesseq), m_currentIndex + relativeOffset)), condition); > LAST_OPCODE(op_jnlesseq); > } > > case op_jngreater: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJngreater>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareGreater, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreater), m_currentIndex + relativeOffset)), condition); > LAST_OPCODE(op_jngreater); > } > > case op_jngreatereq: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = 
get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJngreatereq>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareGreaterEq, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreatereq), m_currentIndex + relativeOffset)), condition); > LAST_OPCODE(op_jngreatereq); > } > > case op_jneq: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJneq>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareEq, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq), m_currentIndex + relativeOffset)), condition); > LAST_OPCODE(op_jneq); > } > > case op_jnstricteq: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJnstricteq>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareStrictEq, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnstricteq), m_currentIndex + relativeOffset)), condition); > LAST_OPCODE(op_jnstricteq); > } > > case op_jbelow: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = 
get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJbelow>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareBelow, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jbelow))), condition); > LAST_OPCODE(op_jbelow); > } > > case op_jbeloweq: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJbeloweq>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareBelowEq, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jbeloweq))), condition); > LAST_OPCODE(op_jbeloweq); > } > > case op_switch_imm: { >+ auto bytecode = currentInstruction->as<OpSwitchImm>(); > SwitchData& data = *m_graph.m_switchData.add(); > data.kind = SwitchImm; >- data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand]; >- data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand); >+ data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.tableIndex]; >+ data.fallThrough.setBytecodeIndex(m_currentIndex + bytecode.defaultOffset); > SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex); > for (unsigned i = 0; i < table.branchOffsets.size(); ++i) { > if (!table.branchOffsets[i]) >@@ -5549,16 +5585,17 @@ void ByteCodeParser::parseBlock(unsigned limit) > continue; > 
data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target)); > } >- addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand))); >+ addToGraph(Switch, OpInfo(&data), get(bytecode.scrutinee)); > flushIfTerminal(data); > LAST_OPCODE(op_switch_imm); > } > > case op_switch_char: { >+ auto bytecode = currentInstruction->as<OpSwitchChar>(); > SwitchData& data = *m_graph.m_switchData.add(); > data.kind = SwitchChar; >- data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand]; >- data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand); >+ data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.tableIndex]; >+ data.fallThrough.setBytecodeIndex(m_currentIndex + bytecode.defaultOffset); > SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex); > for (unsigned i = 0; i < table.branchOffsets.size(); ++i) { > if (!table.branchOffsets[i]) >@@ -5569,16 +5606,17 @@ void ByteCodeParser::parseBlock(unsigned limit) > data.cases.append( > SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target)); > } >- addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand))); >+ addToGraph(Switch, OpInfo(&data), get(bytecode.scrutinee)); > flushIfTerminal(data); > LAST_OPCODE(op_switch_char); > } > > case op_switch_string: { >+ auto bytecode = currentInstruction->as<OpSwitchString>(); > SwitchData& data = *m_graph.m_switchData.add(); > data.kind = SwitchString; >- data.switchTableIndex = currentInstruction[1].u.operand; >- data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand); >+ data.switchTableIndex = bytecode.tableIndex; >+ data.fallThrough.setBytecodeIndex(m_currentIndex + bytecode.defaultOffset); > StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex); > StringJumpTable::StringOffsetTable::iterator 
iter; > StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end(); >@@ -5589,25 +5627,26 @@ void ByteCodeParser::parseBlock(unsigned limit) > data.cases.append( > SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target)); > } >- addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand))); >+ addToGraph(Switch, OpInfo(&data), get(bytecode.scrutinee)); > flushIfTerminal(data); > LAST_OPCODE(op_switch_string); > } > >- case op_ret: >+ case op_ret: { >+ auto bytecode = currentInstruction->as<OpRet>(); > ASSERT(!m_currentBlock->terminal()); > if (!inlineCallFrame()) { > // Simple case: we are just producing a return >- addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand))); >+ addToGraph(Return, get(bytecode.value)); > flushForReturn(); > LAST_OPCODE(op_ret); > } > > flushForReturn(); > if (m_inlineStackTop->m_returnValue.isValid()) >- setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush); >+ setDirect(m_inlineStackTop->m_returnValue, get(bytecode.value), ImmediateSetWithFlush); > >- if (!m_inlineStackTop->m_continuationBlock && m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size()) { >+ if (!m_inlineStackTop->m_continuationBlock && m_currentIndex + currentInstruction->size() != m_inlineStackTop->m_codeBlock->instructions().size()) { > // This is an early return from an inlined function and we do not have a continuation block, so we must allocate one. > // It is untargetable, because we do not know the appropriate index. 
> // If this block turns out to be a jump target, parseCodeBlock will fix its bytecodeIndex before putting it in m_blockLinkingTargets >@@ -5621,26 +5660,27 @@ void ByteCodeParser::parseBlock(unsigned limit) > m_inlineStackTop->m_continuationBlock = m_currentBlock; > } > LAST_OPCODE_LINKED(op_ret); >- >+ } > case op_end: > ASSERT(!inlineCallFrame()); >- addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand))); >+ addToGraph(Return, get(currentInstruction->as<OpEnd>().value)); > flushForReturn(); > LAST_OPCODE(op_end); > > case op_throw: >- addToGraph(Throw, get(VirtualRegister(currentInstruction[1].u.operand))); >+ addToGraph(Throw, get(currentInstruction->as<OpThrow>().value)); > flushForTerminal(); > LAST_OPCODE(op_throw); > > case op_throw_static_error: { >- uint32_t errorType = currentInstruction[2].u.unsignedValue; >- addToGraph(ThrowStaticError, OpInfo(errorType), get(VirtualRegister(currentInstruction[1].u.operand))); >+ auto bytecode = currentInstruction->as<OpThrowStaticError>(); >+ addToGraph(ThrowStaticError, OpInfo(bytecode.errorType), get(bytecode.message)); > flushForTerminal(); > LAST_OPCODE(op_throw_static_error); > } > > case op_catch: { >+ auto bytecode = currentInstruction->as<OpCatch>(); > m_graph.m_hasExceptionHandlers = true; > > if (inlineCallFrame()) { >@@ -5654,7 +5694,7 @@ void ByteCodeParser::parseBlock(unsigned limit) > > RELEASE_ASSERT(!m_currentBlock->size() || (m_graph.compilation() && m_currentBlock->size() == 1 && m_currentBlock->at(0)->op() == CountExecution)); > >- ValueProfileAndOperandBuffer* buffer = static_cast<ValueProfileAndOperandBuffer*>(currentInstruction[3].u.pointer); >+ ValueProfileAndOperandBuffer* buffer = bytecode.metadata(m_codeBlock).buffer; > > if (!buffer) { > NEXT_OPCODE(op_catch); // This catch has yet to execute. Note: this load can be racy with the main thread. 
>@@ -5761,13 +5801,13 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_call: >- handleCall(currentInstruction, Call, CallMode::Regular); >+ handleCall<OpCall>(currentInstruction, Call, CallMode::Regular); > ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction"); > NEXT_OPCODE(op_call); > > case op_tail_call: { > flushForReturn(); >- Terminality terminality = handleCall(currentInstruction, TailCall, CallMode::Tail); >+ Terminality terminality = handleCall<OpTailCall>(currentInstruction, TailCall, CallMode::Tail); > ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction"); > // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function. > // If the call is not terminal, however, then we want the subsequent op_ret/op_jmp to update metadata and clean >@@ -5780,19 +5820,19 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_construct: >- handleCall(currentInstruction, Construct, CallMode::Construct); >+ handleCall<OpConstruct>(currentInstruction, Construct, CallMode::Construct); > ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction"); > NEXT_OPCODE(op_construct); > > case op_call_varargs: { >- handleVarargsCall(currentInstruction, CallVarargs, CallMode::Regular); >+ handleVarargsCall<OpCallVarargs>(currentInstruction, CallVarargs, CallMode::Regular); > ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction"); > NEXT_OPCODE(op_call_varargs); > } > > case op_tail_call_varargs: { > flushForReturn(); >- Terminality terminality = handleVarargsCall(currentInstruction, TailCallVarargs, CallMode::Tail); >+ Terminality terminality = 
handleVarargsCall<OpTailCallVarargs>(currentInstruction, TailCallVarargs, CallMode::Tail); > ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction"); > // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function. > // If the call is not terminal, however, then we want the subsequent op_ret/op_jmp to update metadata and clean >@@ -5808,7 +5848,7 @@ void ByteCodeParser::parseBlock(unsigned limit) > // done by the arguments object creation node as that node may not exist. > noticeArgumentsUse(); > flushForReturn(); >- Terminality terminality = handleVarargsCall(currentInstruction, TailCallForwardVarargs, CallMode::Tail); >+ Terminality terminality = handleVarargsCall<OpTailCallForwardArguments>(currentInstruction, TailCallForwardVarargs, CallMode::Tail); > ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction"); > // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function. 
> // If the call is not terminal, however, then we want the subsequent op_ret/op_jmp to update metadata and clean >@@ -5820,31 +5860,30 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_construct_varargs: { >- handleVarargsCall(currentInstruction, ConstructVarargs, CallMode::Construct); >+ handleVarargsCall<OpConstructVarargs>(currentInstruction, ConstructVarargs, CallMode::Construct); > ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction"); > NEXT_OPCODE(op_construct_varargs); > } > > case op_call_eval: { >- int result = currentInstruction[1].u.operand; >- int callee = currentInstruction[2].u.operand; >- int argumentCountIncludingThis = currentInstruction[3].u.operand; >- int registerOffset = -currentInstruction[4].u.operand; >- addCall(result, CallEval, nullptr, get(VirtualRegister(callee)), argumentCountIncludingThis, registerOffset, getPrediction()); >+ auto bytecode = currentInstruction->as<OpCallEval>(); >+ int registerOffset = -bytecode.argv; >+ addCall(bytecode.dst, CallEval, nullptr, get(bytecode.callee), bytecode.argc, registerOffset, getPrediction()); > NEXT_OPCODE(op_call_eval); > } > > case op_jneq_ptr: { >- Special::Pointer specialPointer = currentInstruction[2].u.specialPointer; >+ auto bytecode = currentInstruction->as<OpJneqPtr>(); >+ Special::Pointer specialPointer = bytecode.specialPointer; > ASSERT(pointerIsCell(specialPointer)); > JSCell* actualPointer = static_cast<JSCell*>( > actualPointerFor(m_inlineStackTop->m_codeBlock, specialPointer)); > FrozenValue* frozenPointer = m_graph.freeze(actualPointer); >- int operand = currentInstruction[1].u.operand; >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* child = get(VirtualRegister(operand)); >- if (currentInstruction[4].u.operand) { >+ unsigned relativeOffset = bytecode.target; >+ Node* child = get(bytecode.condition); >+ if 
(bytecode.metadata(m_codeBlock).hasJumped) { > Node* condition = addToGraph(CompareEqPtr, OpInfo(frozenPointer), child); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr), m_currentIndex + relativeOffset)), condition); > LAST_OPCODE(op_jneq_ptr); > } >@@ -5853,75 +5892,73 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_resolve_scope: { >- int dst = currentInstruction[1].u.operand; >- ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand); >- unsigned depth = currentInstruction[5].u.operand; >- int scope = currentInstruction[2].u.operand; >- >- if (needsDynamicLookup(resolveType, op_resolve_scope)) { >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; >- set(VirtualRegister(dst), addToGraph(ResolveScope, OpInfo(identifierNumber), get(VirtualRegister(scope)))); >+ auto bytecode = currentInstruction->as<OpResolveScope>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ unsigned depth = bytecode.localScopeDepth; >+ >+ if (needsDynamicLookup(bytecode.resolveType, op_resolve_scope)) { >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.var]; >+ set(bytecode.dst, addToGraph(ResolveScope, OpInfo(identifierNumber), get(bytecode.scope))); > NEXT_OPCODE(op_resolve_scope); > } > > // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints. 
>- if (needsVarInjectionChecks(resolveType)) >+ if (needsVarInjectionChecks(bytecode.resolveType)) > m_graph.watchpoints().addLazily(m_inlineStackTop->m_codeBlock->globalObject()->varInjectionWatchpoint()); > >- switch (resolveType) { >+ switch (bytecode.resolveType) { > case GlobalProperty: > case GlobalVar: > case GlobalPropertyWithVarInjectionChecks: > case GlobalVarWithVarInjectionChecks: > case GlobalLexicalVar: > case GlobalLexicalVarWithVarInjectionChecks: { >- JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock); >+ JSScope* constantScope = JSScope::constantScopeForCodeBlock(bytecode.resolveType, m_inlineStackTop->m_codeBlock); > RELEASE_ASSERT(constantScope); >- RELEASE_ASSERT(static_cast<JSScope*>(currentInstruction[6].u.pointer) == constantScope); >- set(VirtualRegister(dst), weakJSConstant(constantScope)); >- addToGraph(Phantom, get(VirtualRegister(scope))); >+ RELEASE_ASSERT(metadata.scope.get() == constantScope); >+ set(bytecode.dst, weakJSConstant(constantScope)); >+ addToGraph(Phantom, get(bytecode.scope)); > break; > } > case ModuleVar: { > // Since the value of the "scope" virtual register is not used in LLInt / baseline op_resolve_scope with ModuleVar, > // we need not to keep it alive by the Phantom node. >- JSModuleEnvironment* moduleEnvironment = jsCast<JSModuleEnvironment*>(currentInstruction[6].u.jsCell.get()); > // Module environment is already strongly referenced by the CodeBlock. >- set(VirtualRegister(dst), weakJSConstant(moduleEnvironment)); >+ set(bytecode.dst, weakJSConstant(metadata.moduleEnvironment.get())); > break; > } > case LocalClosureVar: > case ClosureVar: > case ClosureVarWithVarInjectionChecks: { >- Node* localBase = get(VirtualRegister(scope)); >+ Node* localBase = get(bytecode.scope); > addToGraph(Phantom, localBase); // OSR exit cannot handle resolve_scope on a DCE'd scope. > > // We have various forms of constant folding here. 
This is necessary to avoid > // spurious recompiles in dead-but-foldable code. >- if (SymbolTable* symbolTable = currentInstruction[6].u.symbolTable.get()) { >+ if (SymbolTable* symbolTable = metadata.symbolTable.get()) { > InferredValue* singleton = symbolTable->singletonScope(); > if (JSValue value = singleton->inferredValue()) { > m_graph.watchpoints().addLazily(singleton); >- set(VirtualRegister(dst), weakJSConstant(value)); >+ set(bytecode.dst, weakJSConstant(value)); > break; > } > } > if (JSScope* scope = localBase->dynamicCastConstant<JSScope*>(*m_vm)) { > for (unsigned n = depth; n--;) > scope = scope->next(); >- set(VirtualRegister(dst), weakJSConstant(scope)); >+ set(bytecode.dst, weakJSConstant(scope)); > break; > } > for (unsigned n = depth; n--;) > localBase = addToGraph(SkipScope, localBase); >- set(VirtualRegister(dst), localBase); >+ set(bytecode.dst, localBase); > break; > } > case UnresolvedProperty: > case UnresolvedPropertyWithVarInjectionChecks: { >- addToGraph(Phantom, get(VirtualRegister(scope))); >+ addToGraph(Phantom, get(bytecode.scope)); > addToGraph(ForceOSRExit); >- set(VirtualRegister(dst), addToGraph(JSConstant, OpInfo(m_constantNull))); >+ set(bytecode.dst, addToGraph(JSConstant, OpInfo(m_constantNull))); > break; > } > case Dynamic: >@@ -5931,21 +5968,20 @@ void ByteCodeParser::parseBlock(unsigned limit) > NEXT_OPCODE(op_resolve_scope); > } > case op_resolve_scope_for_hoisting_func_decl_in_eval: { >- int dst = currentInstruction[1].u.operand; >- int scope = currentInstruction[2].u.operand; >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; >+ auto bytecode = currentInstruction->as<OpResolveScopeForHoistingFuncDeclInEval>(); >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; > >- set(VirtualRegister(dst), addToGraph(ResolveScopeForHoistingFuncDeclInEval, OpInfo(identifierNumber), get(VirtualRegister(scope)))); >+ set(bytecode.dst, 
addToGraph(ResolveScopeForHoistingFuncDeclInEval, OpInfo(identifierNumber), get(bytecode.scope))); > > NEXT_OPCODE(op_resolve_scope_for_hoisting_func_decl_in_eval); > } > > case op_get_from_scope: { >- int dst = currentInstruction[1].u.operand; >- int scope = currentInstruction[2].u.operand; >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; >+ auto bytecode = currentInstruction->as<OpGetFromScope>(); >+ auto metadata = bytecode.metadata(m_codeBlock); >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.var]; > UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; >- ResolveType resolveType = GetPutInfo(currentInstruction[4].u.operand).resolveType(); >+ ResolveType resolveType = metadata.getPutInfo.resolveType(); > > Structure* structure = 0; > WatchpointSet* watchpoints = 0; >@@ -5953,17 +5989,17 @@ void ByteCodeParser::parseBlock(unsigned limit) > { > ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); > if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) >- watchpoints = currentInstruction[5].u.watchpointSet; >+ watchpoints = metadata.watchpointSet; > else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks) >- structure = currentInstruction[5].u.structure.get(); >- operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer); >+ structure = metadata.structure.get(); >+ operand = reinterpret_cast<uintptr_t>(metadata.scopeOffset); > } > > if (needsDynamicLookup(resolveType, op_get_from_scope)) { >- uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, currentInstruction[4].u.operand); >+ uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, bytecode.localScopeDepth); > SpeculatedType prediction = getPrediction(); >- set(VirtualRegister(dst), >- addToGraph(GetDynamicVar, 
OpInfo(opInfo1), OpInfo(prediction), get(VirtualRegister(scope)))); >+ set(bytecode.dst, >+ addToGraph(GetDynamicVar, OpInfo(opInfo1), OpInfo(prediction), get(bytecode.scope))); > NEXT_OPCODE(op_get_from_scope); > } > >@@ -5980,21 +6016,21 @@ void ByteCodeParser::parseBlock(unsigned limit) > if (status.state() != GetByIdStatus::Simple > || status.numVariants() != 1 > || status[0].structureSet().size() != 1) { >- set(VirtualRegister(dst), addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(VirtualRegister(scope)))); >+ set(bytecode.dst, addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(bytecode.scope))); > break; > } > > Node* base = weakJSConstant(globalObject); > Node* result = load(prediction, base, identifierNumber, status[0]); >- addToGraph(Phantom, get(VirtualRegister(scope))); >- set(VirtualRegister(dst), result); >+ addToGraph(Phantom, get(bytecode.scope)); >+ set(bytecode.dst, result); > break; > } > case GlobalVar: > case GlobalVarWithVarInjectionChecks: > case GlobalLexicalVar: > case GlobalLexicalVarWithVarInjectionChecks: { >- addToGraph(Phantom, get(VirtualRegister(scope))); >+ addToGraph(Phantom, get(bytecode.scope)); > WatchpointSet* watchpointSet; > ScopeOffset offset; > JSSegmentedVariableObject* scopeObject = jsCast<JSSegmentedVariableObject*>(JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock)); >@@ -6050,7 +6086,7 @@ void ByteCodeParser::parseBlock(unsigned limit) > JSValue value = pointer->get(); > if (value) { > m_graph.watchpoints().addLazily(watchpointSet); >- set(VirtualRegister(dst), weakJSConstant(value)); >+ set(bytecode.dst, weakJSConstant(value)); > break; > } > } >@@ -6064,13 +6100,13 @@ void ByteCodeParser::parseBlock(unsigned limit) > Node* value = addToGraph(nodeType, OpInfo(operand), OpInfo(prediction)); > if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) > addToGraph(CheckNotEmpty, value); >- 
set(VirtualRegister(dst), value); >+ set(bytecode.dst, value); > break; > } > case LocalClosureVar: > case ClosureVar: > case ClosureVarWithVarInjectionChecks: { >- Node* scopeNode = get(VirtualRegister(scope)); >+ Node* scopeNode = get(bytecode.scope); > > // Ideally we wouldn't have to do this Phantom. But: > // >@@ -6086,11 +6122,11 @@ void ByteCodeParser::parseBlock(unsigned limit) > // prediction, we'd otherwise think that it has to exit. Then when it did execute, we > // would recompile. But if we can fold it here, we avoid the exit. > if (JSValue value = m_graph.tryGetConstantClosureVar(scopeNode, ScopeOffset(operand))) { >- set(VirtualRegister(dst), weakJSConstant(value)); >+ set(bytecode.dst, weakJSConstant(value)); > break; > } > SpeculatedType prediction = getPrediction(); >- set(VirtualRegister(dst), >+ set(bytecode.dst, > addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), scopeNode)); > break; > } >@@ -6105,13 +6141,12 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_put_to_scope: { >- unsigned scope = currentInstruction[1].u.operand; >- unsigned identifierNumber = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpPutToScope>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ unsigned identifierNumber = bytecode.var; > if (identifierNumber != UINT_MAX) > identifierNumber = m_inlineStackTop->m_identifierRemap[identifierNumber]; >- unsigned value = currentInstruction[3].u.operand; >- GetPutInfo getPutInfo = GetPutInfo(currentInstruction[4].u.operand); >- ResolveType resolveType = getPutInfo.resolveType(); >+ ResolveType resolveType = metadata.getPutInfo.resolveType(); > UniquedStringImpl* uid; > if (identifierNumber != UINT_MAX) > uid = m_graph.identifiers()[identifierNumber]; >@@ -6124,18 +6159,18 @@ void ByteCodeParser::parseBlock(unsigned limit) > { > ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); > if (resolveType == GlobalVar || resolveType == 
GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) >- watchpoints = currentInstruction[5].u.watchpointSet; >+ watchpoints = metadata.watchpointSet; > else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks) >- structure = currentInstruction[5].u.structure.get(); >- operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer); >+ structure = metadata.structure.get(); >+ operand = reinterpret_cast<uintptr_t>(metadata.scopeOffset); > } > > JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject(); > > if (needsDynamicLookup(resolveType, op_put_to_scope)) { > ASSERT(identifierNumber != UINT_MAX); >- uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, currentInstruction[4].u.operand); >- addToGraph(PutDynamicVar, OpInfo(opInfo1), OpInfo(), get(VirtualRegister(scope)), get(VirtualRegister(value))); >+ uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, metadata.getPutInfo.operand()); >+ addToGraph(PutDynamicVar, OpInfo(opInfo1), OpInfo(), get(bytecode.scope), get(bytecode.value)); > NEXT_OPCODE(op_put_to_scope); > } > >@@ -6150,20 +6185,20 @@ void ByteCodeParser::parseBlock(unsigned limit) > if (status.numVariants() != 1 > || status[0].kind() != PutByIdVariant::Replace > || status[0].structure().size() != 1) { >- addToGraph(PutById, OpInfo(identifierNumber), get(VirtualRegister(scope)), get(VirtualRegister(value))); >+ addToGraph(PutById, OpInfo(identifierNumber), get(bytecode.scope), get(bytecode.value)); > break; > } > Node* base = weakJSConstant(globalObject); >- store(base, identifierNumber, status[0], get(VirtualRegister(value))); >+ store(base, identifierNumber, status[0], get(bytecode.value)); > // Keep scope alive until after put. 
>- addToGraph(Phantom, get(VirtualRegister(scope))); >+ addToGraph(Phantom, get(bytecode.scope)); > break; > } > case GlobalLexicalVar: > case GlobalLexicalVarWithVarInjectionChecks: > case GlobalVar: > case GlobalVarWithVarInjectionChecks: { >- if (!isInitialization(getPutInfo.initializationMode()) && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) { >+ if (!isInitialization(metadata.getPutInfo.initializationMode()) && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) { > SpeculatedType prediction = SpecEmpty; > Node* value = addToGraph(GetGlobalLexicalVariable, OpInfo(operand), OpInfo(prediction)); > addToGraph(CheckNotEmpty, value); >@@ -6174,21 +6209,21 @@ void ByteCodeParser::parseBlock(unsigned limit) > SymbolTableEntry entry = scopeObject->symbolTable()->get(uid); > ASSERT_UNUSED(entry, watchpoints == entry.watchpointSet()); > } >- Node* valueNode = get(VirtualRegister(value)); >+ Node* valueNode = get(bytecode.value); > addToGraph(PutGlobalVariable, OpInfo(operand), weakJSConstant(scopeObject), valueNode); > if (watchpoints && watchpoints->state() != IsInvalidated) { > // Must happen after the store. See comment for GetGlobalVar. > addToGraph(NotifyWrite, OpInfo(watchpoints)); > } > // Keep scope alive until after put. 
>- addToGraph(Phantom, get(VirtualRegister(scope))); >+ addToGraph(Phantom, get(bytecode.scope)); > break; > } > case LocalClosureVar: > case ClosureVar: > case ClosureVarWithVarInjectionChecks: { >- Node* scopeNode = get(VirtualRegister(scope)); >- Node* valueNode = get(VirtualRegister(value)); >+ Node* scopeNode = get(bytecode.scope); >+ Node* valueNode = get(bytecode.value); > > addToGraph(PutClosureVar, OpInfo(operand), scopeNode, valueNode); > >@@ -6251,28 +6286,29 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_create_lexical_environment: { >- VirtualRegister symbolTableRegister(currentInstruction[3].u.operand); >- VirtualRegister initialValueRegister(currentInstruction[4].u.operand); >- ASSERT(symbolTableRegister.isConstant() && initialValueRegister.isConstant()); >- FrozenValue* symbolTable = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(symbolTableRegister.offset())); >- FrozenValue* initialValue = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(initialValueRegister.offset())); >- Node* scope = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpCreateLexicalEnvironment>(); >+ ASSERT(VirtualRegister(bytecode.symbolTableIndex).isConstant() && bytecode.initialValue.isConstant()); >+ FrozenValue* symbolTable = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.symbolTableIndex)); >+ FrozenValue* initialValue = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.initialValue.offset())); >+ Node* scope = get(bytecode.scope); > Node* lexicalEnvironment = addToGraph(CreateActivation, OpInfo(symbolTable), OpInfo(initialValue), scope); >- set(VirtualRegister(currentInstruction[1].u.operand), lexicalEnvironment); >+ set(bytecode.dst, lexicalEnvironment); > NEXT_OPCODE(op_create_lexical_environment); > } > > case op_push_with_scope: { >- Node* currentScope = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* 
object = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(PushWithScope, currentScope, object)); >+ auto bytecode = currentInstruction->as<OpPushWithScope>(); >+ Node* currentScope = get(bytecode.currentScope); >+ Node* object = get(bytecode.newScope); >+ set(bytecode.dst, addToGraph(PushWithScope, currentScope, object)); > NEXT_OPCODE(op_push_with_scope); > } > > case op_get_parent_scope: { >- Node* currentScope = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpGetParentScope>(); >+ Node* currentScope = get(bytecode.scope); > Node* newScope = addToGraph(SkipScope, currentScope); >- set(VirtualRegister(currentInstruction[1].u.operand), newScope); >+ set(bytecode.dst, newScope); > addToGraph(Phantom, currentScope); > NEXT_OPCODE(op_get_parent_scope); > } >@@ -6282,67 +6318,74 @@ void ByteCodeParser::parseBlock(unsigned limit) > // only helps for the first basic block. It's extremely important not to constant fold > // loads from the scope register later, as that would prevent the DFG from tracking the > // bytecode-level liveness of the scope register. 
>+ auto bytecode = currentInstruction->as<OpGetScope>(); > Node* callee = get(VirtualRegister(CallFrameSlot::callee)); > Node* result; > if (JSFunction* function = callee->dynamicCastConstant<JSFunction*>(*m_vm)) > result = weakJSConstant(function->scope()); > else > result = addToGraph(GetScope, callee); >- set(VirtualRegister(currentInstruction[1].u.operand), result); >+ set(bytecode.dst, result); > NEXT_OPCODE(op_get_scope); > } > > case op_argument_count: { >+ auto bytecode = currentInstruction->as<OpArgumentCount>(); > Node* sub = addToGraph(ArithSub, OpInfo(Arith::Unchecked), OpInfo(SpecInt32Only), getArgumentCount(), addToGraph(JSConstant, OpInfo(m_constantOne))); >- >- set(VirtualRegister(currentInstruction[1].u.operand), sub); >+ set(bytecode.dst, sub); > NEXT_OPCODE(op_argument_count); > } > > case op_create_direct_arguments: { >+ auto bytecode = currentInstruction->as<OpCreateDirectArguments>(); > noticeArgumentsUse(); > Node* createArguments = addToGraph(CreateDirectArguments); >- set(VirtualRegister(currentInstruction[1].u.operand), createArguments); >+ set(bytecode.dst, createArguments); > NEXT_OPCODE(op_create_direct_arguments); > } > > case op_create_scoped_arguments: { >+ auto bytecode = currentInstruction->as<OpCreateScopedArguments>(); > noticeArgumentsUse(); >- Node* createArguments = addToGraph(CreateScopedArguments, get(VirtualRegister(currentInstruction[2].u.operand))); >- set(VirtualRegister(currentInstruction[1].u.operand), createArguments); >+ Node* createArguments = addToGraph(CreateScopedArguments, get(bytecode.scope)); >+ set(bytecode.dst, createArguments); > NEXT_OPCODE(op_create_scoped_arguments); > } > > case op_create_cloned_arguments: { >+ auto bytecode = currentInstruction->as<OpCreateClonedArguments>(); > noticeArgumentsUse(); > Node* createArguments = addToGraph(CreateClonedArguments); >- set(VirtualRegister(currentInstruction[1].u.operand), createArguments); >+ set(bytecode.dst, createArguments); > 
NEXT_OPCODE(op_create_cloned_arguments); > } > > case op_get_from_arguments: { >- set(VirtualRegister(currentInstruction[1].u.operand), >+ auto bytecode = currentInstruction->as<OpGetFromArguments>(); >+ set(bytecode.dst, > addToGraph( > GetFromArguments, >- OpInfo(currentInstruction[3].u.operand), >+ OpInfo(bytecode.offset), > OpInfo(getPrediction()), >- get(VirtualRegister(currentInstruction[2].u.operand)))); >+ get(bytecode.scope))); > NEXT_OPCODE(op_get_from_arguments); > } > > case op_put_to_arguments: { >+ auto bytecode = currentInstruction->as<OpPutToArguments>(); > addToGraph( > PutToArguments, >- OpInfo(currentInstruction[2].u.operand), >- get(VirtualRegister(currentInstruction[1].u.operand)), >- get(VirtualRegister(currentInstruction[3].u.operand))); >+ OpInfo(bytecode.offset), >+ get(bytecode.scope), >+ get(bytecode.value)); > NEXT_OPCODE(op_put_to_arguments); > } > > case op_get_argument: { >+ auto bytecode = currentInstruction->as<OpGetArgument>(); > InlineCallFrame* inlineCallFrame = this->inlineCallFrame(); > Node* argument; >- int32_t argumentIndexIncludingThis = currentInstruction[2].u.operand; >+ int32_t argumentIndexIncludingThis = bytecode.index; > if (inlineCallFrame && !inlineCallFrame->isVarargs()) { > int32_t argumentCountIncludingThisWithFixup = inlineCallFrame->argumentsWithFixup.size(); > if (argumentIndexIncludingThis < argumentCountIncludingThisWithFixup) >@@ -6351,125 +6394,84 @@ void ByteCodeParser::parseBlock(unsigned limit) > argument = addToGraph(JSConstant, OpInfo(m_constantUndefined)); > } else > argument = addToGraph(GetArgument, OpInfo(argumentIndexIncludingThis), OpInfo(getPrediction())); >- set(VirtualRegister(currentInstruction[1].u.operand), argument); >+ set(bytecode.dst, argument); > NEXT_OPCODE(op_get_argument); > } > case op_new_async_generator_func: >+ handleNewFunc(NewAsyncGeneratorFunction, currentInstruction->as<OpNewAsyncGeneratorFunc>()); >+ NEXT_OPCODE(op_new_async_generator_func); > case op_new_func: >- case 
op_new_generator_func: >- case op_new_async_func: { >- FunctionExecutable* decl = m_inlineStackTop->m_profiledBlock->functionDecl(currentInstruction[3].u.operand); >- FrozenValue* frozen = m_graph.freezeStrong(decl); >- NodeType op; >- switch (opcodeID) { >- case op_new_generator_func: >- op = NewGeneratorFunction; >- break; >- case op_new_async_func: >- op = NewAsyncFunction; >- break; >- case op_new_async_generator_func: >- op = NewAsyncGeneratorFunction; >- break; >- default: >- op = NewFunction; >- } >- Node* scope = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(op, OpInfo(frozen), scope)); >- // Ideally we wouldn't have to do this Phantom. But: >- // >- // For the constant case: we must do it because otherwise we would have no way of knowing >- // that the scope is live at OSR here. >- // >- // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation >- // won't be able to handle an Undefined scope. 
>- addToGraph(Phantom, scope); >- static_assert(OPCODE_LENGTH(op_new_func) == OPCODE_LENGTH(op_new_generator_func), "The length of op_new_func should be equal to one of op_new_generator_func"); >- static_assert(OPCODE_LENGTH(op_new_func) == OPCODE_LENGTH(op_new_async_func), "The length of op_new_func should be equal to one of op_new_async_func"); >- static_assert(OPCODE_LENGTH(op_new_func) == OPCODE_LENGTH(op_new_async_generator_func), "The length of op_new_func should be equal to one of op_new_async_generator_func"); >+ handleNewFunc(NewFunction, currentInstruction->as<OpNewFunc>()); > NEXT_OPCODE(op_new_func); >- } >+ case op_new_generator_func: >+ handleNewFunc(NewGeneratorFunction, currentInstruction->as<OpNewGeneratorFunc>()); >+ NEXT_OPCODE(op_new_generator_func); >+ case op_new_async_func: >+ handleNewFunc(NewAsyncFunction, currentInstruction->as<OpNewAsyncFunc>()); >+ NEXT_OPCODE(op_new_async_func); > > case op_new_func_exp: >+ handleNewFuncExp(NewAsyncFunction, currentInstruction->as<OpNewFuncExp>()); >+ NEXT_OPCODE(op_new_func_exp); > case op_new_generator_func_exp: >+ handleNewFuncExp(NewGeneratorFunction, currentInstruction->as<OpNewGeneratorFuncExp>()); >+ NEXT_OPCODE(op_new_generator_func_exp); > case op_new_async_generator_func_exp: >- case op_new_async_func_exp: { >- FunctionExecutable* expr = m_inlineStackTop->m_profiledBlock->functionExpr(currentInstruction[3].u.operand); >- FrozenValue* frozen = m_graph.freezeStrong(expr); >- NodeType op; >- switch (opcodeID) { >- case op_new_generator_func_exp: >- op = NewGeneratorFunction; >- break; >- case op_new_async_func_exp: >- op = NewAsyncFunction; >- break; >- case op_new_async_generator_func_exp: >- op = NewAsyncGeneratorFunction; >- break; >- default: >- op = NewFunction; >- } >- Node* scope = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(op, OpInfo(frozen), scope)); >- // Ideally we wouldn't have to do this Phantom. 
But: >- // >- // For the constant case: we must do it because otherwise we would have no way of knowing >- // that the scope is live at OSR here. >- // >- // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation >- // won't be able to handle an Undefined scope. >- addToGraph(Phantom, scope); >- static_assert(OPCODE_LENGTH(op_new_func_exp) == OPCODE_LENGTH(op_new_generator_func_exp), "The length of op_new_func_exp should be equal to one of op_new_generator_func_exp"); >- static_assert(OPCODE_LENGTH(op_new_func_exp) == OPCODE_LENGTH(op_new_async_func_exp), "The length of op_new_func_exp should be equal to one of op_new_async_func_exp"); >- static_assert(OPCODE_LENGTH(op_new_func_exp) == OPCODE_LENGTH(op_new_async_generator_func_exp), "The length of op_new_func_exp should be equal to one of op_new_async_func_exp"); >- NEXT_OPCODE(op_new_func_exp); >- } >+ handleNewFuncExp(NewAsyncGeneratorFunction, currentInstruction->as<OpNewAsyncGeneratorFuncExp>()); >+ NEXT_OPCODE(op_new_async_generator_func_exp); >+ case op_new_async_func_exp: >+ handleNewFuncExp(NewAsyncFunction, currentInstruction->as<OpNewAsyncFuncExp>()); >+ NEXT_OPCODE(op_new_async_func_exp); > > case op_set_function_name: { >- Node* func = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* name = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpSetFunctionName>(); >+ Node* func = get(bytecode.function); >+ Node* name = get(bytecode.name); > addToGraph(SetFunctionName, func, name); > NEXT_OPCODE(op_set_function_name); > } > > case op_typeof: { >- set(VirtualRegister(currentInstruction[1].u.operand), >- addToGraph(TypeOf, get(VirtualRegister(currentInstruction[2].u.operand)))); >+ auto bytecode = currentInstruction->as<OpTypeof>(); >+ set(bytecode.dst, addToGraph(TypeOf, get(bytecode.value))); > NEXT_OPCODE(op_typeof); > } > > case op_to_number: { >+ auto bytecode = currentInstruction->as<OpToNumber>(); > 
SpeculatedType prediction = getPrediction(); >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), value)); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), value)); > NEXT_OPCODE(op_to_number); > } > > case op_to_string: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToString, value)); >+ auto bytecode = currentInstruction->as<OpToString>(); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(ToString, value)); > NEXT_OPCODE(op_to_string); > } > > case op_to_object: { >+ auto bytecode = currentInstruction->as<OpToObject>(); > SpeculatedType prediction = getPrediction(); >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToObject, OpInfo(identifierNumber), OpInfo(prediction), value)); >+ Node* value = get(bytecode.operand); >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.message]; >+ set(bytecode.dst, addToGraph(ToObject, OpInfo(identifierNumber), OpInfo(prediction), value)); > NEXT_OPCODE(op_to_object); > } > > case op_in_by_val: { >- ArrayMode arrayMode = getArrayMode(currentInstruction[OPCODE_LENGTH(op_in_by_val) - 1].u.arrayProfile, Array::Read); >- set(VirtualRegister(currentInstruction[1].u.operand), >- addToGraph(InByVal, OpInfo(arrayMode.asWord()), get(VirtualRegister(currentInstruction[2].u.operand)), get(VirtualRegister(currentInstruction[3].u.operand)))); >+ auto bytecode = currentInstruction->as<OpInByVal>(); >+ ArrayMode arrayMode = getArrayMode(bytecode.metadata(m_codeBlock).arrayProfile, Array::Read); >+ set(bytecode.dst, 
addToGraph(InByVal, OpInfo(arrayMode.asWord()), get(bytecode.base), get(bytecode.property))); > NEXT_OPCODE(op_in_by_val); > } > > case op_in_by_id: { >- Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; >+ auto bytecode = currentInstruction->as<OpInById>(); >+ Node* base = get(bytecode.base); >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; > UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; > > InByIdStatus status = InByIdStatus::computeFor( >@@ -6498,101 +6500,106 @@ void ByteCodeParser::parseBlock(unsigned limit) > addToGraph(FilterInByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses.addInByIdStatus(currentCodeOrigin(), status)), base); > > Node* match = addToGraph(MatchStructure, OpInfo(data), base); >- set(VirtualRegister(currentInstruction[1].u.operand), match); >+ set(bytecode.dst, match); > NEXT_OPCODE(op_in_by_id); > } > } > >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InById, OpInfo(identifierNumber), base)); >+ set(bytecode.dst, addToGraph(InById, OpInfo(identifierNumber), base)); > NEXT_OPCODE(op_in_by_id); > } > > case op_get_enumerable_length: { >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumerableLength, >- get(VirtualRegister(currentInstruction[2].u.operand)))); >+ auto bytecode = currentInstruction->as<OpGetEnumerableLength>(); >+ set(bytecode.dst, addToGraph(GetEnumerableLength, get(bytecode.base))); > NEXT_OPCODE(op_get_enumerable_length); > } > > case op_has_generic_property: { >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasGenericProperty, >- get(VirtualRegister(currentInstruction[2].u.operand)), >- get(VirtualRegister(currentInstruction[3].u.operand)))); >+ auto bytecode = currentInstruction->as<OpHasGenericProperty>(); >+ set(bytecode.dst, addToGraph(HasGenericProperty, get(bytecode.base), 
get(bytecode.property))); > NEXT_OPCODE(op_has_generic_property); > } > > case op_has_structure_property: { >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasStructureProperty, >- get(VirtualRegister(currentInstruction[2].u.operand)), >- get(VirtualRegister(currentInstruction[3].u.operand)), >- get(VirtualRegister(currentInstruction[4].u.operand)))); >+ auto bytecode = currentInstruction->as<OpHasStructureProperty>(); >+ set(bytecode.dst, addToGraph(HasStructureProperty, >+ get(bytecode.base), >+ get(bytecode.property), >+ get(bytecode.enumerator))); > NEXT_OPCODE(op_has_structure_property); > } > > case op_has_indexed_property: { >- Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); >- ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Read); >- Node* property = get(VirtualRegister(currentInstruction[3].u.operand)); >+ auto bytecode = currentInstruction->as<OpHasIndexedProperty>(); >+ Node* base = get(bytecode.base); >+ ArrayMode arrayMode = getArrayMode(bytecode.metadata(m_codeBlock).arrayProfile, Array::Read); >+ Node* property = get(bytecode.property); > Node* hasIterableProperty = addToGraph(HasIndexedProperty, OpInfo(arrayMode.asWord()), OpInfo(static_cast<uint32_t>(PropertySlot::InternalMethodType::GetOwnProperty)), base, property); >- set(VirtualRegister(currentInstruction[1].u.operand), hasIterableProperty); >+ set(bytecode.dst, hasIterableProperty); > NEXT_OPCODE(op_has_indexed_property); > } > > case op_get_direct_pname: { >+ auto bytecode = currentInstruction->as<OpGetDirectPname>(); > SpeculatedType prediction = getPredictionWithoutOSRExit(); > >- Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* property = get(VirtualRegister(currentInstruction[3].u.operand)); >- Node* index = get(VirtualRegister(currentInstruction[4].u.operand)); >- Node* enumerator = get(VirtualRegister(currentInstruction[5].u.operand)); >+ Node* base = get(bytecode.base); >+ Node* property 
= get(bytecode.property); >+ Node* index = get(bytecode.index); >+ Node* enumerator = get(bytecode.enumerator); > > addVarArgChild(base); > addVarArgChild(property); > addVarArgChild(index); > addVarArgChild(enumerator); >- set(VirtualRegister(currentInstruction[1].u.operand), >- addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction))); >+ set(bytecode.dst, addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction))); > > NEXT_OPCODE(op_get_direct_pname); > } > > case op_get_property_enumerator: { >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetPropertyEnumerator, >- get(VirtualRegister(currentInstruction[2].u.operand)))); >+ auto bytecode = currentInstruction->as<OpGetPropertyEnumerator>(); >+ set(bytecode.dst, addToGraph(GetPropertyEnumerator, get(bytecode.base))); > NEXT_OPCODE(op_get_property_enumerator); > } > > case op_enumerator_structure_pname: { >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorStructurePname, >- get(VirtualRegister(currentInstruction[2].u.operand)), >- get(VirtualRegister(currentInstruction[3].u.operand)))); >+ auto bytecode = currentInstruction->as<OpEnumeratorStructurePname>(); >+ set(bytecode.dst, addToGraph(GetEnumeratorStructurePname, >+ get(bytecode.enumerator), >+ get(bytecode.index))); > NEXT_OPCODE(op_enumerator_structure_pname); > } > > case op_enumerator_generic_pname: { >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorGenericPname, >- get(VirtualRegister(currentInstruction[2].u.operand)), >- get(VirtualRegister(currentInstruction[3].u.operand)))); >+ auto bytecode = currentInstruction->as<OpEnumeratorGenericPname>(); >+ set(bytecode.dst, addToGraph(GetEnumeratorGenericPname, >+ get(bytecode.enumerator), >+ get(bytecode.index))); > NEXT_OPCODE(op_enumerator_generic_pname); > } > > case op_to_index_string: { >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToIndexString, >- 
get(VirtualRegister(currentInstruction[2].u.operand)))); >+ auto bytecode = currentInstruction->as<OpToIndexString>(); >+ set(bytecode.dst, addToGraph(ToIndexString, get(bytecode.index))); > NEXT_OPCODE(op_to_index_string); > } > > case op_log_shadow_chicken_prologue: { >+ auto bytecode = currentInstruction->as<OpLogShadowChickenPrologue>(); > if (!m_inlineStackTop->m_inlineCallFrame) >- addToGraph(LogShadowChickenPrologue, get(VirtualRegister(currentInstruction[1].u.operand))); >+ addToGraph(LogShadowChickenPrologue, get(bytecode.scope)); > NEXT_OPCODE(op_log_shadow_chicken_prologue); > } > > case op_log_shadow_chicken_tail: { >+ auto bytecode = currentInstruction->as<OpLogShadowChickenTail>(); > if (!m_inlineStackTop->m_inlineCallFrame) { > // FIXME: The right solution for inlining is to elide these whenever the tail call > // ends up being inlined. > // https://bugs.webkit.org/show_bug.cgi?id=155686 >- addToGraph(LogShadowChickenTail, get(VirtualRegister(currentInstruction[1].u.operand)), get(VirtualRegister(currentInstruction[2].u.operand))); >+ addToGraph(LogShadowChickenTail, get(bytecode.thisValue), get(bytecode.scope)); > } > NEXT_OPCODE(op_log_shadow_chicken_tail); > } >@@ -6796,7 +6803,7 @@ void ByteCodeParser::parseCodeBlock() > codeBlock->baselineVersion()->dumpBytecode(); > } > >- Vector<unsigned, 32> jumpTargets; >+ Vector<InstructionStream::Offset, 32> jumpTargets; > computePreciseJumpTargets(codeBlock, jumpTargets); > if (Options::dumpBytecodeAtDFGTime()) { > dataLog("Jump targets: "); >@@ -6853,6 +6860,115 @@ void ByteCodeParser::parseCodeBlock() > VERBOSE_LOG("Done parsing ", *codeBlock, " (fell off end)\n"); > } > >+template <typename Bytecode> >+void ByteCodeParser::handlePutByVal(Bytecode bytecode) >+{ >+ Node* base = get(bytecode.base); >+ Node* property = get(bytecode.property); >+ Node* value = get(bytecode.value); >+ bool isDirect = Bytecode::opcodeID() == op_put_by_val_direct; >+ bool compiledAsPutById = false; >+ { >+ unsigned 
identifierNumber = std::numeric_limits<unsigned>::max(); >+ PutByIdStatus putByIdStatus; >+ { >+ ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); >+ ByValInfo* byValInfo = m_inlineStackTop->m_baselineMap.get(CodeOrigin(currentCodeOrigin().bytecodeIndex)).byValInfo; >+ // FIXME: When the bytecode is not compiled in the baseline JIT, byValInfo becomes null. >+ // At that time, there is no information. >+ if (byValInfo >+ && byValInfo->stubInfo >+ && !byValInfo->tookSlowPath >+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent) >+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType) >+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) { >+ compiledAsPutById = true; >+ identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl()); >+ UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; >+ >+ if (Symbol* symbol = byValInfo->cachedSymbol.get()) { >+ FrozenValue* frozen = m_graph.freezeStrong(symbol); >+ addToGraph(CheckCell, OpInfo(frozen), property); >+ } else { >+ ASSERT(!uid->isSymbol()); >+ addToGraph(CheckStringIdent, OpInfo(uid), property); >+ } >+ >+ putByIdStatus = PutByIdStatus::computeForStubInfo( >+ locker, m_inlineStackTop->m_profiledBlock, >+ byValInfo->stubInfo, currentCodeOrigin(), uid); >+ >+ } >+ } >+ >+ if (compiledAsPutById) >+ handlePutById(base, identifierNumber, value, putByIdStatus, isDirect); >+ } >+ >+ if (!compiledAsPutById) { >+ ArrayMode arrayMode = getArrayMode(bytecode.metadata(m_codeBlock).arrayProfile, Array::Write); >+ >+ addVarArgChild(base); >+ addVarArgChild(property); >+ addVarArgChild(value); >+ addVarArgChild(0); // Leave room for property storage. >+ addVarArgChild(0); // Leave room for length. >+ addToGraph(Node::VarArg, isDirect ? 
PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0)); >+ } >+} >+ >+template <typename Bytecode> >+void ByteCodeParser::handlePutAccessorById(NodeType op, Bytecode bytecode) >+{ >+ Node* base = get(bytecode.base); >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; >+ Node* accessor = get(bytecode.accessor); >+ addToGraph(op, OpInfo(identifierNumber), OpInfo(bytecode.attributes), base, accessor); >+} >+ >+template <typename Bytecode> >+void ByteCodeParser::handlePutAccessorByVal(NodeType op, Bytecode bytecode) >+{ >+ Node* base = get(bytecode.base); >+ Node* subscript = get(bytecode.property); >+ Node* accessor = get(bytecode.accessor); >+ addToGraph(op, OpInfo(bytecode.attributes), base, subscript, accessor); >+} >+ >+template <typename Bytecode> >+void ByteCodeParser::handleNewFunc(NodeType op, Bytecode bytecode) >+{ >+ FunctionExecutable* decl = m_inlineStackTop->m_profiledBlock->functionDecl(bytecode.functionDecl); >+ FrozenValue* frozen = m_graph.freezeStrong(decl); >+ Node* scope = get(bytecode.scope); >+ set(bytecode.dst, addToGraph(op, OpInfo(frozen), scope)); >+ // Ideally we wouldn't have to do this Phantom. But: >+ // >+ // For the constant case: we must do it because otherwise we would have no way of knowing >+ // that the scope is live at OSR here. >+ // >+ // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation >+ // won't be able to handle an Undefined scope. >+ addToGraph(Phantom, scope); >+} >+ >+template <typename Bytecode> >+void ByteCodeParser::handleNewFuncExp(NodeType op, Bytecode bytecode) >+{ >+ FunctionExecutable* expr = m_inlineStackTop->m_profiledBlock->functionExpr(bytecode.functionDecl); >+ FrozenValue* frozen = m_graph.freezeStrong(expr); >+ Node* scope = get(bytecode.scope); >+ set(bytecode.dst, addToGraph(op, OpInfo(frozen), scope)); >+ // Ideally we wouldn't have to do this Phantom. 
But: >+ // >+ // For the constant case: we must do it because otherwise we would have no way of knowing >+ // that the scope is live at OSR here. >+ // >+ // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation >+ // won't be able to handle an Undefined scope. >+ addToGraph(Phantom, scope); >+} >+ > void ByteCodeParser::parse() > { > // Set during construction. >@@ -6941,9 +7057,7 @@ void ByteCodeParser::parse() > if (argument.isArgument() && !argument.isHeader()) { > const Vector<ArgumentPosition*>& arguments = m_inlineCallFrameToArgumentPositions.get(inlineCallFrame); > arguments[argument.toArgument()]->addVariable(variable); >- } >- >- insertionSet.insertNode(block->size(), SpecNone, op, endOrigin, OpInfo(variable)); >+ } insertionSet.insertNode(block->size(), SpecNone, op, endOrigin, OpInfo(variable)); > }; > auto addFlushDirect = [&] (InlineCallFrame* inlineCallFrame, VirtualRegister operand) { > insertLivenessPreservingOp(inlineCallFrame, Flush, operand); >diff --git a/Source/JavaScriptCore/dfg/DFGCapabilities.cpp b/Source/JavaScriptCore/dfg/DFGCapabilities.cpp >index dadc92d867e4c44a3462d8d6dab6458075acdd75..6ae75861c1cfe97ea3ae1f6616d89797122d9c71 100644 >--- a/Source/JavaScriptCore/dfg/DFGCapabilities.cpp >+++ b/Source/JavaScriptCore/dfg/DFGCapabilities.cpp >@@ -103,12 +103,14 @@ inline void debugFail(CodeBlock* codeBlock, OpcodeID opcodeID, CapabilityLevel r > dataLog("DFG rejecting opcode in ", *codeBlock, " because of opcode ", opcodeNames[opcodeID], "\n"); > } > >-CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, Instruction* pc) >+CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, const Instruction* pc) > { > UNUSED_PARAM(codeBlock); // This function does some bytecode parsing. Ordinarily bytecode parsing requires the owning CodeBlock. It's sort of strange that we don't use it here right now. 
> UNUSED_PARAM(pc); > > switch (opcodeID) { >+ case op_wide: >+ ASSERT_NOT_REACHED(); > case op_enter: > case op_to_this: > case op_argument_count: >@@ -302,20 +304,17 @@ CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, Instruc > > CapabilityLevel capabilityLevel(CodeBlock* codeBlock) > { >- Instruction* instructionsBegin = codeBlock->instructions().begin(); >- unsigned instructionCount = codeBlock->instructions().size(); > CapabilityLevel result = CanCompileAndInline; > >- for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount; ) { >- switch (Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode)) { >+ for (const auto& instruction : codeBlock->instructions()) { >+ switch (instruction->opcodeID()) { > #define DEFINE_OP(opcode, length) \ > case opcode: { \ >- CapabilityLevel newResult = leastUpperBound(result, capabilityLevel(opcode, codeBlock, instructionsBegin + bytecodeOffset)); \ >+ CapabilityLevel newResult = leastUpperBound(result, capabilityLevel(opcode, codeBlock, instruction.ptr())); \ > if (newResult != result) { \ > debugFail(codeBlock, opcode, newResult); \ > result = newResult; \ > } \ >- bytecodeOffset += length; \ > break; \ > } > FOR_EACH_OPCODE_ID(DEFINE_OP) >diff --git a/Source/JavaScriptCore/dfg/DFGCapabilities.h b/Source/JavaScriptCore/dfg/DFGCapabilities.h >index e03b2471eb23469abc8fe1ca98f042101af4c72d..52145e976ab5ef652ab4a8a4cf5d3f145038ef64 100644 >--- a/Source/JavaScriptCore/dfg/DFGCapabilities.h >+++ b/Source/JavaScriptCore/dfg/DFGCapabilities.h >@@ -45,7 +45,7 @@ bool mightInlineFunctionForClosureCall(CodeBlock*); > bool mightInlineFunctionForConstruct(CodeBlock*); > bool canUseOSRExitFuzzing(CodeBlock*); > >-inline CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, Instruction* pc); >+inline CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, const Instruction* pc); > > CapabilityLevel capabilityLevel(CodeBlock*); > #else // ENABLE(DFG_JIT) >@@ 
-58,7 +58,7 @@ inline bool mightInlineFunctionForClosureCall(CodeBlock*) { return false; } > inline bool mightInlineFunctionForConstruct(CodeBlock*) { return false; } > inline bool canUseOSRExitFuzzing(CodeBlock*) { return false; } > >-inline CapabilityLevel capabilityLevel(OpcodeID, CodeBlock*, Instruction*) { return CannotCompile; } >+inline CapabilityLevel capabilityLevel(OpcodeID, CodeBlock*, const Instruction*) { return CannotCompile; } > inline CapabilityLevel capabilityLevel(CodeBlock*) { return CannotCompile; } > #endif // ENABLE(DFG_JIT) > >diff --git a/Source/JavaScriptCore/dfg/DFGGraph.cpp b/Source/JavaScriptCore/dfg/DFGGraph.cpp >index ca4b0e253a9f4bda0c3a879764db637b9d1130d1..99a6952cdac0c1d4729b6a56acd24844129136fb 100644 >--- a/Source/JavaScriptCore/dfg/DFGGraph.cpp >+++ b/Source/JavaScriptCore/dfg/DFGGraph.cpp >@@ -1661,8 +1661,9 @@ MethodOfGettingAValueProfile Graph::methodOfGettingAValueProfileFor(Node* curren > } > } > >- if (node->hasHeapPrediction()) >- return &profiledBlock->valueProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex); >+ // TODO >+ //if (node->hasHeapPrediction()) >+ //return &profiledBlock->valueProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex); > > if (profiledBlock->hasBaselineJITProfiling()) { > if (ArithProfile* result = profiledBlock->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex)) >diff --git a/Source/JavaScriptCore/dfg/DFGOSREntry.cpp b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp >index 438f8f21dfcfb9af9ddd6bbd4ca8fb459dad2a5f..74b20710636ca5ec664d0f732296f3b6fc5a748f 100644 >--- a/Source/JavaScriptCore/dfg/DFGOSREntry.cpp >+++ b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp >@@ -389,8 +389,9 @@ MacroAssemblerCodePtr<ExceptionHandlerPtrTag> prepareCatchOSREntry(ExecState* ex > if (UNLIKELY(!vm.ensureStackCapacityFor(&exec->registers()[virtualRegisterForLocal(frameSizeForCheck).offset()]))) > return nullptr; > >- 
ASSERT(Interpreter::getOpcodeID(exec->codeBlock()->instructions()[exec->bytecodeOffset()].u.opcode) == op_catch); >- ValueProfileAndOperandBuffer* buffer = static_cast<ValueProfileAndOperandBuffer*>(exec->codeBlock()->instructions()[exec->bytecodeOffset() + 3].u.pointer); >+ auto instruction = exec->codeBlock()->instructions().at(exec->bytecodeOffset()); >+ ASSERT(instruction->is<OpCatch>()); >+ ValueProfileAndOperandBuffer* buffer = instruction->as<OpCatch>().metadata(exec).buffer; > JSValue* dataBuffer = reinterpret_cast<JSValue*>(dfgCommon->catchOSREntryBuffer->dataBuffer()); > unsigned index = 0; > buffer->forEach([&] (ValueProfileAndOperand& profile) { >diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp >index b845f4066eef2f04ac9798062e05c3d97b535531..be5da0deab992e1f6b8613cfec39e8bff5a99103 100644 >--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp >+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp >@@ -3811,7 +3811,7 @@ void SpeculativeJIT::compileValueAdd(Node* node) > > CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic); > ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex); >- Instruction* instruction = &baselineCodeBlock->instructions()[node->origin.semantic.bytecodeIndex]; >+ const Instruction* instruction = baselineCodeBlock->instructions().at(node->origin.semantic.bytecodeIndex).ptr(); > JITAddIC* addIC = m_jit.codeBlock()->addJITAddIC(arithProfile, instruction); > auto repatchingFunction = operationValueAddOptimize; > auto nonRepatchingFunction = operationValueAdd; >@@ -4465,7 +4465,7 @@ void SpeculativeJIT::compileArithSub(Node* node) > > CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic); > ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex); >- Instruction* instruction = 
&baselineCodeBlock->instructions()[node->origin.semantic.bytecodeIndex]; >+ const Instruction* instruction = baselineCodeBlock->instructions().at(node->origin.semantic.bytecodeIndex).ptr(); > JITSubIC* subIC = m_jit.codeBlock()->addJITSubIC(arithProfile, instruction); > auto repatchingFunction = operationValueSubOptimize; > auto nonRepatchingFunction = operationValueSub; >@@ -4484,7 +4484,7 @@ void SpeculativeJIT::compileValueNegate(Node* node) > { > CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic); > ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex); >- Instruction* instruction = &baselineCodeBlock->instructions()[node->origin.semantic.bytecodeIndex]; >+ const Instruction* instruction = baselineCodeBlock->instructions().at(node->origin.semantic.bytecodeIndex).ptr(); > JITNegIC* negIC = m_jit.codeBlock()->addJITNegIC(arithProfile, instruction); > auto repatchingFunction = operationArithNegateOptimize; > auto nonRepatchingFunction = operationArithNegate; >@@ -4826,7 +4826,7 @@ void SpeculativeJIT::compileArithMul(Node* node) > > CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic); > ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex); >- Instruction* instruction = &baselineCodeBlock->instructions()[node->origin.semantic.bytecodeIndex]; >+ const Instruction* instruction = baselineCodeBlock->instructions().at(node->origin.semantic.bytecodeIndex).ptr(); > JITMulIC* mulIC = m_jit.codeBlock()->addJITMulIC(arithProfile, instruction); > auto repatchingFunction = operationValueMulOptimize; > auto nonRepatchingFunction = operationValueMul; >diff --git a/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp b/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp >index 3ba87e17c20a1cb84f95dbb11080e6d67d6a39b4..ac313dc5417d3366e2849ee46a1cf0c67f57905d 100644 >--- 
a/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp >+++ b/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp >@@ -1846,7 +1846,7 @@ private: > { > CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic); > ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex); >- Instruction* instruction = &baselineCodeBlock->instructions()[m_node->origin.semantic.bytecodeIndex]; >+ const Instruction* instruction = baselineCodeBlock->instructions().at(m_node->origin.semantic.bytecodeIndex).ptr(); > auto repatchingFunction = operationValueAddOptimize; > auto nonRepatchingFunction = operationValueAdd; > compileBinaryMathIC<JITAddGenerator>(arithProfile, instruction, repatchingFunction, nonRepatchingFunction); >@@ -1854,7 +1854,7 @@ private: > > template <typename Generator, typename Func1, typename Func2, > typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename std::remove_pointer<Func2>::type>::value>> >- void compileUnaryMathIC(ArithProfile* arithProfile, Instruction* instruction, Func1 repatchingFunction, Func2 nonRepatchingFunction) >+ void compileUnaryMathIC(ArithProfile* arithProfile, const Instruction* instruction, Func1 repatchingFunction, Func2 nonRepatchingFunction) > { > Node* node = m_node; > >@@ -1940,7 +1940,7 @@ private: > > template <typename Generator, typename Func1, typename Func2, > typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename std::remove_pointer<Func2>::type>::value>> >- void compileBinaryMathIC(ArithProfile* arithProfile, Instruction* instruction, Func1 repatchingFunction, Func2 nonRepatchingFunction) >+ void compileBinaryMathIC(ArithProfile* arithProfile, const Instruction* instruction, Func1 repatchingFunction, Func2 nonRepatchingFunction) > { > Node* node = m_node; > >@@ -2107,7 +2107,7 @@ private: > > CodeBlock* 
baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic); > ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex); >- Instruction* instruction = &baselineCodeBlock->instructions()[m_node->origin.semantic.bytecodeIndex]; >+ const Instruction* instruction = baselineCodeBlock->instructions().at(m_node->origin.semantic.bytecodeIndex).ptr(); > auto repatchingFunction = operationValueSubOptimize; > auto nonRepatchingFunction = operationValueSub; > compileBinaryMathIC<JITSubGenerator>(arithProfile, instruction, repatchingFunction, nonRepatchingFunction); >@@ -2203,7 +2203,7 @@ private: > case UntypedUse: { > CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic); > ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex); >- Instruction* instruction = &baselineCodeBlock->instructions()[m_node->origin.semantic.bytecodeIndex]; >+ const Instruction* instruction = baselineCodeBlock->instructions().at(m_node->origin.semantic.bytecodeIndex).ptr(); > auto repatchingFunction = operationValueMulOptimize; > auto nonRepatchingFunction = operationValueMul; > compileBinaryMathIC<JITMulGenerator>(arithProfile, instruction, repatchingFunction, nonRepatchingFunction); >@@ -2740,7 +2740,7 @@ private: > DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse); > CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic); > ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex); >- Instruction* instruction = &baselineCodeBlock->instructions()[m_node->origin.semantic.bytecodeIndex]; >+ const Instruction* instruction = baselineCodeBlock->instructions().at(m_node->origin.semantic.bytecodeIndex).ptr(); > auto repatchingFunction = operationArithNegateOptimize; > auto nonRepatchingFunction = 
operationArithNegate; > compileUnaryMathIC<JITNegGenerator>(arithProfile, instruction, repatchingFunction, nonRepatchingFunction); >diff --git a/Source/JavaScriptCore/ftl/FTLOperations.cpp b/Source/JavaScriptCore/ftl/FTLOperations.cpp >index 147bb2c12fb53c4e0b79e5bee5bcae6e1669e14b..caa1890e23b2cadbe8232a9095e7859ac288e70c 100644 >--- a/Source/JavaScriptCore/ftl/FTLOperations.cpp >+++ b/Source/JavaScriptCore/ftl/FTLOperations.cpp >@@ -474,10 +474,10 @@ extern "C" JSCell* JIT_OPERATION operationMaterializeObjectInOSR( > // For now, we use array allocation profile in the actual CodeBlock. It is OK since current NewArrayBuffer > // and PhantomNewArrayBuffer are always bound to a specific op_new_array_buffer. > CodeBlock* codeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock(materialization->origin(), exec->codeBlock()); >- Instruction* currentInstruction = &codeBlock->instructions()[materialization->origin().bytecodeIndex]; >- RELEASE_ASSERT(Interpreter::getOpcodeID(currentInstruction[0].u.opcode) == op_new_array_buffer); >- auto* newArrayBuffer = bitwise_cast<OpNewArrayBuffer*>(currentInstruction); >- ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile; >+ const Instruction* currentInstruction = codeBlock->instructions().at(materialization->origin().bytecodeIndex).ptr(); >+ RELEASE_ASSERT(currentInstruction->is<OpNewArrayBuffer>()); >+ auto newArrayBuffer = currentInstruction->as<OpNewArrayBuffer>(); >+ ArrayAllocationProfile* profile = &newArrayBuffer.metadata(codeBlock).allocationProfile; > > // FIXME: Share the code with CommonSlowPaths. Currently, codeBlock etc. are slightly different. > IndexingType indexingMode = profile->selectIndexingType(); >@@ -495,7 +495,7 @@ extern "C" JSCell* JIT_OPERATION operationMaterializeObjectInOSR( > // We also cannot allocate a new butterfly from compilation threads since it's invalid to allocate cells from > // a compilation thread. 
> WTF::storeStoreFence(); >- codeBlock->constantRegister(newArrayBuffer->immutableButterfly()).set(vm, codeBlock, immutableButterfly); >+ codeBlock->constantRegister(newArrayBuffer.immutableButterfly.offset()).set(vm, codeBlock, immutableButterfly); > WTF::storeStoreFence(); > } > >diff --git a/Source/JavaScriptCore/generate-bytecode-files b/Source/JavaScriptCore/generate-bytecode-files >deleted file mode 100644 >index fa25fd2ef31be4c1eb3c3a585be529d67cfed6d8..0000000000000000000000000000000000000000 >--- a/Source/JavaScriptCore/generate-bytecode-files >+++ /dev/null >@@ -1,302 +0,0 @@ >-#! /usr/bin/env python >- >-# Copyright (C) 2014-2017 Apple Inc. All rights reserved. >-# >-# Redistribution and use in source and binary forms, with or without >-# modification, are permitted provided that the following conditions >-# are met: >-# >-# 1. Redistributions of source code must retain the above copyright >-# notice, this list of conditions and the following disclaimer. >-# 2. Redistributions in binary form must reproduce the above copyright >-# notice, this list of conditions and the following disclaimer in the >-# documentation and/or other materials provided with the distribution. >-# >-# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY >-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED >-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE >-# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY >-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES >-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; >-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND >-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF >-# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
>- >-# This tool processes the bytecode list to create Bytecodes.h and InitBytecodes.asm >- >-import hashlib >-import json >-import optparse >-import os >-import re >-import sys >- >-cCopyrightMsg = """/* >-* Copyright (C) 2014 Apple Inc. All rights reserved. >-* >-* Redistribution and use in source and binary forms, with or without >-* modification, are permitted provided that the following conditions >-* are met: >-* >-* 1. Redistributions of source code must retain the above copyright >-* notice, this list of conditions and the following disclaimer. >-* 2. Redistributions in binary form must reproduce the above copyright >-* notice, this list of conditions and the following disclaimer in the >-* documentation and/or other materials provided with the distribution. >-* >-* THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY >-* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED >-* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE >-* DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY >-* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES >-* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; >-* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND >-* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >-* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF >-* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. >- >-* Autogenerated from %s, do not modify. >-*/ >- >-""" >- >-asmCopyrightMsg = """# Copyright (C) 2014 Apple Inc. All rights reserved. >-# >-# Redistribution and use in source and binary forms, with or without >-# modification, are permitted provided that the following conditions >-# are met: >-# >-# 1. Redistributions of source code must retain the above copyright >-# notice, this list of conditions and the following disclaimer. >-# 2. 
Redistributions in binary form must reproduce the above copyright >-# notice, this list of conditions and the following disclaimer in the >-# documentation and/or other materials provided with the distribution. >-# >-# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY >-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED >-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE >-# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY >-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES >-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; >-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND >-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF >-# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. >- >-# Autogenerated from %s, do not modify. 
>- >-""" >-def openOrExit(path, mode): >- try: >- return open(path, mode) >- except IOError as e: >- print("I/O error opening {0}, ({1}): {2}".format(path, e.errno, e.strerror)) >- exit(1) >- >-def hashFile(file): >- sha1 = hashlib.sha1() >- file.seek(0) >- for line in file: >- sha1.update(line) >- >- file.seek(0) >- >- return sha1.hexdigest() >- >- >-def toCpp(name): >- camelCase = re.sub(r'([^a-z0-9].)', lambda c: c.group(0)[1].upper(), name) >- CamelCase = camelCase[:1].upper() + camelCase[1:] >- return CamelCase >- >- >-def writeInstructionAccessor(bytecodeHFile, typeName, name): >- bytecodeHFile.write(" {0}& {1}() {{ return *bitwise_cast<{0}*>(&m_{1}); }}\n".format(typeName, name)) >- bytecodeHFile.write(" const {0}& {1}() const {{ return *bitwise_cast<const {0}*>(&m_{1}); }}\n".format(typeName, name)) >- >- >-def writeInstructionMember(bytecodeHFile, typeName, name): >- bytecodeHFile.write(" std::aligned_storage<sizeof({0}), sizeof(Instruction)>::type m_{1};\n".format(typeName, name)) >- bytecodeHFile.write(" static_assert(sizeof({0}) <= sizeof(Instruction), \"Size of {0} shouldn't be bigger than an Instruction.\");\n".format(typeName, name)) >- >-def writeStruct(bytecodeHFile, bytecode): >- bytecodeHFile.write("struct {0} {{\n".format(toCpp(bytecode["name"]))) >- bytecodeHFile.write("public:\n") >- >- writeInstructionAccessor(bytecodeHFile, "Opcode", "opcode") >- for offset in bytecode["offsets"]: >- for name, typeName in offset.iteritems(): >- writeInstructionAccessor(bytecodeHFile, typeName, name) >- >- bytecodeHFile.write("\nprivate:\n") >- bytecodeHFile.write(" friend class LLIntOffsetsExtractor;\n\n") >- >- writeInstructionMember(bytecodeHFile, "Opcode", "opcode") >- for offset in bytecode["offsets"]: >- for name, typeName in offset.iteritems(): >- writeInstructionMember(bytecodeHFile, typeName, name) >- bytecodeHFile.write("};\n\n") >- >- >-if __name__ == "__main__": >- parser = optparse.OptionParser(usage = "usage: %prog [--bytecodes_h <FILE>] 
[--init_bytecodes_asm <FILE>] <bytecode-json-file>") >- parser.add_option("-b", "--bytecodes_h", dest = "bytecodesHFileName", help = "generate bytecodes macro .h FILE", metavar = "FILE") >- parser.add_option("-s", "--bytecode_structs_h", dest = "bytecodeStructsHFileName", help = "generate bytecodes macro .h FILE", metavar = "FILE") >- parser.add_option("-a", "--init_bytecodes_asm", dest = "initASMFileName", help="generate ASM bytecodes init FILE", metavar = "FILE") >- (options, args) = parser.parse_args() >- >- if len(args) != 1: >- parser.error("missing <bytecode-json-file>") >- >- bytecodeJSONFile = args[0] >- bytecodeFile = openOrExit(bytecodeJSONFile, "rb") >- sha1Hash = hashFile(bytecodeFile) >- >- hFileHashString = "// SHA1Hash: {0}\n".format(sha1Hash) >- asmFileHashString = "# SHA1Hash: {0}\n".format(sha1Hash) >- >- bytecodeHFilename = options.bytecodesHFileName >- bytecodeStructsHFilename = options.bytecodeStructsHFileName >- initASMFileName = options.initASMFileName >- >- if not bytecodeHFilename and not initASMFileName and not bytecodeStructsHFilename: >- parser.print_help() >- exit(0) >- >- needToGenerate = False >- >- if bytecodeHFilename: >- try: >- bytecodeHReadFile = open(bytecodeHFilename, "rb") >- >- hashLine = bytecodeHReadFile.readline() >- if hashLine != hFileHashString: >- needToGenerate = True >- except: >- needToGenerate = True >- else: >- bytecodeHReadFile.close() >- >- if bytecodeStructsHFilename: >- try: >- bytecodeStructsHReadFile = open(bytecodeStructsHFilename, "rb") >- >- hashLine = bytecodeStructsHReadFile.readline() >- if hashLine != hFileHashString: >- needToGenerate = True >- except: >- needToGenerate = True >- else: >- bytecodeStructsHReadFile.close() >- >- if initASMFileName: >- try: >- initBytecodesReadFile = open(initASMFileName, "rb") >- >- hashLine = initBytecodesReadFile.readline() >- if hashLine != asmFileHashString: >- needToGenerate = True >- except: >- needToGenerate = True >- else: >- initBytecodesReadFile.close() >- >- 
if not needToGenerate: >- exit(0) >- >- if bytecodeHFilename: >- bytecodeHFile = openOrExit(bytecodeHFilename, "wb") >- >- if bytecodeStructsHFilename: >- bytecodeStructsHFile = openOrExit(bytecodeStructsHFilename, "wb") >- >- if initASMFileName: >- initBytecodesFile = openOrExit(initASMFileName, "wb") >- >- try: >- bytecodeSections = json.load(bytecodeFile, encoding = "utf-8") >- except: >- print("Unexpected error parsing {0}: {1}".format(bytecodeJSONFile, sys.exc_info())) >- >- if bytecodeHFilename: >- bytecodeHFile.write(hFileHashString) >- bytecodeHFile.write(cCopyrightMsg % bytecodeJSONFile) >- bytecodeHFile.write("#pragma once\n\n") >- >- if bytecodeStructsHFilename: >- bytecodeStructsHFile.write(hFileHashString) >- bytecodeStructsHFile.write(cCopyrightMsg % bytecodeJSONFile) >- bytecodeStructsHFile.write("#pragma once\n\n") >- bytecodeStructsHFile.write("#include \"Instruction.h\"\n") >- bytecodeStructsHFile.write("\n") >- >- if initASMFileName: >- initBytecodesFile.write(asmFileHashString) >- initBytecodesFile.write(asmCopyrightMsg % bytecodeJSONFile) >- initASMBytecodeNum = 0 >- >- for section in bytecodeSections: >- if bytecodeHFilename and section['emitInHFile']: >- bytecodeHFile.write("#define FOR_EACH_{0}_ID(macro) \\\n".format(section["macroNameComponent"])) >- firstMacro = True >- defaultLength = 1 >- if "defaultLength" in section: >- defaultLength = section["defaultLength"] >- >- bytecodeNum = 0 >- for bytecode in section["bytecodes"]: >- if not firstMacro: >- bytecodeHFile.write(" \\\n") >- >- length = defaultLength >- if "length" in bytecode: >- length = bytecode["length"] >- elif "offsets" in bytecode: >- # Add one for the opcode >- length = len(bytecode["offsets"]) + 1 >- >- bytecodeHFile.write(" macro({0}, {1})".format(bytecode["name"], length)) >- firstMacro = False >- bytecodeNum = bytecodeNum + 1 >- >- bytecodeHFile.write("\n\n") >- bytecodeHFile.write("#define NUMBER_OF_{0}_IDS {1}\n\n".format(section["macroNameComponent"], bytecodeNum)) >- 
>- >- if bytecodeStructsHFilename and section['emitInStructsFile']: >- bytecodeStructsHFile.write("namespace JSC {\n\n") >- >- for bytecode in section["bytecodes"]: >- if not "offsets" in bytecode: >- continue >- writeStruct(bytecodeStructsHFile, bytecode) >- >- bytecodeStructsHFile.write("} // namespace JSC \n") >- >- if bytecodeHFilename and section['emitOpcodeIDStringValuesInHFile']: >- bytecodeNum = 0 >- for bytecode in section["bytecodes"]: >- bytecodeHFile.write("#define {0}_value_string \"{1}\"\n".format(bytecode["name"], bytecodeNum)) >- firstMacro = False >- bytecodeNum = bytecodeNum + 1 >- >- bytecodeHFile.write("\n") >- >- if initASMFileName and section['emitInASMFile']: >- prefix = "" >- if "asmPrefix" in section: >- prefix = section["asmPrefix"] >- for bytecode in section["bytecodes"]: >- initBytecodesFile.write("setEntryAddress({0}, _{1}{2})\n".format(initASMBytecodeNum, prefix, bytecode["name"])) >- initASMBytecodeNum = initASMBytecodeNum + 1 >- >- if bytecodeHFilename: >- bytecodeHFile.close() >- >- if initASMFileName: >- initBytecodesFile.close() >- >- bytecodeFile.close() >- >- exit(0) >diff --git a/Source/JavaScriptCore/generator/Argument.rb b/Source/JavaScriptCore/generator/Argument.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..0a8e8be3abe201a65944d381b83e473a01521b06 >--- /dev/null >+++ b/Source/JavaScriptCore/generator/Argument.rb >@@ -0,0 +1,63 @@ >+require_relative 'Fits' >+ >+class Argument >+ attr_reader :name >+ >+ def initialize(name, type, index) >+ @optional = name[-1] == "?" >+ @name = @optional ? 
name[0...-1] : name >+ @type = type >+ @index = index >+ end >+ >+ def field >+ "#{@type.to_s} #{@name};" >+ end >+ >+ def create_param >+ "#{@type.to_s} #{@name}" >+ end >+ >+ def fits_check(size) >+ Fits::check size, @name, @type >+ end >+ >+ def fits_write(size) >+ Fits::write size, @name, @type >+ end >+ >+ def assert_fits(size) >+ "ASSERT((#{fits_check size}));" >+ end >+ >+ def load_from_stream(index, size) >+ "#{@name}(#{Fits::convert(size, "stream[#{index+1}]", @type)})" >+ end >+ >+ def setter >+ <<-EOF >+ void set#{capitalized_name}(#{@type.to_s} value) >+ { >+ if (isWide()) >+ set#{capitalized_name}<OpcodeSize::Wide>(value); >+ else >+ set#{capitalized_name}<OpcodeSize::Narrow>(value); >+ } >+ >+ template <OpcodeSize size> >+ void set#{capitalized_name}(#{@type.to_s} value) >+ { >+ auto* stream = reinterpret_cast<typename TypeBySize<size>::type*>(this + #{@index} * size + PaddingBySize<size>::value); >+ *stream = #{Fits::convert "size", "value", @type}; >+ } >+ EOF >+ end >+ >+ def capitalized_name >+ @capitalized_name ||= @name.to_s.split('_').map do |word| >+ letters = word.split('') >+ letters.first.upcase! 
>+ letters.join >+ end.join >+ end >+end >diff --git a/Source/JavaScriptCore/generator/Assertion.rb b/Source/JavaScriptCore/generator/Assertion.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..a93dd4d9feff9750471fb66d94fd73b2da5faee0 >--- /dev/null >+++ b/Source/JavaScriptCore/generator/Assertion.rb >@@ -0,0 +1,9 @@ >+class AssertionError < RuntimeError >+ def initialize(msg) >+ super >+ end >+end >+ >+def assert(msg, &block) >+ raise AssertionError, msg unless yield >+end >diff --git a/Source/JavaScriptCore/generator/DSL.rb b/Source/JavaScriptCore/generator/DSL.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..e307d6f10313aa42d185b4ab23556fbfdab1c18c >--- /dev/null >+++ b/Source/JavaScriptCore/generator/DSL.rb >@@ -0,0 +1,124 @@ >+require_relative 'Assertion' >+require_relative 'Section' >+require_relative 'Template' >+require_relative 'Type' >+require_relative 'GeneratedFile' >+ >+module DSL >+ @sections = [] >+ @current_section = nil >+ @context = binding() >+ @namespaces = [] >+ >+ def self.begin_section(name, config={}) >+ assert("must call `end_section` before beginning a new section") { @current_section.nil? } >+ @current_section = Section.new name, config >+ end >+ >+ def self.end_section(name) >+ assert("current section's name is `#{@current_section.name}`, but end_section was called with `#{name}`") { @current_section.name == name } >+ @sections << @current_section >+ @current_section = nil >+ end >+ >+ def self.op(name, config = {}) >+ assert("`op` can only be called in between `begin_section` and `end_section`") { not @current_section.nil? } >+ @current_section.add_opcode(name, config) >+ end >+ >+ def self.op_group(desc, ops, config) >+ assert("`op_group` can only be called in between `begin_section` and `end_section`") { not @current_section.nil? 
} >+ @current_section.add_opcode_group(desc, ops, config) >+ end >+ >+ def self.types(types) >+ types.map do |type| >+ type = (@namespaces + [type]).join "::" >+ @context.eval("#{type} = Type.new '#{type}'") >+ end >+ end >+ >+ def self.templates(types) >+ types.map do |type| >+ type = (@namespaces + [type]).join "::" >+ @context.eval("#{type} = Template.new '#{type}'") >+ end >+ end >+ >+ def self.namespace(name) >+ @namespaces << name.to_s >+ ctx = @context >+ @context = @context.eval(" >+ module #{name} >+ def self.get_binding >+ binding() >+ end >+ end >+ #{name}.get_binding >+ ") >+ yield >+ @context = ctx >+ @namespaces.pop >+ end >+ >+ def self.run(options) >+ bytecodeListPath = options[:bytecodeList] >+ bytecodeList = File.open(bytecodeListPath) >+ @context.eval(bytecodeList.read, bytecodeListPath) >+ assert("must end last section") { @current_section.nil? } >+ >+ write_bytecodes(bytecodeList, options[:bytecodesFilename]) >+ write_bytecode_structs(bytecodeList, options[:bytecodeStructsFilename]) >+ write_init_asm(bytecodeList, options[:initAsmFilename]) >+ end >+ >+ def self.write_bytecodes(bytecode_list, bytecodes_filename) >+ GeneratedFile::create(bytecodes_filename, bytecode_list) do |template| >+ template.prefix = "#pragma once" >+ template.body = @sections.map(&:header_helpers).join("\n") >+ end >+ end >+ >+ def self.write_bytecode_structs(bytecode_list, bytecode_structs_filename) >+ GeneratedFile::create(bytecode_structs_filename, bytecode_list) do |template| >+ opcodes = opcodes_for(:emit_in_structs_file) >+ >+ template.prefix = <<-EOF >+ #pragma once >+ >+ #include "ArithProfile.h" >+ #include "BytecodeDumper.h" >+ #include "BytecodeGenerator.h" >+ #include "Fits.h" >+ #include "Instruction.h" >+ #include "Opcode.h" >+ #include "ToThisStatus.h" >+ >+ namespace JSC { >+ EOF >+ >+ template.body = <<-EOF >+ #{opcodes.map(&:cpp_class).join("\n")} >+ >+ #{Opcode.dump_bytecode(opcodes)} >+ EOF >+ >+ template.suffix = "} // namespace JSC" >+ end >+ end >+ 
>+ def self.write_init_asm(bytecode_list, init_asm_filename) >+ opcodes = opcodes_for(:emit_in_asm_file) >+ >+ GeneratedFile::create(init_asm_filename, bytecode_list) do |template| >+ template.multiline_comment = nil >+ template.line_comment = "#" >+ template.body = (opcodes.map(&:set_entry_address) + opcodes.map(&:set_entry_address_wide)) .join("\n") >+ end >+ end >+ >+ def self.opcodes_for(file) >+ sections = @sections.select { |s| s.config[file] } >+ sections.map(&:opcodes).flatten >+ end >+end >diff --git a/Source/JavaScriptCore/generator/Fits.rb b/Source/JavaScriptCore/generator/Fits.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..60a0b47635a66ed6361b36337f93d6fa2d2ca968 >--- /dev/null >+++ b/Source/JavaScriptCore/generator/Fits.rb >@@ -0,0 +1,13 @@ >+module Fits >+ def self.convert(size, name, type) >+ "Fits<#{type.to_s}, #{size}>::convert(#{name})" >+ end >+ >+ def self.check(size, name, type) >+ "Fits<#{type.to_s}, #{size}>::check(#{name})" >+ end >+ >+ def self.write(size, name, type) >+ "__generator->write(#{convert(size, name, type)});" >+ end >+end >diff --git a/Source/JavaScriptCore/generator/GeneratedFile.rb b/Source/JavaScriptCore/generator/GeneratedFile.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..69e6657d8f60983c02e759573e2ab4192e03009a >--- /dev/null >+++ b/Source/JavaScriptCore/generator/GeneratedFile.rb >@@ -0,0 +1,79 @@ >+require 'date' >+require 'digest' >+ >+$LICENSE = <<-EOF >+Copyright (C) #{Date.today.year} Apple Inc. All rights reserved. >+ >+Redistribution and use in source and binary forms, with or without >+modification, are permitted provided that the following conditions >+are met: >+ >+1. Redistributions of source code must retain the above copyright >+ notice, this list of conditions and the following disclaimer. >+2. 
Redistributions in binary form must reproduce the above copyright >+ notice, this list of conditions and the following disclaimer in the >+ documentation and/or other materials provided with the distribution. >+ >+THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY >+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED >+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE >+DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY >+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES >+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; >+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND >+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF >+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. >+EOF >+ >+module GeneratedFile >+ class Template < Struct.new(:multiline_comment, :line_comment, :prefix, :suffix, :body) >+ def initialize >+ super(["/*", " *", "*/"], "// ", nil, nil, nil) >+ end >+ end >+ >+ def self.create(filename, dependency) >+ template = Template.new >+ yield template >+ >+ file = File.open(filename, "w") >+ self.sha1(file, template, dependency) >+ self.license(file, template, dependency) >+ >+ unless template.prefix.nil? >+ write(file, template.prefix.to_s, "\n") >+ end >+ unless template.body.nil? >+ write(file, template.body.to_s, "\n") >+ end >+ unless template.suffix.nil? >+ write(file, template.suffix.to_s, "\n") >+ end >+ end >+ >+ def self.sha1(file, template, dependency) >+ write(file, template.line_comment, " SHA1Hash: ", Digest::SHA1.hexdigest(dependency.read), "\n") >+ end >+ >+ def self.license(file, template, dependency) >+ unless template.multiline_comment.nil? >+ write(file, template.multiline_comment[0], "\n") >+ end >+ >+ comment = if template.multiline_comment.nil? 
then template.line_comment else template.multiline_comment[1] end >+ write(file, $LICENSE.strip.split("\n").map { |line| "#{comment} #{line}" }.join("\n"), "\n\n") >+ write(file, comment, " Autogenerated from ", dependency.path, ", do not modify.\n") >+ >+ unless template.multiline_comment.nil? >+ write(file, template.multiline_comment[2], "\n") >+ end >+ >+ write(file, "\n") >+ end >+ >+ def self.write(file, *strings) >+ file.write(strings.map(&:to_s).join) >+ end >+end >+ >diff --git a/Source/JavaScriptCore/generator/Metadata.rb b/Source/JavaScriptCore/generator/Metadata.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..fc7e3f54cbd2ccdd863850114dcdcb900cc2e0bf >--- /dev/null >+++ b/Source/JavaScriptCore/generator/Metadata.rb >@@ -0,0 +1,83 @@ >+require_relative 'Fits' >+ >+class Metadata >+ @@emitter_local = nil >+ >+ def initialize(fields, initializers) >+ @fields = fields >+ @initializers = initializers >+ end >+ >+ def empty? >+ @fields.nil? >+ end >+ >+ def cpp_class(op) >+ return if empty? >+ >+ fields = @fields.map { |field, type| "#{type.to_s} #{field.to_s};" }.join "\n" >+ inits = nil >+ if @initializers && (not @initializers.empty?) >+ inits = ": " + @initializers.map do |metadata, arg| >+ "#{metadata}(__op.#{arg})" >+ end.join(", ") >+ end >+ >+ <<-EOF >+ struct Metadata { >+ Metadata(const #{op.capitalized_name}&#{" __op" if inits}) >+ #{inits} >+ { } >+ >+ #{fields} >+ }; >+ EOF >+ end >+ >+ def accessor >+ return if empty? >+ >+ <<-EOF >+ Metadata& metadata(CodeBlock* codeBlock) const >+ { >+ auto*& it = codeBlock->metadata<Metadata>(opcodeID(), metadataID); >+ if (!it) >+ it = new Metadata { *this }; >+ return *it; >+ } >+ >+ Metadata& metadata(ExecState* exec) const >+ { >+ return metadata(exec->codeBlock()); >+ } >+ EOF >+ end >+ >+ def field >+ return if empty? >+ >+ "unsigned metadataID;" >+ end >+ >+ def load_from_stream(index, size) >+ return if empty? 
>+ >+ "metadataID(#{Fits::convert(size, "stream[#{index}]", :unsigned)})" >+ end >+ >+ def create_emitter_local >+ return if empty? >+ >+ <<-EOF >+ auto #{emitter_local.name} = __generator->addMetadataFor(opcodeID()); >+ EOF >+ end >+ >+ def emitter_local >+ unless @@emitter_local >+ @@emitter_local = Argument.new("__metadataID", :unsigned, -1) >+ end >+ >+ return @@emitter_local >+ end >+end >diff --git a/Source/JavaScriptCore/generator/Opcode.rb b/Source/JavaScriptCore/generator/Opcode.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..b0985794282db7fe4a0c1d86f583cb5caa4f30ea >--- /dev/null >+++ b/Source/JavaScriptCore/generator/Opcode.rb >@@ -0,0 +1,198 @@ >+require_relative 'Argument' >+require_relative 'Fits' >+require_relative 'Metadata' >+ >+class Opcode >+ attr_reader :id >+ attr_reader :args >+ attr_reader :metadata >+ >+ module Size >+ Narrow = "OpcodeSize::Narrow" >+ Wide = "OpcodeSize::Wide" >+ end >+ >+ @@id = 0 >+ >+ def self.id >+ tid = @@id >+ @@id = @@id + 1 >+ tid >+ end >+ >+ def initialize(section, name, args, metadata, metadata_initializers) >+ @id = self.class.id >+ @section = section >+ @name = name >+ @metadata = Metadata.new metadata, metadata_initializers >+ @args = args.map.with_index { |(arg_name, type), index| Argument.new arg_name, type, index + 1 } unless args.nil? >+ end >+ >+ def print_args(&block) >+ return if @args.nil? >+ >+ @args.map(&block).join "\n" >+ end >+ >+ def capitalized_name >+ name.split('_').map(&:capitalize).join >+ end >+ >+ def typed_args >+ return if @args.nil? >+ >+ @args.map(&:create_param).unshift("").join(", ") >+ end >+ >+ def map_fields_with_size(size, &block) >+ args = @args ? @args.dup : [] >+ args << Argument.new("opcodeID()", :unsigned, 0) >+ unless @metadata.empty? 
>+ args << @metadata.emitter_local >+ end >+ args.map { |arg| block.call(arg, size) } >+ end >+ >+ def cpp_class >+ <<-EOF >+ struct #{capitalized_name} : public Instruction { >+ #{opcodeID} >+ >+ #{emitter} >+ >+ #{dumper} >+ >+ #{constructors} >+ >+ #{setters} >+ >+ #{metadata} >+ >+ #{members} >+ }; >+ EOF >+ end >+ >+ def opcodeID >+ "static constexpr OpcodeID opcodeID() { return static_cast<OpcodeID>(#{@id}); }" >+ end >+ >+ def emitter >+ op_wide = Argument.new("op_wide", :unsigned, 0) >+ <<-EOF >+ static void emit(BytecodeGenerator* __generator#{typed_args}) >+ { >+ __generator->recordOpcode(opcodeID()); >+ #{@metadata.create_emitter_local} >+ if (#{map_fields_with_size(Size::Narrow, &:fits_check).join " && "}) { >+ #{map_fields_with_size(Size::Narrow, &:fits_write).join "\n"} >+ } else { >+ #{op_wide.assert_fits Size::Narrow} >+ #{map_fields_with_size(Size::Wide, &:assert_fits).join "\n"} >+ >+ #{op_wide.fits_write Size::Narrow} >+ #{map_fields_with_size(Size::Wide, &:fits_write).join "\n"} >+ } >+ } >+ EOF >+ end >+ >+ def dumper >+ <<-EOF >+ template<typename Block> >+ void dump(BytecodeDumper<Block>* __dumper, InstructionStream::Offset __location) >+ { >+ __dumper->printLocationAndOp(__location, "#{@name}"); >+ #{print_args { |arg| >+ <<-EOF >+ __dumper->dumpOperand(#{arg.name}); >+ EOF >+ }} >+ } >+ EOF >+ end >+ >+ def constructors >+ fields = (@args || []) + (@metadata.empty? ? [] : [@metadata]) >+ init = ->(size) { fields.empty? ? 
"" : ": #{fields.map.with_index { |arg, i| arg.load_from_stream(i, size) }.join ",\n" }" } >+ >+ <<-EOF >+ #{capitalized_name}(const uint8_t* stream) >+ #{init.call("OpcodeSize::Narrow")} >+ { ASSERT(stream[0] == opcodeID()); } >+ >+ #{capitalized_name}(const uint32_t* stream) >+ #{init.call("OpcodeSize::Wide")} >+ { ASSERT(stream[0] == opcodeID()); } >+ >+ static #{capitalized_name} decode(const uint8_t* stream) >+ { >+ if (*stream != op_wide) >+ return { stream }; >+ >+ auto wideStream = reinterpret_cast<const uint32_t*>(stream + 1); >+ return { wideStream }; >+ } >+ >+ EOF >+ end >+ >+ def setters >+ print_args(&:setter) >+ end >+ >+ def metadata >+ <<-EOF >+ #{@metadata.cpp_class(self)} >+ >+ #{@metadata.accessor} >+ EOF >+ end >+ >+ def members >+ <<-EOF >+ #{print_args(&:field)} >+ #{@metadata.field} >+ EOF >+ end >+ >+ def set_entry_address >+ "setEntryAddress(#{@id}, _#{full_name})" >+ end >+ >+ def set_entry_address_wide >+ "setEntryAddressWide(#{@id}, _#{full_name}_wide)" >+ end >+ >+ def full_name >+ "#{@section.config[:asm_prefix]}#{@section.config[:op_prefix]}#{@name}" >+ end >+ >+ def name >+ "#{@section.config[:op_prefix]}#{@name}" >+ end >+ >+ def length >+ 1 + (@args.nil? ? 0 : @args.length) + (@metadata.empty? ? 
0 : 1)
>+    end
>+
>+    def self.dump_bytecode(opcodes)
>+        <<-EOF
>+        template<typename Block>
>+        static void dumpBytecode(BytecodeDumper<Block>* __dumper, InstructionStream::Offset __location, const Instruction* __instruction)
>+        {
>+            switch (__instruction->opcodeID()) {
>+            #{opcodes.map { |op|
>+                <<-EOF
>+                case #{op.name}:
>+                    __instruction->as<#{op.capitalized_name}>().dump(__dumper, __location);
>+                    break;
>+                EOF
>+            }.join "\n"}
>+            default:
>+                ASSERT_NOT_REACHED();
>+            }
>+        }
>+        EOF
>+    end
>+end
>diff --git a/Source/JavaScriptCore/generator/OpcodeGroup.rb b/Source/JavaScriptCore/generator/OpcodeGroup.rb
>new file mode 100644
>index 0000000000000000000000000000000000000000..0b7971f9a67ba7c66f9906d77cf98dc6d641035e
>--- /dev/null
>+++ b/Source/JavaScriptCore/generator/OpcodeGroup.rb
>@@ -0,0 +1,14 @@
>+require_relative 'Opcode'
>+
>+class OpcodeGroup
>+    attr_reader :name
>+    attr_reader :opcodes
>+    attr_reader :config
>+
>+    def initialize(section, desc, opcodes, config)
>+        @section = section
>+        @name = desc
>+        @opcodes = opcodes
>+        @config = config
>+    end
>+end
>diff --git a/Source/JavaScriptCore/generator/Options.rb b/Source/JavaScriptCore/generator/Options.rb
>new file mode 100644
>index 0000000000000000000000000000000000000000..2ca194a17dd25a03dbda9cce58d077eb3270f6da
>--- /dev/null
>+++ b/Source/JavaScriptCore/generator/Options.rb
>@@ -0,0 +1,59 @@
>+require 'optparse'
>+
>+$config = {
>+    bytecodesFilename: {
>+        short: "-b",
>+        long: "--bytecodes_h FILE",
>+        desc: "generate bytecodes macro .h FILE",
>+    },
>+    bytecodeStructsFilename: {
>+        short: "-s",
>+        long: "--bytecode_structs_h FILE",
>+        desc: "generate bytecode structs .h FILE",
>+    },
>+    initAsmFilename: {
>+        short: "-a",
>+        long: "--init_bytecodes_asm FILE",
>+        desc: "generate ASM bytecodes init FILE",
>+    },
>+};
>+
>+module Options
>+    def self.optparser(options)
>+        OptionParser.new do |opts|
>+            opts.banner = "usage: #{opts.program_name} [options] <bytecode-list-file>"
>+            $config.map do |key, option|
>+
opts.on(option[:short], option[:long], option[:desc]) do |v| >+ options[key] = v >+ end >+ end >+ end >+ end >+ >+ def self.check(argv, options) >+ missing = $config.keys.select{ |param| options[param].nil? } >+ unless missing.empty? >+ raise OptionParser::MissingArgument.new(missing.join(', ')) >+ end >+ unless argv.length == 1 >+ raise OptionParser::MissingArgument.new("<bytecode-list-file>") >+ end >+ end >+ >+ def self.parse(argv) >+ options = {} >+ parser = optparser(options) >+ >+ begin >+ parser.parse!(argv) >+ check(argv, options) >+ rescue OptionParser::MissingArgument, OptionParser::InvalidOption >+ puts $!.to_s >+ puts parser >+ exit 1 >+ end >+ >+ options[:bytecodeList] = argv[0] >+ options >+ end >+end >diff --git a/Source/JavaScriptCore/generator/Section.rb b/Source/JavaScriptCore/generator/Section.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..c060da9d65342ab31636ae888c8a5dc4bf42280e >--- /dev/null >+++ b/Source/JavaScriptCore/generator/Section.rb >@@ -0,0 +1,47 @@ >+require_relative 'Opcode' >+require_relative 'OpcodeGroup' >+ >+class Section >+ attr_reader :name >+ attr_reader :config >+ attr_reader :opcodes >+ >+ def initialize(name, config) >+ @name = name >+ @config = config >+ @opcodes = [] >+ @opcode_groups = [] >+ end >+ >+ def add_opcode(name, config) >+ @opcodes << create_opcode(name, config) >+ end >+ >+ def create_opcode(name, config) >+ Opcode.new(self, name, config[:args], config[:metadata], config[:metadata_initializers]) >+ end >+ >+ def add_opcode_group(name, opcodes, config) >+ opcodes = opcodes.map { |opcode| create_opcode(opcode, config) } >+ @opcode_groups << OpcodeGroup.new(self, name, opcodes, config) >+ @opcodes += opcodes >+ end >+ >+ def header_helpers >+ out = StringIO.new >+ if config[:emit_in_h_file] >+ out.write("#define FOR_EACH_#{config[:macro_name_component]}_ID(macro) \\\n") >+ opcodes.each { |opcode| out.write("macro(#{opcode.name}, #{opcode.length}) \\\n") } >+ out << "\n" >+ 
out.write("#define NUMBER_OF_#{config[:macro_name_component]}_IDS #{opcodes.length}\n") >+ end >+ >+ if config[:emit_opcode_id_string_values_in_h_file] >+ out << "\n" >+ opcodes.each { |opcode| >+ out.write("#define #{opcode.name}_value_string \"#{opcode.id}\"\n") >+ } >+ end >+ out.string >+ end >+end >diff --git a/Source/JavaScriptCore/generator/Template.rb b/Source/JavaScriptCore/generator/Template.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..a4e429ecbc1fc2956df4142c70fadf8c21fb89a7 >--- /dev/null >+++ b/Source/JavaScriptCore/generator/Template.rb >@@ -0,0 +1,7 @@ >+require_relative 'Type' >+ >+class Template < Type >+ def [](*types) >+ Type.new "#{@name}<#{types.map(&:to_s).join ","}>" >+ end >+end >diff --git a/Source/JavaScriptCore/generator/Type.rb b/Source/JavaScriptCore/generator/Type.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..3b148bdcbd8ee70859f05f29bf41edbf1fbec41c >--- /dev/null >+++ b/Source/JavaScriptCore/generator/Type.rb >@@ -0,0 +1,13 @@ >+class Type >+ def initialize(name) >+ @name = name >+ end >+ >+ def * >+ Type.new "#{@name}*" >+ end >+ >+ def to_s >+ @name.to_s >+ end >+end >diff --git a/Source/JavaScriptCore/generator/main.rb b/Source/JavaScriptCore/generator/main.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..adfc3cf57fe62a03433c0cb9665e3cd50e8cb48e >--- /dev/null >+++ b/Source/JavaScriptCore/generator/main.rb >@@ -0,0 +1,15 @@ >+require_relative 'DSL' >+require_relative 'Options' >+ >+# for some reason, lower case variables are not accessible until the next invocation of eval >+# so we bind them here, before eval'ing the file >+DSL::types [ >+ :bool, >+ :int, >+ :unsigned, >+] >+ >+ >+ >+options = Options::parse(ARGV) >+DSL::run(options) >diff --git a/Source/JavaScriptCore/generator/runtime/DumpValue.cpp b/Source/JavaScriptCore/generator/runtime/DumpValue.cpp >new file mode 100644 >index 
0000000000000000000000000000000000000000..0f5aecb8fb6c2dd48370cfeb69de0a06d894a794 >--- /dev/null >+++ b/Source/JavaScriptCore/generator/runtime/DumpValue.cpp >@@ -0,0 +1,36 @@ >+/* >+ * Copyright (C) 2018 Apple Inc. All rights reserved. >+ * >+ * Redistribution and use in source and binary forms, with or without >+ * modification, are permitted provided that the following conditions >+ * are met: >+ * 1. Redistributions of source code must retain the above copyright >+ * notice, this list of conditions and the following disclaimer. >+ * 2. Redistributions in binary form must reproduce the above copyright >+ * notice, this list of conditions and the following disclaimer in the >+ * documentation and/or other materials provided with the distribution. >+ * >+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' >+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, >+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS >+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR >+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF >+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS >+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN >+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) >+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF >+ * THE POSSIBILITY OF SUCH DAMAGE. 
>+ */
>+
>+#include "BytecodeDumper.h"
>+
>+namespace JSC {
>+
>+template<typename Block>
>+void BytecodeDumper<Block>::dumpValue(VirtualRegister value)
>+{
>+    m_out.printf("%s", registerName(value).data());
>+}
>+
>+} // namespace JSC
>diff --git a/Source/JavaScriptCore/generator/runtime/DumpValue.h b/Source/JavaScriptCore/generator/runtime/DumpValue.h
>new file mode 100644
>index 0000000000000000000000000000000000000000..323e101787da8482e9e0ea90dba5ef5106b041be
>--- /dev/null
>+++ b/Source/JavaScriptCore/generator/runtime/DumpValue.h
>@@ -0,0 +1,33 @@
>+/*
>+ * Copyright (C) 2018 Apple Inc. All rights reserved.
>+ *
>+ * Redistribution and use in source and binary forms, with or without
>+ * modification, are permitted provided that the following conditions
>+ * are met:
>+ * 1. Redistributions of source code must retain the above copyright
>+ *    notice, this list of conditions and the following disclaimer.
>+ * 2. Redistributions in binary form must reproduce the above copyright
>+ *    notice, this list of conditions and the following disclaimer in the
>+ *    documentation and/or other materials provided with the distribution.
>+ *
>+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
>+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
>+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
>+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
>+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
>+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
>+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
>+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
>+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
>+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
>+ * THE POSSIBILITY OF SUCH DAMAGE.
>+ */ >+ >+#pragma once >+ >+namespace JSC { >+ >+template<typename Block> >+void BytecodeDumper<Block>::dumpValue(VirtualRegister); >+ >+} // namespace JSC >diff --git a/Source/JavaScriptCore/generator/runtime/Fits.h b/Source/JavaScriptCore/generator/runtime/Fits.h >new file mode 100644 >index 0000000000000000000000000000000000000000..77ef6ad43bca167135a31db6b3cd4cb66120cbad >--- /dev/null >+++ b/Source/JavaScriptCore/generator/runtime/Fits.h >@@ -0,0 +1,256 @@ >+/* >+ * Copyright (C) 2018 Apple Inc. All rights reserved. >+ * >+ * Redistribution and use in source and binary forms, with or without >+ * modification, are permitted provided that the following conditions >+ * are met: >+ * 1. Redistributions of source code must retain the above copyright >+ * notice, this list of conditions and the following disclaimer. >+ * 2. Redistributions in binary form must reproduce the above copyright >+ * notice, this list of conditions and the following disclaimer in the >+ * documentation and/or other materials provided with the distribution. >+ * >+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' >+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE >+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS >+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR >+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF >+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS >+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN >+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) >+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF >+ * THE POSSIBILITY OF SUCH DAMAGE. 
>+ */ >+ >+#pragma once >+ >+#include "GetPutInfo.h" >+#include "Interpreter.h" >+#include "ProfileTypeBytecodeFlag.h" >+#include "ResultType.h" >+#include "ScopeOffset.h" >+#include "SpecialPointer.h" >+#include "VirtualRegister.h" >+#include <type_traits> >+ >+namespace JSC { >+ >+enum OpcodeSize { >+ Narrow = 1, >+ Wide = 4, >+}; >+ >+template<OpcodeSize> >+struct TypeBySize; >+ >+template<> >+struct TypeBySize<OpcodeSize::Narrow> { >+ using type = uint8_t; >+}; >+ >+template<> >+struct TypeBySize<OpcodeSize::Wide> { >+ using type = uint32_t; >+}; >+ >+template<OpcodeSize> >+struct PaddingBySize; >+ >+template<> >+struct PaddingBySize<OpcodeSize::Narrow> { >+ static constexpr uint8_t value = 0; >+}; >+ >+template<> >+struct PaddingBySize<OpcodeSize::Wide> { >+ static constexpr uint8_t value = 1; >+}; >+ >+// Fits template >+template<typename, OpcodeSize, typename = std::true_type> >+struct Fits; >+ >+// Implicit conversion for types of the same size >+template<typename T, OpcodeSize size> >+struct Fits<T, size, std::enable_if_t<sizeof(T) == size, std::true_type>> { >+ static bool check(T) { return true; } >+ >+ static typename TypeBySize<size>::type convert(T t) { return *reinterpret_cast<typename TypeBySize<size>::type*>(&t); } >+ >+ template<class T1 = T, OpcodeSize size1 = size, typename = std::enable_if_t<!std::is_same<T1, typename TypeBySize<size1>::type>::value, std::true_type>> >+ static T1 convert(typename TypeBySize<size1>::type t) { return *reinterpret_cast<T1*>(&t); } >+}; >+ >+template<typename T, OpcodeSize size> >+struct Fits<T, size, std::enable_if_t<sizeof(T) < size, std::true_type>> { >+ static bool check(T) { return true; } >+ >+ static typename TypeBySize<size>::type convert(T t) { return static_cast<typename TypeBySize<size>::type>(t); } >+ >+ template<class T1 = T, OpcodeSize size1 = size, typename = std::enable_if_t<!std::is_same<T1, typename TypeBySize<size1>::type>::value, std::true_type>> >+ static T1 convert(typename 
TypeBySize<size1>::type t) { return static_cast<T1>(t); } >+}; >+ >+template<> >+struct Fits<uint32_t, OpcodeSize::Narrow> { >+ static bool check(unsigned u) { return u <= UINT8_MAX; } >+ >+ static uint8_t convert(unsigned u) >+ { >+ assert(check(u)); >+ return static_cast<uint8_t>(u); >+ } >+ static unsigned convert(uint8_t u) >+ { >+ return u; >+ } >+}; >+ >+template<> >+struct Fits<int, OpcodeSize::Narrow> { >+ static bool check(int i) >+ { >+ return i >= INT8_MIN && i <= INT8_MAX; >+ } >+ >+ static uint8_t convert(int i) >+ { >+ return static_cast<uint8_t>(i); >+ } >+ >+ static int convert(uint8_t i) >+ { >+ return static_cast<int8_t>(i); >+ } >+}; >+ >+template<> >+struct Fits<VirtualRegister, OpcodeSize::Narrow> : public Fits<int, OpcodeSize::Narrow> { >+ using Base = Fits<int, OpcodeSize::Narrow>; >+ static bool check(const VirtualRegister& r) { return Base::check(r.offset()); } >+ static uint8_t convert(const VirtualRegister& r) >+ { >+ return Base::convert(r.offset()); >+ } >+ static VirtualRegister convert(uint8_t i) >+ { >+ return VirtualRegister { Base::convert(i) }; >+ } >+}; >+ >+template<> >+struct Fits<Special::Pointer, OpcodeSize::Narrow> : Fits<int, OpcodeSize::Narrow> { >+ using Base = Fits<int, OpcodeSize::Narrow>; >+ static bool check(Special::Pointer sp) { return Base::check(static_cast<int>(sp)); } >+ static uint8_t convert(Special::Pointer sp) >+ { >+ return Base::convert(static_cast<int>(sp)); >+ } >+ static Special::Pointer convert(uint8_t sp) >+ { >+ return static_cast<Special::Pointer>(Base::convert(sp)); >+ } >+}; >+ >+template<> >+struct Fits<ScopeOffset, OpcodeSize::Narrow> : Fits<unsigned, OpcodeSize::Narrow> { >+ using Base = Fits<unsigned, OpcodeSize::Narrow>; >+ static bool check(ScopeOffset so) { return Base::check(so.offsetUnchecked()); } >+ static uint8_t convert(ScopeOffset so) >+ { >+ return Base::convert(so.offsetUnchecked()); >+ } >+ static ScopeOffset convert(uint8_t so) >+ { >+ return ScopeOffset { Base::convert(so) }; >+ 
} >+}; >+ >+template<> >+struct Fits<GetPutInfo, OpcodeSize::Narrow> : Fits<unsigned, OpcodeSize::Narrow> { >+ using Base = Fits<unsigned, OpcodeSize::Narrow>; >+ static bool check(GetPutInfo gpi) { return Base::check(gpi.operand()); } >+ static uint8_t convert(GetPutInfo gpi) >+ { >+ return Base::convert(gpi.operand()); >+ } >+ static GetPutInfo convert(uint8_t gpi) >+ { >+ return GetPutInfo { Base::convert(gpi) }; >+ } >+}; >+ >+template<> >+struct Fits<DebugHookType, OpcodeSize::Narrow> : Fits<int, OpcodeSize::Narrow> { >+ using Base = Fits<int, OpcodeSize::Narrow>; >+ static bool check(DebugHookType dht) { return Base::check(static_cast<int>(dht)); } >+ static uint8_t convert(DebugHookType dht) >+ { >+ return Base::convert(static_cast<int>(dht)); >+ } >+ static DebugHookType convert(uint8_t dht) >+ { >+ return static_cast<DebugHookType>(Base::convert(dht)); >+ } >+}; >+ >+template<> >+struct Fits<ProfileTypeBytecodeFlag, OpcodeSize::Narrow> : Fits<int, OpcodeSize::Narrow> { >+ using Base = Fits<int, OpcodeSize::Narrow>; >+ static bool check(ProfileTypeBytecodeFlag ptbf) { return Base::check(static_cast<int>(ptbf)); } >+ static uint8_t convert(ProfileTypeBytecodeFlag ptbf) >+ { >+ return Base::convert(static_cast<int>(ptbf)); >+ } >+ static ProfileTypeBytecodeFlag convert(uint8_t ptbf) >+ { >+ return static_cast<ProfileTypeBytecodeFlag>(Base::convert(ptbf)); >+ } >+}; >+ >+template<> >+struct Fits<ResolveType, OpcodeSize::Narrow> : Fits<int, OpcodeSize::Narrow> { >+ using Base = Fits<int, OpcodeSize::Narrow>; >+ static bool check(ResolveType rt) { return Base::check(static_cast<int>(rt)); } >+ static uint8_t convert(ResolveType rt) >+ { >+ return Base::convert(static_cast<int>(rt)); >+ } >+ >+ static ResolveType convert(uint8_t rt) >+ { >+ return static_cast<ResolveType>(Base::convert(rt)); >+ } >+}; >+ >+template<> >+struct Fits<OperandTypes, OpcodeSize::Narrow> : Fits<int, OpcodeSize::Narrow> { >+ using Base = Fits<int, OpcodeSize::Narrow>; >+ static bool 
check(OperandTypes types) { return Base::check(types.toInt()); } >+ static uint8_t convert(OperandTypes types) >+ { >+ return Base::convert(types.toInt()); >+ } >+ static OperandTypes convert(uint8_t types) >+ { >+ return OperandTypes::fromInt(Base::convert(types)); >+ } >+}; >+ >+template<> >+struct Fits<PutByIdFlags, OpcodeSize::Narrow> : Fits<int, OpcodeSize::Narrow> { >+ using Base = Fits<int, OpcodeSize::Narrow>; >+ static bool check(PutByIdFlags flags) { return Base::check(static_cast<int>(flags)); } >+ static uint8_t convert(PutByIdFlags flags) >+ { >+ return Base::convert(static_cast<int>(flags)); >+ } >+ >+ static PutByIdFlags convert(uint8_t flags) >+ { >+ return static_cast<PutByIdFlags>(Base::convert(flags)); >+ } >+}; >+ >+} // namespace JSC >diff --git a/Source/JavaScriptCore/generator/runtime/Instruction.h b/Source/JavaScriptCore/generator/runtime/Instruction.h >new file mode 100644 >index 0000000000000000000000000000000000000000..0308305ca3e8f629fbf11dfb39d76115bff2b593 >--- /dev/null >+++ b/Source/JavaScriptCore/generator/runtime/Instruction.h >@@ -0,0 +1,114 @@ >+/* >+ * Copyright (C) 2018 Apple Inc. All rights reserved. >+ * >+ * Redistribution and use in source and binary forms, with or without >+ * modification, are permitted provided that the following conditions >+ * are met: >+ * 1. Redistributions of source code must retain the above copyright >+ * notice, this list of conditions and the following disclaimer. >+ * 2. Redistributions in binary form must reproduce the above copyright >+ * notice, this list of conditions and the following disclaimer in the >+ * documentation and/or other materials provided with the distribution. >+ * >+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' >+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE >+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS >+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR >+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF >+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS >+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN >+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) >+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF >+ * THE POSSIBILITY OF SUCH DAMAGE. >+ */ >+ >+#pragma once >+ >+#include "Fits.h" >+#include "Opcode.h" >+ >+namespace JSC { >+ >+struct Instruction { >+protected: >+ Instruction() >+ { } >+ >+private: >+ template<OpcodeSize Width> >+ class Impl { >+ public: >+ OpcodeID opcodeID() const { return static_cast<OpcodeID>(m_opcode); } >+ >+ private: >+ typename TypeBySize<Width>::type m_opcode; >+ }; >+ >+public: >+ OpcodeID opcodeID() const >+ { >+ if (isWide()) >+ return wide()->opcodeID(); >+ return narrow()->opcodeID(); >+ } >+ >+ const char* name() const >+ { >+ return opcodeNames[opcodeID()]; >+ } >+ >+ bool isWide() const >+ { >+ return narrow()->opcodeID() == op_wide; >+ } >+ >+ size_t size() const >+ { >+ auto wide = isWide(); >+ auto padding = wide ? 1 : 0; >+ auto size = wide ? 
4 : 1; >+ return opcodeLengths[opcodeID()] * size + padding; >+ } >+ >+ template<class T> >+ bool is() const >+ { >+ return opcodeID() == T::opcodeID(); >+ } >+ >+ template<class T> >+ T as() const >+ { >+ assert(is<T>()); >+ return T(reinterpret_cast<const uint8_t*>(this)); >+ } >+ >+ template<class T> >+ T* cast() >+ { >+ assert(is<T>()); >+ return reinterpret_cast<T*>(this); >+ } >+ >+ template<class T> >+ const T* cast() const >+ { >+ assert(is<T>()); >+ return reinterpret_cast<const T*>(this); >+ } >+ >+ const Impl<OpcodeSize::Narrow>* narrow() const >+ { >+ return reinterpret_cast<const Impl<OpcodeSize::Narrow>*>(this); >+ } >+ >+ const Impl<OpcodeSize::Wide>* wide() const >+ { >+ >+ ASSERT(isWide()); >+ return reinterpret_cast<const Impl<OpcodeSize::Wide>*>((uintptr_t)this + 1); >+ } >+}; >+ >+} // namespace JSC >diff --git a/Source/JavaScriptCore/interpreter/AbstractPC.h b/Source/JavaScriptCore/interpreter/AbstractPC.h >index 877d3d04ac5e4fc76c6343f34c9136937f4f6492..4832d6de8253856cdb0e5125439a9f97946d8f68 100644 >--- a/Source/JavaScriptCore/interpreter/AbstractPC.h >+++ b/Source/JavaScriptCore/interpreter/AbstractPC.h >@@ -62,7 +62,7 @@ public: > > private: > #if ENABLE(JIT) >- void* m_pointer { nullptr }; >+ const void* m_pointer { nullptr }; > #endif > > enum Mode { None, JIT, Interpreter }; >diff --git a/Source/JavaScriptCore/interpreter/CallFrame.cpp b/Source/JavaScriptCore/interpreter/CallFrame.cpp >index 6325dc27c51f62e01420e54bd8774f7b56386dbd..b9f6b49d197b3582e3e646e1ce759ee001249b2f 100644 >--- a/Source/JavaScriptCore/interpreter/CallFrame.cpp >+++ b/Source/JavaScriptCore/interpreter/CallFrame.cpp >@@ -106,12 +106,12 @@ SUPPRESS_ASAN CallSiteIndex CallFrame::unsafeCallSiteIndex() const > } > > #if USE(JSVALUE32_64) >-Instruction* CallFrame::currentVPC() const >+const Instruction* CallFrame::currentVPC() const > { > return bitwise_cast<Instruction*>(callSiteIndex().bits()); > } > >-void CallFrame::setCurrentVPC(Instruction* vpc) >+void 
CallFrame::setCurrentVPC(const Instruction* vpc) > { > CallSiteIndex callSite(vpc); > this[CallFrameSlot::argumentCount].tag() = callSite.bits(); >@@ -125,13 +125,13 @@ unsigned CallFrame::callSiteBitsAsBytecodeOffset() const > } > > #else // USE(JSVALUE32_64) >-Instruction* CallFrame::currentVPC() const >+const Instruction* CallFrame::currentVPC() const > { > ASSERT(callSiteBitsAreBytecodeOffset()); >- return &codeBlock()->instructions()[callSiteBitsAsBytecodeOffset()]; >+ return codeBlock()->instructions().at(callSiteBitsAsBytecodeOffset()).ptr(); > } > >-void CallFrame::setCurrentVPC(Instruction* vpc) >+void CallFrame::setCurrentVPC(const Instruction* vpc) > { > CallSiteIndex callSite(codeBlock()->bytecodeOffset(vpc)); > this[CallFrameSlot::argumentCount].tag() = static_cast<int32_t>(callSite.bits()); >diff --git a/Source/JavaScriptCore/interpreter/CallFrame.h b/Source/JavaScriptCore/interpreter/CallFrame.h >index 6d5d72379b7954b582bcf87edf23616f5c337d0f..7e3c1b89ef4490aa5b1fb406dd1a212debe9fa6e 100644 >--- a/Source/JavaScriptCore/interpreter/CallFrame.h >+++ b/Source/JavaScriptCore/interpreter/CallFrame.h >@@ -53,7 +53,7 @@ namespace JSC { > : m_bits(bits) > { } > #if USE(JSVALUE32_64) >- explicit CallSiteIndex(Instruction* instruction) >+ explicit CallSiteIndex(const Instruction* instruction) > : m_bits(bitwise_cast<uint32_t>(instruction)) > { } > #endif >@@ -69,7 +69,7 @@ namespace JSC { > > struct CallerFrameAndPC { > CallFrame* callerFrame; >- Instruction* pc; >+ const Instruction* pc; > static const int sizeInRegisters = 2 * sizeof(void*) / sizeof(Register); > }; > static_assert(CallerFrameAndPC::sizeInRegisters == sizeof(CallerFrameAndPC) / sizeof(Register), "CallerFrameAndPC::sizeInRegisters is incorrect."); >@@ -187,8 +187,8 @@ namespace JSC { > return topOfFrameInternal(); > } > >- Instruction* currentVPC() const; // This only makes sense in the LLInt and baseline. 
>- void setCurrentVPC(Instruction* vpc); >+ const Instruction* currentVPC() const; // This only makes sense in the LLInt and baseline. >+ void setCurrentVPC(const Instruction* vpc); > > void setCallerFrame(CallFrame* frame) { callerFrameAndPC().callerFrame = frame; } > void setScope(int scopeRegisterOffset, JSScope* scope) { static_cast<Register*>(this)[scopeRegisterOffset] = scope; } >@@ -260,7 +260,7 @@ namespace JSC { > void setArgumentCountIncludingThis(int count) { static_cast<Register*>(this)[CallFrameSlot::argumentCount].payload() = count; } > void setCallee(JSObject* callee) { static_cast<Register*>(this)[CallFrameSlot::callee] = callee; } > void setCodeBlock(CodeBlock* codeBlock) { static_cast<Register*>(this)[CallFrameSlot::codeBlock] = codeBlock; } >- void setReturnPC(void* value) { callerFrameAndPC().pc = reinterpret_cast<Instruction*>(value); } >+ void setReturnPC(void* value) { callerFrameAndPC().pc = reinterpret_cast<const Instruction*>(value); } > > String friendlyFunctionName(); > >diff --git a/Source/JavaScriptCore/interpreter/Interpreter.h b/Source/JavaScriptCore/interpreter/Interpreter.h >index 49227ebe515663ffde03c9e0a3fcb64967a0f568..c1aca1d4ccabf717b2634c637d002bbe5ce339ae 100644 >--- a/Source/JavaScriptCore/interpreter/Interpreter.h >+++ b/Source/JavaScriptCore/interpreter/Interpreter.h >@@ -62,7 +62,6 @@ namespace JSC { > struct HandlerInfo; > struct Instruction; > struct ProtoCallFrame; >- struct UnlinkedInstruction; > > enum UnwindStart : uint8_t { UnwindFromCurrentFrame, UnwindFromCallerFrame }; > >@@ -102,8 +101,7 @@ namespace JSC { > static inline Opcode getOpcode(OpcodeID); > > static inline OpcodeID getOpcodeID(Opcode); >- static inline OpcodeID getOpcodeID(const Instruction&); >- static inline OpcodeID getOpcodeID(const UnlinkedInstruction&); >+ static inline OpcodeID getOpcodeID(OpcodeID); > > #if !ASSERT_DISABLED > static bool isOpcode(Opcode); >diff --git a/Source/JavaScriptCore/interpreter/InterpreterInlines.h 
b/Source/JavaScriptCore/interpreter/InterpreterInlines.h >index fc89a189d6057d8e4e0ab10a8791f856b49f9071..b9f82c92c8230887a84be9042626982ff7279d42 100644 >--- a/Source/JavaScriptCore/interpreter/InterpreterInlines.h >+++ b/Source/JavaScriptCore/interpreter/InterpreterInlines.h >@@ -63,14 +63,9 @@ inline OpcodeID Interpreter::getOpcodeID(Opcode opcode) > #endif > } > >-inline OpcodeID Interpreter::getOpcodeID(const Instruction& instruction) >+inline OpcodeID Interpreter::getOpcodeID(OpcodeID opcode) > { >- return getOpcodeID(instruction.u.opcode); >-} >- >-inline OpcodeID Interpreter::getOpcodeID(const UnlinkedInstruction& instruction) >-{ >- return instruction.u.opcode; >+ return opcode; > } > > ALWAYS_INLINE JSValue Interpreter::execute(CallFrameClosure& closure) >diff --git a/Source/JavaScriptCore/interpreter/StackVisitor.cpp b/Source/JavaScriptCore/interpreter/StackVisitor.cpp >index cae3d9a442ecfbed489626f125454eda8f9f092f..615ece7a59dbdca42a598185765bb690ede78280 100644 >--- a/Source/JavaScriptCore/interpreter/StackVisitor.cpp >+++ b/Source/JavaScriptCore/interpreter/StackVisitor.cpp >@@ -443,7 +443,7 @@ void StackVisitor::Frame::dump(PrintStream& out, Indenter indent, WTF::Function< > > CallFrame* callFrame = m_callFrame; > CallFrame* callerFrame = this->callerFrame(); >- void* returnPC = callFrame->hasReturnPC() ? callFrame->returnPC().value() : nullptr; >+ const void* returnPC = callFrame->hasReturnPC() ? callFrame->returnPC().value() : nullptr; > > out.print(indent, "name: ", functionName(), "\n"); > out.print(indent, "sourceURL: ", sourceURL(), "\n"); >diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h >index 593d5bf38be7f86a2ee275cac2a1846a974526c7..b913444b2e0cd1cc622ed3a8cae0127709f73c60 100644 >--- a/Source/JavaScriptCore/jit/JIT.h >+++ b/Source/JavaScriptCore/jit/JIT.h >@@ -335,8 +335,7 @@ namespace JSC { > // This assumes that the value to profile is in regT0 and that regT3 is available for > // scratch. 
> void emitValueProfilingSite(ValueProfile&); >- void emitValueProfilingSite(unsigned bytecodeOffset); >- void emitValueProfilingSite(); >+ template<typename Op> void emitValueProfilingSite(Op); > void emitArrayProfilingSiteWithCell(RegisterID cell, RegisterID indexingType, ArrayProfile*); > void emitArrayProfilingSiteForBytecodeIndexWithCell(RegisterID cell, RegisterID indexingType, unsigned bytecodeIndex); > void emitArrayProfileStoreToHoleSpecialCase(ArrayProfile*); >diff --git a/Source/JavaScriptCore/jit/JITInlines.h b/Source/JavaScriptCore/jit/JITInlines.h >index 2bb67f13bede04d754b92c576af8a9fa0397c384..a60a3c8ef90101637aafcffa0c9ee7ebaba9c248 100644 >--- a/Source/JavaScriptCore/jit/JITInlines.h >+++ b/Source/JavaScriptCore/jit/JITInlines.h >@@ -328,16 +328,10 @@ inline void JIT::emitValueProfilingSite(ValueProfile& valueProfile) > #endif > } > >-inline void JIT::emitValueProfilingSite(unsigned bytecodeOffset) >+template<typename Op> >+inline void JIT::emitValueProfilingSite(Op op) > { >- if (!shouldEmitProfiling()) >- return; >- emitValueProfilingSite(m_codeBlock->valueProfileForBytecodeOffset(bytecodeOffset)); >-} >- >-inline void JIT::emitValueProfilingSite() >-{ >- emitValueProfilingSite(m_bytecodeOffset); >+ emitValueProfilingSite(op.metadata(m_codeBlock).profile); > } > > inline void JIT::emitArrayProfilingSiteWithCell(RegisterID cell, RegisterID indexingType, ArrayProfile* arrayProfile) >diff --git a/Source/JavaScriptCore/jit/JITMathIC.h b/Source/JavaScriptCore/jit/JITMathIC.h >index 4e00958d19f8f308b390f6ab7cbcde42d04c9baa..d82a2f3d12ceae6ba03fa787d5ada4dff513bba1 100644 >--- a/Source/JavaScriptCore/jit/JITMathIC.h >+++ b/Source/JavaScriptCore/jit/JITMathIC.h >@@ -56,7 +56,7 @@ template <typename GeneratorType, bool(*isProfileEmpty)(ArithProfile&)> > class JITMathIC { > WTF_MAKE_FAST_ALLOCATED; > public: >- JITMathIC(ArithProfile* arithProfile, Instruction* instruction) >+ JITMathIC(ArithProfile* arithProfile, const Instruction* instruction) > : 
m_arithProfile(arithProfile) > , m_instruction(instruction) > { >@@ -235,7 +235,7 @@ public: > } > > ArithProfile* arithProfile() const { return m_arithProfile; } >- Instruction* instruction() const { return m_instruction; } >+ const Instruction* instruction() const { return m_instruction; } > > #if ENABLE(MATH_IC_STATS) > size_t m_generatedCodeSize { 0 }; >@@ -249,7 +249,7 @@ public: > #endif > > ArithProfile* m_arithProfile; >- Instruction* m_instruction; >+ const Instruction* m_instruction; > MacroAssemblerCodeRef<JITStubRoutinePtrTag> m_code; > CodeLocationLabel<JSInternalPtrTag> m_inlineStart; > int32_t m_inlineSize; >@@ -266,7 +266,7 @@ inline bool isBinaryProfileEmpty(ArithProfile& arithProfile) > template <typename GeneratorType> > class JITBinaryMathIC : public JITMathIC<GeneratorType, isBinaryProfileEmpty> { > public: >- JITBinaryMathIC(ArithProfile* arithProfile, Instruction* instruction) >+ JITBinaryMathIC(ArithProfile* arithProfile, const Instruction* instruction) > : JITMathIC<GeneratorType, isBinaryProfileEmpty>(arithProfile, instruction) > { > } >@@ -284,7 +284,7 @@ inline bool isUnaryProfileEmpty(ArithProfile& arithProfile) > template <typename GeneratorType> > class JITUnaryMathIC : public JITMathIC<GeneratorType, isUnaryProfileEmpty> { > public: >- JITUnaryMathIC(ArithProfile* arithProfile, Instruction* instruction) >+ JITUnaryMathIC(ArithProfile* arithProfile, const Instruction* instruction) > : JITMathIC<GeneratorType, isUnaryProfileEmpty>(arithProfile, instruction) > { > } >diff --git a/Source/JavaScriptCore/llint/LLIntData.cpp b/Source/JavaScriptCore/llint/LLIntData.cpp >index c93d15c74c58ff5d96aa0aad943413f085a3d92e..b92259220b637307bb11475932080abeb457e3b3 100644 >--- a/Source/JavaScriptCore/llint/LLIntData.cpp >+++ b/Source/JavaScriptCore/llint/LLIntData.cpp >@@ -45,9 +45,10 @@ namespace JSC { namespace LLInt { > > Instruction Data::s_exceptionInstructions[maxOpcodeLength + 1] = { }; > Opcode Data::s_opcodeMap[numOpcodeIDs] = { }; >+Opcode 
Data::s_opcodeMapWide[numOpcodeIDs] = { }; > > #if ENABLE(JIT) >-extern "C" void llint_entry(void*); >+extern "C" void llint_entry(void*, void*); > #endif > > void initialize() >@@ -56,7 +57,7 @@ void initialize() > CLoop::initialize(); > > #else // ENABLE(JIT) >- llint_entry(&Data::s_opcodeMap); >+ llint_entry(&Data::s_opcodeMap, &Data::s_opcodeMapWide); > > for (int i = 0; i < numOpcodeIDs; ++i) > Data::s_opcodeMap[i] = tagCodePtr(Data::s_opcodeMap[i], BytecodePtrTag); >diff --git a/Source/JavaScriptCore/llint/LLIntData.h b/Source/JavaScriptCore/llint/LLIntData.h >index be58c00ae5c66ac30581ae3d4849428e5bb301d0..376776c4e99ae55e30c9541189a9d28aa19d882e 100644 >--- a/Source/JavaScriptCore/llint/LLIntData.h >+++ b/Source/JavaScriptCore/llint/LLIntData.h >@@ -43,12 +43,14 @@ typedef void (*LLIntCode)(); > namespace LLInt { > > class Data { >+ > public: > static void performAssertions(VM&); > > private: > static Instruction s_exceptionInstructions[maxOpcodeLength + 1]; > static Opcode s_opcodeMap[numOpcodeIDs]; >+ static Opcode s_opcodeMapWide[numOpcodeIDs]; > > friend void initialize(); > >@@ -83,9 +85,7 @@ inline Opcode getOpcode(OpcodeID id) > template<PtrTag tag> > ALWAYS_INLINE MacroAssemblerCodePtr<tag> getCodePtr(OpcodeID opcodeID) > { >- void* address = reinterpret_cast<void*>(getOpcode(opcodeID)); >- address = retagCodePtr<BytecodePtrTag, tag>(address); >- return MacroAssemblerCodePtr<tag>::createFromExecutableAddress(address); >+ return MacroAssemblerCodePtr<tag>::createFromExecutableAddress((void*)opcodeID); > } > > template<PtrTag tag> >@@ -109,7 +109,7 @@ ALWAYS_INLINE LLIntCode getCodeFunctionPtr(OpcodeID opcodeID) > #else > ALWAYS_INLINE void* getCodePtr(OpcodeID id) > { >- return reinterpret_cast<void*>(getOpcode(id)); >+ return reinterpret_cast<void*>(id); > } > #endif > >diff --git a/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp >index 
961b27c2f58981990612ded4d74eca9caab14709..d7019f7dd948aea1926fdebb1d15642f03e5d3c6 100644 >--- a/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp >+++ b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp >@@ -20,7 +20,7 @@ > * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY > * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT > * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE >- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. >+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. > */ > > #include "config.h" >@@ -48,6 +48,7 @@ > #include "JSString.h" > #include "JSTypeInfo.h" > #include "JumpTable.h" >+#include "LLIntData.h" > #include "LLIntOfflineAsmConfig.h" > #include "MarkedSpace.h" > #include "NativeExecutable.h" >diff --git a/Source/JavaScriptCore/llint/LLIntSettingsExtractor.cpp b/Source/JavaScriptCore/llint/LLIntSettingsExtractor.cpp >new file mode 100644 >index 0000000000000000000000000000000000000000..d5e53a24d1ed93c711648c61e1af7faf9019f303 >--- /dev/null >+++ b/Source/JavaScriptCore/llint/LLIntSettingsExtractor.cpp >@@ -0,0 +1,39 @@ >+/* >+ * Copyright (C) 2012-2018 Apple Inc. All rights reserved. >+ * >+ * Redistribution and use in source and binary forms, with or without >+ * modification, are permitted provided that the following conditions >+ * are met: >+ * 1. Redistributions of source code must retain the above copyright >+ * notice, this list of conditions and the following disclaimer. >+ * 2. Redistributions in binary form must reproduce the above copyright >+ * notice, this list of conditions and the following disclaimer in the >+ * documentation and/or other materials provided with the distribution. >+ * >+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY >+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE >+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >+ * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL APPLE INC. OR >+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, >+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, >+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR >+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY >+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE >+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. >+ */ >+ >+#include "config.h" >+ >+#include "LLIntOfflineAsmConfig.h" >+ >+int main(int, char**) >+{ >+ // Out of an abundance of caution, make sure that LLIntSettingsExtractor::dummy() is live, >+ // and the extractorTable is live, too. >+#include "LLIntDesiredSettings.h" >+ printf("%p\n", extractorTable); >+ return 0; >+} >+ >+ >diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp >index f2e411f8da89cfa10a3832e41cc1c5a650f456d1..74776b38d01817c42c691114a29282c3150bd0b8 100644 >--- a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp >+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp >@@ -237,7 +237,7 @@ extern "C" SlowPathReturnType llint_trace_operand(ExecState* exec, Instruction* > exec->codeBlock(), > exec, > static_cast<intptr_t>(exec->codeBlock()->bytecodeOffset(pc)), >- Interpreter::getOpcodeID(pc[0].u.opcode), >+ pc[0].u.opcode, > fromWhere, > operand, > pc[operand].u.operand); >@@ -264,7 +264,7 @@ extern "C" SlowPathReturnType llint_trace_value(ExecState* exec, Instruction* pc > exec->codeBlock(), > exec, > static_cast<intptr_t>(exec->codeBlock()->bytecodeOffset(pc)), >- Interpreter::getOpcodeID(pc[0].u.opcode), >+ pc[0].u.opcode, > fromWhere, > operand, > pc[operand].u.operand, >@@ -327,7 +327,7 @@ LLINT_SLOW_PATH_DECL(trace) > if (!Options::traceLLIntExecution()) > LLINT_END_IMPL(); > >- OpcodeID opcodeID = Interpreter::getOpcodeID(pc[0].u.opcode); >+ OpcodeID opcodeID 
= pc[0].u.opcode; > dataLogF("<%p> %p / %p: executing bc#%zu, %s, pc = %p\n", > &Thread::current(), > exec->codeBlock(), >@@ -726,13 +726,13 @@ static void setupGetByIdPrototypeCache(ExecState* exec, VM& vm, Instruction* pc, > ConcurrentJSLocker locker(codeBlock->m_lock); > > if (slot.isUnset()) { >- pc[0].u.opcode = LLInt::getOpcode(op_get_by_id_unset); >+ pc[0].u.unsignedValue = op_get_by_id_unset; > pc[4].u.structureID = structure->id(); > return; > } > ASSERT(slot.isValue()); > >- pc[0].u.opcode = LLInt::getOpcode(op_get_by_id_proto_load); >+ pc[0].u.unsignedValue = op_get_by_id_proto_load; > pc[4].u.structureID = structure->id(); > pc[5].u.operand = offset; > // We know that this pointer will remain valid because it will be cleared by either a watchpoint fire or >@@ -760,7 +760,7 @@ LLINT_SLOW_PATH_DECL(slow_path_get_by_id) > { > StructureID oldStructureID = pc[4].u.structureID; > if (oldStructureID) { >- auto opcode = Interpreter::getOpcodeID(pc[0]); >+ auto opcode = pc[0].u.opcode; > if (opcode == op_get_by_id > || opcode == op_get_by_id_unset > || opcode == op_get_by_id_proto_load) { >@@ -779,7 +779,7 @@ LLINT_SLOW_PATH_DECL(slow_path_get_by_id) > Structure* structure = baseCell->structure(vm); > if (slot.isValue() && slot.slotBase() == baseValue) { > // Start out by clearing out the old cache. 
>- pc[0].u.opcode = LLInt::getOpcode(op_get_by_id); >+ pc[0].u.unsignedValue = op_get_by_id; > pc[4].u.pointer = nullptr; // old structure > pc[5].u.pointer = nullptr; // offset > >@@ -804,7 +804,7 @@ LLINT_SLOW_PATH_DECL(slow_path_get_by_id) > } else if (!LLINT_ALWAYS_ACCESS_SLOW > && isJSArray(baseValue) > && ident == vm.propertyNames->length) { >- pc[0].u.opcode = LLInt::getOpcode(op_get_array_length); >+ pc[0].u.unsignedValue = op_get_array_length; > ArrayProfile* arrayProfile = codeBlock->getOrAddArrayProfile(codeBlock->bytecodeOffset(pc)); > arrayProfile->observeStructure(baseValue.asCell()->structure(vm)); > pc[4].u.arrayProfile = arrayProfile; >@@ -1712,16 +1712,17 @@ LLINT_SLOW_PATH_DECL(slow_path_handle_exception) > LLINT_SLOW_PATH_DECL(slow_path_get_from_scope) > { > LLINT_BEGIN(); >- const Identifier& ident = exec->codeBlock()->identifier(pc[3].u.operand); >- JSObject* scope = jsCast<JSObject*>(LLINT_OP(2).jsValue()); >- GetPutInfo getPutInfo(pc[4].u.operand); >+ auto op = pc->as<OpGetFromScope>(); >+ auto& metadata = op.metadata(); >+ const Identifier& ident = exec->codeBlock()->identifier(op.var); >+ JSObject* scope = jsCast<JSObject*>(LLINT_OP(op.scope).jsValue()); > > // ModuleVar is always converted to ClosureVar for get_from_scope. 
>- ASSERT(getPutInfo.resolveType() != ModuleVar); >+ ASSERT(metadata.getPutInfo.resolveType() != ModuleVar); > > LLINT_RETURN(scope->getPropertySlot(exec, ident, [&] (bool found, PropertySlot& slot) -> JSValue { > if (!found) { >- if (getPutInfo.resolveMode() == ThrowIfNotFound) >+ if (metadata.getPutInfo.resolveMode() == ThrowIfNotFound) > return throwException(exec, throwScope, createUndefinedVariableError(exec, ident)); > return jsUndefined(); > } >@@ -1734,7 +1735,7 @@ LLINT_SLOW_PATH_DECL(slow_path_get_from_scope) > return throwException(exec, throwScope, createTDZError(exec)); > } > >- CommonSlowPaths::tryCacheGetFromScopeGlobal(exec, vm, pc, scope, slot, ident); >+ CommonSlowPaths::tryCacheGetFromScopeGlobal(exec, vm, op, scope, slot, ident); > > if (!result) > return slot.getValue(exec, ident); >@@ -1746,19 +1747,20 @@ LLINT_SLOW_PATH_DECL(slow_path_put_to_scope) > { > LLINT_BEGIN(); > >+ auto op = pc->as<OpPutToScope>(); >+ auto& metadata = op.metadata(); > CodeBlock* codeBlock = exec->codeBlock(); >- const Identifier& ident = codeBlock->identifier(pc[2].u.operand); >- JSObject* scope = jsCast<JSObject*>(LLINT_OP(1).jsValue()); >- JSValue value = LLINT_OP_C(3).jsValue(); >- GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand); >- if (getPutInfo.resolveType() == LocalClosureVar) { >+ const Identifier& ident = codeBlock->identifier(op.var); >+ JSObject* scope = jsCast<JSObject*>(LLINT_OP(op.scope).jsValue()); >+ JSValue value = LLINT_OP_C(op.value).jsValue(); >+ if (metadata.getPutInfo.resolveType() == LocalClosureVar) { > JSLexicalEnvironment* environment = jsCast<JSLexicalEnvironment*>(scope); >- environment->variableAt(ScopeOffset(pc[6].u.operand)).set(vm, environment, value); >+ environment->variableAt(metadata.scopeOffset).set(vm, environment, value); > > // Have to do this *after* the write, because if this puts the set into IsWatched, then we need > // to have already changed the value of the variable. 
Otherwise we might watch and constant-fold > // to the Undefined value from before the assignment. >- if (WatchpointSet* set = pc[5].u.watchpointSet) >+ if (WatchpointSet* set = metadata.watchpointSet) > set->touch(vm, "Executed op_put_scope<LocalClosureVar>"); > LLINT_END(); > } >@@ -1767,7 +1769,7 @@ LLINT_SLOW_PATH_DECL(slow_path_put_to_scope) > LLINT_CHECK_EXCEPTION(); > if (hasProperty > && scope->isGlobalLexicalEnvironment() >- && !isInitialization(getPutInfo.initializationMode())) { >+ && !isInitialization(metadata.getPutInfo.initializationMode())) { > // When we can't statically prove we need a TDZ check, we must perform the check on the slow path. > PropertySlot slot(scope, PropertySlot::InternalMethodType::Get); > JSGlobalLexicalEnvironment::getOwnPropertySlot(scope, exec, ident, slot); >@@ -1775,13 +1777,13 @@ LLINT_SLOW_PATH_DECL(slow_path_put_to_scope) > LLINT_THROW(createTDZError(exec)); > } > >- if (getPutInfo.resolveMode() == ThrowIfNotFound && !hasProperty) >+ if (metadata.getPutInfo.resolveMode() == ThrowIfNotFound && !hasProperty) > LLINT_THROW(createUndefinedVariableError(exec, ident)); > >- PutPropertySlot slot(scope, codeBlock->isStrictMode(), PutPropertySlot::UnknownContext, isInitialization(getPutInfo.initializationMode())); >+ PutPropertySlot slot(scope, codeBlock->isStrictMode(), PutPropertySlot::UnknownContext, isInitialization(metadata.getPutInfo.initializationMode())); > scope->methodTable(vm)->put(scope, exec, ident, value, slot); > >- CommonSlowPaths::tryCachePutToScopeGlobal(exec, codeBlock, pc, scope, getPutInfo, slot, ident); >+ CommonSlowPaths::tryCachePutToScopeGlobal(exec, codeBlock, op, scope, slot, ident); > > LLINT_END(); > } >diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.h b/Source/JavaScriptCore/llint/LLIntSlowPaths.h >index 7cfeca7a816d1dc530a347e86fb9d5fd360bcb80..320c8de3637a1347c65741dcba1df010f0c0a509 100644 >--- a/Source/JavaScriptCore/llint/LLIntSlowPaths.h >+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.h >@@ -36,12 +36,12 @@ struct ProtoCallFrame; > > namespace LLInt { > >-extern "C" SlowPathReturnType llint_trace_operand(ExecState*, Instruction*, int fromWhere, int 
operand); >-extern "C" SlowPathReturnType llint_trace_value(ExecState*, Instruction*, int fromWhere, int operand); >+extern "C" SlowPathReturnType llint_trace_operand(ExecState*, const Instruction*, int fromWhere, int operand); >+extern "C" SlowPathReturnType llint_trace_value(ExecState*, const Instruction*, int fromWhere, int operand); > extern "C" void llint_write_barrier_slow(ExecState*, JSCell*) WTF_INTERNAL; > > #define LLINT_SLOW_PATH_DECL(name) \ >- extern "C" SlowPathReturnType llint_##name(ExecState* exec, Instruction* pc) >+ extern "C" SlowPathReturnType llint_##name(ExecState* exec, const Instruction* pc) > > #define LLINT_SLOW_PATH_HIDDEN_DECL(name) \ > LLINT_SLOW_PATH_DECL(name) WTF_INTERNAL >diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm >index 88b80d37720c4251f8235d79cd15026dd5b9f1d5..1d0229a5a77b67898c41b32788e22bc76c3bc848 100644 >--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm >+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm >@@ -352,6 +352,50 @@ else > end > end > >+macro dispatchWide(advance) >+ leap 1[advance, PC, 4], PC >+ loadb [PB, PC, 1], t0 >+ loadp [t0, OpcodeMapWide, 1], t0 >+ jmp t0, BytecodePtrTag >+end >+ >+macro dispatch(advance) >+ addi advance, PC >+ loadb [PB, PC, 1], t0 >+ loadp [t0, OpcodeMap, 1], t0 >+ jmp t0, BytecodePtrTag >+end >+ >+macro dispatchIndirect(offset) >+ dispatch(offset[PB, PC, 1]) >+end >+ >+macro getOperandNarrow(offset, dst) >+ loadb [offset, PC, 1], dst >+end >+ >+macro getOperandWide(offset, dst) >+ loadis [offset, PC, 4], dst >+end >+ >+macro commonOp(label, op, fn) >+_%label%: >+ traceExecution() >+ fn(getOperandNarrow, macro () dispatch(constexpr %op%_length) end) >+ >+_%label%_wide: >+ traceExecution() >+ fn(getOperandWide, macro () dispatch(constexpr %op%_wide_length) end) >+end >+ >+macro op(l, fn) >+ commonOp(l, l, fn) >+end >+ >+macro llintOp(l, fn) >+ commonOp(llint_%l%, l, fn) >+end >+ > if X86_64_WIN > 
const extraTempReg = t0 > else >@@ -1239,30 +1283,38 @@ end > # The PC base is in t1, as this is what _llint_entry leaves behind through > # initPCRelative(t1) > macro setEntryAddress(index, label) >+ setEntryAddressCommon(index, label, a0) >+end >+ >+macro setEntryAddressWide(index, label) >+ setEntryAddressCommon(index, label, a1) >+end >+ >+macro setEntryAddressCommon(index, label, map) > if X86_64 or X86_64_WIN > leap (label - _relativePCBase)[t1], t3 > move index, t4 >- storep t3, [a0, t4, 8] >+ storep t3, [map, t4, 8] > elsif X86 or X86_WIN > leap (label - _relativePCBase)[t1], t3 > move index, t4 >- storep t3, [a0, t4, 4] >+ storep t3, [map, t4, 4] > elsif ARM64 or ARM64E > pcrtoaddr label, t1 > move index, t4 >- storep t1, [a0, t4, 8] >+ storep t1, [map, t4, 8] > elsif ARM or ARMv7 or ARMv7_TRADITIONAL > mvlbl (label - _relativePCBase), t4 > addp t4, t1, t4 > move index, t3 >- storep t4, [a0, t3, 4] >+ storep t4, [map, t3, 4] > elsif MIPS > la label, t4 > la _relativePCBase, t3 > subp t3, t4 > addp t4, t1, t4 > move index, t3 >- storep t4, [a0, t3, 4] >+ storep t4, [map, t3, 4] > end > end > >@@ -1273,7 +1325,12 @@ _llint_entry: > pushCalleeSaves() > if X86 or X86_WIN > loadp 20[sp], a0 >+ loadp 24[sp], a1 > end >+ >+ const OpcodeMap = a0 >+ const OpcodeMapWide = a1 >+ > initPCRelative(t1) > > # Include generated bytecode initialization file. 
>@@ -1284,47 +1341,54 @@ _llint_entry: > ret > end > >-_llint_program_prologue: >+op(llint_program_prologue, macro (getOperand, disp__) > prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue) >- dispatch(0) >+ disp__() >+end) > > >-_llint_module_program_prologue: >+op(llint_module_program_prologue, macro (getOperand, disp__) > prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue) >- dispatch(0) >+ disp__() >+end) > > >-_llint_eval_prologue: >+op(llint_eval_prologue, macro (getOperand, disp__) > prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue) >- dispatch(0) >+ disp__() >+end) > > >-_llint_function_for_call_prologue: >+op(llint_function_for_call_prologue, macro (getOperand, disp__) > prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call, _llint_trace_prologue_function_for_call) > functionInitialization(0) >- dispatch(0) >+ disp__() >+end) > > >-_llint_function_for_construct_prologue: >+op(llint_function_for_construct_prologue, macro (getOperand, disp__) > prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct, _llint_trace_prologue_function_for_construct) > functionInitialization(1) >- dispatch(0) >+ disp__() >+end) > > >-_llint_function_for_call_arity_check: >+op(llint_function_for_call_arity_check, macro (getOperand, disp__) > prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call_arityCheck, _llint_trace_arityCheck_for_call) > functionArityCheck(.functionForCallBegin, _slow_path_call_arityCheck) > .functionForCallBegin: > functionInitialization(0) >- dispatch(0) >+ disp__() >+end) > > >-_llint_function_for_construct_arity_check: >+op(llint_function_for_construct_arity_check, macro (getOperand, disp__) > prologue(functionForConstructCodeBlockGetter, 
functionCodeBlockSetter, _llint_entry_osr_function_for_construct_arityCheck, _llint_trace_arityCheck_for_construct) > functionArityCheck(.functionForConstructBegin, _slow_path_construct_arityCheck) > .functionForConstructBegin: > functionInitialization(1) >- dispatch(0) >+ disp__() >+end) > > > # Value-representation-specific code. >@@ -1336,374 +1400,378 @@ end > > > # Value-representation-agnostic code. >-_llint_op_create_direct_arguments: >- traceExecution() >+llintOp(op_create_direct_arguments, macro (getOperand, disp__) > callSlowPath(_slow_path_create_direct_arguments) >- dispatch(constexpr op_create_direct_arguments_length) >+ disp__() >+end) > > >-_llint_op_create_scoped_arguments: >- traceExecution() >+llintOp(op_create_scoped_arguments, macro (getOperand, disp__) > callSlowPath(_slow_path_create_scoped_arguments) >- dispatch(constexpr op_create_scoped_arguments_length) >+ disp__() >+end) > > >-_llint_op_create_cloned_arguments: >- traceExecution() >+llintOp(op_create_cloned_arguments, macro (getOperand, disp__) > callSlowPath(_slow_path_create_cloned_arguments) >- dispatch(constexpr op_create_cloned_arguments_length) >+ disp__() >+end) > > >-_llint_op_create_this: >- traceExecution() >+llintOp(op_create_this, macro (getOperand, disp__) > callSlowPath(_slow_path_create_this) >- dispatch(constexpr op_create_this_length) >+ disp__() >+end) > > >-_llint_op_new_object: >- traceExecution() >+llintOp(op_new_object, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_object) >- dispatch(constexpr op_new_object_length) >+ disp__() >+end) > > >-_llint_op_new_func: >- traceExecution() >+llintOp(op_new_func, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_func) >- dispatch(constexpr op_new_func_length) >+ disp__() >+end) > > >-_llint_op_new_generator_func: >- traceExecution() >+llintOp(op_new_generator_func, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_generator_func) >- dispatch(constexpr op_new_generator_func_length) >+ 
disp__() >+end) > >-_llint_op_new_async_generator_func: >- traceExecution() >+ >+llintOp(op_new_async_generator_func, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_async_generator_func) >- dispatch(constexpr op_new_async_generator_func_length) >+ disp__() >+end) > >-_llint_op_new_async_generator_func_exp: >- traceExecution() >+ >+llintOp(op_new_async_generator_func_exp, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_async_generator_func_exp) >- dispatch(constexpr op_new_async_generator_func_exp_length) >+ disp__() >+end) > >-_llint_op_new_async_func: >- traceExecution() >+ >+llintOp(op_new_async_func, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_async_func) >- dispatch(constexpr op_new_async_func_length) >+ disp__() >+end) > > >-_llint_op_new_array: >- traceExecution() >+llintOp(op_new_array, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_array) >- dispatch(constexpr op_new_array_length) >+ disp__() >+end) > > >-_llint_op_new_array_with_spread: >- traceExecution() >+llintOp(op_new_array_with_spread, macro (getOperand, disp__) > callSlowPath(_slow_path_new_array_with_spread) >- dispatch(constexpr op_new_array_with_spread_length) >+ disp__() >+end) > > >-_llint_op_spread: >- traceExecution() >+llintOp(op_spread, macro (getOperand, disp__) > callSlowPath(_slow_path_spread) >- dispatch(constexpr op_spread_length) >+ disp__() >+end) > > >-_llint_op_new_array_with_size: >- traceExecution() >+llintOp(op_new_array_with_size, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_array_with_size) >- dispatch(constexpr op_new_array_with_size_length) >+ disp__() >+end) > > >-_llint_op_new_array_buffer: >- traceExecution() >+llintOp(op_new_array_buffer, macro (getOperand, disp__) > callSlowPath(_slow_path_new_array_buffer) >- dispatch(constexpr op_new_array_buffer_length) >+ disp__() >+end) > > >-_llint_op_new_regexp: >- traceExecution() >+llintOp(op_new_regexp, macro (getOperand, disp__) > 
callSlowPath(_llint_slow_path_new_regexp) >- dispatch(constexpr op_new_regexp_length) >+ disp__() >+end) > > >-_llint_op_less: >- traceExecution() >+llintOp(op_less, macro (getOperand, disp__) > callSlowPath(_slow_path_less) >- dispatch(constexpr op_less_length) >+ disp__() >+end) > > >-_llint_op_lesseq: >- traceExecution() >+llintOp(op_lesseq, macro (getOperand, disp__) > callSlowPath(_slow_path_lesseq) >- dispatch(constexpr op_lesseq_length) >+ disp__() >+end) > > >-_llint_op_greater: >- traceExecution() >+llintOp(op_greater, macro (getOperand, disp__) > callSlowPath(_slow_path_greater) >- dispatch(constexpr op_greater_length) >+ disp__() >+end) > > >-_llint_op_greatereq: >- traceExecution() >+llintOp(op_greatereq, macro (getOperand, disp__) > callSlowPath(_slow_path_greatereq) >- dispatch(constexpr op_greatereq_length) >+ disp__() >+end) > > >-_llint_op_eq: >- traceExecution() >+llintOp(op_eq, macro (getOperand, disp__) > equalityComparison( > macro (left, right, result) cieq left, right, result end, > _slow_path_eq) >+end) > > >-_llint_op_neq: >- traceExecution() >+llintOp(op_neq, macro (getOperand, disp__) > equalityComparison( > macro (left, right, result) cineq left, right, result end, > _slow_path_neq) >+end) > > >-_llint_op_below: >- traceExecution() >+llintOp(op_below, macro (getOperand, disp__) > compareUnsigned( > macro (left, right, result) cib left, right, result end) >+end) > > >-_llint_op_beloweq: >- traceExecution() >+llintOp(op_beloweq, macro (getOperand, disp__) > compareUnsigned( > macro (left, right, result) cibeq left, right, result end) >+end) > > >-_llint_op_mod: >- traceExecution() >+llintOp(op_mod, macro (getOperand, disp__) > callSlowPath(_slow_path_mod) >- dispatch(constexpr op_mod_length) >+ disp__() >+end) > > >-_llint_op_pow: >- traceExecution() >+llintOp(op_pow, macro (getOperand, disp__) > callSlowPath(_slow_path_pow) >- dispatch(constexpr op_pow_length) >+ disp__() >+end) > > >-_llint_op_typeof: >- traceExecution() 
>+llintOp(op_typeof, macro (getOperand, disp__) > callSlowPath(_slow_path_typeof) >- dispatch(constexpr op_typeof_length) >+ disp__() >+end) > > >-_llint_op_is_object_or_null: >- traceExecution() >+llintOp(op_is_object_or_null, macro (getOperand, disp__) > callSlowPath(_slow_path_is_object_or_null) >- dispatch(constexpr op_is_object_or_null_length) >+ disp__() >+end) > >-_llint_op_is_function: >- traceExecution() >+ >+llintOp(op_is_function, macro (getOperand, disp__) > callSlowPath(_slow_path_is_function) >- dispatch(constexpr op_is_function_length) >+ disp__() >+end) > > >-_llint_op_in_by_id: >- traceExecution() >+llintOp(op_in_by_id, macro (getOperand, disp__) > callSlowPath(_slow_path_in_by_id) >- dispatch(constexpr op_in_by_id_length) >+ disp__() >+end) > > >-_llint_op_in_by_val: >- traceExecution() >+llintOp(op_in_by_val, macro (getOperand, disp__) > callSlowPath(_slow_path_in_by_val) >- dispatch(constexpr op_in_by_val_length) >+ disp__() >+end) > > >-_llint_op_try_get_by_id: >- traceExecution() >+llintOp(op_try_get_by_id, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_try_get_by_id) >- dispatch(constexpr op_try_get_by_id_length) >+ disp__() >+end) > > >-_llint_op_del_by_id: >- traceExecution() >+llintOp(op_del_by_id, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_del_by_id) >- dispatch(constexpr op_del_by_id_length) >+ disp__() >+end) > > >-_llint_op_del_by_val: >- traceExecution() >+llintOp(op_del_by_val, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_del_by_val) >- dispatch(constexpr op_del_by_val_length) >+ disp__() >+end) > > >-_llint_op_put_getter_by_id: >- traceExecution() >+llintOp(op_put_getter_by_id, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_put_getter_by_id) >- dispatch(constexpr op_put_getter_by_id_length) >+ disp__() >+end) > > >-_llint_op_put_setter_by_id: >- traceExecution() >+llintOp(op_put_setter_by_id, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_put_setter_by_id) >- 
dispatch(constexpr op_put_setter_by_id_length) >+ disp__() >+end) > > >-_llint_op_put_getter_setter_by_id: >- traceExecution() >+llintOp(op_put_getter_setter_by_id, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_put_getter_setter_by_id) >- dispatch(constexpr op_put_getter_setter_by_id_length) >+ disp__() >+end) > > >-_llint_op_put_getter_by_val: >- traceExecution() >+llintOp(op_put_getter_by_val, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_put_getter_by_val) >- dispatch(constexpr op_put_getter_by_val_length) >+ disp__() >+end) > > >-_llint_op_put_setter_by_val: >- traceExecution() >+llintOp(op_put_setter_by_val, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_put_setter_by_val) >- dispatch(constexpr op_put_setter_by_val_length) >+ disp__() >+end) > > >-_llint_op_define_data_property: >- traceExecution() >+llintOp(op_define_data_property, macro (getOperand, disp__) > callSlowPath(_slow_path_define_data_property) >- dispatch(constexpr op_define_data_property_length) >+ disp__() >+end) > > >-_llint_op_define_accessor_property: >- traceExecution() >+llintOp(op_define_accessor_property, macro (getOperand, disp__) > callSlowPath(_slow_path_define_accessor_property) >- dispatch(constexpr op_define_accessor_property_length) >+ disp__() >+end) > > >-_llint_op_jtrue: >- traceExecution() >+llintOp(op_jtrue, macro (getOperand, disp__) > jumpTrueOrFalse( > macro (value, target) btinz value, 1, target end, > _llint_slow_path_jtrue) >+end) > > >-_llint_op_jfalse: >- traceExecution() >+llintOp(op_jfalse, macro (getOperand, disp__) > jumpTrueOrFalse( > macro (value, target) btiz value, 1, target end, > _llint_slow_path_jfalse) >+end) > > >-_llint_op_jless: >- traceExecution() >+llintOp(op_jless, macro (getOperand, disp__) > compareJump( > macro (left, right, target) bilt left, right, target end, > macro (left, right, target) bdlt left, right, target end, > _llint_slow_path_jless) >+end) > > >-_llint_op_jnless: >- traceExecution() 
>+llintOp(op_jnless, macro (getOperand, disp__) > compareJump( > macro (left, right, target) bigteq left, right, target end, > macro (left, right, target) bdgtequn left, right, target end, > _llint_slow_path_jnless) >+end) > > >-_llint_op_jgreater: >- traceExecution() >+llintOp(op_jgreater, macro (getOperand, disp__) > compareJump( > macro (left, right, target) bigt left, right, target end, > macro (left, right, target) bdgt left, right, target end, > _llint_slow_path_jgreater) >+end) > > >-_llint_op_jngreater: >- traceExecution() >+llintOp(op_jngreater, macro (getOperand, disp__) > compareJump( > macro (left, right, target) bilteq left, right, target end, > macro (left, right, target) bdltequn left, right, target end, > _llint_slow_path_jngreater) >+end) > > >-_llint_op_jlesseq: >- traceExecution() >+llintOp(op_jlesseq, macro (getOperand, disp__) > compareJump( > macro (left, right, target) bilteq left, right, target end, > macro (left, right, target) bdlteq left, right, target end, > _llint_slow_path_jlesseq) >+end) > > >-_llint_op_jnlesseq: >- traceExecution() >+llintOp(op_jnlesseq, macro (getOperand, disp__) > compareJump( > macro (left, right, target) bigt left, right, target end, > macro (left, right, target) bdgtun left, right, target end, > _llint_slow_path_jnlesseq) >+end) > > >-_llint_op_jgreatereq: >- traceExecution() >+llintOp(op_jgreatereq, macro (getOperand, disp__) > compareJump( > macro (left, right, target) bigteq left, right, target end, > macro (left, right, target) bdgteq left, right, target end, > _llint_slow_path_jgreatereq) >+end) > > >-_llint_op_jngreatereq: >- traceExecution() >+llintOp(op_jngreatereq, macro (getOperand, disp__) > compareJump( > macro (left, right, target) bilt left, right, target end, > macro (left, right, target) bdltun left, right, target end, > _llint_slow_path_jngreatereq) >+end) > > >-_llint_op_jeq: >- traceExecution() >+llintOp(op_jeq, macro (getOperand, disp__) > equalityJump( > macro (left, right, target) bieq 
left, right, target end, > _llint_slow_path_jeq) >+end) > > >-_llint_op_jneq: >- traceExecution() >+llintOp(op_jneq, macro (getOperand, disp__) > equalityJump( > macro (left, right, target) bineq left, right, target end, > _llint_slow_path_jneq) >+end) > > >-_llint_op_jbelow: >- traceExecution() >+llintOp(op_jbelow, macro (getOperand, disp__) > compareUnsignedJump( > macro (left, right, target) bib left, right, target end) >+end) > > >-_llint_op_jbeloweq: >- traceExecution() >+llintOp(op_jbeloweq, macro (getOperand, disp__) > compareUnsignedJump( > macro (left, right, target) bibeq left, right, target end) >+end) > > >-_llint_op_loop_hint: >- traceExecution() >+llintOp(op_loop_hint, macro (getOperand, disp__) > checkSwitchToJITForLoop() >- dispatch(constexpr op_loop_hint_length) >+ disp__() >+end) > > >-_llint_op_check_traps: >- traceExecution() >+llintOp(op_check_traps, macro (getOperand, disp__) > loadp CodeBlock[cfr], t1 > loadp CodeBlock::m_poisonedVM[t1], t1 > unpoison(_g_CodeBlockPoison, t1, t2) > loadb VM::m_traps+VMTraps::m_needTrapHandling[t1], t0 > btpnz t0, .handleTraps > .afterHandlingTraps: >- dispatch(constexpr op_check_traps_length) >+ disp__() > .handleTraps: > callTrapHandler(.throwHandler) > jmp .afterHandlingTraps > .throwHandler: > jmp _llint_throw_from_slow_path_trampoline >+end) > > > # Returns the packet pointer in t0. 
>@@ -1719,62 +1787,68 @@ macro acquireShadowChickenPacket(slow) > end > > >-_llint_op_nop: >- dispatch(constexpr op_nop_length) >+llintOp(op_nop, macro (getOperand, disp__) >+ disp__() >+end) > > >-_llint_op_super_sampler_begin: >+llintOp(op_super_sampler_begin, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_super_sampler_begin) >- dispatch(constexpr op_super_sampler_begin_length) >+ disp__() >+end) > > >-_llint_op_super_sampler_end: >- traceExecution() >+llintOp(op_super_sampler_end, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_super_sampler_end) >- dispatch(constexpr op_super_sampler_end_length) >+ disp__() >+end) > > >-_llint_op_switch_string: >- traceExecution() >+llintOp(op_switch_string, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_switch_string) >- dispatch(0) >+ disp__() >+end) > > >-_llint_op_new_func_exp: >- traceExecution() >+llintOp(op_new_func_exp, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_func_exp) >- dispatch(constexpr op_new_func_exp_length) >+ disp__() >+end) > >-_llint_op_new_generator_func_exp: >- traceExecution() >+llintOp(op_new_generator_func_exp, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_generator_func_exp) >- dispatch(constexpr op_new_generator_func_exp_length) >+ disp__() >+end) > >-_llint_op_new_async_func_exp: >- traceExecution() >+llintOp(op_new_async_func_exp, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_async_func_exp) >- dispatch(constexpr op_new_async_func_exp_length) >+ disp__() >+end) > > >-_llint_op_set_function_name: >- traceExecution() >+llintOp(op_set_function_name, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_set_function_name) >- dispatch(constexpr op_set_function_name_length) >+ disp__() >+end) > >-_llint_op_call: >- traceExecution() >+ >+llintOp(op_call, macro (getOperand, disp__) > arrayProfileForCall() > doCall(_llint_slow_path_call, prepareForRegularCall) >+end) > >-_llint_op_tail_call: >- traceExecution() >+ 
>+llintOp(op_tail_call, macro (getOperand, disp__) > arrayProfileForCall() > checkSwitchToJITForEpilogue() > doCall(_llint_slow_path_call, prepareForTailCall) >+end) > >-_llint_op_construct: >- traceExecution() >+ >+llintOp(op_construct, macro (getOperand, disp__) > doCall(_llint_slow_path_construct, prepareForRegularCall) >+end) >+ > > macro doCallVarargs(frameSlowPath, slowPath, prepareCall) > callSlowPath(frameSlowPath) >@@ -1794,34 +1868,33 @@ macro doCallVarargs(frameSlowPath, slowPath, prepareCall) > slowPathForCall(slowPath, prepareCall) > end > >-_llint_op_call_varargs: >- traceExecution() >+ >+llintOp(op_call_varargs, macro (getOperand, disp__) > doCallVarargs(_llint_slow_path_size_frame_for_varargs, _llint_slow_path_call_varargs, prepareForRegularCall) >+end) > >-_llint_op_tail_call_varargs: >- traceExecution() >+llintOp(op_tail_call_varargs, macro (getOperand, disp__) > checkSwitchToJITForEpilogue() > # We lie and perform the tail call instead of preparing it since we can't > # prepare the frame for a call opcode > doCallVarargs(_llint_slow_path_size_frame_for_varargs, _llint_slow_path_call_varargs, prepareForTailCall) >+end) > > >-_llint_op_tail_call_forward_arguments: >- traceExecution() >+llintOp(op_tail_call_forward_arguments, macro (getOperand, disp__) > checkSwitchToJITForEpilogue() > # We lie and perform the tail call instead of preparing it since we can't > # prepare the frame for a call opcode > doCallVarargs(_llint_slow_path_size_frame_for_forward_arguments, _llint_slow_path_tail_call_forward_arguments, prepareForTailCall) >+end) > > >-_llint_op_construct_varargs: >- traceExecution() >+llintOp(op_construct_varargs, macro (getOperand, disp__) > doCallVarargs(_llint_slow_path_size_frame_for_varargs, _llint_slow_path_construct_varargs, prepareForRegularCall) >+end) > > >-_llint_op_call_eval: >- traceExecution() >- >+llintOp(op_call_eval, macro (getOperand, disp__) > # Eval is executed in one of two modes: > # > # 1) We find that we're really 
invoking eval() in which case the >@@ -1856,162 +1929,169 @@ _llint_op_call_eval: > # returns the JS value that the eval returned. > > slowPathForCall(_llint_slow_path_call_eval, prepareForRegularCall) >+end) > > >-_llint_generic_return_point: >+op(llint_generic_return_point, macro (getOperand, disp__) > dispatchAfterCall() >+end) > > >-_llint_op_strcat: >- traceExecution() >+llintOp(op_strcat, macro (getOperand, disp__) > callSlowPath(_slow_path_strcat) >- dispatch(constexpr op_strcat_length) >+ disp__() >+end) > > >-_llint_op_push_with_scope: >- traceExecution() >+llintOp(op_push_with_scope, macro (getOperand, disp__) > callSlowPath(_slow_path_push_with_scope) >- dispatch(constexpr op_push_with_scope_length) >+ disp__() >+end) > > >-_llint_op_identity_with_profile: >- traceExecution() >- dispatch(constexpr op_identity_with_profile_length) >+llintOp(op_identity_with_profile, macro (getOperand, disp__) >+ disp__() >+end) > > >-_llint_op_unreachable: >- traceExecution() >+llintOp(op_unreachable, macro (getOperand, disp__) > callSlowPath(_slow_path_unreachable) >- dispatch(constexpr op_unreachable_length) >+ disp__() >+end) > > >-_llint_op_yield: >+llintOp(op_yield, macro (getOperand, disp__) > notSupported() >+end) > > >-_llint_op_create_lexical_environment: >- traceExecution() >+llintOp(op_create_lexical_environment, macro (getOperand, disp__) > callSlowPath(_slow_path_create_lexical_environment) >- dispatch(constexpr op_create_lexical_environment_length) >+ disp__() >+end) > > >-_llint_op_throw: >- traceExecution() >+llintOp(op_throw, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_throw) >- dispatch(constexpr op_throw_length) >+ disp__() >+end) > > >-_llint_op_throw_static_error: >- traceExecution() >+llintOp(op_throw_static_error, macro (getOperand, disp__) > callSlowPath(_slow_path_throw_static_error) >- dispatch(constexpr op_throw_static_error_length) >+ disp__() >+end) > > >-_llint_op_debug: >- traceExecution() >+llintOp(op_debug, macro 
(getOperand, disp__) > loadp CodeBlock[cfr], t0 > loadi CodeBlock::m_debuggerRequests[t0], t0 > btiz t0, .opDebugDone > callSlowPath(_llint_slow_path_debug) > .opDebugDone: >- dispatch(constexpr op_debug_length) >+ disp__() >+end) > > >-_llint_native_call_trampoline: >+op(llint_native_call_trampoline, macro (getOperand, disp__) > nativeCallTrampoline(NativeExecutable::m_function) >+end) > > >-_llint_native_construct_trampoline: >+op(llint_native_construct_trampoline, macro (getOperand, disp__) > nativeCallTrampoline(NativeExecutable::m_constructor) >+end) > > >-_llint_internal_function_call_trampoline: >+op(llint_internal_function_call_trampoline, macro (getOperand, disp__) > internalFunctionCallTrampoline(InternalFunction::m_functionForCall) >+end) > > >-_llint_internal_function_construct_trampoline: >+op(llint_internal_function_construct_trampoline, macro (getOperand, disp__) > internalFunctionCallTrampoline(InternalFunction::m_functionForConstruct) >+end) > > >-_llint_op_get_enumerable_length: >- traceExecution() >+llintOp(op_get_enumerable_length, macro (getOperand, disp__) > callSlowPath(_slow_path_get_enumerable_length) >- dispatch(constexpr op_get_enumerable_length_length) >+ disp__() >+end) > >-_llint_op_has_indexed_property: >- traceExecution() >+llintOp(op_has_indexed_property, macro (getOperand, disp__) > callSlowPath(_slow_path_has_indexed_property) >- dispatch(constexpr op_has_indexed_property_length) >+ disp__() >+end) > >-_llint_op_has_structure_property: >- traceExecution() >+llintOp(op_has_structure_property, macro (getOperand, disp__) > callSlowPath(_slow_path_has_structure_property) >- dispatch(constexpr op_has_structure_property_length) >+ disp__() >+end) > >-_llint_op_has_generic_property: >- traceExecution() >+llintOp(op_has_generic_property, macro (getOperand, disp__) > callSlowPath(_slow_path_has_generic_property) >- dispatch(constexpr op_has_generic_property_length) >+ disp__() >+end) > >-_llint_op_get_direct_pname: >- traceExecution() 
>+llintOp(op_get_direct_pname, macro (getOperand, disp__) > callSlowPath(_slow_path_get_direct_pname) >- dispatch(constexpr op_get_direct_pname_length) >+ disp__() >+end) > >-_llint_op_get_property_enumerator: >- traceExecution() >+llintOp(op_get_property_enumerator, macro (getOperand, disp__) > callSlowPath(_slow_path_get_property_enumerator) >- dispatch(constexpr op_get_property_enumerator_length) >+ disp__() >+end) > >-_llint_op_enumerator_structure_pname: >- traceExecution() >+llintOp(op_enumerator_structure_pname, macro (getOperand, disp__) > callSlowPath(_slow_path_next_structure_enumerator_pname) >- dispatch(constexpr op_enumerator_structure_pname_length) >+ disp__() >+end) > >-_llint_op_enumerator_generic_pname: >- traceExecution() >+llintOp(op_enumerator_generic_pname, macro (getOperand, disp__) > callSlowPath(_slow_path_next_generic_enumerator_pname) >- dispatch(constexpr op_enumerator_generic_pname_length) >+ disp__() >+end) > >-_llint_op_to_index_string: >- traceExecution() >+llintOp(op_to_index_string, macro (getOperand, disp__) > callSlowPath(_slow_path_to_index_string) >- dispatch(constexpr op_to_index_string_length) >+ disp__() >+end) > >-_llint_op_create_rest: >- traceExecution() >+llintOp(op_create_rest, macro (getOperand, disp__) > callSlowPath(_slow_path_create_rest) >- dispatch(constexpr op_create_rest_length) >+ disp__() >+end) > >-_llint_op_instanceof: >- traceExecution() >+llintOp(op_instanceof, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_instanceof) >- dispatch(constexpr op_instanceof_length) >+ disp__() >+end) > >-_llint_op_get_by_id_with_this: >- traceExecution() >+llintOp(op_get_by_id_with_this, macro (getOperand, disp__) > callSlowPath(_slow_path_get_by_id_with_this) >- dispatch(constexpr op_get_by_id_with_this_length) >+ disp__() >+end) > >-_llint_op_get_by_val_with_this: >- traceExecution() >+llintOp(op_get_by_val_with_this, macro (getOperand, disp__) > callSlowPath(_slow_path_get_by_val_with_this) >- dispatch(constexpr 
op_get_by_val_with_this_length) >+ disp__() >+end) > >-_llint_op_put_by_id_with_this: >- traceExecution() >+llintOp(op_put_by_id_with_this, macro (getOperand, disp__) > callSlowPath(_slow_path_put_by_id_with_this) >- dispatch(constexpr op_put_by_id_with_this_length) >+ disp__() >+end) > >-_llint_op_put_by_val_with_this: >- traceExecution() >+llintOp(op_put_by_val_with_this, macro (getOperand, disp__) > callSlowPath(_slow_path_put_by_val_with_this) >- dispatch(constexpr op_put_by_val_with_this_length) >+ disp__() >+end) > >-_llint_op_resolve_scope_for_hoisting_func_decl_in_eval: >- traceExecution() >+llintOp(op_resolve_scope_for_hoisting_func_decl_in_eval, macro (getOperand, disp__) > callSlowPath(_slow_path_resolve_scope_for_hoisting_func_decl_in_eval) >- dispatch(constexpr op_resolve_scope_for_hoisting_func_decl_in_eval_length) >+ disp__() >+end) > > # Lastly, make sure that we can link even though we don't support all opcodes. > # These opcodes should never arise when using LLInt or either JIT. 
We assert >diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp >index 78bff0884c4802939a4de860f76b582eaa9a4265..828fc4f85ac7495a75f5cf9b4ae8fb6d68669ebd 100644 >--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp >+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp >@@ -108,13 +108,20 @@ using namespace JSC::LLInt; > > #define OFFLINE_ASM_GLOBAL_LABEL(label) label: USE_LABEL(label); > >+#if ENABLE(LABEL_TRACING) >+#define TRACE_LABEL(prefix, label) dataLog(#prefix, ": ", #label, "\n") >+#else >+#define TRACE_LABEL(prefix, label) do { } while (false); >+#endif >+ >+ > #if ENABLE(COMPUTED_GOTO_OPCODES) >-#define OFFLINE_ASM_GLUE_LABEL(label) label: USE_LABEL(label); >+#define OFFLINE_ASM_GLUE_LABEL(label) label: TRACE_LABEL("OFFLINE_ASM_GLUE_LABEL", label); USE_LABEL(label); > #else > #define OFFLINE_ASM_GLUE_LABEL(label) case label: label: USE_LABEL(label); > #endif > >-#define OFFLINE_ASM_LOCAL_LABEL(label) label: USE_LABEL(label); >+#define OFFLINE_ASM_LOCAL_LABEL(label) label: TRACE_LABEL("OFFLINE_ASM_LOCAL_LABEL", #label); USE_LABEL(label); > > > //============================================================================ >@@ -238,7 +245,7 @@ struct CLoopRegister { > EncodedJSValue encodedJSValue; > double castToDouble; > #endif >- Opcode opcode; >+ OpcodeID opcode; > }; > > operator ExecState*() { return execState; } >@@ -288,8 +295,8 @@ JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, > // can depend on the opcodeMap. 
> Instruction* exceptionInstructions = LLInt::exceptionInstructions(); > for (int i = 0; i < maxOpcodeLength + 1; ++i) >- exceptionInstructions[i].u.pointer = >- LLInt::getCodePtr(llint_throw_from_slow_path_trampoline); >+ exceptionInstructions[i].u.unsignedValue = >+ llint_throw_from_slow_path_trampoline; > > return JSValue(); > } >@@ -353,7 +360,7 @@ JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, > CLoopStack& cloopStack = vm->interpreter->cloopStack(); > StackPointerScope stackPointerScope(cloopStack); > >- lr.opcode = getOpcode(llint_return_to_host); >+ lr.opcode = llint_return_to_host; > sp.vp = cloopStack.currentStackPointer(); > cfr.callFrame = vm->topCallFrame; > #ifndef NDEBUG >@@ -376,7 +383,7 @@ JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, > // Interpreter variables for value passing between opcodes and/or helpers: > NativeFunction nativeFunc = nullptr; > JSValue functionReturnValue; >- Opcode opcode = getOpcode(entryOpcodeID); >+ OpcodeID opcode = entryOpcodeID; > > #define PUSH(cloopReg) \ > do { \ >@@ -399,7 +406,7 @@ JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, > #if USE(JSVALUE32_64) > #define FETCH_OPCODE() pc.opcode > #else // USE(JSVALUE64) >-#define FETCH_OPCODE() *bitwise_cast<Opcode*>(pcBase.i8p + pc.i * 8) >+#define FETCH_OPCODE() *bitwise_cast<OpcodeID*>(pcBase.i8p + pc.i * 8) > #endif // USE(JSVALUE64) > > #define NEXT_INSTRUCTION() \ >@@ -413,7 +420,7 @@ JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, > //======================================================================== > // Loop dispatch mechanism using computed goto statements: > >- #define DISPATCH_OPCODE() goto *opcode >+ #define DISPATCH_OPCODE() goto *getOpcode(opcode); > > #define DEFINE_OPCODE(__opcode) \ > __opcode: \ >diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm 
b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm >index 80f41d804a6dfa0d9124c94ec41dd11061e06489..c1ed88fdef76160425144d9194626f0762c0e75c 100644 >--- a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm >+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm >@@ -44,7 +44,7 @@ macro dispatchAfterCall() > loadi 4[PC], t3 > storei r1, TagOffset[cfr, t3, 8] > storei r0, PayloadOffset[cfr, t3, 8] >- valueProfile(r1, r0, 4 * (CallOpCodeSize - 1), t3) >+ valueProfile32(r1, r0, 4 * (CallOpCodeSize - 1), t3) > dispatch(CallOpCodeSize) > end > >@@ -404,7 +404,7 @@ macro checkSwitchToJITForLoop() > end) > end > >-macro loadVariable(operand, index, tag, payload) >+macro loadVariable32(operand, index, tag, payload) > loadisFromInstruction(operand, index) > loadi TagOffset[cfr, index, 8], tag > loadi PayloadOffset[cfr, index, 8], payload >@@ -412,7 +412,7 @@ end > > # Index, tag, and payload must be different registers. Index is not > # changed. >-macro loadConstantOrVariable(index, tag, payload) >+macro loadConstantOrVariable32(index, tag, payload) > bigteq index, FirstConstantRegisterIndex, .constant > loadi TagOffset[cfr, index, 8], tag > loadi PayloadOffset[cfr, index, 8], payload >@@ -558,7 +558,7 @@ macro writeBarrierOnGlobalLexicalEnvironment(valueOperand) > end) > end > >-macro valueProfile(tag, payload, operand, scratch) >+macro valueProfile32(tag, payload, operand, scratch) > loadp operand[PC], scratch > storei tag, ValueProfile::m_buckets + TagOffset[scratch] > storei payload, ValueProfile::m_buckets + PayloadOffset[scratch] >@@ -672,13 +672,13 @@ _llint_op_get_argument: > loadi ThisArgumentOffset + PayloadOffset[cfr, t2, 8], t3 > storei t0, TagOffset[cfr, t1, 8] > storei t3, PayloadOffset[cfr, t1, 8] >- valueProfile(t0, t3, 12, t1) >+ valueProfile32(t0, t3, 12, t1) > dispatch(constexpr op_get_argument_length) > > .opGetArgumentOutOfBounds: > storei UndefinedTag, TagOffset[cfr, t1, 8] > storei 0, PayloadOffset[cfr, t1, 8] >- 
valueProfile(UndefinedTag, 0, 12, t1) >+ valueProfile32(UndefinedTag, 0, 12, t1) > dispatch(constexpr op_get_argument_length) > > >@@ -733,7 +733,7 @@ _llint_op_mov: > traceExecution() > loadi 8[PC], t1 > loadi 4[PC], t0 >- loadConstantOrVariable(t1, t2, t3) >+ loadConstantOrVariable32(t1, t2, t3) > storei t2, TagOffset[cfr, t0, 8] > storei t3, PayloadOffset[cfr, t0, 8] > dispatch(constexpr op_mov_length) >@@ -743,7 +743,7 @@ _llint_op_not: > traceExecution() > loadi 8[PC], t0 > loadi 4[PC], t1 >- loadConstantOrVariable(t0, t2, t3) >+ loadConstantOrVariable32(t0, t2, t3) > bineq t2, BooleanTag, .opNotSlow > xori 1, t3 > storei t2, TagOffset[cfr, t1, 8] >@@ -758,7 +758,7 @@ _llint_op_not: > macro equalityComparison(integerComparison, slowPath) > loadi 12[PC], t2 > loadi 8[PC], t0 >- loadConstantOrVariable(t2, t3, t1) >+ loadConstantOrVariable32(t2, t3, t1) > loadConstantOrVariable2Reg(t0, t2, t0) > bineq t2, t3, .opEqSlow > bieq t2, CellTag, .opEqSlow >@@ -778,7 +778,7 @@ end > macro equalityJump(integerComparison, slowPath) > loadi 8[PC], t2 > loadi 4[PC], t0 >- loadConstantOrVariable(t2, t3, t1) >+ loadConstantOrVariable32(t2, t3, t1) > loadConstantOrVariable2Reg(t0, t2, t0) > bineq t2, t3, .slow > bieq t2, CellTag, .slow >@@ -852,7 +852,7 @@ _llint_op_neq_null: > macro strictEq(equalityOperation, slowPath) > loadi 12[PC], t2 > loadi 8[PC], t0 >- loadConstantOrVariable(t2, t3, t1) >+ loadConstantOrVariable32(t2, t3, t1) > loadConstantOrVariable2Reg(t0, t2, t0) > bineq t2, t3, .slow > bib t2, LowestTag, .slow >@@ -875,7 +875,7 @@ end > macro strictEqualityJump(equalityOperation, slowPath) > loadi 8[PC], t2 > loadi 4[PC], t0 >- loadConstantOrVariable(t2, t3, t1) >+ loadConstantOrVariable32(t2, t3, t1) > loadConstantOrVariable2Reg(t0, t2, t0) > bineq t2, t3, .slow > bib t2, LowestTag, .slow >@@ -951,13 +951,13 @@ _llint_op_to_number: > traceExecution() > loadi 8[PC], t0 > loadi 4[PC], t1 >- loadConstantOrVariable(t0, t2, t3) >+ loadConstantOrVariable32(t0, t2, t3) > 
bieq t2, Int32Tag, .opToNumberIsInt > biaeq t2, LowestTag, .opToNumberSlow > .opToNumberIsInt: > storei t2, TagOffset[cfr, t1, 8] > storei t3, PayloadOffset[cfr, t1, 8] >- valueProfile(t2, t3, 12, t1) >+ valueProfile32(t2, t3, 12, t1) > dispatch(constexpr op_to_number_length) > > .opToNumberSlow: >@@ -969,7 +969,7 @@ _llint_op_to_string: > traceExecution() > loadi 8[PC], t0 > loadi 4[PC], t1 >- loadConstantOrVariable(t0, t2, t3) >+ loadConstantOrVariable32(t0, t2, t3) > bineq t2, CellTag, .opToStringSlow > bbneq JSCell::m_type[t3], StringType, .opToStringSlow > .opToStringIsString: >@@ -986,12 +986,12 @@ _llint_op_to_object: > traceExecution() > loadi 8[PC], t0 > loadi 4[PC], t1 >- loadConstantOrVariable(t0, t2, t3) >+ loadConstantOrVariable32(t0, t2, t3) > bineq t2, CellTag, .opToObjectSlow > bbb JSCell::m_type[t3], ObjectType, .opToObjectSlow > storei t2, TagOffset[cfr, t1, 8] > storei t3, PayloadOffset[cfr, t1, 8] >- valueProfile(t2, t3, 16, t1) >+ valueProfile32(t2, t3, 16, t1) > dispatch(constexpr op_to_object_length) > > .opToObjectSlow: >@@ -1003,7 +1003,7 @@ _llint_op_negate: > traceExecution() > loadi 8[PC], t0 > loadi 4[PC], t3 >- loadConstantOrVariable(t0, t1, t2) >+ loadConstantOrVariable32(t0, t1, t2) > loadisFromInstruction(3, t0) > bineq t1, Int32Tag, .opNegateSrcNotInt > btiz t2, 0x7fffffff, .opNegateSlow >@@ -1027,10 +1027,10 @@ _llint_op_negate: > dispatch(constexpr op_negate_length) > > >-macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath) >+macro binaryOpCustomStore32(integerOperationAndStore, doubleOperation, slowPath) > loadi 12[PC], t2 > loadi 8[PC], t0 >- loadConstantOrVariable(t2, t3, t1) >+ loadConstantOrVariable32(t2, t3, t1) > loadConstantOrVariable2Reg(t0, t2, t0) > bineq t2, Int32Tag, .op1NotInt > bineq t3, Int32Tag, .op2NotInt >@@ -1081,8 +1081,8 @@ macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath) > dispatch(5) > end > >-macro binaryOp(integerOperation, doubleOperation, 
slowPath) >- binaryOpCustomStore( >+macro binaryOp32(integerOperation, doubleOperation, slowPath) >+ binaryOpCustomStore32( > macro (int32Tag, left, right, slow, index) > integerOperation(left, right, slow) > storei int32Tag, TagOffset[cfr, index, 8] >@@ -1093,7 +1093,7 @@ end > > _llint_op_add: > traceExecution() >- binaryOp( >+ binaryOp32( > macro (left, right, slow) baddio left, right, slow end, > macro (left, right) addd left, right end, > _slow_path_add) >@@ -1101,7 +1101,7 @@ _llint_op_add: > > _llint_op_mul: > traceExecution() >- binaryOpCustomStore( >+ binaryOpCustomStore32( > macro (int32Tag, left, right, slow, index) > const scratch = int32Tag # We know that we can reuse the int32Tag register since it has a constant. > move right, scratch >@@ -1119,7 +1119,7 @@ _llint_op_mul: > > _llint_op_sub: > traceExecution() >- binaryOp( >+ binaryOp32( > macro (left, right, slow) bsubio left, right, slow end, > macro (left, right) subd left, right end, > _slow_path_sub) >@@ -1127,7 +1127,7 @@ _llint_op_sub: > > _llint_op_div: > traceExecution() >- binaryOpCustomStore( >+ binaryOpCustomStore32( > macro (int32Tag, left, right, slow, index) > ci2d left, ft0 > ci2d right, ft1 >@@ -1147,7 +1147,7 @@ _llint_op_div: > macro bitOp(operation, slowPath, advance) > loadi 12[PC], t2 > loadi 8[PC], t0 >- loadConstantOrVariable(t2, t3, t1) >+ loadConstantOrVariable32(t2, t3, t1) > loadConstantOrVariable2Reg(t0, t2, t0) > bineq t3, Int32Tag, .slow > bineq t2, Int32Tag, .slow >@@ -1227,13 +1227,13 @@ _llint_op_bitor: > _llint_op_overrides_has_instance: > traceExecution() > >- loadisFromStruct(OpOverridesHasInstance::m_dst, t3) >+ loadisFromStruct(OpOverridesHasInstance::dst, t3) > storei BooleanTag, TagOffset[cfr, t3, 8] > > # First check if hasInstanceValue is the one on Function.prototype[Symbol.hasInstance] >- loadisFromStruct(OpOverridesHasInstance::m_hasInstanceValue, t0) >+ loadisFromStruct(OpOverridesHasInstance::hasInstanceValue, t0) > loadConstantOrVariablePayload(t0, 
CellTag, t2, .opOverrideshasInstanceValueNotCell) >- loadConstantOrVariable(t0, t1, t2) >+ loadConstantOrVariable32(t0, t1, t2) > bineq t1, CellTag, .opOverrideshasInstanceValueNotCell > > # We don't need hasInstanceValue's tag register anymore. >@@ -1243,7 +1243,7 @@ _llint_op_overrides_has_instance: > bineq t1, t2, .opOverrideshasInstanceValueNotDefault > > # We know the constructor is a cell. >- loadisFromStruct(OpOverridesHasInstance::m_constructor, t0) >+ loadisFromStruct(OpOverridesHasInstance::constructor, t0) > loadConstantOrVariablePayloadUnchecked(t0, t1) > tbz JSCell::m_flags[t1], ImplementsDefaultHasInstance, t0 > storei t0, PayloadOffset[cfr, t3, 8] >@@ -1264,7 +1264,7 @@ _llint_op_is_empty: > traceExecution() > loadi 8[PC], t1 > loadi 4[PC], t0 >- loadConstantOrVariable(t1, t2, t3) >+ loadConstantOrVariable32(t1, t2, t3) > cieq t2, EmptyValueTag, t3 > storei BooleanTag, TagOffset[cfr, t0, 8] > storei t3, PayloadOffset[cfr, t0, 8] >@@ -1275,7 +1275,7 @@ _llint_op_is_undefined: > traceExecution() > loadi 8[PC], t1 > loadi 4[PC], t0 >- loadConstantOrVariable(t1, t2, t3) >+ loadConstantOrVariable32(t1, t2, t3) > storei BooleanTag, TagOffset[cfr, t0, 8] > bieq t2, CellTag, .opIsUndefinedCell > cieq t2, UndefinedTag, t3 >@@ -1322,7 +1322,7 @@ _llint_op_is_cell_with_type: > traceExecution() > loadi 8[PC], t1 > loadi 4[PC], t2 >- loadConstantOrVariable(t1, t0, t3) >+ loadConstantOrVariable32(t1, t0, t3) > storei BooleanTag, TagOffset[cfr, t2, 8] > bineq t0, CellTag, .notCellCase > loadi 12[PC], t0 >@@ -1338,7 +1338,7 @@ _llint_op_is_object: > traceExecution() > loadi 8[PC], t1 > loadi 4[PC], t2 >- loadConstantOrVariable(t1, t0, t3) >+ loadConstantOrVariable32(t1, t0, t3) > storei BooleanTag, TagOffset[cfr, t2, 8] > bineq t0, CellTag, .opIsObjectNotCell > cbaeq JSCell::m_type[t3], ObjectType, t1 >@@ -1357,7 +1357,7 @@ macro loadPropertyAtVariableOffsetKnownNotInline(propertyOffset, objectAndStorag > loadi PayloadOffset + (firstOutOfLineOffset - 2) * 
8[objectAndStorage, propertyOffset, 8], payload > end > >-macro loadPropertyAtVariableOffset(propertyOffset, objectAndStorage, tag, payload) >+macro loadPropertyAtVariableOffset32(propertyOffset, objectAndStorage, tag, payload) > bilt propertyOffset, firstOutOfLineOffset, .isInline > loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage > negi propertyOffset >@@ -1369,7 +1369,7 @@ macro loadPropertyAtVariableOffset(propertyOffset, objectAndStorage, tag, payloa > loadi PayloadOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], payload > end > >-macro storePropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, tag, payload) >+macro storePropertyAtVariableOffset32(propertyOffsetAsInt, objectAndStorage, tag, payload) > bilt propertyOffsetAsInt, firstOutOfLineOffset, .isInline > loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage > negi propertyOffsetAsInt >@@ -1397,11 +1397,11 @@ _llint_op_get_by_id_direct: > loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdDirectSlow) > loadi 20[PC], t2 > bineq JSCell::m_structureID[t3], t1, .opGetByIdDirectSlow >- loadPropertyAtVariableOffset(t2, t3, t0, t1) >+ loadPropertyAtVariableOffset32(t2, t3, t0, t1) > loadi 4[PC], t2 > storei t0, TagOffset[cfr, t2, 8] > storei t1, PayloadOffset[cfr, t2, 8] >- valueProfile(t0, t1, 24, t2) >+ valueProfile32(t0, t1, 24, t2) > dispatch(constexpr op_get_by_id_direct_length) > > .opGetByIdDirectSlow: >@@ -1416,11 +1416,11 @@ _llint_op_get_by_id: > loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdSlow) > loadi 20[PC], t2 > bineq JSCell::m_structureID[t3], t1, .opGetByIdSlow >- loadPropertyAtVariableOffset(t2, t3, t0, t1) >+ loadPropertyAtVariableOffset32(t2, t3, t0, t1) > loadi 4[PC], t2 > storei t0, TagOffset[cfr, t2, 8] > storei t1, PayloadOffset[cfr, t2, 8] >- valueProfile(t0, t1, 32, t2) >+ valueProfile32(t0, t1, 32, t2) > dispatch(constexpr op_get_by_id_length) > > .opGetByIdSlow: >@@ -1436,11 +1436,11 @@ 
_llint_op_get_by_id_proto_load: > loadi 20[PC], t2 > bineq JSCell::m_structureID[t3], t1, .opGetByIdProtoSlow > loadpFromInstruction(6, t3) >- loadPropertyAtVariableOffset(t2, t3, t0, t1) >+ loadPropertyAtVariableOffset32(t2, t3, t0, t1) > loadi 4[PC], t2 > storei t0, TagOffset[cfr, t2, 8] > storei t1, PayloadOffset[cfr, t2, 8] >- valueProfile(t0, t1, 32, t2) >+ valueProfile32(t0, t1, 32, t2) > dispatch(constexpr op_get_by_id_proto_load_length) > > .opGetByIdProtoSlow: >@@ -1457,7 +1457,7 @@ _llint_op_get_by_id_unset: > loadi 4[PC], t2 > storei UndefinedTag, TagOffset[cfr, t2, 8] > storei 0, PayloadOffset[cfr, t2, 8] >- valueProfile(UndefinedTag, 0, 32, t2) >+ valueProfile32(UndefinedTag, 0, 32, t2) > dispatch(constexpr op_get_by_id_unset_length) > > .opGetByIdUnsetSlow: >@@ -1478,7 +1478,7 @@ _llint_op_get_array_length: > loadp JSObject::m_butterfly[t3], t0 > loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0 > bilt t0, 0, .opGetArrayLengthSlow >- valueProfile(Int32Tag, t0, 32, t2) >+ valueProfile32(Int32Tag, t0, 32, t2) > storep t0, PayloadOffset[cfr, t1, 8] > storep Int32Tag, TagOffset[cfr, t1, 8] > dispatch(constexpr op_get_array_length_length) >@@ -1502,7 +1502,7 @@ _llint_op_put_by_id: > # We will lose currentStructureID in the shenanigans below. > > loadi 12[PC], t1 >- loadConstantOrVariable(t1, t2, t3) >+ loadConstantOrVariable32(t1, t2, t3) > loadi 32[PC], t1 > > # At this point, we have: >@@ -1606,18 +1606,18 @@ _llint_op_put_by_id: > .opPutByIdTransitionDirect: > storei t1, JSCell::m_structureID[t0] > loadi 12[PC], t1 >- loadConstantOrVariable(t1, t2, t3) >+ loadConstantOrVariable32(t1, t2, t3) > loadi 20[PC], t1 >- storePropertyAtVariableOffset(t1, t0, t2, t3) >+ storePropertyAtVariableOffset32(t1, t0, t2, t3) > writeBarrierOnOperand(1) > dispatch(constexpr op_put_by_id_length) > > .opPutByIdNotTransition: > # The only thing live right now is t0, which holds the base. 
> loadi 12[PC], t1 >- loadConstantOrVariable(t1, t2, t3) >+ loadConstantOrVariable32(t1, t2, t3) > loadi 20[PC], t1 >- storePropertyAtVariableOffset(t1, t0, t2, t3) >+ storePropertyAtVariableOffset32(t1, t0, t2, t3) > dispatch(constexpr op_put_by_id_length) > > .opPutByIdSlow: >@@ -1668,7 +1668,7 @@ _llint_op_get_by_val: > .opGetByValNotEmpty: > storei t2, TagOffset[cfr, t0, 8] > storei t1, PayloadOffset[cfr, t0, 8] >- valueProfile(t2, t1, 20, t0) >+ valueProfile32(t2, t1, 20, t0) > dispatch(constexpr op_get_by_val_length) > > .opGetByValSlow: >@@ -1676,7 +1676,7 @@ _llint_op_get_by_val: > dispatch(constexpr op_get_by_val_length) > > >-macro contiguousPutByVal(storeCallback) >+macro contiguousPutByVal32(storeCallback) > biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds > .storeResult: > loadi 12[PC], t2 >@@ -1706,7 +1706,7 @@ macro putByVal(slowPath) > btinz t2, CopyOnWrite, .opPutByValSlow > andi IndexingShapeMask, t2 > bineq t2, Int32Shape, .opPutByValNotInt32 >- contiguousPutByVal( >+ contiguousPutByVal32( > macro (operand, scratch, base, index) > loadConstantOrVariablePayload(operand, Int32Tag, scratch, .opPutByValSlow) > storei Int32Tag, TagOffset[base, index, 8] >@@ -1715,7 +1715,7 @@ macro putByVal(slowPath) > > .opPutByValNotInt32: > bineq t2, DoubleShape, .opPutByValNotDouble >- contiguousPutByVal( >+ contiguousPutByVal32( > macro (operand, scratch, base, index) > const tag = scratch > const payload = operand >@@ -1732,7 +1732,7 @@ macro putByVal(slowPath) > > .opPutByValNotDouble: > bineq t2, ContiguousShape, .opPutByValNotContiguous >- contiguousPutByVal( >+ contiguousPutByVal32( > macro (operand, scratch, base, index) > const tag = scratch > const payload = operand >@@ -1858,7 +1858,7 @@ _llint_op_jneq_ptr: > macro compareUnsignedJump(integerCompare) > loadi 4[PC], t2 > loadi 8[PC], t3 >- loadConstantOrVariable(t2, t0, t1) >+ loadConstantOrVariable32(t2, t0, t1) > loadConstantOrVariable2Reg(t3, t2, t3) > 
integerCompare(t1, t3, .jumpTarget) > dispatch(4) >@@ -1871,7 +1871,7 @@ end > macro compareUnsigned(integerCompareAndSet) > loadi 12[PC], t2 > loadi 8[PC], t0 >- loadConstantOrVariable(t2, t3, t1) >+ loadConstantOrVariable32(t2, t3, t1) > loadConstantOrVariable2Reg(t0, t2, t0) > integerCompareAndSet(t0, t1, t0) > loadi 4[PC], t2 >@@ -1884,7 +1884,7 @@ end > macro compareJump(integerCompare, doubleCompare, slowPath) > loadi 4[PC], t2 > loadi 8[PC], t3 >- loadConstantOrVariable(t2, t0, t1) >+ loadConstantOrVariable32(t2, t0, t1) > loadConstantOrVariable2Reg(t3, t2, t3) > bineq t0, Int32Tag, .op1NotInt > bineq t2, Int32Tag, .op2NotInt >@@ -1924,7 +1924,7 @@ _llint_op_switch_imm: > traceExecution() > loadi 12[PC], t2 > loadi 4[PC], t3 >- loadConstantOrVariable(t2, t1, t0) >+ loadConstantOrVariable32(t2, t1, t0) > loadp CodeBlock[cfr], t2 > loadp CodeBlock::m_rareData[t2], t2 > muli sizeof SimpleJumpTable, t3 # FIXME: would be nice to peephole this! >@@ -1952,7 +1952,7 @@ _llint_op_switch_char: > traceExecution() > loadi 12[PC], t2 > loadi 4[PC], t3 >- loadConstantOrVariable(t2, t1, t0) >+ loadConstantOrVariable32(t2, t1, t0) > loadp CodeBlock[cfr], t2 > loadp CodeBlock::m_rareData[t2], t2 > muli sizeof SimpleJumpTable, t3 >@@ -2023,7 +2023,7 @@ _llint_op_ret: > traceExecution() > checkSwitchToJITForEpilogue() > loadi 4[PC], t2 >- loadConstantOrVariable(t2, t1, t0) >+ loadConstantOrVariable32(t2, t1, t0) > doReturn() > > >@@ -2031,7 +2031,7 @@ _llint_op_to_primitive: > traceExecution() > loadi 8[PC], t2 > loadi 4[PC], t3 >- loadConstantOrVariable(t2, t1, t0) >+ loadConstantOrVariable32(t2, t1, t0) > bineq t1, CellTag, .opToPrimitiveIsImm > bbaeq JSCell::m_type[t0], ObjectType, .opToPrimitiveSlowCase > .opToPrimitiveIsImm: >@@ -2340,8 +2340,8 @@ end > > macro getProperty() > loadisFromInstruction(6, t3) >- loadPropertyAtVariableOffset(t3, t0, t1, t2) >- valueProfile(t1, t2, 28, t0) >+ loadPropertyAtVariableOffset32(t3, t0, t1, t2) >+ valueProfile32(t1, t2, 28, t0) > 
loadisFromInstruction(1, t0) > storei t1, TagOffset[cfr, t0, 8] > storei t2, PayloadOffset[cfr, t0, 8] >@@ -2352,7 +2352,7 @@ macro getGlobalVar(tdzCheckIfNecessary) > loadp TagOffset[t0], t1 > loadp PayloadOffset[t0], t2 > tdzCheckIfNecessary(t1) >- valueProfile(t1, t2, 28, t0) >+ valueProfile32(t1, t2, 28, t0) > loadisFromInstruction(1, t0) > storei t1, TagOffset[cfr, t0, 8] > storei t2, PayloadOffset[cfr, t0, 8] >@@ -2362,7 +2362,7 @@ macro getClosureVar() > loadisFromInstruction(6, t3) > loadp JSLexicalEnvironment_variables + TagOffset[t0, t3, 8], t1 > loadp JSLexicalEnvironment_variables + PayloadOffset[t0, t3, 8], t2 >- valueProfile(t1, t2, 28, t0) >+ valueProfile32(t1, t2, 28, t0) > loadisFromInstruction(1, t0) > storei t1, TagOffset[cfr, t0, 8] > storei t2, PayloadOffset[cfr, t0, 8] >@@ -2394,7 +2394,7 @@ _llint_op_get_from_scope: > > .gClosureVar: > bineq t0, ClosureVar, .gGlobalPropertyWithVarInjectionChecks >- loadVariable(2, t2, t1, t0) >+ loadVariable32(2, t2, t1, t0) > getClosureVar() > dispatch(8) > >@@ -2422,7 +2422,7 @@ _llint_op_get_from_scope: > .gClosureVarWithVarInjectionChecks: > bineq t0, ClosureVarWithVarInjectionChecks, .gDynamic > varInjectionCheck(.gDynamic) >- loadVariable(2, t2, t1, t0) >+ loadVariable32(2, t2, t1, t0) > getClosureVar() > dispatch(8) > >@@ -2433,14 +2433,14 @@ _llint_op_get_from_scope: > > macro putProperty() > loadisFromInstruction(3, t1) >- loadConstantOrVariable(t1, t2, t3) >+ loadConstantOrVariable32(t1, t2, t3) > loadisFromInstruction(6, t1) >- storePropertyAtVariableOffset(t1, t0, t2, t3) >+ storePropertyAtVariableOffset32(t1, t0, t2, t3) > end > > macro putGlobalVariable() > loadisFromInstruction(3, t0) >- loadConstantOrVariable(t0, t1, t2) >+ loadConstantOrVariable32(t0, t1, t2) > loadpFromInstruction(5, t3) > notifyWrite(t3, .pDynamic) > loadpFromInstruction(6, t0) >@@ -2450,7 +2450,7 @@ end > > macro putClosureVar() > loadisFromInstruction(3, t1) >- loadConstantOrVariable(t1, t2, t3) >+ 
loadConstantOrVariable32(t1, t2, t3) > loadisFromInstruction(6, t1) > storei t2, JSLexicalEnvironment_variables + TagOffset[t0, t1, 8] > storei t3, JSLexicalEnvironment_variables + PayloadOffset[t0, t1, 8] >@@ -2458,7 +2458,7 @@ end > > macro putLocalClosureVar() > loadisFromInstruction(3, t1) >- loadConstantOrVariable(t1, t2, t3) >+ loadConstantOrVariable32(t1, t2, t3) > loadpFromInstruction(5, t5) > btpz t5, .noVariableWatchpointSet > notifyWrite(t5, .pDynamic) >@@ -2477,7 +2477,7 @@ _llint_op_put_to_scope: > #pLocalClosureVar: > bineq t0, LocalClosureVar, .pGlobalProperty > writeBarrierOnOperands(1, 3) >- loadVariable(1, t2, t1, t0) >+ loadVariable32(1, t2, t1, t0) > putLocalClosureVar() > dispatch(7) > >@@ -2503,7 +2503,7 @@ _llint_op_put_to_scope: > .pClosureVar: > bineq t0, ClosureVar, .pGlobalPropertyWithVarInjectionChecks > writeBarrierOnOperands(1, 3) >- loadVariable(1, t2, t1, t0) >+ loadVariable32(1, t2, t1, t0) > putClosureVar() > dispatch(7) > >@@ -2532,7 +2532,7 @@ _llint_op_put_to_scope: > bineq t0, ClosureVarWithVarInjectionChecks, .pModuleVar > writeBarrierOnOperands(1, 3) > varInjectionCheck(.pDynamic) >- loadVariable(1, t2, t1, t0) >+ loadVariable32(1, t2, t1, t0) > putClosureVar() > dispatch(7) > >@@ -2554,7 +2554,7 @@ _llint_op_get_from_arguments: > loadi DirectArguments_storage + TagOffset[t0, t1, 8], t2 > loadi DirectArguments_storage + PayloadOffset[t0, t1, 8], t3 > loadisFromInstruction(1, t1) >- valueProfile(t2, t3, 16, t0) >+ valueProfile32(t2, t3, 16, t0) > storei t2, TagOffset[cfr, t1, 8] > storei t3, PayloadOffset[cfr, t1, 8] > dispatch(5) >@@ -2566,7 +2566,7 @@ _llint_op_put_to_arguments: > loadisFromInstruction(1, t0) > loadi PayloadOffset[cfr, t0, 8], t0 > loadisFromInstruction(3, t1) >- loadConstantOrVariable(t1, t2, t3) >+ loadConstantOrVariable32(t1, t2, t3) > loadi 8[PC], t1 > storei t2, DirectArguments_storage + TagOffset[t0, t1, 8] > storei t3, DirectArguments_storage + PayloadOffset[t0, t1, 8] >@@ -2594,7 +2594,7 @@ 
_llint_op_profile_type: > > # t0 is holding the payload, t5 is holding the tag. > loadisFromInstruction(1, t2) >- loadConstantOrVariable(t2, t5, t0) >+ loadConstantOrVariable32(t2, t5, t0) > > bieq t5, EmptyValueTag, .opProfileTypeDone > >@@ -2679,7 +2679,7 @@ _llint_op_log_shadow_chicken_tail: > acquireShadowChickenPacket(.opLogShadowChickenTailSlow) > storep cfr, ShadowChicken::Packet::frame[t0] > storep ShadowChickenTailMarker, ShadowChicken::Packet::callee[t0] >- loadVariable(1, t3, t2, t1) >+ loadVariable32(1, t3, t2, t1) > storei t2, TagOffset + ShadowChicken::Packet::thisValue[t0] > storei t1, PayloadOffset + ShadowChicken::Packet::thisValue[t0] > loadisFromInstruction(2, t1) >diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm >index f867597fc46e531a385561f271b871a98422bab0..3ae59065d44209018d1a3d035c3bbfd95cc6c883 100644 >--- a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm >+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm >@@ -23,23 +23,6 @@ > > > # Utilities. 
>-macro jumpToInstruction() >- jmp [PB, PC, 8], BytecodePtrTag >-end >- >-macro dispatch(advance) >- addp advance, PC >- jumpToInstruction() >-end >- >-macro dispatchInt(advance) >- addi advance, PC >- jumpToInstruction() >-end >- >-macro dispatchIntIndirect(offset) >- dispatchInt(offset * 8[PB, PC, 8]) >-end > > macro dispatchAfterCall() > loadi ArgumentCount + TagOffset[cfr], PC >@@ -225,7 +208,7 @@ macro doVMEntry(makeCall) > > checkStackPointerAlignment(extraTempReg, 0xbad0dc02) > >- makeCall(entry, t3) >+ makeCall(entry, t3, t4) > > # We may have just made a call into a JS function, so we can't rely on sp > # for anything but the fact that our own locals (ie the VMEntryRecord) are >@@ -249,7 +232,7 @@ macro doVMEntry(makeCall) > end > > >-macro makeJavaScriptCall(entry, temp) >+macro makeJavaScriptCall(entry, temp, unused) > addp 16, sp > if C_LOOP > cloopCallJSFunction entry >@@ -259,8 +242,7 @@ macro makeJavaScriptCall(entry, temp) > subp 16, sp > end > >- >-macro makeHostFunctionCall(entry, temp) >+macro makeHostFunctionCall(entry, temp, unused) > move entry, temp > storep cfr, [sp] > move sp, a0 >@@ -277,7 +259,7 @@ macro makeHostFunctionCall(entry, temp) > end > end > >-_handleUncaughtException: >+op(handleUncaughtException, macro (getOperand, disp__) > loadp Callee[cfr], t3 > andp MarkedBlockMask, t3 > loadp MarkedBlockFooterOffset + MarkedBlock::Footer::m_vm[t3], t3 >@@ -299,6 +281,7 @@ _handleUncaughtException: > popCalleeSaves() > functionEpilogue() > ret >+end) > > > macro prepareStateForCCall() >@@ -591,8 +574,15 @@ end > > > # Instruction implementations >-_llint_op_enter: >+_llint_op_wide: >+ traceExecution() >+ dispatchWide(constexpr op_wide_length) >+ >+_llint_op_wide_wide: > traceExecution() >+ crash() >+ >+llintOp(op_enter, macro (getOperand, disp__) > checkStackPointerAlignment(t2, 0xdead00e1) > loadp CodeBlock[cfr], t2 // t2<CodeBlock> = cfr.CodeBlock > loadi CodeBlock::m_numVars[t2], t2 // t2<size_t> = t2<CodeBlock>.m_numVars >@@ -609,11 
+599,11 @@ _llint_op_enter: > btqnz t2, .opEnterLoop > .opEnterDone: > callSlowPath(_slow_path_enter) >- dispatch(constexpr op_enter_length) >+ disp__() >+end) > > >-_llint_op_get_argument: >- traceExecution() >+llintOp(op_get_argument, macro (getOperand, disp__) > loadisFromInstruction(1, t1) > loadisFromInstruction(2, t2) > loadi PayloadOffset + ArgumentCount[cfr], t0 >@@ -621,35 +611,35 @@ _llint_op_get_argument: > loadq ThisArgumentOffset[cfr, t2, 8], t0 > storeq t0, [cfr, t1, 8] > valueProfile(t0, 3, t2) >- dispatch(constexpr op_get_argument_length) >+ disp__() > > .opGetArgumentOutOfBounds: > storeq ValueUndefined, [cfr, t1, 8] > valueProfile(ValueUndefined, 3, t2) >- dispatch(constexpr op_get_argument_length) >+ disp__() >+end) > > >-_llint_op_argument_count: >- traceExecution() >- loadisFromInstruction(1, t1) >+llintOp(op_argument_count, macro (getOperand, disp__) >+ getOperand(1, t1) > loadi PayloadOffset + ArgumentCount[cfr], t0 > subi 1, t0 > orq TagTypeNumber, t0 > storeq t0, [cfr, t1, 8] >- dispatch(constexpr op_argument_count_length) >+ disp__() >+end) > > >-_llint_op_get_scope: >- traceExecution() >+llintOp(op_get_scope, macro (getOperand, disp__) > loadp Callee[cfr], t0 > loadp JSCallee::m_scope[t0], t0 > loadisFromInstruction(1, t1) > storeq t0, [cfr, t1, 8] >- dispatch(constexpr op_get_scope_length) >+ disp__() >+end) > > >-_llint_op_to_this: >- traceExecution() >+llintOp(op_to_this, macro (getOperand, disp__) > loadisFromInstruction(1, t0) > loadq [cfr, t0, 8], t0 > btqnz t0, tagMask, .opToThisSlow >@@ -657,47 +647,48 @@ _llint_op_to_this: > loadStructureWithScratch(t0, t1, t2, t3) > loadpFromInstruction(2, t2) > bpneq t1, t2, .opToThisSlow >- dispatch(constexpr op_to_this_length) >+ disp__() > > .opToThisSlow: > callSlowPath(_slow_path_to_this) >- dispatch(constexpr op_to_this_length) >+ disp__() >+end) > > >-_llint_op_check_tdz: >- traceExecution() >- loadisFromInstruction(1, t0) >+llintOp(op_check_tdz, macro (getOperand, disp__) >+ 
getOperand(1, t0) > loadConstantOrVariable(t0, t1) > bqneq t1, ValueEmpty, .opNotTDZ > callSlowPath(_slow_path_throw_tdz_error) > > .opNotTDZ: >- dispatch(constexpr op_check_tdz_length) >+ disp__() >+end) > > >-_llint_op_mov: >- traceExecution() >- loadisFromInstruction(2, t1) >- loadisFromInstruction(1, t0) >+llintOp(op_mov, macro (getOperand, disp__) >+ getOperand(2, t1) >+ getOperand(1, t0) > loadConstantOrVariable(t1, t2) > storeq t2, [cfr, t0, 8] >- dispatch(constexpr op_mov_length) >+ disp__() >+end) > > >-_llint_op_not: >- traceExecution() >- loadisFromInstruction(2, t0) >- loadisFromInstruction(1, t1) >+llintOp(op_not, macro (getOperand, disp__) >+ getOperand(2, t0) >+ getOperand(1, t1) > loadConstantOrVariable(t0, t2) > xorq ValueFalse, t2 > btqnz t2, ~1, .opNotSlow > xorq ValueTrue, t2 > storeq t2, [cfr, t1, 8] >- dispatch(constexpr op_not_length) >+ disp__() > > .opNotSlow: > callSlowPath(_slow_path_not) >- dispatch(constexpr op_not_length) >+ disp__() >+end) > > > macro equalityComparison(integerComparison, slowPath) >@@ -726,7 +717,7 @@ macro equalityJump(integerComparison, slowPath) > dispatch(constexpr op_jeq_length) > > .jumpTarget: >- dispatchIntIndirect(3) >+ dispatchIndirect(3) > > .slow: > callSlowPath(slowPath) >@@ -753,22 +744,22 @@ macro equalNullComparison() > .done: > end > >-_llint_op_eq_null: >- traceExecution() >+llintOp(op_eq_null, macro (getOperand, disp__) > equalNullComparison() >- loadisFromInstruction(1, t1) >+ getOperand(1, t1) > orq ValueFalse, t0 > storeq t0, [cfr, t1, 8] >- dispatch(constexpr op_eq_null_length) >+ disp__() >+end) > > >-_llint_op_neq_null: >- traceExecution() >+llintOp(op_neq_null, macro (getOperand, disp__) > equalNullComparison() > loadisFromInstruction(1, t1) > xorq ValueTrue, t0 > storeq t0, [cfr, t1, 8] >- dispatch(constexpr op_neq_null_length) >+ disp__() >+end) > > > macro strictEq(equalityOperation, slowPath) >@@ -812,47 +803,46 @@ macro strictEqualityJump(equalityOperation, slowPath) > btqnz t1, 
tagTypeNumber, .slow > .rightOK: > equalityOperation(t0, t1, .jumpTarget) >- dispatch(constexpr op_jstricteq_length) >+ dispatch(4) > > .jumpTarget: >- dispatchIntIndirect(3) >+ dispatchIndirect(3) > > .slow: > callSlowPath(slowPath) >- dispatch(0) >+ dispatch(4) > end > > >-_llint_op_stricteq: >- traceExecution() >+llintOp(op_stricteq, macro (getOperand, disp__) > strictEq( > macro (left, right, result) cqeq left, right, result end, > _slow_path_stricteq) >+end) > > >-_llint_op_nstricteq: >- traceExecution() >+llintOp(op_nstricteq, macro (getOperand, disp__) > strictEq( > macro (left, right, result) cqneq left, right, result end, > _slow_path_nstricteq) >+end) > > >-_llint_op_jstricteq: >- traceExecution() >+llintOp(op_jstricteq, macro (getOperand, disp__) > strictEqualityJump( > macro (left, right, target) bqeq left, right, target end, > _llint_slow_path_jstricteq) >+end) > > >-_llint_op_jnstricteq: >- traceExecution() >+llintOp(op_jnstricteq, macro (getOperand, disp__) > strictEqualityJump( > macro (left, right, target) bqneq left, right, target end, > _llint_slow_path_jnstricteq) >+end) > > > macro preOp(arithmeticOperation, slowPath) >- traceExecution() > loadisFromInstruction(1, t0) > loadq [cfr, t0, 8], t1 > bqb t1, tagTypeNumber, .slow >@@ -866,20 +856,21 @@ macro preOp(arithmeticOperation, slowPath) > dispatch(2) > end > >-_llint_op_inc: >+llintOp(op_inc, macro (getOperand, disp__) > preOp( > macro (value, slow) baddio 1, value, slow end, > _slow_path_inc) >+end) > > >-_llint_op_dec: >+llintOp(op_dec, macro (getOperand, disp__) > preOp( > macro (value, slow) bsubio 1, value, slow end, > _slow_path_dec) >+end) > > >-_llint_op_to_number: >- traceExecution() >+llintOp(op_to_number, macro (getOperand, disp__) > loadisFromInstruction(2, t0) > loadisFromInstruction(1, t1) > loadConstantOrVariable(t0, t2) >@@ -888,15 +879,15 @@ _llint_op_to_number: > .opToNumberIsImmediate: > storeq t2, [cfr, t1, 8] > valueProfile(t2, 3, t0) >- dispatch(constexpr 
op_to_number_length) >+ disp__() > > .opToNumberSlow: > callSlowPath(_slow_path_to_number) >- dispatch(constexpr op_to_number_length) >+ disp__() >+end) > > >-_llint_op_to_string: >- traceExecution() >+llintOp(op_to_string, macro (getOperand, disp__) > loadisFromInstruction(2, t1) > loadisFromInstruction(1, t2) > loadConstantOrVariable(t1, t0) >@@ -904,15 +895,15 @@ _llint_op_to_string: > bbneq JSCell::m_type[t0], StringType, .opToStringSlow > .opToStringIsString: > storeq t0, [cfr, t2, 8] >- dispatch(constexpr op_to_string_length) >+ disp__() > > .opToStringSlow: > callSlowPath(_slow_path_to_string) >- dispatch(constexpr op_to_string_length) >+ disp__() >+end) > > >-_llint_op_to_object: >- traceExecution() >+llintOp(op_to_object, macro (getOperand, disp__) > loadisFromInstruction(2, t0) > loadisFromInstruction(1, t1) > loadConstantOrVariable(t0, t2) >@@ -920,15 +911,15 @@ _llint_op_to_object: > bbb JSCell::m_type[t2], ObjectType, .opToObjectSlow > storeq t2, [cfr, t1, 8] > valueProfile(t2, 4, t0) >- dispatch(constexpr op_to_object_length) >+ disp__() > > .opToObjectSlow: > callSlowPath(_slow_path_to_object) >- dispatch(constexpr op_to_object_length) >+ disp__() >+end) > > >-_llint_op_negate: >- traceExecution() >+llintOp(op_negate, macro (getOperand, disp__) > loadisFromInstruction(2, t0) > loadisFromInstruction(1, t1) > loadConstantOrVariable(t0, t3) >@@ -940,18 +931,19 @@ _llint_op_negate: > orq tagTypeNumber, t3 > storeisToInstruction(t2, 3) > storeq t3, [cfr, t1, 8] >- dispatch(constexpr op_negate_length) >+ disp__() > .opNegateNotInt: > btqz t3, tagTypeNumber, .opNegateSlow > xorq 0x8000000000000000, t3 > ori ArithProfileNumber, t2 > storeq t3, [cfr, t1, 8] > storeisToInstruction(t2, 3) >- dispatch(constexpr op_negate_length) >+ disp__() > > .opNegateSlow: > callSlowPath(_slow_path_negate) >- dispatch(constexpr op_negate_length) >+ disp__() >+end) > > > macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath) >@@ -1025,16 +1017,15 @@ 
macro binaryOp(integerOperation, doubleOperation, slowPath) > doubleOperation, slowPath) > end > >-_llint_op_add: >- traceExecution() >+llintOp(op_add, macro (getOperand, disp__) > binaryOp( > macro (left, right, slow) baddio left, right, slow end, > macro (left, right) addd left, right end, > _slow_path_add) >+end) > > >-_llint_op_mul: >- traceExecution() >+llintOp(op_mul, macro (getOperand, disp__) > binaryOpCustomStore( > macro (left, right, slow, index) > # Assume t3 is scratchable. >@@ -1049,18 +1040,18 @@ _llint_op_mul: > end, > macro (left, right) muld left, right end, > _slow_path_mul) >+end) > > >-_llint_op_sub: >- traceExecution() >+llintOp(op_sub, macro (getOperand, disp__) > binaryOp( > macro (left, right, slow) bsubio left, right, slow end, > macro (left, right) subd left, right end, > _slow_path_sub) >+end) > > >-_llint_op_div: >- traceExecution() >+llintOp(op_div, macro (getOperand, disp__) > if X86_64 or X86_64_WIN > binaryOpCustomStore( > macro (left, right, slow, index) >@@ -1084,8 +1075,9 @@ _llint_op_div: > _slow_path_div) > else > callSlowPath(_slow_path_div) >- dispatch(constexpr op_div_length) >+ disp__() > end >+end) > > > macro bitOp(operation, slowPath, advance) >@@ -1106,109 +1098,108 @@ macro bitOp(operation, slowPath, advance) > dispatch(advance) > end > >-_llint_op_lshift: >- traceExecution() >+llintOp(op_lshift, macro (getOperand, disp__) > bitOp( > macro (left, right) lshifti left, right end, > _slow_path_lshift, > constexpr op_lshift_length) >+end) > > >-_llint_op_rshift: >- traceExecution() >+llintOp(op_rshift, macro (getOperand, disp__) > bitOp( > macro (left, right) rshifti left, right end, > _slow_path_rshift, > constexpr op_rshift_length) >+end) > > >-_llint_op_urshift: >- traceExecution() >+llintOp(op_urshift, macro (getOperand, disp__) > bitOp( > macro (left, right) urshifti left, right end, > _slow_path_urshift, > constexpr op_urshift_length) >+end) > > >-_llint_op_unsigned: >- traceExecution() >+llintOp(op_unsigned, macro 
(getOperand, disp__) > loadisFromInstruction(1, t0) > loadisFromInstruction(2, t1) > loadConstantOrVariable(t1, t2) > bilt t2, 0, .opUnsignedSlow > storeq t2, [cfr, t0, 8] >- dispatch(constexpr op_unsigned_length) >+ disp__() > .opUnsignedSlow: > callSlowPath(_slow_path_unsigned) >- dispatch(constexpr op_unsigned_length) >+ disp__() >+end) > > >-_llint_op_bitand: >- traceExecution() >+llintOp(op_bitand, macro (getOperand, disp__) > bitOp( > macro (left, right) andi left, right end, > _slow_path_bitand, > constexpr op_bitand_length) >+end) > > >-_llint_op_bitxor: >- traceExecution() >+llintOp(op_bitxor, macro (getOperand, disp__) > bitOp( > macro (left, right) xori left, right end, > _slow_path_bitxor, > constexpr op_bitxor_length) >+end) > > >-_llint_op_bitor: >- traceExecution() >+llintOp(op_bitor, macro (getOperand, disp__) > bitOp( > macro (left, right) ori left, right end, > _slow_path_bitor, > constexpr op_bitor_length) >+end) > > >-_llint_op_overrides_has_instance: >- traceExecution() >- loadisFromStruct(OpOverridesHasInstance::m_dst, t3) >+llintOp(op_overrides_has_instance, macro (getOperand, disp__) >+ loadisFromStruct(OpOverridesHasInstance::dst, t3) > >- loadisFromStruct(OpOverridesHasInstance::m_hasInstanceValue, t1) >+ loadisFromStruct(OpOverridesHasInstance::hasInstanceValue, t1) > loadConstantOrVariable(t1, t0) > loadp CodeBlock[cfr], t2 > loadp CodeBlock::m_globalObject[t2], t2 > loadp JSGlobalObject::m_functionProtoHasInstanceSymbolFunction[t2], t2 > bqneq t0, t2, .opOverridesHasInstanceNotDefaultSymbol > >- loadisFromStruct(OpOverridesHasInstance::m_constructor, t1) >+ loadisFromStruct(OpOverridesHasInstance::constructor, t1) > loadConstantOrVariable(t1, t0) > tbz JSCell::m_flags[t0], ImplementsDefaultHasInstance, t1 > orq ValueFalse, t1 > storeq t1, [cfr, t3, 8] >- dispatch(constexpr op_overrides_has_instance_length) >+ disp__() > > .opOverridesHasInstanceNotDefaultSymbol: > storeq ValueTrue, [cfr, t3, 8] >- dispatch(constexpr 
op_overrides_has_instance_length) >+ disp__() >+end) > > >-_llint_op_instanceof_custom: >- traceExecution() >+llintOp(op_instanceof_custom, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_instanceof_custom) >- dispatch(constexpr op_instanceof_custom_length) >+ disp__() >+end) > > >-_llint_op_is_empty: >- traceExecution() >+llintOp(op_is_empty, macro (getOperand, disp__) > loadisFromInstruction(2, t1) > loadisFromInstruction(1, t2) > loadConstantOrVariable(t1, t0) > cqeq t0, ValueEmpty, t3 > orq ValueFalse, t3 > storeq t3, [cfr, t2, 8] >- dispatch(constexpr op_is_empty_length) >+ disp__() >+end) > > >-_llint_op_is_undefined: >- traceExecution() >+llintOp(op_is_undefined, macro (getOperand, disp__) > loadisFromInstruction(2, t1) > loadisFromInstruction(1, t2) > loadConstantOrVariable(t1, t0) >@@ -1216,12 +1207,12 @@ _llint_op_is_undefined: > cqeq t0, ValueUndefined, t3 > orq ValueFalse, t3 > storeq t3, [cfr, t2, 8] >- dispatch(constexpr op_is_undefined_length) >+ disp__() > .opIsUndefinedCell: > btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined > move ValueFalse, t1 > storeq t1, [cfr, t2, 8] >- dispatch(constexpr op_is_undefined_length) >+ disp__() > .masqueradesAsUndefined: > loadStructureWithScratch(t0, t3, t1, t5) > loadp CodeBlock[cfr], t1 >@@ -1229,11 +1220,11 @@ _llint_op_is_undefined: > cpeq Structure::m_globalObject[t3], t1, t0 > orq ValueFalse, t0 > storeq t0, [cfr, t2, 8] >- dispatch(constexpr op_is_undefined_length) >+ disp__() >+end) > > >-_llint_op_is_boolean: >- traceExecution() >+llintOp(op_is_boolean, macro (getOperand, disp__) > loadisFromInstruction(2, t1) > loadisFromInstruction(1, t2) > loadConstantOrVariable(t1, t0) >@@ -1241,22 +1232,22 @@ _llint_op_is_boolean: > tqz t0, ~1, t0 > orq ValueFalse, t0 > storeq t0, [cfr, t2, 8] >- dispatch(constexpr op_is_boolean_length) >+ disp__() >+end) > > >-_llint_op_is_number: >- traceExecution() >+llintOp(op_is_number, macro (getOperand, disp__) > loadisFromInstruction(2, 
t1) > loadisFromInstruction(1, t2) > loadConstantOrVariable(t1, t0) > tqnz t0, tagTypeNumber, t1 > orq ValueFalse, t1 > storeq t1, [cfr, t2, 8] >- dispatch(constexpr op_is_number_length) >+ disp__() >+end) > > >-_llint_op_is_cell_with_type: >- traceExecution() >+llintOp(op_is_cell_with_type, macro (getOperand, disp__) > loadisFromInstruction(3, t0) > loadisFromInstruction(2, t1) > loadisFromInstruction(1, t2) >@@ -1265,14 +1256,14 @@ _llint_op_is_cell_with_type: > cbeq JSCell::m_type[t3], t0, t1 > orq ValueFalse, t1 > storeq t1, [cfr, t2, 8] >- dispatch(constexpr op_is_cell_with_type_length) >+ disp__() > .notCellCase: > storeq ValueFalse, [cfr, t2, 8] >- dispatch(constexpr op_is_cell_with_type_length) >+ disp__() >+end) > > >-_llint_op_is_object: >- traceExecution() >+llintOp(op_is_object, macro (getOperand, disp__) > loadisFromInstruction(2, t1) > loadisFromInstruction(1, t2) > loadConstantOrVariable(t1, t0) >@@ -1280,10 +1271,11 @@ _llint_op_is_object: > cbaeq JSCell::m_type[t0], ObjectType, t1 > orq ValueFalse, t1 > storeq t1, [cfr, t2, 8] >- dispatch(constexpr op_is_object_length) >+ disp__() > .opIsObjectNotCell: > storeq ValueFalse, [cfr, t2, 8] >- dispatch(constexpr op_is_object_length) >+ disp__() >+end) > > > macro loadPropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value) >@@ -1312,8 +1304,7 @@ macro storePropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value > end > > >-_llint_op_get_by_id_direct: >- traceExecution() >+llintOp(op_get_by_id_direct, macro (getOperand, disp__) > loadisFromInstruction(2, t0) > loadConstantOrVariableCell(t0, t3, .opGetByIdDirectSlow) > loadi JSCell::m_structureID[t3], t1 >@@ -1324,15 +1315,15 @@ _llint_op_get_by_id_direct: > loadPropertyAtVariableOffset(t1, t3, t0) > storeq t0, [cfr, t2, 8] > valueProfile(t0, 6, t1) >- dispatch(constexpr op_get_by_id_direct_length) >+ disp__() > > .opGetByIdDirectSlow: > callSlowPath(_llint_slow_path_get_by_id_direct) >- dispatch(constexpr 
op_get_by_id_direct_length) >+ disp__() >+end) > > >-_llint_op_get_by_id: >- traceExecution() >+llintOp(op_get_by_id, macro (getOperand, disp__) > loadisFromInstruction(2, t0) > loadConstantOrVariableCell(t0, t3, .opGetByIdSlow) > loadi JSCell::m_structureID[t3], t1 >@@ -1343,15 +1334,15 @@ _llint_op_get_by_id: > loadPropertyAtVariableOffset(t1, t3, t0) > storeq t0, [cfr, t2, 8] > valueProfile(t0, 8, t1) >- dispatch(constexpr op_get_by_id_length) >+ disp__() > > .opGetByIdSlow: > callSlowPath(_llint_slow_path_get_by_id) >- dispatch(constexpr op_get_by_id_length) >+ disp__() >+end) > > >-_llint_op_get_by_id_proto_load: >- traceExecution() >+llintOp(op_get_by_id_proto_load, macro (getOperand, disp__) > loadisFromInstruction(2, t0) > loadConstantOrVariableCell(t0, t3, .opGetByIdProtoSlow) > loadi JSCell::m_structureID[t3], t1 >@@ -1363,15 +1354,15 @@ _llint_op_get_by_id_proto_load: > loadPropertyAtVariableOffset(t1, t3, t0) > storeq t0, [cfr, t2, 8] > valueProfile(t0, 8, t1) >- dispatch(constexpr op_get_by_id_proto_load_length) >+ disp__() > > .opGetByIdProtoSlow: > callSlowPath(_llint_slow_path_get_by_id) >- dispatch(constexpr op_get_by_id_proto_load_length) >+ disp__() >+end) > > >-_llint_op_get_by_id_unset: >- traceExecution() >+llintOp(op_get_by_id_unset, macro (getOperand, disp__) > loadisFromInstruction(2, t0) > loadConstantOrVariableCell(t0, t3, .opGetByIdUnsetSlow) > loadi JSCell::m_structureID[t3], t1 >@@ -1380,15 +1371,15 @@ _llint_op_get_by_id_unset: > loadisFromInstruction(1, t2) > storeq ValueUndefined, [cfr, t2, 8] > valueProfile(ValueUndefined, 8, t1) >- dispatch(constexpr op_get_by_id_unset_length) >+ disp__() > > .opGetByIdUnsetSlow: > callSlowPath(_llint_slow_path_get_by_id) >- dispatch(constexpr op_get_by_id_unset_length) >+ disp__() >+end) > > >-_llint_op_get_array_length: >- traceExecution() >+llintOp(op_get_array_length, macro (getOperand, disp__) > loadisFromInstruction(2, t0) > loadpFromInstruction(4, t1) > loadConstantOrVariableCell(t0, t3, 
.opGetArrayLengthSlow) >@@ -1403,15 +1394,15 @@ _llint_op_get_array_length: > orq tagTypeNumber, t0 > valueProfile(t0, 8, t2) > storeq t0, [cfr, t1, 8] >- dispatch(constexpr op_get_array_length_length) >+ disp__() > > .opGetArrayLengthSlow: > callSlowPath(_llint_slow_path_get_by_id) >- dispatch(constexpr op_get_array_length_length) >+ disp__() >+end) > > >-_llint_op_put_by_id: >- traceExecution() >+llintOp(op_put_by_id, macro (getOperand, disp__) > loadisFromInstruction(1, t3) > loadConstantOrVariableCell(t3, t0, .opPutByIdSlow) > loadisFromInstruction(4, t2) >@@ -1546,11 +1537,12 @@ _llint_op_put_by_id: > loadisFromInstruction(5, t1) > storePropertyAtVariableOffset(t1, t0, t2) > writeBarrierOnOperands(1, 3) >- dispatch(constexpr op_put_by_id_length) >+ disp__() > > .opPutByIdSlow: > callSlowPath(_llint_slow_path_put_by_id) >- dispatch(constexpr op_put_by_id_length) >+ disp__() >+end) > > > macro finishGetByVal(result, scratch) >@@ -1571,8 +1563,7 @@ macro finishDoubleGetByVal(result, scratch1, scratch2) > finishGetByVal(scratch1, scratch2) > end > >-_llint_op_get_by_val: >- traceExecution() >+llintOp(op_get_by_val, macro (getOperand, disp__) > loadisFromInstruction(2, t2) > loadConstantOrVariableCell(t2, t0, .opGetByValSlow) > loadpFromInstruction(4, t3) >@@ -1614,7 +1605,7 @@ _llint_op_get_by_val: > .opGetByValDone: > storeq t2, [cfr, t0, 8] > valueProfile(t2, 5, t0) >- dispatch(constexpr op_get_by_val_length) >+ disp__() > > .opGetByValNotIndexedStorage: > # First lets check if we even have a typed array. This lets us do some boilerplate up front. 
>@@ -1711,7 +1702,8 @@ _llint_op_get_by_val: > > .opGetByValSlow: > callSlowPath(_llint_slow_path_get_by_val) >- dispatch(constexpr op_get_by_val_length) >+ disp__() >+end) > > > macro contiguousPutByVal(storeCallback) >@@ -1806,17 +1798,18 @@ macro putByVal(slowPath) > dispatch(5) > end > >-_llint_op_put_by_val: >+llintOp(op_put_by_val, macro (getOperand, disp__) > putByVal(_llint_slow_path_put_by_val) >+end) > >-_llint_op_put_by_val_direct: >+llintOp(op_put_by_val_direct, macro (getOperand, disp__) > putByVal(_llint_slow_path_put_by_val_direct) >+end) > > >-_llint_op_jmp: >- traceExecution() >- dispatchIntIndirect(1) >- >+llintOp(op_jmp, macro (getOperand, disp__) >+ dispatchIndirect(1) >+end) > > macro jumpTrueOrFalse(conditionOp, slow) > loadisFromInstruction(1, t1) >@@ -1826,7 +1819,7 @@ macro jumpTrueOrFalse(conditionOp, slow) > dispatch(3) > > .target: >- dispatchIntIndirect(2) >+ dispatchIndirect(2) > > .slow: > callSlowPath(slow) >@@ -1844,7 +1837,7 @@ macro equalNull(cellHandler, immediateHandler) > dispatch(3) > > .target: >- dispatchIntIndirect(2) >+ dispatchIndirect(2) > > .immediate: > andq ~TagBitUndefined, t0 >@@ -1852,8 +1845,7 @@ macro equalNull(cellHandler, immediateHandler) > dispatch(3) > end > >-_llint_op_jeq_null: >- traceExecution() >+llintOp(op_jeq_null, macro (getOperand, disp__) > equalNull( > macro (structure, value, target) > btbz value, MasqueradesAsUndefined, .notMasqueradesAsUndefined >@@ -1863,10 +1855,10 @@ _llint_op_jeq_null: > .notMasqueradesAsUndefined: > end, > macro (value, target) bqeq value, ValueNull, target end) >+end) > > >-_llint_op_jneq_null: >- traceExecution() >+llintOp(op_jneq_null, macro (getOperand, disp__) > equalNull( > macro (structure, value, target) > btbz value, MasqueradesAsUndefined, target >@@ -1875,21 +1867,22 @@ _llint_op_jneq_null: > bpneq Structure::m_globalObject[structure], t0, target > end, > macro (value, target) bqneq value, ValueNull, target end) >+end) > > >-_llint_op_jneq_ptr: >- 
traceExecution() >+llintOp(op_jneq_ptr, macro (getOperand, disp__) > loadisFromInstruction(1, t0) > loadisFromInstruction(2, t1) > loadp CodeBlock[cfr], t2 > loadp CodeBlock::m_globalObject[t2], t2 > loadp JSGlobalObject::m_specialPointers[t2, t1, 8], t1 > bpneq t1, [cfr, t0, 8], .opJneqPtrTarget >- dispatch(5) >+ disp__() > > .opJneqPtrTarget: > storei 1, 32[PB, PC, 8] >- dispatchIntIndirect(3) >+ dispatchIndirect(3) >+end) > > > macro compareJump(integerCompare, doubleCompare, slowPath) >@@ -1926,7 +1919,7 @@ macro compareJump(integerCompare, doubleCompare, slowPath) > dispatch(4) > > .jumpTarget: >- dispatchIntIndirect(3) >+ dispatchIndirect(3) > > .slow: > callSlowPath(slowPath) >@@ -1943,7 +1936,7 @@ macro compareUnsignedJump(integerCompare) > dispatch(4) > > .jumpTarget: >- dispatchIntIndirect(3) >+ dispatchIndirect(3) > end > > >@@ -1960,8 +1953,7 @@ macro compareUnsigned(integerCompareAndSet) > end > > >-_llint_op_switch_imm: >- traceExecution() >+llintOp(op_switch_imm, macro (getOperand, disp__) > loadisFromInstruction(3, t2) > loadisFromInstruction(1, t3) > loadConstantOrVariable(t2, t1) >@@ -1981,15 +1973,15 @@ _llint_op_switch_imm: > .opSwitchImmNotInt: > btqnz t1, tagTypeNumber, .opSwitchImmSlow # Go slow if it's a double. 
> .opSwitchImmFallThrough: >- dispatchIntIndirect(2) >+ dispatchIndirect(2) > > .opSwitchImmSlow: > callSlowPath(_llint_slow_path_switch_imm) >- dispatch(0) >+ disp__() >+end) > > >-_llint_op_switch_char: >- traceExecution() >+llintOp(op_switch_char, macro (getOperand, disp__) > loadisFromInstruction(3, t2) > loadisFromInstruction(1, t3) > loadConstantOrVariable(t2, t1) >@@ -2018,11 +2010,12 @@ _llint_op_switch_char: > dispatch(t1) > > .opSwitchCharFallThrough: >- dispatchIntIndirect(2) >+ dispatchIndirect(2) > > .opSwitchOnRope: > callSlowPath(_llint_slow_path_switch_char) >- dispatch(0) >+ disp__() >+end) > > > macro arrayProfileForCall() >@@ -2068,16 +2061,15 @@ macro doCall(slowPath, prepareCall) > slowPathForCall(slowPath, prepareCall) > end > >-_llint_op_ret: >- traceExecution() >+llintOp(op_ret, macro (getOperand, disp__) > checkSwitchToJITForEpilogue() > loadisFromInstruction(1, t2) > loadConstantOrVariable(t2, r0) > doReturn() >+end) > > >-_llint_op_to_primitive: >- traceExecution() >+llintOp(op_to_primitive, macro (getOperand, disp__) > loadisFromInstruction(2, t2) > loadisFromInstruction(1, t3) > loadConstantOrVariable(t2, t0) >@@ -2085,14 +2077,15 @@ _llint_op_to_primitive: > bbaeq JSCell::m_type[t0], ObjectType, .opToPrimitiveSlowCase > .opToPrimitiveIsImm: > storeq t0, [cfr, t3, 8] >- dispatch(constexpr op_to_primitive_length) >+ disp__() > > .opToPrimitiveSlowCase: > callSlowPath(_slow_path_to_primitive) >- dispatch(constexpr op_to_primitive_length) >+ disp__() >+end) > > >-_llint_op_catch: >+llintOp(op_catch, macro (getOperand, disp__) > # This is where we end up from the JIT's throw trampoline (because the > # machine code return address will be set to _llint_op_catch), and from > # the interpreter's throw trampoline (see _llint_throw_trampoline). 
>@@ -2135,19 +2128,20 @@ _llint_op_catch: > > callSlowPath(_llint_slow_path_profile_catch) > >- dispatch(constexpr op_catch_length) >+ disp__() >+end) > > >-_llint_op_end: >- traceExecution() >+llintOp(op_end, macro (getOperand, disp__) > checkSwitchToJITForEpilogue() > loadisFromInstruction(1, t0) > assertNotConstant(t0) > loadq [cfr, t0, 8], r0 > doReturn() >+end) > > >-_llint_throw_from_slow_path_trampoline: >+op(llint_throw_from_slow_path_trampoline, macro (getOperand, disp__) > loadp Callee[cfr], t1 > andp MarkedBlockMask, t1 > loadp MarkedBlockFooterOffset + MarkedBlock::Footer::m_vm[t1], t1 >@@ -2162,11 +2156,13 @@ _llint_throw_from_slow_path_trampoline: > andp MarkedBlockMask, t1 > loadp MarkedBlockFooterOffset + MarkedBlock::Footer::m_vm[t1], t1 > jmp VM::targetMachinePCForThrow[t1], ExceptionHandlerPtrTag >+end) > > >-_llint_throw_during_call_trampoline: >+op(llint_throw_during_call_trampoline, macro (getOperand, disp__) > preserveReturnAddressAfterCall(t2) > jmp _llint_throw_from_slow_path_trampoline >+end) > > > macro nativeCallTrampoline(executableOffsetToFunction) >@@ -2288,62 +2284,62 @@ macro resolveScope() > end > > >-_llint_op_resolve_scope: >- traceExecution() >+llintOp(op_resolve_scope, macro (getOperand, disp__) > loadisFromInstruction(4, t0) > > #rGlobalProperty: > bineq t0, GlobalProperty, .rGlobalVar > getConstantScope(1) >- dispatch(constexpr op_resolve_scope_length) >+ disp__() > > .rGlobalVar: > bineq t0, GlobalVar, .rGlobalLexicalVar > getConstantScope(1) >- dispatch(constexpr op_resolve_scope_length) >+ disp__() > > .rGlobalLexicalVar: > bineq t0, GlobalLexicalVar, .rClosureVar > getConstantScope(1) >- dispatch(constexpr op_resolve_scope_length) >+ disp__() > > .rClosureVar: > bineq t0, ClosureVar, .rModuleVar > resolveScope() >- dispatch(constexpr op_resolve_scope_length) >+ disp__() > > .rModuleVar: > bineq t0, ModuleVar, .rGlobalPropertyWithVarInjectionChecks > getConstantScope(1) >- dispatch(constexpr op_resolve_scope_length) >+ 
disp__() > > .rGlobalPropertyWithVarInjectionChecks: > bineq t0, GlobalPropertyWithVarInjectionChecks, .rGlobalVarWithVarInjectionChecks > varInjectionCheck(.rDynamic) > getConstantScope(1) >- dispatch(constexpr op_resolve_scope_length) >+ disp__() > > .rGlobalVarWithVarInjectionChecks: > bineq t0, GlobalVarWithVarInjectionChecks, .rGlobalLexicalVarWithVarInjectionChecks > varInjectionCheck(.rDynamic) > getConstantScope(1) >- dispatch(constexpr op_resolve_scope_length) >+ disp__() > > .rGlobalLexicalVarWithVarInjectionChecks: > bineq t0, GlobalLexicalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks > varInjectionCheck(.rDynamic) > getConstantScope(1) >- dispatch(constexpr op_resolve_scope_length) >+ disp__() > > .rClosureVarWithVarInjectionChecks: > bineq t0, ClosureVarWithVarInjectionChecks, .rDynamic > varInjectionCheck(.rDynamic) > resolveScope() >- dispatch(constexpr op_resolve_scope_length) >+ disp__() > > .rDynamic: > callSlowPath(_slow_path_resolve_scope) >- dispatch(constexpr op_resolve_scope_length) >+ disp__() >+end) > > > macro loadWithStructureCheck(operand, slowPath) >@@ -2379,8 +2375,7 @@ macro getClosureVar() > storeq t0, [cfr, t1, 8] > end > >-_llint_op_get_from_scope: >- traceExecution() >+llintOp(op_get_from_scope, macro (getOperand, disp__) > loadisFromInstruction(4, t0) > andi ResolveTypeMask, t0 > >@@ -2388,12 +2383,12 @@ _llint_op_get_from_scope: > bineq t0, GlobalProperty, .gGlobalVar > loadWithStructureCheck(2, .gDynamic) > getProperty() >- dispatch(constexpr op_get_from_scope_length) >+ disp__() > > .gGlobalVar: > bineq t0, GlobalVar, .gGlobalLexicalVar > getGlobalVar(macro(v) end) >- dispatch(constexpr op_get_from_scope_length) >+ disp__() > > .gGlobalLexicalVar: > bineq t0, GlobalLexicalVar, .gClosureVar >@@ -2401,25 +2396,25 @@ _llint_op_get_from_scope: > macro (value) > bqeq value, ValueEmpty, .gDynamic > end) >- dispatch(constexpr op_get_from_scope_length) >+ disp__() > > .gClosureVar: > bineq t0, ClosureVar, 
.gGlobalPropertyWithVarInjectionChecks > loadVariable(2, t0) > getClosureVar() >- dispatch(constexpr op_get_from_scope_length) >+ disp__() > > .gGlobalPropertyWithVarInjectionChecks: > bineq t0, GlobalPropertyWithVarInjectionChecks, .gGlobalVarWithVarInjectionChecks > loadWithStructureCheck(2, .gDynamic) > getProperty() >- dispatch(constexpr op_get_from_scope_length) >+ disp__() > > .gGlobalVarWithVarInjectionChecks: > bineq t0, GlobalVarWithVarInjectionChecks, .gGlobalLexicalVarWithVarInjectionChecks > varInjectionCheck(.gDynamic) > getGlobalVar(macro(v) end) >- dispatch(constexpr op_get_from_scope_length) >+ disp__() > > .gGlobalLexicalVarWithVarInjectionChecks: > bineq t0, GlobalLexicalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks >@@ -2428,18 +2423,19 @@ _llint_op_get_from_scope: > macro (value) > bqeq value, ValueEmpty, .gDynamic > end) >- dispatch(constexpr op_get_from_scope_length) >+ disp__() > > .gClosureVarWithVarInjectionChecks: > bineq t0, ClosureVarWithVarInjectionChecks, .gDynamic > varInjectionCheck(.gDynamic) > loadVariable(2, t0) > getClosureVar() >- dispatch(constexpr op_get_from_scope_length) >+ disp__() > > .gDynamic: > callSlowPath(_llint_slow_path_get_from_scope) >- dispatch(constexpr op_get_from_scope_length) >+ disp__() >+end) > > > macro putProperty() >@@ -2488,8 +2484,7 @@ macro checkTDZInGlobalPutToScopeIfNecessary() > end > > >-_llint_op_put_to_scope: >- traceExecution() >+llintOp(op_put_to_scope, macro (getOperand, disp__) > loadisFromInstruction(4, t0) > andi ResolveTypeMask, t0 > >@@ -2498,48 +2493,48 @@ _llint_op_put_to_scope: > loadVariable(1, t0) > putLocalClosureVar() > writeBarrierOnOperands(1, 3) >- dispatch(constexpr op_put_to_scope_length) >+ disp__() > > .pGlobalProperty: > bineq t0, GlobalProperty, .pGlobalVar > loadWithStructureCheck(1, .pDynamic) > putProperty() > writeBarrierOnOperands(1, 3) >- dispatch(constexpr op_put_to_scope_length) >+ disp__() > > .pGlobalVar: > bineq t0, GlobalVar, .pGlobalLexicalVar 
> writeBarrierOnGlobalObject(3) > putGlobalVariable() >- dispatch(constexpr op_put_to_scope_length) >+ disp__() > > .pGlobalLexicalVar: > bineq t0, GlobalLexicalVar, .pClosureVar > writeBarrierOnGlobalLexicalEnvironment(3) > checkTDZInGlobalPutToScopeIfNecessary() > putGlobalVariable() >- dispatch(constexpr op_put_to_scope_length) >+ disp__() > > .pClosureVar: > bineq t0, ClosureVar, .pGlobalPropertyWithVarInjectionChecks > loadVariable(1, t0) > putClosureVar() > writeBarrierOnOperands(1, 3) >- dispatch(constexpr op_put_to_scope_length) >+ disp__() > > .pGlobalPropertyWithVarInjectionChecks: > bineq t0, GlobalPropertyWithVarInjectionChecks, .pGlobalVarWithVarInjectionChecks > loadWithStructureCheck(1, .pDynamic) > putProperty() > writeBarrierOnOperands(1, 3) >- dispatch(constexpr op_put_to_scope_length) >+ disp__() > > .pGlobalVarWithVarInjectionChecks: > bineq t0, GlobalVarWithVarInjectionChecks, .pGlobalLexicalVarWithVarInjectionChecks > writeBarrierOnGlobalObject(3) > varInjectionCheck(.pDynamic) > putGlobalVariable() >- dispatch(constexpr op_put_to_scope_length) >+ disp__() > > .pGlobalLexicalVarWithVarInjectionChecks: > bineq t0, GlobalLexicalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks >@@ -2547,7 +2542,7 @@ _llint_op_put_to_scope: > varInjectionCheck(.pDynamic) > checkTDZInGlobalPutToScopeIfNecessary() > putGlobalVariable() >- dispatch(constexpr op_put_to_scope_length) >+ disp__() > > .pClosureVarWithVarInjectionChecks: > bineq t0, ClosureVarWithVarInjectionChecks, .pModuleVar >@@ -2555,51 +2550,51 @@ _llint_op_put_to_scope: > loadVariable(1, t0) > putClosureVar() > writeBarrierOnOperands(1, 3) >- dispatch(constexpr op_put_to_scope_length) >+ disp__() > > .pModuleVar: > bineq t0, ModuleVar, .pDynamic > callSlowPath(_slow_path_throw_strict_mode_readonly_property_write_error) >- dispatch(constexpr op_put_to_scope_length) >+ disp__() > > .pDynamic: > callSlowPath(_llint_slow_path_put_to_scope) >- dispatch(constexpr op_put_to_scope_length) >+ 
disp__() >+end) > > >-_llint_op_get_from_arguments: >- traceExecution() >+llintOp(op_get_from_arguments, macro (getOperand, disp__) > loadVariable(2, t0) > loadi 24[PB, PC, 8], t1 > loadq DirectArguments_storage[t0, t1, 8], t0 > valueProfile(t0, 4, t1) > loadisFromInstruction(1, t1) > storeq t0, [cfr, t1, 8] >- dispatch(constexpr op_get_from_arguments_length) >+ disp__() >+end) > > >-_llint_op_put_to_arguments: >- traceExecution() >+llintOp(op_put_to_arguments, macro (getOperand, disp__) > loadVariable(1, t0) > loadi 16[PB, PC, 8], t1 > loadisFromInstruction(3, t3) > loadConstantOrVariable(t3, t2) > storeq t2, DirectArguments_storage[t0, t1, 8] > writeBarrierOnOperands(1, 3) >- dispatch(constexpr op_put_to_arguments_length) >+ disp__() >+end) > > >-_llint_op_get_parent_scope: >- traceExecution() >+llintOp(op_get_parent_scope, macro (getOperand, disp__) > loadVariable(2, t0) > loadp JSScope::m_next[t0], t0 > loadisFromInstruction(1, t1) > storeq t0, [cfr, t1, 8] >- dispatch(constexpr op_get_parent_scope_length) >+ disp__() >+end) > > >-_llint_op_profile_type: >- traceExecution() >+llintOp(op_profile_type, macro (getOperand, disp__) > loadp CodeBlock[cfr], t1 > loadp CodeBlock::m_poisonedVM[t1], t1 > unpoison(_g_CodeBlockPoison, t1, t3) >@@ -2637,17 +2632,18 @@ _llint_op_profile_type: > callSlowPath(_slow_path_profile_type_clear_log) > > .opProfileTypeDone: >- dispatch(constexpr op_profile_type_length) >+ disp__() >+end) > >-_llint_op_profile_control_flow: >- traceExecution() >+ >+llintOp(op_profile_control_flow, macro (getOperand, disp__) > loadpFromInstruction(1, t0) > addq 1, BasicBlockLocation::m_executionCount[t0] >- dispatch(constexpr op_profile_control_flow_length) >+ disp__() >+end) > > >-_llint_op_get_rest_length: >- traceExecution() >+llintOp(op_get_rest_length, macro (getOperand, disp__) > loadi PayloadOffset + ArgumentCount[cfr], t0 > subi 1, t0 > loadisFromInstruction(2, t1) >@@ -2660,11 +2656,11 @@ _llint_op_get_rest_length: > orq tagTypeNumber, t0 > 
loadisFromInstruction(1, t1) > storeq t0, [cfr, t1, 8] >- dispatch(constexpr op_get_rest_length_length) >+ disp__() >+end) > > >-_llint_op_log_shadow_chicken_prologue: >- traceExecution() >+llintOp(op_log_shadow_chicken_prologue, macro (getOperand, disp__) > acquireShadowChickenPacket(.opLogShadowChickenPrologueSlow) > storep cfr, ShadowChicken::Packet::frame[t0] > loadp CallerFrame[cfr], t1 >@@ -2673,14 +2669,14 @@ _llint_op_log_shadow_chicken_prologue: > storep t1, ShadowChicken::Packet::callee[t0] > loadVariable(1, t1) > storep t1, ShadowChicken::Packet::scope[t0] >- dispatch(constexpr op_log_shadow_chicken_prologue_length) >+ disp__() > .opLogShadowChickenPrologueSlow: > callSlowPath(_llint_slow_path_log_shadow_chicken_prologue) >- dispatch(constexpr op_log_shadow_chicken_prologue_length) >+ disp__() >+end) > > >-_llint_op_log_shadow_chicken_tail: >- traceExecution() >+llintOp(op_log_shadow_chicken_tail, macro (getOperand, disp__) > acquireShadowChickenPacket(.opLogShadowChickenTailSlow) > storep cfr, ShadowChicken::Packet::frame[t0] > storep ShadowChickenTailMarker, ShadowChicken::Packet::callee[t0] >@@ -2691,7 +2687,8 @@ _llint_op_log_shadow_chicken_tail: > loadp CodeBlock[cfr], t1 > storep t1, ShadowChicken::Packet::codeBlock[t0] > storei PC, ShadowChicken::Packet::callSiteIndex[t0] >- dispatch(constexpr op_log_shadow_chicken_tail_length) >+ disp__() > .opLogShadowChickenTailSlow: > callSlowPath(_llint_slow_path_log_shadow_chicken_tail) >- dispatch(constexpr op_log_shadow_chicken_tail_length) >+ disp__() >+end) >diff --git a/Source/JavaScriptCore/offlineasm/asm.rb b/Source/JavaScriptCore/offlineasm/asm.rb >index 06041497423eb4c5767d52fa894f914f53953c2b..8336277a87c04719e2ab80d982eb8c1fab49564d 100644 >--- a/Source/JavaScriptCore/offlineasm/asm.rb >+++ b/Source/JavaScriptCore/offlineasm/asm.rb >@@ -371,12 +371,13 @@ File.open(outputFlnm, "w") { > $asm = Assembler.new($output) > > ast = parse(asmFile) >+ settingsCombinations = computeSettingsCombinations(ast) 
> > configurationList.each { > | configuration | > offsetsList = configuration[0] > configIndex = configuration[1] >- forSettings(computeSettingsCombinations(ast)[configIndex], ast) { >+ forSettings(settingsCombinations[configIndex], ast) { > | concreteSettings, lowLevelAST, backend | > > # There could be multiple backends we are generating for, but the C_LOOP is >@@ -386,10 +387,17 @@ File.open(outputFlnm, "w") { > $enableDebugAnnotations = false > end > >+ lowLevelAST = lowLevelAST.demacroify({}) > lowLevelAST = lowLevelAST.resolve(buildOffsetsMap(lowLevelAST, offsetsList)) > lowLevelAST.validate > emitCodeInConfiguration(concreteSettings, lowLevelAST, backend) { > $asm.inAsm { >+ $wideOpcodes = false >+ lowLevelAST.lower(backend) >+ } >+ >+ $asm.inAsm { >+ $wideOpcodes = true > lowLevelAST.lower(backend) > } > } >diff --git a/Source/JavaScriptCore/offlineasm/ast.rb b/Source/JavaScriptCore/offlineasm/ast.rb >index 0ccf7b331bbb30ee11c976c08eb6b29660d8de15..b6b1ebcabe2fef7181227f3cc2dd94a1b2a8ef91 100644 >--- a/Source/JavaScriptCore/offlineasm/ast.rb >+++ b/Source/JavaScriptCore/offlineasm/ast.rb >@@ -73,6 +73,18 @@ class Node > def filter(type) > flatten.select{|v| v.is_a? type} > end >+ >+ def empty? 
>+ false >+ end >+ >+ def to_json(options={}) >+ hash = {} >+ self.instance_variables.each do |var| >+ hash[var] = self.instance_variable_get var >+ end >+ hash.to_json(options) >+ end > end > > class NoChildren < Node >@@ -910,7 +922,7 @@ class Instruction < Node > end > > def children >- operands >+ @operands > end > > def mapChildren(&proc) >@@ -961,7 +973,7 @@ class Error < NoChildren > end > > class ConstExpr < NoChildren >- attr_reader :variable, :value >+ attr_reader :value > > def initialize(codeOrigin, value) > super(codeOrigin) >@@ -1016,8 +1028,6 @@ $labelMapping = {} > $referencedExternLabels = Array.new > > class Label < NoChildren >- attr_reader :name >- > def initialize(codeOrigin, name) > super(codeOrigin) > @name = name >@@ -1076,6 +1086,10 @@ class Label < NoChildren > @global > end > >+ def name >+ $wideOpcodes ? "#{@name}_wide" : @name >+ end >+ > def dump > "#{name}:" > end >@@ -1250,6 +1264,10 @@ class Sequence < Node > def dump > list.collect{|v| v.dump}.join("\n") > end >+ >+ def empty? >+ list.all?(&:empty?) >+ end > end > > class True < NoChildren >@@ -1399,6 +1417,10 @@ class Skip < NoChildren > def dump > "\tskip" > end >+ >+ def empty? >+ true >+ end > end > > class IfThenElse < Node >@@ -1421,12 +1443,18 @@ class IfThenElse < Node > end > > def mapChildren >- IfThenElse.new(codeOrigin, (yield @predicate), (yield @thenCase), (yield @elseCase)) >+ ifThenElse = IfThenElse.new(codeOrigin, (yield @predicate), (yield @thenCase)) >+ ifThenElse.elseCase = yield @elseCase >+ ifThenElse > end > > def dump > "if #{predicate.dump}\n" + thenCase.dump + "\nelse\n" + elseCase.dump + "\nend" > end >+ >+ def empty? >+ @thenCase.empty? && @elseCase.empty? 
>+ end > end > > class Macro < Node >diff --git a/Source/JavaScriptCore/offlineasm/cloop.rb b/Source/JavaScriptCore/offlineasm/cloop.rb >index 870525922f02a4447e8732f99a0d8bfe5d186cc4..9dd818dc623d7e7f02e2384e5649a0fa04525324 100644 >--- a/Source/JavaScriptCore/offlineasm/cloop.rb >+++ b/Source/JavaScriptCore/offlineasm/cloop.rb >@@ -222,7 +222,7 @@ class Address > "*CAST<NativeFunction*>(#{pointerExpr})" > end > def opcodeMemRef >- "*CAST<Opcode*>(#{pointerExpr})" >+ "*CAST<OpcodeID*>(#{pointerExpr})" > end > def dblMemRef > "*CAST<double*>(#{pointerExpr})" >@@ -286,7 +286,7 @@ class BaseIndex > "*CAST<uintptr_t*>(#{pointerExpr})" > end > def opcodeMemRef >- "*CAST<Opcode*>(#{pointerExpr})" >+ "*CAST<OpcodeID*>(#{pointerExpr})" > end > def dblMemRef > "*CAST<double*>(#{pointerExpr})" >@@ -1077,7 +1077,7 @@ class Instruction > # as an opcode dispatch. > when "cloopCallJSFunction" > uid = $asm.newUID >- $asm.putc "lr.opcode = getOpcode(llint_cloop_did_return_from_js_#{uid});" >+ $asm.putc "lr.opcode = llint_cloop_did_return_from_js_#{uid};" > $asm.putc "opcode = #{operands[0].clValue(:opcode)};" > $asm.putc "DISPATCH_OPCODE();" > $asm.putsLabel("llint_cloop_did_return_from_js_#{uid}", false) >diff --git a/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb b/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb >index fff398255f678dd2db422de2491fb92a7b099c24..58fba225b550a35928998c4867db67ed7553c078 100644 >--- a/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb >+++ b/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb >@@ -37,11 +37,21 @@ require "transform" > IncludeFile.processIncludeOptions() > > inputFlnm = ARGV.shift >+settingsFlnm = ARGV.shift > outputFlnm = ARGV.shift > >+$stderr.puts settingsFlnm >+ > validBackends = canonicalizeBackendNames(ARGV.shift.split(/[,\s]+/)) > includeOnlyBackends(validBackends) > >+begin >+ configurationList = configurationIndices(settingsFlnm) >+rescue MissingMagicValuesException >+ 
$stderr.puts "offlineasm: No magic values found. Skipping assembly file generation." >+ exit 1 >+end >+ > def emitMagicNumber > OFFSET_MAGIC_NUMBERS.each { > | number | >@@ -49,7 +59,8 @@ def emitMagicNumber > } > end > >-inputHash = "// offlineasm input hash: #{parseHash(inputFlnm)} #{selfHash}" >+configurationHash = Digest::SHA1.hexdigest(configurationList.join(' ')) >+inputHash = "// offlineasm input hash: #{parseHash(inputFlnm)} #{configurationHash} #{selfHash}" > > if FileTest.exist? outputFlnm > File.open(outputFlnm, "r") { >@@ -71,13 +82,22 @@ originalAST = parse(inputFlnm) > # > > class Node >+ def offsetsPrune >+ mapChildren { >+ | child | >+ child.offsetsPrune >+ } >+ end >+ > def offsetsPruneTo(sequence) > children.each { > | child | > child.offsetsPruneTo(sequence) > } > end >- >+end >+ >+class Sequence > def offsetsPrune > result = Sequence.new(codeOrigin, []) > offsetsPruneTo(result) >@@ -86,10 +106,14 @@ class Node > end > > class IfThenElse >- def offsetsPruneTo(sequence) >+ def offsetsPrune > ifThenElse = IfThenElse.new(codeOrigin, predicate, thenCase.offsetsPrune) > ifThenElse.elseCase = elseCase.offsetsPrune >- sequence.list << ifThenElse >+ ifThenElse >+ end >+ >+ def offsetsPruneTo(sequence) >+ sequence.list << offsetsPrune > end > end > >@@ -111,59 +135,65 @@ class ConstExpr > end > end > >+class Macro >+ def offsetsPrune >+ self >+ end >+ >+ def offsetsPruneTo(sequence) >+ sequence.list << offsetsPrune >+ end >+end >+ >+class MacroCall >+ def offsetsPruneTo(sequence) >+ sequence.list << offsetsPrune >+ end >+end >+ >+ > prunedAST = originalAST.offsetsPrune >+settingsCombinations = computeSettingsCombinations(prunedAST) > > File.open(outputFlnm, "w") { > | outp | > $output = outp > outp.puts inputHash >- length = 0 >- >- emitCodeInAllConfigurations(prunedAST) { >- | settings, ast, backend, index | >- constsList = ast.filter(ConstExpr).uniq.sort > >- constsList.each_with_index { >- | const, index | >- outp.puts "constexpr int64_t 
constValue#{index} = static_cast<int64_t>(#{const.value});" >- } >- } >- >- emitCodeInAllConfigurations(prunedAST) { >- | settings, ast, backend, index | >- offsetsList = ast.filter(StructOffset).uniq.sort >- sizesList = ast.filter(Sizeof).uniq.sort >- constsList = ast.filter(ConstExpr).uniq.sort >- length += OFFSET_HEADER_MAGIC_NUMBERS.size + (OFFSET_MAGIC_NUMBERS.size + 1) * (1 + offsetsList.size + sizesList.size + constsList.size) >- } >- outp.puts "static const int64_t extractorTable[#{length}] = {" >- emitCodeInAllConfigurations(prunedAST) { >- | settings, ast, backend, index | >- OFFSET_HEADER_MAGIC_NUMBERS.each { >- | number | >- $output.puts "unsigned(#{number})," >- } >- >- offsetsList = ast.filter(StructOffset).uniq.sort >- sizesList = ast.filter(Sizeof).uniq.sort >- constsList = ast.filter(ConstExpr).uniq.sort >- >- emitMagicNumber >- outp.puts "#{index}," >- offsetsList.each { >- | offset | >- emitMagicNumber >- outp.puts "OFFLINE_ASM_OFFSETOF(#{offset.struct}, #{offset.field})," >- } >- sizesList.each { >- | sizeof | >- emitMagicNumber >- outp.puts "sizeof(#{sizeof.struct})," >- } >- constsList.each_index { >- | index | >- emitMagicNumber >- outp.puts "constValue#{index}," >+ outp.puts "static const int64_t extractorTable[] = {" >+ configurationList.each { >+ |configIndex| >+ forSettings(settingsCombinations[configIndex], prunedAST) { >+ |concreteSettings, lowLevelAST, backend| >+ emitCodeInConfiguration(concreteSettings, lowLevelAST, backend) { >+ OFFSET_HEADER_MAGIC_NUMBERS.each { >+ | number | >+ outp.puts "unsigned(#{number})," >+ } >+ >+ lowLevelAST = lowLevelAST.demacroify({}) >+ offsetsList = offsetsList(lowLevelAST) >+ sizesList = sizesList(lowLevelAST) >+ constsList = constsList(lowLevelAST) >+ >+ emitMagicNumber >+ outp.puts "#{configIndex}," >+ offsetsList.each { >+ | offset | >+ emitMagicNumber >+ outp.puts "OFFLINE_ASM_OFFSETOF(#{offset.struct}, #{offset.field})," >+ } >+ sizesList.each { >+ | sizeof | >+ emitMagicNumber >+ outp.puts 
"sizeof(#{sizeof.struct})," >+ } >+ constsList.each { >+ | const | >+ emitMagicNumber >+ outp.puts "static_cast<int64_t>(#{const.value})," >+ } >+ } > } > } > outp.puts "};" >diff --git a/Source/JavaScriptCore/offlineasm/generate_settings_extractor.rb b/Source/JavaScriptCore/offlineasm/generate_settings_extractor.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..01c7dd5540e23d7da44c4b44ac718633b66db9a7 >--- /dev/null >+++ b/Source/JavaScriptCore/offlineasm/generate_settings_extractor.rb >@@ -0,0 +1,80 @@ >+#!/usr/bin/env ruby >+ >+# Copyright (C) 2011 Apple Inc. All rights reserved. >+# >+# Redistribution and use in source and binary forms, with or without >+# modification, are permitted provided that the following conditions >+# are met: >+# 1. Redistributions of source code must retain the above copyright >+# notice, this list of conditions and the following disclaimer. >+# 2. Redistributions in binary form must reproduce the above copyright >+# notice, this list of conditions and the following disclaimer in the >+# documentation and/or other materials provided with the distribution. >+# >+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' >+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, >+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS >+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR >+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF >+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS >+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN >+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) >+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF >+# THE POSSIBILITY OF SUCH DAMAGE. 
>+ >+$: << File.dirname(__FILE__) >+ >+require "config" >+require "backends" >+require "digest/sha1" >+require "offsets" >+require "parser" >+require "self_hash" >+require "settings" >+require "transform" >+ >+IncludeFile.processIncludeOptions() >+ >+inputFlnm = ARGV.shift >+outputFlnm = ARGV.shift >+ >+validBackends = canonicalizeBackendNames(ARGV.shift.split(/[,\s]+/)) >+includeOnlyBackends(validBackends) >+ >+inputHash = "// offlineasm input hash: #{parseHash(inputFlnm)} #{selfHash}" >+ >+if FileTest.exist? outputFlnm >+ File.open(outputFlnm, "r") { >+ | inp | >+ firstLine = inp.gets >+ if firstLine and firstLine.chomp == inputHash >+ $stderr.puts "offlineasm: Nothing changed." >+ exit 0 >+ end >+ } >+end >+ >+originalAST = parse(inputFlnm) >+prunedAST = Sequence.new(originalAST.codeOrigin, originalAST.filter(Setting)) >+ >+File.open(outputFlnm, "w") { >+ | outp | >+ $output = outp >+ outp.puts inputHash >+ >+ settingsCombinations = computeSettingsCombinations(prunedAST) >+ length = settingsCombinations.size * (1 + OFFSET_HEADER_MAGIC_NUMBERS.size) >+ >+ outp.puts "static const int64_t extractorTable[#{length}] = {" >+ emitCodeInAllConfigurations(prunedAST) { >+ | settings, ast, backend, index | >+ OFFSET_HEADER_MAGIC_NUMBERS.each { >+ | number | >+ $output.puts "unsigned(#{number})," >+ } >+ outp.puts "#{index}," >+ } >+ outp.puts "};" >+ >+} >diff --git a/Source/JavaScriptCore/offlineasm/offsets.rb b/Source/JavaScriptCore/offlineasm/offsets.rb >index 2c1c1b773ca0a5875b574b62d711cd69cf527213..fe6b331f8d58835507a4891049bafcc84e64c42e 100644 >--- a/Source/JavaScriptCore/offlineasm/offsets.rb >+++ b/Source/JavaScriptCore/offlineasm/offsets.rb >@@ -56,65 +56,53 @@ def constsList(ast) > ast.filter(ConstExpr).uniq.sort > end > >-# >-# offsetsAndConfigurationIndex(ast, file) -> >-# [[offsets, index], ...] >-# >-# Parses the offsets from a file and returns a list of offsets and the >-# index of the configuration that is valid in this build target. 
>-# >- >-def offsetsAndConfigurationIndex(file) >- endiannessMarkerBytes = nil >- result = {} >- >- def readInt(endianness, bytes) >- if endianness == :little >- # Little endian >- number = (bytes[0] << 0 | >- bytes[1] << 8 | >- bytes[2] << 16 | >- bytes[3] << 24 | >- bytes[4] << 32 | >- bytes[5] << 40 | >- bytes[6] << 48 | >- bytes[7] << 56) >- else >- # Big endian >- number = (bytes[0] << 56 | >- bytes[1] << 48 | >- bytes[2] << 40 | >- bytes[3] << 32 | >- bytes[4] << 24 | >- bytes[5] << 16 | >- bytes[6] << 8 | >- bytes[7] << 0) >- end >- if number > 0x7fffffff_ffffffff >- number -= 1 << 64 >- end >- number >+def readInt(endianness, bytes) >+ if endianness == :little >+ # Little endian >+ number = (bytes[0] << 0 | >+ bytes[1] << 8 | >+ bytes[2] << 16 | >+ bytes[3] << 24 | >+ bytes[4] << 32 | >+ bytes[5] << 40 | >+ bytes[6] << 48 | >+ bytes[7] << 56) >+ else >+ # Big endian >+ number = (bytes[0] << 56 | >+ bytes[1] << 48 | >+ bytes[2] << 40 | >+ bytes[3] << 32 | >+ bytes[4] << 24 | >+ bytes[5] << 16 | >+ bytes[6] << 8 | >+ bytes[7] << 0) > end >- >- def prepareMagic(endianness, numbers) >- magicBytes = [] >- numbers.each { >- | number | >- currentBytes = [] >- 8.times { >- currentBytes << (number & 0xff) >- number >>= 8 >- } >- if endianness == :big >- currentBytes.reverse! >- end >- magicBytes += currentBytes >- } >- magicBytes >+ if number > 0x7fffffff_ffffffff >+ number -= 1 << 64 > end >- >+ number >+end >+ >+def prepareMagic(endianness, numbers) >+ magicBytes = [] >+ numbers.each { >+ | number | >+ currentBytes = [] >+ 8.times { >+ currentBytes << (number & 0xff) >+ number >>= 8 >+ } >+ if endianness == :big >+ currentBytes.reverse! 
>+ end >+ magicBytes += currentBytes >+ } >+ magicBytes >+end >+ >+def fileBytes(file) > fileBytes = [] >- > File.open(file, "rb") { > | inp | > loop { >@@ -123,36 +111,50 @@ def offsetsAndConfigurationIndex(file) > fileBytes << byte > } > } >- >- def sliceByteArrays(byteArray, pattern) >- result = [] >- lastSlicePoint = 0 >- (byteArray.length - pattern.length + 1).times { >- | index | >- foundOne = true >- pattern.length.times { >- | subIndex | >- if byteArray[index + subIndex] != pattern[subIndex] >- foundOne = false >- break >- end >- } >- if foundOne >- result << byteArray[lastSlicePoint...index] >- lastSlicePoint = index + pattern.length >+ fileBytes >+end >+ >+def sliceByteArrays(byteArray, pattern) >+ result = [] >+ lastSlicePoint = 0 >+ (byteArray.length - pattern.length + 1).times { >+ | index | >+ foundOne = true >+ pattern.length.times { >+ | subIndex | >+ if byteArray[index + subIndex] != pattern[subIndex] >+ foundOne = false >+ break > end > } >- >- result << byteArray[lastSlicePoint...(byteArray.length)] >- >- result >- end >- >+ if foundOne >+ result << byteArray[lastSlicePoint...index] >+ lastSlicePoint = index + pattern.length >+ end >+ } >+ >+ result << byteArray[lastSlicePoint...(byteArray.length)] >+ >+ result >+end >+ >+# >+# offsetsAndConfigurationIndex(ast, file) -> >+# [[offsets, index], ...] >+# >+# Parses the offsets from a file and returns a list of offsets and the >+# index of the configuration that is valid in this build target. 
>+# >+ >+def offsetsAndConfigurationIndex(file) >+ fileBytes = fileBytes(file) >+ result = {} >+ > [:little, :big].each { > | endianness | > headerMagicBytes = prepareMagic(endianness, OFFSET_HEADER_MAGIC_NUMBERS) > magicBytes = prepareMagic(endianness, OFFSET_MAGIC_NUMBERS) >- >+ > bigArray = sliceByteArrays(fileBytes, headerMagicBytes) > unless bigArray.size <= 1 > bigArray[1..-1].each { >@@ -168,9 +170,9 @@ def offsetsAndConfigurationIndex(file) > } > end > } >- >+ > raise MissingMagicValuesException unless result.length >= 1 >- >+ > # result is {index1=>offsets1, index2=>offsets2} but we want to return > # [[offsets1, index1], [offsets2, index2]]. > return result.map { >@@ -179,6 +181,28 @@ def offsetsAndConfigurationIndex(file) > } > end > >+def configurationIndices(file) >+ fileBytes = fileBytes(file) >+ result = [] >+ >+ [:little, :big].each { >+ | endianness | >+ headerMagicBytes = prepareMagic(endianness, OFFSET_HEADER_MAGIC_NUMBERS) >+ >+ bigArray = sliceByteArrays(fileBytes, headerMagicBytes) >+ unless bigArray.size <= 1 >+ bigArray[1..-1].each { >+ | configArray | >+ result << readInt(endianness, configArray) >+ } >+ end >+ } >+ >+ raise MissingMagicValuesException unless result.length >= 1 >+ >+ return result >+end >+ > # > # buildOffsetsMap(ast, extractedConstants) -> map > # >diff --git a/Source/JavaScriptCore/offlineasm/parser.rb b/Source/JavaScriptCore/offlineasm/parser.rb >index 3869e6c3fe1ed3c0a7deb0d62aa27736dc8b8adf..580743ade92e8e7c71e3f7a26852a667a999900a 100644 >--- a/Source/JavaScriptCore/offlineasm/parser.rb >+++ b/Source/JavaScriptCore/offlineasm/parser.rb >@@ -177,11 +177,11 @@ def lex(str, file) > end > result << Token.new(CodeOrigin.new(file, lineNumber), $&) > lineNumber += 1 >- when /\A[a-zA-Z]([a-zA-Z0-9_.]*)/ >+ when /\A[a-zA-Z%]([a-zA-Z0-9_.%]*)/ > result << Token.new(CodeOrigin.new(file, lineNumber), $&) > when /\A\.([a-zA-Z0-9_]*)/ > result << Token.new(CodeOrigin.new(file, lineNumber), $&) >- when /\A_([a-zA-Z0-9_]*)/ >+ when 
/\A_([a-zA-Z0-9_%]*)/ > result << Token.new(CodeOrigin.new(file, lineNumber), $&) > when /\A([ \t]+)/ > # whitespace, ignore >@@ -228,11 +228,11 @@ def isKeyword(token) > end > > def isIdentifier(token) >- token =~ /\A[a-zA-Z]([a-zA-Z0-9_.]*)\Z/ and not isKeyword(token) >+ token =~ /\A[a-zA-Z%]([a-zA-Z0-9_.%]*)\Z/ and not isKeyword(token) > end > > def isLabel(token) >- token =~ /\A_([a-zA-Z0-9_]*)\Z/ >+ token =~ /\A_([a-zA-Z0-9_%]*)\Z/ > end > > def isLocalLabel(token) >diff --git a/Source/JavaScriptCore/offlineasm/settings.rb b/Source/JavaScriptCore/offlineasm/settings.rb >index eec092584fecf441619bd0f87de9ffa08e797d05..0647c82bae8f53c22fd115ae27a31073fba53749 100644 >--- a/Source/JavaScriptCore/offlineasm/settings.rb >+++ b/Source/JavaScriptCore/offlineasm/settings.rb >@@ -90,7 +90,6 @@ end > > def forSettings(concreteSettings, ast) > # Check which architectures this combinator claims to support. >- numClaimedBackends = 0 > selectedBackend = nil > BACKENDS.each { > | backend | >diff --git a/Source/JavaScriptCore/offlineasm/transform.rb b/Source/JavaScriptCore/offlineasm/transform.rb >index 2a082555b74a9fc21b5570117f5537ec15affecf..203e2e07eab1e5bc3180bbcc6e06254c579643de 100644 >--- a/Source/JavaScriptCore/offlineasm/transform.rb >+++ b/Source/JavaScriptCore/offlineasm/transform.rb >@@ -118,7 +118,7 @@ class Node > child.demacroify(macros) > } > end >- >+ > def substitute(mapping) > mapChildren { > | child | >@@ -150,9 +150,16 @@ class Macro > end > end > >+ >+$concatenation = /%([a-zA-Z_]+)%/ > class Variable > def substitute(mapping) >- if mapping[self] >+ if @name =~ $concatenation >+ name = @name.gsub($concatenation) { |match| >+ Variable.forName(codeOrigin, match[1...-1]).substitute(mapping).dump >+ } >+ Variable.forName(codeOrigin, name) >+ elsif mapping[self] > mapping[self] > else > self >@@ -160,6 +167,19 @@ class Variable > end > end > >+class ConstExpr >+ def substitute(mapping) >+ if @value =~ $concatenation >+ value = @value.gsub($concatenation) { 
|match| >+ Variable.forName(codeOrigin, match[1...-1]).substitute(mapping).dump >+ } >+ ConstExpr.forName(codeOrigin, value) >+ else >+ self >+ end >+ end >+end >+ > class LocalLabel > def substituteLabels(mapping) > if mapping[self] >@@ -215,7 +235,7 @@ class Sequence > mapping = {} > myMyMacros = myMacros.dup > raise "Could not find macro #{item.name} at #{item.codeOriginString}" unless myMacros[item.name] >- raise "Argument count mismatch for call to #{item.name} at #{item.codeOriginString}" unless item.operands.size == myMacros[item.name].variables.size >+ raise "Argument count mismatch for call to #{item.name} at #{item.codeOriginString} (expected #{myMacros[item.name].variables.size} but got #{item.operands.size} arguments for macro #{item.name} defined at #{myMacros[item.name].codeOrigin})" unless item.operands.size == myMacros[item.name].variables.size > item.operands.size.times { > | idx | > if item.operands[idx].is_a? Variable and myMacros[item.operands[idx].name] >@@ -520,3 +540,102 @@ class Skip > end > end > >+ >+# >+# node.commuteMacros >+# >+# bring up macros from inside if statements >+# >+ >+class Node >+ def commuteMacros >+ mapChildren { >+ | child | >+ child.commuteMacros >+ } >+ end >+ def splitMacros >+ [self, []] >+ end >+end >+ >+class Sequence >+ def splitMacros >+ macros, children = flattenChildren.partition { |c| c.is_a? Macro } >+ left = children.empty? ? Skip.new(codeOrigin) : Sequence.new(codeOrigin, children) >+ [left, macros] >+ end >+ >+ def flattenSequences >+ Sequence.new codeOrigin, flattenChildren >+ end >+ >+ def flattenChildren >+ children.map do |c| >+ if c.is_a? 
Sequence >+ c.flattenChildren >+ else >+ [c] >+ end >+ end.flatten(1) >+ end >+end >+ >+class Macro >+ def injectIf(predicate) >+ ifThenElse = IfThenElse.new(codeOrigin, predicate, body) >+ body = Sequence.new(codeOrigin, [ifThenElse]) >+ Macro.new(codeOrigin, name, variables, body) >+ end >+ >+ def injectElse(predicate) >+ ifThenElse = IfThenElse.new(codeOrigin, predicate, Skip.new(codeOrigin)) >+ ifThenElse.elseCase = body >+ body = Sequence.new(codeOrigin, [ifThenElse]) >+ Macro.new(codeOrigin, name, variables, body) >+ end >+end >+ >+class IfThenElse >+ def commuteMacros >+ thenCase, thenMacros = @thenCase.commuteMacros.splitMacros >+ ifThenElse = IfThenElse.new(codeOrigin, @predicate, thenCase) >+ if @elseCase >+ ifThenElse.elseCase, elseMacros = @elseCase.commuteMacros.splitMacros >+ thenMacros.sort! { |a, b| a.name <=> b.name } >+ elseMacros.sort! { |a, b| a.name <=> b.name } >+ i = j = 0 >+ macros = [] >+ while i < thenMacros.length || j < elseMacros.length >+ if i < thenMacros.length && j < elseMacros.length && thenMacros[i].name == elseMacros[j].name >+ # assert(thenMacros[i].variables == elseMacros[j].variables) >+ macros << ifThenElse.injectIntoMacros(thenMacros[i], elseMacros[j]) >+ i += 1 >+ j += 1 >+ elsif j >= elseMacros.length || (i < thenMacros.length && thenMacros[i].name < elseMacros[j].name) >+ macros << thenMacros[i].injectIf(predicate) >+ i += 1 >+ else >+ macros << elseMacros[j].injectElse(predicate) >+ j += 1 >+ end >+ end >+ else >+ macros = thenMacros.map { |m| m.injectIf(@predicate) } >+ end >+ >+ unless ifThenElse.thenCase.empty? && ifThenElse.elseCase.empty? 
>+ macros << ifThenElse >+ end >+ >+ return Sequence.new(codeOrigin, macros) >+ end >+ >+ def injectIntoMacros(ifMacro, elseMacro) >+ # TODO: elseMacro.body[ifMacro.variables/elseMacro.variables] >+ ifThenElse = IfThenElse.new(codeOrigin, predicate, ifMacro.body) >+ ifThenElse.elseCase = elseMacro.body >+ body = Sequence.new(codeOrigin, [ifThenElse]) >+ Macro.new(codeOrigin, ifMacro.name, ifMacro.variables, body) >+ end >+end >diff --git a/Source/JavaScriptCore/profiler/ProfilerBytecodeSequence.cpp b/Source/JavaScriptCore/profiler/ProfilerBytecodeSequence.cpp >index 6e93ce810011618e8d4c8b80e670d83e8e18a129..f5054f8aa8fe08f49b8848ee5503900991620ae9 100644 >--- a/Source/JavaScriptCore/profiler/ProfilerBytecodeSequence.cpp >+++ b/Source/JavaScriptCore/profiler/ProfilerBytecodeSequence.cpp >@@ -55,7 +55,7 @@ BytecodeSequence::BytecodeSequence(CodeBlock* codeBlock) > for (unsigned bytecodeIndex = 0; bytecodeIndex < codeBlock->instructions().size();) { > out.reset(); > codeBlock->dumpBytecode(out, bytecodeIndex, statusMap); >- OpcodeID opcodeID = Interpreter::getOpcodeID(codeBlock->instructions()[bytecodeIndex].u.opcode); >+ OpcodeID opcodeID = codeBlock->instructions()[bytecodeIndex].u.opcode; > m_sequence.append(Bytecode(bytecodeIndex, opcodeID, out.toCString())); > bytecodeIndex += opcodeLength(opcodeID); > } >diff --git a/Source/JavaScriptCore/runtime/CommonSlowPaths.h b/Source/JavaScriptCore/runtime/CommonSlowPaths.h >index 1ece89592cd63118dd9b89f1b96bd008dd0ab5ed..52614165f35b63c3ad072ce01372f23390960346 100644 >--- a/Source/JavaScriptCore/runtime/CommonSlowPaths.h >+++ b/Source/JavaScriptCore/runtime/CommonSlowPaths.h >@@ -25,6 +25,7 @@ > > #pragma once > >+#include "BytecodeStructs.h" > #include "CodeBlock.h" > #include "CodeSpecializationKind.h" > #include "DirectArguments.h" >@@ -114,11 +115,12 @@ inline bool opInByVal(ExecState* exec, JSValue baseVal, JSValue propName, ArrayP > } > > inline void tryCachePutToScopeGlobal( >- ExecState* exec, CodeBlock* 
codeBlock, Instruction* pc, JSObject* scope, >- GetPutInfo getPutInfo, PutPropertySlot& slot, const Identifier& ident) >+ ExecState* exec, CodeBlock* codeBlock, OpPutToScope& op, JSObject* scope, >+ PutPropertySlot& slot, const Identifier& ident) > { > // Covers implicit globals. Since they don't exist until they first execute, we didn't know how to cache them at compile time. >- ResolveType resolveType = getPutInfo.resolveType(); >+ auto& metadata = op.metadata(exec); >+ ResolveType resolveType = metadata.getPutInfo.resolveType(); > if (resolveType != GlobalProperty && resolveType != GlobalPropertyWithVarInjectionChecks > && resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks) > return; >@@ -127,18 +129,17 @@ inline void tryCachePutToScopeGlobal( > if (scope->isGlobalObject()) { > ResolveType newResolveType = resolveType == UnresolvedProperty ? GlobalProperty : GlobalPropertyWithVarInjectionChecks; > resolveType = newResolveType; >- getPutInfo = GetPutInfo(getPutInfo.resolveMode(), newResolveType, getPutInfo.initializationMode()); > ConcurrentJSLocker locker(codeBlock->m_lock); >- pc[4].u.operand = getPutInfo.operand(); >+ metadata.getPutInfo = GetPutInfo(metadata.getPutInfo.resolveMode(), newResolveType, metadata.getPutInfo.initializationMode()); > } else if (scope->isGlobalLexicalEnvironment()) { > JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(scope); > ResolveType newResolveType = resolveType == UnresolvedProperty ? 
GlobalLexicalVar : GlobalLexicalVarWithVarInjectionChecks; >- pc[4].u.operand = GetPutInfo(getPutInfo.resolveMode(), newResolveType, getPutInfo.initializationMode()).operand(); >+ metadata.getPutInfo = GetPutInfo(metadata.getPutInfo.resolveMode(), newResolveType, metadata.getPutInfo.initializationMode()); > SymbolTableEntry entry = globalLexicalEnvironment->symbolTable()->get(ident.impl()); > ASSERT(!entry.isNull()); > ConcurrentJSLocker locker(codeBlock->m_lock); >- pc[5].u.watchpointSet = entry.watchpointSet(); >- pc[6].u.pointer = static_cast<void*>(globalLexicalEnvironment->variableAt(entry.scopeOffset()).slot()); >+ metadata.watchpointSet = entry.watchpointSet(); >+ metadata.scopeOffset = globalLexicalEnvironment->variableAt(entry.scopeOffset()).slot(); > } > } > >@@ -161,32 +162,32 @@ inline void tryCachePutToScopeGlobal( > scope->structure(vm)->didCachePropertyReplacement(vm, slot.cachedOffset()); > > ConcurrentJSLocker locker(codeBlock->m_lock); >- pc[5].u.structure.set(vm, codeBlock, scope->structure(vm)); >- pc[6].u.operand = slot.cachedOffset(); >+ metadata.structure.set(vm, codeBlock, scope->structure(vm)); >+ metadata.varOffset = slot.cachedOffset(); > } > } > > inline void tryCacheGetFromScopeGlobal( >- ExecState* exec, VM& vm, Instruction* pc, JSObject* scope, PropertySlot& slot, const Identifier& ident) >+ ExecState* exec, VM& vm, OpGetFromScope& op, JSObject* scope, PropertySlot& slot, const Identifier& ident) > { >- GetPutInfo getPutInfo(pc[4].u.operand); >- ResolveType resolveType = getPutInfo.resolveType(); >+ auto& metadata = op.metadata(exec); >+ ResolveType resolveType = metadata.getPutInfo.resolveType(); > > if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) { > if (scope->isGlobalObject()) { > ResolveType newResolveType = resolveType == UnresolvedProperty ? GlobalProperty : GlobalPropertyWithVarInjectionChecks; > resolveType = newResolveType; // Allow below caching mechanism to kick in. 
> ConcurrentJSLocker locker(exec->codeBlock()->m_lock); >- pc[4].u.operand = GetPutInfo(getPutInfo.resolveMode(), newResolveType, getPutInfo.initializationMode()).operand(); >+ metadata.getPutInfo = GetPutInfo(metadata.getPutInfo.resolveMode(), newResolveType, metadata.getPutInfo.initializationMode()); > } else if (scope->isGlobalLexicalEnvironment()) { > JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(scope); > ResolveType newResolveType = resolveType == UnresolvedProperty ? GlobalLexicalVar : GlobalLexicalVarWithVarInjectionChecks; > SymbolTableEntry entry = globalLexicalEnvironment->symbolTable()->get(ident.impl()); > ASSERT(!entry.isNull()); > ConcurrentJSLocker locker(exec->codeBlock()->m_lock); >- pc[4].u.operand = GetPutInfo(getPutInfo.resolveMode(), newResolveType, getPutInfo.initializationMode()).operand(); >- pc[5].u.watchpointSet = entry.watchpointSet(); >- pc[6].u.pointer = static_cast<void*>(globalLexicalEnvironment->variableAt(entry.scopeOffset()).slot()); >+ metadata.getPutInfo = GetPutInfo(metadata.getPutInfo.resolveMode(), newResolveType, metadata.getPutInfo.initializationMode()); >+ metadata.watchpointSet = entry.watchpointSet(); >+ metadata.scopeOffset = globalLexicalEnvironment->variableAt(entry.scopeOffset()).slot(); > } > } > >@@ -199,8 +200,8 @@ inline void tryCacheGetFromScopeGlobal( > Structure* structure = scope->structure(vm); > { > ConcurrentJSLocker locker(codeBlock->m_lock); >- pc[5].u.structure.set(vm, codeBlock, structure); >- pc[6].u.operand = slot.cachedOffset(); >+ metadata.structure.set(vm, codeBlock, structure); >+ metadata.varOffset = slot.cachedOffset(); > } > structure->startWatchingPropertyForReplacements(vm, slot.cachedOffset()); > } >@@ -283,7 +284,7 @@ struct Instruction; > #define SLOW_PATH > > #define SLOW_PATH_DECL(name) \ >-extern "C" SlowPathReturnType SLOW_PATH name(ExecState* exec, Instruction* pc) >+extern "C" SlowPathReturnType SLOW_PATH name(ExecState* exec, const 
Instruction* pc)
> 
> #define SLOW_PATH_HIDDEN_DECL(name) \
> SLOW_PATH_DECL(name) WTF_INTERNAL
>diff --git a/Source/JavaScriptCore/wip_bytecode/README.md b/Source/JavaScriptCore/wip_bytecode/README.md
>new file mode 100644
>index 0000000000000000000000000000000000000000..dfd11654f7b196b89392d674711c5a383a4b74ab
>--- /dev/null
>+++ b/Source/JavaScriptCore/wip_bytecode/README.md
>@@ -0,0 +1,151 @@
>+# Bytecode format
>+
>++--------------+
>+| header |
>++==============+
>+| instruction0 |
>++--------------+
>+| instruction1 |
>++--------------+
>+| ... |
>++--------------+
>+| instructionN |
>++--------------+
>+
>+## Header
>+
>++--------------+
>+|num_parameters|
>++--------------+
>+| has_metadata |
>++--------------+
>+| count_op1 |
>++--------------+
>+| ... |
>++--------------+
>+| count_opN |
>++--------------+
>+| liveness |
>++--------------+
>+| global_info |
>++--------------+
>+| constants |
>++--------------+
>+
>+* `has_metadata` is a BitMap that indicates which opcodes need side table entries
>+* `count_opI` is a variable-length unsigned number that indicates how many entries are necessary for opcode I.
>+
>+Given that we currently have < 256 opcodes, the BitMap should fit in 4 bytes.
>+Of all opcodes, ~40 will currently ever need metadata, so that if the bytecode for any CodeBlock uses all of these opcodes, it would add an extra 40~160b, depending on how many instances of each opcode appear in the bytecode.
>+
>+## Instruction
>+
>+Instructions have variable length, and have the form
>+
>++-----------+------+-----+------+------------+
>+| opcode_id | arg0 | ... | argN | metadataID |
>++-----------+------+-----+------+------------+
>+
>+where N >= 0 and metadataID is optional
>+
>+### Narrow Instructions
>+
>+By default, we try to encode every instruction in a narrow setting, where every segment has 1-byte. 
However, we will fall back to a "Wide Instruction" whenever any of the arguments overflows, i.e.:
>+
>+* opcode_id: we currently have 167 opcodes, so this won't be a problem for now but, hypothetically, any opcodes beyond id 256 will have to be encoded as a wide instruction.
>+* arg: the type of the operand should never be ambiguous, therefore we support:
>+ + up to 256 of each of the following: local registers, constants and arguments
>+ + up to 8-byte types: we'll attempt to fit integers and unsigned integers in 8 bytes, otherwise fall back to a wide instruction.
>+* up to 256 metadata entries per opcode, i.e. if an opcode has metadata, only 256 instances of the same opcode will fit into the same CodeBlock.
>+
>+### Wide Instructions
>+
>+Wide instructions have 4-byte segments, but are otherwise indistinguishable from narrow instructions.
>+
>+We reserve the first opcode to a trampoline that will evaluate the next instruction as a "Wide Instruction", where each segment of the instruction has 4 bytes. This opcode will also be responsible for guaranteeing 4-byte alignment on ARM.
>+
>+## API
>+
>+A class/struct will be generated for each opcode. The struct will be responsible for:
>+* Encoding, e.g. dumping the instruction into a binary format, and choosing between narrow or wide encoding
>+* Providing access to each of the instruction's arguments and metadata
>+* Potentially allowing dumping the instruction, simplifying the work done by the BytecodeDumper
>+
>+Here's what the API may look like for each of these operations, e.g. the `op_get_argument` (this opcode should be a good example, since it has multiple argument types and metadata). 
Here is its current declaration (syntax may still change)
>+
>+```ruby
>+op :get_argument,
>+ args: {
>+ dst: :Register,
>+ index: :unsigned,
>+ },
>+ metadata: {
>+ profile: :ValueProfile,
>+ }
>+```
>+
>+### Encoding
>+
>+```cpp
>+static void OpGetArgument::create(BytecodeGenerator& generator, RegisterID* register, unsigned index);
>+```
>+
>+
>+### Field Access
>+
>+```cpp
>+RegisterID OpGetArgument::dst();
>+unsigned OpGetArgument::index();
>+```
>+
>+### Metadata Access
>+```cpp
>+ValueProfile* OpGetArgument::profile(ExecState&);
>+```
>+
>+### BytecodeDumper
>+
>+```cpp
>+void OpGetArgument::dump(BytecodeDumper&);
>+```
>+
>+### Decoding
>+
>+Decoding should be done by the base instruction/reader class.
>+
>+```cpp
>+Instruction::Unknown* Instruction::read(UnlinkedInstructionStream::Reader&);
>+```
>+
>+## "Linking"
>+
>+Linking, in its current form, should no longer be necessary. Instead, it will consist of creating the side table for the bytecode metadata and ensuring that the jump table with the offset for each opcode has been initialized.
>+
>+### Side table
>+
>+A callee-saved register pointing to the current CodeBlock's metadata can be kept at all times to speed up metadata accesses that are necessary especially for profiling.
>+
>+### Jump table
>+
>+A mapping from opcode IDs to opcode addresses is already generated in InitBytecodes.asm and loaded by LLIntData.
>+
>+## Portability
>+
>+Due to different alignment requirements, the bytecode should not be portable across different platforms.
>+Does enabling the JIT affect the bytecode? Possibly not, since it may only affect the metadata and not the bytecode itself, but TBC.
>+
>+## Performance
>+
>+Removing the linking step means that the interpreter will no longer be direct-threaded. Disabling COMPUTED_GOTO in CLoop (in order to disable direct threading) shows a 1% regression on PLT.
>+
>+However, CLoop's fallback implementation is a switch statement, which affects branch prediction. 
>+ >+Alternatively, hacking JSC to skip replacing opcodes with their addresses during linking and modifying the dispatch macro in CLoop to fetch opcodes addresses shows a ~1% progression over CLoop with COMPUTED_GOTO enabled. >+ >+### get_by_id >+ >+`get_by_id` is the instruction that will require the most change, since we currently rewrite the bytecode stream to select from multiple implementations that share the same size. We can default to trying the most performance critical version of `get_by_id` first and fallback to loading the metadata field that specifies which version of the opcode should we execute. >+ >+# Current issues >+ >+Forward jumps will always generate wide opcodes: UINT_MAX is used as invalidLocation, which means that the address won't fit into a 1-byte operand. We might need to compact it later. >diff --git a/Source/WTF/wtf/Vector.h b/Source/WTF/wtf/Vector.h >index 306f46249d257eb365efd2cb439d5daebc2d38d9..e5d76423495d3cf4b4208e069aef1662df689fdf 100644 >--- a/Source/WTF/wtf/Vector.h >+++ b/Source/WTF/wtf/Vector.h >@@ -780,7 +780,7 @@ public: > > template<typename U> void insert(size_t position, const U*, size_t); > template<typename U> void insert(size_t position, U&&); >- template<typename U, size_t c> void insertVector(size_t position, const Vector<U, c>&); >+ template<typename U, size_t c, typename OH> void insertVector(size_t position, const Vector<U, c, OH>&); > > void remove(size_t position); > void remove(size_t position, size_t length); >@@ -1436,8 +1436,8 @@ inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::insert(size > } > > template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity> >-template<typename U, size_t c> >-inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::insertVector(size_t position, const Vector<U, c>& val) >+template<typename U, size_t c, typename OH> >+inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::insertVector(size_t position, 
const Vector<U, c, OH>& val) > { > insert(position, val.begin(), val.size()); > }
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Formatted Diff
|
Diff
Attachments on
bug 187373
:
344389
|
344531
|
344635
|
344935
|
345812
|
346138
|
346673
|
346756
|
346862
|
347641
|
347766
|
348149
|
348294
|
348572
|
348792
|
348847
|
348971
|
349051
|
349080
|
349211
|
349307
|
349396
|
349473
|
349594
|
349700
|
349991
|
350040
|
350625
|
350716
|
350743
|
350835
|
350888
|
350987
|
351708
|
351743
|
351841
|
351955
|
351964
|
351995
|
352037
|
352050
|
352126
|
352232
|
352267
|
352268
|
352284
|
352287
|
352288
|
352312
|
352319
|
352322
|
352565
|
352580
|
352600
|
352639
|
352651
|
352664
|
352677
|
352680
|
352689
|
352692
|
352707
|
352719
|
352750
|
352806
|
352809
|
352811
|
352823
|
352843
|
352852
|
352853
|
352861
|
352863
|
352865
|
352866
|
352868
|
352913
|
352926
|
352936
|
352948
|
352981
|
352988
|
352993
|
352999
|
353008
|
353009
|
353033
|
353166
|
353170
|
353199
|
353213
|
353227
|
353235