WebKit Bugzilla
Attachment 350040 Details for
Bug 187373
: New bytecode format for JSC
Home
|
New
|
Browse
|
Search
|
[?]
|
Reports
|
Requests
|
Help
|
New Account
|
Log In
Remember
[x]
|
Forgot Password
Login:
[x]
[patch]
Patch
bug-187373-20180918124908.patch (text/plain), 1.22 MB, created by
Tadeu Zagallo
on 2018-09-18 12:49:21 PDT
(
hide
)
Description:
Patch
Filename:
MIME Type:
Creator:
Tadeu Zagallo
Created:
2018-09-18 12:49:21 PDT
Size:
1.22 MB
patch
obsolete
>Subversion Revision: 234092 >diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog >index ef79ffda4221f29db15ccadf6d983a72b0d87a86..ec2e4fef865dc31a635711d09568bbe54b4caddd 100644 >--- a/Source/JavaScriptCore/ChangeLog >+++ b/Source/JavaScriptCore/ChangeLog >@@ -1,3 +1,25 @@ >+2018-07-05 Tadeu Zagallo <tzagallo@apple.com> >+ >+ New bytecode format for JSC >+ https://bugs.webkit.org/show_bug.cgi?id=187373 >+ >+ Reviewed by NOBODY (OOPS!). >+ >+ Work in progress for the new bytecode format. For now, there's just a >+ handful of docs that I've experimenting with as to how should we >+ declare the opcodes, how should we generate the code and what the >+ generated code should look like. >+ >+ * wip_bytecode/README.md: Briefly documents the goals of for the new >+ bytecode and how it's going work. Still missing a lot of info though. >+ * wip_bytecode/bytecode_generator.rb: Some hacky ruby that I'm >+ considering using for the generating the C++ code for the opcodes >+ * wip_bytecode/bytecode_structs.cpp: Some hacky C++ experiments of >+ what could/should the API for the generated opcodes look like. >+ * wip_bytecode/opcodes.yaml: A list of all the opcodes, with names and >+ types for its arguments and metadata. No idea why it ended up being a >+ yaml file, but if all is well I'll migrate it to the ruby syntax above. 
>+ > 2018-07-22 Yusuke Suzuki <utatane.tea@gmail.com> > > [JSC] GetByIdVariant and InByIdVariant do not need slot base if they are not "hit" variants >diff --git a/Source/JavaScriptCore/CMakeLists.txt b/Source/JavaScriptCore/CMakeLists.txt >index 3691cf274ed190e3a2f7763bd807162f736c7bda..a39528c5a20ea6f255a18d71bc3a03533102ad82 100644 >--- a/Source/JavaScriptCore/CMakeLists.txt >+++ b/Source/JavaScriptCore/CMakeLists.txt >@@ -200,11 +200,29 @@ set(OFFLINE_ASM > offlineasm/x86.rb > ) > >+set(GENERATOR >+ generator/Argument.rb >+ generator/Assertion.rb >+ generator/DSL.rb >+ generator/Fits.rb >+ generator/GeneratedFile.rb >+ generator/Implementation.rb >+ generator/Interface.rb >+ generator/Metadata.rb >+ generator/Opcode.rb >+ generator/OpcodeGroup.rb >+ generator/Options.rb >+ generator/Section.rb >+ generator/Template.rb >+ generator/Type.rb >+ generator/main.rb >+) >+ > add_custom_command( > OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/Bytecodes.h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InitBytecodes.asm ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/BytecodeStructs.h >- MAIN_DEPENDENCY ${JAVASCRIPTCORE_DIR}/generate-bytecode-files >- DEPENDS ${JAVASCRIPTCORE_DIR}/generate-bytecode-files bytecode/BytecodeList.json >- COMMAND ${PYTHON_EXECUTABLE} ${JAVASCRIPTCORE_DIR}/generate-bytecode-files --bytecodes_h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/Bytecodes.h --init_bytecodes_asm ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InitBytecodes.asm --bytecode_structs_h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/BytecodeStructs.h ${JAVASCRIPTCORE_DIR}/bytecode/BytecodeList.json >+ MAIN_DEPENDENCY ${JAVASCRIPTCORE_DIR}/generator/main.rb >+ DEPENDS ${GENERATOR} bytecode/BytecodeList.rb >+ COMMAND ${RUBY_EXECUTABLE} ${JAVASCRIPTCORE_DIR}/generator/main.rb --bytecodes_h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/Bytecodes.h --init_bytecodes_asm ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InitBytecodes.asm --bytecode_structs_h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/BytecodeStructs.h 
${JAVASCRIPTCORE_DIR}/bytecode/BytecodeList.rb > VERBATIM) > > list(APPEND JavaScriptCore_HEADERS >diff --git a/Source/JavaScriptCore/DerivedSources.make b/Source/JavaScriptCore/DerivedSources.make >index d95cac50b5d6f567a8aeb87d5e390c8d88ff910f..1b161fe56f3ba78767772a9731aea8025e3b2055 100644 >--- a/Source/JavaScriptCore/DerivedSources.make >+++ b/Source/JavaScriptCore/DerivedSources.make >@@ -215,14 +215,8 @@ udis86_itab.h: $(JavaScriptCore)/disassembler/udis86/ud_itab.py $(JavaScriptCore > > # Bytecode files > >-Bytecodes.h: $(JavaScriptCore)/generate-bytecode-files $(JavaScriptCore)/bytecode/BytecodeList.json >- $(PYTHON) $(JavaScriptCore)/generate-bytecode-files --bytecodes_h Bytecodes.h $(JavaScriptCore)/bytecode/BytecodeList.json >- >-BytecodeStructs.h: $(JavaScriptCore)/generate-bytecode-files $(JavaScriptCore)/bytecode/BytecodeList.json >- $(PYTHON) $(JavaScriptCore)/generate-bytecode-files --bytecode_structs_h BytecodeStructs.h $(JavaScriptCore)/bytecode/BytecodeList.json >- >-InitBytecodes.asm: $(JavaScriptCore)/generate-bytecode-files $(JavaScriptCore)/bytecode/BytecodeList.json >- $(PYTHON) $(JavaScriptCore)/generate-bytecode-files --init_bytecodes_asm InitBytecodes.asm $(JavaScriptCore)/bytecode/BytecodeList.json >+Bytecodes.h BytecodeStructs.h InitBytecodes.asm: $(wildcard $(JavaScriptCore)/generator/*.rb) $(JavaScriptCore)/bytecode/BytecodeList.rb >+ $(RUBY) $(JavaScriptCore)/generator/main.rb $(JavaScriptCore)/bytecode/BytecodeList.rb --bytecode_structs_h BytecodeStructs.h --init_bytecodes_asm InitBytecodes.asm --bytecodes_h Bytecodes.h > > # Inspector interfaces > >diff --git a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj b/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj >index 325df6e9eba0c8e84d216e81058c9810d5b310ca..3fab46e4534e9893bb36d3f8345e0c5cf325988c 100644 >--- a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj >+++ b/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj >@@ -14,11 
+14,23 @@ > 0F4680AA14BA7FD900BFE272 /* Generate Derived Sources */, > ); > dependencies = ( >- 65442D5018EBB744007AF92F /* PBXTargetDependency */, >+ 14CC67C4213F0402009B26F0 /* PBXTargetDependency */, > ); > name = "LLInt Offsets"; > productName = "Derived Sources"; > }; >+ 14CC67A5213ECFE2009B26F0 /* LLInt Settings */ = { >+ isa = PBXAggregateTarget; >+ buildConfigurationList = 14CC67A9213ECFE2009B26F0 /* Build configuration list for PBXAggregateTarget "LLInt Settings" */; >+ buildPhases = ( >+ 14CC67A8213ECFE2009B26F0 /* Generate Derived Sources */, >+ ); >+ dependencies = ( >+ 14CC67A6213ECFE2009B26F0 /* PBXTargetDependency */, >+ ); >+ name = "LLInt Settings"; >+ productName = "Derived Sources"; >+ }; > 53B4BD041F68AF8900D2BEA3 /* Generate Unified Sources */ = { > isa = PBXAggregateTarget; > buildConfigurationList = 53B4BD0A1F68AF8900D2BEA3 /* Build configuration list for PBXAggregateTarget "Generate Unified Sources" */; >@@ -782,6 +794,7 @@ > 14874AE615EBDE4A002E3587 /* JSScope.h in Headers */ = {isa = PBXBuildFile; fileRef = 14874AE215EBDE4A002E3587 /* JSScope.h */; settings = {ATTRIBUTES = (Private, ); }; }; > 148A7BF01B82975A002D9157 /* InlineCallFrame.h in Headers */ = {isa = PBXBuildFile; fileRef = 148A7BEE1B82975A002D9157 /* InlineCallFrame.h */; settings = {ATTRIBUTES = (Private, ); }; }; > 148CD1D8108CF902008163C6 /* JSContextRefPrivate.h in Headers */ = {isa = PBXBuildFile; fileRef = 148CD1D7108CF902008163C6 /* JSContextRefPrivate.h */; settings = {ATTRIBUTES = (Private, ); }; }; >+ 1498CAD6214BF36D00710879 /* GetByIdMetadata.h in Headers */ = {isa = PBXBuildFile; fileRef = 1498CAD5214BF36D00710879 /* GetByIdMetadata.h */; }; > 14A1563210966365006FA260 /* DateInstanceCache.h in Headers */ = {isa = PBXBuildFile; fileRef = 14A1563010966365006FA260 /* DateInstanceCache.h */; settings = {ATTRIBUTES = (Private, ); }; }; > 14AB66761DECF40900A56C26 /* UnlinkedSourceCode.h in Headers */ = {isa = PBXBuildFile; fileRef = 14AB66751DECF40900A56C26 /* 
UnlinkedSourceCode.h */; settings = {ATTRIBUTES = (Private, ); }; }; > 14AD910C1DCA92940014F9FE /* EvalCodeBlock.h in Headers */ = {isa = PBXBuildFile; fileRef = 14AD91061DCA92940014F9FE /* EvalCodeBlock.h */; }; >@@ -804,6 +817,7 @@ > 14BFCE6910CDB1FC00364CCE /* WeakGCMap.h in Headers */ = {isa = PBXBuildFile; fileRef = 14BFCE6810CDB1FC00364CCE /* WeakGCMap.h */; settings = {ATTRIBUTES = (Private, ); }; }; > 14CA958B16AB50DE00938A06 /* StaticPropertyAnalyzer.h in Headers */ = {isa = PBXBuildFile; fileRef = 14CA958A16AB50DE00938A06 /* StaticPropertyAnalyzer.h */; settings = {ATTRIBUTES = (Private, ); }; }; > 14CA958D16AB50FA00938A06 /* ObjectAllocationProfile.h in Headers */ = {isa = PBXBuildFile; fileRef = 14CA958C16AB50FA00938A06 /* ObjectAllocationProfile.h */; settings = {ATTRIBUTES = (Private, ); }; }; >+ 14CC67C2213ED58F009B26F0 /* LLIntSettingsExtractor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 14CC67C0213ED588009B26F0 /* LLIntSettingsExtractor.cpp */; }; > 14D2F3DB139F4BE200491031 /* MarkedSpace.h in Headers */ = {isa = PBXBuildFile; fileRef = 14D2F3D9139F4BE200491031 /* MarkedSpace.h */; settings = {ATTRIBUTES = (Private, ); }; }; > 14DF04DA16B3996D0016A513 /* StaticPropertyAnalysis.h in Headers */ = {isa = PBXBuildFile; fileRef = 14DF04D916B3996D0016A513 /* StaticPropertyAnalysis.h */; settings = {ATTRIBUTES = (Private, ); }; }; > 14E84F9F14EE1ACC00D6D5D4 /* WeakBlock.h in Headers */ = {isa = PBXBuildFile; fileRef = 14E84F9A14EE1ACC00D6D5D4 /* WeakBlock.h */; settings = {ATTRIBUTES = (Private, ); }; }; >@@ -1235,7 +1249,6 @@ > 969A072B0ED1CE6900F1F681 /* RegisterID.h in Headers */ = {isa = PBXBuildFile; fileRef = 969A07280ED1CE6900F1F681 /* RegisterID.h */; }; > 969A07970ED1D3AE00F1F681 /* CodeBlock.h in Headers */ = {isa = PBXBuildFile; fileRef = 969A07910ED1D3AE00F1F681 /* CodeBlock.h */; settings = {ATTRIBUTES = (Private, ); }; }; > 969A07980ED1D3AE00F1F681 /* DirectEvalCodeCache.h in Headers */ = {isa = PBXBuildFile; fileRef = 
969A07920ED1D3AE00F1F681 /* DirectEvalCodeCache.h */; settings = {ATTRIBUTES = (Private, ); }; }; >- 969A07990ED1D3AE00F1F681 /* Instruction.h in Headers */ = {isa = PBXBuildFile; fileRef = 969A07930ED1D3AE00F1F681 /* Instruction.h */; settings = {ATTRIBUTES = (Private, ); }; }; > 969A079B0ED1D3AE00F1F681 /* Opcode.h in Headers */ = {isa = PBXBuildFile; fileRef = 969A07950ED1D3AE00F1F681 /* Opcode.h */; }; > 978801411471AD920041B016 /* JSDateMath.h in Headers */ = {isa = PBXBuildFile; fileRef = 9788FC231471AD0C0068CE2D /* JSDateMath.h */; settings = {ATTRIBUTES = (Private, ); }; }; > 981ED82328234D91BAECCADE /* MachineContext.h in Headers */ = {isa = PBXBuildFile; fileRef = 28806E21155E478A93FA7B02 /* MachineContext.h */; settings = {ATTRIBUTES = (Private, ); }; }; >@@ -1835,6 +1848,27 @@ > remoteGlobalIDString = 0F4680A914BA7FD900BFE272; > remoteInfo = "LLInt Offsets"; > }; >+ 14CC67A7213ECFE2009B26F0 /* PBXContainerItemProxy */ = { >+ isa = PBXContainerItemProxy; >+ containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; >+ proxyType = 1; >+ remoteGlobalIDString = 65FB3F6609D11E9100F49DEB; >+ remoteInfo = "Derived Sources"; >+ }; >+ 14CC67BE213ED459009B26F0 /* PBXContainerItemProxy */ = { >+ isa = PBXContainerItemProxy; >+ containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; >+ proxyType = 1; >+ remoteGlobalIDString = 14CC67A5213ECFE2009B26F0; >+ remoteInfo = "LLInt Settings"; >+ }; >+ 14CC67C3213F0402009B26F0 /* PBXContainerItemProxy */ = { >+ isa = PBXContainerItemProxy; >+ containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; >+ proxyType = 1; >+ remoteGlobalIDString = 14CC67B0213ED20C009B26F0; >+ remoteInfo = JSCLLIntSettingsExtractor; >+ }; > 53B4BD131F68C2AA00D2BEA3 /* PBXContainerItemProxy */ = { > isa = PBXContainerItemProxy; > containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; >@@ -1884,13 +1918,6 @@ > remoteGlobalIDString = 65FB3F6609D11E9100F49DEB; > remoteInfo = "Derived Sources"; > }; >- 
65442D4F18EBB744007AF92F /* PBXContainerItemProxy */ = { >- isa = PBXContainerItemProxy; >- containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; >- proxyType = 1; >- remoteGlobalIDString = 65FB3F6609D11E9100F49DEB; >- remoteInfo = "Derived Sources"; >- }; > 65788A9F18B409EB00C189FF /* PBXContainerItemProxy */ = { > isa = PBXContainerItemProxy; > containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; >@@ -2260,7 +2287,7 @@ > 0F37308E1C0CD68500052BFA /* DisallowMacroScratchRegisterUsage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DisallowMacroScratchRegisterUsage.h; sourceTree = "<group>"; }; > 0F3730901C0CD70C00052BFA /* AllowMacroScratchRegisterUsage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AllowMacroScratchRegisterUsage.h; sourceTree = "<group>"; }; > 0F3730921C0D67EE00052BFA /* AirUseCounts.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = AirUseCounts.h; path = b3/air/AirUseCounts.h; sourceTree = "<group>"; }; >- 0F38B00F17CF077F00B144D3 /* LLIntEntrypoint.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = LLIntEntrypoint.cpp; path = llint/LLIntEntrypoint.cpp; sourceTree = "<group>"; }; >+ 0F38B00F17CF077F00B144D3 /* LLIntDesiredSettings.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = LLIntDesiredSettings.h; path = LLIntOffsets/LLIntDesiredSettings.h; sourceTree = BUILT_PRODUCTS_DIR; }; > 0F38B01017CF077F00B144D3 /* LLIntEntrypoint.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = LLIntEntrypoint.h; path = llint/LLIntEntrypoint.h; sourceTree = "<group>"; }; > 0F38B01317CFE75500B144D3 /* DFGCompilationKey.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGCompilationKey.cpp; path = dfg/DFGCompilationKey.cpp; sourceTree = "<group>"; }; > 0F38B01417CFE75500B144D3 /* 
DFGCompilationKey.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGCompilationKey.h; path = dfg/DFGCompilationKey.h; sourceTree = "<group>"; }; >@@ -3118,6 +3145,9 @@ > 147341DF1DC2CE9600AA29BA /* ProgramExecutable.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ProgramExecutable.cpp; sourceTree = "<group>"; }; > 147341E01DC2CE9600AA29BA /* ScriptExecutable.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ScriptExecutable.cpp; sourceTree = "<group>"; }; > 147341E91DC2CF2500AA29BA /* ExecutableBase.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ExecutableBase.cpp; sourceTree = "<group>"; }; >+ 14788EE221501AF700A561C8 /* ProfileTypeBytecodeFlag.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ProfileTypeBytecodeFlag.cpp; sourceTree = "<group>"; }; >+ 14788EE421501B2800A561C8 /* JSType.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JSType.cpp; sourceTree = "<group>"; }; >+ 14788EE521501B2900A561C8 /* GetPutInfo.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = GetPutInfo.cpp; sourceTree = "<group>"; }; > 147B83AA0E6DB8C9004775A4 /* BatchedTransitionOptimizer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BatchedTransitionOptimizer.h; sourceTree = "<group>"; }; > 147B84620E6DE6B1004775A4 /* PutPropertySlot.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PutPropertySlot.h; sourceTree = "<group>"; }; > 1480DB9B0DDC227F003CFDF2 /* DebuggerCallFrame.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DebuggerCallFrame.h; sourceTree = "<group>"; }; >@@ -3133,6 +3163,8 @@ > 148A7BEE1B82975A002D9157 
/* InlineCallFrame.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InlineCallFrame.h; sourceTree = "<group>"; }; > 148CD1D7108CF902008163C6 /* JSContextRefPrivate.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSContextRefPrivate.h; sourceTree = "<group>"; }; > 149559ED0DDCDDF700648087 /* DebuggerCallFrame.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = DebuggerCallFrame.cpp; sourceTree = "<group>"; }; >+ 1498CAD3214656C400710879 /* libWTF.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; path = libWTF.a; sourceTree = BUILT_PRODUCTS_DIR; }; >+ 1498CAD5214BF36D00710879 /* GetByIdMetadata.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GetByIdMetadata.h; sourceTree = "<group>"; }; > 149B24FF0D8AF6D1009CB8C7 /* Register.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Register.h; sourceTree = "<group>"; }; > 149DAAF212EB559D0083B12B /* ConservativeRoots.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ConservativeRoots.h; sourceTree = "<group>"; }; > 14A1563010966365006FA260 /* DateInstanceCache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DateInstanceCache.h; sourceTree = "<group>"; }; >@@ -3163,6 +3195,10 @@ > 14AD912B1DCAAAB00014F9FE /* UnlinkedFunctionCodeBlock.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = UnlinkedFunctionCodeBlock.cpp; sourceTree = "<group>"; }; > 14B7233F12D7D0DA003BD5ED /* MachineStackMarker.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MachineStackMarker.cpp; sourceTree = "<group>"; }; > 14B7234012D7D0DA003BD5ED /* MachineStackMarker.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = MachineStackMarker.h; sourceTree = "<group>"; }; >+ 14BA774F211085F0008D0B05 /* Fits.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Fits.h; sourceTree = "<group>"; }; >+ 14BA7750211085F0008D0B05 /* Instruction.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Instruction.h; sourceTree = "<group>"; }; >+ 14BA7751211086A0008D0B05 /* BytecodeList.rb */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.ruby; path = BytecodeList.rb; sourceTree = "<group>"; }; >+ 14BA7752211A8E5F008D0B05 /* ProfileTypeBytecodeFlag.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ProfileTypeBytecodeFlag.h; sourceTree = "<group>"; }; > 14BA78F013AAB88F005B7C2C /* SlotVisitor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SlotVisitor.h; sourceTree = "<group>"; }; > 14BA7A9513AADFF8005B7C2C /* Heap.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Heap.cpp; sourceTree = "<group>"; }; > 14BA7A9613AADFF8005B7C2C /* Heap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Heap.h; sourceTree = "<group>"; }; >@@ -3175,6 +3211,11 @@ > 14BFCE6810CDB1FC00364CCE /* WeakGCMap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WeakGCMap.h; sourceTree = "<group>"; }; > 14CA958A16AB50DE00938A06 /* StaticPropertyAnalyzer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = StaticPropertyAnalyzer.h; sourceTree = "<group>"; }; > 14CA958C16AB50FA00938A06 /* ObjectAllocationProfile.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ObjectAllocationProfile.h; sourceTree = "<group>"; }; >+ 14CC3BA0213756B0002D58B6 /* DumpValue.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; path = DumpValue.h; sourceTree = "<group>"; }; >+ 14CC3BA12138A238002D58B6 /* InstructionStream.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = InstructionStream.cpp; sourceTree = "<group>"; }; >+ 14CC3BA22138A238002D58B6 /* InstructionStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InstructionStream.h; sourceTree = "<group>"; }; >+ 14CC67BD213ED20C009B26F0 /* JSCLLIntSettingsExtractor */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = JSCLLIntSettingsExtractor; sourceTree = BUILT_PRODUCTS_DIR; }; >+ 14CC67C0213ED588009B26F0 /* LLIntSettingsExtractor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = LLIntSettingsExtractor.cpp; path = llint/LLIntSettingsExtractor.cpp; sourceTree = "<group>"; }; > 14D2F3D8139F4BE200491031 /* MarkedSpace.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MarkedSpace.cpp; sourceTree = "<group>"; }; > 14D2F3D9139F4BE200491031 /* MarkedSpace.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MarkedSpace.h; sourceTree = "<group>"; }; > 14D792640DAA03FB001A9F05 /* CLoopStack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CLoopStack.h; sourceTree = "<group>"; }; >@@ -3542,8 +3583,6 @@ > 6511230514046A4C002B101D /* testRegExp */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = testRegExp; sourceTree = BUILT_PRODUCTS_DIR; }; > 6514F21718B3E1670098FF8B /* Bytecodes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Bytecodes.h; sourceTree = "<group>"; }; > 6514F21818B3E1670098FF8B /* InitBytecodes.asm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm.asm; path = 
InitBytecodes.asm; sourceTree = "<group>"; }; >- 6529FB3018B2D63900C61102 /* generate-bytecode-files */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.python; path = "generate-bytecode-files"; sourceTree = "<group>"; }; >- 6529FB3118B2D99900C61102 /* BytecodeList.json */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = BytecodeList.json; sourceTree = "<group>"; }; > 652A3A201651C66100A80AFE /* ARM64Disassembler.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = ARM64Disassembler.cpp; path = disassembler/ARM64Disassembler.cpp; sourceTree = "<group>"; }; > 652A3A221651C69700A80AFE /* A64DOpcode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = A64DOpcode.cpp; path = disassembler/ARM64/A64DOpcode.cpp; sourceTree = "<group>"; }; > 652A3A231651C69700A80AFE /* A64DOpcode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = A64DOpcode.h; path = disassembler/ARM64/A64DOpcode.h; sourceTree = "<group>"; }; >@@ -3891,7 +3930,6 @@ > 969A07900ED1D3AE00F1F681 /* CodeBlock.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CodeBlock.cpp; sourceTree = "<group>"; }; > 969A07910ED1D3AE00F1F681 /* CodeBlock.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CodeBlock.h; sourceTree = "<group>"; }; > 969A07920ED1D3AE00F1F681 /* DirectEvalCodeCache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DirectEvalCodeCache.h; sourceTree = "<group>"; }; >- 969A07930ED1D3AE00F1F681 /* Instruction.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Instruction.h; sourceTree = "<group>"; }; > 969A07940ED1D3AE00F1F681 /* Opcode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = 
Opcode.cpp; sourceTree = "<group>"; }; > 969A07950ED1D3AE00F1F681 /* Opcode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Opcode.h; sourceTree = "<group>"; }; > 969A09220ED1E09C00F1F681 /* Completion.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Completion.cpp; sourceTree = "<group>"; }; >@@ -4374,8 +4412,6 @@ > ADE802961E08F1C90058DE78 /* WebAssemblyLinkErrorPrototype.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = WebAssemblyLinkErrorPrototype.cpp; path = js/WebAssemblyLinkErrorPrototype.cpp; sourceTree = "<group>"; }; > ADE802971E08F1C90058DE78 /* WebAssemblyLinkErrorPrototype.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = WebAssemblyLinkErrorPrototype.h; path = js/WebAssemblyLinkErrorPrototype.h; sourceTree = "<group>"; }; > ADE8029D1E08F2260058DE78 /* WebAssemblyLinkErrorConstructor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = WebAssemblyLinkErrorConstructor.cpp; path = js/WebAssemblyLinkErrorConstructor.cpp; sourceTree = "<group>"; }; >- B59F89371891AD3300D5CCDC /* UnlinkedInstructionStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = UnlinkedInstructionStream.h; sourceTree = "<group>"; }; >- B59F89381891ADB500D5CCDC /* UnlinkedInstructionStream.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = UnlinkedInstructionStream.cpp; sourceTree = "<group>"; }; > BC021BF2136900C300FC5467 /* ToolExecutable.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = ToolExecutable.xcconfig; sourceTree = "<group>"; }; > BC02E9040E1839DB000F9297 /* ErrorConstructor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ErrorConstructor.cpp; 
sourceTree = "<group>"; }; > BC02E9050E1839DB000F9297 /* ErrorConstructor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ErrorConstructor.h; sourceTree = "<group>"; }; >@@ -4848,6 +4884,13 @@ > ); > runOnlyForDeploymentPostprocessing = 0; > }; >+ 14CC67B7213ED20C009B26F0 /* Frameworks */ = { >+ isa = PBXFrameworksBuildPhase; >+ buildActionMask = 2147483647; >+ files = ( >+ ); >+ runOnlyForDeploymentPostprocessing = 0; >+ }; > 651122FC14046A4C002B101D /* Frameworks */ = { > isa = PBXFrameworksBuildPhase; > buildActionMask = 2147483647; >@@ -4907,6 +4950,7 @@ > 0F9327591C20BCBA00CF6564 /* dynbench */, > 932F5BE10822A1C700736975 /* jsc */, > 0FF922CF14F46B130041A24E /* JSCLLIntOffsetsExtractor */, >+ 14CC67BD213ED20C009B26F0 /* JSCLLIntSettingsExtractor */, > 141211200A48793C00480255 /* minidom */, > 0F6183431C45F62A0072450B /* testair */, > 14BD59BF0A3E8F9000BAF59C /* testapi */, >@@ -4931,7 +4975,6 @@ > F692A8540255597D01FF60F7 /* create_hash_table */, > 937B63CC09E766D200A671DD /* DerivedSources.make */, > 0F93275A1C20BCDF00CF6564 /* dynbench.cpp */, >- 6529FB3018B2D63900C61102 /* generate-bytecode-files */, > F5C290E60284F98E018635CA /* JavaScriptCorePrefix.h */, > 45E12D8806A49B0F00E9DF84 /* jsc.cpp */, > A7C225CC139981F100FF1662 /* KeywordLookupGenerator.py */, >@@ -4950,6 +4993,7 @@ > E3FF752D1D9CE9EA00C7E16D /* domjit */, > 0867D69AFE84028FC02AAC07 /* Frameworks */, > 0FEA09FC1705137F00BB722C /* ftl */, >+ 14BA774C211085A0008D0B05 /* generator */, > 142E312A134FF0A600AFADB5 /* heap */, > A5BA15DF1823409200A82E69 /* inspector */, > 1429D77A0ED20D7300B89619 /* interpreter */, >@@ -4979,6 +5023,7 @@ > 5D5D8AD00E0D0EBE00F9C692 /* libedit.dylib */, > 9322A00306C341D3009067BB /* libicucore.dylib */, > 51F0EC0705C86C9A00E6DF1B /* libobjc.dylib */, >+ 1498CAD3214656C400710879 /* libWTF.a */, > A8A4748D151A8306004123FF /* libWTF.a */, > 371D842C17C98B6E00ECF994 /* libz.dylib */, > A5098B031C16AA0200087797 /* 
Security.framework */, >@@ -4997,7 +5042,7 @@ > 0F4680CE14BBB3D100BFE272 /* LLIntData.cpp */, > 0F4680CF14BBB3D100BFE272 /* LLIntData.h */, > 5DDDF44614FEE72200B4FB4D /* LLIntDesiredOffsets.h */, >- 0F38B00F17CF077F00B144D3 /* LLIntEntrypoint.cpp */, >+ 0F38B00F17CF077F00B144D3 /* LLIntDesiredSettings.h */, > 0F38B01017CF077F00B144D3 /* LLIntEntrypoint.h */, > 0F46809D14BA7F8200BFE272 /* LLIntExceptions.cpp */, > 0F46809E14BA7F8200BFE272 /* LLIntExceptions.h */, >@@ -5005,6 +5050,7 @@ > 0F4680A114BA7F8200BFE272 /* LLIntOffsetsExtractor.cpp */, > FED287B115EC9A5700DA8161 /* LLIntOpcode.h */, > 79CFC6EF1C33B10000C768EA /* LLIntPCRanges.h */, >+ 14CC67C0213ED588009B26F0 /* LLIntSettingsExtractor.cpp */, > 0F46809F14BA7F8200BFE272 /* LLIntSlowPaths.cpp */, > 0F4680A014BA7F8200BFE272 /* LLIntSlowPaths.h */, > 0F0B839714BCF45A00885B4F /* LLIntThunks.cpp */, >@@ -5951,6 +5997,24 @@ > path = debugger; > sourceTree = "<group>"; > }; >+ 14BA774C211085A0008D0B05 /* generator */ = { >+ isa = PBXGroup; >+ children = ( >+ 14BA774D211085DE008D0B05 /* runtime */, >+ ); >+ path = generator; >+ sourceTree = "<group>"; >+ }; >+ 14BA774D211085DE008D0B05 /* runtime */ = { >+ isa = PBXGroup; >+ children = ( >+ 14CC3BA0213756B0002D58B6 /* DumpValue.h */, >+ 14BA774F211085F0008D0B05 /* Fits.h */, >+ 14BA7750211085F0008D0B05 /* Instruction.h */, >+ ); >+ path = runtime; >+ sourceTree = "<group>"; >+ }; > 1C90513E0BA9E8830081E9D0 /* Configurations */ = { > isa = PBXGroup; > children = ( >@@ -6357,6 +6421,8 @@ > 969A07270ED1CE6900F1F681 /* Label.h */, > 960097A50EBABB58007A7297 /* LabelScope.h */, > 655EB29A10CE2581001A990E /* NodesCodegen.cpp */, >+ 14788EE221501AF700A561C8 /* ProfileTypeBytecodeFlag.cpp */, >+ 14BA7752211A8E5F008D0B05 /* ProfileTypeBytecodeFlag.h */, > 969A07280ED1CE6900F1F681 /* RegisterID.h */, > 14DF04D916B3996D0016A513 /* StaticPropertyAnalysis.h */, > 14CA958A16AB50DE00938A06 /* StaticPropertyAnalyzer.h */, >@@ -6585,6 +6651,7 @@ > 0FE050131AA9091100D33B33 /* 
GenericOffset.h */, > 0F2B66B217B6B5AB00A7AE3F /* GenericTypedArrayView.h */, > 0F2B66B317B6B5AB00A7AE3F /* GenericTypedArrayViewInlines.h */, >+ 14788EE521501B2900A561C8 /* GetPutInfo.cpp */, > 796465681B952FF0003059EE /* GetPutInfo.h */, > BC02E9B80E184545000F9297 /* GetterSetter.cpp */, > BC337BDE0E1AF0B80076918A /* GetterSetter.h */, >@@ -6807,6 +6874,7 @@ > 0F919D0A157EE09D004A4E7D /* JSSymbolTableObject.h */, > 70ECA6001AFDBEA200449739 /* JSTemplateObjectDescriptor.cpp */, > 70ECA6011AFDBEA200449739 /* JSTemplateObjectDescriptor.h */, >+ 14788EE421501B2800A561C8 /* JSType.cpp */, > 14ABB454099C2A0F00E2A24F /* JSType.h */, > 0F2B66CC17B6B5AB00A7AE3F /* JSTypedArrayConstructors.cpp */, > 0F2B66CD17B6B5AB00A7AE3F /* JSTypedArrayConstructors.h */, >@@ -7580,7 +7648,7 @@ > 7094C4DC1AE439530041A2EE /* BytecodeIntrinsicRegistry.cpp */, > 7094C4DD1AE439530041A2EE /* BytecodeIntrinsicRegistry.h */, > 0F2DD80A1AB3D85800BBB8E8 /* BytecodeKills.h */, >- 6529FB3118B2D99900C61102 /* BytecodeList.json */, >+ 14BA7751211086A0008D0B05 /* BytecodeList.rb */, > C2FCAE0E17A9C24E0034C735 /* BytecodeLivenessAnalysis.cpp */, > C2FCAE0F17A9C24E0034C735 /* BytecodeLivenessAnalysis.h */, > 0F666EBE183566F900D017F1 /* BytecodeLivenessAnalysisInlines.h */, >@@ -7643,6 +7711,7 @@ > AD4252501E5D0DEB009D2A97 /* FullCodeOrigin.h */, > 14AD91161DCA97FD0014F9FE /* FunctionCodeBlock.cpp */, > 14AD91071DCA92940014F9FE /* FunctionCodeBlock.h */, >+ 1498CAD5214BF36D00710879 /* GetByIdMetadata.h */, > 0F93329514CA7DC10085F3C6 /* GetByIdStatus.cpp */, > 0F93329614CA7DC10085F3C6 /* GetByIdStatus.h */, > 0F0332C118B01763005F979A /* GetByIdVariant.cpp */, >@@ -7667,7 +7736,8 @@ > 0FB399BB20AF6B2A0017E213 /* InstanceOfStatus.h */, > 0FB399BC20AF6B2A0017E213 /* InstanceOfVariant.cpp */, > 0FB399B920AF6B2A0017E213 /* InstanceOfVariant.h */, >- 969A07930ED1D3AE00F1F681 /* Instruction.h */, >+ 14CC3BA12138A238002D58B6 /* InstructionStream.cpp */, >+ 14CC3BA22138A238002D58B6 /* InstructionStream.h */, > 
53F6BF6C1C3F060A00F41E5D /* InternalFunctionAllocationProfile.h */, > BCFD8C900EEB2EE700283848 /* JumpTable.cpp */, > BCFD8C910EEB2EE700283848 /* JumpTable.h */, >@@ -7746,8 +7816,6 @@ > 14AD91211DCA9FA40014F9FE /* UnlinkedFunctionExecutable.h */, > 14142E501B796ECE00F4BF4B /* UnlinkedFunctionExecutable.h */, > 14AD911C1DCA9FA40014F9FE /* UnlinkedGlobalCodeBlock.h */, >- B59F89381891ADB500D5CCDC /* UnlinkedInstructionStream.cpp */, >- B59F89371891AD3300D5CCDC /* UnlinkedInstructionStream.h */, > 14AD912A1DCAAAB00014F9FE /* UnlinkedModuleProgramCodeBlock.cpp */, > 14AD911F1DCA9FA40014F9FE /* UnlinkedModuleProgramCodeBlock.h */, > 14AD91291DCAAAB00014F9FE /* UnlinkedProgramCodeBlock.cpp */, >@@ -8387,7 +8455,6 @@ > 53D444DC1DAF08AB00B92784 /* B3WasmAddressValue.h in Headers */, > 5341FC721DAC343C00E7E4D7 /* B3WasmBoundsCheckValue.h in Headers */, > 0F2C63B21E60AE4700C13839 /* B3Width.h in Headers */, >- 0F44A7B220BF68CE0022B171 /* ICStatusMap.h in Headers */, > 52678F8F1A031009006A306D /* BasicBlockLocation.h in Headers */, > 147B83AC0E6DB8C9004775A4 /* BatchedTransitionOptimizer.h in Headers */, > 86976E5F1FA3E8BC00E7C4E1 /* BigIntConstructor.h in Headers */, >@@ -8608,7 +8675,6 @@ > 86EC9DC61328DF82002B2AD7 /* DFGGenerationInfo.h in Headers */, > 86EC9DC81328DF82002B2AD7 /* DFGGraph.h in Headers */, > 0F2FCCFA18A60070001A27F8 /* DFGGraphSafepoint.h in Headers */, >- 0F44A7B120BF68C90022B171 /* ExitingInlineKind.h in Headers */, > 0FB17661196B8F9E0091052A /* DFGHeapLocation.h in Headers */, > 0FC841691BA8C3210061837D /* DFGInferredTypeCheck.h in Headers */, > 0FB14E211812570B009B6B4D /* DFGInlineCacheWrapper.h in Headers */, >@@ -8752,6 +8818,8 @@ > 14142E531B796EDD00F4BF4B /* ExecutableInfo.h in Headers */, > 0F60FE901FFC37020003320A /* ExecutableToCodeBlockEdge.h in Headers */, > 0F56A1D315000F35002992B1 /* ExecutionCounter.h in Headers */, >+ 0F44A7B020BF68620022B171 /* ExitFlag.h in Headers */, >+ 0F44A7B120BF68C90022B171 /* ExitingInlineKind.h in Headers */, > 
0F3AC754188E5EC80032029F /* ExitingJITType.h in Headers */, > 0FB105861675481200F8AB6E /* ExitKind.h in Headers */, > 0F0B83AB14BCF5BB00885B4F /* ExpressionRangeInfo.h in Headers */, >@@ -8759,7 +8827,6 @@ > A7A8AF3817ADB5F3005AB174 /* Float32Array.h in Headers */, > A7A8AF3917ADB5F3005AB174 /* Float64Array.h in Headers */, > 0F24E54317EA9F5900ABB217 /* FPRInfo.h in Headers */, >- 0F44A7B320BF68D10022B171 /* RecordedStatuses.h in Headers */, > E34EDBF71DB5FFC900DC87A5 /* FrameTracers.h in Headers */, > 0F5513A61D5A682C00C32BD8 /* FreeList.h in Headers */, > 0F6585E11EE0805A0095176D /* FreeListInlines.h in Headers */, >@@ -8868,6 +8935,7 @@ > 0FE0501B1AA9091100D33B33 /* GenericOffset.h in Headers */, > 0F2B66E017B6B5AB00A7AE3F /* GenericTypedArrayView.h in Headers */, > 0F2B66E117B6B5AB00A7AE3F /* GenericTypedArrayViewInlines.h in Headers */, >+ 1498CAD6214BF36D00710879 /* GetByIdMetadata.h in Headers */, > 0F9332A014CA7DCD0085F3C6 /* GetByIdStatus.h in Headers */, > 0F0332C418B01763005F979A /* GetByIdVariant.h in Headers */, > 7964656A1B952FF0003059EE /* GetPutInfo.h in Headers */, >@@ -8899,6 +8967,7 @@ > FE1BD0251E72053800134BC9 /* HeapVerifier.h in Headers */, > 0F4680D514BBD24B00BFE272 /* HostCallReturnValue.h in Headers */, > DC2143071CA32E55000A8869 /* ICStats.h in Headers */, >+ 0F44A7B220BF68CE0022B171 /* ICStatusMap.h in Headers */, > 0FB399BE20AF6B3D0017E213 /* ICStatusUtils.h in Headers */, > BC18C40F0E16F5CD00B34460 /* Identifier.h in Headers */, > 8606DDEA18DA44AB00A383D0 /* IdentifierInlines.h in Headers */, >@@ -8949,7 +9018,6 @@ > 0F49E9AA20AB4D00001CA0AA /* InstanceOfAccessCase.h in Headers */, > 0FB399BF20AF6B3F0017E213 /* InstanceOfStatus.h in Headers */, > 0FB399C020AF6B430017E213 /* InstanceOfVariant.h in Headers */, >- 969A07990ED1D3AE00F1F681 /* Instruction.h in Headers */, > A7A8AF3B17ADB5F3005AB174 /* Int16Array.h in Headers */, > A7A8AF3C17ADB5F3005AB174 /* Int32Array.h in Headers */, > A7A8AF3A17ADB5F3005AB174 /* Int8Array.h in Headers 
*/, >@@ -9109,7 +9177,6 @@ > 7013CA8C1B491A9400CAE613 /* JSJob.h in Headers */, > BC18C4160E16F5CD00B34460 /* JSLexicalEnvironment.h in Headers */, > BC18C4230E16F5CD00B34460 /* JSLock.h in Headers */, >- 0F44A7B020BF68620022B171 /* ExitFlag.h in Headers */, > C25D709C16DE99F400FCA6BC /* JSManagedValue.h in Headers */, > 2A4BB7F318A41179008A0FCD /* JSManagedValueInternal.h in Headers */, > A700874217CBE8EB00C3E643 /* JSMap.h in Headers */, >@@ -9366,6 +9433,7 @@ > 0F0CD4C215F1A6070032F1C0 /* PutDirectIndexMode.h in Headers */, > 0F9FC8C514E1B60400D52AE0 /* PutKind.h in Headers */, > 147B84630E6DE6B1004775A4 /* PutPropertySlot.h in Headers */, >+ 0F44A7B320BF68D10022B171 /* RecordedStatuses.h in Headers */, > 0FF60AC216740F8300029779 /* ReduceWhitespace.h in Headers */, > E33637A61B63220200EE0840 /* ReflectObject.h in Headers */, > 996B73231BDA08EF00331B84 /* ReflectObject.lut.h in Headers */, >@@ -9425,7 +9493,6 @@ > A7299DA217D12848005F5FF9 /* SetPrototype.h in Headers */, > 0FEE98411A8865B700754E93 /* SetupVarargsFrame.h in Headers */, > DC17E8181C9C91D9008A6AB3 /* ShadowChicken.h in Headers */, >- 0F44A7B420BF68D90022B171 /* TerminatedCodeOrigin.h in Headers */, > DC17E8191C9C91DB008A6AB3 /* ShadowChickenInlines.h in Headers */, > FE3022D31E3D73A500BAC493 /* SigillCrashAnalyzer.h in Headers */, > 0F4D8C781FCA3CFA001D32AC /* SimpleMarkingConstraint.h in Headers */, >@@ -9480,6 +9547,7 @@ > 0F766D3915AE4A1F008F363E /* StructureStubClearingWatchpoint.h in Headers */, > BCCF0D080EF0AAB900413C8F /* StructureStubInfo.h in Headers */, > BC9041480EB9250900FE26FA /* StructureTransitionTable.h in Headers */, >+ 0F44767020C5E2B4008B2C36 /* StubInfoSummary.h in Headers */, > 0F7DF1371E2970E10095951B /* Subspace.h in Headers */, > 0F7DF1381E2970E40095951B /* SubspaceInlines.h in Headers */, > 0F4A38FA1C8E13DF00190318 /* SuperSampler.h in Headers */, >@@ -9498,6 +9566,7 @@ > DC7997831CDE9FA0004D4A09 /* TagRegistersMode.h in Headers */, > 70ECA6091AFDBEA200449739 /* 
TemplateObjectDescriptor.h in Headers */, > 0F24E54F17EE274900ABB217 /* TempRegisterSet.h in Headers */, >+ 0F44A7B420BF68D90022B171 /* TerminatedCodeOrigin.h in Headers */, > 0FA2C17C17D7CF84009D015F /* TestRunnerUtils.h in Headers */, > FE3422121D6B81C30032BE88 /* ThrowScope.h in Headers */, > 0F572D4F16879FDD00E57FBD /* ThunkGenerator.h in Headers */, >@@ -9581,7 +9650,6 @@ > AD5B416F1EBAFB77008EFA43 /* WasmName.h in Headers */, > AD7B4B2E1FA3E29800C9DF79 /* WasmNameSection.h in Headers */, > ADD8FA461EB3079700DF542F /* WasmNameSectionParser.h in Headers */, >- 0F44767020C5E2B4008B2C36 /* StubInfoSummary.h in Headers */, > 5311BD4B1EA581E500525281 /* WasmOMGPlan.h in Headers */, > 53C6FEEF1E8ADFA900B18425 /* WasmOpcodeOrigin.h in Headers */, > 53B4BD121F68B32500D2BEA3 /* WasmOps.h in Headers */, >@@ -9768,6 +9836,24 @@ > productReference = 14BD59BF0A3E8F9000BAF59C /* testapi */; > productType = "com.apple.product-type.tool"; > }; >+ 14CC67B0213ED20C009B26F0 /* JSCLLIntSettingsExtractor */ = { >+ isa = PBXNativeTarget; >+ buildConfigurationList = 14CC67B8213ED20C009B26F0 /* Build configuration list for PBXNativeTarget "JSCLLIntSettingsExtractor" */; >+ buildPhases = ( >+ 14CC67B5213ED20C009B26F0 /* Sources */, >+ 14CC67B7213ED20C009B26F0 /* Frameworks */, >+ ); >+ buildRules = ( >+ ); >+ dependencies = ( >+ 14CC67BF213ED459009B26F0 /* PBXTargetDependency */, >+ ); >+ name = JSCLLIntSettingsExtractor; >+ productInstallPath = /usr/local/bin; >+ productName = jsc; >+ productReference = 14CC67BD213ED20C009B26F0 /* JSCLLIntSettingsExtractor */; >+ productType = "com.apple.product-type.tool"; >+ }; > 651122F714046A4C002B101D /* testRegExp */ = { > isa = PBXNativeTarget; > buildConfigurationList = 6511230014046A4C002B101D /* Build configuration list for PBXNativeTarget "testRegExp" */; >@@ -9888,10 +9974,12 @@ > projectDirPath = ""; > projectRoot = ""; > targets = ( >- 932F5BE30822A1C700736975 /* All */, > 932F5B3E0822A1C700736975 /* JavaScriptCore */, >- 
0F4680A914BA7FD900BFE272 /* LLInt Offsets */, >+ 932F5BE30822A1C700736975 /* All */, > 65FB3F6609D11E9100F49DEB /* Derived Sources */, >+ 14CC67A5213ECFE2009B26F0 /* LLInt Settings */, >+ 14CC67B0213ED20C009B26F0 /* JSCLLIntSettingsExtractor */, >+ 0F4680A914BA7FD900BFE272 /* LLInt Offsets */, > 0FF922C314F46B130041A24E /* JSCLLIntOffsetsExtractor */, > 65788A9D18B409EB00C189FF /* Offline Assembler */, > 53B4BD041F68AF8900D2BEA3 /* Generate Unified Sources */, >@@ -9933,7 +10021,21 @@ > ); > runOnlyForDeploymentPostprocessing = 0; > shellPath = /bin/sh; >- shellScript = "set -e\n\nmkdir -p \"${BUILT_PRODUCTS_DIR}/LLIntOffsets/\"\n\n/usr/bin/env ruby \"${SRCROOT}/offlineasm/generate_offset_extractor.rb\" \"-I${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\" \"${SRCROOT}/llint/LowLevelInterpreter.asm\" \"${BUILT_PRODUCTS_DIR}/LLIntOffsets/LLIntDesiredOffsets.h\" \"${ARCHS} C_LOOP\"\n"; >+ shellScript = "set -e\n\nmkdir -p \"${BUILT_PRODUCTS_DIR}/LLIntOffsets/\"\n\n/usr/bin/env ruby \"${SRCROOT}/offlineasm/generate_offset_extractor.rb\" \"-I${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\" \"${SRCROOT}/llint/LowLevelInterpreter.asm\" \"${BUILT_PRODUCTS_DIR}/JSCLLIntSettingsExtractor\" \"${BUILT_PRODUCTS_DIR}/LLIntOffsets/LLIntDesiredOffsets.h\" \"${ARCHS} C_LOOP\"\n"; >+ }; >+ 14CC67A8213ECFE2009B26F0 /* Generate Derived Sources */ = { >+ isa = PBXShellScriptBuildPhase; >+ buildActionMask = 2147483647; >+ files = ( >+ ); >+ inputPaths = ( >+ ); >+ name = "Generate Derived Sources"; >+ outputPaths = ( >+ ); >+ runOnlyForDeploymentPostprocessing = 0; >+ shellPath = /bin/sh; >+ shellScript = "set -e\n\nmkdir -p \"${BUILT_PRODUCTS_DIR}/LLIntOffsets/\"\n\n/usr/bin/env ruby \"${SRCROOT}/offlineasm/generate_settings_extractor.rb\" \"-I${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\" \"${SRCROOT}/llint/LowLevelInterpreter.asm\" \"${BUILT_PRODUCTS_DIR}/LLIntOffsets/LLIntDesiredSettings.h\" \"${ARCHS} C_LOOP\"\n"; > }; > 1A02D9A81B34A882000D1522 /* Add Symlink in 
/System/Library/PrivateFrameworks */ = { > isa = PBXShellScriptBuildPhase; >@@ -9982,7 +10084,7 @@ > ); > runOnlyForDeploymentPostprocessing = 0; > shellPath = /bin/sh; >- shellScript = "exec ${SRCROOT}/postprocess-headers.sh"; >+ shellScript = "exec ${SRCROOT}/postprocess-headers.sh\n"; > }; > 374F95C9205F9975002BF68F /* Make libWTF.a Symbolic Link */ = { > isa = PBXShellScriptBuildPhase; >@@ -10103,7 +10205,7 @@ > ); > runOnlyForDeploymentPostprocessing = 0; > shellPath = /bin/sh; >- shellScript = "if [[ \"${ACTION}\" == \"installhdrs\" ]]; then\n exit 0\nfi\n\ncd \"${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\"\n\n/usr/bin/env ruby JavaScriptCore/offlineasm/asm.rb \"-I${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\" JavaScriptCore/llint/LowLevelInterpreter.asm \"${BUILT_PRODUCTS_DIR}/JSCLLIntOffsetsExtractor\" LLIntAssembly.h || exit 1"; >+ shellScript = "if [[ \"${ACTION}\" == \"installhdrs\" ]]; then\n exit 0\nfi\n\ncd \"${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\"\n\n/usr/bin/env ruby JavaScriptCore/offlineasm/asm.rb \"-I${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\" JavaScriptCore/llint/LowLevelInterpreter.asm \"${BUILT_PRODUCTS_DIR}/JSCLLIntOffsetsExtractor\" LLIntAssembly.h || exit 1\n"; > }; > 65FB3F6509D11E9100F49DEB /* Generate Derived Sources */ = { > isa = PBXShellScriptBuildPhase; >@@ -10206,6 +10308,14 @@ > ); > runOnlyForDeploymentPostprocessing = 0; > }; >+ 14CC67B5213ED20C009B26F0 /* Sources */ = { >+ isa = PBXSourcesBuildPhase; >+ buildActionMask = 2147483647; >+ files = ( >+ 14CC67C2213ED58F009B26F0 /* LLIntSettingsExtractor.cpp in Sources */, >+ ); >+ runOnlyForDeploymentPostprocessing = 0; >+ }; > 651122FA14046A4C002B101D /* Sources */ = { > isa = PBXSourcesBuildPhase; > buildActionMask = 2147483647; >@@ -10426,6 +10536,21 @@ > target = 0F4680A914BA7FD900BFE272 /* LLInt Offsets */; > targetProxy = 0FF922D214F46B2F0041A24E /* PBXContainerItemProxy */; > }; >+ 14CC67A6213ECFE2009B26F0 /* PBXTargetDependency */ = { >+ 
isa = PBXTargetDependency; >+ target = 65FB3F6609D11E9100F49DEB /* Derived Sources */; >+ targetProxy = 14CC67A7213ECFE2009B26F0 /* PBXContainerItemProxy */; >+ }; >+ 14CC67BF213ED459009B26F0 /* PBXTargetDependency */ = { >+ isa = PBXTargetDependency; >+ target = 14CC67A5213ECFE2009B26F0 /* LLInt Settings */; >+ targetProxy = 14CC67BE213ED459009B26F0 /* PBXContainerItemProxy */; >+ }; >+ 14CC67C4213F0402009B26F0 /* PBXTargetDependency */ = { >+ isa = PBXTargetDependency; >+ target = 14CC67B0213ED20C009B26F0 /* JSCLLIntSettingsExtractor */; >+ targetProxy = 14CC67C3213F0402009B26F0 /* PBXContainerItemProxy */; >+ }; > 53B4BD141F68C2AA00D2BEA3 /* PBXTargetDependency */ = { > isa = PBXTargetDependency; > target = 53B4BD041F68AF8900D2BEA3 /* Generate Unified Sources */; >@@ -10461,11 +10586,6 @@ > target = 65FB3F6609D11E9100F49DEB /* Derived Sources */; > targetProxy = 65244BD218ECB5000010B708 /* PBXContainerItemProxy */; > }; >- 65442D5018EBB744007AF92F /* PBXTargetDependency */ = { >- isa = PBXTargetDependency; >- target = 65FB3F6609D11E9100F49DEB /* Derived Sources */; >- targetProxy = 65442D4F18EBB744007AF92F /* PBXContainerItemProxy */; >- }; > 65788A9E18B409EB00C189FF /* PBXTargetDependency */ = { > isa = PBXTargetDependency; > target = 0FF922C314F46B130041A24E /* JSCLLIntOffsetsExtractor */; >@@ -10796,6 +10916,86 @@ > }; > name = Production; > }; >+ 14CC67AA213ECFE2009B26F0 /* Debug */ = { >+ isa = XCBuildConfiguration; >+ buildSettings = { >+ PRODUCT_NAME = "$(TARGET_NAME)"; >+ }; >+ name = Debug; >+ }; >+ 14CC67AB213ECFE2009B26F0 /* Release */ = { >+ isa = XCBuildConfiguration; >+ buildSettings = { >+ PRODUCT_NAME = "$(TARGET_NAME)"; >+ }; >+ name = Release; >+ }; >+ 14CC67AC213ECFE2009B26F0 /* Profiling */ = { >+ isa = XCBuildConfiguration; >+ buildSettings = { >+ PRODUCT_NAME = "$(TARGET_NAME)"; >+ }; >+ name = Profiling; >+ }; >+ 14CC67AD213ECFE2009B26F0 /* Production */ = { >+ isa = XCBuildConfiguration; >+ buildSettings = { >+ PRODUCT_NAME = 
"$(TARGET_NAME)"; >+ }; >+ name = Production; >+ }; >+ 14CC67B9213ED20C009B26F0 /* Debug */ = { >+ isa = XCBuildConfiguration; >+ baseConfigurationReference = BC021BF2136900C300FC5467 /* ToolExecutable.xcconfig */; >+ buildSettings = { >+ HEADER_SEARCH_PATHS = ( >+ "\"${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\"", >+ "\"$(JAVASCRIPTCORE_FRAMEWORKS_DIR)/JavaScriptCore.framework/PrivateHeaders\"", >+ "$(inherited)", >+ ); >+ PRODUCT_NAME = "$(TARGET_NAME)"; >+ }; >+ name = Debug; >+ }; >+ 14CC67BA213ED20C009B26F0 /* Release */ = { >+ isa = XCBuildConfiguration; >+ baseConfigurationReference = BC021BF2136900C300FC5467 /* ToolExecutable.xcconfig */; >+ buildSettings = { >+ HEADER_SEARCH_PATHS = ( >+ "\"${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\"", >+ "\"$(JAVASCRIPTCORE_FRAMEWORKS_DIR)/JavaScriptCore.framework/PrivateHeaders\"", >+ "$(inherited)", >+ ); >+ PRODUCT_NAME = "$(TARGET_NAME)"; >+ }; >+ name = Release; >+ }; >+ 14CC67BB213ED20C009B26F0 /* Profiling */ = { >+ isa = XCBuildConfiguration; >+ baseConfigurationReference = BC021BF2136900C300FC5467 /* ToolExecutable.xcconfig */; >+ buildSettings = { >+ HEADER_SEARCH_PATHS = ( >+ "\"${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\"", >+ "\"$(JAVASCRIPTCORE_FRAMEWORKS_DIR)/JavaScriptCore.framework/PrivateHeaders\"", >+ "$(inherited)", >+ ); >+ PRODUCT_NAME = "$(TARGET_NAME)"; >+ }; >+ name = Profiling; >+ }; >+ 14CC67BC213ED20C009B26F0 /* Production */ = { >+ isa = XCBuildConfiguration; >+ baseConfigurationReference = BC021BF2136900C300FC5467 /* ToolExecutable.xcconfig */; >+ buildSettings = { >+ HEADER_SEARCH_PATHS = ( >+ "\"${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\"", >+ "\"$(JAVASCRIPTCORE_FRAMEWORKS_DIR)/JavaScriptCore.framework/PrivateHeaders\"", >+ "$(inherited)", >+ ); >+ PRODUCT_NAME = "$(TARGET_NAME)"; >+ }; >+ name = Production; >+ }; > 53B4BD0B1F68AF8900D2BEA3 /* Debug */ = { > isa = XCBuildConfiguration; > buildSettings = { >@@ -11179,6 +11379,28 @@ > 
defaultConfigurationIsVisible = 0; > defaultConfigurationName = Production; > }; >+ 14CC67A9213ECFE2009B26F0 /* Build configuration list for PBXAggregateTarget "LLInt Settings" */ = { >+ isa = XCConfigurationList; >+ buildConfigurations = ( >+ 14CC67AA213ECFE2009B26F0 /* Debug */, >+ 14CC67AB213ECFE2009B26F0 /* Release */, >+ 14CC67AC213ECFE2009B26F0 /* Profiling */, >+ 14CC67AD213ECFE2009B26F0 /* Production */, >+ ); >+ defaultConfigurationIsVisible = 0; >+ defaultConfigurationName = Production; >+ }; >+ 14CC67B8213ED20C009B26F0 /* Build configuration list for PBXNativeTarget "JSCLLIntSettingsExtractor" */ = { >+ isa = XCConfigurationList; >+ buildConfigurations = ( >+ 14CC67B9213ED20C009B26F0 /* Debug */, >+ 14CC67BA213ED20C009B26F0 /* Release */, >+ 14CC67BB213ED20C009B26F0 /* Profiling */, >+ 14CC67BC213ED20C009B26F0 /* Production */, >+ ); >+ defaultConfigurationIsVisible = 0; >+ defaultConfigurationName = Production; >+ }; > 53B4BD0A1F68AF8900D2BEA3 /* Build configuration list for PBXAggregateTarget "Generate Unified Sources" */ = { > isa = XCConfigurationList; > buildConfigurations = ( >diff --git a/Source/JavaScriptCore/Sources.txt b/Source/JavaScriptCore/Sources.txt >index 76dfee251b23da282f380d522ebe2a60c4cc81eb..854dedac7407fc836775648094dd0c5c67a17e2d 100644 >--- a/Source/JavaScriptCore/Sources.txt >+++ b/Source/JavaScriptCore/Sources.txt >@@ -231,6 +231,7 @@ bytecode/InlineCallFrameSet.cpp > bytecode/InstanceOfAccessCase.cpp > bytecode/InstanceOfStatus.cpp > bytecode/InstanceOfVariant.cpp >+bytecode/InstructionStream.cpp > bytecode/IntrinsicGetterAccessCase.cpp > bytecode/JumpTable.cpp > bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp >@@ -266,7 +267,6 @@ bytecode/UnlinkedCodeBlock.cpp > bytecode/UnlinkedEvalCodeBlock.cpp > bytecode/UnlinkedFunctionCodeBlock.cpp > bytecode/UnlinkedFunctionExecutable.cpp >-bytecode/UnlinkedInstructionStream.cpp > bytecode/UnlinkedModuleProgramCodeBlock.cpp > bytecode/UnlinkedProgramCodeBlock.cpp > 
bytecode/ValueRecovery.cpp >@@ -276,6 +276,7 @@ bytecode/Watchpoint.cpp > > bytecompiler/BytecodeGenerator.cpp > bytecompiler/NodesCodegen.cpp >+bytecompiler/ProfileTypeBytecodeFlag.cpp > > debugger/Debugger.cpp > debugger/DebuggerCallFrame.cpp >@@ -757,6 +758,7 @@ runtime/FunctionRareData.cpp > runtime/GeneratorFunctionConstructor.cpp > runtime/GeneratorFunctionPrototype.cpp > runtime/GeneratorPrototype.cpp >+runtime/GetPutInfo.cpp > runtime/GetterSetter.cpp > runtime/HashMapImpl.cpp > runtime/Identifier.cpp >@@ -850,6 +852,7 @@ runtime/JSStringJoiner.cpp > runtime/JSStringHeapCellType.cpp > runtime/JSSymbolTableObject.cpp > runtime/JSTemplateObjectDescriptor.cpp >+runtime/JSType.cpp > runtime/JSTypedArrayConstructors.cpp > runtime/JSTypedArrayPrototypes.cpp > runtime/JSTypedArrayViewConstructor.cpp >diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h >index fac8b86bc1bc92645eb03e718890e8180cb11c11..83b49bb61c24c2b0ad401772678858c9efda1bb9 100644 >--- a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h >+++ b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h >@@ -176,7 +176,7 @@ class ReturnAddressPtr { > public: > ReturnAddressPtr() { } > >- explicit ReturnAddressPtr(void* value) >+ explicit ReturnAddressPtr(const void* value) > : m_value(value) > { > PoisonedMasmPtr::assertIsNotPoisoned(m_value); >@@ -191,7 +191,7 @@ public: > ASSERT_VALID_CODE_POINTER(m_value); > } > >- void* value() const >+ const void* value() const > { > PoisonedMasmPtr::assertIsNotPoisoned(m_value); > return m_value; >@@ -203,7 +203,7 @@ public: > } > > private: >- void* m_value { nullptr }; >+ const void* m_value { nullptr }; > }; > > // MacroAssemblerCodePtr: >@@ -222,10 +222,10 @@ public: > MacroAssemblerCodePtr() = default; > MacroAssemblerCodePtr(std::nullptr_t) : m_value(nullptr) { } > >- explicit MacroAssemblerCodePtr(void* value) >+ explicit MacroAssemblerCodePtr(const void* value) > #if 
CPU(ARM_THUMB2) > // Decorate the pointer as a thumb code pointer. >- : m_value(reinterpret_cast<char*>(value) + 1) >+ : m_value(reinterpret_cast<const char*>(value) + 1) > #else > : m_value(value) > #endif >@@ -239,7 +239,7 @@ public: > ASSERT_VALID_CODE_POINTER(m_value.unpoisoned()); > } > >- static MacroAssemblerCodePtr createFromExecutableAddress(void* value) >+ static MacroAssemblerCodePtr createFromExecutableAddress(const void* value) > { > ASSERT(value); > ASSERT_VALID_CODE_POINTER(value); >diff --git a/Source/JavaScriptCore/bytecode/ArithProfile.h b/Source/JavaScriptCore/bytecode/ArithProfile.h >index 40fad1be3baa43486e98a03cd04d554876ed4dc7..011f8b9b225131893b72b173432f7a3701fa2ce5 100644 >--- a/Source/JavaScriptCore/bytecode/ArithProfile.h >+++ b/Source/JavaScriptCore/bytecode/ArithProfile.h >@@ -102,6 +102,11 @@ public: > ASSERT(lhsObservedType().isEmpty()); > ASSERT(rhsObservedType().isEmpty()); > } >+ >+ ArithProfile(OperandTypes types) >+ : ArithProfile(types.first(), types.second()) >+ { } >+ > ArithProfile() = default; > > static ArithProfile fromInt(uint32_t bits) >diff --git a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h >index c233deb7a24114553257a9ab8fc874e1e8709a6f..fef936e257888ed22ea20f8b18f9d9c7b13265f3 100644 >--- a/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h >+++ b/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h >@@ -32,6 +32,13 @@ namespace JSC { > > class ArrayAllocationProfile { > public: >+ ArrayAllocationProfile() = default; >+ >+ ArrayAllocationProfile(IndexingType recommendedIndexingMode) >+ { >+ initializeIndexingMode(recommendedIndexingMode); >+ } >+ > IndexingType selectIndexingType() > { > JSArray* lastArray = m_lastArray; >diff --git a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp >index a4397b5441f1c2acc6b62a6f0271a2292847e365..b29ad35afa2a4fd050ab2e3e9a6e2f0553a64848 100644 
>--- a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp >+++ b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp >@@ -39,7 +39,7 @@ void BytecodeBasicBlock::shrinkToFit() > m_successors.shrinkToFit(); > } > >-static bool isJumpTarget(OpcodeID opcodeID, const Vector<unsigned, 32>& jumpTargets, unsigned bytecodeOffset) >+static bool isJumpTarget(OpcodeID opcodeID, const Vector<InstructionStream::Offset, 32>& jumpTargets, unsigned bytecodeOffset) > { > if (opcodeID == op_catch) > return true; >@@ -47,11 +47,11 @@ static bool isJumpTarget(OpcodeID opcodeID, const Vector<unsigned, 32>& jumpTarg > return std::binary_search(jumpTargets.begin(), jumpTargets.end(), bytecodeOffset); > } > >-template<typename Block, typename Instruction> >-void BytecodeBasicBlock::computeImpl(Block* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks) >+template<typename Block> >+void BytecodeBasicBlock::computeImpl(Block* codeBlock, const InstructionStream& instructions, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks) > { >- Vector<unsigned, 32> jumpTargets; >- computePreciseJumpTargets(codeBlock, instructionsBegin, instructionCount, jumpTargets); >+ Vector<InstructionStream::Offset, 32> jumpTargets; >+ computePreciseJumpTargets(codeBlock, instructions, jumpTargets); > > auto appendBlock = [&] (std::unique_ptr<BytecodeBasicBlock>&& block) { > block->m_index = basicBlocks.size(); >@@ -66,7 +66,7 @@ void BytecodeBasicBlock::computeImpl(Block* codeBlock, Instruction* instructions > basicBlocks.reserveCapacity(jumpTargets.size() + 2); > > auto entry = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::EntryBlock); >- auto firstBlock = std::make_unique<BytecodeBasicBlock>(0, 0); >+ auto firstBlock = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::EntryBlock); > linkBlocks(entry.get(), firstBlock.get()); > > appendBlock(WTFMove(entry)); >@@ -77,19 +77,18 @@ void 
BytecodeBasicBlock::computeImpl(Block* codeBlock, Instruction* instructions > > bool nextInstructionIsLeader = false; > >- for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount;) { >- OpcodeID opcodeID = Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset]); >- unsigned opcodeLength = opcodeLengths[opcodeID]; >+ for (const auto& instruction : instructions) { >+ auto bytecodeOffset = instruction.offset(); >+ OpcodeID opcodeID = instruction->opcodeID(); > > bool createdBlock = false; > // If the current bytecode is a jump target, then it's the leader of its own basic block. > if (isJumpTarget(opcodeID, jumpTargets, bytecodeOffset) || nextInstructionIsLeader) { >- auto newBlock = std::make_unique<BytecodeBasicBlock>(bytecodeOffset, opcodeLength); >+ auto newBlock = std::make_unique<BytecodeBasicBlock>(instruction); > current = newBlock.get(); > appendBlock(WTFMove(newBlock)); > createdBlock = true; > nextInstructionIsLeader = false; >- bytecodeOffset += opcodeLength; > } > > // If the current bytecode is a branch or a return, then the next instruction is the leader of its own basic block. >@@ -100,8 +99,7 @@ void BytecodeBasicBlock::computeImpl(Block* codeBlock, Instruction* instructions > continue; > > // Otherwise, just add to the length of the current block. >- current->addLength(opcodeLength); >- bytecodeOffset += opcodeLength; >+ current->addLength(instruction->size()); > } > > // Link basic blocks together. 
>@@ -111,24 +109,27 @@ void BytecodeBasicBlock::computeImpl(Block* codeBlock, Instruction* instructions > if (block->isEntryBlock() || block->isExitBlock()) > continue; > >- bool fallsThrough = true; >- for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) { >- OpcodeID opcodeID = Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset]); >- unsigned opcodeLength = opcodeLengths[opcodeID]; >+ bool fallsThrough = true; >+ for (auto bytecodeOffset : block->offsets()) { >+ auto instruction = instructions.at(bytecodeOffset); >+ OpcodeID opcodeID = instruction->opcodeID(); >+ > // If we found a terminal bytecode, link to the exit block. > if (isTerminal(opcodeID)) { >- ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength()); >+ // TODO >+ // ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength()); > linkBlocks(block, exit.get()); > fallsThrough = false; > break; > } > >- // If we found a throw, get the HandlerInfo for this instruction to see where we will jump. >+ // If we found a throw, get the HandlerInfo for this instruction to see where we will jump. > // If there isn't one, treat this throw as a terminal. This is true even if we have a finally > // block because the finally block will create its own catch, which will generate a HandlerInfo. 
> if (isThrow(opcodeID)) { >- ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength()); >- auto* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset); >+ // TODO >+ // ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength()); >+ auto* handler = codeBlock->handlerForBytecodeOffset(instruction.offset()); > fallsThrough = false; > if (!handler) { > linkBlocks(block, exit.get()); >@@ -146,9 +147,10 @@ void BytecodeBasicBlock::computeImpl(Block* codeBlock, Instruction* instructions > > // If we found a branch, link to the block(s) that we jump to. > if (isBranch(opcodeID)) { >- ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength()); >- Vector<unsigned, 1> bytecodeOffsetsJumpedTo; >- findJumpTargetsForBytecodeOffset(codeBlock, instructionsBegin, bytecodeOffset, bytecodeOffsetsJumpedTo); >+ // TODO >+ // ASSERT(bytecodeOffset + opcodeLength == block->leaderOffset() + block->totalLength()); >+ Vector<InstructionStream::Offset, 1> bytecodeOffsetsJumpedTo; >+ findJumpTargetsForInstruction(codeBlock, instruction, bytecodeOffsetsJumpedTo); > > size_t numberOfJumpTargets = bytecodeOffsetsJumpedTo.size(); > ASSERT(numberOfJumpTargets); >@@ -172,7 +174,6 @@ void BytecodeBasicBlock::computeImpl(Block* codeBlock, Instruction* instructions > > break; > } >- bytecodeOffset += opcodeLength; > } > > // If we fall through then link to the next block in program order. 
>@@ -184,19 +185,19 @@ void BytecodeBasicBlock::computeImpl(Block* codeBlock, Instruction* instructions > } > > appendBlock(WTFMove(exit)); >- >+ > for (auto& basicBlock : basicBlocks) > basicBlock->shrinkToFit(); > } > >-void BytecodeBasicBlock::compute(CodeBlock* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks) >+void BytecodeBasicBlock::compute(CodeBlock* codeBlock, const InstructionStream& instructions, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks) > { >- computeImpl(codeBlock, instructionsBegin, instructionCount, basicBlocks); >+ computeImpl(codeBlock, instructions, basicBlocks); > } > >-void BytecodeBasicBlock::compute(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks) >+void BytecodeBasicBlock::compute(UnlinkedCodeBlock* codeBlock, const InstructionStream& instructions, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks) > { >- BytecodeBasicBlock::computeImpl(codeBlock, instructionsBegin, instructionCount, basicBlocks); >+ computeImpl(codeBlock, instructions, basicBlocks); > } > > } // namespace JSC >diff --git a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h >index fb81650ca1f6516e9b61bb0f782f2c23b66b8be9..3697934ca230fe2957cd134a1d5d6a858edb9225 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h >+++ b/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h >@@ -25,6 +25,7 @@ > > #pragma once > >+#include "InstructionStream.h" > #include <limits.h> > #include <wtf/FastBitVector.h> > #include <wtf/Vector.h> >@@ -34,23 +35,22 @@ namespace JSC { > class CodeBlock; > class UnlinkedCodeBlock; > struct Instruction; >-struct UnlinkedInstruction; > > class BytecodeBasicBlock { > WTF_MAKE_FAST_ALLOCATED; > public: > enum SpecialBlockType { EntryBlock, ExitBlock }; >- BytecodeBasicBlock(unsigned 
start, unsigned length); >+ BytecodeBasicBlock(const InstructionStream::Ref&); > BytecodeBasicBlock(SpecialBlockType); > void shrinkToFit(); > > bool isEntryBlock() { return !m_leaderOffset && !m_totalLength; } > bool isExitBlock() { return m_leaderOffset == UINT_MAX && m_totalLength == UINT_MAX; } > >- unsigned leaderOffset() { return m_leaderOffset; } >- unsigned totalLength() { return m_totalLength; } >+ unsigned leaderOffset() const { return m_leaderOffset; } >+ unsigned totalLength() const { return m_totalLength; } > >- const Vector<unsigned>& offsets() const { return m_offsets; } >+ const Vector<InstructionStream::Offset>& offsets() const { return m_offsets; } > > const Vector<BytecodeBasicBlock*>& successors() const { return m_successors; } > >@@ -59,30 +59,30 @@ public: > > unsigned index() const { return m_index; } > >- static void compute(CodeBlock*, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>&); >- static void compute(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>&); >+ static void compute(CodeBlock*, const InstructionStream& instructions, Vector<std::unique_ptr<BytecodeBasicBlock>>&); >+ static void compute(UnlinkedCodeBlock*, const InstructionStream& instructions, Vector<std::unique_ptr<BytecodeBasicBlock>>&); > > private: >- template<typename Block, typename Instruction> static void computeImpl(Block* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks); >+ template<typename Block> static void computeImpl(Block* codeBlock, const InstructionStream& instructions, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks); > > void addSuccessor(BytecodeBasicBlock* block) { m_successors.append(block); } > >- void addLength(unsigned); >+ void addLength(size_t); > >- unsigned m_leaderOffset; >- unsigned m_totalLength; >+ 
InstructionStream::Offset m_leaderOffset; >+ size_t m_totalLength; > unsigned m_index; > >- Vector<unsigned> m_offsets; >+ Vector<InstructionStream::Offset> m_offsets; > Vector<BytecodeBasicBlock*> m_successors; > > FastBitVector m_in; > FastBitVector m_out; > }; > >-inline BytecodeBasicBlock::BytecodeBasicBlock(unsigned start, unsigned length) >- : m_leaderOffset(start) >- , m_totalLength(length) >+inline BytecodeBasicBlock::BytecodeBasicBlock(const InstructionStream::Ref& instruction) >+ : m_leaderOffset(instruction.offset()) >+ , m_totalLength(instruction->size()) > { > m_offsets.append(m_leaderOffset); > } >@@ -93,7 +93,7 @@ inline BytecodeBasicBlock::BytecodeBasicBlock(BytecodeBasicBlock::SpecialBlockTy > { > } > >-inline void BytecodeBasicBlock::addLength(unsigned bytecodeLength) >+inline void BytecodeBasicBlock::addLength(size_t bytecodeLength) > { > m_offsets.append(m_leaderOffset + m_totalLength); > m_totalLength += bytecodeLength; >diff --git a/Source/JavaScriptCore/bytecode/BytecodeDumper.cpp b/Source/JavaScriptCore/bytecode/BytecodeDumper.cpp >index 1eddbf361f94c865a80124848b9a3235b96c1bab..7c4421097f350abc26d5ed768c9c1ca3f545f9a2 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeDumper.cpp >+++ b/Source/JavaScriptCore/bytecode/BytecodeDumper.cpp >@@ -28,6 +28,7 @@ > #include "BytecodeDumper.h" > > #include "ArithProfile.h" >+#include "BytecodeStructs.h" > #include "CallLinkStatus.h" > #include "CodeBlock.h" > #include "Error.h" >@@ -41,203 +42,6 @@ > > namespace JSC { > >-static StructureID getStructureID(const Instruction& instruction) >-{ >- return instruction.u.structureID; >-} >- >-static StructureID getStructureID(const UnlinkedInstruction&) >-{ >- return 0; >-} >- >-static Special::Pointer getSpecialPointer(const Instruction& instruction) >-{ >- return instruction.u.specialPointer; >-} >- >-static Special::Pointer getSpecialPointer(const UnlinkedInstruction& instruction) >-{ >- return static_cast<Special::Pointer>(instruction.u.operand); >-} 
>- >-static PutByIdFlags getPutByIdFlags(const Instruction& instruction) >-{ >- return instruction.u.putByIdFlags; >-} >- >-static PutByIdFlags getPutByIdFlags(const UnlinkedInstruction& instruction) >-{ >- return static_cast<PutByIdFlags>(instruction.u.operand); >-} >- >-static ToThisStatus getToThisStatus(const Instruction& instruction) >-{ >- return instruction.u.toThisStatus; >-} >- >-static ToThisStatus getToThisStatus(const UnlinkedInstruction& instruction) >-{ >- return static_cast<ToThisStatus>(instruction.u.operand); >-} >- >-static void* getPointer(const Instruction& instruction) >-{ >- return instruction.u.pointer; >-} >- >-static void* getPointer(const UnlinkedInstruction&) >-{ >- return nullptr; >-} >- >-static StructureChain* getStructureChain(const Instruction& instruction) >-{ >- return instruction.u.structureChain.get(); >-} >- >-static StructureChain* getStructureChain(const UnlinkedInstruction&) >-{ >- return nullptr; >-} >- >-static Structure* getStructure(const Instruction& instruction) >-{ >- return instruction.u.structure.get(); >-} >- >-static Structure* getStructure(const UnlinkedInstruction&) >-{ >- return nullptr; >-} >- >-static LLIntCallLinkInfo* getCallLinkInfo(const Instruction& instruction) >-{ >- return instruction.u.callLinkInfo; >-} >- >-static LLIntCallLinkInfo* getCallLinkInfo(const UnlinkedInstruction&) >-{ >- return nullptr; >-} >- >-static BasicBlockLocation* getBasicBlockLocation(const Instruction& instruction) >-{ >- return instruction.u.basicBlockLocation; >-} >- >-static BasicBlockLocation* getBasicBlockLocation(const UnlinkedInstruction&) >-{ >- return nullptr; >-} >- >-template<class Block> >-void* BytecodeDumper<Block>::actualPointerFor(Special::Pointer) const >-{ >- return nullptr; >-} >- >-template<> >-void* BytecodeDumper<CodeBlock>::actualPointerFor(Special::Pointer pointer) const >-{ >- return block()->globalObject()->actualPointerFor(pointer); >-} >- >-static void beginDumpProfiling(PrintStream& out, bool& 
hasPrintedProfiling) >-{ >- if (hasPrintedProfiling) { >- out.print("; "); >- return; >- } >- >- out.print(" "); >- hasPrintedProfiling = true; >-} >- >-template<class Block> >-void BytecodeDumper<Block>::dumpValueProfiling(PrintStream&, const typename Block::Instruction*& it, bool&) >-{ >- ++it; >-} >- >-template<> >-void BytecodeDumper<CodeBlock>::dumpValueProfiling(PrintStream& out, const typename CodeBlock::Instruction*& it, bool& hasPrintedProfiling) >-{ >- ConcurrentJSLocker locker(block()->m_lock); >- >- ++it; >- CString description = it->u.profile->briefDescription(locker); >- if (!description.length()) >- return; >- beginDumpProfiling(out, hasPrintedProfiling); >- out.print(description); >-} >- >-template<class Block> >-void BytecodeDumper<Block>::dumpArrayProfiling(PrintStream&, const typename Block::Instruction*& it, bool&) >-{ >- ++it; >-} >- >-template<> >-void BytecodeDumper<CodeBlock>::dumpArrayProfiling(PrintStream& out, const typename CodeBlock::Instruction*& it, bool& hasPrintedProfiling) >-{ >- ConcurrentJSLocker locker(block()->m_lock); >- >- ++it; >- if (!it->u.arrayProfile) >- return; >- CString description = it->u.arrayProfile->briefDescription(locker, block()); >- if (!description.length()) >- return; >- beginDumpProfiling(out, hasPrintedProfiling); >- out.print(description); >-} >- >-template<class Block> >-void BytecodeDumper<Block>::dumpProfilesForBytecodeOffset(PrintStream&, unsigned, bool&) >-{ >-} >- >-static void dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling) >-{ >- if (!profile || !profile->m_counter) >- return; >- >- beginDumpProfiling(out, hasPrintedProfiling); >- out.print(name, profile->m_counter); >-} >- >-static void dumpArithProfile(PrintStream& out, ArithProfile* profile, bool& hasPrintedProfiling) >-{ >- if (!profile) >- return; >- >- beginDumpProfiling(out, hasPrintedProfiling); >- out.print("results: ", *profile); >-} >- >-template<> >-void 
BytecodeDumper<CodeBlock>::dumpProfilesForBytecodeOffset(PrintStream& out, unsigned location, bool& hasPrintedProfiling) >-{ >- dumpRareCaseProfile(out, "rare case: ", block()->rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling); >- { >- dumpArithProfile(out, block()->arithProfileForBytecodeOffset(location), hasPrintedProfiling); >- } >- >-#if ENABLE(DFG_JIT) >- Vector<DFG::FrequentExitSite> exitSites = block()->unlinkedCodeBlock()->exitProfile().exitSitesFor(location); >- if (!exitSites.isEmpty()) { >- out.print(" !! frequent exits: "); >- CommaPrinter comma; >- for (auto& exitSite : exitSites) >- out.print(comma, exitSite.kind(), " ", exitSite.jitType()); >- } >-#else // ENABLE(DFG_JIT) >- UNUSED_PARAM(location); >-#endif // ENABLE(DFG_JIT) >-} >- > template<class Block> > VM* BytecodeDumper<Block>::vm() const > { >@@ -250,17 +54,12 @@ const Identifier& BytecodeDumper<Block>::identifier(int index) const > return block()->identifier(index); > } > >-template<class Instruction> >-static void printLocationAndOp(PrintStream& out, int location, const Instruction*&, const char* op) >-{ >- out.printf("[%4d] %-17s ", location, op); >-} >- > static ALWAYS_INLINE bool isConstantRegisterIndex(int index) > { > return index >= FirstConstantRegisterIndex; > } > >+/* > NEVER_INLINE static const char* debugHookName(int debugHookType) > { > switch (static_cast<DebugHookType>(debugHookType)) { >@@ -283,6 +82,7 @@ NEVER_INLINE static const char* debugHookName(int debugHookType) > RELEASE_ASSERT_NOT_REACHED(); > return ""; > } >+*/ > > template<class Block> > CString BytecodeDumper<Block>::registerName(int r) const >@@ -293,10 +93,12 @@ CString BytecodeDumper<Block>::registerName(int r) const > return toCString(VirtualRegister(r)); > } > >+/* > static CString idName(int id0, const Identifier& ident) > { > return toCString(ident.impl(), "(@id", id0, ")"); > } >+*/ > > template<class Block> > CString BytecodeDumper<Block>::constantName(int index) const >@@ -306,1473 
+108,43 @@ CString BytecodeDumper<Block>::constantName(int index) const > } > > template<class Block> >-void BytecodeDumper<Block>::printUnaryOp(PrintStream& out, int location, const typename Block::Instruction*& it, const char* op) >-{ >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- >- printLocationAndOp(out, location, it, op); >- out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); >-} >- >-template<class Block> >-void BytecodeDumper<Block>::printBinaryOp(PrintStream& out, int location, const typename Block::Instruction*& it, const char* op) >+void BytecodeDumper<Block>::printLocationAndOp(InstructionStream::Offset location, const char* op) > { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- printLocationAndOp(out, location, it, op); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); >+ m_out.printf("[%4lu] %-17s ", location, op); > } > > template<class Block> >-void BytecodeDumper<Block>::printConditionalJump(PrintStream& out, const typename Block::Instruction*, const typename Block::Instruction*& it, int location, const char* op) >+void BytecodeDumper<Block>::dumpBytecode(const InstructionStream::Ref& it, const ICStatusMap&) > { >- int r0 = (++it)->u.operand; >- int offset = (++it)->u.operand; >- printLocationAndOp(out, location, it, op); >- out.printf("%s, %d(->%d)", registerName(r0).data(), offset, location + offset); >+ ::JSC::dumpBytecode(this, it.offset(), it.ptr()); >+ m_out.print("\n"); > } > > template<class Block> >-void BytecodeDumper<Block>::printCompareJump(PrintStream& out, const typename Block::Instruction*, const typename Block::Instruction*& it, int location, const char* op) >+void BytecodeDumper<Block>::dumpBytecode(Block* block, PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap) > { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int offset = (++it)->u.operand; 
>- printLocationAndOp(out, location, it, op); >- out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset); >+ BytecodeDumper dumper(block, out); >+ dumper.dumpBytecode(it, statusMap); > } > > template<class Block> >-void BytecodeDumper<Block>::printGetByIdOp(PrintStream& out, int location, const typename Block::Instruction*& it) >-{ >- const char* op; >- switch (Interpreter::getOpcodeID(*it)) { >- case op_get_by_id: >- op = "get_by_id"; >- break; >- case op_get_by_id_proto_load: >- op = "get_by_id_proto_load"; >- break; >- case op_get_by_id_unset: >- op = "get_by_id_unset"; >- break; >- case op_get_array_length: >- op = "array_length"; >- break; >- default: >- RELEASE_ASSERT_NOT_REACHED(); >-#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE) >- op = 0; >-#endif >- } >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, op); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data()); >- it += 4; // Increment up to the value profiler. 
>-} >- >-static void dumpStructure(PrintStream& out, const char* name, Structure* structure, const Identifier& ident) >-{ >- if (!structure) >- return; >- >- out.printf("%s = %p", name, structure); >- >- PropertyOffset offset = structure->getConcurrently(ident.impl()); >- if (offset != invalidOffset) >- out.printf(" (offset = %d)", offset); >-} >- >-static void dumpChain(PrintStream& out, StructureChain* chain, const Identifier& ident) >-{ >- out.printf("chain = %p: [", chain); >- bool first = true; >- for (WriteBarrier<Structure>* currentStructure = chain->head(); *currentStructure; ++currentStructure) { >- if (first) >- first = false; >- else >- out.printf(", "); >- dumpStructure(out, "struct", currentStructure->get(), ident); >- } >- out.printf("]"); >-} >- >-template<class Block> >-void BytecodeDumper<Block>::printGetByIdCacheStatus(PrintStream& out, int location, const ICStatusMap& statusMap) >-{ >- const auto* instruction = instructionsBegin() + location; >- >- const Identifier& ident = identifier(instruction[3].u.operand); >- >- UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations. 
>- >- if (Interpreter::getOpcodeID(instruction[0]) == op_get_array_length) >- out.printf(" llint(array_length)"); >- else if (StructureID structureID = getStructureID(instruction[4])) { >- Structure* structure = vm()->heap.structureIDTable().get(structureID); >- out.printf(" llint("); >- dumpStructure(out, "struct", structure, ident); >- out.printf(")"); >- if (Interpreter::getOpcodeID(instruction[0]) == op_get_by_id_proto_load) >- out.printf(" proto(%p)", getPointer(instruction[6])); >- } >- >-#if ENABLE(JIT) >- if (StructureStubInfo* stubPtr = statusMap.get(CodeOrigin(location)).stubInfo) { >- StructureStubInfo& stubInfo = *stubPtr; >- if (stubInfo.resetByGC) >- out.print(" (Reset By GC)"); >- >- out.printf(" jit("); >- >- Structure* baseStructure = nullptr; >- PolymorphicAccess* stub = nullptr; >- >- switch (stubInfo.cacheType) { >- case CacheType::GetByIdSelf: >- out.printf("self"); >- baseStructure = stubInfo.u.byIdSelf.baseObjectStructure.get(); >- break; >- case CacheType::Stub: >- out.printf("stub"); >- stub = stubInfo.u.stub; >- break; >- case CacheType::Unset: >- out.printf("unset"); >- break; >- case CacheType::ArrayLength: >- out.printf("ArrayLength"); >- break; >- default: >- RELEASE_ASSERT_NOT_REACHED(); >- break; >- } >- >- if (baseStructure) { >- out.printf(", "); >- dumpStructure(out, "struct", baseStructure, ident); >- } >- >- if (stub) >- out.print(", ", *stub); >- >- out.printf(")"); >- } >-#else >- UNUSED_PARAM(statusMap); >-#endif >-} >- >-template<class Block> >-void BytecodeDumper<Block>::printPutByIdCacheStatus(PrintStream& out, int location, const ICStatusMap& statusMap) >-{ >- const auto* instruction = instructionsBegin() + location; >- >- const Identifier& ident = identifier(instruction[2].u.operand); >- >- UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations. 
>- >- out.print(", ", getPutByIdFlags(instruction[8])); >- >- if (StructureID structureID = getStructureID(instruction[4])) { >- Structure* structure = vm()->heap.structureIDTable().get(structureID); >- out.print(" llint("); >- if (StructureID newStructureID = getStructureID(instruction[6])) { >- Structure* newStructure = vm()->heap.structureIDTable().get(newStructureID); >- dumpStructure(out, "prev", structure, ident); >- out.print(", "); >- dumpStructure(out, "next", newStructure, ident); >- if (StructureChain* chain = getStructureChain(instruction[7])) { >- out.print(", "); >- dumpChain(out, chain, ident); >- } >- } else >- dumpStructure(out, "struct", structure, ident); >- out.print(")"); >- } >- >-#if ENABLE(JIT) >- if (StructureStubInfo* stubPtr = statusMap.get(CodeOrigin(location)).stubInfo) { >- StructureStubInfo& stubInfo = *stubPtr; >- if (stubInfo.resetByGC) >- out.print(" (Reset By GC)"); >- >- out.printf(" jit("); >- >- switch (stubInfo.cacheType) { >- case CacheType::PutByIdReplace: >- out.print("replace, "); >- dumpStructure(out, "struct", stubInfo.u.byIdSelf.baseObjectStructure.get(), ident); >- break; >- case CacheType::Stub: { >- out.print("stub, ", *stubInfo.u.stub); >- break; >- } >- case CacheType::Unset: >- out.printf("unset"); >- break; >- default: >- RELEASE_ASSERT_NOT_REACHED(); >- break; >- } >- out.printf(")"); >- } >-#else >- UNUSED_PARAM(statusMap); >-#endif >-} >- >-template<class Block> >-void BytecodeDumper<Block>::printInByIdCacheStatus(PrintStream& out, int location, const ICStatusMap& statusMap) >-{ >- const auto* instruction = instructionsBegin() + location; >- >- const Identifier& ident = identifier(instruction[3].u.operand); >- >- UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations. 
>- >-#if ENABLE(JIT) >- if (StructureStubInfo* stubPtr = statusMap.get(CodeOrigin(location)).stubInfo) { >- StructureStubInfo& stubInfo = *stubPtr; >- if (stubInfo.resetByGC) >- out.print(" (Reset By GC)"); >- >- out.printf(" jit("); >- >- Structure* baseStructure = nullptr; >- PolymorphicAccess* stub = nullptr; >- >- switch (stubInfo.cacheType) { >- case CacheType::InByIdSelf: >- out.printf("self"); >- baseStructure = stubInfo.u.byIdSelf.baseObjectStructure.get(); >- break; >- case CacheType::Stub: >- out.printf("stub"); >- stub = stubInfo.u.stub; >- break; >- case CacheType::Unset: >- out.printf("unset"); >- break; >- default: >- RELEASE_ASSERT_NOT_REACHED(); >- break; >- } >- >- if (baseStructure) { >- out.printf(", "); >- dumpStructure(out, "struct", baseStructure, ident); >- } >- >- if (stub) >- out.print(", ", *stub); >- >- out.printf(")"); >- } >-#else >- UNUSED_PARAM(out); >- UNUSED_PARAM(statusMap); >-#endif >-} >- >-#if ENABLE(JIT) >-template<typename Block> >-void BytecodeDumper<Block>::dumpCallLinkStatus(PrintStream&, unsigned, const ICStatusMap&) >-{ >-} >- >-template<> >-void BytecodeDumper<CodeBlock>::dumpCallLinkStatus(PrintStream& out, unsigned location, const ICStatusMap& statusMap) >-{ >- if (block()->jitType() != JITCode::FTLJIT) >- out.print(" status(", CallLinkStatus::computeFor(block(), location, statusMap), ")"); >-} >-#endif >- >-template<class Block> >-void BytecodeDumper<Block>::printCallOp(PrintStream& out, int location, const typename Block::Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling, const ICStatusMap& statusMap) >-{ >- int dst = (++it)->u.operand; >- int func = (++it)->u.operand; >- int argCount = (++it)->u.operand; >- int registerOffset = (++it)->u.operand; >- printLocationAndOp(out, location, it, op); >- out.print(registerName(dst), ", ", registerName(func), ", ", argCount, ", ", registerOffset); >- out.print(" (this at ", virtualRegisterForArgument(0, -registerOffset), ")"); >- if 
(cacheDumpMode == DumpCaches) { >- LLIntCallLinkInfo* callLinkInfo = getCallLinkInfo(it[1]); >- if (callLinkInfo->lastSeenCallee) { >- JSObject* object = callLinkInfo->lastSeenCallee.get(); >- if (auto* function = jsDynamicCast<JSFunction*>(*vm(), object)) >- out.printf(" llint(%p, exec %p)", function, function->executable()); >- else >- out.printf(" llint(%p)", object); >- } >-#if ENABLE(JIT) >- if (CallLinkInfo* info = statusMap.get(CodeOrigin(location)).callLinkInfo) { >- if (info->haveLastSeenCallee()) { >- JSObject* object = info->lastSeenCallee(); >- if (auto* function = jsDynamicCast<JSFunction*>(*vm(), object)) >- out.printf(" jit(%p, exec %p)", function, function->executable()); >- else >- out.printf(" jit(%p)", object); >- } >- } >- >- dumpCallLinkStatus(out, location, statusMap); >-#else >- UNUSED_PARAM(statusMap); >-#endif >- } >- ++it; >- ++it; >- dumpArrayProfiling(out, it, hasPrintedProfiling); >- dumpValueProfiling(out, it, hasPrintedProfiling); >-} >- >-template<class Block> >-void BytecodeDumper<Block>::printPutByIdOp(PrintStream& out, int location, const typename Block::Instruction*& it, const char* op) >-{ >- int r0 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- printLocationAndOp(out, location, it, op); >- out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data()); >- it += 5; >-} >- >-template<class Block> >-void BytecodeDumper<Block>::printLocationOpAndRegisterOperand(PrintStream& out, int location, const typename Block::Instruction*& it, const char* op, int operand) >-{ >- printLocationAndOp(out, location, it, op); >- out.printf("%s", registerName(operand).data()); >-} >- >-template<class Block> >-void BytecodeDumper<Block>::dumpBytecode(PrintStream& out, const typename Block::Instruction* begin, const typename Block::Instruction*& it, const ICStatusMap& statusMap) >-{ >- int location = it - begin; >- bool hasPrintedProfiling = false; >- OpcodeID 
opcode = Interpreter::getOpcodeID(*it); >- switch (opcode) { >- case op_enter: { >- printLocationAndOp(out, location, it, "enter"); >- break; >- } >- case op_get_scope: { >- int r0 = (++it)->u.operand; >- printLocationOpAndRegisterOperand(out, location, it, "get_scope", r0); >- break; >- } >- case op_create_direct_arguments: { >- int r0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "create_direct_arguments"); >- out.printf("%s", registerName(r0).data()); >- break; >- } >- case op_create_scoped_arguments: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "create_scoped_arguments"); >- out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); >- break; >- } >- case op_create_cloned_arguments: { >- int r0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "create_cloned_arguments"); >- out.printf("%s", registerName(r0).data()); >- break; >- } >- case op_argument_count: { >- int r0 = (++it)->u.operand; >- printLocationOpAndRegisterOperand(out, location, it, "argument_count", r0); >- break; >- } >- case op_get_argument: { >- int r0 = (++it)->u.operand; >- int index = (++it)->u.operand; >- printLocationOpAndRegisterOperand(out, location, it, "argument", r0); >- out.printf(", %d", index); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_create_rest: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- unsigned argumentOffset = (++it)->u.unsignedValue; >- printLocationAndOp(out, location, it, "create_rest"); >- out.printf("%s, %s, ", registerName(r0).data(), registerName(r1).data()); >- out.printf("ArgumentsOffset: %u", argumentOffset); >- break; >- } >- case op_get_rest_length: { >- int r0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "get_rest_length"); >- out.printf("%s, ", registerName(r0).data()); >- unsigned argumentOffset = (++it)->u.unsignedValue; >- out.printf("ArgumentsOffset: %u", argumentOffset); >- break; 
>- } >- case op_create_this: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- unsigned inferredInlineCapacity = (++it)->u.operand; >- unsigned cachedFunction = (++it)->u.operand; >- printLocationAndOp(out, location, it, "create_this"); >- out.printf("%s, %s, %u, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity, cachedFunction); >- break; >- } >- case op_to_this: { >- int r0 = (++it)->u.operand; >- printLocationOpAndRegisterOperand(out, location, it, "to_this", r0); >- Structure* structure = getStructure(*(++it)); >- if (structure) >- out.print(", cache(struct = ", RawPointer(structure), ")"); >- out.print(", ", getToThisStatus(*(++it))); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_check_tdz: { >- int r0 = (++it)->u.operand; >- printLocationOpAndRegisterOperand(out, location, it, "op_check_tdz", r0); >- break; >- } >- case op_new_object: { >- int r0 = (++it)->u.operand; >- unsigned inferredInlineCapacity = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_object"); >- out.printf("%s, %u", registerName(r0).data(), inferredInlineCapacity); >- ++it; // Skip object allocation profile. >- break; >- } >- case op_new_array: { >- int dst = (++it)->u.operand; >- int argv = (++it)->u.operand; >- int argc = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_array"); >- out.printf("%s, %s, %d", registerName(dst).data(), registerName(argv).data(), argc); >- ++it; // Skip array allocation profile. 
>- break; >- } >- case op_new_array_with_spread: { >- int dst = (++it)->u.operand; >- int argv = (++it)->u.operand; >- int argc = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_array_with_spread"); >- out.printf("%s, %s, %d, ", registerName(dst).data(), registerName(argv).data(), argc); >- unsigned bitVectorIndex = (++it)->u.unsignedValue; >- const BitVector& bitVector = block()->bitVector(bitVectorIndex); >- out.print("BitVector:", bitVectorIndex, ":"); >- for (unsigned i = 0; i < static_cast<unsigned>(argc); i++) { >- if (bitVector.get(i)) >- out.print("1"); >- else >- out.print("0"); >- } >- break; >- } >- case op_spread: { >- int dst = (++it)->u.operand; >- int arg = (++it)->u.operand; >- printLocationAndOp(out, location, it, "spread"); >- out.printf("%s, %s", registerName(dst).data(), registerName(arg).data()); >- break; >- } >- case op_new_array_with_size: { >- int dst = (++it)->u.operand; >- int length = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_array_with_size"); >- out.printf("%s, %s", registerName(dst).data(), registerName(length).data()); >- ++it; // Skip array allocation profile. >- break; >- } >- case op_new_array_buffer: { >- int dst = (++it)->u.operand; >- int array = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_array_buffer"); >- out.printf("%s, %s", registerName(dst).data(), registerName(array).data()); >- ++it; // Skip array allocation profile. 
>- break; >- } >- case op_new_regexp: { >- int r0 = (++it)->u.operand; >- int re0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_regexp"); >- out.printf("%s, %s", registerName(r0).data(), registerName(re0).data()); >- break; >- } >- case op_mov: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "mov"); >- out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); >- break; >- } >- case op_profile_type: { >- int r0 = (++it)->u.operand; >- ++it; >- ++it; >- ++it; >- ++it; >- printLocationAndOp(out, location, it, "op_profile_type"); >- out.printf("%s", registerName(r0).data()); >- break; >- } >- case op_profile_control_flow: { >- BasicBlockLocation* basicBlockLocation = getBasicBlockLocation(*(++it)); >- printLocationAndOp(out, location, it, "profile_control_flow"); >- if (basicBlockLocation) >- out.printf("[%d, %d]", basicBlockLocation->startOffset(), basicBlockLocation->endOffset()); >- break; >- } >- case op_not: { >- printUnaryOp(out, location, it, "not"); >- break; >- } >- case op_eq: { >- printBinaryOp(out, location, it, "eq"); >- break; >- } >- case op_eq_null: { >- printUnaryOp(out, location, it, "eq_null"); >- break; >- } >- case op_neq: { >- printBinaryOp(out, location, it, "neq"); >- break; >- } >- case op_neq_null: { >- printUnaryOp(out, location, it, "neq_null"); >- break; >- } >- case op_stricteq: { >- printBinaryOp(out, location, it, "stricteq"); >- break; >- } >- case op_nstricteq: { >- printBinaryOp(out, location, it, "nstricteq"); >- break; >- } >- case op_less: { >- printBinaryOp(out, location, it, "less"); >- break; >- } >- case op_lesseq: { >- printBinaryOp(out, location, it, "lesseq"); >- break; >- } >- case op_greater: { >- printBinaryOp(out, location, it, "greater"); >- break; >- } >- case op_greatereq: { >- printBinaryOp(out, location, it, "greatereq"); >- break; >- } >- case op_below: { >- printBinaryOp(out, location, it, "below"); >- break; >- } >- case 
op_beloweq: { >- printBinaryOp(out, location, it, "beloweq"); >- break; >- } >- case op_inc: { >- int r0 = (++it)->u.operand; >- printLocationOpAndRegisterOperand(out, location, it, "inc", r0); >- break; >- } >- case op_dec: { >- int r0 = (++it)->u.operand; >- printLocationOpAndRegisterOperand(out, location, it, "dec", r0); >- break; >- } >- case op_to_number: { >- printUnaryOp(out, location, it, "to_number"); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_to_string: { >- printUnaryOp(out, location, it, "to_string"); >- break; >- } >- case op_to_object: { >- printUnaryOp(out, location, it, "to_object"); >- int id0 = (++it)->u.operand; >- out.printf(" %s", idName(id0, identifier(id0)).data()); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_negate: { >- printUnaryOp(out, location, it, "negate"); >- ++it; // op_negate has an extra operand for the ArithProfile. >- break; >- } >- case op_add: { >- printBinaryOp(out, location, it, "add"); >- ++it; >- break; >- } >- case op_mul: { >- printBinaryOp(out, location, it, "mul"); >- ++it; >- break; >- } >- case op_div: { >- printBinaryOp(out, location, it, "div"); >- ++it; >- break; >- } >- case op_mod: { >- printBinaryOp(out, location, it, "mod"); >- break; >- } >- case op_pow: { >- printBinaryOp(out, location, it, "pow"); >- break; >- } >- case op_sub: { >- printBinaryOp(out, location, it, "sub"); >- ++it; >- break; >- } >- case op_lshift: { >- printBinaryOp(out, location, it, "lshift"); >- break; >- } >- case op_rshift: { >- printBinaryOp(out, location, it, "rshift"); >- break; >- } >- case op_urshift: { >- printBinaryOp(out, location, it, "urshift"); >- break; >- } >- case op_bitand: { >- printBinaryOp(out, location, it, "bitand"); >- ++it; >- break; >- } >- case op_bitxor: { >- printBinaryOp(out, location, it, "bitxor"); >- ++it; >- break; >- } >- case op_bitor: { >- printBinaryOp(out, location, it, "bitor"); >- ++it; >- break; >- } >- case 
op_overrides_has_instance: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "overrides_has_instance"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); >- break; >- } >- case op_instanceof: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "instanceof"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); >- break; >- } >- case op_instanceof_custom: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- int r3 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "instanceof_custom"); >- out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data()); >- break; >- } >- case op_unsigned: { >- printUnaryOp(out, location, it, "unsigned"); >- break; >- } >- case op_typeof: { >- printUnaryOp(out, location, it, "typeof"); >- break; >- } >- case op_is_empty: { >- printUnaryOp(out, location, it, "is_empty"); >- break; >- } >- case op_is_undefined: { >- printUnaryOp(out, location, it, "is_undefined"); >- break; >- } >- case op_is_boolean: { >- printUnaryOp(out, location, it, "is_boolean"); >- break; >- } >- case op_is_number: { >- printUnaryOp(out, location, it, "is_number"); >- break; >- } >- case op_is_cell_with_type: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int type = (++it)->u.operand; >- printLocationAndOp(out, location, it, "is_cell_with_type"); >- out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), type); >- break; >- } >- case op_is_object: { >- printUnaryOp(out, location, it, "is_object"); >- break; >- } >- case op_is_object_or_null: { >- printUnaryOp(out, location, it, "is_object_or_null"); >- break; >- } >- case 
op_is_function: { >- printUnaryOp(out, location, it, "is_function"); >- break; >- } >- case op_in_by_id: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "in_by_id"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data()); >- printInByIdCacheStatus(out, location, statusMap); >- break; >- } >- case op_in_by_val: { >- printBinaryOp(out, location, it, "in_by_val"); >- dumpArrayProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_try_get_by_id: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "try_get_by_id"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data()); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_get_by_id_direct: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "get_by_id_direct"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data()); >- it += 2; // Increment up to the value profiler. 
>- printGetByIdCacheStatus(out, location, statusMap); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_get_by_id: >- case op_get_by_id_proto_load: >- case op_get_by_id_unset: >- case op_get_array_length: { >- printGetByIdOp(out, location, it); >- printGetByIdCacheStatus(out, location, statusMap); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_get_by_id_with_this: { >- printLocationAndOp(out, location, it, "get_by_id_with_this"); >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), idName(id0, identifier(id0)).data()); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_get_by_val_with_this: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- int r3 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "get_by_val_with_this"); >- out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data()); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_put_by_id: { >- printPutByIdOp(out, location, it, "put_by_id"); >- printPutByIdCacheStatus(out, location, statusMap); >- break; >- } >- case op_put_by_id_with_this: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "put_by_id_with_this"); >- out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data(), registerName(r2).data()); >- break; >- } >- case op_put_by_val_with_this: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- int r3 = (++it)->u.operand; >- printLocationAndOp(out, location, it, 
"put_by_val_with_this"); >- out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data()); >- break; >- } >- case op_put_getter_by_id: { >- int r0 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- int n0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "put_getter_by_id"); >- out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data()); >- break; >- } >- case op_put_setter_by_id: { >- int r0 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- int n0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "put_setter_by_id"); >- out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data()); >- break; >- } >- case op_put_getter_setter_by_id: { >- int r0 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- int n0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "put_getter_setter_by_id"); >- out.printf("%s, %s, %d, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data(), registerName(r2).data()); >- break; >- } >- case op_put_getter_by_val: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int n0 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "put_getter_by_val"); >- out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data()); >- break; >- } >- case op_put_setter_by_val: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int n0 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "put_setter_by_val"); >- out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data()); >- 
break; >- } >- case op_define_data_property: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- int r3 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "define_data_property"); >- out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data()); >- break; >- } >- case op_define_accessor_property: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- int r3 = (++it)->u.operand; >- int r4 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "define_accessor_property"); >- out.printf("%s, %s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), registerName(r4).data()); >- break; >- } >- case op_del_by_id: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "del_by_id"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data()); >- break; >- } >- case op_get_by_val: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "get_by_val"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); >- dumpArrayProfiling(out, it, hasPrintedProfiling); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_put_by_val: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "put_by_val"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); >- dumpArrayProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_put_by_val_direct: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; 
>- printLocationAndOp(out, location, it, "put_by_val_direct"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); >- dumpArrayProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_del_by_val: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int r2 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "del_by_val"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); >- break; >- } >- case op_jmp: { >- int offset = (++it)->u.operand; >- printLocationAndOp(out, location, it, "jmp"); >- out.printf("%d(->%d)", offset, location + offset); >- break; >- } >- case op_jtrue: { >- printConditionalJump(out, begin, it, location, "jtrue"); >- break; >- } >- case op_jfalse: { >- printConditionalJump(out, begin, it, location, "jfalse"); >- break; >- } >- case op_jeq_null: { >- printConditionalJump(out, begin, it, location, "jeq_null"); >- break; >- } >- case op_jneq_null: { >- printConditionalJump(out, begin, it, location, "jneq_null"); >- break; >- } >- case op_jneq_ptr: { >- int r0 = (++it)->u.operand; >- Special::Pointer pointer = getSpecialPointer(*(++it)); >- int offset = (++it)->u.operand; >- printLocationAndOp(out, location, it, "jneq_ptr"); >- out.printf("%s, %d (%p), %d(->%d)", registerName(r0).data(), pointer, actualPointerFor(pointer), offset, location + offset); >- ++it; >- break; >- } >- case op_jless: { >- printCompareJump(out, begin, it, location, "jless"); >- break; >- } >- case op_jlesseq: { >- printCompareJump(out, begin, it, location, "jlesseq"); >- break; >- } >- case op_jgreater: { >- printCompareJump(out, begin, it, location, "jgreater"); >- break; >- } >- case op_jgreatereq: { >- printCompareJump(out, begin, it, location, "jgreatereq"); >- break; >- } >- case op_jnless: { >- printCompareJump(out, begin, it, location, "jnless"); >- break; >- } >- case op_jnlesseq: { >- printCompareJump(out, begin, it, location, 
"jnlesseq"); >- break; >- } >- case op_jngreater: { >- printCompareJump(out, begin, it, location, "jngreater"); >- break; >- } >- case op_jngreatereq: { >- printCompareJump(out, begin, it, location, "jngreatereq"); >- break; >- } >- case op_jeq: { >- printCompareJump(out, begin, it, location, "jeq"); >- break; >- } >- case op_jneq: { >- printCompareJump(out, begin, it, location, "jneq"); >- break; >- } >- case op_jstricteq: { >- printCompareJump(out, begin, it, location, "jstricteq"); >- break; >- } >- case op_jnstricteq: { >- printCompareJump(out, begin, it, location, "jnstricteq"); >- break; >- } >- case op_jbelow: { >- printCompareJump(out, begin, it, location, "jbelow"); >- break; >- } >- case op_jbeloweq: { >- printCompareJump(out, begin, it, location, "jbeloweq"); >- break; >- } >- case op_loop_hint: { >- printLocationAndOp(out, location, it, "loop_hint"); >- break; >- } >- case op_check_traps: { >- printLocationAndOp(out, location, it, "check_traps"); >- break; >- } >- case op_nop: { >- printLocationAndOp(out, location, it, "nop"); >- break; >- } >- case op_super_sampler_begin: { >- printLocationAndOp(out, location, it, "super_sampler_begin"); >- break; >- } >- case op_super_sampler_end: { >- printLocationAndOp(out, location, it, "super_sampler_end"); >- break; >- } >- case op_log_shadow_chicken_prologue: { >- int r0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "log_shadow_chicken_prologue"); >- out.printf("%s", registerName(r0).data()); >- break; >- } >- case op_log_shadow_chicken_tail: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "log_shadow_chicken_tail"); >- out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); >- break; >- } >- case op_switch_imm: { >- int tableIndex = (++it)->u.operand; >- int defaultTarget = (++it)->u.operand; >- int scrutineeRegister = (++it)->u.operand; >- printLocationAndOp(out, location, it, "switch_imm"); >- out.printf("%d, %d(->%d), 
%s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data()); >- break; >- } >- case op_switch_char: { >- int tableIndex = (++it)->u.operand; >- int defaultTarget = (++it)->u.operand; >- int scrutineeRegister = (++it)->u.operand; >- printLocationAndOp(out, location, it, "switch_char"); >- out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data()); >- break; >- } >- case op_switch_string: { >- int tableIndex = (++it)->u.operand; >- int defaultTarget = (++it)->u.operand; >- int scrutineeRegister = (++it)->u.operand; >- printLocationAndOp(out, location, it, "switch_string"); >- out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data()); >- break; >- } >- case op_new_func: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int f0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_func"); >- out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); >- break; >- } >- case op_new_generator_func: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int f0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_generator_func"); >- out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); >- break; >- } >- case op_new_async_func: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int f0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_async_func"); >- out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); >- break; >- } >- case op_new_async_generator_func: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int f0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_async_generator_func"); >- out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); >- break; >- } >- case op_new_func_exp: { >- int 
r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int f0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_func_exp"); >- out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); >- break; >- } >- case op_new_generator_func_exp: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int f0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_generator_func_exp"); >- out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); >- break; >- } >- case op_new_async_func_exp: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int f0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "new_async_func_exp"); >- out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); >- break; >- } >- case op_new_async_generator_func_exp: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int f0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "op_new_async_generator_func_exp"); >- out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); >- break; >- } >- case op_set_function_name: { >- int funcReg = (++it)->u.operand; >- int nameReg = (++it)->u.operand; >- printLocationAndOp(out, location, it, "set_function_name"); >- out.printf("%s, %s", registerName(funcReg).data(), registerName(nameReg).data()); >- break; >- } >- case op_call: { >- printCallOp(out, location, it, "call", DumpCaches, hasPrintedProfiling, statusMap); >- break; >- } >- case op_tail_call: { >- printCallOp(out, location, it, "tail_call", DumpCaches, hasPrintedProfiling, statusMap); >- break; >- } >- case op_call_eval: { >- printCallOp(out, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, statusMap); >- break; >- } >- >- case op_construct_varargs: >- case op_call_varargs: >- case op_tail_call_varargs: >- case op_tail_call_forward_arguments: { >- int result = (++it)->u.operand; >- int callee = (++it)->u.operand; >- 
int thisValue = (++it)->u.operand; >- int arguments = (++it)->u.operand; >- int firstFreeRegister = (++it)->u.operand; >- int varArgOffset = (++it)->u.operand; >- ++it; >- const char* opName; >- if (opcode == op_call_varargs) >- opName = "call_varargs"; >- else if (opcode == op_construct_varargs) >- opName = "construct_varargs"; >- else if (opcode == op_tail_call_varargs) >- opName = "tail_call_varargs"; >- else if (opcode == op_tail_call_forward_arguments) >- opName = "tail_call_forward_arguments"; >- else >- RELEASE_ASSERT_NOT_REACHED(); >- >- printLocationAndOp(out, location, it, opName); >- out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- >- case op_ret: { >- int r0 = (++it)->u.operand; >- printLocationOpAndRegisterOperand(out, location, it, "ret", r0); >- break; >- } >- case op_construct: { >- printCallOp(out, location, it, "construct", DumpCaches, hasPrintedProfiling, statusMap); >- break; >- } >- case op_strcat: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int count = (++it)->u.operand; >- printLocationAndOp(out, location, it, "strcat"); >- out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), count); >- break; >- } >- case op_to_primitive: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "to_primitive"); >- out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); >- break; >- } >- case op_get_enumerable_length: { >- int dst = it[1].u.operand; >- int base = it[2].u.operand; >- printLocationAndOp(out, location, it, "op_get_enumerable_length"); >- out.printf("%s, %s", registerName(dst).data(), registerName(base).data()); >- it += OPCODE_LENGTH(op_get_enumerable_length) - 1; >- break; >- } >- case op_has_indexed_property: { >- int dst = 
(++it)->u.operand; >- int base = (++it)->u.operand; >- int propertyName = (++it)->u.operand; >- printLocationAndOp(out, location, it, "op_has_indexed_property"); >- out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data()); >- dumpArrayProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_has_structure_property: { >- int dst = it[1].u.operand; >- int base = it[2].u.operand; >- int propertyName = it[3].u.operand; >- int enumerator = it[4].u.operand; >- printLocationAndOp(out, location, it, "op_has_structure_property"); >- out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(enumerator).data()); >- it += OPCODE_LENGTH(op_has_structure_property) - 1; >- break; >- } >- case op_has_generic_property: { >- int dst = it[1].u.operand; >- int base = it[2].u.operand; >- int propertyName = it[3].u.operand; >- printLocationAndOp(out, location, it, "op_has_generic_property"); >- out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data()); >- it += OPCODE_LENGTH(op_has_generic_property) - 1; >- break; >- } >- case op_get_direct_pname: { >- int dst = (++it)->u.operand; >- int base = (++it)->u.operand; >- int propertyName = (++it)->u.operand; >- int index = (++it)->u.operand; >- int enumerator = (++it)->u.operand; >- printLocationAndOp(out, location, it, "op_get_direct_pname"); >- out.printf("%s, %s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(index).data(), registerName(enumerator).data()); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- >- } >- case op_get_property_enumerator: { >- int dst = it[1].u.operand; >- int base = it[2].u.operand; >- printLocationAndOp(out, location, it, "op_get_property_enumerator"); >- out.printf("%s, %s", registerName(dst).data(), registerName(base).data()); >- it += 
OPCODE_LENGTH(op_get_property_enumerator) - 1; >- break; >- } >- case op_enumerator_structure_pname: { >- int dst = it[1].u.operand; >- int enumerator = it[2].u.operand; >- int index = it[3].u.operand; >- printLocationAndOp(out, location, it, "op_enumerator_structure_pname"); >- out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data()); >- it += OPCODE_LENGTH(op_enumerator_structure_pname) - 1; >- break; >- } >- case op_enumerator_generic_pname: { >- int dst = it[1].u.operand; >- int enumerator = it[2].u.operand; >- int index = it[3].u.operand; >- printLocationAndOp(out, location, it, "op_enumerator_generic_pname"); >- out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data()); >- it += OPCODE_LENGTH(op_enumerator_generic_pname) - 1; >- break; >- } >- case op_to_index_string: { >- int dst = it[1].u.operand; >- int index = it[2].u.operand; >- printLocationAndOp(out, location, it, "op_to_index_string"); >- out.printf("%s, %s", registerName(dst).data(), registerName(index).data()); >- it += OPCODE_LENGTH(op_to_index_string) - 1; >- break; >- } >- case op_push_with_scope: { >- int dst = (++it)->u.operand; >- int newScope = (++it)->u.operand; >- int currentScope = (++it)->u.operand; >- printLocationAndOp(out, location, it, "push_with_scope"); >- out.printf("%s, %s, %s", registerName(dst).data(), registerName(newScope).data(), registerName(currentScope).data()); >- break; >- } >- case op_get_parent_scope: { >- int dst = (++it)->u.operand; >- int parentScope = (++it)->u.operand; >- printLocationAndOp(out, location, it, "get_parent_scope"); >- out.printf("%s, %s", registerName(dst).data(), registerName(parentScope).data()); >- break; >- } >- case op_create_lexical_environment: { >- int dst = (++it)->u.operand; >- int scope = (++it)->u.operand; >- int symbolTable = (++it)->u.operand; >- int initialValue = (++it)->u.operand; >- printLocationAndOp(out, location, it, 
"create_lexical_environment"); >- out.printf("%s, %s, %s, %s", >- registerName(dst).data(), registerName(scope).data(), registerName(symbolTable).data(), registerName(initialValue).data()); >- break; >- } >- case op_catch: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- void* pointer = getPointer(*(++it)); >- printLocationAndOp(out, location, it, "catch"); >- out.printf("%s, %s, %p", registerName(r0).data(), registerName(r1).data(), pointer); >- break; >- } >- case op_throw: { >- int r0 = (++it)->u.operand; >- printLocationOpAndRegisterOperand(out, location, it, "throw", r0); >- break; >- } >- case op_throw_static_error: { >- int r0 = (++it)->u.operand; >- ErrorType k1 = static_cast<ErrorType>((++it)->u.unsignedValue); >- printLocationAndOp(out, location, it, "throw_static_error"); >- out.printf("%s, ", registerName(r0).data()); >- out.print(k1); >- break; >- } >- case op_debug: { >- int debugHookType = (++it)->u.operand; >- int hasBreakpointFlag = (++it)->u.operand; >- printLocationAndOp(out, location, it, "debug"); >- out.printf("%s, %d", debugHookName(debugHookType), hasBreakpointFlag); >- break; >- } >- case op_identity_with_profile: { >- int r0 = (++it)->u.operand; >- ++it; // Profile top half >- ++it; // Profile bottom half >- printLocationAndOp(out, location, it, "identity_with_profile"); >- out.printf("%s", registerName(r0).data()); >- break; >- } >- case op_unreachable: { >- printLocationAndOp(out, location, it, "unreachable"); >- break; >- } >- case op_end: { >- int r0 = (++it)->u.operand; >- printLocationOpAndRegisterOperand(out, location, it, "end", r0); >- break; >- } >- case op_resolve_scope_for_hoisting_func_decl_in_eval: { >- int r0 = (++it)->u.operand; >- int scope = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "resolve_scope_for_hoisting_func_decl_in_eval"); >- out.printf("%s, %s, %s", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data()); >- 
break; >- } >- case op_resolve_scope: { >- int r0 = (++it)->u.operand; >- int scope = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- ResolveType resolveType = static_cast<ResolveType>((++it)->u.operand); >- int depth = (++it)->u.operand; >- void* pointer = getPointer(*(++it)); >- printLocationAndOp(out, location, it, "resolve_scope"); >- out.printf("%s, %s, %s, <%s>, %d, %p", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data(), resolveTypeName(resolveType), depth, pointer); >- break; >- } >- case op_get_from_scope: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand); >- ++it; // Structure >- int operand = (++it)->u.operand; // Operand >- printLocationAndOp(out, location, it, "get_from_scope"); >- out.print(registerName(r0), ", ", registerName(r1)); >- if (static_cast<unsigned>(id0) == UINT_MAX) >- out.print(", anonymous"); >- else >- out.print(", ", idName(id0, identifier(id0))); >- out.print(", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, ", operand); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_put_to_scope: { >- int r0 = (++it)->u.operand; >- int id0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand); >- ++it; // Structure >- int operand = (++it)->u.operand; // Operand >- printLocationAndOp(out, location, it, "put_to_scope"); >- out.print(registerName(r0)); >- if (static_cast<unsigned>(id0) == UINT_MAX) >- out.print(", anonymous"); >- else >- out.print(", ", idName(id0, identifier(id0))); >- out.print(", ", registerName(r1), ", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", 
initializationModeName(getPutInfo.initializationMode()), ">, <structure>, ", operand); >- break; >- } >- case op_get_from_arguments: { >- int r0 = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- int offset = (++it)->u.operand; >- printLocationAndOp(out, location, it, "get_from_arguments"); >- out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), offset); >- dumpValueProfiling(out, it, hasPrintedProfiling); >- break; >- } >- case op_put_to_arguments: { >- int r0 = (++it)->u.operand; >- int offset = (++it)->u.operand; >- int r1 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "put_to_arguments"); >- out.printf("%s, %d, %s", registerName(r0).data(), offset, registerName(r1).data()); >- break; >- } >- case op_yield: { >- int r0 = (++it)->u.operand; >- unsigned yieldPoint = (++it)->u.unsignedValue; >- int r1 = (++it)->u.operand; >- printLocationAndOp(out, location, it, "yield"); >- out.printf("%s, %u, %s", registerName(r0).data(), yieldPoint, registerName(r1).data()); >- break; >- } >- default: >- RELEASE_ASSERT_NOT_REACHED(); >- } >- dumpProfilesForBytecodeOffset(out, location, hasPrintedProfiling); >- out.print("\n"); >-} >- >-template<class Block> >-void BytecodeDumper<Block>::dumpBytecode(Block* block, PrintStream& out, const typename Block::Instruction* begin, const typename Block::Instruction*& it, const ICStatusMap& statusMap) >-{ >- BytecodeDumper dumper(block, begin); >- dumper.dumpBytecode(out, begin, it, statusMap); >-} >- >-template<class Block> >-void BytecodeDumper<Block>::dumpIdentifiers(PrintStream& out) >+void BytecodeDumper<Block>::dumpIdentifiers() > { > if (size_t count = block()->numberOfIdentifiers()) { >- out.printf("\nIdentifiers:\n"); >+ m_out.printf("\nIdentifiers:\n"); > size_t i = 0; > do { >- out.printf(" id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data()); >+ m_out.printf(" id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data()); > ++i; > } while (i != 
count); > } > } > > template<class Block> >-void BytecodeDumper<Block>::dumpConstants(PrintStream& out) >+void BytecodeDumper<Block>::dumpConstants() > { > if (!block()->constantRegisters().isEmpty()) { >- out.printf("\nConstants:\n"); >+ m_out.printf("\nConstants:\n"); > size_t i = 0; > for (const auto& constant : block()->constantRegisters()) { > const char* sourceCodeRepresentationDescription = nullptr; >@@ -1787,94 +159,94 @@ void BytecodeDumper<Block>::dumpConstants(PrintStream& out) > sourceCodeRepresentationDescription = ""; > break; > } >- out.printf(" k%u = %s%s\n", static_cast<unsigned>(i), toCString(constant.get()).data(), sourceCodeRepresentationDescription); >+ m_out.printf(" k%u = %s%s\n", static_cast<unsigned>(i), toCString(constant.get()).data(), sourceCodeRepresentationDescription); > ++i; > } > } > } > > template<class Block> >-void BytecodeDumper<Block>::dumpExceptionHandlers(PrintStream& out) >+void BytecodeDumper<Block>::dumpExceptionHandlers() > { > if (unsigned count = block()->numberOfExceptionHandlers()) { >- out.printf("\nException Handlers:\n"); >+ m_out.printf("\nException Handlers:\n"); > unsigned i = 0; > do { > const auto& handler = block()->exceptionHandler(i); >- out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] } %s\n", i + 1, handler.start, handler.end, handler.target, handler.typeName()); >+ m_out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] } %s\n", i + 1, handler.start, handler.end, handler.target, handler.typeName()); > ++i; > } while (i < count); > } > } > > template<class Block> >-void BytecodeDumper<Block>::dumpSwitchJumpTables(PrintStream& out) >+void BytecodeDumper<Block>::dumpSwitchJumpTables() > { > if (unsigned count = block()->numberOfSwitchJumpTables()) { >- out.printf("Switch Jump Tables:\n"); >+ m_out.printf("Switch Jump Tables:\n"); > unsigned i = 0; > do { >- out.printf(" %1d = {\n", i); >+ m_out.printf(" %1d = {\n", i); > const auto& switchJumpTable = block()->switchJumpTable(i); > int entry = 
0; > auto end = switchJumpTable.branchOffsets.end(); > for (auto iter = switchJumpTable.branchOffsets.begin(); iter != end; ++iter, ++entry) { > if (!*iter) > continue; >- out.printf("\t\t%4d => %04d\n", entry + switchJumpTable.min, *iter); >+ m_out.printf("\t\t%4d => %04d\n", entry + switchJumpTable.min, *iter); > } >- out.printf(" }\n"); >+ m_out.printf(" }\n"); > ++i; > } while (i < count); > } > } > > template<class Block> >-void BytecodeDumper<Block>::dumpStringSwitchJumpTables(PrintStream& out) >+void BytecodeDumper<Block>::dumpStringSwitchJumpTables() > { > if (unsigned count = block()->numberOfStringSwitchJumpTables()) { >- out.printf("\nString Switch Jump Tables:\n"); >+ m_out.printf("\nString Switch Jump Tables:\n"); > unsigned i = 0; > do { >- out.printf(" %1d = {\n", i); >+ m_out.printf(" %1d = {\n", i); > const auto& stringSwitchJumpTable = block()->stringSwitchJumpTable(i); > auto end = stringSwitchJumpTable.offsetTable.end(); > for (auto iter = stringSwitchJumpTable.offsetTable.begin(); iter != end; ++iter) >- out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset); >- out.printf(" }\n"); >+ m_out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset); >+ m_out.printf(" }\n"); > ++i; > } while (i < count); > } > } > > template<class Block> >-void BytecodeDumper<Block>::dumpBlock(Block* block, const typename Block::UnpackedInstructions& instructions, PrintStream& out, const ICStatusMap& statusMap) >+void BytecodeDumper<Block>::dumpBlock(Block* block, const InstructionStream& instructions, PrintStream& out, const ICStatusMap& statusMap) > { > size_t instructionCount = 0; > >- for (size_t i = 0; i < instructions.size(); i += opcodeLengths[Interpreter::getOpcodeID(instructions[i])]) >+ for (const auto& instruction : instructions) { >+ UNUSED_PARAM(instruction); > ++instructionCount; >+ } > > out.print(*block); > out.printf( >- ": %lu m_instructions; %lu bytes; %d parameter(s); %d callee 
register(s); %d variable(s)", >- static_cast<unsigned long>(instructions.size()), >- static_cast<unsigned long>(instructions.size() * sizeof(Instruction)), >+ ": %lu instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)", >+ static_cast<unsigned long>(instructionCount), >+ static_cast<unsigned long>(instructions.sizeInBytes()), > block->numParameters(), block->numCalleeLocals(), block->numVars()); > out.print("; scope at ", block->scopeRegister()); > out.printf("\n"); > >- const auto* begin = instructions.begin(); >- const auto* end = instructions.end(); >- BytecodeDumper<Block> dumper(block, begin); >- for (const auto* it = begin; it != end; ++it) >- dumper.dumpBytecode(out, begin, it, statusMap); >+ BytecodeDumper<Block> dumper(block, out); >+ for (const auto& it : instructions) >+ dumper.dumpBytecode(it, statusMap); > >- dumper.dumpIdentifiers(out); >- dumper.dumpConstants(out); >- dumper.dumpExceptionHandlers(out); >- dumper.dumpSwitchJumpTables(out); >- dumper.dumpStringSwitchJumpTables(out); >+ dumper.dumpIdentifiers(); >+ dumper.dumpConstants(); >+ dumper.dumpExceptionHandlers(); >+ dumper.dumpSwitchJumpTables(); >+ dumper.dumpStringSwitchJumpTables(); > > out.printf("\n"); > } >diff --git a/Source/JavaScriptCore/bytecode/BytecodeDumper.h b/Source/JavaScriptCore/bytecode/BytecodeDumper.h >index d811a8d7267cb33ab0a574ca684e29d95c4513b7..37d23bf608762710c0b3c3bb49600ddf7c8ee61c 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeDumper.h >+++ b/Source/JavaScriptCore/bytecode/BytecodeDumper.h >@@ -28,6 +28,7 @@ > > #include "CallLinkInfo.h" > #include "ICStatusMap.h" >+#include "InstructionStream.h" > #include "StructureStubInfo.h" > > namespace JSC { >@@ -37,20 +38,30 @@ struct Instruction; > template<class Block> > class BytecodeDumper { > public: >- typedef typename Block::Instruction Instruction; >+ static void dumpBytecode(Block*, PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap = ICStatusMap()); 
>+ static void dumpBlock(Block*, const InstructionStream&, PrintStream& out, const ICStatusMap& statusMap = ICStatusMap()); > >- static void dumpBytecode(Block*, PrintStream& out, const Instruction* begin, const Instruction*& it, const ICStatusMap& statusMap = ICStatusMap()); >- static void dumpBlock(Block*, const typename Block::UnpackedInstructions&, PrintStream& out, const ICStatusMap& statusMap = ICStatusMap()); >+ void printLocationAndOp(InstructionStream::Offset location, const char* op); >+ >+ template<typename T> >+ void dumpOperand(T operand) >+ { >+ m_out.print(", "); >+ dumpValue(operand); >+ } >+ >+ void dumpValue(VirtualRegister reg) { m_out.printf("%s", registerName(reg.offset()).data()); } >+ template<typename T> >+ void dumpValue(T v) { m_out.print(v); } > > private: >- BytecodeDumper(Block* block, const Instruction* instructionsBegin) >+ BytecodeDumper(Block* block, PrintStream& out) > : m_block(block) >- , m_instructionsBegin(instructionsBegin) >+ , m_out(out) > { > } > > Block* block() const { return m_block; } >- const Instruction* instructionsBegin() const { return m_instructionsBegin; } > > ALWAYS_INLINE VM* vm() const; > >@@ -59,25 +70,13 @@ private: > > const Identifier& identifier(int index) const; > >- void dumpIdentifiers(PrintStream& out); >- void dumpConstants(PrintStream& out); >- void dumpExceptionHandlers(PrintStream& out); >- void dumpSwitchJumpTables(PrintStream& out); >- void dumpStringSwitchJumpTables(PrintStream& out); >- >- void printUnaryOp(PrintStream& out, int location, const Instruction*& it, const char* op); >- void printBinaryOp(PrintStream& out, int location, const Instruction*& it, const char* op); >- void printConditionalJump(PrintStream& out, const Instruction*, const Instruction*& it, int location, const char* op); >- void printCompareJump(PrintStream& out, const Instruction*, const Instruction*& it, int location, const char* op); >- void printGetByIdOp(PrintStream& out, int location, const Instruction*& it); >- void 
printGetByIdCacheStatus(PrintStream& out, int location, const ICStatusMap&); >- void printPutByIdCacheStatus(PrintStream& out, int location, const ICStatusMap&); >- void printInByIdCacheStatus(PrintStream& out, int location, const ICStatusMap&); >- enum CacheDumpMode { DumpCaches, DontDumpCaches }; >- void printCallOp(PrintStream& out, int location, const Instruction*& it, const char* op, CacheDumpMode, bool& hasPrintedProfiling, const ICStatusMap&); >- void printPutByIdOp(PrintStream& out, int location, const Instruction*& it, const char* op); >- void printLocationOpAndRegisterOperand(PrintStream& out, int location, const Instruction*& it, const char* op, int operand); >- void dumpBytecode(PrintStream& out, const Instruction* begin, const Instruction*& it, const ICStatusMap&); >+ void dumpIdentifiers(); >+ void dumpConstants(); >+ void dumpExceptionHandlers(); >+ void dumpSwitchJumpTables(); >+ void dumpStringSwitchJumpTables(); >+ >+ void dumpBytecode(const InstructionStream::Ref& it, const ICStatusMap&); > > void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling); > void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling); >@@ -90,7 +89,7 @@ private: > #endif > > Block* m_block; >- const Instruction* m_instructionsBegin; >+ PrintStream& m_out; > }; > > } >diff --git a/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp b/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp >index e91b5b5d8356118136796276004e204828eb27c3..f4b301deb8f5164bc5738ac720b355b1321d77bc 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp >+++ b/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp >@@ -30,6 +30,7 @@ > #include "BytecodeDumper.h" > #include "BytecodeLivenessAnalysisInlines.h" > #include "BytecodeRewriter.h" >+#include "BytecodeStructs.h" > #include "BytecodeUseDef.h" > #include "IdentifierInlines.h" > #include "InterpreterInlines.h" >@@ -43,8 +44,8 @@ > namespace 
JSC { > > struct YieldData { >- size_t point { 0 }; >- int argument { 0 }; >+ InstructionStream::Offset point { 0 }; >+ VirtualRegister argument { 0 }; > FastBitVector liveness; > }; > >@@ -52,29 +53,31 @@ class BytecodeGeneratorification { > public: > typedef Vector<YieldData> Yields; > >- BytecodeGeneratorification(UnlinkedCodeBlock* codeBlock, UnlinkedCodeBlock::UnpackedInstructions& instructions, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex) >- : m_codeBlock(codeBlock) >+ BytecodeGeneratorification(BytecodeGenerator& bytecodeGenerator, UnlinkedCodeBlock* codeBlock, InstructionStreamWriter& instructions, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex) >+ : m_bytecodeGenerator(bytecodeGenerator) >+ , m_codeBlock(codeBlock) > , m_instructions(instructions) > , m_graph(m_codeBlock, m_instructions) > , m_generatorFrameSymbolTable(*codeBlock->vm(), generatorFrameSymbolTable) > , m_generatorFrameSymbolTableIndex(generatorFrameSymbolTableIndex) > { > for (BytecodeBasicBlock* block : m_graph) { >- for (unsigned bytecodeOffset : block->offsets()) { >- const UnlinkedInstruction* pc = &instructions[bytecodeOffset]; >- switch (pc->u.opcode) { >+ for (const auto offset : block->offsets()) { >+ const auto instruction = m_instructions.at(offset); >+ switch (instruction->opcodeID()) { > case op_enter: { >- m_enterPoint = bytecodeOffset; >+ m_enterPoint = instruction.offset(); > break; > } > > case op_yield: { >- unsigned liveCalleeLocalsIndex = pc[2].u.unsignedValue; >+ auto bytecode = instruction->as<OpYield>(); >+ unsigned liveCalleeLocalsIndex = bytecode.yieldPoint; > if (liveCalleeLocalsIndex >= m_yields.size()) > m_yields.resize(liveCalleeLocalsIndex + 1); > YieldData& data = m_yields[liveCalleeLocalsIndex]; >- data.point = bytecodeOffset; >- data.argument = pc[3].u.operand; >+ data.point = instruction.offset(); >+ data.argument = bytecode.argument; > break; > } > >@@ -105,9 +108,14 @@ public: > return m_yields; > } 
> >- unsigned enterPoint() const >+ InstructionStream::Ref enterPoint() const > { >- return m_enterPoint; >+ return m_instructions.at(m_enterPoint); >+ } >+ >+ const InstructionStream& instructions() const >+ { >+ return m_instructions; > } > > private: >@@ -138,9 +146,10 @@ private: > return storage; > } > >- unsigned m_enterPoint { 0 }; >+ BytecodeGenerator& m_bytecodeGenerator; >+ InstructionStream::Offset m_enterPoint; > UnlinkedCodeBlock* m_codeBlock; >- UnlinkedCodeBlock::UnpackedInstructions& m_instructions; >+ InstructionStreamWriter& m_instructions; > BytecodeGraph m_graph; > Vector<std::optional<Storage>> m_storages; > Yields m_yields; >@@ -155,7 +164,7 @@ public: > { > } > >- void run(UnlinkedCodeBlock* codeBlock, UnlinkedCodeBlock::UnpackedInstructions& instructions) >+ void run(UnlinkedCodeBlock* codeBlock, InstructionStreamWriter& instructions) > { > // Perform modified liveness analysis to determine which locals are live at the merge points. > // This produces the conservative results for the question, "which variables should be saved and resumed?". >@@ -163,7 +172,7 @@ public: > runLivenessFixpoint(codeBlock, instructions, m_generatorification.graph()); > > for (YieldData& data : m_generatorification.yields()) >- data.liveness = getLivenessInfoAtBytecodeOffset(codeBlock, instructions, m_generatorification.graph(), data.point + opcodeLength(op_yield)); >+ data.liveness = getLivenessInfoAtBytecodeOffset(codeBlock, instructions, m_generatorification.graph(), m_generatorification.instructions().at(data.point).next().offset()); > } > > private: >@@ -179,85 +188,80 @@ void BytecodeGeneratorification::run() > pass.run(m_codeBlock, m_instructions); > } > >- BytecodeRewriter rewriter(m_graph, m_codeBlock, m_instructions); >+ BytecodeRewriter rewriter(m_bytecodeGenerator, m_graph, m_codeBlock, m_instructions); > > // Setup the global switch for the generator. 
> { >- unsigned nextToEnterPoint = enterPoint() + opcodeLength(op_enter); >+ auto nextToEnterPoint = enterPoint().next(); > unsigned switchTableIndex = m_codeBlock->numberOfSwitchJumpTables(); > VirtualRegister state = virtualRegisterForArgument(static_cast<int32_t>(JSGeneratorFunction::GeneratorArgument::State)); > auto& jumpTable = m_codeBlock->addSwitchJumpTable(); > jumpTable.min = 0; > jumpTable.branchOffsets.resize(m_yields.size() + 1); > jumpTable.branchOffsets.fill(0); >- jumpTable.add(0, nextToEnterPoint); >+ jumpTable.add(0, nextToEnterPoint.offset()); > for (unsigned i = 0; i < m_yields.size(); ++i) > jumpTable.add(i + 1, m_yields[i].point); > > rewriter.insertFragmentBefore(nextToEnterPoint, [&](BytecodeRewriter::Fragment& fragment) { >- fragment.appendInstruction(op_switch_imm, switchTableIndex, nextToEnterPoint, state.offset()); >+ fragment.appendInstruction<OpSwitchImm>(switchTableIndex, nextToEnterPoint.offset(), state); > }); > } > > for (const YieldData& data : m_yields) { > VirtualRegister scope = virtualRegisterForArgument(static_cast<int32_t>(JSGeneratorFunction::GeneratorArgument::Frame)); > >+ auto instruction = m_instructions.at(data.point); > // Emit save sequence. 
>- rewriter.insertFragmentBefore(data.point, [&](BytecodeRewriter::Fragment& fragment) { >+ rewriter.insertFragmentBefore(instruction, [&](BytecodeRewriter::Fragment& fragment) { > data.liveness.forEachSetBit([&](size_t index) { > VirtualRegister operand = virtualRegisterForLocal(index); > Storage storage = storageForGeneratorLocal(index); > >- fragment.appendInstruction( >- op_put_to_scope, >- scope.offset(), // scope >+ fragment.appendInstruction<OpPutToScope>( >+ scope, // scope > storage.identifierIndex, // identifier >- operand.offset(), // value >- GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand(), // info >+ operand, // value >+ GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization), // info > m_generatorFrameSymbolTableIndex, // symbol table constant index > storage.scopeOffset.offset() // scope offset > ); > }); > > // Insert op_ret just after save sequence. >- fragment.appendInstruction(op_ret, data.argument); >+ fragment.appendInstruction<OpRet>(data.argument); > }); > > // Emit resume sequence. >- rewriter.insertFragmentAfter(data.point, [&](BytecodeRewriter::Fragment& fragment) { >+ rewriter.insertFragmentAfter(instruction, [&](BytecodeRewriter::Fragment& fragment) { > data.liveness.forEachSetBit([&](size_t index) { > VirtualRegister operand = virtualRegisterForLocal(index); > Storage storage = storageForGeneratorLocal(index); > >- UnlinkedValueProfile profile = m_codeBlock->vm()->canUseJIT() >- ? 
m_codeBlock->addValueProfile() >- : static_cast<UnlinkedValueProfile>(-1); >- fragment.appendInstruction( >- op_get_from_scope, >- operand.offset(), // dst >- scope.offset(), // scope >+ fragment.appendInstruction<OpGetFromScope>( >+ operand, // dst >+ scope, // scope > storage.identifierIndex, // identifier >- GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand(), // info >+ GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization), // info > 0, // local scope depth >- storage.scopeOffset.offset(), // scope offset >- profile // profile >+ storage.scopeOffset.offset() // scope offset > ); > }); > }); > > // Clip the unnecessary bytecodes. >- rewriter.removeBytecode(data.point); >+ rewriter.removeBytecode(instruction); > } > > rewriter.execute(); > } > >-void performGeneratorification(UnlinkedCodeBlock* codeBlock, UnlinkedCodeBlock::UnpackedInstructions& instructions, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex) >+void performGeneratorification(BytecodeGenerator &bytecodeGenerator, UnlinkedCodeBlock* codeBlock, InstructionStreamWriter& instructions, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex) > { > if (Options::dumpBytecodesBeforeGeneratorification()) > BytecodeDumper<UnlinkedCodeBlock>::dumpBlock(codeBlock, instructions, WTF::dataFile()); > >- BytecodeGeneratorification pass(codeBlock, instructions, generatorFrameSymbolTable, generatorFrameSymbolTableIndex); >+ BytecodeGeneratorification pass(bytecodeGenerator, codeBlock, instructions, generatorFrameSymbolTable, generatorFrameSymbolTableIndex); > pass.run(); > } > >diff --git a/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.h b/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.h >index c7b613746086ae34c4c0a22bf4e67c76e6cfd4e9..01f096e041bfdb25f6fae5ddc0614ef7a6f06306 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.h >+++ 
b/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.h >@@ -26,12 +26,14 @@ > > #pragma once > >-#include "UnlinkedCodeBlock.h" >- > namespace JSC { > >+class BytecodeGenerator; >+class InstructionStreamWriter; >+class SymbolTable; >+class UnlinkedCodeBlock; > class SymbolTable; > >-void performGeneratorification(UnlinkedCodeBlock*, UnlinkedCodeBlock::UnpackedInstructions&, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex); >+void performGeneratorification(BytecodeGenerator&, UnlinkedCodeBlock*, InstructionStreamWriter&, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex); > > } // namespace JSC >diff --git a/Source/JavaScriptCore/bytecode/BytecodeGraph.h b/Source/JavaScriptCore/bytecode/BytecodeGraph.h >index c204b41f329718e72ce7db97fae64315e3c73009..ee2da185c3519e555d5ec706e65421158cada5b6 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeGraph.h >+++ b/Source/JavaScriptCore/bytecode/BytecodeGraph.h >@@ -44,20 +44,20 @@ public: > typedef WTF::IndexedContainerIterator<BytecodeGraph> iterator; > > template <typename CodeBlockType> >- inline BytecodeGraph(CodeBlockType*, typename CodeBlockType::UnpackedInstructions&); >+ inline BytecodeGraph(CodeBlockType*, const InstructionStream&); > > WTF::IteratorRange<BasicBlocksVector::reverse_iterator> basicBlocksInReverseOrder() > { > return WTF::makeIteratorRange(m_basicBlocks.rbegin(), m_basicBlocks.rend()); > } > >- static bool blockContainsBytecodeOffset(BytecodeBasicBlock* block, unsigned bytecodeOffset) >+ static bool blockContainsBytecodeOffset(BytecodeBasicBlock* block, InstructionStream::Offset bytecodeOffset) > { > unsigned leaderOffset = block->leaderOffset(); > return bytecodeOffset >= leaderOffset && bytecodeOffset < leaderOffset + block->totalLength(); > } > >- BytecodeBasicBlock* findBasicBlockForBytecodeOffset(unsigned bytecodeOffset) >+ BytecodeBasicBlock* findBasicBlockForBytecodeOffset(InstructionStream::Offset bytecodeOffset) > { > /* > for 
(unsigned i = 0; i < m_basicBlocks.size(); i++) { >@@ -85,7 +85,7 @@ public: > return basicBlock[1].get(); > } > >- BytecodeBasicBlock* findBasicBlockWithLeaderOffset(unsigned leaderOffset) >+ BytecodeBasicBlock* findBasicBlockWithLeaderOffset(InstructionStream::Offset leaderOffset) > { > return (*tryBinarySearch<std::unique_ptr<BytecodeBasicBlock>, unsigned>(m_basicBlocks, m_basicBlocks.size(), leaderOffset, [] (std::unique_ptr<BytecodeBasicBlock>* basicBlock) { return (*basicBlock)->leaderOffset(); })).get(); > } >@@ -105,9 +105,9 @@ private: > > > template<typename CodeBlockType> >-BytecodeGraph::BytecodeGraph(CodeBlockType* codeBlock, typename CodeBlockType::UnpackedInstructions& instructions) >+BytecodeGraph::BytecodeGraph(CodeBlockType* codeBlock, const InstructionStream& instructions) > { >- BytecodeBasicBlock::compute(codeBlock, instructions.begin(), instructions.size(), m_basicBlocks); >+ BytecodeBasicBlock::compute(codeBlock, instructions, m_basicBlocks); > ASSERT(m_basicBlocks.size()); > } > >diff --git a/Source/JavaScriptCore/bytecode/BytecodeKills.h b/Source/JavaScriptCore/bytecode/BytecodeKills.h >index feab026863afccd9c0156aee017f514336c37fa2..491f47e4bc2d2468a8034489df6e5e192b0bb7f1 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeKills.h >+++ b/Source/JavaScriptCore/bytecode/BytecodeKills.h >@@ -40,36 +40,37 @@ public: > { > } > >+ // TODO: is this dead? > // By convention, we say that non-local operands are never killed. 
>- bool operandIsKilled(unsigned bytecodeIndex, int operand) const >- { >- ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size()); >- VirtualRegister reg(operand); >- if (reg.isLocal()) >- return m_killSets[bytecodeIndex].contains(operand); >- return false; >- } >+ //bool operandIsKilled(unsigned bytecodeIndex, int operand) const >+ //{ >+ //ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size()); >+ //VirtualRegister reg(operand); >+ //if (reg.isLocal()) >+ //return m_killSets[bytecodeIndex].contains(operand); >+ //return false; >+ //} > >- bool operandIsKilled(Instruction* instruction, int operand) const >- { >- return operandIsKilled(m_codeBlock->bytecodeOffset(instruction), operand); >- } >+ //bool operandIsKilled(Instruction* instruction, int operand) const >+ //{ >+ //return operandIsKilled(m_codeBlock->bytecodeOffset(instruction), operand); >+ //} > >- template<typename Functor> >- void forEachOperandKilledAt(unsigned bytecodeIndex, const Functor& functor) const >- { >- ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size()); >- m_killSets[bytecodeIndex].forEachLocal( >- [&] (unsigned local) { >- functor(virtualRegisterForLocal(local)); >- }); >- } >+ //template<typename Functor> >+ //void forEachOperandKilledAt(unsigned bytecodeIndex, const Functor& functor) const >+ //{ >+ //ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size()); >+ //m_killSets[bytecodeIndex].forEachLocal( >+ //[&] (unsigned local) { >+ //functor(virtualRegisterForLocal(local)); >+ //}); >+ //} > >- template<typename Functor> >- void forEachOperandKilledAt(Instruction* pc, const Functor& functor) const >- { >- forEachOperandKilledAt(m_codeBlock->bytecodeOffset(pc), functor); >- } >+ //template<typename Functor> >+ //void forEachOperandKilledAt(Instruction* pc, const Functor& functor) const >+ //{ >+ //forEachOperandKilledAt(m_codeBlock->bytecodeOffset(pc), functor); >+ //} 
> > private: > friend class BytecodeLivenessAnalysis; >diff --git a/Source/JavaScriptCore/bytecode/BytecodeList.json b/Source/JavaScriptCore/bytecode/BytecodeList.json >deleted file mode 100644 >index f5bdc49a7a671b8de9cb348b7ebba936748b2f42..0000000000000000000000000000000000000000 >--- a/Source/JavaScriptCore/bytecode/BytecodeList.json >+++ /dev/null >@@ -1,236 +0,0 @@ >-[ >- { >- "section" : "Bytecodes", "emitInHFile" : true, "emitInStructsFile" : true, "emitInASMFile" : true, >- "emitOpcodeIDStringValuesInHFile" : true, "macroNameComponent" : "BYTECODE", "asmPrefix" : "llint_", >- "bytecodes" : [ >- { "name" : "op_enter", "length" : 1 }, >- { "name" : "op_get_scope", "length" : 2 }, >- { "name" : "op_create_direct_arguments", "length" : 2 }, >- { "name" : "op_create_scoped_arguments", "length" : 3 }, >- { "name" : "op_create_cloned_arguments", "length" : 2 }, >- { "name" : "op_create_this", "offsets" : >- [{"dst" : "int"}, >- {"callee" : "int"}, >- {"inlineCapacity" : "int"}, >- {"cachedCallee" : "WriteBarrier<JSCell>"}]}, >- { "name" : "op_get_argument", "length" : 4 }, >- { "name" : "op_argument_count", "length" : 2 }, >- { "name" : "op_to_this", "length" : 5 }, >- { "name" : "op_check_tdz", "length" : 2 }, >- { "name" : "op_new_object", "length" : 4 }, >- { "name" : "op_new_array", "length" : 5 }, >- { "name" : "op_new_array_with_size", "length" : 4 }, >- { "name" : "op_new_array_buffer", "offsets" : >- [{"dst" : "int"}, >- {"immutableButterfly" : "int"}, >- {"profile" : "ArrayAllocationProfile*"}]}, >- { "name" : "op_new_array_with_spread", "length" : 5 }, >- { "name" : "op_spread", "length" : 3 }, >- { "name" : "op_new_regexp", "length" : 3 }, >- { "name" : "op_mov", "length" : 3 }, >- { "name" : "op_not", "length" : 3 }, >- { "name" : "op_eq", "length" : 4 }, >- { "name" : "op_eq_null", "length" : 3 }, >- { "name" : "op_neq", "length" : 4 }, >- { "name" : "op_neq_null", "length" : 3 }, >- { "name" : "op_stricteq", "length" : 4 }, >- { "name" : 
"op_nstricteq", "length" : 4 }, >- { "name" : "op_less", "length" : 4 }, >- { "name" : "op_lesseq", "length" : 4 }, >- { "name" : "op_greater", "length" : 4 }, >- { "name" : "op_greatereq", "length" : 4 }, >- { "name" : "op_below", "length" : 4 }, >- { "name" : "op_beloweq", "length" : 4 }, >- { "name" : "op_inc", "length" : 2 }, >- { "name" : "op_dec", "length" : 2 }, >- { "name" : "op_to_number", "length" : 4 }, >- { "name" : "op_to_string", "length" : 3 }, >- { "name" : "op_to_object", "length" : 5 }, >- { "name" : "op_negate", "length" : 4 }, >- { "name" : "op_add", "length" : 5 }, >- { "name" : "op_mul", "length" : 5 }, >- { "name" : "op_div", "length" : 5 }, >- { "name" : "op_mod", "length" : 4 }, >- { "name" : "op_sub", "length" : 5 }, >- { "name" : "op_pow", "length" : 4 }, >- { "name" : "op_lshift", "length" : 4 }, >- { "name" : "op_rshift", "length" : 4 }, >- { "name" : "op_urshift", "length" : 4 }, >- { "name" : "op_unsigned", "length" : 3 }, >- { "name" : "op_bitand", "length" : 5 }, >- { "name" : "op_bitxor", "length" : 5 }, >- { "name" : "op_bitor", "length" : 5 }, >- { "name" : "op_identity_with_profile", "length" : 4 }, >- { "name" : "op_overrides_has_instance", "offsets" : >- [{"dst" : "int"}, >- {"constructor" : "int"}, >- {"hasInstanceValue" : "int"}] }, >- { "name" : "op_instanceof", "offsets" : >- [{"dst" : "int"}, >- {"value" : "int"}, >- {"prototype" : "int"}] }, >- { "name" : "op_instanceof_custom", "offsets" : >- [{"dst" : "int"}, >- {"value" : "int"}, >- {"constructor" : "int"}, >- {"hasInstanceValue" : "int"}] }, >- { "name" : "op_typeof", "length" : 3 }, >- { "name" : "op_is_empty", "length" : 3 }, >- { "name" : "op_is_undefined", "length" : 3 }, >- { "name" : "op_is_boolean", "length" : 3 }, >- { "name" : "op_is_number", "length" : 3 }, >- { "name" : "op_is_object", "length" : 3 }, >- { "name" : "op_is_object_or_null", "length" : 3 }, >- { "name" : "op_is_function", "length" : 3 }, >- { "name" : "op_is_cell_with_type", "length" : 4 }, 
>- { "name" : "op_in_by_val", "length" : 5 }, >- { "name" : "op_in_by_id", "length" : 4 }, >- { "name" : "op_get_array_length", "length" : 9 }, >- { "name" : "op_get_by_id", "length" : 9 }, >- { "name" : "op_get_by_id_proto_load", "length" : 9 }, >- { "name" : "op_get_by_id_unset", "length" : 9 }, >- { "name" : "op_get_by_id_with_this", "length" : 6 }, >- { "name" : "op_get_by_val_with_this", "length" : 6 }, >- { "name" : "op_get_by_id_direct", "length" : 7 }, >- { "name" : "op_try_get_by_id", "length" : 5 }, >- { "name" : "op_put_by_id", "length" : 9 }, >- { "name" : "op_put_by_id_with_this", "length" : 5 }, >- { "name" : "op_del_by_id", "length" : 4 }, >- { "name" : "op_get_by_val", "length" : 6 }, >- { "name" : "op_put_by_val", "length" : 5 }, >- { "name" : "op_put_by_val_with_this", "length" : 5 }, >- { "name" : "op_put_by_val_direct", "length" : 5 }, >- { "name" : "op_del_by_val", "length" : 4 }, >- { "name" : "op_put_getter_by_id", "length" : 5 }, >- { "name" : "op_put_setter_by_id", "length" : 5 }, >- { "name" : "op_put_getter_setter_by_id", "length" : 6 }, >- { "name" : "op_put_getter_by_val", "length" : 5 }, >- { "name" : "op_put_setter_by_val", "length" : 5 }, >- { "name" : "op_define_data_property", "length" : 5 }, >- { "name" : "op_define_accessor_property", "length" : 6 }, >- { "name" : "op_jmp", "length" : 2 }, >- { "name" : "op_jtrue", "length" : 3 }, >- { "name" : "op_jfalse", "length" : 3 }, >- { "name" : "op_jeq_null", "length" : 3 }, >- { "name" : "op_jneq_null", "length" : 3 }, >- { "name" : "op_jneq_ptr", "length" : 5 }, >- { "name" : "op_jeq", "length" : 4 }, >- { "name" : "op_jstricteq", "length" : 4 }, >- { "name" : "op_jneq", "length" : 4 }, >- { "name" : "op_jnstricteq", "length" : 4 }, >- { "name" : "op_jless", "length" : 4 }, >- { "name" : "op_jlesseq", "length" : 4 }, >- { "name" : "op_jgreater", "length" : 4 }, >- { "name" : "op_jgreatereq", "length" : 4 }, >- { "name" : "op_jnless", "length" : 4 }, >- { "name" : "op_jnlesseq", 
"length" : 4 }, >- { "name" : "op_jngreater", "length" : 4 }, >- { "name" : "op_jngreatereq", "length" : 4 }, >- { "name" : "op_jbelow", "length" : 4 }, >- { "name" : "op_jbeloweq", "length" : 4 }, >- { "name" : "op_loop_hint", "length" : 1 }, >- { "name" : "op_switch_imm", "length" : 4 }, >- { "name" : "op_switch_char", "length" : 4 }, >- { "name" : "op_switch_string", "length" : 4 }, >- { "name" : "op_new_func", "length" : 4 }, >- { "name" : "op_new_func_exp", "length" : 4 }, >- { "name" : "op_new_generator_func", "length" : 4 }, >- { "name" : "op_new_generator_func_exp", "length" : 4 }, >- { "name" : "op_new_async_func", "length" : 4 }, >- { "name" : "op_new_async_func_exp", "length" : 4 }, >- { "name" : "op_new_async_generator_func", "length" : 4 }, >- { "name" : "op_new_async_generator_func_exp", "length" : 4 }, >- { "name" : "op_set_function_name", "length" : 3 }, >- { "name" : "op_call", "length" : 9 }, >- { "name" : "op_tail_call", "length" : 9 }, >- { "name" : "op_call_eval", "length" : 9 }, >- { "name" : "op_call_varargs", "length" : 9 }, >- { "name" : "op_tail_call_varargs", "length" : 9 }, >- { "name" : "op_tail_call_forward_arguments", "length" : 9 }, >- { "name" : "op_ret", "length" : 2 }, >- { "name" : "op_construct", "length" : 9 }, >- { "name" : "op_construct_varargs", "length" : 9 }, >- { "name" : "op_strcat", "length" : 4 }, >- { "name" : "op_to_primitive", "length" : 3 }, >- { "name" : "op_resolve_scope", "length" : 7 }, >- { "name" : "op_get_from_scope", "length" : 8 }, >- { "name" : "op_put_to_scope", "length" : 7 }, >- { "name" : "op_get_from_arguments", "length" : 5 }, >- { "name" : "op_put_to_arguments", "length" : 4 }, >- { "name" : "op_push_with_scope", "length" : 4 }, >- { "name" : "op_create_lexical_environment", "length" : 5 }, >- { "name" : "op_get_parent_scope", "length" : 3 }, >- { "name" : "op_catch", "length" : 4 }, >- { "name" : "op_throw", "length" : 2 }, >- { "name" : "op_throw_static_error", "length" : 3 }, >- { "name" : 
"op_debug", "length" : 3 }, >- { "name" : "op_end", "length" : 2 }, >- { "name" : "op_profile_type", "length" : 6 }, >- { "name" : "op_profile_control_flow", "length" : 2 }, >- { "name" : "op_get_enumerable_length", "length" : 3 }, >- { "name" : "op_has_indexed_property", "length" : 5 }, >- { "name" : "op_has_structure_property", "length" : 5 }, >- { "name" : "op_has_generic_property", "length" : 4 }, >- { "name" : "op_get_direct_pname", "length" : 7 }, >- { "name" : "op_get_property_enumerator", "length" : 3 }, >- { "name" : "op_enumerator_structure_pname", "length" : 4 }, >- { "name" : "op_enumerator_generic_pname", "length" : 4 }, >- { "name" : "op_to_index_string", "length" : 3 }, >- { "name" : "op_unreachable", "length" : 1 }, >- { "name" : "op_create_rest", "length": 4 }, >- { "name" : "op_get_rest_length", "length": 3 }, >- { "name" : "op_yield", "length" : 4 }, >- { "name" : "op_check_traps", "length" : 1 }, >- { "name" : "op_log_shadow_chicken_prologue", "length" : 2}, >- { "name" : "op_log_shadow_chicken_tail", "length" : 3}, >- { "name" : "op_resolve_scope_for_hoisting_func_decl_in_eval", "length" : 4 }, >- { "name" : "op_nop", "length" : 1 }, >- { "name" : "op_super_sampler_begin", "length" : 1 }, >- { "name" : "op_super_sampler_end", "length" : 1 } >- ] >- }, >- { >- "section" : "CLoopHelpers", "emitInHFile" : true, "emitInStructsFile" : false, "emitInASMFile" : false, >- "emitOpcodeIDStringValuesInHFile" : false, "defaultLength" : 1, "macroNameComponent" : "CLOOP_BYTECODE_HELPER", >- "bytecodes" : [ >- { "name" : "llint_entry" }, >- { "name" : "getHostCallReturnValue" }, >- { "name" : "llint_return_to_host" }, >- { "name" : "llint_vm_entry_to_javascript" }, >- { "name" : "llint_vm_entry_to_native" }, >- { "name" : "llint_cloop_did_return_from_js_1" }, >- { "name" : "llint_cloop_did_return_from_js_2" }, >- { "name" : "llint_cloop_did_return_from_js_3" }, >- { "name" : "llint_cloop_did_return_from_js_4" }, >- { "name" : 
"llint_cloop_did_return_from_js_5" }, >- { "name" : "llint_cloop_did_return_from_js_6" }, >- { "name" : "llint_cloop_did_return_from_js_7" }, >- { "name" : "llint_cloop_did_return_from_js_8" }, >- { "name" : "llint_cloop_did_return_from_js_9" }, >- { "name" : "llint_cloop_did_return_from_js_10" }, >- { "name" : "llint_cloop_did_return_from_js_11" }, >- { "name" : "llint_cloop_did_return_from_js_12" } >- ] >- }, >- { >- "section" : "NativeHelpers", "emitInHFile" : true, "emitInStructsFile" : false, "emitInASMFile" : true, >- "emitOpcodeIDStringValuesInHFile" : false, "defaultLength" : 1, "macroNameComponent" : "BYTECODE_HELPER", >- "bytecodes" : [ >- { "name" : "llint_program_prologue" }, >- { "name" : "llint_eval_prologue" }, >- { "name" : "llint_module_program_prologue" }, >- { "name" : "llint_function_for_call_prologue" }, >- { "name" : "llint_function_for_construct_prologue" }, >- { "name" : "llint_function_for_call_arity_check" }, >- { "name" : "llint_function_for_construct_arity_check" }, >- { "name" : "llint_generic_return_point" }, >- { "name" : "llint_throw_from_slow_path_trampoline" }, >- { "name" : "llint_throw_during_call_trampoline" }, >- { "name" : "llint_native_call_trampoline" }, >- { "name" : "llint_native_construct_trampoline" }, >- { "name" : "llint_internal_function_call_trampoline" }, >- { "name" : "llint_internal_function_construct_trampoline" }, >- { "name" : "handleUncaughtException" } >- ] >- } >-] >diff --git a/Source/JavaScriptCore/bytecode/BytecodeList.rb b/Source/JavaScriptCore/bytecode/BytecodeList.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..37ff2f0eeddd192d315770c94a263534ce2f93da >--- /dev/null >+++ b/Source/JavaScriptCore/bytecode/BytecodeList.rb >@@ -0,0 +1,1112 @@ >+types [ >+ :VirtualRegister, >+ >+ :BasicBlockLocation, >+ :DebugHookType, >+ :ErrorType, >+ :GetByIdMode, >+ :GetByIdModeMetadata, >+ :GetPutInfo, >+ :IndexingType, >+ :JSCell, >+ :JSGlobalLexicalEnvironment, >+ :JSGlobalObject, >+ 
:JSModuleEnvironment, >+ :JSObject, >+ :JSScope, >+ :JSType, >+ :JSValue, >+ :LLIntCallLinkInfo, >+ :OperandTypes, >+ :ProfileTypeBytecodeFlag, >+ :PropertyOffset, >+ :PutByIdFlags, >+ :ResolveType, >+ :ScopeOffset, >+ :Structure, >+ :StructureID, >+ :StructureChain, >+ :SymbolTable, >+ :ToThisStatus, >+ :TypeLocation, >+ :WatchpointSet, >+ >+ :ValueProfile, >+ :ValueProfileAndOperandBuffer, >+ :ArithProfile, >+ :ArrayProfile, >+ :ArrayAllocationProfile, >+ :ObjectAllocationProfile, >+] >+ >+namespace :Special do >+ types [ :Pointer ] >+end >+ >+templates [ >+ :WriteBarrier, >+ :WriteBarrierBase, >+] >+ >+begin_section :Bytecodes, >+ emit_in_h_file: true, >+ emit_in_structs_file: true, >+ emit_in_asm_file: true, >+ emit_opcode_id_string_values_in_h_file: true, >+ macro_name_component: :BYTECODE, >+ asm_prefix: "llint_", >+ op_prefix: "op_" >+ >+op :wide >+ >+op :enter >+ >+op :get_scope, >+ args: { >+ dst: VirtualRegister >+ } >+ >+op :create_direct_arguments, >+ args: { >+ dst: VirtualRegister, >+ } >+ >+op :create_scoped_arguments, >+ args: { >+ dst: VirtualRegister, >+ scope: VirtualRegister, >+ } >+ >+op :create_cloned_arguments, >+ args: { >+ dst: VirtualRegister, >+ } >+ >+op :create_this, >+ args: { >+ dst: VirtualRegister, >+ callee: VirtualRegister, >+ inlineCapacity: unsigned, >+ }, >+ metadata: { >+ cachedCallee: WriteBarrier[JSCell] >+ } >+ >+op :get_argument, >+ args: { >+ dst: VirtualRegister, >+ index: int, >+ }, >+ metadata: { >+ profile: ValueProfile, >+ } >+ >+op :argument_count, >+ args: { >+ dst: VirtualRegister, >+ } >+ >+op :to_this, >+ args: { >+ srcDst: VirtualRegister, >+ }, >+ metadata: { >+ cachedStructure: WriteBarrierBase[Structure], >+ toThisStatus: ToThisStatus, >+ profile: ValueProfile, >+ } >+ >+op :check_tdz, >+ args: { >+ target: VirtualRegister, >+ } >+ >+op :new_object, >+ args: { >+ dst: VirtualRegister, >+ inlineCapacity: unsigned, >+ }, >+ metadata: { >+ allocationProfile: ObjectAllocationProfile, >+ } >+ >+op :new_array, >+ 
args: { >+ dst: VirtualRegister, >+ argv: VirtualRegister, >+ argc: unsigned, >+ recommendedIndexingType: IndexingType, >+ }, >+ metadata: { >+ allocationProfile: ArrayAllocationProfile, >+ }, >+ metadata_initializers: { >+ allocationProfile: :recommendedIndexingType, >+ } >+ >+op :new_array_with_size, >+ args: { >+ dst: VirtualRegister, >+ length: VirtualRegister, >+ }, >+ metadata: { >+ allocationProfile: ArrayAllocationProfile, >+ } >+ >+op :new_array_buffer, >+ args: { >+ dst: VirtualRegister, >+ immutableButterfly: VirtualRegister, >+ recommendedIndexingType: IndexingType >+ }, >+ metadata: { >+ allocationProfile: ArrayAllocationProfile, >+ }, >+ metadata_initializers: { >+ allocationProfile: :recommendedIndexingType, >+ } >+ >+op :new_array_with_spread, >+ args: { >+ dst: VirtualRegister, >+ argv: VirtualRegister, >+ argc: unsigned, >+ bitVector: unsigned, >+ } >+ >+op :spread, >+ args: { >+ dst: VirtualRegister, >+ argument: VirtualRegister, >+ } >+ >+op :new_regexp, >+ args: { >+ dst: VirtualRegister, >+ regexp: VirtualRegister, >+ } >+ >+op :mov, >+ args: { >+ dst: VirtualRegister, >+ src: VirtualRegister, >+ } >+ >+op_group :BinaryOp, >+ [ >+ :eq, >+ :neq, >+ :stricteq, >+ :nstricteq, >+ :less, >+ :lesseq, >+ :greater, >+ :greatereq, >+ :below, >+ :beloweq, >+ :mod, >+ :pow, >+ :lshift, >+ :rshift, >+ :urshift, >+ ], >+ args: { >+ dst: VirtualRegister, >+ lhs: VirtualRegister, >+ rhs: VirtualRegister, >+ } >+ >+op_group :ProfiledBinaryOp, >+ [ >+ :add, >+ :mul, >+ :div, >+ :sub, >+ :bitand, >+ :bitxor, >+ :bitor, >+ ], >+ args: { >+ dst: VirtualRegister, >+ lhs: VirtualRegister, >+ rhs: VirtualRegister, >+ operandTypes: OperandTypes, >+ }, >+ metadata: { >+ arithProfile: ArithProfile >+ }, >+ metadata_initializers: { >+ arithProfile: :operandTypes >+ } >+ >+op_group :UnaryOp, >+ [ >+ :eq_null, >+ :neq_null, >+ :to_string, >+ :unsigned, >+ :is_empty, >+ :is_undefined, >+ :is_boolean, >+ :is_number, >+ :is_object, >+ :is_object_or_null, >+ :is_function, >+ 
], >+ args: { >+ dst: VirtualRegister, >+ operand: VirtualRegister, >+ } >+ >+op :inc, >+ args: { >+ srcDst: VirtualRegister, >+ } >+ >+op :dec, >+ args: { >+ srcDst: VirtualRegister, >+ } >+ >+op :to_object, >+ args: { >+ dst: VirtualRegister, >+ operand: VirtualRegister, >+ message: unsigned, >+ }, >+ metadata: { >+ profile: ValueProfile, >+ } >+ >+op :to_number, >+ args: { >+ dst: VirtualRegister, >+ operand: VirtualRegister, >+ }, >+ metadata: { >+ profile: ValueProfile, >+ } >+ >+op :negate, >+ args: { >+ dst: VirtualRegister, >+ operand: VirtualRegister, >+ operandTypes: OperandTypes, >+ }, >+ metadata: { >+ arithProfile: ArithProfile, >+ }, >+ metadata_initializers: { >+ arithProfile: :operandTypes >+ } >+ >+op :not, >+ args: { >+ dst: VirtualRegister, >+ operand: VirtualRegister, >+ } >+ >+ >+op :identity_with_profile, >+ args: { >+ srcDst: VirtualRegister, >+ topProfile: unsigned, >+ bottomProfile: unsigned, >+ } >+ >+op :overrides_has_instance, >+ args: { >+ dst: VirtualRegister, >+ constructor: VirtualRegister, >+ hasInstanceValue: VirtualRegister, >+ } >+ >+op :instanceof, >+ args: { >+ dst: VirtualRegister, >+ value: VirtualRegister, >+ prototype: VirtualRegister, >+ } >+ >+op :instanceof_custom, >+ args: { >+ dst: VirtualRegister, >+ value: VirtualRegister, >+ constructor: VirtualRegister, >+ hasInstanceValue: VirtualRegister, >+ } >+ >+op :typeof, >+ args: { >+ dst: VirtualRegister, >+ value: VirtualRegister, >+ } >+ >+op :is_cell_with_type, >+ args: { >+ dst: VirtualRegister, >+ operand: VirtualRegister, >+ type: JSType, >+ } >+ >+op :in_by_val, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: VirtualRegister, >+ }, >+ metadata: { >+ arrayProfile: ArrayProfile, >+ } >+ >+op :in_by_id, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: unsigned, >+ } >+ >+op :get_by_id, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: unsigned, >+ }, >+ metadata: { >+ mode: GetByIdMode, >+ 
hitCountForLLIntCaching: unsigned, >+ structure: StructureID, >+ modeMetadata: GetByIdModeMetadata, >+ profile: ValueProfile, >+ } >+ >+op :get_by_id_with_this, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ thisValue: VirtualRegister, >+ property: unsigned, >+ }, >+ metadata: { >+ profile: ValueProfile, >+ } >+ >+op :get_by_val_with_this, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ thisValue: VirtualRegister, >+ property: VirtualRegister, >+ }, >+ metadata: { >+ profile: ValueProfile, >+ } >+ >+op :get_by_id_direct, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: unsigned, >+ }, >+ metadata: { >+ profile: ValueProfile, # not used in llint >+ structure: StructureID, >+ offset: unsigned, >+ } >+ >+op :try_get_by_id, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: unsigned, >+ }, >+ metadata: { >+ profile: ValueProfile, >+ } >+ >+op :put_by_id, >+ args: { >+ base: VirtualRegister, >+ property: unsigned, >+ value: VirtualRegister, >+ flags: PutByIdFlags, >+ }, >+ metadata: { >+ oldStructure: StructureID, >+ offset: unsigned, >+ newStructure: StructureID, >+ structureChain: WriteBarrierBase[StructureChain], >+ flags: PutByIdFlags, >+ } >+ >+op :put_by_id_with_this, >+ args: { >+ base: VirtualRegister, >+ thisValue: VirtualRegister, >+ property: unsigned, >+ value: VirtualRegister, >+ } >+ >+op :del_by_id, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: unsigned, >+ } >+ >+op :get_by_val, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: VirtualRegister, >+ }, >+ metadata: { >+ profile: ValueProfile, >+ arrayProfile: ArrayProfile, >+ } >+ >+op :put_by_val, >+ args: { >+ base: VirtualRegister, >+ property: VirtualRegister, >+ value: VirtualRegister, >+ }, >+ metadata: { >+ arrayProfile: ArrayProfile, >+ } >+ >+op :put_by_val_with_this, >+ args: { >+ base: VirtualRegister, >+ thisValue: VirtualRegister, >+ property: 
VirtualRegister, >+ value: VirtualRegister, >+ } >+ >+op :put_by_val_direct, >+ args: { >+ base: VirtualRegister, >+ property: VirtualRegister, >+ value: VirtualRegister, >+ }, >+ metadata: { >+ arrayProfile: ArrayProfile, >+ } >+ >+op :del_by_val, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: VirtualRegister, >+ } >+ >+op :put_getter_by_id, >+ args: { >+ base: VirtualRegister, >+ property: unsigned, >+ attributes: unsigned, >+ accessor: VirtualRegister, >+ } >+ >+op :put_setter_by_id, >+ args: { >+ base: VirtualRegister, >+ property: unsigned, >+ attributes: unsigned, >+ accessor: VirtualRegister, >+ } >+ >+op :put_getter_setter_by_id, >+ args: { >+ base: VirtualRegister, >+ property: unsigned, >+ attributes: unsigned, >+ getter: VirtualRegister, >+ setter: VirtualRegister, >+ } >+ >+op :put_getter_by_val, >+ args: { >+ base: VirtualRegister, >+ property: VirtualRegister, >+ attributes: unsigned, >+ accessor: VirtualRegister, >+ } >+ >+op :put_setter_by_val, >+ args: { >+ base: VirtualRegister, >+ property: VirtualRegister, >+ attributes: unsigned, >+ accessor: VirtualRegister, >+ } >+ >+op :define_data_property, >+ args: { >+ base: VirtualRegister, >+ property: VirtualRegister, >+ value: VirtualRegister, >+ attributes: VirtualRegister, >+ } >+ >+op :define_accessor_property, >+ args: { >+ base: VirtualRegister, >+ property: VirtualRegister, >+ getter: VirtualRegister, >+ setter: VirtualRegister, >+ attributes: VirtualRegister, >+ } >+ >+op :jmp, >+ args: { >+ target: int, >+ } >+ >+op :jtrue, >+ args: { >+ condition: VirtualRegister, >+ target: int, >+ } >+ >+op :jfalse, >+ args: { >+ condition: VirtualRegister, >+ target: int, >+ } >+ >+op :jeq_null, >+ args: { >+ condition: VirtualRegister, >+ target: int, >+ } >+ >+op :jneq_null, >+ args: { >+ condition: VirtualRegister, >+ target: int, >+ } >+ >+op :jneq_ptr, >+ args: { >+ condition: VirtualRegister, >+ specialPointer: Special::Pointer, >+ target: int, >+ }, >+ metadata: { >+ 
hasJumped: bool, >+ } >+ >+op_group :BinaryJmp, >+ [ >+ :jeq, >+ :jstricteq, >+ :jneq, >+ :jnstricteq, >+ :jless, >+ :jlesseq, >+ :jgreater, >+ :jgreatereq, >+ :jnless, >+ :jnlesseq, >+ :jngreater, >+ :jngreatereq, >+ :jbelow, >+ :jbeloweq, >+ ], >+ args: { >+ lhs: VirtualRegister, >+ rhs: VirtualRegister, >+ target: int, >+ } >+ >+op :loop_hint >+ >+op_group :SwitchValue, >+ [ >+ :switch_imm, >+ :switch_char, >+ :switch_string, >+ ], >+ args: { >+ tableIndex: int, >+ defaultOffset: int, >+ scrutinee: VirtualRegister, >+ } >+ >+op_group :NewFunction, >+ [ >+ :new_func, >+ :new_func_exp, >+ :new_generator_func, >+ :new_generator_func_exp, >+ :new_async_func, >+ :new_async_func_exp, >+ :new_async_generator_func, >+ :new_async_generator_func_exp, >+ ], >+ args: { >+ dst: VirtualRegister, >+ scope: VirtualRegister, >+ functionDecl: int, >+ } >+ >+op :set_function_name, >+ args: { >+ function: VirtualRegister, >+ name: VirtualRegister, >+ } >+ >+# op_call variations >+op :call, >+ args: { >+ dst: VirtualRegister, >+ callee: VirtualRegister, >+ argc: unsigned, >+ argv: unsigned, >+ }, >+ metadata: { >+ callLinkInfo: LLIntCallLinkInfo, >+ # ? there was an extra slot here >+ arrayProfile: ArrayProfile, >+ profile: ValueProfile, >+ } >+ >+op :tail_call, >+ args: { >+ dst: VirtualRegister, >+ callee: VirtualRegister, >+ argc: unsigned, >+ argv: unsigned, >+ }, >+ metadata: { >+ callLinkInfo: LLIntCallLinkInfo, >+ # ? there was an extra slot here >+ arrayProfile: ArrayProfile, >+ profile: ValueProfile, >+ } >+ >+op :call_eval, >+ args: { >+ dst: VirtualRegister, >+ callee: VirtualRegister, >+ argc: unsigned, >+ argv: unsigned, >+ }, >+ metadata: { >+ callLinkInfo: LLIntCallLinkInfo, >+ # ? 
there was an extra slot here >+ arrayProfile: ArrayProfile, >+ profile: ValueProfile, >+ } >+ >+op :call_varargs, >+ args: { >+ dst: VirtualRegister, >+ callee: VirtualRegister, >+ thisValue?: VirtualRegister, >+ arguments?: VirtualRegister, >+ firstFree: VirtualRegister, >+ firstVarArg: int, >+ }, >+ metadata: { >+ arrayProfile: ArrayProfile, >+ profile: ValueProfile, >+ } >+ >+op :tail_call_varargs, >+ args: { >+ dst: VirtualRegister, >+ callee: VirtualRegister, >+ thisValue?: VirtualRegister, >+ arguments?: VirtualRegister, >+ firstFree: VirtualRegister, >+ firstVarArg: int, >+ }, >+ metadata: { >+ arrayProfile: ArrayProfile, >+ profile: ValueProfile, >+ } >+ >+op :tail_call_forward_arguments, >+ args: { >+ dst: VirtualRegister, >+ callee: VirtualRegister, >+ thisValue?: VirtualRegister, >+ arguments?: VirtualRegister, >+ firstFree: VirtualRegister, >+ firstVarArg: int, >+ }, >+ metadata: { >+ arrayProfile: ArrayProfile, >+ profile: ValueProfile, >+ } >+ >+op :construct, >+ args: { >+ dst: VirtualRegister, >+ callee: VirtualRegister, >+ argc: unsigned, >+ argv: unsigned, >+ }, >+ metadata: { >+ callLinkInfo: LLIntCallLinkInfo, >+ # ? there was an extra slot here >+ # ? 
empty slot here >+ arrayProfile: ArrayProfile, >+ profile: ValueProfile, >+ } >+ >+op :construct_varargs, >+ args: { >+ dst: VirtualRegister, >+ callee: VirtualRegister, >+ thisValue?: VirtualRegister, >+ arguments?: VirtualRegister, >+ firstFree: VirtualRegister, >+ firstVarArg: int, >+ }, >+ metadata: { >+ arrayProfile: ArrayProfile, >+ profile: ValueProfile, >+ } >+ >+op :ret, >+ args: { >+ value: VirtualRegister, >+ } >+ >+op :strcat, >+ args: { >+ dst: VirtualRegister, >+ src: VirtualRegister, >+ count: int, >+ } >+ >+op :to_primitive, >+ args: { >+ dst: VirtualRegister, >+ src: VirtualRegister, >+ } >+ >+op :resolve_scope, >+ args: { >+ dst: VirtualRegister, >+ scope: VirtualRegister, >+ var: unsigned, >+ resolveType: ResolveType, >+ localScopeDepth: unsigned, >+ }, >+ metadata: { >+ resolveType: ResolveType, >+ >+ # TODO: these two should be a union >+ globalObject: JSGlobalObject.*, >+ globalLexicalEnvironment: JSGlobalLexicalEnvironment.*, >+ >+ # not seen yet >+ localScopeDepth: unsigned, >+ symbolTable: WriteBarrierBase[SymbolTable], >+ scope: WriteBarrierBase[JSScope], >+ moduleEnvironment: WriteBarrierBase[JSModuleEnvironment], >+ }, >+ metadata_initializers: { >+ resolveType: :resolveType, >+ localScopeDepth: :localScopeDepth, >+ } >+ >+op :get_from_scope, >+ args: { >+ dst: VirtualRegister, >+ scope: VirtualRegister, >+ var: unsigned, >+ getPutInfo: GetPutInfo, >+ localScopeDepth: int, >+ offset: unsigned, # TODO: this should be ScopeOffset >+ }, >+ metadata: { >+ profile: ValueProfile, >+ getPutInfo: GetPutInfo, >+ watchpointSet: WatchpointSet.*, >+ structure: WriteBarrierBase[Structure], >+ operand: uintptr_t, >+ }, >+ metadata_initializers: { >+ getPutInfo: :getPutInfo, >+ operand: :offset, >+ } >+ >+op :put_to_scope, >+ args: { >+ scope: VirtualRegister, >+ var: unsigned, >+ value: VirtualRegister, >+ getPutInfo: GetPutInfo, >+ localScopeDepth: int, >+ offset: unsigned, # TODO: this should be ScopeOffset >+ }, >+ metadata: { >+ getPutInfo:
GetPutInfo, >+ structure: WriteBarrierBase[Structure], >+ watchpointSet: WatchpointSet.*, >+ operand: uintptr_t, >+ }, >+ metadata_initializers: { >+ getPutInfo: :getPutInfo, >+ operand: :offset, >+ } >+ >+op :get_from_arguments, >+ args: { >+ dst: VirtualRegister, >+ arguments: VirtualRegister, >+ index: unsigned, >+ }, >+ metadata: { >+ profile: ValueProfile, >+ } >+ >+op :put_to_arguments, >+ args: { >+ arguments: VirtualRegister, >+ index: unsigned, >+ value: VirtualRegister, >+ } >+ >+op :push_with_scope, >+ args: { >+ dst: VirtualRegister, >+ currentScope: VirtualRegister, >+ newScope: VirtualRegister, >+ } >+ >+op :create_lexical_environment, >+ args: { >+ dst: VirtualRegister, >+ scope: VirtualRegister, >+ symbolTable: VirtualRegister, >+ initialValue: VirtualRegister, >+ } >+ >+op :get_parent_scope, >+ args: { >+ dst: VirtualRegister, >+ scope: VirtualRegister, >+ } >+ >+op :catch, >+ args: { >+ exception: VirtualRegister, >+ thrownValue: VirtualRegister, >+ }, >+ metadata: { >+ buffer: ValueProfileAndOperandBuffer.*, >+ } >+ >+op :throw, >+ args: { >+ value: VirtualRegister, >+ } >+ >+op :throw_static_error, >+ args: { >+ message: VirtualRegister, >+ errorType: ErrorType, >+ } >+ >+op :debug, >+ args: { >+ debugHookType: DebugHookType, >+ hasBreakpoint: bool, >+ } >+ >+op :end, >+ args: { >+ value: VirtualRegister, >+ } >+ >+op :profile_type, >+ args: { >+ target: VirtualRegister, >+ symbolTableOrScopeDepth: int, >+ flag: ProfileTypeBytecodeFlag, >+ identifier?: unsigned, >+ resolveType: ResolveType, >+ }, >+ metadata: { >+ typeLocation: TypeLocation.*, >+ } >+ >+op :profile_control_flow, >+ args: { >+ textOffset: int, >+ }, >+ metadata: { >+ basicBlockLocation: BasicBlockLocation.*, >+ } >+ >+op :get_enumerable_length, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ } >+ >+op :has_indexed_property, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: VirtualRegister, >+ }, >+ metadata: { >+ arrayProfile: 
ArrayProfile, >+ } >+ >+op :has_structure_property, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: VirtualRegister, >+ enumerator: VirtualRegister, >+ } >+ >+op :has_generic_property, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: VirtualRegister, >+ } >+ >+op :get_direct_pname, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ property: VirtualRegister, >+ index: VirtualRegister, >+ enumerator: VirtualRegister, >+ }, >+ metadata: { >+ profile: ValueProfile, >+ } >+ >+op :get_property_enumerator, >+ args: { >+ dst: VirtualRegister, >+ base: VirtualRegister, >+ } >+ >+op :enumerator_structure_pname, >+ args: { >+ dst: VirtualRegister, >+ enumerator: VirtualRegister, >+ index: VirtualRegister, >+ } >+ >+op :enumerator_generic_pname, >+ args: { >+ dst: VirtualRegister, >+ enumerator: VirtualRegister, >+ index: VirtualRegister, >+ } >+ >+op :to_index_string, >+ args: { >+ dst: VirtualRegister, >+ index: VirtualRegister, >+ } >+ >+op :unreachable >+ >+op :create_rest, >+ args: { >+ dst: VirtualRegister, >+ arraySize: VirtualRegister, >+ numParametersToSkip: unsigned, >+ } >+ >+op :get_rest_length, >+ args: { >+ dst: VirtualRegister, >+ numParametersToSkip: unsigned, >+ } >+ >+op :yield, >+ args: { >+ generator: VirtualRegister, >+ yieldPoint: unsigned, >+ argument: VirtualRegister, >+ } >+ >+op :check_traps >+ >+op :log_shadow_chicken_prologue, >+ args: { >+ scope: VirtualRegister, >+ } >+ >+op :log_shadow_chicken_tail, >+ args: { >+ thisValue: VirtualRegister, >+ scope: VirtualRegister, >+ } >+ >+op :resolve_scope_for_hoisting_func_decl_in_eval, >+ args: { >+ dst: VirtualRegister, >+ scope: VirtualRegister, >+ property: unsigned, >+ } >+ >+op :nop >+ >+op :super_sampler_begin >+ >+op :super_sampler_end >+ >+end_section :Bytecodes >+ >+begin_section :CLoopHelpers, >+ emit_in_h_file: true, >+ macro_name_component: :CLOOP_BYTECODE_HELPER >+ >+op :llint_entry >+op :getHostCallReturnValue >+op 
:llint_return_to_host >+op :llint_vm_entry_to_javascript >+op :llint_vm_entry_to_native >+op :llint_cloop_did_return_from_js_1 >+op :llint_cloop_did_return_from_js_2 >+op :llint_cloop_did_return_from_js_3 >+op :llint_cloop_did_return_from_js_4 >+op :llint_cloop_did_return_from_js_5 >+op :llint_cloop_did_return_from_js_6 >+op :llint_cloop_did_return_from_js_7 >+op :llint_cloop_did_return_from_js_8 >+op :llint_cloop_did_return_from_js_9 >+op :llint_cloop_did_return_from_js_10 >+op :llint_cloop_did_return_from_js_11 >+op :llint_cloop_did_return_from_js_12 >+ >+end_section :CLoopHelpers >+ >+begin_section :NativeHelpers, >+ emit_in_h_file: true, >+ emit_in_asm_file: true, >+ macro_name_component: :BYTECODE_HELPER >+ >+op :llint_program_prologue >+op :llint_eval_prologue >+op :llint_module_program_prologue >+op :llint_function_for_call_prologue >+op :llint_function_for_construct_prologue >+op :llint_function_for_call_arity_check >+op :llint_function_for_construct_arity_check >+op :llint_generic_return_point >+op :llint_throw_from_slow_path_trampoline >+op :llint_throw_during_call_trampoline >+op :llint_native_call_trampoline >+op :llint_native_construct_trampoline >+op :llint_internal_function_call_trampoline >+op :llint_internal_function_construct_trampoline >+op :handleUncaughtException >+ >+end_section :NativeHelpers >diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp >index e0169dfb498ee644d610aaf4df90b4845fabae2f..330c7c7a98dee70b1961ced0e1c5c24277b25e8b 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp >+++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp >@@ -119,7 +119,7 @@ void BytecodeLivenessAnalysis::computeKills(CodeBlock* codeBlock, BytecodeKills& > void BytecodeLivenessAnalysis::dumpResults(CodeBlock* codeBlock) > { > dataLog("\nDumping bytecode liveness for ", *codeBlock, ":\n"); >- Instruction* instructionsBegin = 
codeBlock->instructions().begin(); >+ const auto& instructions = codeBlock->instructions(); > unsigned i = 0; > > unsigned numberOfBlocks = m_graph.size(); >@@ -167,17 +167,15 @@ void BytecodeLivenessAnalysis::dumpResults(CodeBlock* codeBlock) > continue; > } > for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) { >- const Instruction* currentInstruction = &instructionsBegin[bytecodeOffset]; >+ const auto currentInstruction = instructions.at(bytecodeOffset); > > dataLogF("Live variables:"); > FastBitVector liveBefore = getLivenessInfoAtBytecodeOffset(codeBlock, bytecodeOffset); > dumpBitVector(liveBefore); > dataLogF("\n"); >- codeBlock->dumpBytecode(WTF::dataFile(), instructionsBegin, currentInstruction); >+ codeBlock->dumpBytecode(WTF::dataFile(), currentInstruction); > >- OpcodeID opcodeID = Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode); >- unsigned opcodeLength = opcodeLengths[opcodeID]; >- bytecodeOffset += opcodeLength; >+ bytecodeOffset += currentInstruction->size(); > } > > dataLogF("Live variables:"); >diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h >index 64b175625b88258c014a0d0eaae56c7cc3e718a0..7bef4cf1dce2a1a7fc183afeae21dd5914f8f949 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h >+++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h >@@ -37,9 +37,9 @@ class FullBytecodeLiveness; > > class BytecodeLivenessPropagation { > protected: >- template<typename CodeBlockType, typename Instructions, typename UseFunctor, typename DefFunctor> void stepOverInstruction(CodeBlockType*, const Instructions&, BytecodeGraph&, unsigned bytecodeOffset, const UseFunctor&, const DefFunctor&); >+ template<typename CodeBlockType, typename UseFunctor, typename DefFunctor> void stepOverInstruction(CodeBlockType*, const InstructionStream&, BytecodeGraph&, 
InstructionStream::Offset bytecodeOffset, const UseFunctor&, const DefFunctor&); > >- template<typename CodeBlockType, typename Instructions> void stepOverInstruction(CodeBlockType*, const Instructions&, BytecodeGraph&, unsigned bytecodeOffset, FastBitVector& out); >+ template<typename CodeBlockType> void stepOverInstruction(CodeBlockType*, const InstructionStream&, BytecodeGraph&, InstructionStream::Offset bytecodeOffset, FastBitVector& out); > > template<typename CodeBlockType, typename Instructions> bool computeLocalLivenessForBytecodeOffset(CodeBlockType*, const Instructions&, BytecodeGraph&, BytecodeBasicBlock*, unsigned targetOffset, FastBitVector& result); > >diff --git a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h >index 15e847fc3c50a2ee82b0600e7ee9d988565ada59..71ff56f7ea68a2f455cf678b3c79a2d7bab9b39d 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h >+++ b/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h >@@ -51,18 +51,17 @@ inline bool operandIsLive(const FastBitVector& out, int operand) > return operandIsAlwaysLive(operand) || operandThatIsNotAlwaysLiveIsLive(out, operand); > } > >-inline bool isValidRegisterForLiveness(int operand) >+inline bool isValidRegisterForLiveness(VirtualRegister operand) > { >- VirtualRegister virtualReg(operand); >- if (virtualReg.isConstant()) >+ if (operand.isConstant()) > return false; >- return virtualReg.isLocal(); >+ return operand.isLocal(); > } > > // Simplified interface to bytecode use/def, which determines defs first and then uses, and includes > // exception handlers in the uses. 
>-template<typename CodeBlockType, typename Instructions, typename UseFunctor, typename DefFunctor> >-inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* codeBlock, const Instructions& instructions, BytecodeGraph& graph, unsigned bytecodeOffset, const UseFunctor& use, const DefFunctor& def) >+template<typename CodeBlockType, typename UseFunctor, typename DefFunctor> >+inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* codeBlock, const InstructionStream& instructions, BytecodeGraph& graph, InstructionStream::Offset bytecodeOffset, const UseFunctor& use, const DefFunctor& def) > { > // This abstractly execute the instruction in reverse. Instructions logically first use operands and > // then define operands. This logical ordering is necessary for operations that use and def the same >@@ -79,22 +78,21 @@ inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* code > // uses before defs, then the add operation above would appear to not have loc1 live, since we'd > // first add it to the out set (the use), and then we'd remove it (the def). 
> >- auto* instructionsBegin = instructions.begin(); >- auto* instruction = &instructionsBegin[bytecodeOffset]; >- OpcodeID opcodeID = Interpreter::getOpcodeID(*instruction); >+ auto* instruction = instructions.at(bytecodeOffset).ptr(); >+ OpcodeID opcodeID = instruction->opcodeID(); > > computeDefsForBytecodeOffset( > codeBlock, opcodeID, instruction, >- [&] (CodeBlockType*, const typename CodeBlockType::Instruction*, OpcodeID, int operand) { >+ [&] (VirtualRegister operand) { > if (isValidRegisterForLiveness(operand)) >- def(VirtualRegister(operand).toLocal()); >+ def(operand.toLocal()); > }); > > computeUsesForBytecodeOffset( > codeBlock, opcodeID, instruction, >- [&] (CodeBlockType*, const typename CodeBlockType::Instruction*, OpcodeID, int operand) { >+ [&] (VirtualRegister operand) { > if (isValidRegisterForLiveness(operand)) >- use(VirtualRegister(operand).toLocal()); >+ use(operand.toLocal()); > }); > > // If we have an exception handler, we want the live-in variables of the >@@ -106,8 +104,8 @@ inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* code > } > } > >-template<typename CodeBlockType, typename Instructions> >-inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* codeBlock, const Instructions& instructions, BytecodeGraph& graph, unsigned bytecodeOffset, FastBitVector& out) >+template<typename CodeBlockType> >+inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* codeBlock, const InstructionStream& instructions, BytecodeGraph& graph, InstructionStream::Offset bytecodeOffset, FastBitVector& out) > { > stepOverInstruction( > codeBlock, instructions, graph, bytecodeOffset, >diff --git a/Source/JavaScriptCore/bytecode/BytecodeRewriter.cpp b/Source/JavaScriptCore/bytecode/BytecodeRewriter.cpp >index cb278cf10519eb61f17c69011bfb35701e645c76..40bba2e270a54ea2e96631f35fe01030283de546 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeRewriter.cpp >+++ 
b/Source/JavaScriptCore/bytecode/BytecodeRewriter.cpp >@@ -38,13 +38,13 @@ void BytecodeRewriter::applyModification() > for (size_t insertionIndex = m_insertions.size(); insertionIndex--;) { > Insertion& insertion = m_insertions[insertionIndex]; > if (insertion.type == Insertion::Type::Remove) >- m_instructions.remove(insertion.index.bytecodeOffset, insertion.length()); >+ m_writer.m_instructions.remove(insertion.index.bytecodeOffset, insertion.length()); > else { > if (insertion.includeBranch == IncludeBranch::Yes) { > int finalOffset = insertion.index.bytecodeOffset + calculateDifference(m_insertions.begin(), m_insertions.begin() + insertionIndex); > adjustJumpTargetsInFragment(finalOffset, insertion); > } >- m_instructions.insertVector(insertion.index.bytecodeOffset, insertion.instructions); >+ m_writer.m_instructions.insertVector(insertion.index.bytecodeOffset, insertion.instructions.m_instructions); > } > } > m_insertions.clear(); >@@ -56,28 +56,23 @@ void BytecodeRewriter::execute() > return lhs.index < rhs.index; > }); > >- m_codeBlock->applyModification(*this, m_instructions); >+ m_codeBlock->applyModification(*this, m_writer); > } > > void BytecodeRewriter::adjustJumpTargetsInFragment(unsigned finalOffset, Insertion& insertion) > { >- auto& fragment = insertion.instructions; >- UnlinkedInstruction* instructionsBegin = fragment.data(); >- for (unsigned fragmentOffset = 0, fragmentCount = fragment.size(); fragmentOffset < fragmentCount;) { >- UnlinkedInstruction& instruction = fragment[fragmentOffset]; >- OpcodeID opcodeID = instruction.u.opcode; >- if (isBranch(opcodeID)) { >- unsigned bytecodeOffset = finalOffset + fragmentOffset; >- extractStoredJumpTargetsForBytecodeOffset(m_codeBlock, instructionsBegin, fragmentOffset, [&](int32_t& label) { >+ for (auto& instruction : insertion.instructions) { >+ if (isBranch(instruction->opcodeID())) { >+ unsigned bytecodeOffset = finalOffset + instruction.offset(); >+ updateStoredJumpTargetsForInstruction(m_codeBlock, 
instruction, [&](int32_t label) { > int absoluteOffset = adjustAbsoluteOffset(label); >- label = absoluteOffset - static_cast<int>(bytecodeOffset); >+ return absoluteOffset - static_cast<int>(bytecodeOffset); > }); > } >- fragmentOffset += opcodeLength(opcodeID); > } > } > >-void BytecodeRewriter::insertImpl(InsertionPoint insertionPoint, IncludeBranch includeBranch, Vector<UnlinkedInstruction>&& fragment) >+void BytecodeRewriter::insertImpl(InsertionPoint insertionPoint, IncludeBranch includeBranch, InstructionStreamWriter&& writer) > { > ASSERT(insertionPoint.position == Position::Before || insertionPoint.position == Position::After); > m_insertions.append(Insertion { >@@ -85,7 +80,7 @@ void BytecodeRewriter::insertImpl(InsertionPoint insertionPoint, IncludeBranch i > Insertion::Type::Insert, > includeBranch, > 0, >- WTFMove(fragment) >+ WTFMove(writer) > }); > } > >diff --git a/Source/JavaScriptCore/bytecode/BytecodeRewriter.h b/Source/JavaScriptCore/bytecode/BytecodeRewriter.h >index a4723867ad0ce23692a3e5644ec94d1a4b4c2b54..0d5f352ac94ab2999adf3a9a6b331edd2c324cd6 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeRewriter.h >+++ b/Source/JavaScriptCore/bytecode/BytecodeRewriter.h >@@ -26,6 +26,7 @@ > > #pragma once > >+#include "BytecodeGenerator.h" > #include "BytecodeGraph.h" > #include "Bytecodes.h" > #include "Opcode.h" >@@ -93,10 +94,10 @@ public: > }; > > struct InsertionPoint { >- int bytecodeOffset; >+ InstructionStream::Offset bytecodeOffset; > Position position; > >- InsertionPoint(int offset, Position pos) >+ InsertionPoint(InstructionStream::Offset offset, Position pos) > : bytecodeOffset(offset) > , position(pos) > { >@@ -130,85 +131,86 @@ private: > Type type; > IncludeBranch includeBranch; > size_t removeLength; >- Vector<UnlinkedInstruction> instructions; >+ InstructionStreamWriter instructions; > }; > > public: > class Fragment { > WTF_MAKE_NONCOPYABLE(Fragment); > public: >- Fragment(Vector<UnlinkedInstruction>& fragment, IncludeBranch& 
includeBranch) >- : m_fragment(fragment) >+ Fragment(BytecodeGenerator& bytecodeGenerator, InstructionStreamWriter& writer, IncludeBranch& includeBranch) >+ : m_bytecodeGenerator(bytecodeGenerator) >+ , m_writer(writer) > , m_includeBranch(includeBranch) > { > } > >- template<class... Args> >- void appendInstruction(OpcodeID opcodeID, Args... args) >+ template<class Op, class... Args> >+ void appendInstruction(Args... args) > { >- if (isBranch(opcodeID)) >+ if (isBranch(Op::opcodeID())) > m_includeBranch = IncludeBranch::Yes; > >- UnlinkedInstruction instructions[sizeof...(args) + 1] = { >- UnlinkedInstruction(opcodeID), >- UnlinkedInstruction(args)... >- }; >- m_fragment.append(instructions, sizeof...(args) + 1); >+ m_bytecodeGenerator.withWriter(m_writer, [&] { >+ Op::emit(&m_bytecodeGenerator, std::forward<Args>(args)...); >+ }); > } > > private: >- Vector<UnlinkedInstruction>& m_fragment; >+ BytecodeGenerator& m_bytecodeGenerator; >+ InstructionStreamWriter& m_writer; > IncludeBranch& m_includeBranch; > }; > >- BytecodeRewriter(BytecodeGraph& graph, UnlinkedCodeBlock* codeBlock, UnlinkedCodeBlock::UnpackedInstructions& instructions) >- : m_graph(graph) >+ BytecodeRewriter(BytecodeGenerator& bytecodeGenerator, BytecodeGraph& graph, UnlinkedCodeBlock* codeBlock, InstructionStreamWriter& writer) >+ : m_bytecodeGenerator(bytecodeGenerator) >+ , m_graph(graph) > , m_codeBlock(codeBlock) >- , m_instructions(instructions) >+ , m_writer(writer) > { > } > > template<class Function> >- void insertFragmentBefore(unsigned bytecodeOffset, Function function) >+ void insertFragmentBefore(const InstructionStream::Ref& instruction, Function function) > { > IncludeBranch includeBranch = IncludeBranch::No; >- Vector<UnlinkedInstruction> instructions; >- Fragment fragment(instructions, includeBranch); >+ InstructionStreamWriter writer; >+ Fragment fragment(m_bytecodeGenerator, writer, includeBranch); > function(fragment); >- insertImpl(InsertionPoint(bytecodeOffset, 
Position::Before), includeBranch, WTFMove(instructions)); >+ insertImpl(InsertionPoint(instruction.offset(), Position::Before), includeBranch, WTFMove(writer)); > } > > template<class Function> >- void insertFragmentAfter(unsigned bytecodeOffset, Function function) >+ void insertFragmentAfter(const InstructionStream::Ref& instruction, Function function) > { > IncludeBranch includeBranch = IncludeBranch::No; >- Vector<UnlinkedInstruction> instructions; >- Fragment fragment(instructions, includeBranch); >+ InstructionStreamWriter writer; >+ Fragment fragment(m_bytecodeGenerator, writer, includeBranch); > function(fragment); >- insertImpl(InsertionPoint(bytecodeOffset, Position::After), includeBranch, WTFMove(instructions)); >+ insertImpl(InsertionPoint(instruction.offset(), Position::After), includeBranch, WTFMove(writer)); > } > >- void removeBytecode(unsigned bytecodeOffset) >+ void removeBytecode(const InstructionStream::Ref& instruction) > { >- m_insertions.append(Insertion { InsertionPoint(bytecodeOffset, Position::OriginalBytecodePoint), Insertion::Type::Remove, IncludeBranch::No, opcodeLength(m_instructions[bytecodeOffset].u.opcode), { } }); >+ m_insertions.append(Insertion { InsertionPoint(instruction.offset(), Position::OriginalBytecodePoint), Insertion::Type::Remove, IncludeBranch::No, instruction->size(), { } }); > } > > void execute(); > > BytecodeGraph& graph() { return m_graph; } > >- int adjustAbsoluteOffset(int absoluteOffset) >+ int adjustAbsoluteOffset(InstructionStream::Offset absoluteOffset) > { > return adjustJumpTarget(InsertionPoint(0, Position::EntryPoint), InsertionPoint(absoluteOffset, Position::LabelPoint)); > } > >- int adjustJumpTarget(int originalBytecodeOffset, int originalJumpTarget) >+ InstructionStream::Offset adjustJumpTarget(InstructionStream::Offset originalBytecodeOffset, InstructionStream::Offset originalJumpTarget) > { > return adjustJumpTarget(InsertionPoint(originalBytecodeOffset, Position::LabelPoint), 
InsertionPoint(originalJumpTarget, Position::LabelPoint)); > } > > private: >- void insertImpl(InsertionPoint, IncludeBranch, Vector<UnlinkedInstruction>&& fragment); >+ void insertImpl(InsertionPoint, IncludeBranch, InstructionStreamWriter&& fragment); > > friend class UnlinkedCodeBlock; > void applyModification(); >@@ -217,9 +219,10 @@ private: > int adjustJumpTarget(InsertionPoint startPoint, InsertionPoint jumpTargetPoint); > template<typename Iterator> int calculateDifference(Iterator begin, Iterator end); > >+ BytecodeGenerator& m_bytecodeGenerator; > BytecodeGraph& m_graph; > UnlinkedCodeBlock* m_codeBlock; >- UnlinkedCodeBlock::UnpackedInstructions& m_instructions; >+ InstructionStreamWriter& m_writer; > Vector<Insertion, 8> m_insertions; > }; > >diff --git a/Source/JavaScriptCore/bytecode/BytecodeUseDef.h b/Source/JavaScriptCore/bytecode/BytecodeUseDef.h >index 3e3771f5b773ca3c3837dddb3d2c1470b2324f32..593b7e7bd113a00f03ed76111c9b005140bb3242 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeUseDef.h >+++ b/Source/JavaScriptCore/bytecode/BytecodeUseDef.h >@@ -26,17 +26,50 @@ > #pragma once > > #include "CodeBlock.h" >+#include "Instruction.h" >+#include <wtf/Forward.h> > > namespace JSC { > >-template<typename Block, typename Functor, typename Instruction> >-void computeUsesForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, Instruction* instruction, const Functor& functor) >+#define CALL_FUNCTOR(__arg) \ >+ functor(__bytecode.__arg); >+ >+#define USES_OR_DEFS(__opcode, __args...) 
\ >+ case __opcode::opcodeID(): { \ >+ auto __bytecode = instruction->as<__opcode>(); \ >+ WTF_LAZY_FOR_EACH_TERM(CALL_FUNCTOR, __args) \ >+ return; \ >+ } >+ >+#define USES USES_OR_DEFS >+#define DEFS USES_OR_DEFS >+ >+template<typename Block, typename Functor> >+void computeUsesForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, const Instruction* instruction, const Functor& functor) > { > if (opcodeID != op_enter && (codeBlock->wasCompiledWithDebuggingOpcodes() || codeBlock->usesEval()) && codeBlock->scopeRegister().isValid()) >- functor(codeBlock, instruction, opcodeID, codeBlock->scopeRegister().offset()); >+ functor(codeBlock->scopeRegister()); >+ >+ auto handleNewArrayLike = [&](auto op) { >+ int base = op.argv.offset(); >+ for (int i = 0; i < static_cast<int>(op.argc); i++) >+ functor(VirtualRegister { base - i }); >+ }; >+ >+ auto handleOpCallLike = [&](auto op) { >+ functor(op.callee); >+ int lastArg = -static_cast<int>(op.argv) + CallFrame::thisArgumentOffset(); >+ for (int i = 0; i < static_cast<int>(op.argc); i++) >+ functor(VirtualRegister { lastArg + i }); >+ if (opcodeID == op_call_eval) >+ functor(codeBlock->scopeRegister()); >+ return; >+ }; > > switch (opcodeID) { > // No uses. 
>+ case op_wide: >+ ASSERT_NOT_REACHED(); > case op_new_regexp: > case op_debug: > case op_jneq_ptr: >@@ -57,282 +90,209 @@ void computeUsesForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, Instructi > case op_super_sampler_begin: > case op_super_sampler_end: > return; >- case op_get_scope: >- case op_to_this: >- case op_check_tdz: >- case op_identity_with_profile: >- case op_profile_type: >- case op_throw: >- case op_throw_static_error: >- case op_end: >- case op_ret: >- case op_jtrue: >- case op_jfalse: >- case op_jeq_null: >- case op_jneq_null: >- case op_dec: >- case op_inc: >- case op_log_shadow_chicken_prologue: { >- ASSERT(opcodeLengths[opcodeID] > 1); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- return; >- } >- case op_jlesseq: >- case op_jgreater: >- case op_jgreatereq: >- case op_jnless: >- case op_jnlesseq: >- case op_jngreater: >- case op_jngreatereq: >- case op_jless: >- case op_jeq: >- case op_jneq: >- case op_jstricteq: >- case op_jnstricteq: >- case op_jbelow: >- case op_jbeloweq: >- case op_set_function_name: >- case op_log_shadow_chicken_tail: { >- ASSERT(opcodeLengths[opcodeID] > 2); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- return; >- } >- case op_put_by_val_direct: >- case op_put_by_val: { >- ASSERT(opcodeLengths[opcodeID] > 3); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >- return; >- } >- case op_put_by_id: >- case op_put_to_scope: >- case op_put_to_arguments: { >- ASSERT(opcodeLengths[opcodeID] > 3); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >- return; >- } >- case op_put_by_id_with_this: { >- ASSERT(opcodeLengths[opcodeID] > 4); >- 
functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); >- return; >- } >- case op_put_by_val_with_this: { >- ASSERT(opcodeLengths[opcodeID] > 4); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); >- return; >- } >- case op_put_getter_by_id: >- case op_put_setter_by_id: { >- ASSERT(opcodeLengths[opcodeID] > 4); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); >- return; >- } >- case op_put_getter_setter_by_id: { >- ASSERT(opcodeLengths[opcodeID] > 5); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[5].u.operand); >- return; >- } >- case op_put_getter_by_val: >- case op_put_setter_by_val: { >- ASSERT(opcodeLengths[opcodeID] > 4); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); >- return; >- } >- case op_define_data_property: { >- ASSERT(opcodeLengths[opcodeID] > 4); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); >- return; >- } >- case op_define_accessor_property: { >- ASSERT(opcodeLengths[opcodeID] > 5); >- functor(codeBlock, 
instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[5].u.operand); >- return; >- } >- case op_spread: >- case op_get_property_enumerator: >- case op_get_enumerable_length: >- case op_new_func_exp: >- case op_new_generator_func_exp: >- case op_new_async_func_exp: >- case op_to_index_string: >- case op_create_lexical_environment: >- case op_resolve_scope: >- case op_resolve_scope_for_hoisting_func_decl_in_eval: >- case op_get_from_scope: >- case op_to_primitive: >- case op_try_get_by_id: >- case op_get_by_id: >- case op_get_by_id_proto_load: >- case op_get_by_id_unset: >- case op_get_by_id_direct: >- case op_get_array_length: >- case op_in_by_id: >- case op_typeof: >- case op_is_empty: >- case op_is_undefined: >- case op_is_boolean: >- case op_is_number: >- case op_is_object: >- case op_is_object_or_null: >- case op_is_cell_with_type: >- case op_is_function: >- case op_to_number: >- case op_to_string: >- case op_to_object: >- case op_negate: >- case op_neq_null: >- case op_eq_null: >- case op_not: >- case op_mov: >- case op_new_array_with_size: >- case op_create_this: >- case op_del_by_id: >- case op_unsigned: >- case op_new_func: >- case op_new_async_generator_func: >- case op_new_async_generator_func_exp: >- case op_new_generator_func: >- case op_new_async_func: >- case op_get_parent_scope: >- case op_create_scoped_arguments: >- case op_create_rest: >- case op_get_from_arguments: >- case op_new_array_buffer: { >- ASSERT(opcodeLengths[opcodeID] > 2); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- return; >- } >- case op_has_generic_property: >- case op_has_indexed_property: >- case op_enumerator_structure_pname: >- case op_enumerator_generic_pname: >- case op_get_by_val: 
>- case op_in_by_val: >- case op_overrides_has_instance: >- case op_instanceof: >- case op_add: >- case op_mul: >- case op_div: >- case op_mod: >- case op_sub: >- case op_pow: >- case op_lshift: >- case op_rshift: >- case op_urshift: >- case op_bitand: >- case op_bitxor: >- case op_bitor: >- case op_less: >- case op_lesseq: >- case op_greater: >- case op_greatereq: >- case op_below: >- case op_beloweq: >- case op_nstricteq: >- case op_stricteq: >- case op_neq: >- case op_eq: >- case op_push_with_scope: >- case op_get_by_id_with_this: >- case op_del_by_val: >- case op_tail_call_forward_arguments: { >- ASSERT(opcodeLengths[opcodeID] > 3); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >- return; >- } >- case op_get_by_val_with_this: { >- ASSERT(opcodeLengths[opcodeID] > 4); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); >- return; >- } >- case op_instanceof_custom: >- case op_has_structure_property: >- case op_construct_varargs: >- case op_call_varargs: >- case op_tail_call_varargs: { >- ASSERT(opcodeLengths[opcodeID] > 4); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); >- return; >- } >- case op_get_direct_pname: { >- ASSERT(opcodeLengths[opcodeID] > 5); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[5].u.operand); >- return; >- } >- case op_switch_string: >- case op_switch_char: >- case op_switch_imm: { >- 
ASSERT(opcodeLengths[opcodeID] > 3); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >- return; >- } >+ // functor(instruction[1].u.operand); >+ USES(OpGetScope, dst) >+ USES(OpToThis, srcDst) >+ USES(OpCheckTdz, target) >+ USES(OpIdentityWithProfile, srcDst) >+ USES(OpProfileType, target); >+ USES(OpThrow, value) >+ USES(OpThrowStaticError, message) >+ USES(OpEnd, value) >+ USES(OpRet, value) >+ USES(OpJtrue, condition) >+ USES(OpJfalse, condition) >+ USES(OpJeqNull, condition) >+ USES(OpJneqNull, condition) >+ USES(OpDec, srcDst) >+ USES(OpInc, srcDst) >+ USES(OpLogShadowChickenPrologue, scope) >+ >+ // functor(instruction[1].u.operand); >+ // functor(instruction[2].u.operand); >+ USES(OpJless, lhs, rhs) >+ USES(OpJlesseq, lhs, rhs) >+ USES(OpJgreater, lhs, rhs) >+ USES(OpJgreatereq, lhs, rhs) >+ USES(OpJnless, lhs, rhs) >+ USES(OpJnlesseq, lhs, rhs) >+ USES(OpJngreater, lhs, rhs) >+ USES(OpJngreatereq, lhs, rhs) >+ USES(OpJeq, lhs, rhs) >+ USES(OpJneq, lhs, rhs) >+ USES(OpJstricteq, lhs, rhs) >+ USES(OpJnstricteq, lhs, rhs) >+ USES(OpJbelow, lhs, rhs) >+ USES(OpJbeloweq, lhs, rhs) >+ USES(OpSetFunctionName, function, name) >+ USES(OpLogShadowChickenTail, thisValue, scope) >+ >+ // functor(instruction[1].u.operand); >+ // functor(instruction[2].u.operand); >+ // functor(instruction[3].u.operand); >+ USES(OpPutByVal, base, property, value) >+ USES(OpPutByValDirect, base, property, value) >+ >+ USES(OpPutById, base, value) >+ USES(OpPutToScope, scope, value) >+ USES(OpPutToArguments, arguments, value) >+ >+ USES(OpPutByIdWithThis, base, thisValue, value) >+ >+ USES(OpPutByValWithThis, base, thisValue, property, value) >+ >+ USES(OpPutGetterById, base, accessor) >+ USES(OpPutSetterById, base, accessor) >+ >+ USES(OpPutGetterSetterById, base, getter, setter) >+ >+ USES(OpPutGetterByVal, base, property, accessor) >+ USES(OpPutSetterByVal, base, property, accessor) >+ >+ USES(OpDefineDataProperty, base, property, value, attributes) >+ >+ 
USES(OpDefineAccessorProperty, base, property, getter, setter, attributes) >+ >+ // functor(instruction[2].u.operand); >+ USES(OpSpread, argument) >+ USES(OpGetPropertyEnumerator, base) >+ USES(OpGetEnumerableLength, base) >+ USES(OpNewFuncExp, scope) >+ USES(OpNewGeneratorFuncExp, scope) >+ USES(OpNewAsyncFuncExp, scope) >+ USES(OpToIndexString, index) >+ USES(OpCreateLexicalEnvironment, scope) >+ USES(OpResolveScope, scope) >+ USES(OpResolveScopeForHoistingFuncDeclInEval, scope) >+ USES(OpGetFromScope, scope) >+ USES(OpToPrimitive, src) >+ USES(OpTryGetById, base) >+ USES(OpGetById, base) >+ USES(OpGetByIdDirect, base) >+ USES(OpInById, base) >+ USES(OpTypeof, value) >+ USES(OpIsEmpty, operand) >+ USES(OpIsUndefined, operand) >+ USES(OpIsBoolean, operand) >+ USES(OpIsNumber, operand) >+ USES(OpIsObject, operand) >+ USES(OpIsObjectOrNull, operand) >+ USES(OpIsCellWithType, operand) >+ USES(OpIsFunction, operand) >+ USES(OpToNumber, operand) >+ USES(OpToString, operand) >+ USES(OpToObject, operand) >+ USES(OpNegate, operand) >+ USES(OpEqNull, operand) >+ USES(OpNeqNull, operand) >+ USES(OpNot, operand) >+ USES(OpUnsigned, operand) >+ USES(OpMov, src) >+ USES(OpNewArrayWithSize, length) >+ USES(OpCreateThis, callee) >+ USES(OpDelById, base) >+ USES(OpNewFunc, scope) >+ USES(OpNewAsyncGeneratorFunc, scope) >+ USES(OpNewAsyncGeneratorFuncExp, scope) >+ USES(OpNewGeneratorFunc, scope) >+ USES(OpNewAsyncFunc, scope) >+ USES(OpGetParentScope, scope) >+ USES(OpCreateScopedArguments, scope) >+ USES(OpCreateRest, arraySize) >+ USES(OpGetFromArguments, arguments) >+ USES(OpNewArrayBuffer, immutableButterfly) >+ >+ // functor(instruction[2].u.operand); >+ // functor(instruction[3].u.operand); >+ USES(OpHasGenericProperty, base, property) >+ USES(OpHasIndexedProperty, base, property) >+ USES(OpEnumeratorStructurePname, enumerator, index) >+ USES(OpEnumeratorGenericPname, enumerator, index) >+ USES(OpGetByVal, base, property) >+ USES(OpInByVal, base, property) >+ 
USES(OpOverridesHasInstance, constructor, hasInstanceValue) >+ USES(OpInstanceof, value, prototype) >+ USES(OpAdd, lhs, rhs) >+ USES(OpMul, lhs, rhs) >+ USES(OpDiv, lhs, rhs) >+ USES(OpMod, lhs, rhs) >+ USES(OpSub, lhs, rhs) >+ USES(OpPow, lhs, rhs) >+ USES(OpLshift, lhs, rhs) >+ USES(OpRshift, lhs, rhs) >+ USES(OpUrshift, lhs, rhs) >+ USES(OpBitand, lhs, rhs) >+ USES(OpBitxor, lhs, rhs) >+ USES(OpBitor, lhs, rhs) >+ USES(OpLess, lhs, rhs) >+ USES(OpLesseq, lhs, rhs) >+ USES(OpGreater, lhs, rhs) >+ USES(OpGreatereq, lhs, rhs) >+ USES(OpBelow, lhs, rhs) >+ USES(OpBeloweq, lhs, rhs) >+ USES(OpNstricteq, lhs, rhs) >+ USES(OpStricteq, lhs, rhs) >+ USES(OpNeq, lhs, rhs) >+ USES(OpEq, lhs, rhs) >+ USES(OpPushWithScope, currentScope, newScope) >+ USES(OpGetByIdWithThis, base, thisValue) >+ USES(OpDelByVal, base, property) >+ USES(OpTailCallForwardArguments, callee, thisValue) >+ >+ // functor(instruction[2].u.operand); >+ // functor(instruction[3].u.operand); >+ // functor(instruction[4].u.operand); >+ USES(OpGetByValWithThis, base, thisValue, property) >+ USES(OpInstanceofCustom, value, constructor, hasInstanceValue) >+ USES(OpHasStructureProperty, base, property, enumerator) >+ USES(OpConstructVarargs, callee, thisValue, arguments) >+ USES(OpCallVarargs, callee, thisValue, arguments) >+ USES(OpTailCallVarargs, callee, thisValue, arguments) >+ >+ USES(OpGetDirectPname, base, property, index, enumerator) >+ >+ USES(OpSwitchString, scrutinee) >+ USES(OpSwitchChar, scrutinee) >+ USES(OpSwitchImm, scrutinee) >+ >+ USES(OpYield, generator, argument) >+ > case op_new_array_with_spread: >+ handleNewArrayLike(instruction->as<OpNewArrayWithSpread>()); >+ return; > case op_new_array: >- case op_strcat: { >- int base = instruction[2].u.operand; >- int count = instruction[3].u.operand; >- for (int i = 0; i < count; i++) >- functor(codeBlock, instruction, opcodeID, base - i); >+ handleNewArrayLike(instruction->as<OpNewArray>()); > return; >- } >+ case op_strcat: >+ 
 handleNewArrayLike(instruction->as<OpStrcat>()); >+ return; >+ > case op_construct: >+ handleOpCallLike(instruction->as<OpConstruct>()); >+ return; > case op_call_eval: >+ handleOpCallLike(instruction->as<OpCallEval>()); >+ return; > case op_call: >- case op_tail_call: { >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- int argCount = instruction[3].u.operand; >- int registerOffset = -instruction[4].u.operand; >- int lastArg = registerOffset + CallFrame::thisArgumentOffset(); >- for (int i = 0; i < argCount; i++) >- functor(codeBlock, instruction, opcodeID, lastArg + i); >- if (opcodeID == op_call_eval) >- functor(codeBlock, instruction, opcodeID, codeBlock->scopeRegister().offset()); >+ handleOpCallLike(instruction->as<OpCall>()); > return; >- } >- case op_yield: { >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); >+ case op_tail_call: >+ handleOpCallLike(instruction->as<OpTailCall>()); > return; >- } >+ > default: > RELEASE_ASSERT_NOT_REACHED(); > break; > } > } > >-template<typename Block, typename Instruction, typename Functor> >-void computeDefsForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, Instruction* instruction, const Functor& functor) >+template<typename Block, typename Functor> >+void computeDefsForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, const Instruction* instruction, const Functor& functor) > { > switch (opcodeID) { > // These don't define anything. >+ case op_wide: >+ ASSERT_NOT_REACHED(); > case op_put_to_scope: > case op_end: > case op_throw: >@@ -392,133 +352,125 @@ void computeDefsForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, Instructi > #undef LLINT_HELPER_OPCODES > return; > // These all have a single destination for the first argument. 
>- case op_argument_count: >- case op_to_index_string: >- case op_get_enumerable_length: >- case op_has_indexed_property: >- case op_has_structure_property: >- case op_has_generic_property: >- case op_get_direct_pname: >- case op_get_property_enumerator: >- case op_enumerator_structure_pname: >- case op_enumerator_generic_pname: >- case op_get_parent_scope: >- case op_push_with_scope: >- case op_create_lexical_environment: >- case op_resolve_scope: >- case op_resolve_scope_for_hoisting_func_decl_in_eval: >- case op_strcat: >- case op_to_primitive: >- case op_create_this: >- case op_new_array: >- case op_new_array_with_spread: >- case op_spread: >- case op_new_array_buffer: >- case op_new_array_with_size: >- case op_new_regexp: >- case op_new_func: >- case op_new_func_exp: >- case op_new_generator_func: >- case op_new_generator_func_exp: >- case op_new_async_generator_func: >- case op_new_async_generator_func_exp: >- case op_new_async_func: >- case op_new_async_func_exp: >- case op_call_varargs: >- case op_tail_call_varargs: >- case op_tail_call_forward_arguments: >- case op_construct_varargs: >- case op_get_from_scope: >- case op_call: >- case op_tail_call: >- case op_call_eval: >- case op_construct: >- case op_try_get_by_id: >- case op_get_by_id: >- case op_get_by_id_proto_load: >- case op_get_by_id_unset: >- case op_get_by_id_direct: >- case op_get_by_id_with_this: >- case op_get_by_val_with_this: >- case op_get_array_length: >- case op_overrides_has_instance: >- case op_instanceof: >- case op_instanceof_custom: >- case op_get_by_val: >- case op_typeof: >- case op_identity_with_profile: >- case op_is_empty: >- case op_is_undefined: >- case op_is_boolean: >- case op_is_number: >- case op_is_object: >- case op_is_object_or_null: >- case op_is_cell_with_type: >- case op_is_function: >- case op_in_by_id: >- case op_in_by_val: >- case op_to_number: >- case op_to_string: >- case op_to_object: >- case op_negate: >- case op_add: >- case op_mul: >- case op_div: >- case 
op_mod: >- case op_sub: >- case op_pow: >- case op_lshift: >- case op_rshift: >- case op_urshift: >- case op_bitand: >- case op_bitxor: >- case op_bitor: >- case op_inc: >- case op_dec: >- case op_eq: >- case op_neq: >- case op_stricteq: >- case op_nstricteq: >- case op_less: >- case op_lesseq: >- case op_greater: >- case op_greatereq: >- case op_below: >- case op_beloweq: >- case op_neq_null: >- case op_eq_null: >- case op_not: >- case op_mov: >- case op_new_object: >- case op_to_this: >- case op_check_tdz: >- case op_get_scope: >- case op_create_direct_arguments: >- case op_create_scoped_arguments: >- case op_create_cloned_arguments: >- case op_del_by_id: >- case op_del_by_val: >- case op_unsigned: >- case op_get_from_arguments: >- case op_get_argument: >- case op_create_rest: >- case op_get_rest_length: { >- ASSERT(opcodeLengths[opcodeID] > 1); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- return; >- } >- case op_catch: { >- ASSERT(opcodeLengths[opcodeID] > 2); >- functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); >- functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); >- return; >- } >+ DEFS(OpArgumentCount, dst) >+ DEFS(OpToIndexString, dst) >+ DEFS(OpGetEnumerableLength, dst) >+ DEFS(OpHasIndexedProperty, dst) >+ DEFS(OpHasStructureProperty, dst) >+ DEFS(OpHasGenericProperty, dst) >+ DEFS(OpGetDirectPname, dst) >+ DEFS(OpGetPropertyEnumerator, dst) >+ DEFS(OpEnumeratorStructurePname, dst) >+ DEFS(OpEnumeratorGenericPname, dst) >+ DEFS(OpGetParentScope, dst) >+ DEFS(OpPushWithScope, dst) >+ DEFS(OpCreateLexicalEnvironment, dst) >+ DEFS(OpResolveScope, dst) >+ DEFS(OpResolveScopeForHoistingFuncDeclInEval, dst) >+ DEFS(OpStrcat, dst) >+ DEFS(OpToPrimitive, dst) >+ DEFS(OpCreateThis, dst) >+ DEFS(OpNewArray, dst) >+ DEFS(OpNewArrayWithSpread, dst) >+ DEFS(OpSpread, dst) >+ DEFS(OpNewArrayBuffer, dst) >+ DEFS(OpNewArrayWithSize, dst) >+ DEFS(OpNewRegexp, dst) >+ DEFS(OpNewFunc, dst) >+ 
DEFS(OpNewFuncExp, dst) >+ DEFS(OpNewGeneratorFunc, dst) >+ DEFS(OpNewGeneratorFuncExp, dst) >+ DEFS(OpNewAsyncGeneratorFunc, dst) >+ DEFS(OpNewAsyncGeneratorFuncExp, dst) >+ DEFS(OpNewAsyncFunc, dst) >+ DEFS(OpNewAsyncFuncExp, dst) >+ DEFS(OpCallVarargs, dst) >+ DEFS(OpTailCallVarargs, dst) >+ DEFS(OpTailCallForwardArguments, dst) >+ DEFS(OpConstructVarargs, dst) >+ DEFS(OpGetFromScope, dst) >+ DEFS(OpCall, dst) >+ DEFS(OpTailCall, dst) >+ DEFS(OpCallEval, dst) >+ DEFS(OpConstruct, dst) >+ DEFS(OpTryGetById, dst) >+ DEFS(OpGetById, dst) >+ DEFS(OpGetByIdDirect, dst) >+ DEFS(OpGetByIdWithThis, dst) >+ DEFS(OpGetByValWithThis, dst) >+ DEFS(OpOverridesHasInstance, dst) >+ DEFS(OpInstanceof, dst) >+ DEFS(OpInstanceofCustom, dst) >+ DEFS(OpGetByVal, dst) >+ DEFS(OpTypeof, dst) >+ DEFS(OpIdentityWithProfile, srcDst) >+ DEFS(OpIsEmpty, dst) >+ DEFS(OpIsUndefined, dst) >+ DEFS(OpIsBoolean, dst) >+ DEFS(OpIsNumber, dst) >+ DEFS(OpIsObject, dst) >+ DEFS(OpIsObjectOrNull, dst) >+ DEFS(OpIsCellWithType, dst) >+ DEFS(OpIsFunction, dst) >+ DEFS(OpInById, dst) >+ DEFS(OpInByVal, dst) >+ DEFS(OpToNumber, dst) >+ DEFS(OpToString, dst) >+ DEFS(OpToObject, dst) >+ DEFS(OpNegate, dst) >+ DEFS(OpAdd, dst) >+ DEFS(OpMul, dst) >+ DEFS(OpDiv, dst) >+ DEFS(OpMod, dst) >+ DEFS(OpSub, dst) >+ DEFS(OpPow, dst) >+ DEFS(OpLshift, dst) >+ DEFS(OpRshift, dst) >+ DEFS(OpUrshift, dst) >+ DEFS(OpBitand, dst) >+ DEFS(OpBitxor, dst) >+ DEFS(OpBitor, dst) >+ DEFS(OpInc, srcDst) >+ DEFS(OpDec, srcDst) >+ DEFS(OpEq, dst) >+ DEFS(OpNeq, dst) >+ DEFS(OpStricteq, dst) >+ DEFS(OpNstricteq, dst) >+ DEFS(OpLess, dst) >+ DEFS(OpLesseq, dst) >+ DEFS(OpGreater, dst) >+ DEFS(OpGreatereq, dst) >+ DEFS(OpBelow, dst) >+ DEFS(OpBeloweq, dst) >+ DEFS(OpNeqNull, dst) >+ DEFS(OpEqNull, dst) >+ DEFS(OpNot, dst) >+ DEFS(OpMov, dst) >+ DEFS(OpNewObject, dst) >+ DEFS(OpToThis, srcDst) >+ DEFS(OpCheckTdz, target) >+ DEFS(OpGetScope, dst) >+ DEFS(OpCreateDirectArguments, dst) >+ DEFS(OpCreateScopedArguments, dst) >+ 
DEFS(OpCreateClonedArguments, dst) >+ DEFS(OpDelById, dst) >+ DEFS(OpDelByVal, dst) >+ DEFS(OpUnsigned, dst) >+ DEFS(OpGetFromArguments, dst) >+ DEFS(OpGetArgument, dst) >+ DEFS(OpCreateRest, dst) >+ DEFS(OpGetRestLength, dst) >+ DEFS(OpCatch, exception, thrownValue) > case op_enter: { > for (unsigned i = codeBlock->numVars(); i--;) >- functor(codeBlock, instruction, opcodeID, virtualRegisterForLocal(i).offset()); >+ functor(virtualRegisterForLocal(i)); > return; > } > } > } > >+#undef CALL_FUNCTOR >+#undef USES_OR_DEFS >+#undef USES >+#undef DEFS > } // namespace JSC >diff --git a/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp >index aadf3ea32ed158b243076061021dbb9b37343d34..986ec24f0c7c085ac904b58cbc3ced36215d7fd9 100644 >--- a/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp >+++ b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp >@@ -26,6 +26,7 @@ > #include "config.h" > #include "CallLinkStatus.h" > >+#include "BytecodeStructs.h" > #include "CallLinkInfo.h" > #include "CodeBlock.h" > #include "DFGJITCode.h" >@@ -66,12 +67,24 @@ CallLinkStatus CallLinkStatus::computeFromLLInt(const ConcurrentJSLocker&, CodeB > } > #endif > >- Instruction* instruction = &profiledBlock->instructions()[bytecodeIndex]; >- OpcodeID op = Interpreter::getOpcodeID(instruction[0].u.opcode); >- if (op != op_call && op != op_construct && op != op_tail_call) >+ auto instruction = profiledBlock->instructions().at(bytecodeIndex); >+ OpcodeID op = instruction->opcodeID(); >+ >+ LLIntCallLinkInfo* callLinkInfo; >+ switch (op) { >+ case op_call: >+ callLinkInfo = &instruction->as<OpCall>().metadata(profiledBlock).callLinkInfo; >+ break; >+ case op_construct: >+ callLinkInfo = &instruction->as<OpConstruct>().metadata(profiledBlock).callLinkInfo; >+ break; >+ case op_tail_call: >+ callLinkInfo = &instruction->as<OpTailCall>().metadata(profiledBlock).callLinkInfo; >+ break; >+ default: > return CallLinkStatus(); >+ } > >- LLIntCallLinkInfo* 
callLinkInfo = instruction[5].u.callLinkInfo; > > return CallLinkStatus(callLinkInfo->lastSeenCallee.get()); > } >diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp >index d051ab37da10f70fde0fff97f37d201033ab7310..05e7ed2f0f68bd2ed7dbdb754a96cdc1cca74a26 100644 >--- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp >+++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp >@@ -51,6 +51,7 @@ > #include "FunctionExecutableDump.h" > #include "GetPutInfo.h" > #include "InlineCallFrame.h" >+#include "InstructionStream.h" > #include "InterpreterInlines.h" > #include "IsoCellSetInlines.h" > #include "JIT.h" >@@ -81,7 +82,6 @@ > #include "StructureStubInfo.h" > #include "TypeLocationCache.h" > #include "TypeProfiler.h" >-#include "UnlinkedInstructionStream.h" > #include "VMInlines.h" > #include <wtf/BagToHashMap.h> > #include <wtf/CommaPrinter.h> >@@ -243,15 +243,15 @@ void CodeBlock::dumpBytecode(PrintStream& out) > BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, statusMap); > } > >-void CodeBlock::dumpBytecode(PrintStream& out, const Instruction* begin, const Instruction*& it, const ICStatusMap& statusMap) >+void CodeBlock::dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap) > { >- BytecodeDumper<CodeBlock>::dumpBytecode(this, out, begin, it, statusMap); >+ BytecodeDumper<CodeBlock>::dumpBytecode(this, out, it, statusMap); > } > > void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& statusMap) > { >- const Instruction* it = &instructions()[bytecodeOffset]; >- dumpBytecode(out, instructions().begin(), it, statusMap); >+ const auto it = instructions().at(bytecodeOffset); >+ dumpBytecode(out, it, statusMap); > } > > #define FOR_EACH_MEMBER_VECTOR(macro) \ >@@ -375,6 +375,7 @@ CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecut > , m_unlinkedCode(*vm, this, unlinkedCodeBlock) > , m_ownerExecutable(*vm, 
this, ownerExecutable) > , m_poisonedVM(vm) >+ , m_instructions(&unlinkedCodeBlock->instructions()) > , m_thisRegister(unlinkedCodeBlock->thisRegister()) > , m_scopeRegister(unlinkedCodeBlock->scopeRegister()) > , m_source(WTFMove(sourceProvider)) >@@ -403,7 +404,7 @@ CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecut > // all the CodeBlocks of an UnlinkedCodeBlock. We rely on this fact by caching the liveness analysis > // inside UnlinkedCodeBlock. > bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, >- JSScope* scope) >+ JSScope*) > { > Base::finishCreation(vm); > finishCreationCommon(vm); >@@ -513,47 +514,32 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink > setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters()); > #endif > >- // Copy and translate the UnlinkedInstructions >- unsigned instructionCount = unlinkedCodeBlock->instructions().count(); >- UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions()); >- > // Bookkeep the strongly referenced module environments. 
> HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments; > >- RefCountedArray<Instruction> instructions(instructionCount); >- >- unsigned valueProfileCount = 0; >- auto linkValueProfile = [&](unsigned bytecodeOffset, unsigned opLength) { >- if (!vm.canUseJIT()) { >- ASSERT(vm.noJITValueProfileSingleton); >- instructions[bytecodeOffset + opLength - 1] = vm.noJITValueProfileSingleton.get(); >- return; >- } >- >- unsigned valueProfileIndex = valueProfileCount++; >- ValueProfile* profile = &m_valueProfiles[valueProfileIndex]; >- ASSERT(profile->m_bytecodeOffset == -1); >- profile->m_bytecodeOffset = bytecodeOffset; >- instructions[bytecodeOffset + opLength - 1] = profile; >- }; >- >- for (unsigned i = 0; !instructionReader.atEnd(); ) { >- const UnlinkedInstruction* pc = instructionReader.next(); >- >- unsigned opLength = opcodeLength(pc[0].u.opcode); >- >- instructions[i] = Interpreter::getOpcode(pc[0].u.opcode); >- for (size_t j = 1; j < opLength; ++j) { >- if (sizeof(int32_t) != sizeof(intptr_t)) >- instructions[i + j].u.pointer = 0; >- instructions[i + j].u.operand = pc[j].u.operand; >- } >- switch (pc[0].u.opcode) { >+ //unsigned valueProfileCount = 0; >+ //auto linkValueProfile = [&](unsigned bytecodeOffset, unsigned opLength) { >+ //if (!vm.canUseJIT()) { >+ //ASSERT(vm.noJITValueProfileSingleton); >+ ////instructions[bytecodeOffset + opLength - 1] = vm.noJITValueProfileSingleton.get(); >+ //return; >+ //} >+ >+ //unsigned valueProfileIndex = valueProfileCount++; >+ //ValueProfile* profile = &m_valueProfiles[valueProfileIndex]; >+ //ASSERT(profile->m_bytecodeOffset == -1); >+ //profile->m_bytecodeOffset = bytecodeOffset; >+ //instructions[bytecodeOffset + opLength - 1] = profile; >+ //}; >+ >+ for (const auto& instruction : *m_instructions) { >+ switch (instruction->opcodeID()) { > case op_has_indexed_property: { >- int arrayProfileIndex = pc[opLength - 1].u.operand; >- m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); >+ // TODO: link array 
profile >+ //int arrayProfileIndex = pc[opLength - 1].u.operand; >+ //m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); > >- instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex]; >+ //instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex]; > break; > } > case op_call_varargs: >@@ -561,10 +547,11 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink > case op_tail_call_forward_arguments: > case op_construct_varargs: > case op_get_by_val: { >- int arrayProfileIndex = pc[opLength - 2].u.operand; >- m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); >+ // TODO: link array profile >+ //int arrayProfileIndex = pc[opLength - 2].u.operand; >+ //m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); > >- instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex]; >+ //instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex]; > FALLTHROUGH; > } > case op_get_direct_pname: >@@ -577,157 +564,154 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink > case op_to_number: > case op_to_object: > case op_get_argument: { >- linkValueProfile(i, opLength); >+ //linkValueProfile(i, opLength); > break; > } > > case op_to_this: { >- linkValueProfile(i, opLength); >+ //linkValueProfile(i, opLength); > break; > } > > case op_in_by_val: > case op_put_by_val: > case op_put_by_val_direct: { >- int arrayProfileIndex = pc[opLength - 1].u.operand; >- m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); >- instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex]; >+ //int arrayProfileIndex = pc[opLength - 1].u.operand; >+ //m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); >+ //instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex]; > break; > } > > case op_new_array: > case op_new_array_with_size: > case op_new_array_buffer: { >- unsigned arrayAllocationProfileIndex; >- IndexingType recommendedIndexingType; >- std::tie(arrayAllocationProfileIndex, 
recommendedIndexingType) = UnlinkedCodeBlock::decompressArrayAllocationProfile(pc[opLength - 1].u.operand); >- >- ArrayAllocationProfile* profile = &m_arrayAllocationProfiles[arrayAllocationProfileIndex]; >- if (pc[0].u.opcode == op_new_array_buffer) >- profile->initializeIndexingMode(recommendedIndexingType); >- instructions[i + opLength - 1] = profile; >+ //unsigned arrayAllocationProfileIndex; >+ //IndexingType recommendedIndexingType; >+ //std::tie(arrayAllocationProfileIndex, recommendedIndexingType) = UnlinkedCodeBlock::decompressArrayAllocationProfile(pc[opLength - 1].u.operand); >+ >+ //ArrayAllocationProfile* profile = &m_arrayAllocationProfiles[arrayAllocationProfileIndex]; >+ //if (pc[0].u.opcode == op_new_array_buffer) >+ //profile->initializeIndexingMode(recommendedIndexingType); >+ //instructions[i + opLength - 1] = profile; > break; > } > > case op_new_object: { >- int objectAllocationProfileIndex = pc[opLength - 1].u.operand; >- ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex]; >- int inferredInlineCapacity = pc[opLength - 2].u.operand; >+ //int objectAllocationProfileIndex = pc[opLength - 1].u.operand; >+ //ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex]; >+ //int inferredInlineCapacity = pc[opLength - 2].u.operand; > >- instructions[i + opLength - 1] = objectAllocationProfile; >- objectAllocationProfile->initializeProfile(vm, >- m_globalObject.get(), this, m_globalObject->objectPrototype(), inferredInlineCapacity); >+ //instructions[i + opLength - 1] = objectAllocationProfile; >+ //objectAllocationProfile->initializeProfile(vm, >+ //m_globalObject.get(), this, m_globalObject->objectPrototype(), inferredInlineCapacity); > break; > } > > case op_call: > case op_tail_call: > case op_call_eval: { >- linkValueProfile(i, opLength); >- int arrayProfileIndex = pc[opLength - 2].u.operand; >- m_arrayProfiles[arrayProfileIndex] = 
ArrayProfile(i); >- instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex]; >- instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand]; >+ //linkValueProfile(i, opLength); >+ //int arrayProfileIndex = pc[opLength - 2].u.operand; >+ //m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); >+ //instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex]; >+ //instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand]; > break; > } > case op_construct: { >- instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand]; >- linkValueProfile(i, opLength); >+ //instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand]; >+ //linkValueProfile(i, opLength); > break; > } >- case op_get_array_length: >- CRASH(); >- > case op_resolve_scope: { >- const Identifier& ident = identifier(pc[3].u.operand); >- ResolveType type = static_cast<ResolveType>(pc[4].u.operand); >- RELEASE_ASSERT(type != LocalClosureVar); >- int localScopeDepth = pc[5].u.operand; >- >- ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization); >- RETURN_IF_EXCEPTION(throwScope, false); >- >- instructions[i + 4].u.operand = op.type; >- instructions[i + 5].u.operand = op.depth; >- if (op.lexicalEnvironment) { >- if (op.type == ModuleVar) { >- // Keep the linked module environment strongly referenced. 
>- if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry) >- addConstant(op.lexicalEnvironment); >- instructions[i + 6].u.jsCell.set(vm, this, op.lexicalEnvironment); >- } else >- instructions[i + 6].u.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable()); >- } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this)) >- instructions[i + 6].u.jsCell.set(vm, this, constantScope); >- else >- instructions[i + 6].u.pointer = nullptr; >+ //const Identifier& ident = identifier(pc[3].u.operand); >+ //ResolveType type = static_cast<ResolveType>(pc[4].u.operand); >+ //RELEASE_ASSERT(type != LocalClosureVar); >+ //int localScopeDepth = pc[5].u.operand; >+ >+ //ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization); >+ //RETURN_IF_EXCEPTION(throwScope, false); >+ >+ //instructions[i + 4].u.operand = op.type; >+ //instructions[i + 5].u.operand = op.depth; >+ //if (op.lexicalEnvironment) { >+ //if (op.type == ModuleVar) { >+ //// Keep the linked module environment strongly referenced. 
>+ //if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry) >+ //addConstant(op.lexicalEnvironment); >+ //instructions[i + 6].u.jsCell.set(vm, this, op.lexicalEnvironment); >+ //} else >+ //instructions[i + 6].u.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable()); >+ //} else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this)) >+ //instructions[i + 6].u.jsCell.set(vm, this, constantScope); >+ //else >+ //instructions[i + 6].u.pointer = nullptr; > break; > } > > case op_get_from_scope: { >- linkValueProfile(i, opLength); >+ //linkValueProfile(i, opLength); > > // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand > >- int localScopeDepth = pc[5].u.operand; >- instructions[i + 5].u.pointer = nullptr; >- >- GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand); >- ASSERT(!isInitialization(getPutInfo.initializationMode())); >- if (getPutInfo.resolveType() == LocalClosureVar) { >- instructions[i + 4] = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand(); >- break; >- } >- >- const Identifier& ident = identifier(pc[3].u.operand); >- ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, getPutInfo.resolveType(), InitializationMode::NotInitialization); >- RETURN_IF_EXCEPTION(throwScope, false); >- >- instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand(); >- if (op.type == ModuleVar) >- instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand(); >- if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks) >- instructions[i + 5].u.watchpointSet = op.watchpointSet; >- else if (op.structure) >- instructions[i + 5].u.structure.set(vm, this, op.structure); >- 
instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand); >+ //int localScopeDepth = pc[5].u.operand; >+ //instructions[i + 5].u.pointer = nullptr; >+ >+ //GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand); >+ //ASSERT(!isInitialization(getPutInfo.initializationMode())); >+ //if (getPutInfo.resolveType() == LocalClosureVar) { >+ //instructions[i + 4] = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand(); >+ //break; >+ //} >+ >+ //const Identifier& ident = identifier(pc[3].u.operand); >+ //ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, getPutInfo.resolveType(), InitializationMode::NotInitialization); >+ //RETURN_IF_EXCEPTION(throwScope, false); >+ >+ //instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand(); >+ //if (op.type == ModuleVar) >+ //instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand(); >+ //if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks) >+ //instructions[i + 5].u.watchpointSet = op.watchpointSet; >+ //else if (op.structure) >+ //instructions[i + 5].u.structure.set(vm, this, op.structure); >+ //instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand); > break; > } > > case op_put_to_scope: { > // put_to_scope scope, id, value, GetPutInfo, Structure, Operand >- GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand); >- if (getPutInfo.resolveType() == LocalClosureVar) { >- // Only do watching if the property we're putting to is not anonymous. 
>- if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) { >- int symbolTableIndex = pc[5].u.operand; >- SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex)); >- const Identifier& ident = identifier(pc[2].u.operand); >- ConcurrentJSLocker locker(symbolTable->m_lock); >- auto iter = symbolTable->find(locker, ident.impl()); >- ASSERT(iter != symbolTable->end(locker)); >- iter->value.prepareToWatch(); >- instructions[i + 5].u.watchpointSet = iter->value.watchpointSet(); >- } else >- instructions[i + 5].u.watchpointSet = nullptr; >- break; >- } >- >- const Identifier& ident = identifier(pc[2].u.operand); >- int localScopeDepth = pc[5].u.operand; >- instructions[i + 5].u.pointer = nullptr; >- ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, getPutInfo.resolveType(), getPutInfo.initializationMode()); >- RETURN_IF_EXCEPTION(throwScope, false); >- >- instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand(); >- if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks) >- instructions[i + 5].u.watchpointSet = op.watchpointSet; >- else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) { >- if (op.watchpointSet) >- op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident)); >- } else if (op.structure) >- instructions[i + 5].u.structure.set(vm, this, op.structure); >- instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand); >+ //GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand); >+ //if (getPutInfo.resolveType() == LocalClosureVar) { >+ //// Only do watching if the property we're putting to is not anonymous. 
>+ //if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) { >+ //int symbolTableIndex = pc[5].u.operand; >+ //SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex)); >+ //const Identifier& ident = identifier(pc[2].u.operand); >+ //ConcurrentJSLocker locker(symbolTable->m_lock); >+ //auto iter = symbolTable->find(locker, ident.impl()); >+ //ASSERT(iter != symbolTable->end(locker)); >+ //iter->value.prepareToWatch(); >+ //instructions[i + 5].u.watchpointSet = iter->value.watchpointSet(); >+ //} else >+ //instructions[i + 5].u.watchpointSet = nullptr; >+ //break; >+ //} >+ >+ //const Identifier& ident = identifier(pc[2].u.operand); >+ //int localScopeDepth = pc[5].u.operand; >+ //instructions[i + 5].u.pointer = nullptr; >+ //ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, getPutInfo.resolveType(), getPutInfo.initializationMode()); >+ //RETURN_IF_EXCEPTION(throwScope, false); >+ >+ //instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand(); >+ //if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks) >+ //instructions[i + 5].u.watchpointSet = op.watchpointSet; >+ //else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) { >+ //if (op.watchpointSet) >+ //op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident)); >+ //} else if (op.structure) >+ //instructions[i + 5].u.structure.set(vm, this, op.structure); >+ //instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand); > > break; > } >@@ -735,98 +719,98 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink > case op_profile_type: { > RELEASE_ASSERT(vm.typeProfiler()); > // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType? 
>- size_t instructionOffset = i + opLength - 1; >- unsigned divotStart, divotEnd; >- GlobalVariableID globalVariableID = 0; >- RefPtr<TypeSet> globalTypeSet; >- bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd); >- VirtualRegister profileRegister(pc[1].u.operand); >- ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand); >- SymbolTable* symbolTable = nullptr; >- >- switch (flag) { >- case ProfileTypeBytecodeClosureVar: { >- const Identifier& ident = identifier(pc[4].u.operand); >- int localScopeDepth = pc[2].u.operand; >- ResolveType type = static_cast<ResolveType>(pc[5].u.operand); >- // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because >- // we're abstractly "read"ing from a JSScope. >- ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization); >- RETURN_IF_EXCEPTION(throwScope, false); >- >- if (op.type == ClosureVar || op.type == ModuleVar) >- symbolTable = op.lexicalEnvironment->symbolTable(); >- else if (op.type == GlobalVar) >- symbolTable = m_globalObject.get()->symbolTable(); >- >- UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl(); >- if (symbolTable) { >- ConcurrentJSLocker locker(symbolTable->m_lock); >- // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet. 
>- symbolTable->prepareForTypeProfiling(locker); >- globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm); >- globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm); >- } else >- globalVariableID = TypeProfilerNoGlobalIDExists; >- >- break; >- } >- case ProfileTypeBytecodeLocallyResolved: { >- int symbolTableIndex = pc[2].u.operand; >- SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex)); >- const Identifier& ident = identifier(pc[4].u.operand); >- ConcurrentJSLocker locker(symbolTable->m_lock); >- // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet. >- globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm); >- globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm); >- >- break; >- } >- case ProfileTypeBytecodeDoesNotHaveGlobalID: >- case ProfileTypeBytecodeFunctionArgument: { >- globalVariableID = TypeProfilerNoGlobalIDExists; >- break; >- } >- case ProfileTypeBytecodeFunctionReturnStatement: { >- RELEASE_ASSERT(ownerExecutable->isFunctionExecutable()); >- globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet(); >- globalVariableID = TypeProfilerReturnStatement; >- if (!shouldAnalyze) { >- // Because a return statement can be added implicitly to return undefined at the end of a function, >- // and these nodes don't emit expression ranges because they aren't in the actual source text of >- // the user's program, give the type profiler some range to identify these return statements. >- // Currently, the text offset that is used as identification is "f" in the function keyword >- // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable. 
>- divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset(); >- shouldAnalyze = true; >- } >- break; >- } >- } >- >- std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID, >- ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm); >- TypeLocation* location = locationPair.first; >- bool isNewLocation = locationPair.second; >- >- if (flag == ProfileTypeBytecodeFunctionReturnStatement) >- location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset(); >- >- if (shouldAnalyze && isNewLocation) >- vm.typeProfiler()->insertNewLocation(location); >- >- instructions[i + 2].u.location = location; >+ //size_t instructionOffset = i + opLength - 1; >+ //unsigned divotStart, divotEnd; >+ //GlobalVariableID globalVariableID = 0; >+ //RefPtr<TypeSet> globalTypeSet; >+ //bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd); >+ //VirtualRegister profileRegister(pc[1].u.operand); >+ //ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand); >+ //SymbolTable* symbolTable = nullptr; >+ >+ //switch (flag) { >+ //case ProfileTypeBytecodeClosureVar: { >+ //const Identifier& ident = identifier(pc[4].u.operand); >+ //int localScopeDepth = pc[2].u.operand; >+ //ResolveType type = static_cast<ResolveType>(pc[5].u.operand); >+ //// Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because >+ //// we're abstractly "read"ing from a JSScope. 
>+ //ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization); >+ //RETURN_IF_EXCEPTION(throwScope, false); >+ >+ //if (op.type == ClosureVar || op.type == ModuleVar) >+ //symbolTable = op.lexicalEnvironment->symbolTable(); >+ //else if (op.type == GlobalVar) >+ //symbolTable = m_globalObject.get()->symbolTable(); >+ >+ //UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl(); >+ //if (symbolTable) { >+ //ConcurrentJSLocker locker(symbolTable->m_lock); >+ //// If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet. >+ //symbolTable->prepareForTypeProfiling(locker); >+ //globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm); >+ //globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm); >+ //} else >+ //globalVariableID = TypeProfilerNoGlobalIDExists; >+ >+ //break; >+ //} >+ //case ProfileTypeBytecodeLocallyResolved: { >+ //int symbolTableIndex = pc[2].u.operand; >+ //SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex)); >+ //const Identifier& ident = identifier(pc[4].u.operand); >+ //ConcurrentJSLocker locker(symbolTable->m_lock); >+ //// If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet. 
>+ //globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm); >+ //globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm); >+ >+ //break; >+ //} >+ //case ProfileTypeBytecodeDoesNotHaveGlobalID: >+ //case ProfileTypeBytecodeFunctionArgument: { >+ //globalVariableID = TypeProfilerNoGlobalIDExists; >+ //break; >+ //} >+ //case ProfileTypeBytecodeFunctionReturnStatement: { >+ //RELEASE_ASSERT(ownerExecutable->isFunctionExecutable()); >+ //globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet(); >+ //globalVariableID = TypeProfilerReturnStatement; >+ //if (!shouldAnalyze) { >+ //// Because a return statement can be added implicitly to return undefined at the end of a function, >+ //// and these nodes don't emit expression ranges because they aren't in the actual source text of >+ //// the user's program, give the type profiler some range to identify these return statements. >+ //// Currently, the text offset that is used as identification is "f" in the function keyword >+ //// and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable. 
>+ //divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset(); >+ //shouldAnalyze = true; >+ //} >+ //break; >+ //} >+ //} >+ >+ //std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID, >+ //ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm); >+ //TypeLocation* location = locationPair.first; >+ //bool isNewLocation = locationPair.second; >+ >+ //if (flag == ProfileTypeBytecodeFunctionReturnStatement) >+ //location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset(); >+ >+ //if (shouldAnalyze && isNewLocation) >+ //vm.typeProfiler()->insertNewLocation(location); >+ >+ //instructions[i + 2].u.location = location; > break; > } > > case op_debug: { >- if (pc[1].u.unsignedValue == DidReachBreakpoint) >+ if (instruction->as<OpDebug>().debugHookType == DidReachBreakpoint) > m_hasDebuggerStatement = true; > break; > } > > case op_create_rest: { >- int numberOfArgumentsToSkip = instructions[i + 3].u.operand; >+ int numberOfArgumentsToSkip = instruction->as<OpCreateRest>().numParametersToSkip; > ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0); > // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT."); > m_numberOfArgumentsToSkip = numberOfArgumentsToSkip; >@@ -836,14 +820,10 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink > default: > break; > } >- >- i += opLength; > } > > if (vm.controlFlowProfiler()) >- insertBasicBlockBoundariesForControlFlowProfiler(instructions); >- >- m_instructions = WTFMove(instructions); >+ insertBasicBlockBoundariesForControlFlowProfiler(); > > // Set optimization thresholds only after m_instructions is initialized, since these > // rely on the instruction count (and are in theory permitted to also inspect the >@@ -859,7 +839,7 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink > if 
(Options::dumpGeneratedBytecodes()) > dumpBytecode(); > >- heap()->reportExtraMemoryAllocated(m_instructions.size() * sizeof(Instruction)); >+ heap()->reportExtraMemoryAllocated(m_instructions->sizeInBytes()); > > return true; > } >@@ -998,7 +978,7 @@ CodeBlock* CodeBlock::specialOSREntryBlockOrNull() > size_t CodeBlock::estimatedSize(JSCell* cell, VM& vm) > { > CodeBlock* thisObject = jsCast<CodeBlock*>(cell); >- size_t extraMemoryAllocated = thisObject->m_instructions.size() * sizeof(Instruction); >+ size_t extraMemoryAllocated = thisObject->m_instructions->sizeInBytes(); > if (thisObject->m_jitCode) > extraMemoryAllocated += thisObject->m_jitCode->size(); > return Base::estimatedSize(cell, vm) + extraMemoryAllocated; >@@ -1021,15 +1001,8 @@ void CodeBlock::visitChildren(SlotVisitor& visitor) > > if (m_jitCode) > visitor.reportExtraMemoryVisited(m_jitCode->size()); >- if (m_instructions.size()) { >- unsigned refCount = m_instructions.refCount(); >- if (!refCount) { >- dataLog("CodeBlock: ", RawPointer(this), "\n"); >- dataLog("m_instructions.data(): ", RawPointer(m_instructions.data()), "\n"); >- dataLog("refCount: ", refCount, "\n"); >- RELEASE_ASSERT_NOT_REACHED(); >- } >- visitor.reportExtraMemoryVisited(m_instructions.size() * sizeof(Instruction) / refCount); >+ if (m_instructions->sizeInBytes()) { >+ visitor.reportExtraMemoryVisited(m_instructions->sizeInBytes()); > } > > stronglyVisitStrongReferences(locker, visitor); >@@ -1129,13 +1102,13 @@ void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& vis > VM& vm = *m_poisonedVM; > > if (jitType() == JITCode::InterpreterThunk) { >- const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions(); >+ const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions(); > for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) { >- Instruction* instruction = &instructions()[propertyAccessInstructions[i]]; 
>- switch (Interpreter::getOpcodeID(instruction[0])) { >- case op_put_by_id: { >- StructureID oldStructureID = instruction[4].u.structureID; >- StructureID newStructureID = instruction[6].u.structureID; >+ auto instruction = m_instructions->at(propertyAccessInstructions[i]); >+ if (instruction->is<OpPutById>()) { >+ auto& metadata = instruction->as<OpPutById>().metadata(this); >+ StructureID oldStructureID = metadata.oldStructure; >+ StructureID newStructureID = metadata.newStructure; > if (!oldStructureID || !newStructureID) > break; > Structure* oldStructure = >@@ -1146,9 +1119,6 @@ void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& vis > visitor.appendUnbarriered(newStructure); > break; > } >- default: >- break; >- } > } > } > >@@ -1243,75 +1213,71 @@ void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visito > #endif // ENABLE(DFG_JIT) > } > >-void CodeBlock::clearLLIntGetByIdCache(Instruction* instruction) >-{ >- instruction[0].u.opcode = LLInt::getOpcode(op_get_by_id); >- instruction[4].u.pointer = nullptr; >- instruction[5].u.pointer = nullptr; >- instruction[6].u.pointer = nullptr; >-} >- > void CodeBlock::finalizeLLIntInlineCaches() > { > VM& vm = *m_poisonedVM; >- const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions(); >+ const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions(); >+ >+ auto handleGetPutFromScope = [](auto& metadata) { >+ GetPutInfo getPutInfo = metadata.getPutInfo; >+ if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks >+ || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks) >+ return; >+ WriteBarrierBase<Structure>& structure = metadata.structure; >+ if (!structure || Heap::isMarked(structure.get())) >+ return; >+ if 
(Options::verboseOSR()) >+ dataLogF("Clearing scope access with structure %p.\n", structure.get()); >+ structure.clear(); >+ }; >+ > for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) { >- Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]]; >- switch (Interpreter::getOpcodeID(curInstruction[0])) { >+ const auto curInstruction = m_instructions->at(propertyAccessInstructions[i]); >+ switch (curInstruction->opcodeID()) { > case op_get_by_id: { >- StructureID oldStructureID = curInstruction[4].u.structureID; >+ auto& metadata = curInstruction->as<OpGetById>().metadata(this); >+ StructureID oldStructureID = metadata.structure; > if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID))) > break; > if (Options::verboseOSR()) > dataLogF("Clearing LLInt property access.\n"); >- clearLLIntGetByIdCache(curInstruction); >- break; >- } >- case op_get_by_id_direct: { >- StructureID oldStructureID = curInstruction[4].u.structureID; >- if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID))) >- break; >- if (Options::verboseOSR()) >- dataLogF("Clearing LLInt property access.\n"); >- curInstruction[4].u.pointer = nullptr; >- curInstruction[5].u.pointer = nullptr; >+ LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(metadata); > break; > } > case op_put_by_id: { >- StructureID oldStructureID = curInstruction[4].u.structureID; >- StructureID newStructureID = curInstruction[6].u.structureID; >- StructureChain* chain = curInstruction[7].u.structureChain.get(); >+ auto& metadata = curInstruction->as<OpPutById>().metadata(this); >+ StructureID oldStructureID = metadata.oldStructure; >+ StructureID newStructureID = metadata.newStructure; >+ StructureChain* chain = metadata.structureChain.get(); > if ((!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID))) > && (!newStructureID || 
Heap::isMarked(vm.heap.structureIDTable().get(newStructureID))) > && (!chain || Heap::isMarked(chain))) > break; > if (Options::verboseOSR()) > dataLogF("Clearing LLInt put transition.\n"); >- curInstruction[4].u.structureID = 0; >- curInstruction[5].u.operand = 0; >- curInstruction[6].u.structureID = 0; >- curInstruction[7].u.structureChain.clear(); >+ metadata.oldStructure = 0; >+ metadata.offset = 0; >+ metadata.newStructure = 0; >+ metadata.structureChain.clear(); > break; > } > // FIXME: https://bugs.webkit.org/show_bug.cgi?id=166418 > // We need to add optimizations for op_resolve_scope_for_hoisting_func_decl_in_eval to do link time scope resolution. > case op_resolve_scope_for_hoisting_func_decl_in_eval: > break; >- case op_get_by_id_proto_load: >- case op_get_by_id_unset: >- case op_get_array_length: >- break; >- case op_to_this: >- if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get())) >+ case op_to_this: { >+ auto& metadata = curInstruction->as<OpToThis>().metadata(this); >+ if (!metadata.cachedStructure || Heap::isMarked(metadata.cachedStructure.get())) > break; > if (Options::verboseOSR()) >- dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get()); >- curInstruction[2].u.structure.clear(); >- curInstruction[3].u.toThisStatus = merge( >- curInstruction[3].u.toThisStatus, ToThisClearedByGC); >+ dataLogF("Clearing LLInt to_this with structure %p.\n", metadata.cachedStructure.get()); >+ metadata.cachedStructure.clear(); >+ metadata.toThisStatus = merge(metadata.toThisStatus, ToThisClearedByGC); > break; >+ } > case op_create_this: { >- auto& cacheWriteBarrier = curInstruction[4].u.jsCell; >+ auto& metadata = curInstruction->as<OpCreateThis>().metadata(this); >+ auto& cacheWriteBarrier = metadata.cachedCallee; > if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects()) > break; > JSCell* cachedFunction = cacheWriteBarrier.get(); >@@ -1326,7 
+1292,8 @@ void CodeBlock::finalizeLLIntInlineCaches() > // Right now this isn't strictly necessary. Any symbol tables that this will refer to > // are for outer functions, and we refer to those functions strongly, and they refer > // to the symbol table strongly. But it's nice to be on the safe side. >- WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable; >+ auto metadata = curInstruction->as<OpResolveScope>().metadata(this); >+ WriteBarrierBase<SymbolTable>& symbolTable = metadata.symbolTable; > if (!symbolTable || Heap::isMarked(symbolTable.get())) > break; > if (Options::verboseOSR()) >@@ -1335,22 +1302,14 @@ void CodeBlock::finalizeLLIntInlineCaches() > break; > } > case op_get_from_scope: >- case op_put_to_scope: { >- GetPutInfo getPutInfo = GetPutInfo(curInstruction[4].u.operand); >- if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks >- || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks) >- continue; >- WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure; >- if (!structure || Heap::isMarked(structure.get())) >- break; >- if (Options::verboseOSR()) >- dataLogF("Clearing scope access with structure %p.\n", structure.get()); >- structure.clear(); >+ handleGetPutFromScope(curInstruction->as<OpGetFromScope>().metadata(this)); >+ break; >+ case op_put_to_scope: >+ handleGetPutFromScope(curInstruction->as<OpPutToScope>().metadata(this)); > break; >- } > default: >- OpcodeID opcodeID = Interpreter::getOpcodeID(curInstruction[0]); >- ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]); >+ OpcodeID opcodeID = curInstruction->opcodeID(); >+ ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in 
CodeBlock::finalizeUnconditionally, %s(%d) at bc %lu", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]); > } > } > >@@ -1358,12 +1317,12 @@ void CodeBlock::finalizeLLIntInlineCaches() > // then cleared the cache without GCing in between. > m_llintGetByIdWatchpointMap.removeIf([&] (const StructureWatchpointMap::KeyValuePairType& pair) -> bool { > auto clear = [&] () { >- Instruction* instruction = std::get<1>(pair.key); >- OpcodeID opcode = Interpreter::getOpcodeID(*instruction); >- if (opcode == op_get_by_id_proto_load || opcode == op_get_by_id_unset) { >+ const Instruction* instruction = std::get<1>(pair.key); >+ OpcodeID opcode = instruction->opcodeID(); >+ if (opcode == op_get_by_id) { > if (Options::verboseOSR()) > dataLogF("Clearing LLInt property access.\n"); >- clearLLIntGetByIdCache(instruction); >+ LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(instruction->as<OpGetById>().metadata(this)); > } > return true; > }; >@@ -1463,22 +1422,22 @@ StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType) > return m_stubInfos.add(accessType); > } > >-JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile, Instruction* instruction) >+JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile, const Instruction* instruction) > { > return m_addICs.add(arithProfile, instruction); > } > >-JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile, Instruction* instruction) >+JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile, const Instruction* instruction) > { > return m_mulICs.add(arithProfile, instruction); > } > >-JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile, Instruction* instruction) >+JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile, const Instruction* instruction) > { > return m_subICs.add(arithProfile, instruction); > } > >-JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile, Instruction* instruction) >+JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile, const 
Instruction* instruction) > { > return m_negICs.add(arithProfile, instruction); > } >@@ -1693,9 +1652,33 @@ CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex origina > #endif > } > >-void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(unsigned bytecodeOffset) >+ >+ >+void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset) >+{ >+ auto instruction = m_instructions->at(bytecodeOffset); >+ OpCatch op = instruction->as<OpCatch>(); >+ auto& metadata = op.metadata(this); >+ if (!!metadata.buffer) { >+#if !ASSERT_DISABLED >+ ConcurrentJSLocker locker(m_lock); >+ bool found = false; >+ for (auto& profile : m_catchProfiles) { >+ if (profile.get() == metadata.buffer) { >+ found = true; >+ break; >+ } >+ } >+ ASSERT(found); >+#endif >+ return; >+ } >+ >+ ensureCatchLivenessIsComputedForBytecodeOffsetSlow(op, bytecodeOffset); >+} >+ >+void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch& op, InstructionStream::Offset bytecodeOffset) > { >- ASSERT(Interpreter::getOpcodeID(m_instructions[bytecodeOffset]) == op_catch); > BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis(); > > // We get the live-out set of variables at op_catch, not the live-in. This >@@ -1722,7 +1705,7 @@ void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(unsigned byte > // the compiler thread reads fully initialized data. 
> WTF::storeStoreFence(); > >- m_instructions[bytecodeOffset + 3].u.pointer = profiles.get(); >+ op.metadata(this).buffer = profiles.get(); > > { > ConcurrentJSLocker locker(m_lock); >@@ -1773,20 +1756,15 @@ void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& d > > bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column) > { >- const Instruction* begin = instructions().begin(); >- const Instruction* end = instructions().end(); >- for (const Instruction* it = begin; it != end;) { >- OpcodeID opcodeID = Interpreter::getOpcodeID(*it); >- if (opcodeID == op_debug) { >- unsigned bytecodeOffset = it - begin; >+ for (const auto& it : *m_instructions) { >+ if (it->is<OpDebug>()) { > int unused; > unsigned opDebugLine; > unsigned opDebugColumn; >- expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn); >+ expressionRangeForBytecodeOffset(it.offset(), unused, unused, unused, opDebugLine, opDebugColumn); > if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn)) > return true; > } >- it += opcodeLengths[opcodeID]; > } > return false; > } >@@ -2764,7 +2742,7 @@ size_t CodeBlock::predictedMachineCodeSize() > if (multiplier < 0 || multiplier > 1000) > return 0; > >- double doubleResult = multiplier * m_instructions.size(); >+ double doubleResult = multiplier * m_instructions->size(); > > // Be even more paranoid: silently reject values that won't fit into a size_t. 
If > // the function is so huge that we can't even fit it into virtual memory then we >@@ -2808,14 +2786,6 @@ ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset) > getValueProfileBytecodeOffset<ValueProfile>); > } > >-ValueProfile& CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset) >-{ >- OpcodeID opcodeID = Interpreter::getOpcodeID(instructions()[bytecodeOffset]); >- unsigned length = opcodeLength(opcodeID); >- ASSERT(!!tryGetValueProfileForBytecodeOffset(bytecodeOffset)); >- return *instructions()[bytecodeOffset + length - 1].u.profile; >-} >- > void CodeBlock::validate() > { > BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't effect CodeBlock footprint. >@@ -2849,9 +2819,9 @@ void CodeBlock::validate() > } > } > >- for (unsigned bytecodeOffset = 0; bytecodeOffset < m_instructions.size(); ) { >- OpcodeID opcode = Interpreter::getOpcodeID(m_instructions[bytecodeOffset]); >- if (!!baselineAlternative()->handlerForBytecodeOffset(bytecodeOffset)) { >+ for (const auto& instruction : *m_instructions) { >+ OpcodeID opcode = instruction->opcodeID(); >+ if (!!baselineAlternative()->handlerForBytecodeOffset(instruction.offset())) { > if (opcode == op_catch || opcode == op_enter) { > // op_catch/op_enter logically represent an entrypoint. Entrypoints are not allowed to be > // inside of a try block because they are responsible for bootstrapping state. 
And they >@@ -2863,7 +2833,6 @@ void CodeBlock::validate() > endValidationDidFail(); > } > } >- bytecodeOffset += opcodeLength(opcode); > } > } > >@@ -2918,25 +2887,30 @@ unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(int bytecodeOffset) > return 0; > } > >-ArithProfile* CodeBlock::arithProfileForBytecodeOffset(int bytecodeOffset) >+ArithProfile* CodeBlock::arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset) > { >- return arithProfileForPC(&instructions()[bytecodeOffset]); >+ return arithProfileForPC(m_instructions->at(bytecodeOffset).ptr()); > } > >-ArithProfile* CodeBlock::arithProfileForPC(Instruction* pc) >+ArithProfile* CodeBlock::arithProfileForPC(const Instruction* pc) > { >- auto opcodeID = Interpreter::getOpcodeID(pc[0]); >- switch (opcodeID) { >+ switch (pc->opcodeID()) { > case op_negate: >- return bitwise_cast<ArithProfile*>(&pc[3].u.operand); >+ return &pc->as<OpNegate>().metadata(this).arithProfile; > case op_bitor: >+ return &pc->as<OpBitor>().metadata(this).arithProfile; > case op_bitand: >+ return &pc->as<OpBitand>().metadata(this).arithProfile; > case op_bitxor: >+ return &pc->as<OpBitxor>().metadata(this).arithProfile; > case op_add: >+ return &pc->as<OpAdd>().metadata(this).arithProfile; > case op_mul: >+ return &pc->as<OpMul>().metadata(this).arithProfile; > case op_sub: >+ return &pc->as<OpSub>().metadata(this).arithProfile; > case op_div: >- return bitwise_cast<ArithProfile*>(&pc[4].u.operand); >+ return &pc->as<OpDiv>().metadata(this).arithProfile; > default: > break; > } >@@ -2944,7 +2918,7 @@ ArithProfile* CodeBlock::arithProfileForPC(Instruction* pc) > return nullptr; > } > >-bool CodeBlock::couldTakeSpecialFastCase(int bytecodeOffset) >+bool CodeBlock::couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset) > { > if (!hasBaselineJITProfiling()) > return false; >@@ -2963,22 +2937,26 @@ DFG::CapabilityLevel CodeBlock::capabilityLevel() > } > #endif > >-void 
CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>& instructions) >+void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler() > { > if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets()) > return; >- const Vector<size_t>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets(); >+ const Vector<InstructionStream::Offset>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets(); > for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) { > // Because op_profile_control_flow is emitted at the beginning of every basic block, finding > // the next op_profile_control_flow will give us the text range of a single basic block. > size_t startIdx = bytecodeOffsets[i]; >- RELEASE_ASSERT(Interpreter::getOpcodeID(instructions[startIdx]) == op_profile_control_flow); >- int basicBlockStartOffset = instructions[startIdx + 1].u.operand; >+ auto instruction = m_instructions->at(startIdx); >+ RELEASE_ASSERT(instruction->opcodeID() == op_profile_control_flow); >+ auto bytecode = instruction->as<OpProfileControlFlow>(); >+ auto& metadata = bytecode.metadata(this); >+ int basicBlockStartOffset = bytecode.textOffset; > int basicBlockEndOffset; > if (i + 1 < offsetsLength) { > size_t endIdx = bytecodeOffsets[i + 1]; >- RELEASE_ASSERT(Interpreter::getOpcodeID(instructions[endIdx]) == op_profile_control_flow); >- basicBlockEndOffset = instructions[endIdx + 1].u.operand - 1; >+ auto endInstruction = m_instructions->at(endIdx); >+ RELEASE_ASSERT(endInstruction->opcodeID() == op_profile_control_flow); >+ basicBlockEndOffset = endInstruction->as<OpProfileControlFlow>().textOffset - 1; > } else { > basicBlockEndOffset = m_sourceOffset + ownerScriptExecutable()->source().length() - 1; // Offset before the closing brace. 
> basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace, ensure it is the offset before. >@@ -3004,7 +2982,7 @@ void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray > // m: op_profile_control_flow > if (basicBlockEndOffset < basicBlockStartOffset) { > RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock. >- instructions[startIdx + 1].u.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock(); >+ metadata.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock(); > continue; > } > >@@ -3028,7 +3006,7 @@ void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray > for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs) > insertFunctionGaps(executable); > >- instructions[startIdx + 1].u.basicBlockLocation = basicBlockLocation; >+ metadata.basicBlockLocation = basicBlockLocation; > } > } > >@@ -3067,7 +3045,7 @@ std::optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex > bytecodeOffset = callSiteIndex.bits(); > #else > Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits()); >- bytecodeOffset = this->bytecodeOffset(instruction); >+ bytecodeOffset = 0; // FIXME: restore this->bytecodeOffset(instruction); temporarily disabled during the new-bytecode-format bringup. > #endif > } else if (jitType == JITCode::DFGJIT || jitType == JITCode::FTLJIT) { > #if ENABLE(DFG_JIT) >diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h >index a3a3d263900d3122c09e204f32b57b3e101a32b2..ce5bd8694e18c1b5a6495ccd66cbfde66e60df7b 100644 >--- a/Source/JavaScriptCore/bytecode/CodeBlock.h >+++ b/Source/JavaScriptCore/bytecode/CodeBlock.h >@@ -47,6 +47,7 @@ > #include "HandlerInfo.h" > #include "ICStatusMap.h" > #include "Instruction.h" >+#include "InstructionStream.h" > #include "JITCode.h" > #include "JITCodeMap.h" > #include "JITMathICForwards.h" >@@ -55,7 +56,6 @@ 
> #include "JSGlobalObject.h" > #include "JumpTable.h" > #include "LLIntCallLinkInfo.h" >-#include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h" > #include "LazyOperandValueProfile.h" > #include "ModuleProgramExecutable.h" > #include "ObjectAllocationProfile.h" >@@ -85,10 +85,10 @@ struct OSRExitState; > > class BytecodeLivenessAnalysis; > class CodeBlockSet; >-class ExecState; > class ExecutableToCodeBlockEdge; > class JSModuleEnvironment; > class LLIntOffsetsExtractor; >+class LLIntPrototypeLoadAdaptiveStructureWatchpoint; > class PCToCodeOriginMap; > class RegisterAtOffsetList; > class StructureStubInfo; >@@ -96,6 +96,7 @@ class StructureStubInfo; > enum class AccessType : int8_t; > > struct ArithProfile; >+struct OpCatch; > > enum ReoptimizationMode { DontCountReoptimization, CountReoptimization }; > >@@ -197,7 +198,7 @@ public: > > void dumpBytecode(); > void dumpBytecode(PrintStream&); >- void dumpBytecode(PrintStream& out, const Instruction* begin, const Instruction*& it, const ICStatusMap& = ICStatusMap()); >+ void dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& = ICStatusMap()); > void dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& = ICStatusMap()); > > void dumpExceptionHandlers(PrintStream&); >@@ -242,22 +243,22 @@ public: > void getICStatusMap(ICStatusMap& result); > > #if ENABLE(JIT) >- JITAddIC* addJITAddIC(ArithProfile*, Instruction*); >- JITMulIC* addJITMulIC(ArithProfile*, Instruction*); >- JITNegIC* addJITNegIC(ArithProfile*, Instruction*); >- JITSubIC* addJITSubIC(ArithProfile*, Instruction*); >+ JITAddIC* addJITAddIC(ArithProfile*, const Instruction*); >+ JITMulIC* addJITMulIC(ArithProfile*, const Instruction*); >+ JITNegIC* addJITNegIC(ArithProfile*, const Instruction*); >+ JITSubIC* addJITSubIC(ArithProfile*, const Instruction*); > > template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITAddGenerator>::value>::type> >- JITAddIC* 
addMathIC(ArithProfile* profile, Instruction* instruction) { return addJITAddIC(profile, instruction); } >+ JITAddIC* addMathIC(ArithProfile* profile, const Instruction* instruction) { return addJITAddIC(profile, instruction); } > > template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITMulGenerator>::value>::type> >- JITMulIC* addMathIC(ArithProfile* profile, Instruction* instruction) { return addJITMulIC(profile, instruction); } >+ JITMulIC* addMathIC(ArithProfile* profile, const Instruction* instruction) { return addJITMulIC(profile, instruction); } > > template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITNegGenerator>::value>::type> >- JITNegIC* addMathIC(ArithProfile* profile, Instruction* instruction) { return addJITNegIC(profile, instruction); } >+ JITNegIC* addMathIC(ArithProfile* profile, const Instruction* instruction) { return addJITNegIC(profile, instruction); } > > template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITSubGenerator>::value>::type> >- JITSubIC* addMathIC(ArithProfile* profile, Instruction* instruction) { return addJITSubIC(profile, instruction); } >+ JITSubIC* addMathIC(ArithProfile* profile, const Instruction* instruction) { return addJITSubIC(profile, instruction); } > > StructureStubInfo* addStubInfo(AccessType); > auto stubInfoBegin() { return m_stubInfos.begin(); } >@@ -306,24 +307,20 @@ public: > } > #endif > >- typedef JSC::Instruction Instruction; >- typedef PoisonedRefCountedArray<CodeBlockPoison, Instruction>& UnpackedInstructions; >- >- static void clearLLIntGetByIdCache(Instruction*); >- >- unsigned bytecodeOffset(Instruction* returnAddress) >+ unsigned bytecodeOffset(const Instruction* returnAddress) > { >- RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end()); >- return static_cast<Instruction*>(returnAddress) - instructions().begin(); >+ const auto* instructionsBegin = 
instructions().at(0).ptr(); >+ const auto* instructionsEnd = reinterpret_cast<const Instruction*>(reinterpret_cast<uintptr_t>(instructionsBegin) + instructions().size()); >+ RELEASE_ASSERT(returnAddress >= instructionsBegin && returnAddress < instructionsEnd); >+ return returnAddress - instructionsBegin; > } > >- unsigned numberOfInstructions() const { return m_instructions.size(); } >- PoisonedRefCountedArray<CodeBlockPoison, Instruction>& instructions() { return m_instructions; } >- const PoisonedRefCountedArray<CodeBlockPoison, Instruction>& instructions() const { return m_instructions; } >+ unsigned numberOfInstructions() const { return m_instructions->size(); } >+ const InstructionStream& instructions() const { return *m_instructions; } > > size_t predictedMachineCodeSize(); > >- unsigned instructionCount() const { return m_instructions.size(); } >+ unsigned instructionCount() const { return m_instructions->size(); } > > // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind()) > CodeBlock* newReplacement(); >@@ -425,7 +422,6 @@ public: > > unsigned numberOfValueProfiles() { return m_valueProfiles.size(); } > ValueProfile& valueProfile(int index) { return m_valueProfiles[index]; } >- ValueProfile& valueProfileForBytecodeOffset(int bytecodeOffset); > ValueProfile* tryGetValueProfileForBytecodeOffset(int bytecodeOffset); > SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset) > { >@@ -445,6 +441,12 @@ public: > return valueProfile(index - numberOfArgumentValueProfiles()); > } > >+ template<typename Metadata> >+ Metadata*& metadata(OpcodeID opcodeID, unsigned metadataID) >+ { >+ return *reinterpret_cast<Metadata**>(&m_metadata[opcodeID][metadataID]); >+ } >+ > RareCaseProfile* addRareCaseProfile(int bytecodeOffset); > unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); } > RareCaseProfile* rareCaseProfileForBytecodeOffset(int 
bytecodeOffset); >@@ -466,10 +468,10 @@ public: > return value >= Options::couldTakeSlowCaseMinimumCount(); > } > >- ArithProfile* arithProfileForBytecodeOffset(int bytecodeOffset); >- ArithProfile* arithProfileForPC(Instruction*); >+ ArithProfile* arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset); >+ ArithProfile* arithProfileForPC(const Instruction*); > >- bool couldTakeSpecialFastCase(int bytecodeOffset); >+ bool couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset); > > unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); } > const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; } >@@ -478,6 +480,7 @@ public: > ArrayProfile* getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset); > ArrayProfile* getArrayProfile(unsigned bytecodeOffset); > ArrayProfile* getOrAddArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset); >+ > ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset); > > // Exception handling support >@@ -619,7 +622,7 @@ public: > return m_llintExecuteCounter; > } > >- typedef HashMap<std::tuple<Structure*, Instruction*>, Bag<LLIntPrototypeLoadAdaptiveStructureWatchpoint>> StructureWatchpointMap; >+ typedef HashMap<std::tuple<Structure*, const Instruction*>, Bag<LLIntPrototypeLoadAdaptiveStructureWatchpoint>> StructureWatchpointMap; > StructureWatchpointMap& llintGetByIdWatchpointMap() { return m_llintGetByIdWatchpointMap; } > > // Functions for controlling when tiered compilation kicks in. 
This >@@ -849,25 +852,7 @@ public: > > CallSiteIndex newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite); > >- void ensureCatchLivenessIsComputedForBytecodeOffset(unsigned bytecodeOffset) >- { >- if (!!m_instructions[bytecodeOffset + 3].u.pointer) { >-#if !ASSERT_DISABLED >- ConcurrentJSLocker locker(m_lock); >- bool found = false; >- for (auto& profile : m_catchProfiles) { >- if (profile.get() == m_instructions[bytecodeOffset + 3].u.pointer) { >- found = true; >- break; >- } >- } >- ASSERT(found); >-#endif >- return; >- } >- >- ensureCatchLivenessIsComputedForBytecodeOffsetSlow(bytecodeOffset); >- } >+ void ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset); > > #if ENABLE(JIT) > void setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&&); >@@ -932,8 +917,8 @@ private: > m_rareData = std::make_unique<RareData>(); > } > >- void insertBasicBlockBoundariesForControlFlowProfiler(RefCountedArray<Instruction>&); >- void ensureCatchLivenessIsComputedForBytecodeOffsetSlow(unsigned); >+ void insertBasicBlockBoundariesForControlFlowProfiler(); >+ void ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch&, InstructionStream::Offset); > > int m_numCalleeLocals; > int m_numVars; >@@ -952,7 +937,7 @@ private: > WriteBarrier<ExecutableToCodeBlockEdge> m_ownerEdge; > Poisoned<CodeBlockPoison, VM*> m_poisonedVM; > >- PoisonedRefCountedArray<CodeBlockPoison, Instruction> m_instructions; >+ const InstructionStream* m_instructions; > VirtualRegister m_thisRegister; > VirtualRegister m_scopeRegister; > mutable CodeBlockHash m_hash; >@@ -987,6 +972,7 @@ private: > RefCountedArray<ValueProfile> m_argumentValueProfiles; > RefCountedArray<ValueProfile> m_valueProfiles; > Vector<std::unique_ptr<ValueProfileAndOperandBuffer>> m_catchProfiles; >+ SegmentedVector<Vector<void*>, 8> m_metadata; > SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles; > RefCountedArray<ArrayAllocationProfile> m_arrayAllocationProfiles; > 
ArrayProfileVector m_arrayProfiles; >diff --git a/Source/JavaScriptCore/bytecode/GetByIdMetadata.h b/Source/JavaScriptCore/bytecode/GetByIdMetadata.h >new file mode 100644 >index 0000000000000000000000000000000000000000..575f26c3587270768f1dd28630816249f50057ee >--- /dev/null >+++ b/Source/JavaScriptCore/bytecode/GetByIdMetadata.h >@@ -0,0 +1,56 @@ >+/* >+ * Copyright (C) 2011-2018 Apple Inc. All rights reserved. >+ * >+ * Redistribution and use in source and binary forms, with or without >+ * modification, are permitted provided that the following conditions >+ * are met: >+ * 1. Redistributions of source code must retain the above copyright >+ * notice, this list of conditions and the following disclaimer. >+ * 2. Redistributions in binary form must reproduce the above copyright >+ * notice, this list of conditions and the following disclaimer in the >+ * documentation and/or other materials provided with the distribution. >+ * >+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY >+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE >+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR >+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, >+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, >+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR >+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY >+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE >+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
>+ */ >+ >+#pragma once >+ >+namespace JSC { >+ >+enum class GetByIdMode : uint8_t { >+ Default = 0, >+ Unset = 1, >+ ProtoLoad = 2, >+ ArrayLength = 3, >+}; >+ >+union GetByIdModeMetadata { >+ GetByIdModeMetadata() >+ { } >+ >+ struct { >+ PropertyOffset cachedOffset; >+ ValueProfile profile; >+ } defaultMode; >+ >+ struct { >+ PropertyOffset cachedOffset; >+ JSObject* cachedSlot; >+ } protoLoadMode; >+ >+ struct { >+ ArrayProfile arrayProfile; >+ } arrayLengthMode; >+}; >+ >+} // namespace JSC >diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp >index b0946c4e097f58e14d9fdcb7bab9c42ed129ba14..1e0e03da3ef6ccbdb3faa1ea00f1a73ee828355f 100644 >--- a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp >+++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp >@@ -26,6 +26,7 @@ > #include "config.h" > #include "GetByIdStatus.h" > >+#include "BytecodeStructs.h" > #include "CodeBlock.h" > #include "ComplexGetStatus.h" > #include "GetterSetterAccessCase.h" >@@ -55,35 +56,24 @@ GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned > { > VM& vm = *profiledBlock->vm(); > >- Instruction* instruction = &profiledBlock->instructions()[bytecodeIndex]; >+ auto instruction = profiledBlock->instructions().at(bytecodeIndex); > >- switch (Interpreter::getOpcodeID(instruction[0].u.opcode)) { >- case op_get_by_id: >- case op_get_by_id_direct: { >- StructureID structureID = instruction[4].u.structureID; >- if (!structureID) >- return GetByIdStatus(NoInformation, false); >- >- Structure* structure = vm.heap.structureIDTable().get(structureID); >- >- if (structure->takesSlowPathInDFGForImpureProperty()) >- return GetByIdStatus(NoInformation, false); >- >- unsigned attributes; >- PropertyOffset offset = structure->getConcurrently(uid, attributes); >- if (!isValidOffset(offset)) >- return GetByIdStatus(NoInformation, false); >- if (attributes & PropertyAttribute::CustomAccessor) >+ StructureID structureID; 
>+ switch (instruction->opcodeID()) { >+ case op_get_by_id: { >+ auto& metadata = instruction->as<OpGetById>().metadata(profiledBlock); >+ // FIXME: We should not just bail if we see a get_by_id_proto_load. >+ // https://bugs.webkit.org/show_bug.cgi?id=158039 >+ if (metadata.mode != GetByIdMode::Default) > return GetByIdStatus(NoInformation, false); >- >- return GetByIdStatus(Simple, false, GetByIdVariant(StructureSet(structure), offset)); >+ structureID = metadata.structure; >+ break; > } >- >- case op_get_array_length: >- case op_try_get_by_id: >- case op_get_by_id_proto_load: >- case op_get_by_id_unset: { >- // FIXME: We should not just bail if we see a try_get_by_id or a get_by_id_proto_load. >+ case op_get_by_id_direct: >+ structureID = instruction->as<OpGetByIdDirect>().metadata(profiledBlock).structure; >+ break; >+ case op_try_get_by_id: { >+ // FIXME: We should not just bail if we see a try_get_by_id. > // https://bugs.webkit.org/show_bug.cgi?id=158039 > return GetByIdStatus(NoInformation, false); > } >@@ -93,6 +83,23 @@ GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned > return GetByIdStatus(NoInformation, false); > } > } >+ >+ if (!structureID) >+ return GetByIdStatus(NoInformation, false); >+ >+ Structure* structure = vm.heap.structureIDTable().get(structureID); >+ >+ if (structure->takesSlowPathInDFGForImpureProperty()) >+ return GetByIdStatus(NoInformation, false); >+ >+ unsigned attributes; >+ PropertyOffset offset = structure->getConcurrently(uid, attributes); >+ if (!isValidOffset(offset)) >+ return GetByIdStatus(NoInformation, false); >+ if (attributes & PropertyAttribute::CustomAccessor) >+ return GetByIdStatus(NoInformation, false); >+ >+ return GetByIdStatus(Simple, false, GetByIdVariant(StructureSet(structure), offset)); > } > > GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, ICStatusMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid, ExitFlag didExit, CallLinkStatus::ExitSiteData 
callExitSiteData) >diff --git a/Source/JavaScriptCore/bytecode/Instruction.h b/Source/JavaScriptCore/bytecode/Instruction.h >deleted file mode 100644 >index c133578b3263d3029845e48379a35960704a6efd..0000000000000000000000000000000000000000 >--- a/Source/JavaScriptCore/bytecode/Instruction.h >+++ /dev/null >@@ -1,160 +0,0 @@ >-/* >- * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved. >- * >- * Redistribution and use in source and binary forms, with or without >- * modification, are permitted provided that the following conditions >- * are met: >- * >- * 1. Redistributions of source code must retain the above copyright >- * notice, this list of conditions and the following disclaimer. >- * 2. Redistributions in binary form must reproduce the above copyright >- * notice, this list of conditions and the following disclaimer in the >- * documentation and/or other materials provided with the distribution. >- * 3. Neither the name of Apple Inc. ("Apple") nor the names of >- * its contributors may be used to endorse or promote products derived >- * from this software without specific prior written permission. >- * >- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY >- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED >- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE >- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY >- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES >- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; >- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND >- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF >- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
>- */ >- >-#pragma once >- >-#include "BasicBlockLocation.h" >-#include "PutByIdFlags.h" >-#include "SymbolTable.h" >-#include "TypeLocation.h" >-#include "PropertySlot.h" >-#include "SpecialPointer.h" >-#include "Structure.h" >-#include "StructureChain.h" >-#include "ToThisStatus.h" >-#include <wtf/VectorTraits.h> >- >-namespace JSC { >- >-class ArrayAllocationProfile; >-class ArrayProfile; >-class ObjectAllocationProfile; >-class WatchpointSet; >-struct LLIntCallLinkInfo; >-struct ValueProfile; >- >-#if ENABLE(COMPUTED_GOTO_OPCODES) >-typedef void* Opcode; >-#else >-typedef OpcodeID Opcode; >-#endif >- >-struct Instruction { >- constexpr Instruction() >- : u({ nullptr }) >- { >- } >- >- Instruction(Opcode opcode) >- { >-#if !ENABLE(COMPUTED_GOTO_OPCODES) >- // We have to initialize one of the pointer members to ensure that >- // the entire struct is initialized, when opcode is not a pointer. >- u.jsCell.clear(); >-#endif >- u.opcode = opcode; >- } >- >- Instruction(int operand) >- { >- // We have to initialize one of the pointer members to ensure that >- // the entire struct is initialized in 64-bit. >- u.jsCell.clear(); >- u.operand = operand; >- } >- Instruction(unsigned unsignedValue) >- { >- // We have to initialize one of the pointer members to ensure that >- // the entire struct is initialized in 64-bit. 
>- u.jsCell.clear(); >- u.unsignedValue = unsignedValue; >- } >- >- Instruction(PutByIdFlags flags) >- { >- u.putByIdFlags = flags; >- } >- >- Instruction(VM& vm, JSCell* owner, Structure* structure) >- { >- u.structure.clear(); >- u.structure.set(vm, owner, structure); >- } >- Instruction(VM& vm, JSCell* owner, StructureChain* structureChain) >- { >- u.structureChain.clear(); >- u.structureChain.set(vm, owner, structureChain); >- } >- Instruction(VM& vm, JSCell* owner, JSCell* jsCell) >- { >- u.jsCell.clear(); >- u.jsCell.set(vm, owner, jsCell); >- } >- >- Instruction(PropertySlot::GetValueFunc getterFunc) { u.getterFunc = getterFunc; } >- >- Instruction(LLIntCallLinkInfo* callLinkInfo) { u.callLinkInfo = callLinkInfo; } >- Instruction(ValueProfile* profile) { u.profile = profile; } >- Instruction(ArrayProfile* profile) { u.arrayProfile = profile; } >- Instruction(ArrayAllocationProfile* profile) { u.arrayAllocationProfile = profile; } >- Instruction(ObjectAllocationProfile* profile) { u.objectAllocationProfile = profile; } >- Instruction(WriteBarrier<Unknown>* variablePointer) { u.variablePointer = variablePointer; } >- Instruction(Special::Pointer pointer) { u.specialPointer = pointer; } >- Instruction(UniquedStringImpl* uid) { u.uid = uid; } >- Instruction(bool* predicatePointer) { u.predicatePointer = predicatePointer; } >- >- union { >- void* pointer; >- Opcode opcode; >- int operand; >- unsigned unsignedValue; >- WriteBarrierBase<Structure> structure; >- StructureID structureID; >- WriteBarrierBase<SymbolTable> symbolTable; >- WriteBarrierBase<StructureChain> structureChain; >- WriteBarrierBase<JSCell> jsCell; >- WriteBarrier<Unknown>* variablePointer; >- Special::Pointer specialPointer; >- PropertySlot::GetValueFunc getterFunc; >- LLIntCallLinkInfo* callLinkInfo; >- UniquedStringImpl* uid; >- ValueProfile* profile; >- ArrayProfile* arrayProfile; >- ArrayAllocationProfile* arrayAllocationProfile; >- ObjectAllocationProfile* objectAllocationProfile; >- 
WatchpointSet* watchpointSet; >- bool* predicatePointer; >- ToThisStatus toThisStatus; >- TypeLocation* location; >- BasicBlockLocation* basicBlockLocation; >- PutByIdFlags putByIdFlags; >- } u; >- >-private: >- Instruction(StructureChain*); >- Instruction(Structure*); >-}; >-static_assert(sizeof(Instruction) == sizeof(void*), ""); >- >-} // namespace JSC >- >-namespace WTF { >- >-template<> struct VectorTraits<JSC::Instruction> : VectorTraitsBase<true, JSC::Instruction> { }; >- >-} // namespace WTF >diff --git a/Source/JavaScriptCore/bytecode/InstructionStream.cpp b/Source/JavaScriptCore/bytecode/InstructionStream.cpp >new file mode 100644 >index 0000000000000000000000000000000000000000..d2816015809c1cf67cd58e5c5974427217ad0be7 >--- /dev/null >+++ b/Source/JavaScriptCore/bytecode/InstructionStream.cpp >@@ -0,0 +1,42 @@ >+/* >+ * Copyright (C) 2014 Apple Inc. All Rights Reserved. >+ * >+ * Redistribution and use in source and binary forms, with or without >+ * modification, are permitted provided that the following conditions >+ * are met: >+ * 1. Redistributions of source code must retain the above copyright >+ * notice, this list of conditions and the following disclaimer. >+ * 2. Redistributions in binary form must reproduce the above copyright >+ * notice, this list of conditions and the following disclaimer in the >+ * documentation and/or other materials provided with the distribution. >+ * >+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY >+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE >+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR >+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, >+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, >+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR >+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY >+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE >+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. >+ */ >+ >+#include "config.h" >+#include "InstructionStream.h" >+ >+#include "Opcode.h" >+ >+namespace JSC { >+ >+InstructionStream::InstructionStream(InstructionBuffer&& instructions) >+ : m_instructions(WTFMove(instructions)) >+{ } >+ >+size_t InstructionStream::sizeInBytes() const >+{ >+ return m_instructions.size(); >+} >+ >+} >diff --git a/Source/JavaScriptCore/bytecode/InstructionStream.h b/Source/JavaScriptCore/bytecode/InstructionStream.h >new file mode 100644 >index 0000000000000000000000000000000000000000..4b190ccc91aa58a908f050da3f78ad7a151542b6 >--- /dev/null >+++ b/Source/JavaScriptCore/bytecode/InstructionStream.h >@@ -0,0 +1,245 @@ >+/* >+ * Copyright (C) 2014 Apple Inc. All Rights Reserved. >+ * >+ * Redistribution and use in source and binary forms, with or without >+ * modification, are permitted provided that the following conditions >+ * are met: >+ * 1. Redistributions of source code must retain the above copyright >+ * notice, this list of conditions and the following disclaimer. >+ * 2. Redistributions in binary form must reproduce the above copyright >+ * notice, this list of conditions and the following disclaimer in the >+ * documentation and/or other materials provided with the distribution. >+ * >+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY >+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE >+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >+ * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL APPLE INC. OR >+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, >+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, >+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR >+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY >+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE >+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. >+ */ >+ >+ >+#pragma once >+ >+#include "Instruction.h" >+#include <wtf/Vector.h> >+ >+namespace JSC { >+ >+class InstructionStream { >+ WTF_MAKE_FAST_ALLOCATED; >+ // WTF_MAKE_NONCOPYABLE(InstructionStream); >+ >+ using InstructionBuffer = Vector<uint8_t, 0, UnsafeVectorOverflow>; >+ >+ friend class InstructionStreamWriter; >+public: >+ size_t sizeInBytes() const; >+ >+ using Offset = size_t; >+ >+private: >+ template<class InstructionBuffer> >+ class BaseRef { >+ WTF_MAKE_FAST_ALLOCATED; >+ >+ friend class InstructionStream; >+ >+ public: >+ const Instruction* operator->() const { return unwrap(); } >+ const Instruction* ptr() const { return unwrap(); } >+ >+ bool operator!=(const BaseRef<InstructionBuffer>& other) const >+ { >+ return &m_instructions != &other.m_instructions || m_index != other.m_index; >+ } >+ >+ BaseRef next() const >+ { >+ return BaseRef { m_instructions, m_index + ptr()->size() }; >+ } >+ >+ Offset offset() const >+ { >+ return m_index; >+ } >+ >+ bool isValid() const >+ { >+ return m_index < m_instructions.size(); >+ } >+ >+ private: >+ BaseRef(InstructionBuffer& instructions, size_t index) >+ : m_instructions(instructions) >+ , m_index(index) >+ { } >+ >+ >+ const Instruction* unwrap() const { return reinterpret_cast<const Instruction*>(&m_instructions[m_index]); } >+ >+ InstructionBuffer& m_instructions; >+ protected: >+ Offset m_index; >+ }; >+ >+public: >+ using Ref = BaseRef<const InstructionBuffer>; >+ 
>+ class MutableRef : public BaseRef<InstructionBuffer> { >+ using BaseRef<InstructionBuffer>::BaseRef; >+ >+ friend class InstructionStreamWriter; >+ public: >+ Ref freeze() const { return Ref { m_instructions, m_index }; } >+ Instruction* operator->() { return unwrap(); } >+ Instruction* ptr() { return unwrap(); } >+ operator Ref() { >+ return Ref { m_instructions, m_index }; >+ } >+ private: >+ Instruction* unwrap() { return reinterpret_cast<Instruction*>(&m_instructions[m_index]); } >+ }; >+ >+private: >+ class iterator : public Ref { >+ friend class InstructionStream; >+ >+ public: >+ using Ref::Ref; >+ >+ Ref& operator*() >+ { >+ return *this; >+ } >+ >+ iterator operator++() >+ { >+ m_index += ptr()->size(); >+ return *this; >+ } >+ }; >+ >+public: >+ iterator begin() const >+ { >+ return iterator { m_instructions, 0 }; >+ } >+ >+ iterator end() const >+ { >+ return iterator { m_instructions, m_instructions.size() }; >+ } >+ >+ const Ref at(Offset offset) const >+ { >+ ASSERT(offset < m_instructions.size()); >+ return Ref { m_instructions, offset }; >+ } >+ >+ size_t size() const >+ { >+ return m_instructions.size(); >+ } >+ >+private: >+ explicit InstructionStream(InstructionBuffer&&); >+ >+protected: >+ InstructionBuffer m_instructions; >+}; >+ >+class InstructionStreamWriter : public InstructionStream { >+ friend class BytecodeRewriter; >+public: >+ InstructionStreamWriter() >+ : InstructionStream({ }) >+ { } >+ >+ MutableRef ref(Offset offset) >+ { >+ ASSERT(offset < m_instructions.size()); >+ return MutableRef { m_instructions, offset }; >+ } >+ >+ >+ void write(uint8_t byte) { ASSERT(!m_finalized); m_instructions.append(byte); } >+ void write(uint32_t i) >+ { >+ ASSERT(!m_finalized); >+ union { >+ uint32_t i; >+ uint8_t bytes[4]; >+ } u { i }; >+#if CPU(BIG_ENDIAN) >+ write(u.bytes[3]); >+ write(u.bytes[2]); >+ write(u.bytes[1]); >+ write(u.bytes[0]); >+#else // !CPU(BIG_ENDIAN) >+ write(u.bytes[0]); >+ write(u.bytes[1]); >+ write(u.bytes[2]); >+ 
write(u.bytes[3]); >+#endif // !CPU(BIG_ENDIAN) >+ } >+ >+ void rewind(MutableRef& ref) >+ { >+ ASSERT(ref.offset() < m_instructions.size()); >+ m_instructions.shrink(ref.offset()); >+ } >+ >+ std::unique_ptr<InstructionStream> finalize() >+ { >+ m_finalized = true; >+ m_instructions.shrinkToFit(); >+ return std::unique_ptr<InstructionStream> { new InstructionStream(WTFMove(m_instructions)) }; >+ } >+ >+ MutableRef ref() >+ { >+ return MutableRef { m_instructions, m_instructions.size() }; >+ } >+ >+private: >+ class iterator : public MutableRef { >+ friend class InstructionStreamWriter; >+ >+ public: >+ using MutableRef::MutableRef; >+ >+ MutableRef& operator*() >+ { >+ return *this; >+ } >+ >+ iterator operator++() >+ { >+ m_index += ptr()->size(); >+ return *this; >+ } >+ }; >+ >+public: >+ iterator begin() >+ { >+ return iterator { m_instructions, 0 }; >+ } >+ >+ iterator end() >+ { >+ return iterator { m_instructions, m_instructions.size() }; >+ } >+ >+private: >+ bool m_finalized { false }; >+}; >+ >+ >+} // namespace JSC >diff --git a/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp b/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp >index eecd8fbc59a37eac92af8ab8088797e8f10fb309..de5cb5f47add388a29f049e1901d1e3fe5804677 100644 >--- a/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp >+++ b/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp >@@ -32,9 +32,9 @@ > > namespace JSC { > >-LLIntPrototypeLoadAdaptiveStructureWatchpoint::LLIntPrototypeLoadAdaptiveStructureWatchpoint(const ObjectPropertyCondition& key, Instruction* getByIdInstruction) >+LLIntPrototypeLoadAdaptiveStructureWatchpoint::LLIntPrototypeLoadAdaptiveStructureWatchpoint(const ObjectPropertyCondition& key, OpGetById::Metadata& getByIdMetadata) > : m_key(key) >- , m_getByIdInstruction(getByIdInstruction) >+ , m_getByIdMetadata(getByIdMetadata) > { > 
RELEASE_ASSERT(key.watchingRequiresStructureTransitionWatchpoint()); > RELEASE_ASSERT(!key.watchingRequiresReplacementWatchpoint()); >@@ -54,7 +54,17 @@ void LLIntPrototypeLoadAdaptiveStructureWatchpoint::fireInternal(VM& vm, const F > return; > } > >- CodeBlock::clearLLIntGetByIdCache(m_getByIdInstruction); >+ clearLLIntGetByIdCache(m_getByIdMetadata); > } > >+void LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(OpGetById::Metadata&) >+{ >+ // TODO >+ //instruction[0].u.opcode = op_get_by_id; >+ //instruction[4].u.pointer = nullptr; >+ //instruction[5].u.pointer = nullptr; >+ //instruction[6].u.pointer = nullptr; >+} >+ >+ > } // namespace JSC >diff --git a/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h b/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h >index e0e1be8d07057bd6b611f8a5391d5d87b2a32d54..27a8e317f463a67d5d3e2686aa8d4433d8aab447 100644 >--- a/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h >+++ b/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h >@@ -25,7 +25,7 @@ > > #pragma once > >-#include "Instruction.h" >+#include "BytecodeStructs.h" > #include "ObjectPropertyCondition.h" > #include "Watchpoint.h" > >@@ -34,10 +34,12 @@ namespace JSC { > class LLIntPrototypeLoadAdaptiveStructureWatchpoint : public Watchpoint { > public: > LLIntPrototypeLoadAdaptiveStructureWatchpoint() = default; >- LLIntPrototypeLoadAdaptiveStructureWatchpoint(const ObjectPropertyCondition&, Instruction*); >+ LLIntPrototypeLoadAdaptiveStructureWatchpoint(const ObjectPropertyCondition&, OpGetById::Metadata&); > > void install(VM&); > >+ static void clearLLIntGetByIdCache(OpGetById::Metadata&); >+ > const ObjectPropertyCondition& key() const { return m_key; } > > protected: >@@ -45,7 +47,7 @@ protected: > > private: > ObjectPropertyCondition m_key; >- Instruction* m_getByIdInstruction { nullptr }; >+ OpGetById::Metadata& 
m_getByIdMetadata; > }; > > } // namespace JSC >diff --git a/Source/JavaScriptCore/bytecode/Opcode.h b/Source/JavaScriptCore/bytecode/Opcode.h >index 07d9a7314eeb109c731237d4be327131b11c94ee..74fc2e68c6bb4763fb777bc6c70bb15049b40f47 100644 >--- a/Source/JavaScriptCore/bytecode/Opcode.h >+++ b/Source/JavaScriptCore/bytecode/Opcode.h >@@ -68,6 +68,10 @@ const int numOpcodeIDs = NUMBER_OF_BYTECODE_IDS + NUMBER_OF_BYTECODE_HELPER_IDS; > FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTHS); > #undef OPCODE_ID_LENGTHS > >+#define OPCODE_ID_WIDE_LENGTHS(id, length) const int id##_wide_length = length * 4; >+ FOR_EACH_OPCODE_ID(OPCODE_ID_WIDE_LENGTHS); >+#undef OPCODE_ID_WIDE_LENGTHS >+ > #define OPCODE_LENGTH(opcode) opcode##_length > > #define OPCODE_ID_LENGTH_MAP(opcode, length) length, >diff --git a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp >index 56306fd7ce8bc1367e6b1eef9c98feeb4d6573c7..1e7122c75003333a733b1af5886aea1582f10947 100644 >--- a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp >+++ b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp >@@ -32,20 +32,20 @@ > > namespace JSC { > >-template <size_t vectorSize, typename Block, typename Instruction> >-static void getJumpTargetsForBytecodeOffset(Block* codeBlock, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, vectorSize>& out) >+template <size_t vectorSize, typename Block> >+static void getJumpTargetsForInstruction(Block* codeBlock, const InstructionStream::Ref& instruction, Vector<InstructionStream::Offset, vectorSize>& out) > { >- OpcodeID opcodeID = Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset]); >- extractStoredJumpTargetsForBytecodeOffset(codeBlock, instructionsBegin, bytecodeOffset, [&](int32_t& relativeOffset) { >- out.append(bytecodeOffset + relativeOffset); >+ extractStoredJumpTargetsForInstruction(codeBlock, instruction, [&](int32_t relativeOffset) { >+ out.append(instruction.offset() + relativeOffset); 
> }); >+ OpcodeID opcodeID = instruction->opcodeID(); > // op_loop_hint does not have jump target stored in bytecode instructions. > if (opcodeID == op_loop_hint) >- out.append(bytecodeOffset); >+ out.append(instruction.offset()); > else if (opcodeID == op_enter && codeBlock->hasTailCalls() && Options::optimizeRecursiveTailCalls()) { > // We need to insert a jump after op_enter, so recursive tail calls have somewhere to jump to. > // But we only want to pay that price for functions that have at least one tail call. >- out.append(bytecodeOffset + opcodeLengths[op_enter]); >+ out.append(instruction.next().offset()); > } > } > >@@ -54,8 +54,8 @@ enum class ComputePreciseJumpTargetsMode { > ForceCompute, > }; > >-template<ComputePreciseJumpTargetsMode Mode, typename Block, typename Instruction, size_t vectorSize> >-void computePreciseJumpTargetsInternal(Block* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, vectorSize>& out) >+template<ComputePreciseJumpTargetsMode Mode, typename Block, size_t vectorSize> >+void computePreciseJumpTargetsInternal(Block* codeBlock, const InstructionStream& instructions, Vector<InstructionStream::Offset, vectorSize>& out) > { > ASSERT(out.isEmpty()); > >@@ -69,10 +69,8 @@ void computePreciseJumpTargetsInternal(Block* codeBlock, Instruction* instructio > out.append(codeBlock->exceptionHandler(i).end); > } > >- for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount;) { >- OpcodeID opcodeID = Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset]); >- getJumpTargetsForBytecodeOffset(codeBlock, instructionsBegin, bytecodeOffset, out); >- bytecodeOffset += opcodeLengths[opcodeID]; >+ for (const auto& instruction : instructions) { >+ getJumpTargetsForInstruction(codeBlock, instruction, out); > } > > std::sort(out.begin(), out.end()); >@@ -91,34 +89,34 @@ void computePreciseJumpTargetsInternal(Block* codeBlock, Instruction* instructio > out.shrinkCapacity(toIndex); > } > >-void 
computePreciseJumpTargets(CodeBlock* codeBlock, Vector<unsigned, 32>& out) >+void computePreciseJumpTargets(CodeBlock* codeBlock, Vector<InstructionStream::Offset, 32>& out) > { >- computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, codeBlock->instructions().begin(), codeBlock->instructions().size(), out); >+ computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, codeBlock->instructions(), out); > } > >-void computePreciseJumpTargets(CodeBlock* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out) >+void computePreciseJumpTargets(CodeBlock* codeBlock, const InstructionStream& instructions, Vector<InstructionStream::Offset, 32>& out) > { >- computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, instructionsBegin, instructionCount, out); >+ computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, instructions, out); > } > >-void computePreciseJumpTargets(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out) >+void computePreciseJumpTargets(UnlinkedCodeBlock* codeBlock, const InstructionStream& instructions, Vector<InstructionStream::Offset, 32>& out) > { >- computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, instructionsBegin, instructionCount, out); >+ computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, instructions, out); > } > >-void recomputePreciseJumpTargets(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<unsigned>& out) >+void recomputePreciseJumpTargets(UnlinkedCodeBlock* codeBlock, const InstructionStream& instructions, Vector<InstructionStream::Offset>& out) > { >- 
computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::ForceCompute>(codeBlock, instructionsBegin, instructionCount, out); >+ computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::ForceCompute>(codeBlock, instructions, out); > } > >-void findJumpTargetsForBytecodeOffset(CodeBlock* codeBlock, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, 1>& out) >+void findJumpTargetsForInstruction(CodeBlock* codeBlock, const InstructionStream::Ref& instruction, Vector<InstructionStream::Offset, 1>& out) > { >- getJumpTargetsForBytecodeOffset(codeBlock, instructionsBegin, bytecodeOffset, out); >+ getJumpTargetsForInstruction(codeBlock, instruction, out); > } > >-void findJumpTargetsForBytecodeOffset(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, 1>& out) >+void findJumpTargetsForInstruction(UnlinkedCodeBlock* codeBlock, const InstructionStream::Ref& instruction, Vector<InstructionStream::Offset, 1>& out) > { >- getJumpTargetsForBytecodeOffset(codeBlock, instructionsBegin, bytecodeOffset, out); >+ getJumpTargetsForInstruction(codeBlock, instruction, out); > } > > } // namespace JSC >diff --git a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h >index bcc9346cd5d7020465def09a5b259cf4872d9b93..023de86c1ea7de57cd85a4ac4286259cafb0df2b 100644 >--- a/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h >+++ b/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h >@@ -30,16 +30,15 @@ > namespace JSC { > > class UnlinkedCodeBlock; >-struct UnlinkedInstruction; > > // Return a sorted list of bytecode index that are the destination of a jump. 
>-void computePreciseJumpTargets(CodeBlock*, Vector<unsigned, 32>& out); >-void computePreciseJumpTargets(CodeBlock*, Instruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out); >-void computePreciseJumpTargets(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out); >+void computePreciseJumpTargets(CodeBlock*, Vector<InstructionStream::Offset, 32>& out); >+void computePreciseJumpTargets(CodeBlock*, const InstructionStream& instructions, Vector<InstructionStream::Offset, 32>& out); >+void computePreciseJumpTargets(UnlinkedCodeBlock*, const InstructionStream& instructions, Vector<InstructionStream::Offset, 32>& out); > >-void recomputePreciseJumpTargets(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<unsigned>& out); >+void recomputePreciseJumpTargets(UnlinkedCodeBlock*, const InstructionStream& instructions, Vector<InstructionStream::Offset>& out); > >-void findJumpTargetsForBytecodeOffset(CodeBlock*, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, 1>& out); >-void findJumpTargetsForBytecodeOffset(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, 1>& out); >+void findJumpTargetsForInstruction(CodeBlock*, const InstructionStream::Ref& instruction, Vector<InstructionStream::Offset, 1>& out); >+void findJumpTargetsForInstruction(UnlinkedCodeBlock*, const InstructionStream::Ref& instruction, Vector<InstructionStream::Offset, 1>& out); > > } // namespace JSC >diff --git a/Source/JavaScriptCore/bytecode/PreciseJumpTargetsInlines.h b/Source/JavaScriptCore/bytecode/PreciseJumpTargetsInlines.h >index 070fde9a0b3d6afdc3d4b5fb98694041f1637d01..7e93f7b42fe7c48477af431e0db012d7b9eb5c56 100644 >--- a/Source/JavaScriptCore/bytecode/PreciseJumpTargetsInlines.h >+++ b/Source/JavaScriptCore/bytecode/PreciseJumpTargetsInlines.h >@@ -25,64 +25,108 @@ > > #pragma once > >+#include 
"BytecodeStructs.h" > #include "InterpreterInlines.h" > #include "Opcode.h" > #include "PreciseJumpTargets.h" > > namespace JSC { > >-template<typename Block, typename Instruction, typename Function> >-inline void extractStoredJumpTargetsForBytecodeOffset(Block* codeBlock, Instruction* instructionsBegin, unsigned bytecodeOffset, Function function) >+#define SWITCH_JMP(CASE_OP, JMP_TARGET) \ >+ switch (instruction->opcodeID()) { \ >+ CASE_OP(OpJmp) \ >+ /* TODO: unify as instruction->as<UnaryJmp>() */ \ >+ CASE_OP(OpJtrue) \ >+ CASE_OP(OpJfalse) \ >+ CASE_OP(OpJeqNull) \ >+ CASE_OP(OpJneqNull) \ >+ CASE_OP(OpJneqPtr) \ >+ /* TODO: unify as instruction->as<BinaryJmp>() */ \ >+ CASE_OP(OpJless) \ >+ CASE_OP(OpJlesseq) \ >+ CASE_OP(OpJgreater) \ >+ CASE_OP(OpJgreatereq) \ >+ CASE_OP(OpJnless) \ >+ CASE_OP(OpJnlesseq) \ >+ CASE_OP(OpJngreater) \ >+ CASE_OP(OpJngreatereq) \ >+ CASE_OP(OpJeq) \ >+ CASE_OP(OpJneq) \ >+ CASE_OP(OpJstricteq) \ >+ CASE_OP(OpJnstricteq) \ >+ CASE_OP(OpJbelow) \ >+ CASE_OP(OpJbeloweq) \ >+ case op_switch_imm: { \ >+ auto bytecode = instruction->as<OpSwitchImm>(); \ >+ auto& table = codeBlock->switchJumpTable(bytecode.tableIndex); \ >+ for (unsigned i = table.branchOffsets.size(); i--;) \ >+ JMP_TARGET(table.branchOffsets[i]); \ >+ JMP_TARGET(bytecode.defaultOffset); \ >+ break; \ >+ } \ >+ case op_switch_char: { \ >+ auto bytecode = instruction->as<OpSwitchChar>(); \ >+ auto& table = codeBlock->switchJumpTable(bytecode.tableIndex); \ >+ for (unsigned i = table.branchOffsets.size(); i--;) \ >+ JMP_TARGET(table.branchOffsets[i]); \ >+ JMP_TARGET(bytecode.defaultOffset); \ >+ break; \ >+ } \ >+ case op_switch_string: { \ >+ auto bytecode = instruction->as<OpSwitchImm>(); \ >+ auto& table = codeBlock->stringSwitchJumpTable(bytecode.tableIndex); \ >+ auto iter = table.offsetTable.begin(); \ >+ auto end = table.offsetTable.end(); \ >+ for (; iter != end; ++iter) \ >+ JMP_TARGET(iter->value.branchOffset); \ >+ JMP_TARGET(bytecode.defaultOffset); \ >+ 
break; \ >+ } \ >+ default: \ >+ break; \ >+ } \ >+ >+template<typename Block, typename Function> >+inline void extractStoredJumpTargetsForInstruction(Block* codeBlock, const InstructionStream::Ref& instruction, Function function) > { >- OpcodeID opcodeID = Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset]); >- Instruction* current = instructionsBegin + bytecodeOffset; >- switch (opcodeID) { >- case op_jmp: >- function(current[1].u.operand); >- break; >- case op_jtrue: >- case op_jfalse: >- case op_jeq_null: >- case op_jneq_null: >- function(current[2].u.operand); >- break; >- case op_jneq_ptr: >- case op_jless: >- case op_jlesseq: >- case op_jgreater: >- case op_jgreatereq: >- case op_jnless: >- case op_jnlesseq: >- case op_jngreater: >- case op_jngreatereq: >- case op_jeq: >- case op_jneq: >- case op_jstricteq: >- case op_jnstricteq: >- case op_jbelow: >- case op_jbeloweq: >- function(current[3].u.operand); >- break; >- case op_switch_imm: >- case op_switch_char: { >- auto& table = codeBlock->switchJumpTable(current[1].u.operand); >- for (unsigned i = table.branchOffsets.size(); i--;) >- function(table.branchOffsets[i]); >- function(current[2].u.operand); >+#define CASE_OP(__op) \ >+ case __op::opcodeID(): \ >+ function(instruction->as<__op>().target); \ > break; >- } >- case op_switch_string: { >- auto& table = codeBlock->stringSwitchJumpTable(current[1].u.operand); >- auto iter = table.offsetTable.begin(); >- auto end = table.offsetTable.end(); >- for (; iter != end; ++iter) >- function(iter->value.branchOffset); >- function(current[2].u.operand); >- break; >- } >- default: >- break; >- } >+ >+#define JMP_TARGET(__target) \ >+ function(__target) >+ >+SWITCH_JMP(CASE_OP, JMP_TARGET) >+ >+#undef CASE_OP >+#undef JMP_TARGET >+} >+ >+template<typename Block, typename Function> >+inline void updateStoredJumpTargetsForInstruction(Block* codeBlock, InstructionStream::MutableRef instruction, Function function) >+{ >+#define CASE_OP(__op) \ >+ case 
__op::opcodeID(): { \ >+ int32_t target = instruction->as<__op>().target; \ >+ int32_t newTarget = function(target); \ >+ if (newTarget != target) \ >+ instruction->cast<__op>()->setTarget(newTarget); \ >+ break; \ >+ } >+ >+#define JMP_TARGET(__target) \ >+ do { \ >+ int32_t target = __target; \ >+ int32_t newTarget = function(target); \ >+ if (newTarget != target) \ >+ __target = newTarget; \ >+ } while(false) >+ >+SWITCH_JMP(CASE_OP, JMP_TARGET) >+ >+#undef CASE_OP >+#undef JMP_TARGET > } > > } // namespace JSC >diff --git a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp >index aac974cea948218478c6c885e8f4d7b9ac3b64e3..03abdf49719b95088772a9129d552d50e5c85fe3 100644 >--- a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp >+++ b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp >@@ -26,6 +26,7 @@ > #include "config.h" > #include "PutByIdStatus.h" > >+#include "BytecodeStructs.h" > #include "CodeBlock.h" > #include "ComplexGetStatus.h" > #include "GetterSetterAccessCase.h" >@@ -55,21 +56,18 @@ ExitFlag PutByIdStatus::hasExitSite(CodeBlock* profiledBlock, unsigned bytecodeI > > PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid) > { >- UNUSED_PARAM(profiledBlock); >- UNUSED_PARAM(bytecodeIndex); >- UNUSED_PARAM(uid); >- > VM& vm = *profiledBlock->vm(); > >- Instruction* instruction = &profiledBlock->instructions()[bytecodeIndex]; >+ auto instruction = profiledBlock->instructions().at(bytecodeIndex); >+ auto& metadata = instruction->as<OpPutById>().metadata(profiledBlock); > >- StructureID structureID = instruction[4].u.structureID; >+ StructureID structureID = metadata.oldStructure; > if (!structureID) > return PutByIdStatus(NoInformation); > > Structure* structure = vm.heap.structureIDTable().get(structureID); > >- StructureID newStructureID = instruction[6].u.structureID; >+ StructureID newStructureID = metadata.newStructure; > if 
(!newStructureID) { > PropertyOffset offset = structure->getConcurrently(uid); > if (!isValidOffset(offset)) >@@ -87,7 +85,7 @@ PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned > return PutByIdStatus(NoInformation); > > ObjectPropertyConditionSet conditionSet; >- if (!(instruction[8].u.putByIdFlags & PutByIdIsDirect)) { >+ if (!(metadata.flags & PutByIdIsDirect)) { > conditionSet = > generateConditionsForPropertySetterMissConcurrently( > vm, profiledBlock->globalObject(), structure, uid); >diff --git a/Source/JavaScriptCore/bytecode/SpecialPointer.cpp b/Source/JavaScriptCore/bytecode/SpecialPointer.cpp >index dc5a363b638606421e7d20dfbd74f944c7d03e63..5ad3f24ca0481d11a67ebd1bfbabe866fd31d48b 100644 >--- a/Source/JavaScriptCore/bytecode/SpecialPointer.cpp >+++ b/Source/JavaScriptCore/bytecode/SpecialPointer.cpp >@@ -44,3 +44,27 @@ void* actualPointerFor(CodeBlock* codeBlock, Special::Pointer pointer) > > } // namespace JSC > >+namespace WTF { >+ >+void printInternal(PrintStream& out, JSC::Special::Pointer pointer) >+{ >+ switch (pointer) { >+ case JSC::Special::CallFunction: >+ out.print("CallFunction"); >+ return; >+ case JSC::Special::ApplyFunction: >+ out.print("ApplyFunction"); >+ return; >+ case JSC::Special::ObjectConstructor: >+ out.print("ObjectConstructor"); >+ return; >+ case JSC::Special::ArrayConstructor: >+ out.print("ArrayConstructor"); >+ return; >+ case JSC::Special::TableSize: >+ out.print("TableSize"); >+ return; >+ } >+} >+ >+} // namespace WTF >diff --git a/Source/JavaScriptCore/bytecode/SpecialPointer.h b/Source/JavaScriptCore/bytecode/SpecialPointer.h >index 21329ec43fcf756d4d8338855d6b858c04c4ccfe..9df7ae00675b05278d004ea1e354f00d46a45dbe 100644 >--- a/Source/JavaScriptCore/bytecode/SpecialPointer.h >+++ b/Source/JavaScriptCore/bytecode/SpecialPointer.h >@@ -61,3 +61,11 @@ void* actualPointerFor(JSGlobalObject*, Special::Pointer); > void* actualPointerFor(CodeBlock*, Special::Pointer); > > } // namespace JSC >+ 
>+namespace WTF { >+ >+class PrintStream; >+ >+void printInternal(PrintStream&, JSC::Special::Pointer); >+ >+} // namespace WTF >diff --git a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp >index 2e2d64f06f97fa1da5625ba6b584dcfd8d911d0b..ba94ce774b545fcafade2afe7d778129f63ebf6d 100644 >--- a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp >+++ b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp >@@ -34,6 +34,7 @@ > #include "CodeCache.h" > #include "ExecutableInfo.h" > #include "FunctionOverrides.h" >+#include "InstructionStream.h" > #include "JSCInlines.h" > #include "JSString.h" > #include "Parser.h" >@@ -43,7 +44,6 @@ > #include "SymbolTable.h" > #include "UnlinkedEvalCodeBlock.h" > #include "UnlinkedFunctionCodeBlock.h" >-#include "UnlinkedInstructionStream.h" > #include "UnlinkedModuleProgramCodeBlock.h" > #include "UnlinkedProgramCodeBlock.h" > #include <wtf/DataLog.h> >@@ -95,20 +95,20 @@ void UnlinkedCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor) > for (FunctionExpressionVector::iterator ptr = thisObject->m_functionExprs.begin(), end = thisObject->m_functionExprs.end(); ptr != end; ++ptr) > visitor.append(*ptr); > visitor.appendValues(thisObject->m_constantRegisters.data(), thisObject->m_constantRegisters.size()); >- if (thisObject->m_unlinkedInstructions) >- visitor.reportExtraMemoryVisited(thisObject->m_unlinkedInstructions->sizeInBytes()); >+ if (thisObject->m_instructions) >+ visitor.reportExtraMemoryVisited(thisObject->m_instructions->sizeInBytes()); > } > > size_t UnlinkedCodeBlock::estimatedSize(JSCell* cell, VM& vm) > { > UnlinkedCodeBlock* thisObject = jsCast<UnlinkedCodeBlock*>(cell); >- size_t extraSize = thisObject->m_unlinkedInstructions ? thisObject->m_unlinkedInstructions->sizeInBytes() : 0; >+ size_t extraSize = thisObject->m_instructions ? 
thisObject->m_instructions->sizeInBytes() : 0; > return Base::estimatedSize(cell, vm) + extraSize; > } > > int UnlinkedCodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset) > { >- ASSERT(bytecodeOffset < instructions().count()); >+ ASSERT(bytecodeOffset < instructions().size()); > int divot { 0 }; > int startOffset { 0 }; > int endOffset { 0 }; >@@ -139,13 +139,12 @@ inline void UnlinkedCodeBlock::getLineAndColumn(const ExpressionRangeInfo& info, > } > > #ifndef NDEBUG >-static void dumpLineColumnEntry(size_t index, const UnlinkedInstructionStream& instructionStream, unsigned instructionOffset, unsigned line, unsigned column) >+static void dumpLineColumnEntry(size_t index, const InstructionStream& instructionStream, unsigned instructionOffset, unsigned line, unsigned column) > { >- const auto& instructions = instructionStream.unpackForDebugging(); >- OpcodeID opcode = instructions[instructionOffset].u.opcode; >+ const auto instruction = instructionStream.at(instructionOffset); > const char* event = ""; >- if (opcode == op_debug) { >- switch (instructions[instructionOffset + 1].u.operand) { >+ if (instruction->is<OpDebug>()) { >+ switch (instruction->as<OpDebug>().debugHookType) { > case WillExecuteProgram: event = " WillExecuteProgram"; break; > case DidExecuteProgram: event = " DidExecuteProgram"; break; > case DidEnterCallFrame: event = " DidEnterCallFrame"; break; >@@ -155,7 +154,7 @@ static void dumpLineColumnEntry(size_t index, const UnlinkedInstructionStream& i > case WillExecuteExpression: event = " WillExecuteExpression"; break; > } > } >- dataLogF(" [%zu] pc %u @ line %u col %u : %s%s\n", index, instructionOffset, line, column, opcodeNames[opcode], event); >+ dataLogF(" [%zu] pc %u @ line %u col %u : %s%s\n", index, instructionOffset, line, column, instruction->name(), event); > } > > void UnlinkedCodeBlock::dumpExpressionRangeInfo() >@@ -178,7 +177,7 @@ void UnlinkedCodeBlock::dumpExpressionRangeInfo() > void 
UnlinkedCodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, > int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const > { >- ASSERT(bytecodeOffset < instructions().count()); >+ ASSERT(bytecodeOffset < instructions().size()); > > if (!m_expressionInfo.size()) { > startOffset = 0; >@@ -304,20 +303,20 @@ UnlinkedCodeBlock::~UnlinkedCodeBlock() > { > } > >-void UnlinkedCodeBlock::setInstructions(std::unique_ptr<UnlinkedInstructionStream> instructions) >+void UnlinkedCodeBlock::setInstructions(std::unique_ptr<InstructionStream> instructions) > { > ASSERT(instructions); > { > auto locker = holdLock(cellLock()); >- m_unlinkedInstructions = WTFMove(instructions); >+ m_instructions = WTFMove(instructions); > } >- Heap::heap(this)->reportExtraMemoryAllocated(m_unlinkedInstructions->sizeInBytes()); >+ Heap::heap(this)->reportExtraMemoryAllocated(m_instructions->sizeInBytes()); > } > >-const UnlinkedInstructionStream& UnlinkedCodeBlock::instructions() const >+const InstructionStream& UnlinkedCodeBlock::instructions() const > { >- ASSERT(m_unlinkedInstructions.get()); >- return *m_unlinkedInstructions; >+ ASSERT(m_instructions.get()); >+ return *m_instructions; > } > > UnlinkedHandlerInfo* UnlinkedCodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler) >@@ -332,20 +331,15 @@ UnlinkedHandlerInfo* UnlinkedCodeBlock::handlerForIndex(unsigned index, Required > return UnlinkedHandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler); > } > >-void UnlinkedCodeBlock::applyModification(BytecodeRewriter& rewriter, UnpackedInstructions& instructions) >+void UnlinkedCodeBlock::applyModification(BytecodeRewriter& rewriter, InstructionStreamWriter& instructions) > { > // Before applying the changes, we adjust the jumps based on the original bytecode offset, the offset to the jump target, and > // the insertion information. 
> >- UnlinkedInstruction* instructionsBegin = instructions.begin(); // OOPS: make this an accessor on rewriter. >- >- for (int bytecodeOffset = 0, instructionCount = instructions.size(); bytecodeOffset < instructionCount;) { >- UnlinkedInstruction* current = instructionsBegin + bytecodeOffset; >- OpcodeID opcodeID = current[0].u.opcode; >- extractStoredJumpTargetsForBytecodeOffset(this, instructionsBegin, bytecodeOffset, [&](int32_t& relativeOffset) { >- relativeOffset = rewriter.adjustJumpTarget(bytecodeOffset, bytecodeOffset + relativeOffset); >+ for (const auto& instruction : instructions) { >+ updateStoredJumpTargetsForInstruction(this, instruction, [&](int32_t relativeOffset) { >+ return rewriter.adjustJumpTarget(instruction.offset(), instruction.offset() + relativeOffset); > }); >- bytecodeOffset += opcodeLength(opcodeID); > } > > // Then, exception handlers should be adjusted. >@@ -378,7 +372,7 @@ void UnlinkedCodeBlock::applyModification(BytecodeRewriter& rewriter, UnpackedIn > > // And recompute the jump target based on the modified unlinked instructions. 
> m_jumpTargets.clear(); >- recomputePreciseJumpTargets(this, instructions.begin(), instructions.size(), m_jumpTargets); >+ recomputePreciseJumpTargets(this, instructions, m_jumpTargets); > } > > void UnlinkedCodeBlock::shrinkToFit() >diff --git a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h >index da77bc9379dc522445989498b7ea98776d92d162..edf848afe1532b9507ee423bb932ee6e81260a3d 100644 >--- a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h >+++ b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h >@@ -31,6 +31,7 @@ > #include "ExpressionRangeInfo.h" > #include "HandlerInfo.h" > #include "Identifier.h" >+#include "InstructionStream.h" > #include "JSCast.h" > #include "LockDuringMarking.h" > #include "ParserModes.h" >@@ -60,7 +61,6 @@ class SourceProvider; > class UnlinkedCodeBlock; > class UnlinkedFunctionCodeBlock; > class UnlinkedFunctionExecutable; >-class UnlinkedInstructionStream; > struct ExecutableInfo; > > typedef unsigned UnlinkedValueProfile; >@@ -101,17 +101,6 @@ struct UnlinkedSimpleJumpTable { > } > }; > >-struct UnlinkedInstruction { >- UnlinkedInstruction() { u.operand = 0; } >- UnlinkedInstruction(OpcodeID opcode) { u.opcode = opcode; } >- UnlinkedInstruction(int operand) { u.operand = operand; } >- union { >- OpcodeID opcode; >- int32_t operand; >- unsigned unsignedValue; >- } u; >-}; >- > class UnlinkedCodeBlock : public JSCell { > public: > typedef JSCell Base; >@@ -121,9 +110,6 @@ public: > > enum { CallFunction, ApplyFunction }; > >- typedef UnlinkedInstruction Instruction; >- typedef Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow> UnpackedInstructions; >- > bool isConstructor() const { return m_isConstructor; } > bool isStrictMode() const { return m_isStrictMode; } > bool usesEval() const { return m_usesEval; } >@@ -237,8 +223,8 @@ public: > > void shrinkToFit(); > >- void setInstructions(std::unique_ptr<UnlinkedInstructionStream>); >- const UnlinkedInstructionStream& 
instructions() const; >+ void setInstructions(std::unique_ptr<InstructionStream>); >+ const InstructionStream& instructions() const; > > int numCalleeLocals() const { return m_numCalleeLocals; } > int numVars() const { return m_numVars; } >@@ -276,6 +262,16 @@ public: > UnlinkedFunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); } > size_t numberOfFunctionExprs() { return m_functionExprs.size(); } > >+ unsigned addMetadataFor(OpcodeID opcodeID) >+ { >+ auto it = m_metadataCount.find(opcodeID); >+ if (it != m_metadataCount.end()) >+ return it->value++; >+ >+ m_metadataCount.add(opcodeID, 1); >+ return 0; >+ } >+ > // Exception handling support > size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; } > void addExceptionHandler(const UnlinkedHandlerInfo& handler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(handler); } >@@ -305,13 +301,13 @@ public: > VirtualRegister thisRegister() const { return m_thisRegister; } > VirtualRegister scopeRegister() const { return m_scopeRegister; } > >- void addPropertyAccessInstruction(unsigned propertyAccessInstruction) >+ void addPropertyAccessInstruction(InstructionStream::Offset propertyAccessInstruction) > { > m_propertyAccessInstructions.append(propertyAccessInstruction); > } > > size_t numberOfPropertyAccessInstructions() const { return m_propertyAccessInstructions.size(); } >- const Vector<unsigned>& propertyAccessInstructions() const { return m_propertyAccessInstructions; } >+ const Vector<InstructionStream::Offset>& propertyAccessInstructions() const { return m_propertyAccessInstructions; } > > bool hasRareData() const { return m_rareData.get(); } > >@@ -401,7 +397,7 @@ private: > friend class BytecodeRewriter; > friend class BytecodeGenerator; > >- void applyModification(BytecodeRewriter&, UnpackedInstructions&); >+ void applyModification(BytecodeRewriter&, InstructionStreamWriter&); > > void 
createRareDataIfNecessary() > { >@@ -414,7 +410,7 @@ private: > void getLineAndColumn(const ExpressionRangeInfo&, unsigned& line, unsigned& column) const; > BytecodeLivenessAnalysis& livenessAnalysisSlow(CodeBlock*); > >- std::unique_ptr<UnlinkedInstructionStream> m_unlinkedInstructions; >+ std::unique_ptr<InstructionStream> m_instructions; > std::unique_ptr<BytecodeLivenessAnalysis> m_liveness; > > VirtualRegister m_thisRegister; >@@ -458,9 +454,9 @@ private: > SourceParseMode m_parseMode; > CodeType m_codeType; > >- Vector<unsigned> m_jumpTargets; >+ Vector<InstructionStream::Offset> m_jumpTargets; > >- Vector<unsigned> m_propertyAccessInstructions; >+ Vector<InstructionStream::Offset> m_propertyAccessInstructions; > > // Constant Pools > Vector<Identifier> m_identifiers; >@@ -473,6 +469,7 @@ private: > FunctionExpressionVector m_functionExprs; > std::array<unsigned, LinkTimeConstantCount> m_linkTimeConstants; > >+ HashMap<unsigned, unsigned> m_metadataCount; > unsigned m_arrayProfileCount { 0 }; > unsigned m_arrayAllocationProfileCount { 0 }; > unsigned m_objectAllocationProfileCount { 0 }; >diff --git a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp >deleted file mode 100644 >index 48c816a149b1bf406075f03572eb95200ed7862d..0000000000000000000000000000000000000000 >--- a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.cpp >+++ /dev/null >@@ -1,132 +0,0 @@ >-/* >- * Copyright (C) 2014 Apple Inc. All Rights Reserved. >- * >- * Redistribution and use in source and binary forms, with or without >- * modification, are permitted provided that the following conditions >- * are met: >- * 1. Redistributions of source code must retain the above copyright >- * notice, this list of conditions and the following disclaimer. >- * 2. 
Redistributions in binary form must reproduce the above copyright >- * notice, this list of conditions and the following disclaimer in the >- * documentation and/or other materials provided with the distribution. >- * >- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY >- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE >- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR >- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, >- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, >- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR >- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY >- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE >- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. >- */ >- >-#include "config.h" >-#include "UnlinkedInstructionStream.h" >- >-#include "Opcode.h" >- >-namespace JSC { >- >-static void append8(unsigned char*& ptr, unsigned char value) >-{ >- *(ptr++) = value; >-} >- >-static void append32(unsigned char*& ptr, unsigned value) >-{ >- if (!(value & 0xffffffe0)) { >- *(ptr++) = value; >- return; >- } >- >- if ((value & 0xffffffe0) == 0xffffffe0) { >- *(ptr++) = (Negative5Bit << 5) | (value & 0x1f); >- return; >- } >- >- if ((value & 0xffffffe0) == 0x40000000) { >- *(ptr++) = (ConstantRegister5Bit << 5) | (value & 0x1f); >- return; >- } >- >- if (!(value & 0xffffe000)) { >- *(ptr++) = (Positive13Bit << 5) | ((value >> 8) & 0x1f); >- *(ptr++) = value & 0xff; >- return; >- } >- >- if ((value & 0xffffe000) == 0xffffe000) { >- *(ptr++) = (Negative13Bit << 5) | ((value >> 8) & 0x1f); >- *(ptr++) = value & 0xff; >- return; >- } >- >- if ((value & 0xffffe000) == 0x40000000) { >- *(ptr++) = (ConstantRegister13Bit << 5) | ((value >> 8) & 0x1f); >- 
*(ptr++) = value & 0xff; >- return; >- } >- >- *(ptr++) = Full32Bit << 5; >- *(ptr++) = value & 0xff; >- *(ptr++) = (value >> 8) & 0xff; >- *(ptr++) = (value >> 16) & 0xff; >- *(ptr++) = (value >> 24) & 0xff; >-} >- >-UnlinkedInstructionStream::UnlinkedInstructionStream(const Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>& instructions) >- : m_instructionCount(instructions.size()) >-{ >- Vector<unsigned char> buffer; >- >- // Reserve enough space up front so we never have to reallocate when appending. >- buffer.resizeToFit(m_instructionCount * 5); >- unsigned char* ptr = buffer.data(); >- >- const UnlinkedInstruction* instructionsData = instructions.data(); >- for (unsigned i = 0; i < m_instructionCount;) { >- const UnlinkedInstruction* pc = &instructionsData[i]; >- OpcodeID opcode = pc[0].u.opcode; >- append8(ptr, opcode); >- >- unsigned opLength = opcodeLength(opcode); >- >- for (unsigned j = 1; j < opLength; ++j) >- append32(ptr, pc[j].u.unsignedValue); >- >- i += opLength; >- } >- >- buffer.shrink(ptr - buffer.data()); >- m_data = RefCountedArray<unsigned char>(buffer); >-} >- >-size_t UnlinkedInstructionStream::sizeInBytes() const >-{ >- return m_data.size() * sizeof(unsigned char); >-} >- >-#ifndef NDEBUG >-const RefCountedArray<UnlinkedInstruction>& UnlinkedInstructionStream::unpackForDebugging() const >-{ >- if (!m_unpackedInstructionsForDebugging.size()) { >- m_unpackedInstructionsForDebugging = RefCountedArray<UnlinkedInstruction>(m_instructionCount); >- >- Reader instructionReader(*this); >- for (unsigned i = 0; !instructionReader.atEnd(); ) { >- const UnlinkedInstruction* pc = instructionReader.next(); >- unsigned opLength = opcodeLength(pc[0].u.opcode); >- for (unsigned j = 0; j < opLength; ++j) >- m_unpackedInstructionsForDebugging[i++] = pc[j]; >- } >- } >- >- return m_unpackedInstructionsForDebugging; >-} >-#endif >- >-} >- >diff --git a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h 
b/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h >deleted file mode 100644 >index 8c0bf5742dbfdd52bc6ea822c8b77efa5021886f..0000000000000000000000000000000000000000 >--- a/Source/JavaScriptCore/bytecode/UnlinkedInstructionStream.h >+++ /dev/null >@@ -1,149 +0,0 @@ >-/* >- * Copyright (C) 2014 Apple Inc. All Rights Reserved. >- * >- * Redistribution and use in source and binary forms, with or without >- * modification, are permitted provided that the following conditions >- * are met: >- * 1. Redistributions of source code must retain the above copyright >- * notice, this list of conditions and the following disclaimer. >- * 2. Redistributions in binary form must reproduce the above copyright >- * notice, this list of conditions and the following disclaimer in the >- * documentation and/or other materials provided with the distribution. >- * >- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY >- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE >- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR >- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, >- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, >- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR >- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY >- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE >- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
>- */ >- >- >-#pragma once >- >-#include "Opcode.h" >-#include "UnlinkedCodeBlock.h" >-#include <wtf/RefCountedArray.h> >- >-namespace JSC { >- >-class UnlinkedInstructionStream { >- WTF_MAKE_FAST_ALLOCATED; >-public: >- explicit UnlinkedInstructionStream(const Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>&); >- >- unsigned count() const { return m_instructionCount; } >- size_t sizeInBytes() const; >- >- class Reader { >- public: >- explicit Reader(const UnlinkedInstructionStream&); >- >- const UnlinkedInstruction* next(); >- bool atEnd() const { return m_index == m_stream.m_data.size(); } >- >- private: >- unsigned char read8(); >- unsigned read32(); >- >- const UnlinkedInstructionStream& m_stream; >- UnlinkedInstruction m_unpackedBuffer[16]; >- unsigned m_index; >- }; >- >-#ifndef NDEBUG >- const RefCountedArray<UnlinkedInstruction>& unpackForDebugging() const; >-#endif >- >-private: >- friend class Reader; >- >-#ifndef NDEBUG >- mutable RefCountedArray<UnlinkedInstruction> m_unpackedInstructionsForDebugging; >-#endif >- >- RefCountedArray<unsigned char> m_data; >- unsigned m_instructionCount; >-}; >- >-// Unlinked instructions are packed in a simple stream format. >-// >-// The first byte is always the opcode. >-// It's followed by an opcode-dependent number of argument values. 
>-// The first 3 bits of each value determines the format: >-// >-// 5-bit positive integer (1 byte total) >-// 5-bit negative integer (1 byte total) >-// 13-bit positive integer (2 bytes total) >-// 13-bit negative integer (2 bytes total) >-// 5-bit constant register index, based at 0x40000000 (1 byte total) >-// 13-bit constant register index, based at 0x40000000 (2 bytes total) >-// 32-bit raw value (5 bytes total) >- >-enum PackedValueType { >- Positive5Bit = 0, >- Negative5Bit, >- Positive13Bit, >- Negative13Bit, >- ConstantRegister5Bit, >- ConstantRegister13Bit, >- Full32Bit >-}; >- >-ALWAYS_INLINE UnlinkedInstructionStream::Reader::Reader(const UnlinkedInstructionStream& stream) >- : m_stream(stream) >- , m_index(0) >-{ >-} >- >-ALWAYS_INLINE unsigned char UnlinkedInstructionStream::Reader::read8() >-{ >- return m_stream.m_data.data()[m_index++]; >-} >- >-ALWAYS_INLINE unsigned UnlinkedInstructionStream::Reader::read32() >-{ >- const unsigned char* data = &m_stream.m_data.data()[m_index]; >- unsigned char type = data[0] >> 5; >- >- switch (type) { >- case Positive5Bit: >- m_index++; >- return data[0]; >- case Negative5Bit: >- m_index++; >- return 0xffffffe0 | data[0]; >- case Positive13Bit: >- m_index += 2; >- return ((data[0] & 0x1F) << 8) | data[1]; >- case Negative13Bit: >- m_index += 2; >- return 0xffffe000 | ((data[0] & 0x1F) << 8) | data[1]; >- case ConstantRegister5Bit: >- m_index++; >- return 0x40000000 | (data[0] & 0x1F); >- case ConstantRegister13Bit: >- m_index += 2; >- return 0x40000000 | ((data[0] & 0x1F) << 8) | data[1]; >- default: >- ASSERT(type == Full32Bit); >- m_index += 5; >- return data[1] | data[2] << 8 | data[3] << 16 | data[4] << 24; >- } >-} >- >-ALWAYS_INLINE const UnlinkedInstruction* UnlinkedInstructionStream::Reader::next() >-{ >- m_unpackedBuffer[0].u.opcode = static_cast<OpcodeID>(read8()); >- unsigned opLength = opcodeLength(m_unpackedBuffer[0].u.opcode); >- for (unsigned i = 1; i < opLength; ++i) >- 
m_unpackedBuffer[i].u.unsignedValue = read32(); >- return m_unpackedBuffer; >-} >- >-} // namespace JSC >diff --git a/Source/JavaScriptCore/bytecode/VirtualRegister.cpp b/Source/JavaScriptCore/bytecode/VirtualRegister.cpp >index 57cdb62c959a7677961d6e81135212801f158a2c..57d9aa979bdc99f81fc4593c197c28263f0e94dc 100644 >--- a/Source/JavaScriptCore/bytecode/VirtualRegister.cpp >+++ b/Source/JavaScriptCore/bytecode/VirtualRegister.cpp >@@ -26,6 +26,8 @@ > #include "config.h" > #include "VirtualRegister.h" > >+#include "RegisterID.h" >+ > namespace JSC { > > void VirtualRegister::dump(PrintStream& out) const >@@ -61,5 +63,10 @@ void VirtualRegister::dump(PrintStream& out) const > RELEASE_ASSERT_NOT_REACHED(); > } > >-} // namespace JSC > >+VirtualRegister::VirtualRegister(RegisterID* reg) >+ : VirtualRegister(reg->m_virtualRegister.m_virtualRegister) >+{ >+} >+ >+} // namespace JSC >diff --git a/Source/JavaScriptCore/bytecode/VirtualRegister.h b/Source/JavaScriptCore/bytecode/VirtualRegister.h >index f32e8d24f6d4dc1366f2f1efb7796401b0f68bb9..16aa68358960158c05a65a1d41e6933f275537a9 100644 >--- a/Source/JavaScriptCore/bytecode/VirtualRegister.h >+++ b/Source/JavaScriptCore/bytecode/VirtualRegister.h >@@ -42,11 +42,15 @@ inline bool operandIsArgument(int operand) > } > > >+class RegisterID; >+ > class VirtualRegister { > public: > friend VirtualRegister virtualRegisterForLocal(int); > friend VirtualRegister virtualRegisterForArgument(int, int); > >+ VirtualRegister(RegisterID*); >+ > VirtualRegister() > : m_virtualRegister(s_invalidVirtualRegister) > { } >diff --git a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp >index 00afc9f96c2a95c17735f1634cbe70576cac3d17..a43f74219a6d3752b6bd90f343c91bd4d1b9d13c 100644 >--- a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp >+++ b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp >@@ -36,6 +36,7 @@ > #include "BuiltinNames.h" > #include 
"BytecodeGeneratorification.h" > #include "BytecodeLivenessAnalysis.h" >+#include "BytecodeStructs.h" > #include "CatchScope.h" > #include "DefinePropertyAttributes.h" > #include "Interpreter.h" >@@ -56,7 +57,6 @@ > #include "UnlinkedCodeBlock.h" > #include "UnlinkedEvalCodeBlock.h" > #include "UnlinkedFunctionCodeBlock.h" >-#include "UnlinkedInstructionStream.h" > #include "UnlinkedModuleProgramCodeBlock.h" > #include "UnlinkedProgramCodeBlock.h" > #include <wtf/BitVector.h> >@@ -67,6 +67,21 @@ > > namespace JSC { > >+template<typename CallOp, typename = std::true_type> >+struct VarArgsOp; >+ >+template<typename CallOp> >+struct VarArgsOp<CallOp, std::enable_if_t<std::is_same<CallOp, OpTailCall>::value, std::true_type>> { >+ using type = OpTailCallVarargs; >+}; >+ >+ >+template<typename CallOp> >+struct VarArgsOp<CallOp, std::enable_if_t<!std::is_same<CallOp, OpTailCall>::value, std::true_type>> { >+ using type = OpCallVarargs; >+}; >+ >+ > template<typename T> > static inline void shrinkToFit(T& segmentedVector) > { >@@ -78,9 +93,45 @@ void Label::setLocation(BytecodeGenerator& generator, unsigned location) > { > m_location = location; > >- unsigned size = m_unresolvedJumps.size(); >- for (unsigned i = 0; i < size; ++i) >- generator.instructions()[m_unresolvedJumps[i].second].u.operand = m_location - m_unresolvedJumps[i].first; >+ for (auto offset : m_unresolvedJumps) { >+ auto instruction = generator.m_writer.ref(offset); >+ int target = m_location - offset; >+ >+#define CASE(__op) \ >+ case __op::opcodeID(): \ >+ instruction->cast<__op>()->setTarget(target); \ >+ return; >+ >+ switch (instruction->opcodeID()) { >+ CASE(OpJmp) >+ CASE(OpJtrue) >+ CASE(OpJfalse) >+ CASE(OpJeqNull) >+ CASE(OpJneqNull) >+ CASE(OpJeq) >+ CASE(OpJstricteq) >+ CASE(OpJneq) >+ CASE(OpJnstricteq) >+ CASE(OpJless) >+ CASE(OpJlesseq) >+ CASE(OpJgreater) >+ CASE(OpJgreatereq) >+ CASE(OpJnless) >+ CASE(OpJnlesseq) >+ CASE(OpJngreater) >+ CASE(OpJngreatereq) >+ CASE(OpJbelow) >+ 
CASE(OpJbeloweq) >+ default: >+ ASSERT_NOT_REACHED(); >+ } >+#undef CASE >+ } >+} >+ >+int Label::bind(BytecodeGenerator* generator) >+{ >+ return bind(generator->instructions().size()); > } > > void Variable::dump(PrintStream& out) const >@@ -159,10 +210,7 @@ ParserError BytecodeGenerator::generate() > > for (auto& tuple : m_catchesToEmit) { > Ref<Label> realCatchTarget = newEmittedLabel(); >- emitOpcode(op_catch); >- instructions().append(std::get<1>(tuple)); >- instructions().append(std::get<2>(tuple)); >- instructions().append(0); >+ OpCatch::emit(this, std::get<1>(tuple), std::get<2>(tuple)); > > TryData* tryData = std::get<0>(tuple); > emitJump(tryData->target.get()); >@@ -207,10 +255,10 @@ ParserError BytecodeGenerator::generate() > > > if (isGeneratorOrAsyncFunctionBodyParseMode(m_codeBlock->parseMode())) >- performGeneratorification(m_codeBlock.get(), m_instructions, m_generatorFrameSymbolTable.get(), m_generatorFrameSymbolTableIndex); >+ performGeneratorification(*this, m_codeBlock.get(), m_writer, m_generatorFrameSymbolTable.get(), m_generatorFrameSymbolTableIndex); > > RELEASE_ASSERT(static_cast<unsigned>(m_codeBlock->numCalleeLocals()) < static_cast<unsigned>(FirstConstantRegisterIndex)); >- m_codeBlock->setInstructions(std::make_unique<UnlinkedInstructionStream>(m_instructions)); >+ m_codeBlock->setInstructions(m_writer.finalize()); > > m_codeBlock->shrinkToFit(); > >@@ -448,20 +496,12 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionNode* functionNode, Unlinke > entry.disableWatching(*m_vm); > functionSymbolTable->set(NoLockingNecessary, name, entry); > } >- emitOpcode(op_put_to_scope); >- instructions().append(m_lexicalEnvironmentRegister->index()); >- instructions().append(UINT_MAX); >- instructions().append(virtualRegisterForArgument(1 + i).offset()); >- instructions().append(GetPutInfo(ThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand()); >- instructions().append(symbolTableConstantIndex); >- 
instructions().append(offset.offset()); >+ OpPutToScope::emit(this, m_lexicalEnvironmentRegister, UINT_MAX, virtualRegisterForArgument(1 + i), GetPutInfo(ThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization), symbolTableConstantIndex, offset.offset()); > } > > // This creates a scoped arguments object and copies the overflow arguments into the > // scope. It's the equivalent of calling ScopedArguments::createByCopying(). >- emitOpcode(op_create_scoped_arguments); >- instructions().append(m_argumentsRegister->index()); >- instructions().append(m_lexicalEnvironmentRegister->index()); >+ OpCreateScopedArguments::emit(this, m_argumentsRegister, m_lexicalEnvironmentRegister); > } else { > // We're going to put all parameters into the DirectArguments object. First ensure > // that the symbol table knows that this is happening. >@@ -470,8 +510,7 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionNode* functionNode, Unlinke > functionSymbolTable->set(NoLockingNecessary, name, SymbolTableEntry(VarOffset(DirectArgumentsOffset(i)))); > } > >- emitOpcode(op_create_direct_arguments); >- instructions().append(m_argumentsRegister->index()); >+ OpCreateDirectArguments::emit(this, m_argumentsRegister); > } > } else if (isSimpleParameterList) { > // Create the formal parameters the normal way. Any of them could be captured, or not. 
If >@@ -495,20 +534,13 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionNode* functionNode, Unlinke > static_cast<const BindingNode*>(parameters.at(i).first)->boundProperty(); > functionSymbolTable->set(NoLockingNecessary, name, SymbolTableEntry(VarOffset(offset))); > >- emitOpcode(op_put_to_scope); >- instructions().append(m_lexicalEnvironmentRegister->index()); >- instructions().append(addConstant(ident)); >- instructions().append(virtualRegisterForArgument(1 + i).offset()); >- instructions().append(GetPutInfo(ThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand()); >- instructions().append(symbolTableConstantIndex); >- instructions().append(offset.offset()); >+ OpPutToScope::emit(this, m_lexicalEnvironmentRegister, addConstant(ident), virtualRegisterForArgument(1 + i), GetPutInfo(ThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization), symbolTableConstantIndex, offset.offset()); > } > } > > if (needsArguments && (codeBlock->isStrictMode() || !isSimpleParameterList)) { > // Allocate a cloned arguments object. 
>- emitOpcode(op_create_cloned_arguments); >- instructions().append(m_argumentsRegister->index()); >+ OpCreateClonedArguments::emit(this, m_argumentsRegister); > } > > // There are some variables that need to be preinitialized to something other than Undefined: >@@ -1165,15 +1197,9 @@ void BytecodeGenerator::initializeVarLexicalEnvironment(int symbolTableConstantI > { > if (hasCapturedVariables) { > RELEASE_ASSERT(m_lexicalEnvironmentRegister); >- emitOpcode(op_create_lexical_environment); >- instructions().append(m_lexicalEnvironmentRegister->index()); >- instructions().append(scopeRegister()->index()); >- instructions().append(symbolTableConstantIndex); >- instructions().append(addConstantValue(jsUndefined())->index()); >+ OpCreateLexicalEnvironment::emit(this, m_lexicalEnvironmentRegister, scopeRegister(), VirtualRegister { symbolTableConstantIndex }, addConstantValue(jsUndefined())); > >- emitOpcode(op_mov); >- instructions().append(scopeRegister()->index()); >- instructions().append(m_lexicalEnvironmentRegister->index()); >+ OpMov::emit(this, scopeRegister(), m_lexicalEnvironmentRegister); > > pushLocalControlFlowScope(); > } >@@ -1267,17 +1293,6 @@ void BytecodeGenerator::emitLabel(Label& l0) > m_lastOpcodeID = op_end; > } > >-void BytecodeGenerator::emitOpcode(OpcodeID opcodeID) >-{ >-#ifndef NDEBUG >- size_t opcodePosition = instructions().size(); >- ASSERT(opcodePosition - m_lastOpcodePosition == opcodeLength(m_lastOpcodeID) || m_lastOpcodeID == op_end); >- m_lastOpcodePosition = opcodePosition; >-#endif >- instructions().append(opcodeID); >- m_lastOpcodeID = opcodeID; >-} >- > UnlinkedArrayProfile BytecodeGenerator::newArrayProfile() > { > return m_codeBlock->addArrayProfile(); >@@ -1293,18 +1308,9 @@ UnlinkedObjectAllocationProfile BytecodeGenerator::newObjectAllocationProfile() > return m_codeBlock->addObjectAllocationProfile(); > } > >-UnlinkedValueProfile BytecodeGenerator::emitProfiledOpcode(OpcodeID opcodeID) >-{ >- emitOpcode(opcodeID); >- if 
(!m_vm->canUseJIT()) >- return static_cast<UnlinkedValueProfile>(-1); >- UnlinkedValueProfile result = m_codeBlock->addValueProfile(); >- return result; >-} >- > void BytecodeGenerator::emitEnter() > { >- emitOpcode(op_enter); >+ OpEnter::emit(this); > > if (LIKELY(Options::optimizeRecursiveTailCalls())) { > // We must add the end of op_enter as a potential jump target, because the bytecode parser may decide to split its basic block >@@ -1317,272 +1323,154 @@ void BytecodeGenerator::emitEnter() > > void BytecodeGenerator::emitLoopHint() > { >- emitOpcode(op_loop_hint); >+ OpLoopHint::emit(this); > emitCheckTraps(); > } > >-void BytecodeGenerator::emitCheckTraps() >+void BytecodeGenerator::emitJump(Label& target) > { >- emitOpcode(op_check_traps); >+ OpJmp::emit(this, target.bind(this)); > } > >-void BytecodeGenerator::retrieveLastBinaryOp(int& dstIndex, int& src1Index, int& src2Index) >+void BytecodeGenerator::emitCheckTraps() > { >- ASSERT(instructions().size() >= 4); >- size_t size = instructions().size(); >- dstIndex = instructions().at(size - 3).u.operand; >- src1Index = instructions().at(size - 2).u.operand; >- src2Index = instructions().at(size - 1).u.operand; >+ OpCheckTraps::emit(this); > } > >-void BytecodeGenerator::retrieveLastUnaryOp(int& dstIndex, int& srcIndex) >+void ALWAYS_INLINE BytecodeGenerator::rewind() > { >- ASSERT(instructions().size() >= 3); >- size_t size = instructions().size(); >- dstIndex = instructions().at(size - 2).u.operand; >- srcIndex = instructions().at(size - 1).u.operand; >+ ASSERT(m_lastInstruction.isValid()); >+ m_lastOpcodeID = m_lastInstruction->opcodeID(); >+ m_writer.rewind(m_lastInstruction); > } > >-void ALWAYS_INLINE BytecodeGenerator::rewindBinaryOp() >+template<typename BinOp, typename JmpOp> >+bool BytecodeGenerator::fuseCompareAndJump(RegisterID* cond, Label& target, bool swapOperands) > { >- ASSERT(instructions().size() >= 4); >- instructions().shrink(instructions().size() - 4); >- m_lastOpcodeID = op_end; >-} >+ 
auto binop = m_lastInstruction->as<BinOp>(); >+ if (cond->index() == binop.dst.offset() && cond->isTemporary() && !cond->refCount()) { >+ rewind(); > >-void ALWAYS_INLINE BytecodeGenerator::rewindUnaryOp() >-{ >- ASSERT(instructions().size() >= 3); >- instructions().shrink(instructions().size() - 3); >- m_lastOpcodeID = op_end; >+ if (swapOperands) >+ std::swap(binop.lhs, binop.rhs); >+ >+ JmpOp::emit(this, binop.lhs, binop.rhs, target.bind(this)); >+ return true; >+ } >+ return false; > } > >-void BytecodeGenerator::emitJump(Label& target) >+template<typename UnaryOp, typename JmpOp> >+bool BytecodeGenerator::fuseTestAndJmp(RegisterID* cond, Label& target) > { >- size_t begin = instructions().size(); >- emitOpcode(op_jmp); >- instructions().append(target.bind(begin, instructions().size())); >+ auto unop = m_lastInstruction->as<UnaryOp>(); >+ if (cond->index() == unop.dst.offset() && cond->isTemporary() && !cond->refCount()) { >+ rewind(); >+ >+ JmpOp::emit(this, unop.operand, target.bind(this)); >+ return true; >+ } >+ return false; > } > > void BytecodeGenerator::emitJumpIfTrue(RegisterID* cond, Label& target) > { >- auto fuseCompareAndJump = [&] (OpcodeID jumpID) { >- int dstIndex; >- int src1Index; >- int src2Index; >- >- retrieveLastBinaryOp(dstIndex, src1Index, src2Index); >- >- if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) { >- rewindBinaryOp(); >- >- size_t begin = instructions().size(); >- emitOpcode(jumpID); >- instructions().append(src1Index); >- instructions().append(src2Index); >- instructions().append(target.bind(begin, instructions().size())); >- return true; >- } >- return false; >- }; > > if (m_lastOpcodeID == op_less) { >- if (fuseCompareAndJump(op_jless)) >+ if (fuseCompareAndJump<OpLess, OpJless>(cond, target)) > return; > } else if (m_lastOpcodeID == op_lesseq) { >- if (fuseCompareAndJump(op_jlesseq)) >+ if (fuseCompareAndJump<OpLesseq, OpJlesseq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_greater) { 
>- if (fuseCompareAndJump(op_jgreater)) >+ if (fuseCompareAndJump<OpGreater, OpJgreater>(cond, target)) > return; > } else if (m_lastOpcodeID == op_greatereq) { >- if (fuseCompareAndJump(op_jgreatereq)) >+ if(fuseCompareAndJump<OpGreatereq, OpJgreatereq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_eq) { >- if (fuseCompareAndJump(op_jeq)) >+ if(fuseCompareAndJump<OpEq, OpJeq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_stricteq) { >- if (fuseCompareAndJump(op_jstricteq)) >+ if (fuseCompareAndJump<OpStricteq, OpJstricteq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_neq) { >- if (fuseCompareAndJump(op_jneq)) >+ if (fuseCompareAndJump<OpNeq, OpJneq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_nstricteq) { >- if (fuseCompareAndJump(op_jnstricteq)) >+ if (fuseCompareAndJump<OpNstricteq, OpJnstricteq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_below) { >- if (fuseCompareAndJump(op_jbelow)) >+ if (fuseCompareAndJump<OpBelow, OpJbelow>(cond, target)) > return; > } else if (m_lastOpcodeID == op_beloweq) { >- if (fuseCompareAndJump(op_jbeloweq)) >+ if (fuseCompareAndJump<OpBeloweq, OpJbeloweq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_eq_null && target.isForward()) { >- int dstIndex; >- int srcIndex; >- >- retrieveLastUnaryOp(dstIndex, srcIndex); >- >- if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) { >- rewindUnaryOp(); >- >- size_t begin = instructions().size(); >- emitOpcode(op_jeq_null); >- instructions().append(srcIndex); >- instructions().append(target.bind(begin, instructions().size())); >+ if (fuseTestAndJmp<OpEqNull, OpJeqNull>(cond, target)) > return; >- } > } else if (m_lastOpcodeID == op_neq_null && target.isForward()) { >- int dstIndex; >- int srcIndex; >- >- retrieveLastUnaryOp(dstIndex, srcIndex); >- >- if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) { >- rewindUnaryOp(); >- >- size_t begin = instructions().size(); 
>- emitOpcode(op_jneq_null); >- instructions().append(srcIndex); >- instructions().append(target.bind(begin, instructions().size())); >+ if (fuseTestAndJmp<OpNeqNull, OpJneqNull>(cond, target)) > return; >- } > } > >- size_t begin = instructions().size(); >- >- emitOpcode(op_jtrue); >- instructions().append(cond->index()); >- instructions().append(target.bind(begin, instructions().size())); >+ OpJtrue::emit(this, cond, target.bind(this)); > } > > void BytecodeGenerator::emitJumpIfFalse(RegisterID* cond, Label& target) > { >- auto fuseCompareAndJump = [&] (OpcodeID jumpID, bool replaceOperands) { >- int dstIndex; >- int src1Index; >- int src2Index; >- >- retrieveLastBinaryOp(dstIndex, src1Index, src2Index); >- >- if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) { >- rewindBinaryOp(); >- >- size_t begin = instructions().size(); >- emitOpcode(jumpID); >- // Since op_below and op_beloweq only accepts Int32, replacing operands is not observable to users. >- if (replaceOperands) >- std::swap(src1Index, src2Index); >- instructions().append(src1Index); >- instructions().append(src2Index); >- instructions().append(target.bind(begin, instructions().size())); >- return true; >- } >- return false; >- }; >- > if (m_lastOpcodeID == op_less && target.isForward()) { >- if (fuseCompareAndJump(op_jnless, false)) >+ if (fuseCompareAndJump<OpLess, OpJnless>(cond, target)) > return; > } else if (m_lastOpcodeID == op_lesseq && target.isForward()) { >- if (fuseCompareAndJump(op_jnlesseq, false)) >+ if (fuseCompareAndJump<OpLesseq, OpJnlesseq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_greater && target.isForward()) { >- if (fuseCompareAndJump(op_jngreater, false)) >+ if (fuseCompareAndJump<OpGreater, OpJngreater>(cond, target)) > return; > } else if (m_lastOpcodeID == op_greatereq && target.isForward()) { >- if (fuseCompareAndJump(op_jngreatereq, false)) >+ if (fuseCompareAndJump<OpGreatereq, OpJngreatereq>(cond, target)) > return; > } else if 
(m_lastOpcodeID == op_eq && target.isForward()) { >- if (fuseCompareAndJump(op_jneq, false)) >+ if (fuseCompareAndJump<OpEq, OpJneq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_stricteq && target.isForward()) { >- if (fuseCompareAndJump(op_jnstricteq, false)) >+ if (fuseCompareAndJump<OpStricteq, OpJnstricteq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_neq && target.isForward()) { >- if (fuseCompareAndJump(op_jeq, false)) >+ if (fuseCompareAndJump<OpNeq, OpJeq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_nstricteq && target.isForward()) { >- if (fuseCompareAndJump(op_jstricteq, false)) >+ if (fuseCompareAndJump<OpNstricteq, OpJstricteq>(cond, target)) > return; > } else if (m_lastOpcodeID == op_below && target.isForward()) { >- if (fuseCompareAndJump(op_jbeloweq, true)) >+ if (fuseCompareAndJump<OpBelow, OpJbeloweq>(cond, target, true)) > return; > } else if (m_lastOpcodeID == op_beloweq && target.isForward()) { >- if (fuseCompareAndJump(op_jbelow, true)) >+ if (fuseCompareAndJump<OpBeloweq, OpJbelow>(cond, target, true)) > return; > } else if (m_lastOpcodeID == op_not) { >- int dstIndex; >- int srcIndex; >- >- retrieveLastUnaryOp(dstIndex, srcIndex); >- >- if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) { >- rewindUnaryOp(); >- >- size_t begin = instructions().size(); >- emitOpcode(op_jtrue); >- instructions().append(srcIndex); >- instructions().append(target.bind(begin, instructions().size())); >+ if (fuseTestAndJmp<OpNot, OpJtrue>(cond, target)) > return; >- } > } else if (m_lastOpcodeID == op_eq_null && target.isForward()) { >- int dstIndex; >- int srcIndex; >- >- retrieveLastUnaryOp(dstIndex, srcIndex); >- >- if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) { >- rewindUnaryOp(); >- >- size_t begin = instructions().size(); >- emitOpcode(op_jneq_null); >- instructions().append(srcIndex); >- instructions().append(target.bind(begin, instructions().size())); >+ if 
(fuseTestAndJmp<OpEqNull, OpJneqNull>(cond, target)) > return; >- } > } else if (m_lastOpcodeID == op_neq_null && target.isForward()) { >- int dstIndex; >- int srcIndex; >- >- retrieveLastUnaryOp(dstIndex, srcIndex); >- >- if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) { >- rewindUnaryOp(); >- >- size_t begin = instructions().size(); >- emitOpcode(op_jeq_null); >- instructions().append(srcIndex); >- instructions().append(target.bind(begin, instructions().size())); >+ if (fuseTestAndJmp<OpNeqNull, OpJeqNull>(cond, target)) > return; >- } > } > >- size_t begin = instructions().size(); >- emitOpcode(op_jfalse); >- instructions().append(cond->index()); >- instructions().append(target.bind(begin, instructions().size())); >+ OpJfalse::emit(this, cond, target.bind(this)); > } > > void BytecodeGenerator::emitJumpIfNotFunctionCall(RegisterID* cond, Label& target) > { >- size_t begin = instructions().size(); >- >- emitOpcode(op_jneq_ptr); >- instructions().append(cond->index()); >- instructions().append(Special::CallFunction); >- instructions().append(target.bind(begin, instructions().size())); >- instructions().append(0); >+ OpJneqPtr::emit(this, cond, Special::CallFunction, target.bind(this)); > } > > void BytecodeGenerator::emitJumpIfNotFunctionApply(RegisterID* cond, Label& target) > { >- size_t begin = instructions().size(); >- >- emitOpcode(op_jneq_ptr); >- instructions().append(cond->index()); >- instructions().append(Special::ApplyFunction); >- instructions().append(target.bind(begin, instructions().size())); >- instructions().append(0); >+ OpJneqPtr::emit(this, cond, Special::ApplyFunction, target.bind(this)); > } > > bool BytecodeGenerator::hasConstant(const Identifier& ident) const >@@ -1644,9 +1532,7 @@ RegisterID* BytecodeGenerator::moveLinkTimeConstant(RegisterID* dst, LinkTimeCon > if (!dst) > return m_linkTimeConstantRegisters[constantIndex]; > >- emitOpcode(op_mov); >- instructions().append(dst->index()); >- 
instructions().append(m_linkTimeConstantRegisters[constantIndex]->index()); >+ OpMov::emit(this, dst, m_linkTimeConstantRegisters[constantIndex]); > > return dst; > } >@@ -1655,9 +1541,8 @@ RegisterID* BytecodeGenerator::moveEmptyValue(RegisterID* dst) > { > RefPtr<RegisterID> emptyValue = addConstantEmptyValue(); > >- emitOpcode(op_mov); >- instructions().append(dst->index()); >- instructions().append(emptyValue->index()); >+ OpMov::emit(this, dst, emptyValue.get()); >+ > return dst; > } > >@@ -1665,163 +1550,169 @@ RegisterID* BytecodeGenerator::emitMove(RegisterID* dst, RegisterID* src) > { > ASSERT(src != m_emptyValueRegister); > >- m_staticPropertyAnalyzer.mov(dst->index(), src->index()); >- emitOpcode(op_mov); >- instructions().append(dst->index()); >- instructions().append(src->index()); >+ m_staticPropertyAnalyzer.mov(dst, src); >+ OpMov::emit(this, dst, src); > > return dst; > } > >-RegisterID* BytecodeGenerator::emitUnaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src) >+RegisterID* BytecodeGenerator::emitUnaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src, OperandTypes types) > { >- ASSERT_WITH_MESSAGE(op_to_number != opcodeID, "op_to_number has a Value Profile."); >- ASSERT_WITH_MESSAGE(op_negate != opcodeID, "op_negate has an Arith Profile."); >- emitOpcode(opcodeID); >- instructions().append(dst->index()); >- instructions().append(src->index()); >- >+ switch (opcodeID) { >+ case op_not: >+ emitUnaryOp<OpNot>(dst, src); >+ break; >+ case op_negate: >+ OpNegate::emit(this, dst, src, types); >+ break; >+ case op_to_number: >+ emitUnaryOp<OpToNumber>(dst, src); >+ break; >+ default: >+ ASSERT_NOT_REACHED(); >+ } > return dst; > } > >-RegisterID* BytecodeGenerator::emitUnaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src, OperandTypes types) >+RegisterID* BytecodeGenerator::emitBinaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes types) > { >- ASSERT_WITH_MESSAGE(op_to_number != opcodeID, 
"op_to_number has a Value Profile."); >- emitOpcode(opcodeID); >- instructions().append(dst->index()); >- instructions().append(src->index()); >+ switch (opcodeID) { >+ case op_eq: >+ return emitBinaryOp<OpEq>(dst, src1, src2, types); >+ case op_neq: >+ return emitBinaryOp<OpNeq>(dst, src1, src2, types); >+ case op_stricteq: >+ return emitBinaryOp<OpStricteq>(dst, src1, src2, types); >+ case op_nstricteq: >+ return emitBinaryOp<OpNstricteq>(dst, src1, src2, types); >+ case op_less: >+ return emitBinaryOp<OpLess>(dst, src1, src2, types); >+ case op_lesseq: >+ return emitBinaryOp<OpLesseq>(dst, src1, src2, types); >+ case op_greater: >+ return emitBinaryOp<OpGreater>(dst, src1, src2, types); >+ case op_greatereq: >+ return emitBinaryOp<OpGreatereq>(dst, src1, src2, types); >+ case op_below: >+ return emitBinaryOp<OpBelow>(dst, src1, src2, types); >+ case op_beloweq: >+ return emitBinaryOp<OpBeloweq>(dst, src1, src2, types); >+ case op_mod: >+ return emitBinaryOp<OpMod>(dst, src1, src2, types); >+ case op_pow: >+ return emitBinaryOp<OpPow>(dst, src1, src2, types); >+ case op_lshift: >+ return emitBinaryOp<OpLshift>(dst, src1, src2, types); >+ case op_rshift: >+ return emitBinaryOp<OpRshift>(dst, src1, src2, types); >+ case op_urshift: >+ return emitBinaryOp<OpUrshift>(dst, src1, src2, types); >+ case op_add: >+ return emitBinaryOp<OpAdd>(dst, src1, src2, types); >+ case op_mul: >+ return emitBinaryOp<OpMul>(dst, src1, src2, types); >+ case op_div: >+ return emitBinaryOp<OpDiv>(dst, src1, src2, types); >+ case op_sub: >+ return emitBinaryOp<OpSub>(dst, src1, src2, types); >+ case op_bitand: >+ return emitBinaryOp<OpBitand>(dst, src1, src2, types); >+ case op_bitxor: >+ return emitBinaryOp<OpBitxor>(dst, src1, src2, types); >+ case op_bitor: >+ return emitBinaryOp<OpBitor>(dst, src1, src2, types); >+ default: >+ ASSERT_NOT_REACHED(); >+ } >+} > >- if (opcodeID == op_negate) >- instructions().append(ArithProfile(types.first()).bits()); >+RegisterID* 
BytecodeGenerator::emitToObject(RegisterID* dst, RegisterID* src, const Identifier& message) >+{ >+ OpToObject::emit(this, dst, src, addConstant(message)); > return dst; > } > >-RegisterID* BytecodeGenerator::emitUnaryOpProfiled(OpcodeID opcodeID, RegisterID* dst, RegisterID* src) >+RegisterID* BytecodeGenerator::emitToNumber(RegisterID* dst, RegisterID* src) > { >- UnlinkedValueProfile profile = emitProfiledOpcode(opcodeID); >- instructions().append(dst->index()); >- instructions().append(src->index()); >- instructions().append(profile); >- return dst; >+ return emitUnaryOp<OpToNumber>(dst, src); > } > >-RegisterID* BytecodeGenerator::emitToObject(RegisterID* dst, RegisterID* src, const Identifier& message) >+RegisterID* BytecodeGenerator::emitToString(RegisterID* dst, RegisterID* src) > { >- UnlinkedValueProfile profile = emitProfiledOpcode(op_to_object); >- instructions().append(dst->index()); >- instructions().append(src->index()); >- instructions().append(addConstant(message)); >- instructions().append(profile); >- return dst; >+ return emitUnaryOp<OpToString>(dst, src); > } > >-RegisterID* BytecodeGenerator::emitInc(RegisterID* srcDst) >+RegisterID* BytecodeGenerator::emitTypeOf(RegisterID* dst, RegisterID* src) > { >- emitOpcode(op_inc); >- instructions().append(srcDst->index()); >- return srcDst; >+ return emitUnaryOp<OpTypeof>(dst, src); > } > >-RegisterID* BytecodeGenerator::emitDec(RegisterID* srcDst) >+RegisterID* BytecodeGenerator::emitInc(RegisterID* srcDst) > { >- emitOpcode(op_dec); >- instructions().append(srcDst->index()); >+ OpInc::emit(this, srcDst); > return srcDst; > } > >-RegisterID* BytecodeGenerator::emitBinaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes types) >+RegisterID* BytecodeGenerator::emitDec(RegisterID* srcDst) > { >- emitOpcode(opcodeID); >- instructions().append(dst->index()); >- instructions().append(src1->index()); >- instructions().append(src2->index()); >- >- if (opcodeID == 
op_bitor || opcodeID == op_bitand || opcodeID == op_bitxor || >- opcodeID == op_add || opcodeID == op_mul || opcodeID == op_sub || opcodeID == op_div) >- instructions().append(ArithProfile(types.first(), types.second()).bits()); >- >- return dst; >+ OpDec::emit(this, srcDst); >+ return srcDst; > } > >-RegisterID* BytecodeGenerator::emitEqualityOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2) >+template<typename EqOp> >+RegisterID* BytecodeGenerator::emitEqualityOp(RegisterID* dst, RegisterID* src1, RegisterID* src2) > { >- if (m_lastOpcodeID == op_typeof) { >- int dstIndex; >- int srcIndex; >- >- retrieveLastUnaryOp(dstIndex, srcIndex); >- >- if (src1->index() == dstIndex >+ if (m_lastInstruction->is<OpTypeof>()) { >+ auto op = m_lastInstruction->as<OpTypeof>(); >+ if (src1->index() == op.dst.offset() > && src1->isTemporary() > && m_codeBlock->isConstantRegisterIndex(src2->index()) > && m_codeBlock->constantRegister(src2->index()).get().isString()) { > const String& value = asString(m_codeBlock->constantRegister(src2->index()).get())->tryGetValue(); > if (value == "undefined") { >- rewindUnaryOp(); >- emitOpcode(op_is_undefined); >- instructions().append(dst->index()); >- instructions().append(srcIndex); >+ rewind(); >+ OpIsUndefined::emit(this, dst, op.value); > return dst; > } > if (value == "boolean") { >- rewindUnaryOp(); >- emitOpcode(op_is_boolean); >- instructions().append(dst->index()); >- instructions().append(srcIndex); >+ rewind(); >+ OpIsBoolean::emit(this, dst, op.value); > return dst; > } > if (value == "number") { >- rewindUnaryOp(); >- emitOpcode(op_is_number); >- instructions().append(dst->index()); >- instructions().append(srcIndex); >+ rewind(); >+ OpIsNumber::emit(this, dst, op.value); > return dst; > } > if (value == "string") { >- rewindUnaryOp(); >- emitOpcode(op_is_cell_with_type); >- instructions().append(dst->index()); >- instructions().append(srcIndex); >- instructions().append(StringType); >+ rewind(); >+ 
OpIsCellWithType::emit(this, dst, op.value, StringType); > return dst; > } > if (value == "symbol") { >- rewindUnaryOp(); >- emitOpcode(op_is_cell_with_type); >- instructions().append(dst->index()); >- instructions().append(srcIndex); >- instructions().append(SymbolType); >+ rewind(); >+ OpIsCellWithType::emit(this, dst, op.value, SymbolType); > return dst; > } > if (Options::useBigInt() && value == "bigint") { >- rewindUnaryOp(); >- emitOpcode(op_is_cell_with_type); >- instructions().append(dst->index()); >- instructions().append(srcIndex); >- instructions().append(BigIntType); >+ rewind(); >+ OpIsCellWithType::emit(this, dst, op.value, BigIntType); > return dst; > } > if (value == "object") { >- rewindUnaryOp(); >- emitOpcode(op_is_object_or_null); >- instructions().append(dst->index()); >- instructions().append(srcIndex); >+ rewind(); >+ OpIsObjectOrNull::emit(this, dst, op.value); > return dst; > } > if (value == "function") { >- rewindUnaryOp(); >- emitOpcode(op_is_function); >- instructions().append(dst->index()); >- instructions().append(srcIndex); >+ rewind(); >+ OpIsFunction::emit(this, dst, op.value); > return dst; > } > } > } > >- emitOpcode(opcodeID); >- instructions().append(dst->index()); >- instructions().append(src1->index()); >- instructions().append(src2->index()); >+ EqOp::emit(this, dst, src1, src2); > return dst; > } > >@@ -1843,12 +1734,7 @@ void BytecodeGenerator::emitProfileType(RegisterID* registerToProfile, ProfileTy > if (!registerToProfile) > return; > >- emitOpcode(op_profile_type); >- instructions().append(registerToProfile->index()); >- instructions().append(0); >- instructions().append(flag); >- instructions().append(0); >- instructions().append(resolveType()); >+ OpProfileType::emit(this, registerToProfile, 0, flag, {}, resolveType()); > > // Don't emit expression info for this version of profile type. 
This generally means > // we're profiling information for something that isn't in the actual text of a JavaScript >@@ -1869,13 +1755,7 @@ void BytecodeGenerator::emitProfileType(RegisterID* registerToProfile, ProfileTy > return; > > // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType? >- emitOpcode(op_profile_type); >- instructions().append(registerToProfile->index()); >- instructions().append(0); >- instructions().append(flag); >- instructions().append(0); >- instructions().append(resolveType()); >- >+ OpProfileType::emit(this, registerToProfile, 0, flag, {}, resolveType()); > emitTypeProfilerExpressionInfo(startDivot, endDivot); > } > >@@ -1899,12 +1779,7 @@ void BytecodeGenerator::emitProfileType(RegisterID* registerToProfile, const Var > } > > // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType? >- emitOpcode(op_profile_type); >- instructions().append(registerToProfile->index()); >- instructions().append(symbolTableOrScopeDepth); >- instructions().append(flag); >- instructions().append(addConstant(var.ident())); >- instructions().append(resolveType()); >+ OpProfileType::emit(this, registerToProfile, symbolTableOrScopeDepth, flag, addConstant(var.ident()), resolveType()); > > emitTypeProfilerExpressionInfo(startDivot, endDivot); > } >@@ -1916,8 +1791,7 @@ void BytecodeGenerator::emitProfileControlFlow(int textOffset) > size_t bytecodeOffset = instructions().size(); > m_codeBlock->addOpProfileControlFlowBytecodeOffset(bytecodeOffset); > >- emitOpcode(op_profile_control_flow); >- instructions().append(textOffset); >+ OpProfileControlFlow::emit(this, textOffset); > } > } > >@@ -2116,11 +1990,7 @@ void BytecodeGenerator::pushLexicalScopeInternal(VariableEnvironment& environmen > if (constantSymbolTableResult) > *constantSymbolTableResult = constantSymbolTable; > >- emitOpcode(op_create_lexical_environment); >- 
instructions().append(newScope->index()); >- instructions().append(scopeRegister()->index()); >- instructions().append(constantSymbolTable->index()); >- instructions().append(addConstantValue(tdzRequirement == TDZRequirement::UnderTDZ ? jsTDZValue() : jsUndefined())->index()); >+ OpCreateLexicalEnvironment::emit(this, newScope, scopeRegister(), VirtualRegister { symbolTableConstantIndex }, addConstantValue(tdzRequirement == TDZRequirement::UnderTDZ ? jsTDZValue() : jsUndefined())); > > move(scopeRegister(), newScope); > >@@ -2251,10 +2121,7 @@ RegisterID* BytecodeGenerator::emitResolveScopeForHoistingFuncDeclInEval(Registe > ASSERT(m_codeType == EvalCode); > > dst = finalDestination(dst); >- emitOpcode(op_resolve_scope_for_hoisting_func_decl_in_eval); >- instructions().append(kill(dst)); >- instructions().append(m_topMostScope->index()); >- instructions().append(addConstant(property)); >+ OpResolveScopeForHoistingFuncDeclInEval::emit(this, kill(dst), m_topMostScope, addConstant(property)); > return dst; > } > >@@ -2352,11 +2219,7 @@ void BytecodeGenerator::prepareLexicalScopeForNextForLoopIteration(VariableEnvir > RefPtr<RegisterID> parentScope = emitGetParentScope(newTemporary(), loopScope); > move(scopeRegister(), parentScope.get()); > >- emitOpcode(op_create_lexical_environment); >- instructions().append(loopScope->index()); >- instructions().append(scopeRegister()->index()); >- instructions().append(loopSymbolTable->index()); >- instructions().append(addConstantValue(jsTDZValue())->index()); >+ OpCreateLexicalEnvironment::emit(this, loopScope, scopeRegister(), loopSymbolTable, addConstantValue(jsTDZValue())); > > move(scopeRegister(), loopScope); > >@@ -2481,10 +2344,7 @@ void BytecodeGenerator::createVariable( > > RegisterID* BytecodeGenerator::emitOverridesHasInstance(RegisterID* dst, RegisterID* constructor, RegisterID* hasInstanceValue) > { >- emitOpcode(op_overrides_has_instance); >- instructions().append(dst->index()); >- 
instructions().append(constructor->index()); >- instructions().append(hasInstanceValue->index()); >+ OpOverridesHasInstance::emit(this, dst, constructor, hasInstanceValue); > return dst; > } > >@@ -2549,13 +2409,7 @@ RegisterID* BytecodeGenerator::emitResolveScope(RegisterID* dst, const Variable& > > // resolve_scope dst, id, ResolveType, depth > dst = tempDestination(dst); >- emitOpcode(op_resolve_scope); >- instructions().append(kill(dst)); >- instructions().append(scopeRegister()->index()); >- instructions().append(addConstant(variable.ident())); >- instructions().append(resolveType()); >- instructions().append(localScopeDepth()); >- instructions().append(0); >+ OpResolveScope::emit(this, kill(dst), scopeRegister(), addConstant(variable.ident()), resolveType(), localScopeDepth()); > return dst; > } > >@@ -2570,11 +2424,7 @@ RegisterID* BytecodeGenerator::emitGetFromScope(RegisterID* dst, RegisterID* sco > return move(dst, variable.local()); > > case VarKind::DirectArgument: { >- UnlinkedValueProfile profile = emitProfiledOpcode(op_get_from_arguments); >- instructions().append(kill(dst)); >- instructions().append(scope->index()); >- instructions().append(variable.offset().capturedArgumentsOffset().offset()); >- instructions().append(profile); >+ OpGetFromArguments::emit(this, kill(dst), scope, variable.offset().capturedArgumentsOffset().offset()); > return dst; > } > >@@ -2583,14 +2433,14 @@ RegisterID* BytecodeGenerator::emitGetFromScope(RegisterID* dst, RegisterID* sco > m_codeBlock->addPropertyAccessInstruction(instructions().size()); > > // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand >- UnlinkedValueProfile profile = emitProfiledOpcode(op_get_from_scope); >- instructions().append(kill(dst)); >- instructions().append(scope->index()); >- instructions().append(addConstant(variable.ident())); >- instructions().append(GetPutInfo(resolveMode, variable.offset().isScope() ? 
LocalClosureVar : resolveType(), InitializationMode::NotInitialization).operand()); >- instructions().append(localScopeDepth()); >- instructions().append(variable.offset().isScope() ? variable.offset().scopeOffset().offset() : 0); >- instructions().append(profile); >+ OpGetFromScope::emit( >+ this, >+ kill(dst), >+ scope, >+ addConstant(variable.ident()), >+ GetPutInfo(resolveMode, variable.offset().isScope() ? LocalClosureVar : resolveType(), InitializationMode::NotInitialization), >+ localScopeDepth(), >+ variable.offset().isScope() ? variable.offset().scopeOffset().offset() : 0); > return dst; > } } > >@@ -2605,10 +2455,7 @@ RegisterID* BytecodeGenerator::emitPutToScope(RegisterID* scope, const Variable& > return value; > > case VarKind::DirectArgument: >- emitOpcode(op_put_to_arguments); >- instructions().append(scope->index()); >- instructions().append(variable.offset().capturedArgumentsOffset().offset()); >- instructions().append(value->index()); >+ OpPutToArguments::emit(this, scope, variable.offset().capturedArgumentsOffset().offset(), value); > return value; > > case VarKind::Scope: >@@ -2616,21 +2463,19 @@ RegisterID* BytecodeGenerator::emitPutToScope(RegisterID* scope, const Variable& > m_codeBlock->addPropertyAccessInstruction(instructions().size()); > > // put_to_scope scope, id, value, GetPutInfo, Structure, Operand >- emitOpcode(op_put_to_scope); >- instructions().append(scope->index()); >- instructions().append(addConstant(variable.ident())); >- instructions().append(value->index()); >+ GetPutInfo getPutInfo(0); >+ int scopeDepth; > ScopeOffset offset; > if (variable.offset().isScope()) { > offset = variable.offset().scopeOffset(); >- instructions().append(GetPutInfo(resolveMode, LocalClosureVar, initializationMode).operand()); >- instructions().append(variable.symbolTableConstantIndex()); >+ getPutInfo = GetPutInfo(resolveMode, LocalClosureVar, initializationMode); >+ scopeDepth = variable.symbolTableConstantIndex(); > } else { > 
ASSERT(resolveType() != LocalClosureVar); >- instructions().append(GetPutInfo(resolveMode, resolveType(), initializationMode).operand()); >- instructions().append(localScopeDepth()); >+ getPutInfo = GetPutInfo(resolveMode, resolveType(), initializationMode); >+ scopeDepth = localScopeDepth(); > } >- instructions().append(!!offset ? offset.offset() : 0); >+ OpPutToScope::emit(this, scope, addConstant(variable.ident()), value, getPutInfo, scopeDepth, !!offset ? offset.offset() : 0); > return value; > } } > >@@ -2646,40 +2491,25 @@ RegisterID* BytecodeGenerator::initializeVariable(const Variable& variable, Regi > > RegisterID* BytecodeGenerator::emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* basePrototype) > { >- emitOpcode(op_instanceof); >- instructions().append(dst->index()); >- instructions().append(value->index()); >- instructions().append(basePrototype->index()); >+ OpInstanceof::emit(this, dst, value, basePrototype); > return dst; > } > > RegisterID* BytecodeGenerator::emitInstanceOfCustom(RegisterID* dst, RegisterID* value, RegisterID* constructor, RegisterID* hasInstanceValue) > { >- emitOpcode(op_instanceof_custom); >- instructions().append(dst->index()); >- instructions().append(value->index()); >- instructions().append(constructor->index()); >- instructions().append(hasInstanceValue->index()); >+ OpInstanceofCustom::emit(this, dst, value, constructor, hasInstanceValue); > return dst; > } > > RegisterID* BytecodeGenerator::emitInByVal(RegisterID* dst, RegisterID* property, RegisterID* base) > { >- UnlinkedArrayProfile arrayProfile = newArrayProfile(); >- emitOpcode(op_in_by_val); >- instructions().append(dst->index()); >- instructions().append(base->index()); >- instructions().append(property->index()); >- instructions().append(arrayProfile); >+ OpInByVal::emit(this, dst, base, property); > return dst; > } > > RegisterID* BytecodeGenerator::emitInById(RegisterID* dst, RegisterID* base, const Identifier& property) > { >- 
emitOpcode(op_in_by_id); >- instructions().append(dst->index()); >- instructions().append(base->index()); >- instructions().append(addConstant(property)); >+ OpInById::emit(this, dst, base, addConstant(property)); > return dst; > } > >@@ -2687,11 +2517,7 @@ RegisterID* BytecodeGenerator::emitTryGetById(RegisterID* dst, RegisterID* base, > { > ASSERT_WITH_MESSAGE(!parseIndex(property), "Indexed properties are not supported with tryGetById."); > >- UnlinkedValueProfile profile = emitProfiledOpcode(op_try_get_by_id); >- instructions().append(kill(dst)); >- instructions().append(base->index()); >- instructions().append(addConstant(property)); >- instructions().append(profile); >+ OpTryGetById::emit(this, kill(dst), base, addConstant(property)); > return dst; > } > >@@ -2701,15 +2527,8 @@ RegisterID* BytecodeGenerator::emitGetById(RegisterID* dst, RegisterID* base, co > > m_codeBlock->addPropertyAccessInstruction(instructions().size()); > >- UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_id); >- instructions().append(kill(dst)); >- instructions().append(base->index()); >- instructions().append(addConstant(property)); >- instructions().append(0); >- instructions().append(0); >- instructions().append(0); >- instructions().append(Options::prototypeHitCountForLLIntCaching()); >- instructions().append(profile); >+ OpGetById::emit(this, kill(dst), base, addConstant(property)); >+ // TODO: instructions().append(Options::prototypeHitCountForLLIntCaching()); > return dst; > } > >@@ -2717,12 +2536,7 @@ RegisterID* BytecodeGenerator::emitGetById(RegisterID* dst, RegisterID* base, Re > { > ASSERT_WITH_MESSAGE(!parseIndex(property), "Indexed properties should be handled with get_by_val."); > >- UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_id_with_this); >- instructions().append(kill(dst)); >- instructions().append(base->index()); >- instructions().append(thisVal->index()); >- instructions().append(addConstant(property)); >- instructions().append(profile); 
>+ OpGetByIdWithThis::emit(this, kill(dst), base, thisVal, addConstant(property)); > return dst; > } > >@@ -2732,13 +2546,7 @@ RegisterID* BytecodeGenerator::emitDirectGetById(RegisterID* dst, RegisterID* ba > > m_codeBlock->addPropertyAccessInstruction(instructions().size()); > >- UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_id_direct); >- instructions().append(kill(dst)); >- instructions().append(base->index()); >- instructions().append(addConstant(property)); >- instructions().append(0); >- instructions().append(0); >- instructions().append(profile); >+ OpGetByIdDirect::emit(this, kill(dst), base, addConstant(property)); > return dst; > } > >@@ -2748,19 +2556,11 @@ RegisterID* BytecodeGenerator::emitPutById(RegisterID* base, const Identifier& p > > unsigned propertyIndex = addConstant(property); > >- m_staticPropertyAnalyzer.putById(base->index(), propertyIndex); >+ m_staticPropertyAnalyzer.putById(base, propertyIndex); > >- m_codeBlock->addPropertyAccessInstruction(instructions().size()); >+ // TODO: m_codeBlock->addPropertyAccessInstruction(m_writer.ref()); > >- emitOpcode(op_put_by_id); >- instructions().append(base->index()); >- instructions().append(propertyIndex); >- instructions().append(value->index()); >- instructions().append(0); // old structure >- instructions().append(0); // offset >- instructions().append(0); // new structure >- instructions().append(0); // structure chain >- instructions().append(static_cast<int>(PutByIdNone)); // is not direct >+ OpPutById::emit(this, base, propertyIndex, value, PutByIdNone); // is not direct > > return value; > } >@@ -2771,11 +2571,7 @@ RegisterID* BytecodeGenerator::emitPutById(RegisterID* base, RegisterID* thisVal > > unsigned propertyIndex = addConstant(property); > >- emitOpcode(op_put_by_id_with_this); >- instructions().append(base->index()); >- instructions().append(thisValue->index()); >- instructions().append(propertyIndex); >- instructions().append(value->index()); >+ 
OpPutByIdWithThis::emit(this, base, thisValue, propertyIndex, value); > > return value; > } >@@ -2786,76 +2582,48 @@ RegisterID* BytecodeGenerator::emitDirectPutById(RegisterID* base, const Identif > > unsigned propertyIndex = addConstant(property); > >- m_staticPropertyAnalyzer.putById(base->index(), propertyIndex); >+ m_staticPropertyAnalyzer.putById(base, propertyIndex); > > m_codeBlock->addPropertyAccessInstruction(instructions().size()); > >- emitOpcode(op_put_by_id); >- instructions().append(base->index()); >- instructions().append(propertyIndex); >- instructions().append(value->index()); >- instructions().append(0); // old structure >- instructions().append(0); // offset >- instructions().append(0); // new structure >- instructions().append(0); // structure chain (unused if direct) >- instructions().append(static_cast<int>((putType == PropertyNode::KnownDirect || property != m_vm->propertyNames->underscoreProto) ? PutByIdIsDirect : PutByIdNone)); >+ PutByIdFlags type = (putType == PropertyNode::KnownDirect || property != m_vm->propertyNames->underscoreProto) ? 
PutByIdIsDirect : PutByIdNone; >+ OpPutById::emit(this, base, propertyIndex, value, type); > return value; > } > > void BytecodeGenerator::emitPutGetterById(RegisterID* base, const Identifier& property, unsigned attributes, RegisterID* getter) > { > unsigned propertyIndex = addConstant(property); >- m_staticPropertyAnalyzer.putById(base->index(), propertyIndex); >+ m_staticPropertyAnalyzer.putById(base, propertyIndex); > >- emitOpcode(op_put_getter_by_id); >- instructions().append(base->index()); >- instructions().append(propertyIndex); >- instructions().append(attributes); >- instructions().append(getter->index()); >+ OpPutGetterById::emit(this, base, propertyIndex, attributes, getter); > } > > void BytecodeGenerator::emitPutSetterById(RegisterID* base, const Identifier& property, unsigned attributes, RegisterID* setter) > { > unsigned propertyIndex = addConstant(property); >- m_staticPropertyAnalyzer.putById(base->index(), propertyIndex); >+ m_staticPropertyAnalyzer.putById(base, propertyIndex); > >- emitOpcode(op_put_setter_by_id); >- instructions().append(base->index()); >- instructions().append(propertyIndex); >- instructions().append(attributes); >- instructions().append(setter->index()); >+ OpPutSetterById::emit(this, base, propertyIndex, attributes, setter); > } > > void BytecodeGenerator::emitPutGetterSetter(RegisterID* base, const Identifier& property, unsigned attributes, RegisterID* getter, RegisterID* setter) > { > unsigned propertyIndex = addConstant(property); > >- m_staticPropertyAnalyzer.putById(base->index(), propertyIndex); >+ m_staticPropertyAnalyzer.putById(base, propertyIndex); > >- emitOpcode(op_put_getter_setter_by_id); >- instructions().append(base->index()); >- instructions().append(propertyIndex); >- instructions().append(attributes); >- instructions().append(getter->index()); >- instructions().append(setter->index()); >+ OpPutGetterSetterById::emit(this, base, propertyIndex, attributes, getter, setter); > } > > void 
BytecodeGenerator::emitPutGetterByVal(RegisterID* base, RegisterID* property, unsigned attributes, RegisterID* getter) > { >- emitOpcode(op_put_getter_by_val); >- instructions().append(base->index()); >- instructions().append(property->index()); >- instructions().append(attributes); >- instructions().append(getter->index()); >+ OpPutGetterByVal::emit(this, base, property, attributes, getter); > } > > void BytecodeGenerator::emitPutSetterByVal(RegisterID* base, RegisterID* property, unsigned attributes, RegisterID* setter) > { >- emitOpcode(op_put_setter_by_val); >- instructions().append(base->index()); >- instructions().append(property->index()); >- instructions().append(attributes); >- instructions().append(setter->index()); >+ OpPutSetterByVal::emit(this, base, property, attributes, setter); > } > > void BytecodeGenerator::emitPutGeneratorFields(RegisterID* nextFunction) >@@ -2896,10 +2664,7 @@ void BytecodeGenerator::emitPutAsyncGeneratorFields(RegisterID* nextFunction) > > RegisterID* BytecodeGenerator::emitDeleteById(RegisterID* dst, RegisterID* base, const Identifier& property) > { >- emitOpcode(op_del_by_id); >- instructions().append(dst->index()); >- instructions().append(base->index()); >- instructions().append(addConstant(property)); >+ OpDelById::emit(this, dst, base, addConstant(property)); > return dst; > } > >@@ -2920,133 +2685,85 @@ RegisterID* BytecodeGenerator::emitGetByVal(RegisterID* dst, RegisterID* base, R > > ASSERT(context.type() == ForInContext::StructureForInContextType); > StructureForInContext& structureContext = static_cast<StructureForInContext&>(context); >- UnlinkedValueProfile profile = emitProfiledOpcode(op_get_direct_pname); >- instructions().append(kill(dst)); >- instructions().append(base->index()); >- instructions().append(property->index()); >- instructions().append(structureContext.index()->index()); >- instructions().append(structureContext.enumerator()->index()); >- instructions().append(profile); >- >- 
structureContext.addGetInst(instIndex, property->index(), profile); >+ OpGetDirectPname::emit(this, kill(dst), base, property, structureContext.index(), structureContext.enumerator()); >+ >+ structureContext.addGetInst(instIndex, property->index()); > return dst; > } > >- UnlinkedArrayProfile arrayProfile = newArrayProfile(); >- UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_val); >- instructions().append(kill(dst)); >- instructions().append(base->index()); >- instructions().append(property->index()); >- instructions().append(arrayProfile); >- instructions().append(profile); >+ OpGetByVal::emit(this, kill(dst), base, property); > return dst; > } > > RegisterID* BytecodeGenerator::emitGetByVal(RegisterID* dst, RegisterID* base, RegisterID* thisValue, RegisterID* property) > { >- UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_val_with_this); >- instructions().append(kill(dst)); >- instructions().append(base->index()); >- instructions().append(thisValue->index()); >- instructions().append(property->index()); >- instructions().append(profile); >+ OpGetByValWithThis::emit(this, kill(dst), base, thisValue, property); > return dst; > } > > RegisterID* BytecodeGenerator::emitPutByVal(RegisterID* base, RegisterID* property, RegisterID* value) > { >- UnlinkedArrayProfile arrayProfile = newArrayProfile(); >- emitOpcode(op_put_by_val); >- instructions().append(base->index()); >- instructions().append(property->index()); >- instructions().append(value->index()); >- instructions().append(arrayProfile); >- >+ OpPutByVal::emit(this, base, property, value); > return value; > } > > RegisterID* BytecodeGenerator::emitPutByVal(RegisterID* base, RegisterID* thisValue, RegisterID* property, RegisterID* value) > { >- emitOpcode(op_put_by_val_with_this); >- instructions().append(base->index()); >- instructions().append(thisValue->index()); >- instructions().append(property->index()); >- instructions().append(value->index()); >- >+ OpPutByValWithThis::emit(this, 
base, thisValue, property, value); > return value; > } > > RegisterID* BytecodeGenerator::emitDirectPutByVal(RegisterID* base, RegisterID* property, RegisterID* value) > { >- UnlinkedArrayProfile arrayProfile = newArrayProfile(); >- emitOpcode(op_put_by_val_direct); >- instructions().append(base->index()); >- instructions().append(property->index()); >- instructions().append(value->index()); >- instructions().append(arrayProfile); >+ OpPutByValDirect::emit(this, base, property, value); > return value; > } > > RegisterID* BytecodeGenerator::emitDeleteByVal(RegisterID* dst, RegisterID* base, RegisterID* property) > { >- emitOpcode(op_del_by_val); >- instructions().append(dst->index()); >- instructions().append(base->index()); >- instructions().append(property->index()); >+ OpDelByVal::emit(this, dst, base, property); > return dst; > } > > void BytecodeGenerator::emitSuperSamplerBegin() > { >- emitOpcode(op_super_sampler_begin); >+ OpSuperSamplerBegin::emit(this); > } > > void BytecodeGenerator::emitSuperSamplerEnd() > { >- emitOpcode(op_super_sampler_end); >+ OpSuperSamplerEnd::emit(this); > } > > RegisterID* BytecodeGenerator::emitIdWithProfile(RegisterID* src, SpeculatedType profile) > { >- emitOpcode(op_identity_with_profile); >- instructions().append(src->index()); >- instructions().append(static_cast<uint32_t>(profile >> 32)); >- instructions().append(static_cast<uint32_t>(profile)); >+ OpIdentityWithProfile::emit(this, src, static_cast<uint32_t>(profile >> 32), static_cast<uint32_t>(profile)); > return src; > } > > void BytecodeGenerator::emitUnreachable() > { >- emitOpcode(op_unreachable); >+ OpUnreachable::emit(this); > } > > RegisterID* BytecodeGenerator::emitGetArgument(RegisterID* dst, int32_t index) > { >- UnlinkedValueProfile profile = emitProfiledOpcode(op_get_argument); >- instructions().append(dst->index()); >- instructions().append(index + 1); // Including |this|. 
>- instructions().append(profile); >+ OpGetArgument::emit(this, dst, index + 1 /* Including |this| */); > return dst; > } > > RegisterID* BytecodeGenerator::emitCreateThis(RegisterID* dst) > { >- size_t begin = instructions().size(); >- m_staticPropertyAnalyzer.createThis(dst->index(), begin + 3); >+ m_staticPropertyAnalyzer.createThis(dst, m_writer.ref()); > > m_codeBlock->addPropertyAccessInstruction(instructions().size()); >- emitOpcode(op_create_this); >- instructions().append(dst->index()); >- instructions().append(dst->index()); >- instructions().append(0); >- instructions().append(0); >+ OpCreateThis::emit(this, dst, dst, 0); > return dst; > } > > void BytecodeGenerator::emitTDZCheck(RegisterID* target) > { >- emitOpcode(op_check_tdz); >- instructions().append(target->index()); >+ OpCheckTdz::emit(this, target); > } > > bool BytecodeGenerator::needsTDZCheck(const Variable& variable) >@@ -3146,13 +2863,9 @@ void BytecodeGenerator::restoreTDZStack(const BytecodeGenerator::PreservedTDZSta > > RegisterID* BytecodeGenerator::emitNewObject(RegisterID* dst) > { >- size_t begin = instructions().size(); >- m_staticPropertyAnalyzer.newObject(dst->index(), begin + 2); >+ m_staticPropertyAnalyzer.newObject(dst, m_writer.ref()); > >- emitOpcode(op_new_object); >- instructions().append(dst->index()); >- instructions().append(0); >- instructions().append(newObjectAllocationProfile()); >+ OpNewObject::emit(this, dst, 0); > return dst; > } > >@@ -3195,10 +2908,7 @@ RegisterID* BytecodeGenerator::addTemplateObjectConstant(Ref<TemplateObjectDescr > > RegisterID* BytecodeGenerator::emitNewArrayBuffer(RegisterID* dst, JSImmutableButterfly* array, IndexingType recommendedIndexingType) > { >- emitOpcode(op_new_array_buffer); >- instructions().append(dst->index()); >- instructions().append(addConstantValue(array)->index()); >- instructions().append(newArrayAllocationProfile(recommendedIndexingType)); >+ OpNewArrayBuffer::emit(this, dst, addConstantValue(array), 
recommendedIndexingType); > return dst; > } > >@@ -3216,11 +2926,7 @@ RegisterID* BytecodeGenerator::emitNewArray(RegisterID* dst, ElementNode* elemen > emitNode(argv.last().get(), n->value()); > } > ASSERT(!length); >- emitOpcode(op_new_array); >- instructions().append(dst->index()); >- instructions().append(argv.size() ? argv[0]->index() : 0); // argv >- instructions().append(argv.size()); // argc >- instructions().append(newArrayAllocationProfile(recommendedIndexingType)); >+ OpNewArray::emit(this, dst, argv.size() ? argv[0].get() : VirtualRegister {}, argv.size(), recommendedIndexingType); > return dst; > } > >@@ -3246,9 +2952,7 @@ RegisterID* BytecodeGenerator::emitNewArrayWithSpread(RegisterID* dst, ElementNo > RefPtr<RegisterID> tmp = newTemporary(); > emitNode(tmp.get(), expression); > >- emitOpcode(op_spread); >- instructions().append(argv[i].get()->index()); >- instructions().append(tmp.get()->index()); >+ OpSpread::emit(this, argv[i].get(), tmp.get()); > } else { > ExpressionNode* expression = node->value(); > emitNode(argv[i].get(), expression); >@@ -3258,30 +2962,19 @@ RegisterID* BytecodeGenerator::emitNewArrayWithSpread(RegisterID* dst, ElementNo > } > > unsigned bitVectorIndex = m_codeBlock->addBitVector(WTFMove(bitVector)); >- emitOpcode(op_new_array_with_spread); >- instructions().append(dst->index()); >- instructions().append(argv[0]->index()); // argv >- instructions().append(argv.size()); // argc >- instructions().append(bitVectorIndex); >- >+ OpNewArrayWithSpread::emit(this, dst, argv[0].get(), argv.size(), bitVectorIndex); > return dst; > } > > RegisterID* BytecodeGenerator::emitNewArrayWithSize(RegisterID* dst, RegisterID* length) > { >- emitOpcode(op_new_array_with_size); >- instructions().append(dst->index()); >- instructions().append(length->index()); >- instructions().append(newArrayAllocationProfile(ArrayWithUndecided)); >- >+ OpNewArrayWithSize::emit(this, dst, length); > return dst; > } > > RegisterID* 
BytecodeGenerator::emitNewRegExp(RegisterID* dst, RegExp* regExp) > { >- emitOpcode(op_new_regexp); >- instructions().append(dst->index()); >- instructions().append(addConstantValue(regExp)->index()); >+ OpNewRegexp::emit(this, dst, addConstantValue(regExp)); > return dst; > } > >@@ -3289,30 +2982,25 @@ void BytecodeGenerator::emitNewFunctionExpressionCommon(RegisterID* dst, Functio > { > unsigned index = m_codeBlock->addFunctionExpr(makeFunction(function)); > >- OpcodeID opcodeID = op_new_func_exp; > switch (function->parseMode()) { > case SourceParseMode::GeneratorWrapperFunctionMode: > case SourceParseMode::GeneratorWrapperMethodMode: >- opcodeID = op_new_generator_func_exp; >+ OpNewGeneratorFuncExp::emit(this, dst, scopeRegister(), index); > break; > case SourceParseMode::AsyncFunctionMode: > case SourceParseMode::AsyncMethodMode: > case SourceParseMode::AsyncArrowFunctionMode: >- opcodeID = op_new_async_func_exp; >+ OpNewAsyncFuncExp::emit(this, dst, scopeRegister(), index); > break; > case SourceParseMode::AsyncGeneratorWrapperFunctionMode: > case SourceParseMode::AsyncGeneratorWrapperMethodMode: > ASSERT(Options::useAsyncIterator()); >- opcodeID = op_new_async_generator_func_exp; >+ OpNewAsyncGeneratorFuncExp::emit(this, dst, scopeRegister(), index); > break; > default: >+ OpNewFuncExp::emit(this, dst, scopeRegister(), index); > break; > } >- >- emitOpcode(opcodeID); >- instructions().append(dst->index()); >- instructions().append(scopeRegister()->index()); >- instructions().append(index); > } > > RegisterID* BytecodeGenerator::emitNewFunctionExpression(RegisterID* dst, FuncExprNode* func) >@@ -3345,10 +3033,7 @@ RegisterID* BytecodeGenerator::emitNewDefaultConstructor(RegisterID* dst, Constr > > unsigned index = m_codeBlock->addFunctionExpr(executable); > >- emitOpcode(op_new_func_exp); >- instructions().append(dst->index()); >- instructions().append(scopeRegister()->index()); >- instructions().append(index); >+ OpNewFuncExp::emit(this, dst, 
scopeRegister(), index); > return dst; > } > >@@ -3356,17 +3041,14 @@ RegisterID* BytecodeGenerator::emitNewFunction(RegisterID* dst, FunctionMetadata > { > unsigned index = m_codeBlock->addFunctionDecl(makeFunction(function)); > if (isGeneratorWrapperParseMode(function->parseMode())) >- emitOpcode(op_new_generator_func); >+ OpNewGeneratorFunc::emit(this, dst, scopeRegister(), index); > else if (function->parseMode() == SourceParseMode::AsyncFunctionMode) >- emitOpcode(op_new_async_func); >+ OpNewAsyncFunc::emit(this, dst, scopeRegister(), index); > else if (isAsyncGeneratorWrapperParseMode(function->parseMode())) { > ASSERT(Options::useAsyncIterator()); >- emitOpcode(op_new_async_generator_func); >+ OpNewAsyncGeneratorFunc::emit(this, dst, scopeRegister(), index); > } else >- emitOpcode(op_new_func); >- instructions().append(dst->index()); >- instructions().append(scopeRegister()->index()); >- instructions().append(index); >+ OpNewFunc::emit(this, dst, scopeRegister(), index); > return dst; > } > >@@ -3387,28 +3069,26 @@ void BytecodeGenerator::emitSetFunctionNameIfNeeded(ExpressionNode* valueNode, R > > // FIXME: We should use an op_call to an internal function here instead. 
> // https://bugs.webkit.org/show_bug.cgi?id=155547 >- emitOpcode(op_set_function_name); >- instructions().append(value->index()); >- instructions().append(name->index()); >+ OpSetFunctionName::emit(this, value, name); > } > > RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) > { >- return emitCall(op_call, dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall); >+ return emitCall<OpCall>(dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall); > } > > RegisterID* BytecodeGenerator::emitCallInTailPosition(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) > { > if (m_inTailPosition) { > m_codeBlock->setHasTailCalls(); >- return emitCall(op_tail_call, dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall); >+ return emitCall<OpTailCall>(dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall); > } >- return emitCall(op_call, dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall); >+ return emitCall<OpCall>(dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall); > } > > RegisterID* BytecodeGenerator::emitCallEval(RegisterID* dst, RegisterID* func, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) > { >- return emitCall(op_call_eval, dst, func, NoExpectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall); >+ return emitCall<OpCallEval>(dst, func, 
NoExpectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall); > } > > ExpectedFunction BytecodeGenerator::expectedFunctionForIdentifier(const Identifier& identifier) >@@ -3429,12 +3109,7 @@ ExpectedFunction BytecodeGenerator::emitExpectedFunctionSnippet(RegisterID* dst, > if (callArguments.argumentCountIncludingThis() >= 2) > return NoExpectedFunction; > >- size_t begin = instructions().size(); >- emitOpcode(op_jneq_ptr); >- instructions().append(func->index()); >- instructions().append(Special::ObjectConstructor); >- instructions().append(realCall->bind(begin, instructions().size())); >- instructions().append(0); >+ OpJneqPtr::emit(this, func, Special::ObjectConstructor, realCall->bind(this)); > > if (dst != ignoredResult()) > emitNewObject(dst); >@@ -3450,23 +3125,14 @@ ExpectedFunction BytecodeGenerator::emitExpectedFunctionSnippet(RegisterID* dst, > if (callArguments.argumentCountIncludingThis() > 2) > return NoExpectedFunction; > >- size_t begin = instructions().size(); >- emitOpcode(op_jneq_ptr); >- instructions().append(func->index()); >- instructions().append(Special::ArrayConstructor); >- instructions().append(realCall->bind(begin, instructions().size())); >- instructions().append(0); >+ OpJneqPtr::emit(this, func, Special::ArrayConstructor, realCall->bind(this)); > > if (dst != ignoredResult()) { > if (callArguments.argumentCountIncludingThis() == 2) > emitNewArrayWithSize(dst, callArguments.argumentRegister(0)); > else { > ASSERT(callArguments.argumentCountIncludingThis() == 1); >- emitOpcode(op_new_array); >- instructions().append(dst->index()); >- instructions().append(0); >- instructions().append(0); >- instructions().append(newArrayAllocationProfile(ArrayWithUndecided)); >+ OpNewArray::emit(this, dst, {}, 0, ArrayWithUndecided); > } > } > break; >@@ -3477,16 +3143,16 @@ ExpectedFunction BytecodeGenerator::emitExpectedFunctionSnippet(RegisterID* dst, > return NoExpectedFunction; > } > >- size_t begin = instructions().size(); >- 
emitOpcode(op_jmp); >- instructions().append(done.bind(begin, instructions().size())); >+ OpJmp::emit(this, done.bind(this)); > emitLabel(realCall.get()); > > return expectedFunction; > } > >-RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) >+template<typename CallOp> >+RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) > { >+ constexpr auto opcodeID = CallOp::opcodeID(); > ASSERT(opcodeID == op_call || opcodeID == op_call_eval || opcodeID == op_tail_call); > ASSERT(func->refCount()); > >@@ -3502,18 +3168,15 @@ RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, Regi > if (elements && !elements->next() && elements->value()->isSpreadExpression()) { > ExpressionNode* expression = static_cast<SpreadExpressionNode*>(elements->value())->expression(); > RefPtr<RegisterID> argumentRegister = emitNode(callArguments.argumentRegister(0), expression); >- emitOpcode(op_spread); >- instructions().append(argumentRegister.get()->index()); >- instructions().append(argumentRegister.get()->index()); >+ OpSpread::emit(this, argumentRegister.get(), argumentRegister.get()); > >- RefPtr<RegisterID> thisRegister = move(newTemporary(), callArguments.thisRegister()); >- return emitCallVarargs(opcodeID == op_tail_call ? 
op_tail_call_varargs : op_call_varargs, dst, func, callArguments.thisRegister(), argumentRegister.get(), newTemporary(), 0, divot, divotStart, divotEnd, debuggableCall); >+ return emitCallVarargs<typename VarArgsOp<CallOp>::type>(dst, func, callArguments.thisRegister(), argumentRegister.get(), newTemporary(), 0, divot, divotStart, divotEnd, debuggableCall); > } > } > RefPtr<RegisterID> argumentRegister; > argumentRegister = expression->emitBytecode(*this, callArguments.argumentRegister(0)); > RefPtr<RegisterID> thisRegister = move(newTemporary(), callArguments.thisRegister()); >- return emitCallVarargs(opcodeID == op_tail_call ? op_tail_call_varargs : op_call_varargs, dst, func, callArguments.thisRegister(), argumentRegister.get(), newTemporary(), 0, divot, divotStart, divotEnd, debuggableCall); >+ return emitCallVarargs<typename VarArgsOp<CallOp>::type>(dst, func, callArguments.thisRegister(), argumentRegister.get(), newTemporary(), 0, divot, divotStart, divotEnd, debuggableCall); > } > for (; n; n = n->m_next) > emitNode(callArguments.argumentRegister(argument++), n); >@@ -3536,18 +3199,9 @@ RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, Regi > emitLogShadowChickenTailIfNecessary(); > > // Emit call. 
>- UnlinkedArrayProfile arrayProfile = newArrayProfile(); >- UnlinkedValueProfile profile = emitProfiledOpcode(opcodeID); > ASSERT(dst); > ASSERT(dst != ignoredResult()); >- instructions().append(dst->index()); >- instructions().append(func->index()); >- instructions().append(callArguments.argumentCountIncludingThis()); >- instructions().append(callArguments.stackOffset()); >- instructions().append(m_codeBlock->addLLIntCallLinkInfo()); >- instructions().append(0); >- instructions().append(arrayProfile); >- instructions().append(profile); >+ CallOp::emit(this, dst, func, callArguments.argumentCountIncludingThis(), callArguments.stackOffset()); > > if (expectedFunction != NoExpectedFunction) > emitLabel(done.get()); >@@ -3557,47 +3211,41 @@ RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, Regi > > RegisterID* BytecodeGenerator::emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) > { >- return emitCallVarargs(op_call_varargs, dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); >+ return emitCallVarargs<OpCallVarargs>(dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); > } > > RegisterID* BytecodeGenerator::emitCallVarargsInTailPosition(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) > { >- return emitCallVarargs(m_inTailPosition ? 
op_tail_call_varargs : op_call_varargs, dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); >+ if (m_inTailPosition) >+ return emitCallVarargs<OpTailCallVarargs>(dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); >+ return emitCallVarargs<OpCallVarargs>(dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); > } > > RegisterID* BytecodeGenerator::emitConstructVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) > { >- return emitCallVarargs(op_construct_varargs, dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); >+ return emitCallVarargs<OpConstructVarargs>(dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); > } > > RegisterID* BytecodeGenerator::emitCallForwardArgumentsInTailPosition(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) > { > ASSERT(m_inTailPosition); >- return emitCallVarargs(op_tail_call_forward_arguments, dst, func, thisRegister, nullptr, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); >+ return emitCallVarargs<OpTailCallForwardArguments>(dst, func, thisRegister, nullptr, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); > } > >-RegisterID* BytecodeGenerator::emitCallVarargs(OpcodeID opcode, RegisterID* dst, RegisterID* func, RegisterID* 
thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) >+template<typename VarargsOp> >+RegisterID* BytecodeGenerator::emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) > { > if (m_shouldEmitDebugHooks && debuggableCall == DebuggableCall::Yes) > emitDebugHook(WillExecuteExpression, divotStart); > > emitExpressionInfo(divot, divotStart, divotEnd); > >- if (opcode == op_tail_call_varargs) >+ if (VarargsOp::opcodeID() == op_tail_call_varargs) > emitLogShadowChickenTailIfNecessary(); > > // Emit call. >- UnlinkedArrayProfile arrayProfile = newArrayProfile(); >- UnlinkedValueProfile profile = emitProfiledOpcode(opcode); > ASSERT(dst != ignoredResult()); >- instructions().append(dst->index()); >- instructions().append(func->index()); >- instructions().append(thisRegister ? thisRegister->index() : 0); >- instructions().append(arguments ? 
arguments->index() : 0); >- instructions().append(firstFreeRegister->index()); >- instructions().append(firstVarArgOffset); >- instructions().append(arrayProfile); >- instructions().append(profile); >+ VarargsOp::emit(this, dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset); > return dst; > } > >@@ -3605,17 +3253,14 @@ void BytecodeGenerator::emitLogShadowChickenPrologueIfNecessary() > { > if (!m_shouldEmitDebugHooks && !Options::alwaysUseShadowChicken()) > return; >- emitOpcode(op_log_shadow_chicken_prologue); >- instructions().append(scopeRegister()->index()); >+ OpLogShadowChickenPrologue::emit(this, scopeRegister()); > } > > void BytecodeGenerator::emitLogShadowChickenTailIfNecessary() > { > if (!m_shouldEmitDebugHooks && !Options::alwaysUseShadowChicken()) > return; >- emitOpcode(op_log_shadow_chicken_tail); >- instructions().append(thisRegister()->index()); >- instructions().append(scopeRegister()->index()); >+ OpLogShadowChickenTail::emit(this, thisRegister(), scopeRegister()); > } > > void BytecodeGenerator::emitCallDefineProperty(RegisterID* newObj, RegisterID* propertyNameRegister, >@@ -3661,18 +3306,9 @@ void BytecodeGenerator::emitCallDefineProperty(RegisterID* newObj, RegisterID* p > else > setter = throwTypeErrorFunction; > >- emitOpcode(op_define_accessor_property); >- instructions().append(newObj->index()); >- instructions().append(propertyNameRegister->index()); >- instructions().append(getter->index()); >- instructions().append(setter->index()); >- instructions().append(emitLoad(nullptr, jsNumber(attributes.rawRepresentation()))->index()); >+ OpDefineAccessorProperty::emit(this, newObj, propertyNameRegister, getter.get(), setter.get(), emitLoad(nullptr, jsNumber(attributes.rawRepresentation()))); > } else { >- emitOpcode(op_define_data_property); >- instructions().append(newObj->index()); >- instructions().append(propertyNameRegister->index()); >- instructions().append(valueRegister->index()); >- 
instructions().append(emitLoad(nullptr, jsNumber(attributes.rawRepresentation()))->index()); >+ OpDefineDataProperty::emit(this, newObj, propertyNameRegister, valueRegister, emitLoad(nullptr, jsNumber(attributes.rawRepresentation()))); > } > } > >@@ -3696,21 +3332,22 @@ RegisterID* BytecodeGenerator::emitReturn(RegisterID* src, ReturnFrom from) > emitLabel(isUndefinedLabel.get()); > emitTDZCheck(&m_thisRegister); > } >- emitUnaryNoDstOp(op_ret, &m_thisRegister); >+ OpRet::emit(this, &m_thisRegister); > emitLabel(isObjectLabel.get()); > } > } > >- return emitUnaryNoDstOp(op_ret, src); >+ OpRet::emit(this, src); >+ return src; > } > >-RegisterID* BytecodeGenerator::emitUnaryNoDstOp(OpcodeID opcodeID, RegisterID* src) >+RegisterID* BytecodeGenerator::emitEnd(RegisterID* src) > { >- emitOpcode(opcodeID); >- instructions().append(src->index()); >+ OpEnd::emit(this, src); > return src; > } > >+ > RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, RegisterID* lazyThis, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd) > { > ASSERT(func->refCount()); >@@ -3728,9 +3365,7 @@ RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, > if (elements && !elements->next() && elements->value()->isSpreadExpression()) { > ExpressionNode* expression = static_cast<SpreadExpressionNode*>(elements->value())->expression(); > RefPtr<RegisterID> argumentRegister = emitNode(callArguments.argumentRegister(0), expression); >- emitOpcode(op_spread); >- instructions().append(argumentRegister.get()->index()); >- instructions().append(argumentRegister.get()->index()); >+ OpSpread::emit(this, argumentRegister.get(), argumentRegister.get()); > > move(callArguments.thisRegister(), lazyThis); > RefPtr<RegisterID> thisRegister = move(newTemporary(), callArguments.thisRegister()); >@@ -3759,16 +3394,7 @@ RegisterID* 
BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, > Ref<Label> done = newLabel(); > expectedFunction = emitExpectedFunctionSnippet(dst, func, expectedFunction, callArguments, done.get()); > >- UnlinkedValueProfile profile = emitProfiledOpcode(op_construct); >- ASSERT(dst != ignoredResult()); >- instructions().append(dst->index()); >- instructions().append(func->index()); >- instructions().append(callArguments.argumentCountIncludingThis()); >- instructions().append(callArguments.stackOffset()); >- instructions().append(m_codeBlock->addLLIntCallLinkInfo()); >- instructions().append(0); >- instructions().append(0); >- instructions().append(profile); >+ OpConstruct::emit(this, dst, func, callArguments.argumentCountIncludingThis(), callArguments.stackOffset()); > > if (expectedFunction != NoExpectedFunction) > emitLabel(done.get()); >@@ -3778,25 +3404,18 @@ RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, > > RegisterID* BytecodeGenerator::emitStrcat(RegisterID* dst, RegisterID* src, int count) > { >- emitOpcode(op_strcat); >- instructions().append(dst->index()); >- instructions().append(src->index()); >- instructions().append(count); >- >+ OpStrcat::emit(this, dst, src, count); > return dst; > } > > void BytecodeGenerator::emitToPrimitive(RegisterID* dst, RegisterID* src) > { >- emitOpcode(op_to_primitive); >- instructions().append(dst->index()); >- instructions().append(src->index()); >+ OpToPrimitive::emit(this, dst, src); > } > > void BytecodeGenerator::emitGetScope() > { >- emitOpcode(op_get_scope); >- instructions().append(scopeRegister()->index()); >+ OpGetScope::emit(this, scopeRegister()); > } > > RegisterID* BytecodeGenerator::emitPushWithScope(RegisterID* objectScope) >@@ -3805,10 +3424,7 @@ RegisterID* BytecodeGenerator::emitPushWithScope(RegisterID* objectScope) > RegisterID* newScope = newBlockScopeVariable(); > newScope->ref(); > >- emitOpcode(op_push_with_scope); >- 
instructions().append(newScope->index()); >- instructions().append(scopeRegister()->index()); >- instructions().append(objectScope->index()); >+ OpPushWithScope::emit(this, newScope, scopeRegister(), objectScope); > > move(scopeRegister(), newScope); > m_lexicalScopeStack.append({ nullptr, newScope, true, 0 }); >@@ -3818,9 +3434,7 @@ RegisterID* BytecodeGenerator::emitPushWithScope(RegisterID* objectScope) > > RegisterID* BytecodeGenerator::emitGetParentScope(RegisterID* dst, RegisterID* scope) > { >- emitOpcode(op_get_parent_scope); >- instructions().append(dst->index()); >- instructions().append(scope->index()); >+ OpGetParentScope::emit(this, dst, scope); > return dst; > } > >@@ -3845,9 +3459,7 @@ void BytecodeGenerator::emitDebugHook(DebugHookType debugHookType, const JSTextP > return; > > emitExpressionInfo(divot, divot, divot); >- emitOpcode(op_debug); >- instructions().append(debugHookType); >- instructions().append(false); >+ OpDebug::emit(this, debugHookType, false); > } > > void BytecodeGenerator::emitDebugHook(DebugHookType debugHookType, unsigned line, unsigned charOffset, unsigned lineStart) >@@ -4006,7 +3618,7 @@ void BytecodeGenerator::popTry(TryData* tryData, Label& end) > > void BytecodeGenerator::emitCatch(RegisterID* exceptionRegister, RegisterID* thrownValueRegister, TryData* data) > { >- m_catchesToEmit.append(CatchEntry { data, exceptionRegister->index(), thrownValueRegister->index() }); >+ m_catchesToEmit.append(CatchEntry { data, exceptionRegister, thrownValueRegister }); > } > > void BytecodeGenerator::restoreScopeRegister(int lexicalScopeIndex) >@@ -4046,6 +3658,18 @@ int BytecodeGenerator::labelScopeDepthToLexicalScopeIndex(int targetLabelScopeDe > return targetScope.lexicalScopeIndex; > } > >+void BytecodeGenerator::emitThrow(RegisterID* exc) >+{ >+ m_usesExceptions = true; >+ OpThrow::emit(this, exc); >+} >+ >+RegisterID* BytecodeGenerator::emitArgumentCount(RegisterID* dst) >+{ >+ OpArgumentCount::emit(this, dst); >+ return dst; >+} >+ 
> int BytecodeGenerator::localScopeDepth() const > { > return m_localScopeDepth; >@@ -4062,16 +3686,12 @@ void BytecodeGenerator::emitThrowStaticError(ErrorType errorType, RegisterID* ra > { > RefPtr<RegisterID> message = newTemporary(); > emitToString(message.get(), raw); >- emitOpcode(op_throw_static_error); >- instructions().append(message->index()); >- instructions().append(static_cast<unsigned>(errorType)); >+ OpThrowStaticError::emit(this, message.get(), errorType); > } > > void BytecodeGenerator::emitThrowStaticError(ErrorType errorType, const Identifier& message) > { >- emitOpcode(op_throw_static_error); >- instructions().append(addConstantValue(addStringConstant(message))->index()); >- instructions().append(static_cast<unsigned>(errorType)); >+ OpThrowStaticError::emit(this, addConstantValue(addStringConstant(message)), errorType); > } > > void BytecodeGenerator::emitThrowReferenceError(const String& message) >@@ -4153,21 +3773,18 @@ void BytecodeGenerator::beginSwitch(RegisterID* scrutineeRegister, SwitchInfo::S > SwitchInfo info = { static_cast<uint32_t>(instructions().size()), type }; > switch (type) { > case SwitchInfo::SwitchImmediate: >- emitOpcode(op_switch_imm); >+ OpSwitchImm::emit(this, 0, 0, scrutineeRegister); > break; > case SwitchInfo::SwitchCharacter: >- emitOpcode(op_switch_char); >+ OpSwitchChar::emit(this, 0, 0, scrutineeRegister); > break; > case SwitchInfo::SwitchString: >- emitOpcode(op_switch_string); >+ OpSwitchString::emit(this, 0, 0, scrutineeRegister); > break; > default: > RELEASE_ASSERT_NOT_REACHED(); > } > >- instructions().append(0); // place holder for table index >- instructions().append(0); // place holder for default target >- instructions().append(scrutineeRegister->index()); > m_switchContextStack.append(info); > } > >@@ -4208,7 +3825,7 @@ static void prepareJumpTableForSwitch( > // We're emitting this after the clause labels should have been fixed, so > // the labels should not be "forward" references > 
ASSERT(!labels[i]->isForward()); >- jumpTable.add(keyGetter(nodes[i], min, max), labels[i]->bind(switchAddress, switchAddress + 3)); >+ jumpTable.add(keyGetter(nodes[i], min, max), labels[i]->bind(switchAddress)); > } > } > >@@ -4221,7 +3838,7 @@ static void prepareJumpTableForStringSwitch(UnlinkedStringJumpTable& jumpTable, > > ASSERT(nodes[i]->isString()); > StringImpl* clause = static_cast<StringNode*>(nodes[i])->value().impl(); >- jumpTable.offsetTable.add(clause, UnlinkedStringJumpTable::OffsetLocation { labels[i]->bind(switchAddress, switchAddress + 3) }); >+ jumpTable.offsetTable.add(clause, UnlinkedStringJumpTable::OffsetLocation { labels[i]->bind(switchAddress) }); > } > } > >@@ -4229,12 +3846,10 @@ void BytecodeGenerator::endSwitch(uint32_t clauseCount, const Vector<Ref<Label>, > { > SwitchInfo switchInfo = m_switchContextStack.last(); > m_switchContextStack.removeLast(); >- >- switch (switchInfo.switchType) { >- case SwitchInfo::SwitchImmediate: >- case SwitchInfo::SwitchCharacter: { >- instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfSwitchJumpTables(); >- instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel.bind(switchInfo.bytecodeOffset, switchInfo.bytecodeOffset + 3); >+ >+ auto handleSwitch = [&](auto* op) { >+ op->setTableIndex(m_codeBlock->numberOfSwitchJumpTables()); >+ op->setDefaultOffset(defaultLabel.bind(switchInfo.bytecodeOffset)); > > UnlinkedSimpleJumpTable& jumpTable = m_codeBlock->addSwitchJumpTable(); > prepareJumpTableForSwitch( >@@ -4242,12 +3857,24 @@ void BytecodeGenerator::endSwitch(uint32_t clauseCount, const Vector<Ref<Label>, > switchInfo.switchType == SwitchInfo::SwitchImmediate > ? 
keyForImmediateSwitch > : keyForCharacterSwitch); >+ }; >+ >+ switch (switchInfo.switchType) { >+ case SwitchInfo::SwitchImmediate: { >+ auto* op = m_writer.ref(switchInfo.bytecodeOffset)->cast<OpSwitchImm>(); >+ handleSwitch(op); >+ break; >+ } >+ case SwitchInfo::SwitchCharacter: { >+ auto* op = m_writer.ref(switchInfo.bytecodeOffset)->cast<OpSwitchChar>(); >+ handleSwitch(op); > break; > } > > case SwitchInfo::SwitchString: { >- instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfStringSwitchJumpTables(); >- instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel.bind(switchInfo.bytecodeOffset, switchInfo.bytecodeOffset + 3); >+ auto* op = m_writer.ref(switchInfo.bytecodeOffset)->cast<OpSwitchString>(); >+ op->setTableIndex(m_codeBlock->numberOfSwitchJumpTables()); >+ op->setDefaultOffset(defaultLabel.bind(switchInfo.bytecodeOffset)); > > UnlinkedStringJumpTable& jumpTable = m_codeBlock->addStringSwitchJumpTable(); > prepareJumpTableForStringSwitch(jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes); >@@ -4459,114 +4086,79 @@ RegisterID* BytecodeGenerator::emitGetGlobalPrivate(RegisterID* dst, const Ident > > RegisterID* BytecodeGenerator::emitGetEnumerableLength(RegisterID* dst, RegisterID* base) > { >- emitOpcode(op_get_enumerable_length); >- instructions().append(dst->index()); >- instructions().append(base->index()); >+ OpGetEnumerableLength::emit(this, dst, base); > return dst; > } > > RegisterID* BytecodeGenerator::emitHasGenericProperty(RegisterID* dst, RegisterID* base, RegisterID* propertyName) > { >- emitOpcode(op_has_generic_property); >- instructions().append(dst->index()); >- instructions().append(base->index()); >- instructions().append(propertyName->index()); >+ OpHasGenericProperty::emit(this, dst, base, propertyName); > return dst; > } > > RegisterID* BytecodeGenerator::emitHasIndexedProperty(RegisterID* dst, RegisterID* base, RegisterID* propertyName) > { >- UnlinkedArrayProfile arrayProfile = 
newArrayProfile(); >- emitOpcode(op_has_indexed_property); >- instructions().append(dst->index()); >- instructions().append(base->index()); >- instructions().append(propertyName->index()); >- instructions().append(arrayProfile); >+ OpHasIndexedProperty::emit(this, dst, base, propertyName); > return dst; > } > > RegisterID* BytecodeGenerator::emitHasStructureProperty(RegisterID* dst, RegisterID* base, RegisterID* propertyName, RegisterID* enumerator) > { >- emitOpcode(op_has_structure_property); >- instructions().append(dst->index()); >- instructions().append(base->index()); >- instructions().append(propertyName->index()); >- instructions().append(enumerator->index()); >+ OpHasStructureProperty::emit(this, dst, base, propertyName, enumerator); > return dst; > } > > RegisterID* BytecodeGenerator::emitGetPropertyEnumerator(RegisterID* dst, RegisterID* base) > { >- emitOpcode(op_get_property_enumerator); >- instructions().append(dst->index()); >- instructions().append(base->index()); >+ OpGetPropertyEnumerator::emit(this, dst, base); > return dst; > } > > RegisterID* BytecodeGenerator::emitEnumeratorStructurePropertyName(RegisterID* dst, RegisterID* enumerator, RegisterID* index) > { >- emitOpcode(op_enumerator_structure_pname); >- instructions().append(dst->index()); >- instructions().append(enumerator->index()); >- instructions().append(index->index()); >+ OpEnumeratorStructurePname::emit(this, dst, enumerator, index); > return dst; > } > > RegisterID* BytecodeGenerator::emitEnumeratorGenericPropertyName(RegisterID* dst, RegisterID* enumerator, RegisterID* index) > { >- emitOpcode(op_enumerator_generic_pname); >- instructions().append(dst->index()); >- instructions().append(enumerator->index()); >- instructions().append(index->index()); >+ OpEnumeratorGenericPname::emit(this, dst, enumerator, index); > return dst; > } > > RegisterID* BytecodeGenerator::emitToIndexString(RegisterID* dst, RegisterID* index) > { >- emitOpcode(op_to_index_string); >- 
instructions().append(dst->index()); >- instructions().append(index->index()); >+ OpToIndexString::emit(this, dst, index); > return dst; > } > > RegisterID* BytecodeGenerator::emitIsCellWithType(RegisterID* dst, RegisterID* src, JSType type) > { >- emitOpcode(op_is_cell_with_type); >- instructions().append(dst->index()); >- instructions().append(src->index()); >- instructions().append(type); >+ OpIsCellWithType::emit(this, dst, src, type); > return dst; > } > > RegisterID* BytecodeGenerator::emitIsObject(RegisterID* dst, RegisterID* src) > { >- emitOpcode(op_is_object); >- instructions().append(dst->index()); >- instructions().append(src->index()); >+ OpIsObject::emit(this, dst, src); > return dst; > } > > RegisterID* BytecodeGenerator::emitIsNumber(RegisterID* dst, RegisterID* src) > { >- emitOpcode(op_is_number); >- instructions().append(dst->index()); >- instructions().append(src->index()); >+ OpIsNumber::emit(this, dst, src); > return dst; > } > > RegisterID* BytecodeGenerator::emitIsUndefined(RegisterID* dst, RegisterID* src) > { >- emitOpcode(op_is_undefined); >- instructions().append(dst->index()); >- instructions().append(src->index()); >+ OpIsUndefined::emit(this, dst, src); > return dst; > } > > RegisterID* BytecodeGenerator::emitIsEmpty(RegisterID* dst, RegisterID* src) > { >- emitOpcode(op_is_empty); >- instructions().append(dst->index()); >- instructions().append(src->index()); >+ OpIsEmpty::emit(this, dst, src); > return dst; > } > >@@ -4771,14 +4363,9 @@ void BytecodeGenerator::invalidateForInContextForLocal(RegisterID* localRegister > RegisterID* BytecodeGenerator::emitRestParameter(RegisterID* result, unsigned numParametersToSkip) > { > RefPtr<RegisterID> restArrayLength = newTemporary(); >- emitOpcode(op_get_rest_length); >- instructions().append(restArrayLength->index()); >- instructions().append(numParametersToSkip); >+ OpGetRestLength::emit(this, restArrayLength.get(), numParametersToSkip); > >- emitOpcode(op_create_rest); >- 
instructions().append(result->index()); >- instructions().append(restArrayLength->index()); >- instructions().append(numParametersToSkip); >+ OpCreateRest::emit(this, result, restArrayLength.get(), numParametersToSkip); > > return result; > } >@@ -4789,9 +4376,7 @@ void BytecodeGenerator::emitRequireObjectCoercible(RegisterID* value, const Stri > // thus incorrectly throws a TypeError for interfaces like HTMLAllCollection. > Ref<Label> target = newLabel(); > size_t begin = instructions().size(); >- emitOpcode(op_jneq_null); >- instructions().append(value->index()); >- instructions().append(target->bind(begin, instructions().size())); >+ OpJneqNull::emit(this, value, target->bind(begin)); > emitThrowTypeError(error); > emitLabel(target.get()); > } >@@ -4822,10 +4407,7 @@ void BytecodeGenerator::emitYieldPoint(RegisterID* argument, JSAsyncGeneratorFun > Vector<TryContext> savedTryContextStack; > m_tryContextStack.swap(savedTryContextStack); > >- emitOpcode(op_yield); >- instructions().append(generatorFrameRegister()->index()); >- instructions().append(yieldPointIndex); >- instructions().append(argument->index()); >+ OpYield::emit(this, generatorFrameRegister(), yieldPointIndex, argument); > > // Restore the try contexts, which start offset is updated to the merge point. 
> m_tryContextStack.swap(savedTryContextStack); >@@ -4838,11 +4420,11 @@ RegisterID* BytecodeGenerator::emitYield(RegisterID* argument, JSAsyncGeneratorF > > Ref<Label> normalLabel = newLabel(); > RefPtr<RegisterID> condition = newTemporary(); >- emitEqualityOp(op_stricteq, condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::NormalMode)))); >+ emitEqualityOp<OpStricteq>(condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::NormalMode)))); > emitJumpIfTrue(condition.get(), normalLabel.get()); > > Ref<Label> throwLabel = newLabel(); >- emitEqualityOp(op_stricteq, condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ThrowMode)))); >+ emitEqualityOp<OpStricteq>(condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ThrowMode)))); > emitJumpIfTrue(condition.get(), throwLabel.get()); > // Return. 
> { >@@ -4891,7 +4473,7 @@ RegisterID* BytecodeGenerator::emitGetAsyncIterator(RegisterID* argument, Throwa > Ref<Label> asyncIteratorFound = newLabel(); > Ref<Label> iteratorReceived = newLabel(); > >- emitJumpIfTrue(emitUnaryOp(op_eq_null, newTemporary(), iterator.get()), asyncIteratorNotFound.get()); >+ emitJumpIfTrue(emitUnaryOp<OpEqNull>(newTemporary(), iterator.get()), asyncIteratorNotFound.get()); > > emitJump(asyncIteratorFound.get()); > emitLabel(asyncIteratorNotFound.get()); >@@ -4950,10 +4532,10 @@ RegisterID* BytecodeGenerator::emitDelegateYield(RegisterID* argument, Throwable > Ref<Label> returnLabel = newLabel(); > { > RefPtr<RegisterID> condition = newTemporary(); >- emitEqualityOp(op_stricteq, condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::NormalMode)))); >+ emitEqualityOp<OpStricteq>(condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::NormalMode)))); > emitJumpIfTrue(condition.get(), normalLabel.get()); > >- emitEqualityOp(op_stricteq, condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ReturnMode)))); >+ emitEqualityOp<OpStricteq>(condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ReturnMode)))); > emitJumpIfTrue(condition.get(), returnLabel.get()); > > // Fallthrough to ThrowMode. 
>@@ -5120,7 +4702,7 @@ bool BytecodeGenerator::emitReturnViaFinallyIfNeeded(RegisterID* returnRegister) > void BytecodeGenerator::emitFinallyCompletion(FinallyContext& context, RegisterID* completionTypeRegister, Label& normalCompletionLabel) > { > if (context.numberOfBreaksOrContinues() || context.handlesReturns()) { >- emitJumpIf(op_stricteq, completionTypeRegister, CompletionType::Normal, normalCompletionLabel); >+ emitJumpIf<OpStricteq>(completionTypeRegister, CompletionType::Normal, normalCompletionLabel); > > FinallyContext* outerContext = context.outerContext(); > >@@ -5130,7 +4712,7 @@ void BytecodeGenerator::emitFinallyCompletion(FinallyContext& context, RegisterI > for (size_t i = 0; i < numberOfJumps; i++) { > Ref<Label> nextLabel = newLabel(); > auto& jump = context.jumps(i); >- emitJumpIf(op_nstricteq, completionTypeRegister, jump.jumpID, nextLabel.get()); >+ emitJumpIf<OpNstricteq>(completionTypeRegister, jump.jumpID, nextLabel.get()); > > restoreScopeRegister(jump.targetLexicalScopeIndex); > emitSetCompletionType(CompletionType::Normal); >@@ -5143,13 +4725,13 @@ void BytecodeGenerator::emitFinallyCompletion(FinallyContext& context, RegisterI > // We are not the outermost finally. > bool hasBreaksOrContinuesNotCoveredByJumps = context.numberOfBreaksOrContinues() > numberOfJumps; > if (hasBreaksOrContinuesNotCoveredByJumps || context.handlesReturns()) >- emitJumpIf(op_nstricteq, completionTypeRegister, CompletionType::Throw, *outerContext->finallyLabel()); >+ emitJumpIf<OpNstricteq>(completionTypeRegister, CompletionType::Throw, *outerContext->finallyLabel()); > > } else { > // We are the outermost finally. 
> if (context.handlesReturns()) { > Ref<Label> notReturnLabel = newLabel(); >- emitJumpIf(op_nstricteq, completionTypeRegister, CompletionType::Return, notReturnLabel.get()); >+ emitJumpIf<OpNstricteq>(completionTypeRegister, CompletionType::Return, notReturnLabel.get()); > > emitWillLeaveCallFrameDebugHook(); > emitReturn(completionValueRegister(), ReturnFrom::Finally); >@@ -5158,7 +4740,7 @@ void BytecodeGenerator::emitFinallyCompletion(FinallyContext& context, RegisterI > } > } > } >- emitJumpIf(op_nstricteq, completionTypeRegister, CompletionType::Throw, normalCompletionLabel); >+ emitJumpIf<OpNstricteq>(completionTypeRegister, CompletionType::Throw, normalCompletionLabel); > emitThrow(completionValueRegister()); > } > >@@ -5183,69 +4765,85 @@ void BytecodeGenerator::releaseCompletionRecordRegisters() > m_completionValueRegister = nullptr; > } > >-void BytecodeGenerator::emitJumpIf(OpcodeID compareOpcode, RegisterID* completionTypeRegister, CompletionType type, Label& jumpTarget) >+template<typename CompareOp> >+void BytecodeGenerator::emitJumpIf(RegisterID* completionTypeRegister, CompletionType type, Label& jumpTarget) > { > RefPtr<RegisterID> tempRegister = newTemporary(); > RegisterID* valueConstant = addConstantValue(jsNumber(static_cast<int>(type))); > OperandTypes operandTypes = OperandTypes(ResultType::numberTypeIsInt32(), ResultType::unknownType()); > >- auto equivalenceResult = emitBinaryOp(compareOpcode, tempRegister.get(), valueConstant, completionTypeRegister, operandTypes); >+ auto equivalenceResult = emitBinaryOp<CompareOp>(tempRegister.get(), valueConstant, completionTypeRegister, operandTypes); > emitJumpIfTrue(equivalenceResult, jumpTarget); > } > >-void StructureForInContext::finalize(BytecodeGenerator& generator) >+void StructureForInContext::finalize(BytecodeGenerator& /*generator*/) > { > if (isValid()) > return; > >- for (const auto& instTuple : m_getInsts) { >- unsigned instIndex = std::get<0>(instTuple); >- int propertyRegIndex = 
std::get<1>(instTuple); >- UnlinkedValueProfile valueProfile = std::get<2>(instTuple); >- OpcodeID op = generator.instructions()[instIndex].u.opcode; >- RELEASE_ASSERT(op == op_get_direct_pname); >- ASSERT(opcodeLength(op_get_direct_pname) == 7); >- ASSERT(opcodeLength(op_get_by_val) == 6); >- >- // 0. Change the opcode to get_by_val. >- generator.instructions()[instIndex].u.opcode = op_get_by_val; >- // 1. dst stays the same. >- // 2. base stays the same. >- // 3. property gets switched to the original property. >- generator.instructions()[instIndex + 3].u.operand = propertyRegIndex; >- // 4. add an array profile. >- generator.instructions()[instIndex + 4].u.unsignedValue = generator.newArrayProfile(); >- // 5. set the result value profile. >- generator.instructions()[instIndex + 5].u.unsignedValue = valueProfile; >- // 6. nop out the last instruction word. >- generator.instructions()[instIndex + 6].u.opcode = op_nop; >- } >-} >- >-void IndexedForInContext::finalize(BytecodeGenerator& generator) >+ // TODO >+ //for (const auto& instTuple : m_getInsts) { >+ //unsigned instIndex = std::get<0>(instTuple); >+ //int propertyRegIndex = std::get<1>(instTuple); >+ //OpcodeID op = generator.instructions()[instIndex].u.opcode; >+ //RELEASE_ASSERT(op == op_get_direct_pname); >+ //ASSERT(opcodeLength(op_get_direct_pname) == 7); >+ //ASSERT(opcodeLength(op_get_by_val) == 6); >+ >+ //// 0. Change the opcode to get_by_val. >+ //generator.instructions()[instIndex].u.opcode = op_get_by_val; >+ //// 1. dst stays the same. >+ //// 2. base stays the same. >+ //// 3. property gets switched to the original property. >+ //generator.instructions()[instIndex + 3].u.operand = propertyRegIndex; >+ //// 4. add an array profile. >+ //generator.instructions()[instIndex + 4].u.unsignedValue = generator.newArrayProfile(); >+ //// TODO: do we need this step? >+ //// 5. set the result value profile. >+ ////generator.instructions()[instIndex + 5].u.unsignedValue = valueProfile; >+ //// 6. 
nop out the last instruction word. >+ //generator.instructions()[instIndex + 6].u.opcode = op_nop; >+ //} >+} >+ >+void IndexedForInContext::finalize(BytecodeGenerator& /*generator*/) > { > if (isValid()) > return; > >- for (const auto& instPair : m_getInsts) { >- unsigned instIndex = instPair.first; >- int propertyRegIndex = instPair.second; >- OpcodeID op = generator.instructions()[instIndex].u.opcode; >- RELEASE_ASSERT(op == op_get_by_val); >- // We just need to perform the get_by_val with the original property here, >- // not the indexed one. >- generator.instructions()[instIndex + 3].u.operand = propertyRegIndex; >+ // TODO >+ //for (const auto& instPair : m_getInsts) { >+ //unsigned instIndex = instPair.first; >+ //int propertyRegIndex = instPair.second; >+ //OpcodeID op = generator.instructions()[instIndex].u.opcode; >+ //RELEASE_ASSERT(op == op_get_by_val); >+ //// We just need to perform the get_by_val with the original property here, >+ //// not the indexed one. >+ //generator.instructions()[instIndex + 3].u.operand = propertyRegIndex; >+ //} >+} >+ >+void StaticPropertyAnalysis::record() >+{ >+ auto* instruction = m_instructionRef.ptr(); >+ auto size = m_propertyIndexes.size(); >+ switch (instruction->opcodeID()) { >+ case OpNewObject::opcodeID(): >+ instruction->cast<OpNewObject>()->setInlineCapacity(size); >+ return; >+ case OpCreateThis::opcodeID(): >+ instruction->cast<OpCreateThis>()->setInlineCapacity(size); >+ return; >+ default: >+ ASSERT_NOT_REACHED(); > } > } > > void BytecodeGenerator::emitToThis() > { > m_codeBlock->addPropertyAccessInstruction(instructions().size()); >- UnlinkedValueProfile profile = emitProfiledOpcode(op_to_this); >- instructions().append(kill(&m_thisRegister)); >- instructions().append(0); >- instructions().append(0); >- instructions().append(profile); >+ >+ OpToThis::emit(this, kill(&m_thisRegister)); > } > > } // namespace JSC >diff --git a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h 
b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h >index 8ac6bc1e88ef9ec86d88461d1617515fd0e3ed59..fc22bf5d152d301f52203b4ed711df383d8ba6ec 100644 >--- a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h >+++ b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h >@@ -41,6 +41,7 @@ > #include "LabelScope.h" > #include "Nodes.h" > #include "ParserError.h" >+#include "ProfileTypeBytecodeFlag.h" > #include "RegisterID.h" > #include "StaticPropertyAnalyzer.h" > #include "SymbolTable.h" >@@ -208,7 +209,7 @@ namespace JSC { > > class StructureForInContext : public ForInContext { > public: >- using GetInst = std::tuple<unsigned, int, UnlinkedValueProfile>; >+ using GetInst = std::tuple<unsigned, int>; > > StructureForInContext(RegisterID* localRegister, RegisterID* indexRegister, RegisterID* propertyRegister, RegisterID* enumeratorRegister) > : ForInContext(localRegister) >@@ -227,9 +228,9 @@ namespace JSC { > RegisterID* property() const { return m_propertyRegister.get(); } > RegisterID* enumerator() const { return m_enumeratorRegister.get(); } > >- void addGetInst(unsigned instIndex, int propertyRegIndex, UnlinkedValueProfile valueProfile) >+ void addGetInst(unsigned instIndex, int propertyRegIndex) > { >- m_getInsts.append(GetInst { instIndex, propertyRegIndex, valueProfile }); >+ m_getInsts.append(GetInst { instIndex, propertyRegIndex }); > } > > void finalize(BytecodeGenerator&); >@@ -344,17 +345,11 @@ namespace JSC { > TryData* tryData; > }; > >- enum ProfileTypeBytecodeFlag { >- ProfileTypeBytecodeClosureVar, >- ProfileTypeBytecodeLocallyResolved, >- ProfileTypeBytecodeDoesNotHaveGlobalID, >- ProfileTypeBytecodeFunctionArgument, >- ProfileTypeBytecodeFunctionReturnStatement >- }; >- > class BytecodeGenerator { > WTF_MAKE_FAST_ALLOCATED; > WTF_MAKE_NONCOPYABLE(BytecodeGenerator); >+ >+ friend class Label; > public: > typedef DeclarationStacks::FunctionStack FunctionStack; > >@@ -495,6 +490,22 @@ namespace JSC { > n->emitBytecode(*this, dst); > } > >+ 
void recordOpcode(OpcodeID opcodeID) >+ { >+#ifndef NDEBUG >+ // TODO >+ //ASSERT(opcodePosition - m_lastOpcodePosition == opcodeLength(m_lastOpcodeID) || m_lastOpcodeID == op_end); >+#endif >+ // TODO >+ //m_lastInstruction = m_writer.ref(); >+ m_lastOpcodeID = opcodeID; >+ }; >+ >+ unsigned addMetadataFor(OpcodeID opcodeID) >+ { >+ return m_codeBlock->addMetadataFor(opcodeID); >+ } >+ > void emitNode(StatementNode* n) > { > emitNode(nullptr, n); >@@ -570,31 +581,32 @@ namespace JSC { > ASSERT(divot.offset >= divotStart.offset); > ASSERT(divotEnd.offset >= divot.offset); > >- int sourceOffset = m_scopeNode->source().startOffset(); >- unsigned firstLine = m_scopeNode->source().firstLine().oneBasedInt(); >+ //int sourceOffset = m_scopeNode->source().startOffset(); >+ //unsigned firstLine = m_scopeNode->source().firstLine().oneBasedInt(); > >- int divotOffset = divot.offset - sourceOffset; >- int startOffset = divot.offset - divotStart.offset; >- int endOffset = divotEnd.offset - divot.offset; >+ //int divotOffset = divot.offset - sourceOffset; >+ //int startOffset = divot.offset - divotStart.offset; >+ //int endOffset = divotEnd.offset - divot.offset; > >- unsigned line = divot.line; >- ASSERT(line >= firstLine); >- line -= firstLine; >+ //unsigned line = divot.line; >+ //ASSERT(line >= firstLine); >+ //line -= firstLine; > >- int lineStart = divot.lineStartOffset; >- if (lineStart > sourceOffset) >- lineStart -= sourceOffset; >- else >- lineStart = 0; >+ //int lineStart = divot.lineStartOffset; >+ //if (lineStart > sourceOffset) >+ //lineStart -= sourceOffset; >+ //else >+ //lineStart = 0; > >- if (divotOffset < lineStart) >- return; >+ //if (divotOffset < lineStart) >+ //return; > >- unsigned column = divotOffset - lineStart; >+ //unsigned column = divotOffset - lineStart; > >- unsigned instructionOffset = instructions().size(); >- if (!m_isBuiltinFunction) >- m_codeBlock->addExpressionInfo(instructionOffset, divotOffset, startOffset, endOffset, line, column); >+ 
// TODO >+ //unsigned instructionOffset = instructions().size(); >+ //if (!m_isBuiltinFunction) >+ //m_codeBlock->addExpressionInfo(instructionOffset, divotOffset, startOffset, endOffset, line, column); > } > > >@@ -654,13 +666,46 @@ namespace JSC { > RegisterID* emitLoad(RegisterID* dst, IdentifierSet& excludedList); > RegisterID* emitLoadGlobalObject(RegisterID* dst); > >- RegisterID* emitUnaryOp(OpcodeID, RegisterID* dst, RegisterID* src); >+ template<typename UnaryOp, typename = std::enable_if_t<UnaryOp::opcodeID() != op_negate>> >+ RegisterID* emitUnaryOp(RegisterID* dst, RegisterID* src) >+ { >+ //ASSERT_WITH_MESSAGE(op_negate != UnaryOp::opcodeID(), "op_negate has an Arith Profile."); >+ UnaryOp::emit(this, dst, src); >+ return dst; >+ } >+ > RegisterID* emitUnaryOp(OpcodeID, RegisterID* dst, RegisterID* src, OperandTypes); >- RegisterID* emitUnaryOpProfiled(OpcodeID, RegisterID* dst, RegisterID* src); >- RegisterID* emitBinaryOp(OpcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes); >- RegisterID* emitEqualityOp(OpcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2); >- RegisterID* emitUnaryNoDstOp(OpcodeID, RegisterID* src); > >+ template<typename BinaryOp> >+ std::enable_if_t< >+ BinaryOp::opcodeID() != op_bitor && BinaryOp::opcodeID() != op_bitand && >+ BinaryOp::opcodeID() != op_bitxor && BinaryOp::opcodeID() != op_add && >+ BinaryOp::opcodeID() != op_mul && BinaryOp::opcodeID() != op_sub && >+ BinaryOp::opcodeID() != op_div >+ , RegisterID*> >+ emitBinaryOp(RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes) >+ { >+ BinaryOp::emit(this, dst, src1, src2); >+ return dst; >+ } >+ >+ template<typename BinaryOp> >+ std::enable_if_t< >+ BinaryOp::opcodeID() == op_bitor || BinaryOp::opcodeID() == op_bitand || >+ BinaryOp::opcodeID() == op_bitxor || BinaryOp::opcodeID() == op_add || >+ BinaryOp::opcodeID() == op_mul || BinaryOp::opcodeID() == op_sub || >+ BinaryOp::opcodeID() == op_div >+ , RegisterID*> >+ 
emitBinaryOp(RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes types) >+ { >+ BinaryOp::emit(this, dst, src1, src2, types); >+ return dst; >+ } >+ >+ RegisterID* emitBinaryOp(OpcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes types); >+ >+ template<typename EqOp> >+ RegisterID* emitEqualityOp(RegisterID* dst, RegisterID* src1, RegisterID* src2); > RegisterID* emitCreateThis(RegisterID* dst); > void emitTDZCheck(RegisterID* target); > bool needsTDZCheck(const Variable&); >@@ -685,8 +730,8 @@ namespace JSC { > RegisterID* moveLinkTimeConstant(RegisterID* dst, LinkTimeConstant); > RegisterID* moveEmptyValue(RegisterID* dst); > >- RegisterID* emitToNumber(RegisterID* dst, RegisterID* src) { return emitUnaryOpProfiled(op_to_number, dst, src); } >- RegisterID* emitToString(RegisterID* dst, RegisterID* src) { return emitUnaryOp(op_to_string, dst, src); } >+ RegisterID* emitToNumber(RegisterID* dst, RegisterID* src); >+ RegisterID* emitToString(RegisterID* dst, RegisterID* src); > RegisterID* emitToObject(RegisterID* dst, RegisterID* src, const Identifier& message); > RegisterID* emitInc(RegisterID* srcDst); > RegisterID* emitDec(RegisterID* srcDst); >@@ -694,7 +739,7 @@ namespace JSC { > RegisterID* emitOverridesHasInstance(RegisterID* dst, RegisterID* constructor, RegisterID* hasInstanceValue); > RegisterID* emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* basePrototype); > RegisterID* emitInstanceOfCustom(RegisterID* dst, RegisterID* value, RegisterID* constructor, RegisterID* hasInstanceValue); >- RegisterID* emitTypeOf(RegisterID* dst, RegisterID* src) { return emitUnaryOp(op_typeof, dst, src); } >+ RegisterID* emitTypeOf(RegisterID* dst, RegisterID* src); > RegisterID* emitInByVal(RegisterID* dst, RegisterID* property, RegisterID* base); > RegisterID* emitInById(RegisterID* dst, RegisterID* base, const Identifier& property); > >@@ -755,7 +800,7 @@ namespace JSC { > > enum class ReturnFrom { Normal, Finally }; > 
RegisterID* emitReturn(RegisterID* src, ReturnFrom = ReturnFrom::Normal); >- RegisterID* emitEnd(RegisterID* src) { return emitUnaryNoDstOp(op_end, src); } >+ RegisterID* emitEnd(RegisterID* src); > > RegisterID* emitConstruct(RegisterID* dst, RegisterID* func, RegisterID* lazyThis, ExpectedFunction, CallArguments&, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd); > RegisterID* emitStrcat(RegisterID* dst, RegisterID* src, int count); >@@ -779,6 +824,12 @@ namespace JSC { > void emitJumpIfNotFunctionCall(RegisterID* cond, Label& target); > void emitJumpIfNotFunctionApply(RegisterID* cond, Label& target); > >+ template<typename BinOp, typename JmpOp> >+ bool fuseCompareAndJump(RegisterID* cond, Label& target, bool swapOperands = false); >+ >+ template<typename UnaryOp, typename JmpOp> >+ bool fuseTestAndJmp(RegisterID* cond, Label& target); >+ > void emitEnter(); > void emitCheckTraps(); > >@@ -840,11 +891,8 @@ namespace JSC { > > int labelScopeDepthToLexicalScopeIndex(int labelScopeDepth); > >- void emitThrow(RegisterID* exc) >- { >- m_usesExceptions = true; >- emitUnaryNoDstOp(op_throw, exc); >- } >+ void emitThrow(RegisterID*); >+ RegisterID* emitArgumentCount(RegisterID*); > > void emitThrowStaticError(ErrorType, RegisterID*); > void emitThrowStaticError(ErrorType, const Identifier& message); >@@ -914,7 +962,8 @@ namespace JSC { > move(completionValueRegister(), reg); > } > >- void emitJumpIf(OpcodeID compareOpcode, RegisterID* completionTypeRegister, CompletionType, Label& jumpTarget); >+ template<typename CompareOp> >+ void emitJumpIf(RegisterID* completionTypeRegister, CompletionType, Label& jumpTarget); > > bool emitJumpViaFinallyIfNeeded(int targetLabelScopeDepth, Label& jumpTarget); > bool emitReturnViaFinallyIfNeeded(RegisterID* returnRegister); >@@ -1011,18 +1060,14 @@ namespace JSC { > void emitOpcode(OpcodeID); > UnlinkedArrayAllocationProfile newArrayAllocationProfile(IndexingType); > 
UnlinkedObjectAllocationProfile newObjectAllocationProfile(); >- UnlinkedValueProfile emitProfiledOpcode(OpcodeID); >- int kill(RegisterID* dst) >+ RegisterID* kill(RegisterID* dst) > { >- int index = dst->index(); >- m_staticPropertyAnalyzer.kill(index); >- return index; >+ m_staticPropertyAnalyzer.kill(dst); >+ return dst; > } > >- void retrieveLastBinaryOp(int& dstIndex, int& src1Index, int& src2Index); > void retrieveLastUnaryOp(int& dstIndex, int& srcIndex); >- ALWAYS_INLINE void rewindBinaryOp(); >- ALWAYS_INLINE void rewindUnaryOp(); >+ ALWAYS_INLINE void rewind(); > > void allocateCalleeSaveSpace(); > void allocateAndEmitScope(); >@@ -1039,7 +1084,8 @@ namespace JSC { > // (i.e. "Object()" is identical to "new Object()"). > ExpectedFunction emitExpectedFunctionSnippet(RegisterID* dst, RegisterID* func, ExpectedFunction, CallArguments&, Label& done); > >- RegisterID* emitCall(OpcodeID, RegisterID* dst, RegisterID* func, ExpectedFunction, CallArguments&, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall); >+ template<typename CallOp> >+ RegisterID* emitCall(RegisterID* dst, RegisterID* func, ExpectedFunction, CallArguments&, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall); > > RegisterID* emitCallIterator(RegisterID* iterator, RegisterID* argument, ThrowableExpressionData*); > RegisterID* newRegister(); >@@ -1102,7 +1148,8 @@ namespace JSC { > void getVariablesUnderTDZ(VariableEnvironment&); > > RegisterID* emitConstructVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall); >- RegisterID* emitCallVarargs(OpcodeID, RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t 
firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall); >+ template<typename CallOp> >+ RegisterID* emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall); > > void emitLogShadowChickenPrologueIfNecessary(); > void emitLogShadowChickenTailIfNecessary(); >@@ -1125,10 +1172,13 @@ namespace JSC { > JSValue addBigIntConstant(const Identifier&, uint8_t radix, bool sign); > RegisterID* addTemplateObjectConstant(Ref<TemplateObjectDescriptor>&&); > >- Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>& instructions() { return m_instructions; } >+ const InstructionStream& instructions() const { return m_writer; } > > RegisterID* emitThrowExpressionTooDeepException(); > >+ void write(uint8_t byte) { m_writer.write(byte); } >+ void write(uint32_t i) { m_writer.write(i); } >+ > class PreservedTDZStack { > private: > Vector<TDZMap> m_preservedTDZStack; >@@ -1138,8 +1188,17 @@ namespace JSC { > void preserveTDZStack(PreservedTDZStack&); > void restoreTDZStack(const PreservedTDZStack&); > >+ template<typename Func> >+ void withWriter(InstructionStreamWriter& writer, Func fn) >+ { >+ auto tmp = m_writer; >+ m_writer = writer; >+ fn(); >+ m_writer = tmp; >+ } >+ > private: >- Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow> m_instructions; >+ InstructionStreamWriter m_writer; > > bool m_shouldEmitDebugHooks; > >@@ -1229,14 +1288,12 @@ namespace JSC { > IdentifierBigIntMap m_bigIntMap; > TemplateObjectDescriptorMap m_templateObjectDescriptorMap; > >- StaticPropertyAnalyzer m_staticPropertyAnalyzer { &m_instructions }; >+ StaticPropertyAnalyzer m_staticPropertyAnalyzer; > > VM* m_vm; > > OpcodeID m_lastOpcodeID = op_end; >-#ifndef NDEBUG >- size_t m_lastOpcodePosition { 0 }; >-#endif >+ 
InstructionStream::MutableRef m_lastInstruction { m_writer.ref() }; > > bool m_usesExceptions { false }; > bool m_expressionTooDeep { false }; >@@ -1246,10 +1303,11 @@ namespace JSC { > bool m_needsToUpdateArrowFunctionContext; > DerivedContextType m_derivedContextType { DerivedContextType::None }; > >- using CatchEntry = std::tuple<TryData*, int, int>; >+ using CatchEntry = std::tuple<TryData*, VirtualRegister, VirtualRegister>; > Vector<CatchEntry> m_catchesToEmit; > }; > >+ > } // namespace JSC > > namespace WTF { >diff --git a/Source/JavaScriptCore/bytecompiler/Label.h b/Source/JavaScriptCore/bytecompiler/Label.h >index 3e2d297f23d105c15984011a0f55a33574df053a..7e6bed65c2f66b1de785822c10148fb5bec415e9 100644 >--- a/Source/JavaScriptCore/bytecompiler/Label.h >+++ b/Source/JavaScriptCore/bytecompiler/Label.h >@@ -34,24 +34,38 @@ > #include <limits.h> > > namespace JSC { >- > class BytecodeGenerator; > > class Label { > WTF_MAKE_NONCOPYABLE(Label); > public: >+ class Bound { >+ >+ }; >+ > Label() = default; > >+ Label(unsigned location) >+ : m_location(location) >+ { } >+ > void setLocation(BytecodeGenerator&, unsigned); > >- int bind(int opcode, int offset) const >+ int bind(BytecodeGenerator*); >+ >+ int bind(unsigned offset) > { > m_bound = true; >- if (m_location == invalidLocation) { >- m_unresolvedJumps.append(std::make_pair(opcode, offset)); >- return 0; >- } >- return m_location - opcode; >+ if (!isForward()) >+ return m_location - offset; >+ m_unresolvedJumps.append(offset); >+ return 0; >+ } >+ >+ int bind() >+ { >+ ASSERT(!isForward()); >+ return bind(0u); > } > > void ref() { ++m_refCount; } >@@ -65,16 +79,10 @@ namespace JSC { > > bool isForward() const { return m_location == invalidLocation; } > >- int bind() >- { >- ASSERT(!isForward()); >- return bind(0, 0); >- } >- > bool isBound() const { return m_bound; } > > private: >- typedef Vector<std::pair<int, int>, 8> JumpVector; >+ typedef Vector<int, 8> JumpVector; > > static const unsigned 
invalidLocation = UINT_MAX; > >diff --git a/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp b/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp >index 366964f1d63a5de692592f9c4544e0872128dad9..5a8530136e03daa4bbee74194babd6e57fd5804a 100644 >--- a/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp >+++ b/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp >@@ -464,7 +464,7 @@ handleSpread: > }); > for (; n; n = n->next()) { > if (n->elision()) >- generator.emitBinaryOp(op_add, index.get(), index.get(), generator.emitLoad(0, jsNumber(n->elision())), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32())); >+ generator.emitBinaryOp<OpAdd>(index.get(), index.get(), generator.emitLoad(0, jsNumber(n->elision())), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32())); > if (n->value()->isSpreadExpression()) { > SpreadExpressionNode* spread = static_cast<SpreadExpressionNode*>(n->value()); > generator.emitEnumeration(spread, spread->expression(), spreader); >@@ -475,7 +475,7 @@ handleSpread: > } > > if (m_elision) { >- generator.emitBinaryOp(op_add, index.get(), index.get(), generator.emitLoad(0, jsNumber(m_elision)), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32())); >+ generator.emitBinaryOp<OpAdd>(index.get(), index.get(), generator.emitLoad(0, jsNumber(m_elision)), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32())); > generator.emitPutById(array.get(), generator.propertyNames().length, index.get()); > } > return generator.move(dst, array.get()); >@@ -996,7 +996,7 @@ RegisterID* BytecodeIntrinsicNode::emit_intrinsic_argumentCount(BytecodeGenerato > { > ASSERT(!m_args->m_listNode); > >- return generator.emitUnaryNoDstOp(op_argument_count, generator.finalDestination(dst)); >+ return generator.emitArgumentCount(generator.finalDestination(dst)); > } > > RegisterID* BytecodeIntrinsicNode::emit_intrinsic_putByIdDirect(BytecodeGenerator& generator, RegisterID* 
dst) >@@ -1460,13 +1460,13 @@ RegisterID* ApplyFunctionCallDotNode::emitBytecode(BytecodeGenerator& generator, > Ref<Label> haveThis = generator.newLabel(); > Ref<Label> end = generator.newLabel(); > RefPtr<RegisterID> compareResult = generator.newTemporary(); >- RefPtr<RegisterID> indexZeroCompareResult = generator.emitBinaryOp(op_eq, compareResult.get(), index.get(), generator.emitLoad(0, jsNumber(0)), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32())); >+ RefPtr<RegisterID> indexZeroCompareResult = generator.emitBinaryOp<OpEq>(compareResult.get(), index.get(), generator.emitLoad(0, jsNumber(0)), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32())); > generator.emitJumpIfFalse(indexZeroCompareResult.get(), haveThis.get()); > generator.move(thisRegister.get(), value); > generator.emitLoad(index.get(), jsNumber(1)); > generator.emitJump(end.get()); > generator.emitLabel(haveThis.get()); >- RefPtr<RegisterID> indexOneCompareResult = generator.emitBinaryOp(op_eq, compareResult.get(), index.get(), generator.emitLoad(0, jsNumber(1)), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32())); >+ RefPtr<RegisterID> indexOneCompareResult = generator.emitBinaryOp<OpEq>(compareResult.get(), index.get(), generator.emitLoad(0, jsNumber(1)), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32())); > generator.emitJumpIfFalse(indexOneCompareResult.get(), end.get()); > generator.move(argumentsRegister.get(), value); > generator.emitLoad(index.get(), jsNumber(2)); >@@ -1898,7 +1898,7 @@ RegisterID* BitwiseNotNode::emitBytecode(BytecodeGenerator& generator, RegisterI > { > RefPtr<RegisterID> src2 = generator.emitLoad(nullptr, jsNumber(-1)); > RefPtr<RegisterID> src1 = generator.emitNode(m_expr); >- return generator.emitBinaryOp(op_bitxor, generator.finalDestination(dst, src1.get()), src1.get(), src2.get(), OperandTypes(m_expr->resultDescriptor(), ResultType::numberTypeIsInt32())); 
>+ return generator.emitBinaryOp<OpBitxor>(generator.finalDestination(dst, src1.get()), src1.get(), src2.get(), OperandTypes(m_expr->resultDescriptor(), ResultType::numberTypeIsInt32())); > } > > // ------------------------------ LogicalNotNode ----------------------------------- >@@ -2166,7 +2166,7 @@ RegisterID* BinaryOpNode::emitBytecode(BytecodeGenerator& generator, RegisterID* > if (m_expr1->isNull() || m_expr2->isNull()) { > RefPtr<RegisterID> src = generator.tempDestination(dst); > generator.emitNode(src.get(), m_expr1->isNull() ? m_expr2 : m_expr1); >- return generator.emitUnaryOp(op_neq_null, generator.finalDestination(dst, src.get()), src.get()); >+ return generator.emitUnaryOp<OpNeqNull>(generator.finalDestination(dst, src.get()), src.get()); > } > } > >@@ -2184,17 +2184,17 @@ RegisterID* BinaryOpNode::emitBytecode(BytecodeGenerator& generator, RegisterID* > if (wasTypeof && (opcodeID == op_neq || opcodeID == op_nstricteq)) { > RefPtr<RegisterID> tmp = generator.tempDestination(dst); > if (opcodeID == op_neq) >- generator.emitEqualityOp(op_eq, generator.finalDestination(tmp.get(), src1.get()), src1.get(), src2.get()); >+ generator.emitEqualityOp<OpEq>(generator.finalDestination(tmp.get(), src1.get()), src1.get(), src2.get()); > else if (opcodeID == op_nstricteq) >- generator.emitEqualityOp(op_stricteq, generator.finalDestination(tmp.get(), src1.get()), src1.get(), src2.get()); >+ generator.emitEqualityOp<OpStricteq>(generator.finalDestination(tmp.get(), src1.get()), src1.get(), src2.get()); > else > RELEASE_ASSERT_NOT_REACHED(); >- return generator.emitUnaryOp(op_not, generator.finalDestination(dst, tmp.get()), tmp.get()); >+ return generator.emitUnaryOp<OpNot>(generator.finalDestination(dst, tmp.get()), tmp.get()); > } > RegisterID* result = generator.emitBinaryOp(opcodeID, generator.finalDestination(dst, src1.get()), src1.get(), src2.get(), OperandTypes(left->resultDescriptor(), right->resultDescriptor())); > if (m_shouldToUnsignedResult) { > if 
(opcodeID == op_urshift && dst != generator.ignoredResult()) >- return generator.emitUnaryOp(op_unsigned, result, result); >+ return generator.emitUnaryOp<OpUnsigned>(result, result); > } > return result; > } >@@ -2204,7 +2204,7 @@ RegisterID* EqualNode::emitBytecode(BytecodeGenerator& generator, RegisterID* ds > if (m_expr1->isNull() || m_expr2->isNull()) { > RefPtr<RegisterID> src = generator.tempDestination(dst); > generator.emitNode(src.get(), m_expr1->isNull() ? m_expr2 : m_expr1); >- return generator.emitUnaryOp(op_eq_null, generator.finalDestination(dst, src.get()), src.get()); >+ return generator.emitUnaryOp<OpEqNull>(generator.finalDestination(dst, src.get()), src.get()); > } > > ExpressionNode* left = m_expr1; >@@ -2214,7 +2214,7 @@ RegisterID* EqualNode::emitBytecode(BytecodeGenerator& generator, RegisterID* ds > > RefPtr<RegisterID> src1 = generator.emitNodeForLeftHandSide(left, m_rightHasAssignments, m_expr2->isPure(generator)); > RefPtr<RegisterID> src2 = generator.emitNode(right); >- return generator.emitEqualityOp(op_eq, generator.finalDestination(dst, src1.get()), src1.get(), src2.get()); >+ return generator.emitEqualityOp<OpEq>(generator.finalDestination(dst, src1.get()), src1.get(), src2.get()); > } > > RegisterID* StrictEqualNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) >@@ -2226,7 +2226,7 @@ RegisterID* StrictEqualNode::emitBytecode(BytecodeGenerator& generator, Register > > RefPtr<RegisterID> src1 = generator.emitNodeForLeftHandSide(left, m_rightHasAssignments, m_expr2->isPure(generator)); > RefPtr<RegisterID> src2 = generator.emitNode(right); >- return generator.emitEqualityOp(op_stricteq, generator.finalDestination(dst, src1.get()), src1.get(), src2.get()); >+ return generator.emitEqualityOp<OpStricteq>(generator.finalDestination(dst, src1.get()), src1.get(), src2.get()); > } > > RegisterID* ThrowableBinaryOpNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) >@@ -2419,7 +2419,7 @@ static ALWAYS_INLINE 
RegisterID* emitReadModifyAssignment(BytecodeGenerator& gen > generator.emitExpressionInfo(emitExpressionInfoForMe->divot(), emitExpressionInfoForMe->divotStart(), emitExpressionInfoForMe->divotEnd()); > RegisterID* result = generator.emitBinaryOp(opcodeID, dst, src1, src2, types); > if (oper == OpURShift) >- return generator.emitUnaryOp(op_unsigned, result, result); >+ return generator.emitUnaryOp<OpUnsigned>(result, result); > return result; > } > >@@ -3092,7 +3092,7 @@ void ForInNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) > generator.emitLabel(loopStart.get()); > generator.emitLoopHint(); > >- RefPtr<RegisterID> result = generator.emitEqualityOp(op_less, generator.newTemporary(), i.get(), length.get()); >+ RefPtr<RegisterID> result = generator.emitEqualityOp<OpLess>(generator.newTemporary(), i.get(), length.get()); > generator.emitJumpIfFalse(result.get(), loopEnd.get()); > generator.emitHasIndexedProperty(result.get(), base.get(), i.get()); > generator.emitJumpIfFalse(result.get(), *scope->continueTarget()); >@@ -3133,7 +3133,7 @@ void ForInNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) > generator.emitLabel(loopStart.get()); > generator.emitLoopHint(); > >- RefPtr<RegisterID> result = generator.emitUnaryOp(op_eq_null, generator.newTemporary(), propertyName.get()); >+ RefPtr<RegisterID> result = generator.emitUnaryOp<OpEqNull>(generator.newTemporary(), propertyName.get()); > generator.emitJumpIfTrue(result.get(), loopEnd.get()); > generator.emitHasStructureProperty(result.get(), base.get(), propertyName.get(), enumerator.get()); > generator.emitJumpIfFalse(result.get(), *scope->continueTarget()); >@@ -3174,7 +3174,7 @@ void ForInNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) > generator.emitLabel(loopStart.get()); > generator.emitLoopHint(); > >- RefPtr<RegisterID> result = generator.emitUnaryOp(op_eq_null, generator.newTemporary(), propertyName.get()); >+ RefPtr<RegisterID> result = 
generator.emitUnaryOp<OpEqNull>(generator.newTemporary(), propertyName.get()); > generator.emitJumpIfTrue(result.get(), loopEnd.get()); > > generator.emitHasGenericProperty(result.get(), base.get(), propertyName.get()); >@@ -3499,7 +3499,7 @@ void CaseBlockNode::emitBytecodeForBlock(BytecodeGenerator& generator, RegisterI > for (ClauseListNode* list = m_list1; list; list = list->getNext()) { > RefPtr<RegisterID> clauseVal = generator.newTemporary(); > generator.emitNode(clauseVal.get(), list->getClause()->expr()); >- generator.emitBinaryOp(op_stricteq, clauseVal.get(), clauseVal.get(), switchExpression, OperandTypes()); >+ generator.emitBinaryOp<OpStricteq>(clauseVal.get(), clauseVal.get(), switchExpression, OperandTypes()); > labelVector.append(generator.newLabel()); > generator.emitJumpIfTrue(clauseVal.get(), labelVector[labelVector.size() - 1].get()); > } >@@ -3507,7 +3507,7 @@ void CaseBlockNode::emitBytecodeForBlock(BytecodeGenerator& generator, RegisterI > for (ClauseListNode* list = m_list2; list; list = list->getNext()) { > RefPtr<RegisterID> clauseVal = generator.newTemporary(); > generator.emitNode(clauseVal.get(), list->getClause()->expr()); >- generator.emitBinaryOp(op_stricteq, clauseVal.get(), clauseVal.get(), switchExpression, OperandTypes()); >+ generator.emitBinaryOp<OpStricteq>(clauseVal.get(), clauseVal.get(), switchExpression, OperandTypes()); > labelVector.append(generator.newLabel()); > generator.emitJumpIfTrue(clauseVal.get(), labelVector[labelVector.size() - 1].get()); > } >@@ -3855,11 +3855,11 @@ void FunctionNode::emitBytecode(BytecodeGenerator& generator, RegisterID*) > Ref<Label> generatorBodyLabel = generator.newLabel(); > { > RefPtr<RegisterID> condition = generator.newTemporary(); >- generator.emitEqualityOp(op_stricteq, condition.get(), generator.generatorResumeModeRegister(), generator.emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::NormalMode)))); >+ 
generator.emitEqualityOp<OpStricteq>(condition.get(), generator.generatorResumeModeRegister(), generator.emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::NormalMode)))); > generator.emitJumpIfTrue(condition.get(), generatorBodyLabel.get()); > > Ref<Label> throwLabel = generator.newLabel(); >- generator.emitEqualityOp(op_stricteq, condition.get(), generator.generatorResumeModeRegister(), generator.emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ThrowMode)))); >+ generator.emitEqualityOp<OpStricteq>(condition.get(), generator.generatorResumeModeRegister(), generator.emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ThrowMode)))); > generator.emitJumpIfTrue(condition.get(), throwLabel.get()); > > generator.emitReturn(generator.generatorValueRegister()); >@@ -4019,7 +4019,7 @@ RegisterID* ClassExprNode::emitBytecode(BytecodeGenerator& generator, RegisterID > generator.emitJumpIfTrue(generator.emitIsUndefined(tempRegister.get(), superclass.get()), superclassIsUndefinedLabel.get()); > > Ref<Label> superclassIsNullLabel = generator.newLabel(); >- generator.emitJumpIfTrue(generator.emitUnaryOp(op_eq_null, tempRegister.get(), superclass.get()), superclassIsNullLabel.get()); >+ generator.emitJumpIfTrue(generator.emitUnaryOp<OpEqNull>(tempRegister.get(), superclass.get()), superclassIsNullLabel.get()); > > Ref<Label> superclassIsObjectLabel = generator.newLabel(); > generator.emitJumpIfTrue(generator.emitIsObject(tempRegister.get(), superclass.get()), superclassIsObjectLabel.get()); >@@ -4029,8 +4029,8 @@ RegisterID* ClassExprNode::emitBytecode(BytecodeGenerator& generator, RegisterID > generator.emitGetById(protoParent.get(), superclass.get(), generator.propertyNames().prototype); > > Ref<Label> protoParentIsObjectOrNullLabel = generator.newLabel(); >- generator.emitJumpIfTrue(generator.emitUnaryOp(op_is_object_or_null, tempRegister.get(), 
protoParent.get()), protoParentIsObjectOrNullLabel.get()); >- generator.emitJumpIfTrue(generator.emitUnaryOp(op_is_function, tempRegister.get(), protoParent.get()), protoParentIsObjectOrNullLabel.get()); >+ generator.emitJumpIfTrue(generator.emitUnaryOp<OpIsObjectOrNull>(tempRegister.get(), protoParent.get()), protoParentIsObjectOrNullLabel.get()); >+ generator.emitJumpIfTrue(generator.emitUnaryOp<OpIsFunction>(tempRegister.get(), protoParent.get()), protoParentIsObjectOrNullLabel.get()); > generator.emitThrowTypeError("The value of the superclass's prototype property is not an object."_s); > generator.emitLabel(protoParentIsObjectOrNullLabel.get()); > >diff --git a/Source/JavaScriptCore/bytecompiler/ProfileTypeBytecodeFlag.cpp b/Source/JavaScriptCore/bytecompiler/ProfileTypeBytecodeFlag.cpp >new file mode 100644 >index 0000000000000000000000000000000000000000..b11aefdf4955a311ebec110bc68d18d884536fa8 >--- /dev/null >+++ b/Source/JavaScriptCore/bytecompiler/ProfileTypeBytecodeFlag.cpp >@@ -0,0 +1,55 @@ >+/* >+ * Copyright (C) 2018 Apple Inc. All rights reserved. >+ * >+ * Redistribution and use in source and binary forms, with or without >+ * modification, are permitted provided that the following conditions >+ * are met: >+ * 1. Redistributions of source code must retain the above copyright >+ * notice, this list of conditions and the following disclaimer. >+ * 2. Redistributions in binary form must reproduce the above copyright >+ * notice, this list of conditions and the following disclaimer in the >+ * documentation and/or other materials provided with the distribution. >+ * >+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' >+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE >+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS >+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR >+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF >+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS >+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN >+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) >+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF >+ * THE POSSIBILITY OF SUCH DAMAGE. >+ */ >+ >+ >+#include "config.h" >+#include "ProfileTypeBytecodeFlag.h" >+ >+#include <wtf/PrintStream.h> >+ >+namespace WTF { >+ >+void printInternal(PrintStream& out, JSC::ProfileTypeBytecodeFlag flag) >+{ >+ switch(flag) { >+ case JSC::ProfileTypeBytecodeClosureVar: >+ out.print("ProfileTypeBytecodeClosureVar"); >+ return; >+ case JSC::ProfileTypeBytecodeLocallyResolved: >+ out.print("ProfileTypeBytecodeLocallyResolved"); >+ return; >+ case JSC::ProfileTypeBytecodeDoesNotHaveGlobalID: >+ out.print("ProfileTypeBytecodeDoesNotHaveGlobalID"); >+ return; >+ case JSC::ProfileTypeBytecodeFunctionArgument: >+ out.print("ProfileTypeBytecodeFunctionArgument"); >+ return; >+ case JSC::ProfileTypeBytecodeFunctionReturnStatement: >+ out.print("ProfileTypeBytecodeFunctionReturnStatement"); >+ return; >+ } >+} >+ >+} // namespace WTF >diff --git a/Source/JavaScriptCore/bytecompiler/ProfileTypeBytecodeFlag.h b/Source/JavaScriptCore/bytecompiler/ProfileTypeBytecodeFlag.h >new file mode 100644 >index 0000000000000000000000000000000000000000..7504169d2b0fe6ca92b6efd9b0d635fdaa32a0a3 >--- /dev/null >+++ b/Source/JavaScriptCore/bytecompiler/ProfileTypeBytecodeFlag.h >@@ -0,0 +1,46 @@ >+/* >+ * Copyright (C) 2018 Apple Inc. All rights reserved. >+ * >+ * Redistribution and use in source and binary forms, with or without >+ * modification, are permitted provided that the following conditions >+ * are met: >+ * 1. 
Redistributions of source code must retain the above copyright >+ * notice, this list of conditions and the following disclaimer. >+ * 2. Redistributions in binary form must reproduce the above copyright >+ * notice, this list of conditions and the following disclaimer in the >+ * documentation and/or other materials provided with the distribution. >+ * >+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' >+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE >+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS >+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR >+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF >+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS >+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN >+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) >+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF >+ * THE POSSIBILITY OF SUCH DAMAGE. 
>+ */ >+ >+#pragma once >+ >+namespace JSC { >+ >+enum ProfileTypeBytecodeFlag { >+ ProfileTypeBytecodeClosureVar, >+ ProfileTypeBytecodeLocallyResolved, >+ ProfileTypeBytecodeDoesNotHaveGlobalID, >+ ProfileTypeBytecodeFunctionArgument, >+ ProfileTypeBytecodeFunctionReturnStatement >+}; >+ >+} // namespace JSC >+ >+namespace WTF { >+ >+class PrintStream; >+ >+void printInternal(PrintStream&, JSC::ProfileTypeBytecodeFlag); >+ >+} // namespace WTF >diff --git a/Source/JavaScriptCore/bytecompiler/RegisterID.h b/Source/JavaScriptCore/bytecompiler/RegisterID.h >index cc80f5eb8913562ad25cd6f7aad1c5bffd8a03b7..d9adffce4d8032de8b409fa3adb8ad497632c661 100644 >--- a/Source/JavaScriptCore/bytecompiler/RegisterID.h >+++ b/Source/JavaScriptCore/bytecompiler/RegisterID.h >@@ -37,6 +37,8 @@ namespace JSC { > > class RegisterID { > WTF_MAKE_NONCOPYABLE(RegisterID); >+ >+ friend class VirtualRegister; > public: > RegisterID() > : m_refCount(0) >@@ -122,7 +124,6 @@ namespace JSC { > bool m_didSetIndex; > #endif > }; >- > } // namespace JSC > > namespace WTF { >diff --git a/Source/JavaScriptCore/bytecompiler/StaticPropertyAnalysis.h b/Source/JavaScriptCore/bytecompiler/StaticPropertyAnalysis.h >index f23e8425a795f98a7c4dc61bf35c15d1b079b3ae..8757d3a0d54fc89fb0d97ac8b99e86a341eb6d74 100644 >--- a/Source/JavaScriptCore/bytecompiler/StaticPropertyAnalysis.h >+++ b/Source/JavaScriptCore/bytecompiler/StaticPropertyAnalysis.h >@@ -25,6 +25,7 @@ > > #pragma once > >+#include "InstructionStream.h" > #include <wtf/HashSet.h> > > namespace JSC { >@@ -32,29 +33,24 @@ namespace JSC { > // Reference count indicates number of live registers that alias this object. 
> class StaticPropertyAnalysis : public RefCounted<StaticPropertyAnalysis> { > public: >- static Ref<StaticPropertyAnalysis> create(Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>* instructions, unsigned target) >+ static Ref<StaticPropertyAnalysis> create(InstructionStream::MutableRef&& instructionRef) > { >- return adoptRef(*new StaticPropertyAnalysis(instructions, target)); >+ return adoptRef(*new StaticPropertyAnalysis(WTFMove(instructionRef))); > } > > void addPropertyIndex(unsigned propertyIndex) { m_propertyIndexes.add(propertyIndex); } > >- void record() >- { >- (*m_instructions)[m_target] = m_propertyIndexes.size(); >- } >+ void record(); > > int propertyIndexCount() { return m_propertyIndexes.size(); } > > private: >- StaticPropertyAnalysis(Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>* instructions, unsigned target) >- : m_instructions(instructions) >- , m_target(target) >+ StaticPropertyAnalysis(InstructionStream::MutableRef&& instructionRef) >+ : m_instructionRef(WTFMove(instructionRef)) > { > } > >- Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>* m_instructions; >- unsigned m_target; >+ InstructionStream::MutableRef m_instructionRef; > typedef HashSet<unsigned, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> PropertyIndexSet; > PropertyIndexSet m_propertyIndexes; > }; >diff --git a/Source/JavaScriptCore/bytecompiler/StaticPropertyAnalyzer.h b/Source/JavaScriptCore/bytecompiler/StaticPropertyAnalyzer.h >index cc3b1e4a983391501d3fdf3a67a9c4a34bd9a268..fc5166c965015d98c2410ec82f2aef01cdb059c4 100644 >--- a/Source/JavaScriptCore/bytecompiler/StaticPropertyAnalyzer.h >+++ b/Source/JavaScriptCore/bytecompiler/StaticPropertyAnalyzer.h >@@ -35,63 +35,55 @@ namespace JSC { > // is understood to be lossy, and it's OK if it turns out to be wrong sometimes. 
> class StaticPropertyAnalyzer { > public: >- StaticPropertyAnalyzer(Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>*); >- >- void createThis(int dst, unsigned offsetOfInlineCapacityOperand); >- void newObject(int dst, unsigned offsetOfInlineCapacityOperand); >- void putById(int dst, unsigned propertyIndex); // propertyIndex is an index into a uniqued set of strings. >- void mov(int dst, int src); >+ void createThis(RegisterID* dst, InstructionStream::MutableRef&& instructionRef); >+ void newObject(RegisterID* dst, InstructionStream::MutableRef&& instructionRef); >+ void putById(RegisterID* dst, unsigned propertyIndex); // propertyIndex is an index into a uniqued set of strings. >+ void mov(RegisterID* dst, RegisterID* src); > > void kill(); >- void kill(int dst); >+ void kill(RegisterID* dst); > > private: > void kill(StaticPropertyAnalysis*); > >- Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>* m_instructions; > typedef HashMap<int, RefPtr<StaticPropertyAnalysis>, WTF::IntHash<int>, WTF::UnsignedWithZeroKeyHashTraits<int>> AnalysisMap; > AnalysisMap m_analyses; > }; > >-inline StaticPropertyAnalyzer::StaticPropertyAnalyzer(Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>* instructions) >- : m_instructions(instructions) >-{ >-} >- >-inline void StaticPropertyAnalyzer::createThis(int dst, unsigned offsetOfInlineCapacityOperand) >+inline void StaticPropertyAnalyzer::createThis(RegisterID* dst, InstructionStream::MutableRef&& instructionRef) > { > AnalysisMap::AddResult addResult = m_analyses.add( >- dst, StaticPropertyAnalysis::create(m_instructions, offsetOfInlineCapacityOperand)); >+ dst->index(), StaticPropertyAnalysis::create(WTFMove(instructionRef))); > ASSERT_UNUSED(addResult, addResult.isNewEntry); // Can't have two 'this' in the same constructor. 
> } > >-inline void StaticPropertyAnalyzer::newObject(int dst, unsigned offsetOfInlineCapacityOperand) >+inline void StaticPropertyAnalyzer::newObject(RegisterID* dst, InstructionStream::MutableRef&& instructionRef) > { >- RefPtr<StaticPropertyAnalysis> analysis = StaticPropertyAnalysis::create(m_instructions, offsetOfInlineCapacityOperand); >- AnalysisMap::AddResult addResult = m_analyses.add(dst, analysis); >+ RefPtr<StaticPropertyAnalysis> analysis = StaticPropertyAnalysis::create(WTFMove(instructionRef)); >+ AnalysisMap::AddResult addResult = m_analyses.add(dst->index(), analysis); > if (!addResult.isNewEntry) { > kill(addResult.iterator->value.get()); > addResult.iterator->value = WTFMove(analysis); > } > } > >-inline void StaticPropertyAnalyzer::putById(int dst, unsigned propertyIndex) >+inline void StaticPropertyAnalyzer::putById(RegisterID* dst, unsigned propertyIndex) > { >- StaticPropertyAnalysis* analysis = m_analyses.get(dst); >+ StaticPropertyAnalysis* analysis = m_analyses.get(dst->index()); > if (!analysis) > return; > analysis->addPropertyIndex(propertyIndex); > } > >-inline void StaticPropertyAnalyzer::mov(int dst, int src) >+inline void StaticPropertyAnalyzer::mov(RegisterID* dst, RegisterID* src) > { >- RefPtr<StaticPropertyAnalysis> analysis = m_analyses.get(src); >+ RefPtr<StaticPropertyAnalysis> analysis = m_analyses.get(src->index()); > if (!analysis) { > kill(dst); > return; > } > >- AnalysisMap::AddResult addResult = m_analyses.add(dst, analysis); >+ AnalysisMap::AddResult addResult = m_analyses.add(dst->index(), analysis); > if (!addResult.isNewEntry) { > kill(addResult.iterator->value.get()); > addResult.iterator->value = WTFMove(analysis); >@@ -107,7 +99,7 @@ inline void StaticPropertyAnalyzer::kill(StaticPropertyAnalysis* analysis) > analysis->record(); > } > >-inline void StaticPropertyAnalyzer::kill(int dst) >+inline void StaticPropertyAnalyzer::kill(RegisterID* dst) > { > // We observe kills in order to avoid piling on properties to 
an object after > // its bytecode register has been recycled. >@@ -148,7 +140,7 @@ inline void StaticPropertyAnalyzer::kill(int dst) > // so we accept kills to any registers except for registers that have no inferred > // properties yet. > >- AnalysisMap::iterator it = m_analyses.find(dst); >+ AnalysisMap::iterator it = m_analyses.find(dst->index()); > if (it == m_analyses.end()) > return; > if (!it->value->propertyIndexCount()) >diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp >index e9c7fa5b04fa2ed7eb91195bfadb47e7b818f19c..0d8677cf8e07c21bc0633ffcd7ec22971dcca178 100644 >--- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp >+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp >@@ -134,7 +134,7 @@ private: > > // Helper for min and max. > template<typename ChecksFunctor> >- bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks); >+ bool handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks); > > void refineStatically(CallLinkStatus&, Node* callTarget); > // Blocks can either be targetable (i.e. in the m_blockLinkingTargets of one InlineStackEntry) with a well-defined bytecodeBegin, >@@ -151,11 +151,13 @@ private: > // Handle calls. This resolves issues surrounding inlining and intrinsics. 
> enum Terminality { Terminal, NonTerminal }; > Terminality handleCall( >- int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize, >+ VirtualRegister result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize, > Node* callTarget, int argumentCountIncludingThis, int registerOffset, CallLinkStatus, > SpeculatedType prediction); >- Terminality handleCall(Instruction* pc, NodeType op, CallMode); >- Terminality handleVarargsCall(Instruction* pc, NodeType op, CallMode); >+ template<typename CallOp> >+ Terminality handleCall(const Instruction* pc, NodeType op, CallMode); >+ template<typename CallOp> >+ Terminality handleVarargsCall(const Instruction* pc, NodeType op, CallMode); > void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgumnt); > void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis); > Node* getArgumentCount(); >@@ -163,28 +165,39 @@ private: > bool handleRecursiveTailCall(Node* callTargetNode, CallVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded); > unsigned inliningCost(CallVariant, int argumentCountIncludingThis, InlineCallFrame::Kind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1. > // Handle inlining. Return true if it succeeded, false if we need to plant a call. 
>- bool handleVarargsInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, NodeType callOp, InlineCallFrame::Kind); >+ bool handleVarargsInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, NodeType callOp, InlineCallFrame::Kind); > unsigned getInliningBalance(const CallLinkStatus&, CodeSpecializationKind); > enum class CallOptimizationResult { OptimizedToJump, Inlined, DidNothing }; >- CallOptimizationResult handleCallVariant(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee); >- CallOptimizationResult handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction); >+ CallOptimizationResult handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee); >+ CallOptimizationResult handleInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction); > template<typename ChecksFunctor> >- void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int 
registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks); >+ void inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks); > // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call. > template<typename ChecksFunctor> >- bool handleIntrinsicCall(Node* callee, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks); >+ bool handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks); > template<typename ChecksFunctor> >- bool handleDOMJITCall(Node* callee, int resultOperand, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks); >+ bool handleDOMJITCall(Node* callee, VirtualRegister result, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks); > template<typename ChecksFunctor> >- bool handleIntrinsicGetter(int resultOperand, SpeculatedType prediction, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks); >+ bool handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks); > template<typename ChecksFunctor> >- bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks); >+ bool handleTypedArrayConstructor(VirtualRegister result, InternalFunction*, int registerOffset, int 
argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks); > template<typename ChecksFunctor> >- bool handleConstantInternalFunction(Node* callTargetNode, int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks); >+ bool handleConstantInternalFunction(Node* callTargetNode, VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks); > Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, const InferredType::Descriptor&, Node* value); > Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, const InferredType::Descriptor&, NodeType = GetByOffset); >- bool handleDOMJITGetter(int resultOperand, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction); >- bool handleModuleNamespaceLoad(int resultOperand, SpeculatedType, Node* base, GetByIdStatus); >+ bool handleDOMJITGetter(VirtualRegister result, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction); >+ bool handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType, Node* base, GetByIdStatus); >+ >+ template<typename Bytecode> >+ void handlePutByVal(Bytecode); >+ template <typename Bytecode> >+ void handlePutAccessorById(NodeType, Bytecode); >+ template <typename Bytecode> >+ void handlePutAccessorByVal(NodeType, Bytecode); >+ template <typename Bytecode> >+ void handleNewFunc(NodeType, Bytecode); >+ template <typename Bytecode> >+ void handleNewFuncExp(NodeType, Bytecode); > > // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not > // check the validity of the condition, but it may return a null one if it encounters a contradiction. 
>@@ -204,7 +217,7 @@ private: > Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value); > > void handleGetById( >- int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize); >+ VirtualRegister destination, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize); > void emitPutById( > Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect); > void handlePutById( >@@ -786,7 +799,7 @@ private: > } > > Node* addCall( >- int result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset, >+ VirtualRegister result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset, > SpeculatedType prediction) > { > if (op == TailCall) { >@@ -798,9 +811,8 @@ private: > > Node* call = addCallWithoutSettingResult( > op, OpInfo(signature), callee, argCount, registerOffset, OpInfo(prediction)); >- VirtualRegister resultReg(result); >- if (resultReg.isValid()) >- set(resultReg, call); >+ if (result.isValid()) >+ set(result, call); > return call; > } > >@@ -832,8 +844,8 @@ private: > // chain and use its prediction. If we only have > // inlined tail call frames, we use SpecFullTop > // to avoid a spurious OSR exit. 
>- Instruction* instruction = &m_inlineStackTop->m_profiledBlock->instructions()[bytecodeIndex]; >- OpcodeID opcodeID = Interpreter::getOpcodeID(instruction->u.opcode); >+ auto instruction = m_inlineStackTop->m_profiledBlock->instructions().at(bytecodeIndex); >+ OpcodeID opcodeID = instruction->opcodeID(); > > switch (opcodeID) { > case op_tail_call: >@@ -892,12 +904,12 @@ private: > return getPrediction(m_currentIndex); > } > >- ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action) >+ ArrayMode getArrayMode(ArrayProfile& profile, Array::Action action) > { > ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); >- profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock); >- bool makeSafe = profile->outOfBounds(locker); >- return ArrayMode::fromObserved(locker, profile, action, makeSafe); >+ profile.computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock); >+ bool makeSafe = profile.outOfBounds(locker); >+ return ArrayMode::fromObserved(locker, &profile, action, makeSafe); > } > > Node* makeSafe(Node* node) >@@ -1145,7 +1157,7 @@ private: > > Vector<DelayedSetLocal, 2> m_setLocalQueue; > >- Instruction* m_currentInstruction; >+ const Instruction* m_currentInstruction; > bool m_hasDebuggerEnabled; > bool m_hasAnyForceOSRExits { false }; > }; >@@ -1196,17 +1208,17 @@ void ByteCodeParser::addJumpTo(unsigned bytecodeIndex) > m_inlineStackTop->m_unlinkedBlocks.append(m_currentBlock); > } > >-ByteCodeParser::Terminality ByteCodeParser::handleCall(Instruction* pc, NodeType op, CallMode callMode) >+template<typename CallOp> >+ByteCodeParser::Terminality ByteCodeParser::handleCall(const Instruction* pc, NodeType op, CallMode callMode) > { > static_assert(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), > "op_call, op_tail_call and op_construct should always have the same length"); > static_assert(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call), > "op_call, op_tail_call and op_construct should always 
have the same length"); > >- int result = pc[1].u.operand; >- Node* callTarget = get(VirtualRegister(pc[2].u.operand)); >- int argumentCountIncludingThis = pc[3].u.operand; >- int registerOffset = -pc[4].u.operand; >+ auto bytecode = pc->as<CallOp>(); >+ Node* callTarget = get(bytecode.callee); >+ int registerOffset = -static_cast<int>(bytecode.argv); > > CallLinkStatus callLinkStatus = CallLinkStatus::computeFor( > m_inlineStackTop->m_profiledBlock, currentCodeOrigin(), >@@ -1214,8 +1226,8 @@ ByteCodeParser::Terminality ByteCodeParser::handleCall(Instruction* pc, NodeType > > InlineCallFrame::Kind kind = InlineCallFrame::kindFor(callMode); > >- return handleCall(result, op, kind, OPCODE_LENGTH(op_call), callTarget, >- argumentCountIncludingThis, registerOffset, callLinkStatus, getPrediction()); >+ return handleCall(bytecode.dst, op, kind, OPCODE_LENGTH(op_call), callTarget, >+ bytecode.argc, registerOffset, callLinkStatus, getPrediction()); > } > > void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* callTarget) >@@ -1225,7 +1237,7 @@ void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* call > } > > ByteCodeParser::Terminality ByteCodeParser::handleCall( >- int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize, >+ VirtualRegister result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize, > Node* callTarget, int argumentCountIncludingThis, int registerOffset, > CallLinkStatus callLinkStatus, SpeculatedType prediction) > { >@@ -1257,23 +1269,21 @@ ByteCodeParser::Terminality ByteCodeParser::handleCall( > return callNode->op() == TailCall ? 
Terminal : NonTerminal; > } > >-ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(Instruction* pc, NodeType op, CallMode callMode) >+template<typename CallOp> >+ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(const Instruction* pc, NodeType op, CallMode callMode) > { > static_assert(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_construct_varargs), > "op_call_varargs, op_tail_call_varargs and op_construct_varargs should always have the same length"); > static_assert(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_tail_call_varargs), > "op_call_varargs, op_tail_call_varargs and op_construct_varargs should always have the same length"); > >- int result = pc[1].u.operand; >- int callee = pc[2].u.operand; >- int thisReg = pc[3].u.operand; >- int arguments = pc[4].u.operand; >- int firstFreeReg = pc[5].u.operand; >- int firstVarArgOffset = pc[6].u.operand; >+ auto bytecode = pc->as<CallOp>(); >+ int firstFreeReg = bytecode.firstFree.offset(); >+ int firstVarArgOffset = bytecode.firstVarArg; > > SpeculatedType prediction = getPrediction(); > >- Node* callTarget = get(VirtualRegister(callee)); >+ Node* callTarget = get(bytecode.callee); > > CallLinkStatus callLinkStatus = CallLinkStatus::computeFor( > m_inlineStackTop->m_profiledBlock, currentCodeOrigin(), >@@ -1285,8 +1295,8 @@ ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(Instruction* pc, N > if (callLinkStatus.canOptimize()) { > addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses.addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget); > >- if (handleVarargsInlining(callTarget, result, >- callLinkStatus, firstFreeReg, VirtualRegister(thisReg), VirtualRegister(arguments), >+ if (handleVarargsInlining(callTarget, bytecode.dst, >+ callLinkStatus, firstFreeReg, bytecode.thisValue, bytecode.arguments, > firstVarArgOffset, op, > InlineCallFrame::varargsKindFor(callMode))) { > if (UNLIKELY(m_graph.compilation())) >@@ -1298,10 +1308,10 @@ 
ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(Instruction* pc, N > CallVarargsData* data = m_graph.m_callVarargsData.add(); > data->firstVarArgOffset = firstVarArgOffset; > >- Node* thisChild = get(VirtualRegister(thisReg)); >+ Node* thisChild = get(bytecode.thisValue); > Node* argumentsChild = nullptr; > if (op != TailCallForwardVarargs) >- argumentsChild = get(VirtualRegister(arguments)); >+ argumentsChild = get(bytecode.arguments); > > if (op == TailCallVarargs || op == TailCallForwardVarargs) { > if (allInlineFramesAreTailCalls()) { >@@ -1312,9 +1322,8 @@ ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(Instruction* pc, N > } > > Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, thisChild, argumentsChild); >- VirtualRegister resultReg(result); >- if (resultReg.isValid()) >- set(resultReg, call); >+ if (bytecode.dst.isValid()) >+ set(bytecode.dst, call); > return NonTerminal; > } > >@@ -1540,9 +1549,9 @@ unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountInclu > } > > template<typename ChecksFunctor> >-void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks) >+void ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks) > { >- Instruction* savedCurrentInstruction = m_currentInstruction; >+ const Instruction* savedCurrentInstruction = m_currentInstruction; > CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind); > > ASSERT(inliningCost(callee, argumentCountIncludingThis, kind) != UINT_MAX); >@@ -1573,9 +1582,8 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVar > 
> size_t argumentPositionStart = m_graph.m_argumentPositions.size(); > >- VirtualRegister resultReg(resultOperand); >- if (resultReg.isValid()) >- resultReg = m_inlineStackTop->remapOperand(resultReg); >+ if (result.isValid()) >+ result = m_inlineStackTop->remapOperand(result); > > VariableAccessData* calleeVariable = nullptr; > if (callee.isClosureCall()) { >@@ -1636,7 +1644,7 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVar > // our callee's frame. We emit an ExitOK below from the callee's CodeOrigin. > } > >- InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), resultReg, >+ InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), result, > (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind, continuationBlock); > > // This is where the actual inlining really happens. >@@ -1684,7 +1692,7 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVar > m_currentInstruction = savedCurrentInstruction; > } > >-ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee) >+ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee) > { > VERBOSE_LOG(" Considering callee ", callee, "\n"); > >@@ -1720,7 +1728,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* c > }; > > if 
(InternalFunction* function = callee.internalFunction()) { >- if (handleConstantInternalFunction(callTargetNode, resultOperand, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) { >+ if (handleConstantInternalFunction(callTargetNode, result, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) { > endSpecialCase(); > return CallOptimizationResult::Inlined; > } >@@ -1730,7 +1738,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* c > > Intrinsic intrinsic = callee.intrinsicFor(specializationKind); > if (intrinsic != NoIntrinsic) { >- if (handleIntrinsicCall(callTargetNode, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) { >+ if (handleIntrinsicCall(callTargetNode, result, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) { > endSpecialCase(); > return CallOptimizationResult::Inlined; > } >@@ -1740,7 +1748,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* c > > if (Options::useDOMJIT()) { > if (const DOMJIT::Signature* signature = callee.signatureFor(specializationKind)) { >- if (handleDOMJITCall(callTargetNode, resultOperand, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) { >+ if (handleDOMJITCall(callTargetNode, result, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) { > endSpecialCase(); > return CallOptimizationResult::Inlined; > } >@@ -1756,12 +1764,12 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* c > if (needsToCheckCallee) > emitFunctionChecks(callee, callTargetNode, thisArgument); > }; >- inlineCall(callTargetNode, resultOperand, callee, registerOffset, argumentCountIncludingThis, kind, continuationBlock, insertCheck); >+ 
inlineCall(callTargetNode, result, callee, registerOffset, argumentCountIncludingThis, kind, continuationBlock, insertCheck); > inliningBalance -= myInliningCost; > return CallOptimizationResult::Inlined; > } > >-bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, int resultOperand, >+bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister result, > const CallLinkStatus& callLinkStatus, int firstFreeReg, VirtualRegister thisArgument, > VirtualRegister argumentsArgument, unsigned argumentsOffset, > NodeType callOp, InlineCallFrame::Kind kind) >@@ -1873,7 +1881,7 @@ bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, int resultOpera > // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to > // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without > // calling LoadVarargs twice. >- inlineCall(callTargetNode, resultOperand, callVariant, registerOffset, maxNumArguments, kind, nullptr, insertChecks); >+ inlineCall(callTargetNode, result, callVariant, registerOffset, maxNumArguments, kind, nullptr, insertChecks); > > VERBOSE_LOG("Successful inlining (varargs, monomorphic).\nStack: ", currentCodeOrigin(), "\n"); > return true; >@@ -1890,7 +1898,7 @@ unsigned ByteCodeParser::getInliningBalance(const CallLinkStatus& callLinkStatus > } > > ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining( >- Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, >+ Node* callTargetNode, VirtualRegister result, const CallLinkStatus& callLinkStatus, > int registerOffset, VirtualRegister thisArgument, > int argumentCountIncludingThis, > unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction) >@@ -1905,7 +1913,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining( > // this in cases where we don't need control flow diamonds to check the callee. 
> if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) { > return handleCallVariant( >- callTargetNode, resultOperand, callLinkStatus[0], registerOffset, thisArgument, >+ callTargetNode, result, callLinkStatus[0], registerOffset, thisArgument, > argumentCountIncludingThis, nextOffset, kind, prediction, inliningBalance, nullptr, true); > } > >@@ -1995,7 +2003,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining( > Node* myCallTargetNode = getDirect(calleeReg); > > auto inliningResult = handleCallVariant( >- myCallTargetNode, resultOperand, callLinkStatus[i], registerOffset, >+ myCallTargetNode, result, callLinkStatus[i], registerOffset, > thisArgument, argumentCountIncludingThis, nextOffset, kind, prediction, > inliningBalance, continuationBlock, false); > >@@ -2032,7 +2040,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining( > Node* myCallTargetNode = getDirect(calleeReg); > if (couldTakeSlowPath) { > addCall( >- resultOperand, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis, >+ result, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis, > registerOffset, prediction); > VERBOSE_LOG("We added a call in the slow path\n"); > } else { >@@ -2040,7 +2048,7 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining( > addToGraph(Phantom, myCallTargetNode); > emitArgumentPhantoms(registerOffset, argumentCountIncludingThis); > >- set(VirtualRegister(resultOperand), addToGraph(BottomValue)); >+ set(result, addToGraph(BottomValue)); > VERBOSE_LOG("couldTakeSlowPath was false\n"); > } > >@@ -2065,28 +2073,28 @@ ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining( > } > > template<typename ChecksFunctor> >-bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks) >+bool ByteCodeParser::handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int 
argumentCountIncludingThis, const ChecksFunctor& insertChecks) > { > ASSERT(op == ArithMin || op == ArithMax); > > if (argumentCountIncludingThis == 1) { > insertChecks(); >- double result = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity(); >- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(result))))); >+ double limit = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity(); >+ set(result, addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(limit))))); > return true; > } > > if (argumentCountIncludingThis == 2) { > insertChecks(); >- Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset))); >- addToGraph(Phantom, Edge(result, NumberUse)); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset))); >+ addToGraph(Phantom, Edge(resultNode, NumberUse)); >+ set(result, resultNode); > return true; > } > > if (argumentCountIncludingThis == 3) { > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)))); >+ set(result, addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)))); > return true; > } > >@@ -2095,7 +2103,7 @@ bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOf > } > > template<typename ChecksFunctor> >-bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks) >+bool ByteCodeParser::handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const 
ChecksFunctor& insertChecks) > { > VERBOSE_LOG(" The intrinsic is ", intrinsic, "\n"); > >@@ -2105,7 +2113,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > // o.__defineSetter__("foo", Math.pow) > // > // Which is extremely amusing, but probably not worth optimizing. >- if (!VirtualRegister(resultOperand).isValid()) >+ if (!result.isValid()) > return false; > > switch (intrinsic) { >@@ -2115,7 +2123,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > case AbsIntrinsic: { > if (argumentCountIncludingThis == 1) { // Math.abs() > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN))); >+ set(result, addToGraph(JSConstant, OpInfo(m_constantNaN))); > return true; > } > >@@ -2126,15 +2134,15 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset))); > if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) > node->mergeFlags(NodeMayOverflowInt32InDFG); >- set(VirtualRegister(resultOperand), node); >+ set(result, node); > return true; > } > > case MinIntrinsic: >- return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis, insertChecks); >+ return handleMinMax(result, ArithMin, registerOffset, argumentCountIncludingThis, insertChecks); > > case MaxIntrinsic: >- return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis, insertChecks); >+ return handleMinMax(result, ArithMax, registerOffset, argumentCountIncludingThis, insertChecks); > > #define DFG_ARITH_UNARY(capitalizedName, lowerName) \ > case capitalizedName##Intrinsic: >@@ -2143,7 +2151,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > { > if (argumentCountIncludingThis == 1) { > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(JSConstant, 
OpInfo(m_constantNaN))); >+ set(result, addToGraph(JSConstant, OpInfo(m_constantNaN))); > return true; > } > Arith::UnaryType type = Arith::UnaryType::Sin; >@@ -2158,7 +2166,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > RELEASE_ASSERT_NOT_REACHED(); > } > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset)))); >+ set(result, addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset)))); > return true; > } > >@@ -2166,7 +2174,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > case SqrtIntrinsic: { > if (argumentCountIncludingThis == 1) { > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN))); >+ set(result, addToGraph(JSConstant, OpInfo(m_constantNaN))); > return true; > } > >@@ -2182,7 +2190,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > RELEASE_ASSERT_NOT_REACHED(); > } > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset)))); >+ set(result, addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset)))); > return true; > } > >@@ -2190,13 +2198,13 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > if (argumentCountIncludingThis < 3) { > // Math.pow() and Math.pow(x) return NaN. 
> insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN))); >+ set(result, addToGraph(JSConstant, OpInfo(m_constantNaN))); > return true; > } > insertChecks(); > VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset); > VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset); >- set(VirtualRegister(resultOperand), addToGraph(ArithPow, get(xOperand), get(yOperand))); >+ set(result, addToGraph(ArithPow, get(xOperand), get(yOperand))); > return true; > } > >@@ -2211,7 +2219,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > if (static_cast<unsigned>(argumentCountIncludingThis) >= MIN_SPARSE_ARRAY_INDEX) > return false; > >- ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile, Array::Write); >+ ArrayMode arrayMode = getArrayMode(m_currentInstruction->as<OpCall>().metadata(m_codeBlock).arrayProfile, Array::Write); > if (!arrayMode.isJSArray()) > return false; > switch (arrayMode.type()) { >@@ -2225,7 +2233,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > for (int i = 0; i < argumentCountIncludingThis; ++i) > addVarArgChild(get(virtualRegisterForArgument(i, registerOffset))); > Node* arrayPush = addToGraph(Node::VarArg, ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction)); >- set(VirtualRegister(resultOperand), arrayPush); >+ set(result, arrayPush); > > return true; > } >@@ -2249,7 +2257,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) > return false; > >- ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile, Array::Read); >+ ArrayMode arrayMode = getArrayMode(m_currentInstruction->as<OpCall>().metadata(m_codeBlock).arrayProfile, Array::Read); > if (!arrayMode.isJSArray()) > return false; > >@@ -2314,7 
+2322,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > addVarArgChild(addToGraph(GetButterfly, array)); > > Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo()); >- set(VirtualRegister(resultOperand), arraySlice); >+ set(result, arraySlice); > return true; > } > >@@ -2338,7 +2346,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)) > return false; > >- ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile, Array::Read); >+ ArrayMode arrayMode = getArrayMode(m_currentInstruction->as<OpCall>().metadata(m_codeBlock).arrayProfile, Array::Read); > if (!arrayMode.isJSArray()) > return false; > >@@ -2379,7 +2387,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > addVarArgChild(nullptr); > > Node* node = addToGraph(Node::VarArg, ArrayIndexOf, OpInfo(arrayMode.asWord()), OpInfo()); >- set(VirtualRegister(resultOperand), node); >+ set(result, node); > return true; > } > >@@ -2398,7 +2406,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > if (argumentCountIncludingThis != 1) > return false; > >- ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile, Array::Write); >+ ArrayMode arrayMode = getArrayMode(m_currentInstruction->as<OpCall>().metadata(m_codeBlock).arrayProfile, Array::Write); > if (!arrayMode.isJSArray()) > return false; > switch (arrayMode.type()) { >@@ -2408,7 +2416,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > case Array::ArrayStorage: { > insertChecks(); > Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset))); >- set(VirtualRegister(resultOperand), arrayPop); >+ set(result, arrayPop); > return true; > } > >@@ 
-2491,19 +2499,19 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > for (unsigned i = 0; i < numArgs; ++i) > args.append(get(virtualRegisterForArgument(1 + i, registerOffset))); > >- Node* result; >+ Node* resultNode; > if (numArgs + 1 <= 3) { > while (args.size() < 3) > args.append(nullptr); >- result = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]); >+ resultNode = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]); > } else { > for (Node* node : args) > addVarArgChild(node); > addVarArgChild(nullptr); >- result = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction)); >+ resultNode = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction)); > } > >- set(VirtualRegister(resultOperand), result); >+ set(result, resultNode); > return true; > } > >@@ -2524,7 +2532,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > VirtualRegister radixOperand = virtualRegisterForArgument(2, registerOffset); > parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand)); > } >- set(VirtualRegister(resultOperand), parseInt); >+ set(result, parseInt); > return true; > } > >@@ -2537,7 +2545,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset); > Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand)); > >- set(VirtualRegister(resultOperand), charCode); >+ set(result, charCode); > return true; > } > >@@ -2550,16 +2558,16 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > 
VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset); > Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand)); > >- set(VirtualRegister(resultOperand), charCode); >+ set(result, charCode); > return true; > } > case Clz32Intrinsic: { > insertChecks(); > if (argumentCountIncludingThis == 1) >- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32))))); >+ set(result, addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32))))); > else { > Node* operand = get(virtualRegisterForArgument(1, registerOffset)); >- set(VirtualRegister(resultOperand), addToGraph(ArithClz32, operand)); >+ set(result, addToGraph(ArithClz32, operand)); > } > return true; > } >@@ -2571,7 +2579,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset); > Node* charCode = addToGraph(StringFromCharCode, get(indexOperand)); > >- set(VirtualRegister(resultOperand), charCode); >+ set(result, charCode); > > return true; > } >@@ -2582,7 +2590,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > > insertChecks(); > Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset))); >- set(VirtualRegister(resultOperand), regExpExec); >+ set(result, regExpExec); > > return true; > } >@@ -2633,7 +2641,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > insertChecks(); > Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset)); > Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset))); >- 
set(VirtualRegister(resultOperand), regExpExec); >+ set(result, regExpExec); > > return true; > } >@@ -2643,7 +2651,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > > insertChecks(); > Node* regExpMatch = addToGraph(RegExpMatchFast, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset))); >- set(VirtualRegister(resultOperand), regExpMatch); >+ set(result, regExpMatch); > return true; > } > >@@ -2652,7 +2660,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > return false; > > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset)))); >+ set(result, addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset)))); > return true; > } > >@@ -2661,7 +2669,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > return false; > > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)))); >+ set(result, addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)))); > return true; > } > >@@ -2670,7 +2678,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > return false; > > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)))); >+ set(result, addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)))); > return true; > } > >@@ -2679,7 +2687,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > return false; > > insertChecks(); >- 
set(VirtualRegister(resultOperand), addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse))); >+ set(result, addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse))); > return true; > } > >@@ -2687,7 +2695,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > ASSERT(argumentCountIncludingThis == 2); > > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)))); >+ set(result, addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)))); > return true; > } > >@@ -2738,8 +2746,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > > insertChecks(); > >- Node* result = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))); >+ set(result, resultNode); > return true; > } > >@@ -2748,8 +2756,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > return false; > > insertChecks(); >- Node* result = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), 
get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))); >+ set(result, resultNode); > return true; > } > >@@ -2759,7 +2767,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > case TruncIntrinsic: { > if (argumentCountIncludingThis == 1) { > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN))); >+ set(result, addToGraph(JSConstant, OpInfo(m_constantNaN))); > return true; > } > insertChecks(); >@@ -2776,7 +2784,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > op = ArithTrunc; > } > Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand); >- set(VirtualRegister(resultOperand), roundNode); >+ set(result, roundNode); > return true; > } > case IMulIntrinsic: { >@@ -2787,7 +2795,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset); > Node* left = get(leftOperand); > Node* right = get(rightOperand); >- set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right)); >+ set(result, addToGraph(ArithIMul, left, right)); > return true; > } > >@@ -2795,32 +2803,32 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > if (argumentCountIncludingThis != 1) > return false; > insertChecks(); >- set(VirtualRegister(resultOperand), addToGraph(ArithRandom)); >+ set(result, addToGraph(ArithRandom)); > return true; > } > > case DFGTrueIntrinsic: { > insertChecks(); >- set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true))); >+ set(result, jsConstant(jsBoolean(true))); > return true; > } > > case FTLTrueIntrinsic: { > insertChecks(); >- set(VirtualRegister(resultOperand), jsConstant(jsBoolean(isFTL(m_graph.m_plan.mode)))); >+ set(result, jsConstant(jsBoolean(isFTL(m_graph.m_plan.mode)))); > 
return true; > } > > case OSRExitIntrinsic: { > insertChecks(); > addToGraph(ForceOSRExit); >- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined))); >+ set(result, addToGraph(JSConstant, OpInfo(m_constantUndefined))); > return true; > } > > case IsFinalTierIntrinsic: { > insertChecks(); >- set(VirtualRegister(resultOperand), >+ set(result, > jsConstant(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true))); > return true; > } >@@ -2832,7 +2840,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > if (node->hasHeapPrediction()) > node->setHeapPrediction(SpecInt32Only); > } >- set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined))); >+ set(result, addToGraph(JSConstant, OpInfo(m_constantUndefined))); > return true; > } > >@@ -2842,7 +2850,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > Node* node = get(virtualRegisterForArgument(i, registerOffset)); > addToGraph(Phantom, Edge(node, Int32Use)); > } >- set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true))); >+ set(result, jsConstant(jsBoolean(true))); > return true; > } > >@@ -2852,9 +2860,9 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > insertChecks(); > VirtualRegister operand = virtualRegisterForArgument(1, registerOffset); > if (enableInt52()) >- set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand))); >+ set(result, addToGraph(FiatInt52, get(operand))); > else >- set(VirtualRegister(resultOperand), get(operand)); >+ set(result, get(operand)); > return true; > } > >@@ -2868,8 +2876,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > Node* normalizedKey = addToGraph(NormalizeMapKey, key); > Node* hash = addToGraph(MapHash, normalizedKey); > Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(normalizedKey), Edge(hash)); >- Node* result = 
addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket); >+ set(result, resultNode); > return true; > } > >@@ -2893,8 +2901,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > > FrozenValue* frozenPointer = m_graph.freeze(sentinel); > Node* invertedResult = addToGraph(CompareEqPtr, OpInfo(frozenPointer), bucket); >- Node* result = addToGraph(LogicalNot, invertedResult); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(LogicalNot, invertedResult); >+ set(result, resultNode); > return true; > } > >@@ -2908,7 +2916,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > Node* normalizedKey = addToGraph(NormalizeMapKey, key); > Node* hash = addToGraph(MapHash, normalizedKey); > addToGraph(SetAdd, base, normalizedKey, hash); >- set(VirtualRegister(resultOperand), base); >+ set(result, base); > return true; > } > >@@ -2929,7 +2937,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > addVarArgChild(value); > addVarArgChild(hash); > addToGraph(Node::VarArg, MapSet, OpInfo(0), OpInfo(0)); >- set(VirtualRegister(resultOperand), base); >+ set(result, base); > return true; > } > >@@ -2940,8 +2948,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > insertChecks(); > Node* map = get(virtualRegisterForArgument(1, registerOffset)); > UseKind useKind = intrinsic == JSSetBucketHeadIntrinsic ? 
SetObjectUse : MapObjectUse; >- Node* result = addToGraph(GetMapBucketHead, Edge(map, useKind)); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(GetMapBucketHead, Edge(map, useKind)); >+ set(result, resultNode); > return true; > } > >@@ -2952,8 +2960,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > insertChecks(); > Node* bucket = get(virtualRegisterForArgument(1, registerOffset)); > BucketOwnerType type = intrinsic == JSSetBucketNextIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map; >- Node* result = addToGraph(GetMapBucketNext, OpInfo(type), bucket); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(GetMapBucketNext, OpInfo(type), bucket); >+ set(result, resultNode); > return true; > } > >@@ -2964,8 +2972,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > insertChecks(); > Node* bucket = get(virtualRegisterForArgument(1, registerOffset)); > BucketOwnerType type = intrinsic == JSSetBucketKeyIntrinsic ? 
BucketOwnerType::Set : BucketOwnerType::Map; >- Node* result = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket); >+ set(result, resultNode); > return true; > } > >@@ -2974,8 +2982,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > > insertChecks(); > Node* bucket = get(virtualRegisterForArgument(1, registerOffset)); >- Node* result = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket); >+ set(result, resultNode); > return true; > } > >@@ -2992,9 +3000,9 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > addToGraph(Check, Edge(key, ObjectUse)); > Node* hash = addToGraph(MapHash, key); > Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use)); >- Node* result = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder); >+ Node* resultNode = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder); > >- set(VirtualRegister(resultOperand), result); >+ set(result, resultNode); > return true; > } > >@@ -3012,9 +3020,9 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > Node* hash = addToGraph(MapHash, key); > Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use)); > Node* invertedResult = addToGraph(IsEmpty, holder); >- Node* result = addToGraph(LogicalNot, invertedResult); >+ Node* resultNode = addToGraph(LogicalNot, invertedResult); > >- set(VirtualRegister(resultOperand), result); >+ set(result, resultNode); > return true; > } > >@@ -3032,9 
+3040,9 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > Node* hash = addToGraph(MapHash, key); > Node* holder = addToGraph(WeakMapGet, Edge(map, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use)); > Node* invertedResult = addToGraph(IsEmpty, holder); >- Node* result = addToGraph(LogicalNot, invertedResult); >+ Node* resultNode = addToGraph(LogicalNot, invertedResult); > >- set(VirtualRegister(resultOperand), result); >+ set(result, resultNode); > return true; > } > >@@ -3051,7 +3059,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > addToGraph(Check, Edge(key, ObjectUse)); > Node* hash = addToGraph(MapHash, key); > addToGraph(WeakSetAdd, Edge(base, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use)); >- set(VirtualRegister(resultOperand), base); >+ set(result, base); > return true; > } > >@@ -3075,7 +3083,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > addVarArgChild(Edge(value)); > addVarArgChild(Edge(hash, Int32Use)); > addToGraph(Node::VarArg, WeakMapSet, OpInfo(0), OpInfo(0)); >- set(VirtualRegister(resultOperand), base); >+ set(result, base); > return true; > } > >@@ -3094,8 +3102,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > insertChecks(); > Node* object = get(virtualRegisterForArgument(0, registerOffset)); > Node* key = get(virtualRegisterForArgument(1, registerOffset)); >- Node* result = addToGraph(HasOwnProperty, object, key); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(HasOwnProperty, object, key); >+ set(result, resultNode); > return true; > } > >@@ -3112,8 +3120,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > Node* end = nullptr; > if (argumentCountIncludingThis > 2) > end = get(virtualRegisterForArgument(2, registerOffset)); >- Node* result = addToGraph(StringSlice, thisString, start, end); >- 
set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(StringSlice, thisString, start, end); >+ set(result, resultNode); > return true; > } > >@@ -3126,8 +3134,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > > insertChecks(); > Node* thisString = get(virtualRegisterForArgument(0, registerOffset)); >- Node* result = addToGraph(ToLowerCase, thisString); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(ToLowerCase, thisString); >+ set(result, resultNode); > return true; > } > >@@ -3141,12 +3149,12 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > insertChecks(); > Node* thisNumber = get(virtualRegisterForArgument(0, registerOffset)); > if (argumentCountIncludingThis == 1) { >- Node* result = addToGraph(ToString, thisNumber); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(ToString, thisNumber); >+ set(result, resultNode); > } else { > Node* radix = get(virtualRegisterForArgument(1, registerOffset)); >- Node* result = addToGraph(NumberToStringWithRadix, thisNumber, radix); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(NumberToStringWithRadix, thisNumber, radix); >+ set(result, resultNode); > } > return true; > } >@@ -3157,8 +3165,8 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > > insertChecks(); > Node* input = get(virtualRegisterForArgument(1, registerOffset)); >- Node* result = addToGraph(NumberIsInteger, input); >- set(VirtualRegister(resultOperand), result); >+ Node* resultNode = addToGraph(NumberIsInteger, input); >+ set(result, resultNode); > return true; > } > >@@ -3170,7 +3178,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > if (!isFTL(m_graph.m_plan.mode)) > return false; > insertChecks(); >- set(VirtualRegister(resultOperand), >+ set(result, > addToGraph(CPUIntrinsic, OpInfo(intrinsic), 
OpInfo())); > return true; > #else >@@ -3185,7 +3193,7 @@ bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrin > } > > template<typename ChecksFunctor> >-bool ByteCodeParser::handleDOMJITCall(Node* callTarget, int resultOperand, const DOMJIT::Signature* signature, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks) >+bool ByteCodeParser::handleDOMJITCall(Node* callTarget, VirtualRegister result, const DOMJIT::Signature* signature, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks) > { > if (argumentCountIncludingThis != static_cast<int>(1 + signature->argumentCount)) > return false; >@@ -3198,13 +3206,13 @@ bool ByteCodeParser::handleDOMJITCall(Node* callTarget, int resultOperand, const > ASSERT_WITH_MESSAGE(argumentCountIncludingThis <= JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS, "Currently CallDOM does not support an arbitrary length arguments."); > > insertChecks(); >- addCall(resultOperand, Call, signature, callTarget, argumentCountIncludingThis, registerOffset, prediction); >+ addCall(result, Call, signature, callTarget, argumentCountIncludingThis, registerOffset, prediction); > return true; > } > > > template<typename ChecksFunctor> >-bool ByteCodeParser::handleIntrinsicGetter(int resultOperand, SpeculatedType prediction, const GetByIdVariant& variant, Node* thisNode, const ChecksFunctor& insertChecks) >+bool ByteCodeParser::handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& variant, Node* thisNode, const ChecksFunctor& insertChecks) > { > switch (variant.intrinsic()) { > case TypedArrayByteLengthIntrinsic: { >@@ -3224,14 +3232,14 @@ bool ByteCodeParser::handleIntrinsicGetter(int resultOperand, SpeculatedType pre > Node* lengthNode = addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode); > > if (!logSize) { >- 
set(VirtualRegister(resultOperand), lengthNode); >+ set(result, lengthNode); > return true; > } > > // We can use a BitLShift here because typed arrays will never have a byteLength > // that overflows int32. > Node* shiftNode = jsConstant(jsNumber(logSize)); >- set(VirtualRegister(resultOperand), addToGraph(BitLShift, lengthNode, shiftNode)); >+ set(result, addToGraph(BitLShift, lengthNode, shiftNode)); > > return true; > } >@@ -3248,7 +3256,7 @@ bool ByteCodeParser::handleIntrinsicGetter(int resultOperand, SpeculatedType pre > ASSERT(arrayType != Array::Generic); > }); > >- set(VirtualRegister(resultOperand), addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode)); >+ set(result, addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode)); > > return true; > >@@ -3266,7 +3274,7 @@ bool ByteCodeParser::handleIntrinsicGetter(int resultOperand, SpeculatedType pre > ASSERT(arrayType != Array::Generic); > }); > >- set(VirtualRegister(resultOperand), addToGraph(GetTypedArrayByteOffset, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode)); >+ set(result, addToGraph(GetTypedArrayByteOffset, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode)); > > return true; > } >@@ -3297,11 +3305,11 @@ bool ByteCodeParser::handleIntrinsicGetter(int resultOperand, SpeculatedType pre > // OK, only one prototype is found. We perform constant folding here. > // This information is important for super's constructor call to get new.target constant. 
> if (prototype && canFold) { >- set(VirtualRegister(resultOperand), weakJSConstant(prototype)); >+ set(result, weakJSConstant(prototype)); > return true; > } > >- set(VirtualRegister(resultOperand), addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), thisNode)); >+ set(result, addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), thisNode)); > return true; > } > >@@ -3318,7 +3326,7 @@ static void blessCallDOMGetter(Node* node) > node->clearFlags(NodeMustGenerate); > } > >-bool ByteCodeParser::handleDOMJITGetter(int resultOperand, const GetByIdVariant& variant, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction) >+bool ByteCodeParser::handleDOMJITGetter(VirtualRegister result, const GetByIdVariant& variant, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction) > { > if (!variant.domAttribute()) > return false; >@@ -3358,11 +3366,11 @@ bool ByteCodeParser::handleDOMJITGetter(int resultOperand, const GetByIdVariant& > } else > callDOMGetterNode = addToGraph(CallDOMGetter, OpInfo(callDOMGetterData), OpInfo(prediction), thisNode); > blessCallDOMGetter(callDOMGetterNode); >- set(VirtualRegister(resultOperand), callDOMGetterNode); >+ set(result, callDOMGetterNode); > return true; > } > >-bool ByteCodeParser::handleModuleNamespaceLoad(int resultOperand, SpeculatedType prediction, Node* base, GetByIdStatus getById) >+bool ByteCodeParser::handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType prediction, Node* base, GetByIdStatus getById) > { > if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) > return false; >@@ -3385,16 +3393,16 @@ bool ByteCodeParser::handleModuleNamespaceLoad(int resultOperand, SpeculatedType > // would recompile. But if we can fold it here, we avoid the exit. 
> m_graph.freeze(getById.moduleEnvironment()); > if (JSValue value = m_graph.tryGetConstantClosureVar(getById.moduleEnvironment(), getById.scopeOffset())) { >- set(VirtualRegister(resultOperand), weakJSConstant(value)); >+ set(result, weakJSConstant(value)); > return true; > } >- set(VirtualRegister(resultOperand), addToGraph(GetClosureVar, OpInfo(getById.scopeOffset().offset()), OpInfo(prediction), weakJSConstant(getById.moduleEnvironment()))); >+ set(result, addToGraph(GetClosureVar, OpInfo(getById.scopeOffset().offset()), OpInfo(prediction), weakJSConstant(getById.moduleEnvironment()))); > return true; > } > > template<typename ChecksFunctor> > bool ByteCodeParser::handleTypedArrayConstructor( >- int resultOperand, InternalFunction* function, int registerOffset, >+ VirtualRegister result, InternalFunction* function, int registerOffset, > int argumentCountIncludingThis, TypedArrayType type, const ChecksFunctor& insertChecks) > { > if (!isTypedView(type)) >@@ -3444,21 +3452,21 @@ bool ByteCodeParser::handleTypedArrayConstructor( > return false; > > insertChecks(); >- set(VirtualRegister(resultOperand), >+ set(result, > addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset)))); > return true; > } > > template<typename ChecksFunctor> > bool ByteCodeParser::handleConstantInternalFunction( >- Node* callTargetNode, int resultOperand, InternalFunction* function, int registerOffset, >+ Node* callTargetNode, VirtualRegister result, InternalFunction* function, int registerOffset, > int argumentCountIncludingThis, CodeSpecializationKind kind, SpeculatedType prediction, const ChecksFunctor& insertChecks) > { > VERBOSE_LOG(" Handling constant internal function ", JSValue(function), "\n"); > > // It so happens that the code below assumes that the result operand is valid. It's extremely > // unlikely that the result operand would be invalid - you'd have to call this via a setter call. 
>- if (!VirtualRegister(resultOperand).isValid()) >+ if (!result.isValid()) > return false; > > if (kind == CodeForConstruct) { >@@ -3476,14 +3484,14 @@ bool ByteCodeParser::handleConstantInternalFunction( > > insertChecks(); > if (argumentCountIncludingThis == 2) { >- set(VirtualRegister(resultOperand), >+ set(result, > addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset)))); > return true; > } > > for (int i = 1; i < argumentCountIncludingThis; ++i) > addVarArgChild(get(virtualRegisterForArgument(i, registerOffset))); >- set(VirtualRegister(resultOperand), >+ set(result, > addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(argumentCountIncludingThis - 1))); > return true; > } >@@ -3494,9 +3502,9 @@ bool ByteCodeParser::handleConstantInternalFunction( > > insertChecks(); > if (argumentCountIncludingThis <= 1) >- set(VirtualRegister(resultOperand), jsConstant(jsNumber(0))); >+ set(result, jsConstant(jsNumber(0))); > else >- set(VirtualRegister(resultOperand), addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)))); >+ set(result, addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)))); > > return true; > } >@@ -3504,17 +3512,17 @@ bool ByteCodeParser::handleConstantInternalFunction( > if (function->classInfo() == StringConstructor::info()) { > insertChecks(); > >- Node* result; >+ Node* resultNode; > > if (argumentCountIncludingThis <= 1) >- result = jsConstant(m_vm->smallStrings.emptyString()); >+ resultNode = jsConstant(m_vm->smallStrings.emptyString()); > else >- result = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset))); >+ resultNode = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset))); > > if (kind == CodeForConstruct) >- result = addToGraph(NewStringObject, 
OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->stringObjectStructure())), result); >+ resultNode = addToGraph(NewStringObject, OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->stringObjectStructure())), resultNode); > >- set(VirtualRegister(resultOperand), result); >+ set(result, resultNode); > return true; > } > >@@ -3522,20 +3530,20 @@ bool ByteCodeParser::handleConstantInternalFunction( > if (function->classInfo() == ObjectConstructor::info() && kind == CodeForCall) { > insertChecks(); > >- Node* result; >+ Node* resultNode; > if (argumentCountIncludingThis <= 1) >- result = addToGraph(NewObject, OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->objectStructureForObjectConstructor()))); >+ resultNode = addToGraph(NewObject, OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->objectStructureForObjectConstructor()))); > else >- result = addToGraph(CallObjectConstructor, OpInfo(m_graph.freeze(function->globalObject(*m_vm))), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))); >- set(VirtualRegister(resultOperand), result); >+ resultNode = addToGraph(CallObjectConstructor, OpInfo(m_graph.freeze(function->globalObject(*m_vm))), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))); >+ set(result, resultNode); > return true; > } > > for (unsigned typeIndex = 0; typeIndex < NumberOfTypedArrayTypes; ++typeIndex) { >- bool result = handleTypedArrayConstructor( >- resultOperand, function, registerOffset, argumentCountIncludingThis, >+ bool handled = handleTypedArrayConstructor( >+ result, function, registerOffset, argumentCountIncludingThis, > indexToTypedArrayType(typeIndex), insertChecks); >- if (result) >+ if (handled) > return true; > } > >@@ -3968,7 +3976,7 @@ Node* ByteCodeParser::store(Node* base, unsigned identifier, const PutByIdVarian > } > > void ByteCodeParser::handleGetById( >- int destinationOperand, SpeculatedType prediction, Node* base, unsigned 
identifierNumber, >+ VirtualRegister destination, SpeculatedType prediction, Node* base, unsigned identifierNumber, > GetByIdStatus getByIdStatus, AccessType type, unsigned instructionSize) > { > // Attempt to reduce the set of things in the GetByIdStatus. >@@ -3996,7 +4004,7 @@ void ByteCodeParser::handleGetById( > getById = getByIdStatus.makesCalls() ? GetByIdDirectFlush : GetByIdDirect; > > if (getById != TryGetById && getByIdStatus.isModuleNamespace()) { >- if (handleModuleNamespaceLoad(destinationOperand, prediction, base, getByIdStatus)) { >+ if (handleModuleNamespaceLoad(destination, prediction, base, getByIdStatus)) { > if (UNLIKELY(m_graph.compilation())) > m_graph.compilation()->noticeInlinedGetById(); > return; >@@ -4010,7 +4018,7 @@ void ByteCodeParser::handleGetById( > ASSERT(!getByIdStatus.makesCalls()); > GetByIdVariant variant = getByIdStatus[0]; > ASSERT(variant.domAttribute()); >- if (handleDOMJITGetter(destinationOperand, variant, base, identifierNumber, prediction)) { >+ if (handleDOMJITGetter(destination, variant, base, identifierNumber, prediction)) { > if (UNLIKELY(m_graph.compilation())) > m_graph.compilation()->noticeInlinedGetById(); > return; >@@ -4019,7 +4027,7 @@ void ByteCodeParser::handleGetById( > > ASSERT(type == AccessType::Get || type == AccessType::GetDirect || !getByIdStatus.makesCalls()); > if (!getByIdStatus.isSimple() || !getByIdStatus.numVariants() || !Options::useAccessInlining()) { >- set(VirtualRegister(destinationOperand), >+ set(destination, > addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); > return; > } >@@ -4032,7 +4040,7 @@ void ByteCodeParser::handleGetById( > if (getByIdStatus.makesCalls() || !isFTL(m_graph.m_plan.mode) > || !Options::usePolymorphicAccessInlining() > || getByIdStatus.numVariants() > Options::maxPolymorphicAccessInliningListSize()) { >- set(VirtualRegister(destinationOperand), >+ set(destination, > addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); > 
return; > } >@@ -4046,7 +4054,7 @@ void ByteCodeParser::handleGetById( > // of checks and those checks are not watchpointable. > for (const GetByIdVariant& variant : getByIdStatus.variants()) { > if (variant.intrinsic() != NoIntrinsic) { >- set(VirtualRegister(destinationOperand), >+ set(destination, > addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); > return; > } >@@ -4061,7 +4069,7 @@ void ByteCodeParser::handleGetById( > > GetByOffsetMethod method = planLoad(variant.conditionSet()); > if (!method) { >- set(VirtualRegister(destinationOperand), >+ set(destination, > addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); > return; > } >@@ -4076,7 +4084,7 @@ void ByteCodeParser::handleGetById( > MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add(); > data->cases = cases; > data->identifierNumber = identifierNumber; >- set(VirtualRegister(destinationOperand), >+ set(destination, > addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base)); > return; > } >@@ -4088,7 +4096,7 @@ void ByteCodeParser::handleGetById( > > Node* loadedValue = load(prediction, base, identifierNumber, variant); > if (!loadedValue) { >- set(VirtualRegister(destinationOperand), >+ set(destination, > addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); > return; > } >@@ -4098,13 +4106,13 @@ void ByteCodeParser::handleGetById( > > ASSERT(type == AccessType::Get || type == AccessType::GetDirect || !variant.callLinkStatus()); > if (!variant.callLinkStatus() && variant.intrinsic() == NoIntrinsic) { >- set(VirtualRegister(destinationOperand), loadedValue); >+ set(destination, loadedValue); > return; > } > > Node* getter = addToGraph(GetGetter, loadedValue); > >- if (handleIntrinsicGetter(destinationOperand, prediction, variant, base, >+ if (handleIntrinsicGetter(destination, prediction, variant, base, > [&] () { > addToGraph(CheckCell, OpInfo(m_graph.freeze(variant.intrinsicFunction())), getter); > })) { >@@ 
-4150,7 +4158,7 @@ void ByteCodeParser::handleGetById( > addToGraph(ExitOK); > > handleCall( >- destinationOperand, Call, InlineCallFrame::GetterCall, instructionSize, >+ destination, Call, InlineCallFrame::GetterCall, instructionSize, > getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction); > } > >@@ -4338,7 +4346,7 @@ void ByteCodeParser::handlePutById( > addToGraph(ExitOK); > > handleCall( >- VirtualRegister().offset(), Call, InlineCallFrame::SetterCall, >+ VirtualRegister(), Call, InlineCallFrame::SetterCall, > OPCODE_LENGTH(op_put_by_id), setter, numberOfParameters - 1, registerOffset, > *variant.callLinkStatus(), SpecOther); > return; >@@ -4376,7 +4384,7 @@ static uint64_t makeDynamicVarOpInfo(unsigned identifierNumber, unsigned getPutI > // Doesn't allow using `continue`. > #define NEXT_OPCODE(name) \ > if (true) { \ >- m_currentIndex += OPCODE_LENGTH(name); \ >+ m_currentIndex += currentInstruction->size(); \ > goto WTF_CONCAT(NEXT_OPCODE_, __LINE__); /* Need a unique label: usable more than once per function. */ \ > } else \ > WTF_CONCAT(NEXT_OPCODE_, __LINE__): \ >@@ -4406,7 +4414,7 @@ static uint64_t makeDynamicVarOpInfo(unsigned identifierNumber, unsigned getPutI > > void ByteCodeParser::parseBlock(unsigned limit) > { >- Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin(); >+ auto& instructions = m_inlineStackTop->m_codeBlock->instructions(); > unsigned blockBegin = m_currentIndex; > > // If we are the first basic block, introduce markers for arguments. This allows >@@ -4458,9 +4466,9 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > // Switch on the current bytecode opcode. >- Instruction* currentInstruction = instructionsBegin + m_currentIndex; >+ const Instruction* currentInstruction = instructions.at(m_currentIndex).ptr(); > m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls. 
>- OpcodeID opcodeID = Interpreter::getOpcodeID(currentInstruction->u.opcode); >+ OpcodeID opcodeID = currentInstruction->opcodeID(); > > VERBOSE_LOG(" parsing ", currentCodeOrigin(), ": ", opcodeID, "\n"); > >@@ -4485,8 +4493,9 @@ void ByteCodeParser::parseBlock(unsigned limit) > case op_to_this: { > Node* op1 = getThis(); > if (op1->op() != ToThis) { >- Structure* cachedStructure = currentInstruction[2].u.structure.get(); >- if (currentInstruction[3].u.toThisStatus != ToThisOK >+ auto metadata = currentInstruction->as<OpToThis>().metadata(m_codeBlock); >+ Structure* cachedStructure = metadata.cachedStructure.get(); >+ if (metadata.toThisStatus != ToThisOK > || !cachedStructure > || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis > || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex) >@@ -4504,12 +4513,12 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_create_this: { >- auto& bytecode = *reinterpret_cast<OpCreateThis*>(currentInstruction); >- Node* callee = get(VirtualRegister(bytecode.callee())); >+ auto bytecode = currentInstruction->as<OpCreateThis>(); >+ Node* callee = get(VirtualRegister(bytecode.callee)); > > JSFunction* function = callee->dynamicCastConstant<JSFunction*>(*m_vm); > if (!function) { >- JSCell* cachedFunction = bytecode.cachedCallee().unvalidatedGet(); >+ JSCell* cachedFunction = bytecode.metadata(m_codeBlock).cachedCallee.unvalidatedGet(); > if (cachedFunction > && cachedFunction != JSCell::seenMultipleCalleeObjects() > && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) { >@@ -4548,243 +4557,256 @@ void ByteCodeParser::parseBlock(unsigned limit) > ASSERT(isInlineOffset(knownPolyProtoOffset)); > addToGraph(PutByOffset, OpInfo(data), object, object, weakJSConstant(prototype)); > } >- set(VirtualRegister(bytecode.dst()), object); >+ set(VirtualRegister(bytecode.dst), object); > alreadyEmitted = true; > } > } > } > } > if (!alreadyEmitted) { >- 
set(VirtualRegister(bytecode.dst()), >- addToGraph(CreateThis, OpInfo(bytecode.inlineCapacity()), callee)); >+ set(VirtualRegister(bytecode.dst), >+ addToGraph(CreateThis, OpInfo(bytecode.inlineCapacity), callee)); > } > NEXT_OPCODE(op_create_this); > } > > case op_new_object: { >- set(VirtualRegister(currentInstruction[1].u.operand), >+ auto bytecode = currentInstruction->as<OpNewObject>(); >+ set(bytecode.dst, > addToGraph(NewObject, >- OpInfo(m_graph.registerStructure(currentInstruction[3].u.objectAllocationProfile->structure())))); >+ OpInfo(m_graph.registerStructure(bytecode.metadata(m_codeBlock).allocationProfile.structure())))); > NEXT_OPCODE(op_new_object); > } > > case op_new_array: { >- int startOperand = currentInstruction[2].u.operand; >- int numOperands = currentInstruction[3].u.operand; >- ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile; >+ auto bytecode = currentInstruction->as<OpNewArray>(); >+ int startOperand = bytecode.argv.offset(); >+ int numOperands = bytecode.argc; >+ ArrayAllocationProfile& profile = bytecode.metadata(m_codeBlock).allocationProfile; > for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx) > addVarArgChild(get(VirtualRegister(operandIdx))); >- unsigned vectorLengthHint = std::max<unsigned>(profile->vectorLengthHint(), numOperands); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(vectorLengthHint))); >+ unsigned vectorLengthHint = std::max<unsigned>(profile.vectorLengthHint(), numOperands); >+ set(bytecode.dst, addToGraph(Node::VarArg, NewArray, OpInfo(profile.selectIndexingType()), OpInfo(vectorLengthHint))); > NEXT_OPCODE(op_new_array); > } > > case op_new_array_with_spread: { >- int startOperand = currentInstruction[2].u.operand; >- int numOperands = currentInstruction[3].u.operand; >- const BitVector& bitVector = 
m_inlineStackTop->m_profiledBlock->unlinkedCodeBlock()->bitVector(currentInstruction[4].u.unsignedValue); >+ auto bytecode = currentInstruction->as<OpNewArrayWithSpread>(); >+ int startOperand = bytecode.argv.offset(); >+ int numOperands = bytecode.argc; >+ const BitVector& bitVector = m_inlineStackTop->m_profiledBlock->unlinkedCodeBlock()->bitVector(bytecode.bitVector); > for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx) > addVarArgChild(get(VirtualRegister(operandIdx))); > > BitVector* copy = m_graph.m_bitVectors.add(bitVector); > ASSERT(*copy == bitVector); > >- set(VirtualRegister(currentInstruction[1].u.operand), >+ set(bytecode.dst, > addToGraph(Node::VarArg, NewArrayWithSpread, OpInfo(copy))); > NEXT_OPCODE(op_new_array_with_spread); > } > > case op_spread: { >- set(VirtualRegister(currentInstruction[1].u.operand), >- addToGraph(Spread, get(VirtualRegister(currentInstruction[2].u.operand)))); >+ auto bytecode = currentInstruction->as<OpSpread>(); >+ set(bytecode.dst, >+ addToGraph(Spread, get(bytecode.argument))); > NEXT_OPCODE(op_spread); > } > > case op_new_array_with_size: { >- int lengthOperand = currentInstruction[2].u.operand; >- ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile; >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(VirtualRegister(lengthOperand)))); >+ auto bytecode = currentInstruction->as<OpNewArrayWithSize>(); >+ ArrayAllocationProfile& profile = bytecode.metadata(m_codeBlock).allocationProfile; >+ set(bytecode.dst, addToGraph(NewArrayWithSize, OpInfo(profile.selectIndexingType()), get(bytecode.length))); > NEXT_OPCODE(op_new_array_with_size); > } > > case op_new_array_buffer: { >- auto& bytecode = *reinterpret_cast<OpNewArrayBuffer*>(currentInstruction); >+ auto bytecode = currentInstruction->as<OpNewArrayBuffer>(); > // Unfortunately, we can't allocate a new JSImmutableButterfly if the 
profile tells us new information because we > // cannot allocate from compilation threads. > WTF::loadLoadFence(); >- FrozenValue* frozen = get(VirtualRegister(bytecode.immutableButterfly()))->constant(); >+ FrozenValue* frozen = get(VirtualRegister(bytecode.immutableButterfly))->constant(); > WTF::loadLoadFence(); > JSImmutableButterfly* immutableButterfly = frozen->cast<JSImmutableButterfly*>(); > NewArrayBufferData data { }; > data.indexingMode = immutableButterfly->indexingMode(); > data.vectorLengthHint = immutableButterfly->toButterfly()->vectorLength(); > >- set(VirtualRegister(bytecode.dst()), addToGraph(NewArrayBuffer, OpInfo(frozen), OpInfo(data.asQuadWord))); >+ set(VirtualRegister(bytecode.dst), addToGraph(NewArrayBuffer, OpInfo(frozen), OpInfo(data.asQuadWord))); > NEXT_OPCODE(op_new_array_buffer); > } > > case op_new_regexp: { >- VirtualRegister regExpRegister(currentInstruction[2].u.operand); >- ASSERT(regExpRegister.isConstant()); >- FrozenValue* frozenRegExp = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(regExpRegister.offset())); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(frozenRegExp), jsConstant(jsNumber(0)))); >+ auto bytecode = currentInstruction->as<OpNewRegexp>(); >+ ASSERT(bytecode.regexp.isConstant()); >+ FrozenValue* frozenRegExp = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.regexp.offset())); >+ set(bytecode.dst, addToGraph(NewRegexp, OpInfo(frozenRegExp), jsConstant(jsNumber(0)))); > NEXT_OPCODE(op_new_regexp); > } > > case op_get_rest_length: { >+ auto bytecode = currentInstruction->as<OpGetRestLength>(); > InlineCallFrame* inlineCallFrame = this->inlineCallFrame(); > Node* length; > if (inlineCallFrame && !inlineCallFrame->isVarargs()) { > unsigned argumentsLength = inlineCallFrame->argumentCountIncludingThis - 1; >- unsigned numParamsToSkip = currentInstruction[2].u.unsignedValue; > JSValue restLength; >- if (argumentsLength <= numParamsToSkip) 
>+ if (argumentsLength <= bytecode.numParametersToSkip) > restLength = jsNumber(0); > else >- restLength = jsNumber(argumentsLength - numParamsToSkip); >+ restLength = jsNumber(argumentsLength - bytecode.numParametersToSkip); > > length = jsConstant(restLength); > } else >- length = addToGraph(GetRestLength, OpInfo(currentInstruction[2].u.unsignedValue)); >- set(VirtualRegister(currentInstruction[1].u.operand), length); >+ length = addToGraph(GetRestLength, OpInfo(bytecode.numParametersToSkip)); >+ set(bytecode.dst, length); > NEXT_OPCODE(op_get_rest_length); > } > > case op_create_rest: { >+ auto bytecode = currentInstruction->as<OpCreateRest>(); > noticeArgumentsUse(); >- Node* arrayLength = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), >- addToGraph(CreateRest, OpInfo(currentInstruction[3].u.unsignedValue), arrayLength)); >+ Node* arrayLength = get(bytecode.arraySize); >+ set(bytecode.dst, >+ addToGraph(CreateRest, OpInfo(bytecode.numParametersToSkip), arrayLength)); > NEXT_OPCODE(op_create_rest); > } > > // === Bitwise operations === > > case op_bitand: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitAnd, op1, op2)); >+ auto bytecode = currentInstruction->as<OpBitand>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(BitAnd, op1, op2)); > NEXT_OPCODE(op_bitand); > } > > case op_bitor: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitOr, op1, op2)); >+ auto bytecode = currentInstruction->as<OpBitor>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(BitOr, op1, op2)); > 
NEXT_OPCODE(op_bitor); > } > > case op_bitxor: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitXor, op1, op2)); >+ auto bytecode = currentInstruction->as<OpBitxor>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(BitXor, op1, op2)); > NEXT_OPCODE(op_bitxor); > } > > case op_rshift: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), >- addToGraph(BitRShift, op1, op2)); >+ auto bytecode = currentInstruction->as<OpRshift>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(BitRShift, op1, op2)); > NEXT_OPCODE(op_rshift); > } > > case op_lshift: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), >- addToGraph(BitLShift, op1, op2)); >+ auto bytecode = currentInstruction->as<OpLshift>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(BitLShift, op1, op2)); > NEXT_OPCODE(op_lshift); > } > > case op_urshift: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), >- addToGraph(BitURShift, op1, op2)); >+ auto bytecode = currentInstruction->as<OpUrshift>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(BitURShift, op1, op2)); > NEXT_OPCODE(op_urshift); > } > > case op_unsigned: { >- set(VirtualRegister(currentInstruction[1].u.operand), >- makeSafe(addToGraph(UInt32ToNumber, 
get(VirtualRegister(currentInstruction[2].u.operand))))); >+ auto bytecode = currentInstruction->as<OpUnsigned>(); >+ set(bytecode.dst, makeSafe(addToGraph(UInt32ToNumber, get(bytecode.operand)))); > NEXT_OPCODE(op_unsigned); > } > > // === Increment/Decrement opcodes === > > case op_inc: { >- int srcDst = currentInstruction[1].u.operand; >- VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst); >- Node* op = get(srcDstVirtualRegister); >- set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne))))); >+ auto bytecode = currentInstruction->as<OpInc>(); >+ Node* op = get(bytecode.srcDst); >+ set(bytecode.srcDst, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne))))); > NEXT_OPCODE(op_inc); > } > > case op_dec: { >- int srcDst = currentInstruction[1].u.operand; >- VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst); >- Node* op = get(srcDstVirtualRegister); >- set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne))))); >+ auto bytecode = currentInstruction->as<OpDec>(); >+ Node* op = get(bytecode.srcDst); >+ set(bytecode.srcDst, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne))))); > NEXT_OPCODE(op_dec); > } > > // === Arithmetic operations === > > case op_add: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >+ auto bytecode = currentInstruction->as<OpAdd>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > if (op1->hasNumberResult() && op2->hasNumberResult()) >- set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithAdd, op1, op2))); >+ set(bytecode.dst, makeSafe(addToGraph(ArithAdd, op1, op2))); > else >- set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueAdd, op1, op2))); >+ set(bytecode.dst, makeSafe(addToGraph(ValueAdd, 
op1, op2))); > NEXT_OPCODE(op_add); > } > > case op_sub: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithSub, op1, op2))); >+ auto bytecode = currentInstruction->as<OpSub>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, makeSafe(addToGraph(ArithSub, op1, op2))); > NEXT_OPCODE(op_sub); > } > > case op_negate: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpNegate>(); >+ Node* op1 = get(VirtualRegister(bytecode.operand)); > if (op1->hasNumberResult()) >- set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithNegate, op1))); >+ set(bytecode.dst, makeSafe(addToGraph(ArithNegate, op1))); > else >- set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueNegate, op1))); >+ set(bytecode.dst, makeSafe(addToGraph(ValueNegate, op1))); > NEXT_OPCODE(op_negate); > } > > case op_mul: { > // Multiply requires that the inputs are not truncated, unfortunately. 
>- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMul, op1, op2))); >+ auto bytecode = currentInstruction->as<OpMul>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, makeSafe(addToGraph(ArithMul, op1, op2))); > NEXT_OPCODE(op_mul); > } > > case op_mod: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMod, op1, op2))); >+ auto bytecode = currentInstruction->as<OpMod>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, makeSafe(addToGraph(ArithMod, op1, op2))); > NEXT_OPCODE(op_mod); > } > > case op_pow: { > // FIXME: ArithPow(Untyped, Untyped) should be supported as the same to ArithMul, ArithSub etc. 
> // https://bugs.webkit.org/show_bug.cgi?id=160012 >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ArithPow, op1, op2)); >+ auto bytecode = currentInstruction->as<OpPow>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(ArithPow, op1, op2)); > NEXT_OPCODE(op_pow); > } > > case op_div: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), makeDivSafe(addToGraph(ArithDiv, op1, op2))); >+ auto bytecode = currentInstruction->as<OpDiv>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, makeDivSafe(addToGraph(ArithDiv, op1, op2))); > NEXT_OPCODE(op_div); > } > >@@ -4798,43 +4820,46 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_mov: { >- Node* op = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), op); >+ auto bytecode = currentInstruction->as<OpMov>(); >+ Node* op = get(bytecode.src); >+ set(bytecode.dst, op); > NEXT_OPCODE(op_mov); > } > > case op_check_tdz: { >- addToGraph(CheckNotEmpty, get(VirtualRegister(currentInstruction[1].u.operand))); >+ auto bytecode = currentInstruction->as<OpCheckTdz>(); >+ addToGraph(CheckNotEmpty, get(bytecode.target)); > NEXT_OPCODE(op_check_tdz); > } > > case op_overrides_has_instance: { >- auto& bytecode = *reinterpret_cast<OpOverridesHasInstance*>(currentInstruction); >+ auto bytecode = currentInstruction->as<OpOverridesHasInstance>(); > JSFunction* defaultHasInstanceSymbolFunction = m_inlineStackTop->m_codeBlock->globalObjectFor(currentCodeOrigin())->functionProtoHasInstanceSymbolFunction(); > >- Node* constructor = get(VirtualRegister(bytecode.constructor())); 
>- Node* hasInstanceValue = get(VirtualRegister(bytecode.hasInstanceValue())); >+ Node* constructor = get(VirtualRegister(bytecode.constructor)); >+ Node* hasInstanceValue = get(VirtualRegister(bytecode.hasInstanceValue)); > >- set(VirtualRegister(bytecode.dst()), addToGraph(OverridesHasInstance, OpInfo(m_graph.freeze(defaultHasInstanceSymbolFunction)), constructor, hasInstanceValue)); >+ set(VirtualRegister(bytecode.dst), addToGraph(OverridesHasInstance, OpInfo(m_graph.freeze(defaultHasInstanceSymbolFunction)), constructor, hasInstanceValue)); > NEXT_OPCODE(op_overrides_has_instance); > } > > case op_identity_with_profile: { >- Node* src = get(VirtualRegister(currentInstruction[1].u.operand)); >- SpeculatedType speculation = static_cast<SpeculatedType>(currentInstruction[2].u.operand) << 32 | static_cast<SpeculatedType>(currentInstruction[3].u.operand); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IdentityWithProfile, OpInfo(speculation), src)); >+ auto bytecode = currentInstruction->as<OpIdentityWithProfile>(); >+ Node* srcDst = get(bytecode.srcDst); >+ SpeculatedType speculation = static_cast<SpeculatedType>(bytecode.topProfile) << 32 | static_cast<SpeculatedType>(bytecode.bottomProfile); >+ set(bytecode.srcDst, addToGraph(IdentityWithProfile, OpInfo(speculation), srcDst)); > NEXT_OPCODE(op_identity_with_profile); > } > > case op_instanceof: { >- auto& bytecode = *reinterpret_cast<OpInstanceof*>(currentInstruction); >+ auto bytecode = currentInstruction->as<OpInstanceof>(); > > InstanceOfStatus status = InstanceOfStatus::computeFor( > m_inlineStackTop->m_profiledBlock, m_inlineStackTop->m_baselineMap, > m_currentIndex); > >- Node* value = get(VirtualRegister(bytecode.value())); >- Node* prototype = get(VirtualRegister(bytecode.prototype())); >+ Node* value = get(bytecode.value); >+ Node* prototype = get(bytecode.prototype); > > // Only inline it if it's Simple with a commonPrototype; bottom/top or variable > // prototypes both get handled 
by the IC. This makes sense for bottom (unprofiled) >@@ -4862,86 +4887,96 @@ void ByteCodeParser::parseBlock(unsigned limit) > > if (allOK) { > Node* match = addToGraph(MatchStructure, OpInfo(data), value); >- set(VirtualRegister(bytecode.dst()), match); >+ set(bytecode.dst, match); > NEXT_OPCODE(op_instanceof); > } > } > >- set(VirtualRegister(bytecode.dst()), addToGraph(InstanceOf, value, prototype)); >+ set(bytecode.dst, addToGraph(InstanceOf, value, prototype)); > NEXT_OPCODE(op_instanceof); > } > > case op_instanceof_custom: { >- auto& bytecode = *reinterpret_cast<OpInstanceofCustom*>(currentInstruction); >- Node* value = get(VirtualRegister(bytecode.value())); >- Node* constructor = get(VirtualRegister(bytecode.constructor())); >- Node* hasInstanceValue = get(VirtualRegister(bytecode.hasInstanceValue())); >- set(VirtualRegister(bytecode.dst()), addToGraph(InstanceOfCustom, value, constructor, hasInstanceValue)); >+ auto bytecode = currentInstruction->as<OpInstanceofCustom>(); >+ Node* value = get(bytecode.value); >+ Node* constructor = get(bytecode.constructor); >+ Node* hasInstanceValue = get(bytecode.hasInstanceValue); >+ set(bytecode.dst, addToGraph(InstanceOfCustom, value, constructor, hasInstanceValue)); > NEXT_OPCODE(op_instanceof_custom); > } > case op_is_empty: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsEmpty, value)); >+ auto bytecode = currentInstruction->as<OpIsEmpty>(); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(IsEmpty, value)); > NEXT_OPCODE(op_is_empty); > } > case op_is_undefined: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsUndefined, value)); >+ auto bytecode = currentInstruction->as<OpIsUndefined>(); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(IsUndefined, value)); > 
NEXT_OPCODE(op_is_undefined); > } > > case op_is_boolean: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsBoolean, value)); >+ auto bytecode = currentInstruction->as<OpIsBoolean>(); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(IsBoolean, value)); > NEXT_OPCODE(op_is_boolean); > } > > case op_is_number: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsNumber, value)); >+ auto bytecode = currentInstruction->as<OpIsNumber>(); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(IsNumber, value)); > NEXT_OPCODE(op_is_number); > } > > case op_is_cell_with_type: { >- JSType type = static_cast<JSType>(currentInstruction[3].u.operand); >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsCellWithType, OpInfo(type), value)); >+ auto bytecode = currentInstruction->as<OpIsCellWithType>(); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(IsCellWithType, OpInfo(bytecode.type), value)); > NEXT_OPCODE(op_is_cell_with_type); > } > > case op_is_object: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObject, value)); >+ auto bytecode = currentInstruction->as<OpIsObject>(); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(IsObject, value)); > NEXT_OPCODE(op_is_object); > } > > case op_is_object_or_null: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObjectOrNull, value)); >+ auto bytecode = currentInstruction->as<OpIsObjectOrNull>(); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(IsObjectOrNull, value)); 
> NEXT_OPCODE(op_is_object_or_null); > } > > case op_is_function: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value)); >+ auto bytecode = currentInstruction->as<OpIsFunction>(); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(IsFunction, value)); > NEXT_OPCODE(op_is_function); > } > > case op_not: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, value)); >+ auto bytecode = currentInstruction->as<OpNot>(); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(LogicalNot, value)); > NEXT_OPCODE(op_not); > } > > case op_to_primitive: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToPrimitive, value)); >+ auto bytecode = currentInstruction->as<OpToPrimitive>(); >+ Node* value = get(bytecode.src); >+ set(bytecode.dst, addToGraph(ToPrimitive, value)); > NEXT_OPCODE(op_to_primitive); > } > > case op_strcat: { >- int startOperand = currentInstruction[2].u.operand; >- int numOperands = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpStrcat>(); >+ int startOperand = bytecode.src.offset(); >+ int numOperands = bytecode.count; > #if CPU(X86) > // X86 doesn't have enough registers to compile MakeRope with three arguments. The > // StrCat we emit here may be turned into a MakeRope. 
Rather than try to be clever, >@@ -4966,104 +5001,116 @@ void ByteCodeParser::parseBlock(unsigned limit) > ASSERT(indexInOperands < maxArguments); > operands[indexInOperands++] = get(VirtualRegister(startOperand - operandIdx)); > } >- set(VirtualRegister(currentInstruction[1].u.operand), >- addToGraph(StrCat, operands[0], operands[1], operands[2])); >+ set(bytecode.dst, addToGraph(StrCat, operands[0], operands[1], operands[2])); > NEXT_OPCODE(op_strcat); > } > > case op_less: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLess, op1, op2)); >+ auto bytecode = currentInstruction->as<OpLess>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(CompareLess, op1, op2)); > NEXT_OPCODE(op_less); > } > > case op_lesseq: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLessEq, op1, op2)); >+ auto bytecode = currentInstruction->as<OpLesseq>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(CompareLessEq, op1, op2)); > NEXT_OPCODE(op_lesseq); > } > > case op_greater: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreater, op1, op2)); >+ auto bytecode = currentInstruction->as<OpGreater>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(CompareGreater, op1, op2)); > NEXT_OPCODE(op_greater); > } > > case op_greatereq: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = 
get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreaterEq, op1, op2)); >+ auto bytecode = currentInstruction->as<OpGreatereq>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(CompareGreaterEq, op1, op2)); > NEXT_OPCODE(op_greatereq); > } > > case op_below: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareBelow, op1, op2)); >+ auto bytecode = currentInstruction->as<OpBelow>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(CompareBelow, op1, op2)); > NEXT_OPCODE(op_below); > } > > case op_beloweq: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareBelowEq, op1, op2)); >+ auto bytecode = currentInstruction->as<OpBeloweq>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(CompareBelowEq, op1, op2)); > NEXT_OPCODE(op_beloweq); > } > > case op_eq: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, op1, op2)); >+ auto bytecode = currentInstruction->as<OpEq>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(CompareEq, op1, op2)); > NEXT_OPCODE(op_eq); > } > > case op_eq_null: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpEqNull>(); >+ Node* value = get(bytecode.operand); > Node* nullConstant = addToGraph(JSConstant, 
OpInfo(m_constantNull)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, value, nullConstant)); >+ set(bytecode.dst, addToGraph(CompareEq, value, nullConstant)); > NEXT_OPCODE(op_eq_null); > } > > case op_stricteq: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2)); >+ auto bytecode = currentInstruction->as<OpStricteq>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(CompareStrictEq, op1, op2)); > NEXT_OPCODE(op_stricteq); > } > > case op_neq: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2))); >+ auto bytecode = currentInstruction->as<OpNeq>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); >+ set(bytecode.dst, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2))); > NEXT_OPCODE(op_neq); > } > > case op_neq_null: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpNeqNull>(); >+ Node* value = get(bytecode.operand); > Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, value, nullConstant))); >+ set(bytecode.dst, addToGraph(LogicalNot, addToGraph(CompareEq, value, nullConstant))); > NEXT_OPCODE(op_neq_null); > } > > case op_nstricteq: { >- Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); >+ auto bytecode = currentInstruction->as<OpNstricteq>(); >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = 
get(bytecode.rhs); > Node* invertedResult; > invertedResult = addToGraph(CompareStrictEq, op1, op2); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, invertedResult)); >+ set(bytecode.dst, addToGraph(LogicalNot, invertedResult)); > NEXT_OPCODE(op_nstricteq); > } > > // === Property access operations === > > case op_get_by_val: { >+ auto bytecode = currentInstruction->as<OpGetByVal>(); > SpeculatedType prediction = getPredictionWithoutOSRExit(); > >- Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* property = get(VirtualRegister(currentInstruction[3].u.operand)); >+ Node* base = get(bytecode.base); >+ Node* property = get(bytecode.property); > bool compiledAsGetById = false; > GetByIdStatus getByIdStatus; > unsigned identifierNumber = 0; >@@ -5097,9 +5144,9 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > if (compiledAsGetById) >- handleGetById(currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus, AccessType::Get, OPCODE_LENGTH(op_get_by_val)); >+ handleGetById(bytecode.dst, prediction, base, identifierNumber, getByIdStatus, AccessType::Get, OPCODE_LENGTH(op_get_by_val)); > else { >- ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Read); >+ ArrayMode arrayMode = getArrayMode(bytecode.metadata(m_codeBlock).arrayProfile, Array::Read); > // FIXME: We could consider making this not vararg, since it only uses three child > // slots. > // https://bugs.webkit.org/show_bug.cgi?id=184192 >@@ -5108,87 +5155,40 @@ void ByteCodeParser::parseBlock(unsigned limit) > addVarArgChild(0); // Leave room for property storage. > Node* getByVal = addToGraph(Node::VarArg, GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction)); > m_exitOK = false; // GetByVal must be treated as if it clobbers exit state, since FixupPhase may make it generic. 
>- set(VirtualRegister(currentInstruction[1].u.operand), getByVal); >+ set(bytecode.dst, getByVal); > } > > NEXT_OPCODE(op_get_by_val); > } > > case op_get_by_val_with_this: { >+ auto bytecode = currentInstruction->as<OpGetByValWithThis>(); > SpeculatedType prediction = getPrediction(); > >- Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* thisValue = get(VirtualRegister(currentInstruction[3].u.operand)); >- Node* property = get(VirtualRegister(currentInstruction[4].u.operand)); >+ Node* base = get(bytecode.base); >+ Node* thisValue = get(bytecode.thisValue); >+ Node* property = get(bytecode.property); > Node* getByValWithThis = addToGraph(GetByValWithThis, OpInfo(), OpInfo(prediction), base, thisValue, property); >- set(VirtualRegister(currentInstruction[1].u.operand), getByValWithThis); >+ set(bytecode.dst, getByValWithThis); > > NEXT_OPCODE(op_get_by_val_with_this); > } > > case op_put_by_val_direct: >- case op_put_by_val: { >- Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* property = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* value = get(VirtualRegister(currentInstruction[3].u.operand)); >- bool isDirect = opcodeID == op_put_by_val_direct; >- bool compiledAsPutById = false; >- { >- unsigned identifierNumber = std::numeric_limits<unsigned>::max(); >- PutByIdStatus putByIdStatus; >- { >- ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); >- ByValInfo* byValInfo = m_inlineStackTop->m_baselineMap.get(CodeOrigin(currentCodeOrigin().bytecodeIndex)).byValInfo; >- // FIXME: When the bytecode is not compiled in the baseline JIT, byValInfo becomes null. >- // At that time, there is no information. 
>- if (byValInfo >- && byValInfo->stubInfo >- && !byValInfo->tookSlowPath >- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent) >- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType) >- && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) { >- compiledAsPutById = true; >- identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl()); >- UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; >- >- if (Symbol* symbol = byValInfo->cachedSymbol.get()) { >- FrozenValue* frozen = m_graph.freezeStrong(symbol); >- addToGraph(CheckCell, OpInfo(frozen), property); >- } else { >- ASSERT(!uid->isSymbol()); >- addToGraph(CheckStringIdent, OpInfo(uid), property); >- } >- >- putByIdStatus = PutByIdStatus::computeForStubInfo( >- locker, m_inlineStackTop->m_profiledBlock, >- byValInfo->stubInfo, currentCodeOrigin(), uid); >- >- } >- } >- >- if (compiledAsPutById) >- handlePutById(base, identifierNumber, value, putByIdStatus, isDirect); >- } >- >- if (!compiledAsPutById) { >- ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Write); >- >- addVarArgChild(base); >- addVarArgChild(property); >- addVarArgChild(value); >- addVarArgChild(0); // Leave room for property storage. >- addVarArgChild(0); // Leave room for length. >- addToGraph(Node::VarArg, isDirect ? 
PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0)); >- } >+ handlePutByVal(currentInstruction->as<OpPutByValDirect>()); >+ NEXT_OPCODE(op_put_by_val_direct); > >+ case op_put_by_val: { >+ handlePutByVal(currentInstruction->as<OpPutByVal>()); > NEXT_OPCODE(op_put_by_val); > } > > case op_put_by_val_with_this: { >- Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* thisValue = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* property = get(VirtualRegister(currentInstruction[3].u.operand)); >- Node* value = get(VirtualRegister(currentInstruction[4].u.operand)); >+ auto bytecode = currentInstruction->as<OpPutByValWithThis>(); >+ Node* base = get(bytecode.base); >+ Node* thisValue = get(bytecode.thisValue); >+ Node* property = get(bytecode.property); >+ Node* value = get(bytecode.value); > > addVarArgChild(base); > addVarArgChild(thisValue); >@@ -5200,10 +5200,11 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_define_data_property: { >- Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* property = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* value = get(VirtualRegister(currentInstruction[3].u.operand)); >- Node* attributes = get(VirtualRegister(currentInstruction[4].u.operand)); >+ auto bytecode = currentInstruction->as<OpDefineDataProperty>(); >+ Node* base = get(bytecode.base); >+ Node* property = get(bytecode.property); >+ Node* value = get(bytecode.value); >+ Node* attributes = get(bytecode.attributes); > > addVarArgChild(base); > addVarArgChild(property); >@@ -5215,11 +5216,12 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_define_accessor_property: { >- Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* property = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* getter = get(VirtualRegister(currentInstruction[3].u.operand)); >- Node* setter = 
get(VirtualRegister(currentInstruction[4].u.operand)); >- Node* attributes = get(VirtualRegister(currentInstruction[5].u.operand)); >+ auto bytecode = currentInstruction->as<OpDefineAccessorProperty>(); >+ Node* base = get(bytecode.base); >+ Node* property = get(bytecode.property); >+ Node* getter = get(bytecode.getter); >+ Node* setter = get(bytecode.setter); >+ Node* attributes = get(bytecode.attributes); > > addVarArgChild(base); > addVarArgChild(property); >@@ -5233,14 +5235,14 @@ void ByteCodeParser::parseBlock(unsigned limit) > > case op_get_by_id_direct: > case op_try_get_by_id: >- case op_get_by_id: >- case op_get_by_id_proto_load: >- case op_get_by_id_unset: >- case op_get_array_length: { >+ // TODO >+ ASSERT_NOT_REACHED(); >+ case op_get_by_id: { >+ auto bytecode = currentInstruction->as<OpGetById>(); > SpeculatedType prediction = getPrediction(); > >- Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; >+ Node* base = get(bytecode.base); >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; > > UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; > GetByIdStatus getByIdStatus = GetByIdStatus::computeFor( >@@ -5259,33 +5261,29 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > handleGetById( >- currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus, type, opcodeLength); >+ bytecode.dst, prediction, base, identifierNumber, getByIdStatus, type, opcodeLength); > >- // Opcode's length is different from others in try and direct cases. 
>- if (opcodeID == op_try_get_by_id) >- NEXT_OPCODE(op_try_get_by_id); >- else if (opcodeID == op_get_by_id_direct) >- NEXT_OPCODE(op_get_by_id_direct); >- else >- NEXT_OPCODE(op_get_by_id); >+ NEXT_OPCODE(op_get_by_id); > } > case op_get_by_id_with_this: { > SpeculatedType prediction = getPrediction(); > >- Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* thisValue = get(VirtualRegister(currentInstruction[3].u.operand)); >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[4].u.operand]; >+ auto bytecode = currentInstruction->as<OpGetByIdWithThis>(); >+ Node* base = get(bytecode.base); >+ Node* thisValue = get(bytecode.thisValue); >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; > >- set(VirtualRegister(currentInstruction[1].u.operand), >+ set(bytecode.dst, > addToGraph(GetByIdWithThis, OpInfo(identifierNumber), OpInfo(prediction), base, thisValue)); > > NEXT_OPCODE(op_get_by_id_with_this); > } > case op_put_by_id: { >- Node* value = get(VirtualRegister(currentInstruction[3].u.operand)); >- Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand]; >- bool direct = currentInstruction[8].u.putByIdFlags & PutByIdIsDirect; >+ auto bytecode = currentInstruction->as<OpPutById>(); >+ Node* value = get(bytecode.value); >+ Node* base = get(bytecode.base); >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; >+ bool direct = bytecode.metadata(m_codeBlock).flags & PutByIdIsDirect; > > PutByIdStatus putByIdStatus = PutByIdStatus::computeFor( > m_inlineStackTop->m_profiledBlock, >@@ -5297,71 +5295,68 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_put_by_id_with_this: { >- Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* thisValue = get(VirtualRegister(currentInstruction[2].u.operand)); >- 
Node* value = get(VirtualRegister(currentInstruction[4].u.operand)); >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; >+ auto bytecode = currentInstruction->as<OpPutByIdWithThis>(); >+ Node* base = get(bytecode.base); >+ Node* thisValue = get(bytecode.thisValue); >+ Node* value = get(bytecode.value); >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; > > addToGraph(PutByIdWithThis, OpInfo(identifierNumber), base, thisValue, value); > NEXT_OPCODE(op_put_by_id_with_this); > } > > case op_put_getter_by_id: >- case op_put_setter_by_id: { >- Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand]; >- unsigned attributes = currentInstruction[3].u.operand; >- Node* accessor = get(VirtualRegister(currentInstruction[4].u.operand)); >- NodeType op = (opcodeID == op_put_getter_by_id) ? PutGetterById : PutSetterById; >- addToGraph(op, OpInfo(identifierNumber), OpInfo(attributes), base, accessor); >+ handlePutAccessorById(PutGetterById, currentInstruction->as<OpPutGetterById>()); > NEXT_OPCODE(op_put_getter_by_id); >+ case op_put_setter_by_id: { >+ handlePutAccessorById(PutSetterById, currentInstruction->as<OpPutSetterById>()); >+ NEXT_OPCODE(op_put_setter_by_id); > } > > case op_put_getter_setter_by_id: { >- Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand]; >- unsigned attributes = currentInstruction[3].u.operand; >- Node* getter = get(VirtualRegister(currentInstruction[4].u.operand)); >- Node* setter = get(VirtualRegister(currentInstruction[5].u.operand)); >- addToGraph(PutGetterSetterById, OpInfo(identifierNumber), OpInfo(attributes), base, getter, setter); >+ auto bytecode = currentInstruction->as<OpPutGetterSetterById>(); >+ Node* base = get(bytecode.base); >+ 
unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; >+ Node* getter = get(bytecode.getter); >+ Node* setter = get(bytecode.setter); >+ addToGraph(PutGetterSetterById, OpInfo(identifierNumber), OpInfo(bytecode.attributes), base, getter, setter); > NEXT_OPCODE(op_put_getter_setter_by_id); > } > > case op_put_getter_by_val: >- case op_put_setter_by_val: { >- Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* subscript = get(VirtualRegister(currentInstruction[2].u.operand)); >- unsigned attributes = currentInstruction[3].u.operand; >- Node* accessor = get(VirtualRegister(currentInstruction[4].u.operand)); >- NodeType op = (opcodeID == op_put_getter_by_val) ? PutGetterByVal : PutSetterByVal; >- addToGraph(op, OpInfo(attributes), base, subscript, accessor); >+ handlePutAccessorByVal(PutGetterByVal, currentInstruction->as<OpPutGetterByVal>()); > NEXT_OPCODE(op_put_getter_by_val); >+ case op_put_setter_by_val: { >+ handlePutAccessorByVal(PutSetterByVal, currentInstruction->as<OpPutSetterByVal>()); >+ NEXT_OPCODE(op_put_setter_by_val); > } > > case op_del_by_id: { >- Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; >- set(VirtualRegister(currentInstruction[1].u.operand), >- addToGraph(DeleteById, OpInfo(identifierNumber), base)); >+ auto bytecode = currentInstruction->as<OpDelById>(); >+ Node* base = get(bytecode.base); >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; >+ set(bytecode.dst, addToGraph(DeleteById, OpInfo(identifierNumber), base)); > NEXT_OPCODE(op_del_by_id); > } > > case op_del_by_val: { >- int dst = currentInstruction[1].u.operand; >- Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* key = get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(dst), addToGraph(DeleteByVal, base, key)); >+ auto 
bytecode = currentInstruction->as<OpDelByVal>(); >+ Node* base = get(bytecode.base); >+ Node* key = get(bytecode.property); >+ set(bytecode.dst, addToGraph(DeleteByVal, base, key)); > NEXT_OPCODE(op_del_by_val); > } > > case op_profile_type: { >- Node* valueToProfile = get(VirtualRegister(currentInstruction[1].u.operand)); >- addToGraph(ProfileType, OpInfo(currentInstruction[2].u.location), valueToProfile); >+ auto bytecode = currentInstruction->as<OpProfileType>(); >+ Node* valueToProfile = get(bytecode.target); >+ addToGraph(ProfileType, OpInfo(bytecode.flag), valueToProfile); > NEXT_OPCODE(op_profile_type); > } > > case op_profile_control_flow: { >- BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation; >+ auto bytecode = currentInstruction->as<OpProfileControlFlow>(); >+ BasicBlockLocation* basicBlockLocation = bytecode.metadata(m_codeBlock).basicBlockLocation; > addToGraph(ProfileControlFlow, OpInfo(basicBlockLocation)); > NEXT_OPCODE(op_profile_control_flow); > } >@@ -5370,7 +5365,8 @@ void ByteCodeParser::parseBlock(unsigned limit) > > case op_jmp: { > ASSERT(!m_currentBlock->terminal()); >- int relativeOffset = currentInstruction[1].u.operand; >+ auto bytecode = currentInstruction->as<OpJmp>(); >+ int relativeOffset = bytecode.target; > addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset)); > if (relativeOffset <= 0) > flushForTerminal(); >@@ -5378,168 +5374,205 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_jtrue: { >- unsigned relativeOffset = currentInstruction[2].u.operand; >- Node* condition = get(VirtualRegister(currentInstruction[1].u.operand)); >+ auto bytecode = currentInstruction->as<OpJtrue>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* condition = get(bytecode.condition); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jtrue))), condition); > LAST_OPCODE(op_jtrue); > } > > case 
op_jfalse: { >- unsigned relativeOffset = currentInstruction[2].u.operand; >- Node* condition = get(VirtualRegister(currentInstruction[1].u.operand)); >+ auto bytecode = currentInstruction->as<OpJfalse>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* condition = get(bytecode.condition); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jfalse), m_currentIndex + relativeOffset)), condition); > LAST_OPCODE(op_jfalse); > } > > case op_jeq_null: { >- unsigned relativeOffset = currentInstruction[2].u.operand; >- Node* value = get(VirtualRegister(currentInstruction[1].u.operand)); >+ auto bytecode = currentInstruction->as<OpJeqNull>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* value = get(bytecode.condition); > Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull)); > Node* condition = addToGraph(CompareEq, value, nullConstant); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq_null))), condition); > LAST_OPCODE(op_jeq_null); > } > > case op_jneq_null: { >- unsigned relativeOffset = currentInstruction[2].u.operand; >- Node* value = get(VirtualRegister(currentInstruction[1].u.operand)); >+ auto bytecode = currentInstruction->as<OpJneqNull>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* value = get(bytecode.condition); > Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull)); > Node* condition = addToGraph(CompareEq, value, nullConstant); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_null), m_currentIndex + relativeOffset)), condition); > LAST_OPCODE(op_jneq_null); > } > > case op_jless: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = 
get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJless>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareLess, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jless))), condition); > LAST_OPCODE(op_jless); > } > > case op_jlesseq: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJlesseq>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareLessEq, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jlesseq))), condition); > LAST_OPCODE(op_jlesseq); > } > > case op_jgreater: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJgreater>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareGreater, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreater))), condition); > LAST_OPCODE(op_jgreater); > } > > case op_jgreatereq: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = 
get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJgreatereq>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareGreaterEq, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreatereq))), condition); > LAST_OPCODE(op_jgreatereq); > } > > case op_jeq: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJeq>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareEq, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq))), condition); > LAST_OPCODE(op_jeq); > } > > case op_jstricteq: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJstricteq>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareStrictEq, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jstricteq))), condition); > LAST_OPCODE(op_jstricteq); > } > > case op_jnless: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = 
get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJnless>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareLess, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnless), m_currentIndex + relativeOffset)), condition); > LAST_OPCODE(op_jnless); > } > > case op_jnlesseq: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJnlesseq>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareLessEq, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnlesseq), m_currentIndex + relativeOffset)), condition); > LAST_OPCODE(op_jnlesseq); > } > > case op_jngreater: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJngreater>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareGreater, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreater), m_currentIndex + relativeOffset)), condition); > LAST_OPCODE(op_jngreater); > } > > case op_jngreatereq: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = 
get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJngreatereq>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareGreaterEq, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreatereq), m_currentIndex + relativeOffset)), condition); > LAST_OPCODE(op_jngreatereq); > } > > case op_jneq: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJneq>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareEq, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq), m_currentIndex + relativeOffset)), condition); > LAST_OPCODE(op_jneq); > } > > case op_jnstricteq: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJnstricteq>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareStrictEq, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnstricteq), m_currentIndex + relativeOffset)), condition); > LAST_OPCODE(op_jnstricteq); > } > > case op_jbelow: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = 
get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJbelow>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareBelow, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jbelow))), condition); > LAST_OPCODE(op_jbelow); > } > > case op_jbeloweq: { >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpJbeloweq>(); >+ unsigned relativeOffset = bytecode.target; >+ Node* op1 = get(bytecode.lhs); >+ Node* op2 = get(bytecode.rhs); > Node* condition = addToGraph(CompareBelowEq, op1, op2); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jbeloweq))), condition); > LAST_OPCODE(op_jbeloweq); > } > > case op_switch_imm: { >+ auto bytecode = currentInstruction->as<OpSwitchImm>(); > SwitchData& data = *m_graph.m_switchData.add(); > data.kind = SwitchImm; >- data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand]; >- data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand); >+ data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.tableIndex]; >+ data.fallThrough.setBytecodeIndex(m_currentIndex + bytecode.defaultOffset); > SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex); > for (unsigned i = 0; i < table.branchOffsets.size(); ++i) { > if (!table.branchOffsets[i]) >@@ -5549,16 +5582,17 @@ void ByteCodeParser::parseBlock(unsigned limit) > continue; > 
data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target)); > } >- addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand))); >+ addToGraph(Switch, OpInfo(&data), get(bytecode.scrutinee)); > flushIfTerminal(data); > LAST_OPCODE(op_switch_imm); > } > > case op_switch_char: { >+ auto bytecode = currentInstruction->as<OpSwitchChar>(); > SwitchData& data = *m_graph.m_switchData.add(); > data.kind = SwitchChar; >- data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand]; >- data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand); >+ data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.tableIndex]; >+ data.fallThrough.setBytecodeIndex(m_currentIndex + bytecode.defaultOffset); > SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex); > for (unsigned i = 0; i < table.branchOffsets.size(); ++i) { > if (!table.branchOffsets[i]) >@@ -5569,16 +5603,17 @@ void ByteCodeParser::parseBlock(unsigned limit) > data.cases.append( > SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target)); > } >- addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand))); >+ addToGraph(Switch, OpInfo(&data), get(bytecode.scrutinee)); > flushIfTerminal(data); > LAST_OPCODE(op_switch_char); > } > > case op_switch_string: { >+ auto bytecode = currentInstruction->as<OpSwitchString>(); > SwitchData& data = *m_graph.m_switchData.add(); > data.kind = SwitchString; >- data.switchTableIndex = currentInstruction[1].u.operand; >- data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand); >+ data.switchTableIndex = bytecode.tableIndex; >+ data.fallThrough.setBytecodeIndex(m_currentIndex + bytecode.defaultOffset); > StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex); > StringJumpTable::StringOffsetTable::iterator 
iter; > StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end(); >@@ -5589,25 +5624,26 @@ void ByteCodeParser::parseBlock(unsigned limit) > data.cases.append( > SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target)); > } >- addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand))); >+ addToGraph(Switch, OpInfo(&data), get(bytecode.scrutinee)); > flushIfTerminal(data); > LAST_OPCODE(op_switch_string); > } > >- case op_ret: >+ case op_ret: { >+ auto bytecode = currentInstruction->as<OpRet>(); > ASSERT(!m_currentBlock->terminal()); > if (!inlineCallFrame()) { > // Simple case: we are just producing a return >- addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand))); >+ addToGraph(Return, get(bytecode.value)); > flushForReturn(); > LAST_OPCODE(op_ret); > } > > flushForReturn(); > if (m_inlineStackTop->m_returnValue.isValid()) >- setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush); >+ setDirect(m_inlineStackTop->m_returnValue, get(bytecode.value), ImmediateSetWithFlush); > >- if (!m_inlineStackTop->m_continuationBlock && m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size()) { >+ if (!m_inlineStackTop->m_continuationBlock && m_currentIndex + currentInstruction->size() != m_inlineStackTop->m_codeBlock->instructions().size()) { > // This is an early return from an inlined function and we do not have a continuation block, so we must allocate one. > // It is untargetable, because we do not know the appropriate index. 
> // If this block turns out to be a jump target, parseCodeBlock will fix its bytecodeIndex before putting it in m_blockLinkingTargets >@@ -5621,26 +5657,27 @@ void ByteCodeParser::parseBlock(unsigned limit) > m_inlineStackTop->m_continuationBlock = m_currentBlock; > } > LAST_OPCODE_LINKED(op_ret); >- >+ } > case op_end: > ASSERT(!inlineCallFrame()); >- addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand))); >+ addToGraph(Return, get(currentInstruction->as<OpEnd>().value)); > flushForReturn(); > LAST_OPCODE(op_end); > > case op_throw: >- addToGraph(Throw, get(VirtualRegister(currentInstruction[1].u.operand))); >+ addToGraph(Throw, get(currentInstruction->as<OpThrow>().value)); > flushForTerminal(); > LAST_OPCODE(op_throw); > > case op_throw_static_error: { >- uint32_t errorType = currentInstruction[2].u.unsignedValue; >- addToGraph(ThrowStaticError, OpInfo(errorType), get(VirtualRegister(currentInstruction[1].u.operand))); >+ auto bytecode = currentInstruction->as<OpThrowStaticError>(); >+ addToGraph(ThrowStaticError, OpInfo(bytecode.errorType), get(bytecode.message)); > flushForTerminal(); > LAST_OPCODE(op_throw_static_error); > } > > case op_catch: { >+ auto bytecode = currentInstruction->as<OpCatch>(); > m_graph.m_hasExceptionHandlers = true; > > if (inlineCallFrame()) { >@@ -5654,7 +5691,7 @@ void ByteCodeParser::parseBlock(unsigned limit) > > RELEASE_ASSERT(!m_currentBlock->size() || (m_graph.compilation() && m_currentBlock->size() == 1 && m_currentBlock->at(0)->op() == CountExecution)); > >- ValueProfileAndOperandBuffer* buffer = static_cast<ValueProfileAndOperandBuffer*>(currentInstruction[3].u.pointer); >+ ValueProfileAndOperandBuffer* buffer = bytecode.metadata(m_codeBlock).buffer; > > if (!buffer) { > NEXT_OPCODE(op_catch); // This catch has yet to execute. Note: this load can be racy with the main thread. 
>@@ -5761,13 +5798,13 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_call: >- handleCall(currentInstruction, Call, CallMode::Regular); >+ handleCall<OpCall>(currentInstruction, Call, CallMode::Regular); > ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction"); > NEXT_OPCODE(op_call); > > case op_tail_call: { > flushForReturn(); >- Terminality terminality = handleCall(currentInstruction, TailCall, CallMode::Tail); >+ Terminality terminality = handleCall<OpTailCall>(currentInstruction, TailCall, CallMode::Tail); > ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction"); > // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function. > // If the call is not terminal, however, then we want the subsequent op_ret/op_jmp to update metadata and clean >@@ -5780,19 +5817,19 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_construct: >- handleCall(currentInstruction, Construct, CallMode::Construct); >+ handleCall<OpConstruct>(currentInstruction, Construct, CallMode::Construct); > ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction"); > NEXT_OPCODE(op_construct); > > case op_call_varargs: { >- handleVarargsCall(currentInstruction, CallVarargs, CallMode::Regular); >+ handleVarargsCall<OpCallVarargs>(currentInstruction, CallVarargs, CallMode::Regular); > ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction"); > NEXT_OPCODE(op_call_varargs); > } > > case op_tail_call_varargs: { > flushForReturn(); >- Terminality terminality = handleVarargsCall(currentInstruction, TailCallVarargs, CallMode::Tail); >+ Terminality terminality = 
handleVarargsCall<OpTailCallVarargs>(currentInstruction, TailCallVarargs, CallMode::Tail); > ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction"); > // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function. > // If the call is not terminal, however, then we want the subsequent op_ret/op_jmp to update metadata and clean >@@ -5808,7 +5845,7 @@ void ByteCodeParser::parseBlock(unsigned limit) > // done by the arguments object creation node as that node may not exist. > noticeArgumentsUse(); > flushForReturn(); >- Terminality terminality = handleVarargsCall(currentInstruction, TailCallForwardVarargs, CallMode::Tail); >+ Terminality terminality = handleVarargsCall<OpTailCallForwardArguments>(currentInstruction, TailCallForwardVarargs, CallMode::Tail); > ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction"); > // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function. 
> // If the call is not terminal, however, then we want the subsequent op_ret/op_jmp to update metadata and clean >@@ -5820,31 +5857,30 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_construct_varargs: { >- handleVarargsCall(currentInstruction, ConstructVarargs, CallMode::Construct); >+ handleVarargsCall<OpConstructVarargs>(currentInstruction, ConstructVarargs, CallMode::Construct); > ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction"); > NEXT_OPCODE(op_construct_varargs); > } > > case op_call_eval: { >- int result = currentInstruction[1].u.operand; >- int callee = currentInstruction[2].u.operand; >- int argumentCountIncludingThis = currentInstruction[3].u.operand; >- int registerOffset = -currentInstruction[4].u.operand; >- addCall(result, CallEval, nullptr, get(VirtualRegister(callee)), argumentCountIncludingThis, registerOffset, getPrediction()); >+ auto bytecode = currentInstruction->as<OpCallEval>(); >+ int registerOffset = -bytecode.argv; >+ addCall(bytecode.dst, CallEval, nullptr, get(bytecode.callee), bytecode.argc, registerOffset, getPrediction()); > NEXT_OPCODE(op_call_eval); > } > > case op_jneq_ptr: { >- Special::Pointer specialPointer = currentInstruction[2].u.specialPointer; >+ auto bytecode = currentInstruction->as<OpJneqPtr>(); >+ Special::Pointer specialPointer = bytecode.specialPointer; > ASSERT(pointerIsCell(specialPointer)); > JSCell* actualPointer = static_cast<JSCell*>( > actualPointerFor(m_inlineStackTop->m_codeBlock, specialPointer)); > FrozenValue* frozenPointer = m_graph.freeze(actualPointer); >- int operand = currentInstruction[1].u.operand; >- unsigned relativeOffset = currentInstruction[3].u.operand; >- Node* child = get(VirtualRegister(operand)); >- if (currentInstruction[4].u.operand) { >+ unsigned relativeOffset = bytecode.target; >+ Node* child = get(bytecode.condition); >+ if 
(bytecode.metadata(m_codeBlock).hasJumped) { > Node* condition = addToGraph(CompareEqPtr, OpInfo(frozenPointer), child); >+ // TODO: update (call to) `branchData` > addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr), m_currentIndex + relativeOffset)), condition); > LAST_OPCODE(op_jneq_ptr); > } >@@ -5853,75 +5889,73 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_resolve_scope: { >- int dst = currentInstruction[1].u.operand; >- ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand); >- unsigned depth = currentInstruction[5].u.operand; >- int scope = currentInstruction[2].u.operand; >- >- if (needsDynamicLookup(resolveType, op_resolve_scope)) { >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; >- set(VirtualRegister(dst), addToGraph(ResolveScope, OpInfo(identifierNumber), get(VirtualRegister(scope)))); >+ auto bytecode = currentInstruction->as<OpResolveScope>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ unsigned depth = bytecode.localScopeDepth; >+ >+ if (needsDynamicLookup(bytecode.resolveType, op_resolve_scope)) { >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.var]; >+ set(bytecode.dst, addToGraph(ResolveScope, OpInfo(identifierNumber), get(bytecode.scope))); > NEXT_OPCODE(op_resolve_scope); > } > > // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints. 
>- if (needsVarInjectionChecks(resolveType)) >+ if (needsVarInjectionChecks(bytecode.resolveType)) > m_graph.watchpoints().addLazily(m_inlineStackTop->m_codeBlock->globalObject()->varInjectionWatchpoint()); > >- switch (resolveType) { >+ switch (bytecode.resolveType) { > case GlobalProperty: > case GlobalVar: > case GlobalPropertyWithVarInjectionChecks: > case GlobalVarWithVarInjectionChecks: > case GlobalLexicalVar: > case GlobalLexicalVarWithVarInjectionChecks: { >- JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock); >+ JSScope* constantScope = JSScope::constantScopeForCodeBlock(bytecode.resolveType, m_inlineStackTop->m_codeBlock); > RELEASE_ASSERT(constantScope); >- RELEASE_ASSERT(static_cast<JSScope*>(currentInstruction[6].u.pointer) == constantScope); >- set(VirtualRegister(dst), weakJSConstant(constantScope)); >- addToGraph(Phantom, get(VirtualRegister(scope))); >+ RELEASE_ASSERT(metadata.scope.get() == constantScope); >+ set(bytecode.dst, weakJSConstant(constantScope)); >+ addToGraph(Phantom, get(bytecode.scope)); > break; > } > case ModuleVar: { > // Since the value of the "scope" virtual register is not used in LLInt / baseline op_resolve_scope with ModuleVar, > // we need not to keep it alive by the Phantom node. >- JSModuleEnvironment* moduleEnvironment = jsCast<JSModuleEnvironment*>(currentInstruction[6].u.jsCell.get()); > // Module environment is already strongly referenced by the CodeBlock. >- set(VirtualRegister(dst), weakJSConstant(moduleEnvironment)); >+ set(bytecode.dst, weakJSConstant(metadata.moduleEnvironment.get())); > break; > } > case LocalClosureVar: > case ClosureVar: > case ClosureVarWithVarInjectionChecks: { >- Node* localBase = get(VirtualRegister(scope)); >+ Node* localBase = get(bytecode.scope); > addToGraph(Phantom, localBase); // OSR exit cannot handle resolve_scope on a DCE'd scope. > > // We have various forms of constant folding here. 
This is necessary to avoid > // spurious recompiles in dead-but-foldable code. >- if (SymbolTable* symbolTable = currentInstruction[6].u.symbolTable.get()) { >+ if (SymbolTable* symbolTable = metadata.symbolTable.get()) { > InferredValue* singleton = symbolTable->singletonScope(); > if (JSValue value = singleton->inferredValue()) { > m_graph.watchpoints().addLazily(singleton); >- set(VirtualRegister(dst), weakJSConstant(value)); >+ set(bytecode.dst, weakJSConstant(value)); > break; > } > } > if (JSScope* scope = localBase->dynamicCastConstant<JSScope*>(*m_vm)) { > for (unsigned n = depth; n--;) > scope = scope->next(); >- set(VirtualRegister(dst), weakJSConstant(scope)); >+ set(bytecode.dst, weakJSConstant(scope)); > break; > } > for (unsigned n = depth; n--;) > localBase = addToGraph(SkipScope, localBase); >- set(VirtualRegister(dst), localBase); >+ set(bytecode.dst, localBase); > break; > } > case UnresolvedProperty: > case UnresolvedPropertyWithVarInjectionChecks: { >- addToGraph(Phantom, get(VirtualRegister(scope))); >+ addToGraph(Phantom, get(bytecode.scope)); > addToGraph(ForceOSRExit); >- set(VirtualRegister(dst), addToGraph(JSConstant, OpInfo(m_constantNull))); >+ set(bytecode.dst, addToGraph(JSConstant, OpInfo(m_constantNull))); > break; > } > case Dynamic: >@@ -5931,21 +5965,20 @@ void ByteCodeParser::parseBlock(unsigned limit) > NEXT_OPCODE(op_resolve_scope); > } > case op_resolve_scope_for_hoisting_func_decl_in_eval: { >- int dst = currentInstruction[1].u.operand; >- int scope = currentInstruction[2].u.operand; >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; >+ auto bytecode = currentInstruction->as<OpResolveScopeForHoistingFuncDeclInEval>(); >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; > >- set(VirtualRegister(dst), addToGraph(ResolveScopeForHoistingFuncDeclInEval, OpInfo(identifierNumber), get(VirtualRegister(scope)))); >+ set(bytecode.dst, 
addToGraph(ResolveScopeForHoistingFuncDeclInEval, OpInfo(identifierNumber), get(bytecode.scope))); > > NEXT_OPCODE(op_resolve_scope_for_hoisting_func_decl_in_eval); > } > > case op_get_from_scope: { >- int dst = currentInstruction[1].u.operand; >- int scope = currentInstruction[2].u.operand; >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; >+ auto bytecode = currentInstruction->as<OpGetFromScope>(); >+ auto metadata = bytecode.metadata(m_codeBlock); >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.var]; > UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; >- ResolveType resolveType = GetPutInfo(currentInstruction[4].u.operand).resolveType(); >+ ResolveType resolveType = metadata.getPutInfo.resolveType(); > > Structure* structure = 0; > WatchpointSet* watchpoints = 0; >@@ -5953,17 +5986,17 @@ void ByteCodeParser::parseBlock(unsigned limit) > { > ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); > if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) >- watchpoints = currentInstruction[5].u.watchpointSet; >+ watchpoints = metadata.watchpointSet; > else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks) >- structure = currentInstruction[5].u.structure.get(); >- operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer); >+ structure = metadata.structure.get(); >+ operand = metadata.operand; > } > > if (needsDynamicLookup(resolveType, op_get_from_scope)) { >- uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, currentInstruction[4].u.operand); >+ uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, bytecode.localScopeDepth); > SpeculatedType prediction = getPrediction(); >- set(VirtualRegister(dst), >- addToGraph(GetDynamicVar, OpInfo(opInfo1), OpInfo(prediction), 
get(VirtualRegister(scope)))); >+ set(bytecode.dst, >+ addToGraph(GetDynamicVar, OpInfo(opInfo1), OpInfo(prediction), get(bytecode.scope))); > NEXT_OPCODE(op_get_from_scope); > } > >@@ -5980,21 +6013,21 @@ void ByteCodeParser::parseBlock(unsigned limit) > if (status.state() != GetByIdStatus::Simple > || status.numVariants() != 1 > || status[0].structureSet().size() != 1) { >- set(VirtualRegister(dst), addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(VirtualRegister(scope)))); >+ set(bytecode.dst, addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(bytecode.scope))); > break; > } > > Node* base = weakJSConstant(globalObject); > Node* result = load(prediction, base, identifierNumber, status[0]); >- addToGraph(Phantom, get(VirtualRegister(scope))); >- set(VirtualRegister(dst), result); >+ addToGraph(Phantom, get(bytecode.scope)); >+ set(bytecode.dst, result); > break; > } > case GlobalVar: > case GlobalVarWithVarInjectionChecks: > case GlobalLexicalVar: > case GlobalLexicalVarWithVarInjectionChecks: { >- addToGraph(Phantom, get(VirtualRegister(scope))); >+ addToGraph(Phantom, get(bytecode.scope)); > WatchpointSet* watchpointSet; > ScopeOffset offset; > JSSegmentedVariableObject* scopeObject = jsCast<JSSegmentedVariableObject*>(JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock)); >@@ -6050,7 +6083,7 @@ void ByteCodeParser::parseBlock(unsigned limit) > JSValue value = pointer->get(); > if (value) { > m_graph.watchpoints().addLazily(watchpointSet); >- set(VirtualRegister(dst), weakJSConstant(value)); >+ set(bytecode.dst, weakJSConstant(value)); > break; > } > } >@@ -6064,13 +6097,13 @@ void ByteCodeParser::parseBlock(unsigned limit) > Node* value = addToGraph(nodeType, OpInfo(operand), OpInfo(prediction)); > if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) > addToGraph(CheckNotEmpty, value); >- set(VirtualRegister(dst), value); >+ set(bytecode.dst, 
value); > break; > } > case LocalClosureVar: > case ClosureVar: > case ClosureVarWithVarInjectionChecks: { >- Node* scopeNode = get(VirtualRegister(scope)); >+ Node* scopeNode = get(bytecode.scope); > > // Ideally we wouldn't have to do this Phantom. But: > // >@@ -6086,11 +6119,11 @@ void ByteCodeParser::parseBlock(unsigned limit) > // prediction, we'd otherwise think that it has to exit. Then when it did execute, we > // would recompile. But if we can fold it here, we avoid the exit. > if (JSValue value = m_graph.tryGetConstantClosureVar(scopeNode, ScopeOffset(operand))) { >- set(VirtualRegister(dst), weakJSConstant(value)); >+ set(bytecode.dst, weakJSConstant(value)); > break; > } > SpeculatedType prediction = getPrediction(); >- set(VirtualRegister(dst), >+ set(bytecode.dst, > addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), scopeNode)); > break; > } >@@ -6105,13 +6138,12 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_put_to_scope: { >- unsigned scope = currentInstruction[1].u.operand; >- unsigned identifierNumber = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpPutToScope>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ unsigned identifierNumber = bytecode.var; > if (identifierNumber != UINT_MAX) > identifierNumber = m_inlineStackTop->m_identifierRemap[identifierNumber]; >- unsigned value = currentInstruction[3].u.operand; >- GetPutInfo getPutInfo = GetPutInfo(currentInstruction[4].u.operand); >- ResolveType resolveType = getPutInfo.resolveType(); >+ ResolveType resolveType = metadata.getPutInfo.resolveType(); > UniquedStringImpl* uid; > if (identifierNumber != UINT_MAX) > uid = m_graph.identifiers()[identifierNumber]; >@@ -6124,18 +6156,18 @@ void ByteCodeParser::parseBlock(unsigned limit) > { > ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); > if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar || 
resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) >- watchpoints = currentInstruction[5].u.watchpointSet; >+ watchpoints = metadata.watchpointSet; > else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks) >- structure = currentInstruction[5].u.structure.get(); >- operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer); >+ structure = metadata.structure.get(); >+ operand = metadata.operand; > } > > JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject(); > > if (needsDynamicLookup(resolveType, op_put_to_scope)) { > ASSERT(identifierNumber != UINT_MAX); >- uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, currentInstruction[4].u.operand); >- addToGraph(PutDynamicVar, OpInfo(opInfo1), OpInfo(), get(VirtualRegister(scope)), get(VirtualRegister(value))); >+ uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, metadata.getPutInfo.operand()); >+ addToGraph(PutDynamicVar, OpInfo(opInfo1), OpInfo(), get(bytecode.scope), get(bytecode.value)); > NEXT_OPCODE(op_put_to_scope); > } > >@@ -6150,20 +6182,20 @@ void ByteCodeParser::parseBlock(unsigned limit) > if (status.numVariants() != 1 > || status[0].kind() != PutByIdVariant::Replace > || status[0].structure().size() != 1) { >- addToGraph(PutById, OpInfo(identifierNumber), get(VirtualRegister(scope)), get(VirtualRegister(value))); >+ addToGraph(PutById, OpInfo(identifierNumber), get(bytecode.scope), get(bytecode.value)); > break; > } > Node* base = weakJSConstant(globalObject); >- store(base, identifierNumber, status[0], get(VirtualRegister(value))); >+ store(base, identifierNumber, status[0], get(bytecode.value)); > // Keep scope alive until after put. 
>- addToGraph(Phantom, get(VirtualRegister(scope))); >+ addToGraph(Phantom, get(bytecode.scope)); > break; > } > case GlobalLexicalVar: > case GlobalLexicalVarWithVarInjectionChecks: > case GlobalVar: > case GlobalVarWithVarInjectionChecks: { >- if (!isInitialization(getPutInfo.initializationMode()) && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) { >+ if (!isInitialization(metadata.getPutInfo.initializationMode()) && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) { > SpeculatedType prediction = SpecEmpty; > Node* value = addToGraph(GetGlobalLexicalVariable, OpInfo(operand), OpInfo(prediction)); > addToGraph(CheckNotEmpty, value); >@@ -6174,21 +6206,21 @@ void ByteCodeParser::parseBlock(unsigned limit) > SymbolTableEntry entry = scopeObject->symbolTable()->get(uid); > ASSERT_UNUSED(entry, watchpoints == entry.watchpointSet()); > } >- Node* valueNode = get(VirtualRegister(value)); >+ Node* valueNode = get(bytecode.value); > addToGraph(PutGlobalVariable, OpInfo(operand), weakJSConstant(scopeObject), valueNode); > if (watchpoints && watchpoints->state() != IsInvalidated) { > // Must happen after the store. See comment for GetGlobalVar. > addToGraph(NotifyWrite, OpInfo(watchpoints)); > } > // Keep scope alive until after put. 
>- addToGraph(Phantom, get(VirtualRegister(scope))); >+ addToGraph(Phantom, get(bytecode.scope)); > break; > } > case LocalClosureVar: > case ClosureVar: > case ClosureVarWithVarInjectionChecks: { >- Node* scopeNode = get(VirtualRegister(scope)); >- Node* valueNode = get(VirtualRegister(value)); >+ Node* scopeNode = get(bytecode.scope); >+ Node* valueNode = get(bytecode.value); > > addToGraph(PutClosureVar, OpInfo(operand), scopeNode, valueNode); > >@@ -6251,28 +6283,29 @@ void ByteCodeParser::parseBlock(unsigned limit) > } > > case op_create_lexical_environment: { >- VirtualRegister symbolTableRegister(currentInstruction[3].u.operand); >- VirtualRegister initialValueRegister(currentInstruction[4].u.operand); >- ASSERT(symbolTableRegister.isConstant() && initialValueRegister.isConstant()); >- FrozenValue* symbolTable = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(symbolTableRegister.offset())); >- FrozenValue* initialValue = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(initialValueRegister.offset())); >- Node* scope = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpCreateLexicalEnvironment>(); >+ ASSERT(bytecode.symbolTable.isConstant() && bytecode.initialValue.isConstant()); >+ FrozenValue* symbolTable = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.symbolTable.offset())); >+ FrozenValue* initialValue = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.initialValue.offset())); >+ Node* scope = get(bytecode.scope); > Node* lexicalEnvironment = addToGraph(CreateActivation, OpInfo(symbolTable), OpInfo(initialValue), scope); >- set(VirtualRegister(currentInstruction[1].u.operand), lexicalEnvironment); >+ set(bytecode.dst, lexicalEnvironment); > NEXT_OPCODE(op_create_lexical_environment); > } > > case op_push_with_scope: { >- Node* currentScope = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* object = 
get(VirtualRegister(currentInstruction[3].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(PushWithScope, currentScope, object)); >+ auto bytecode = currentInstruction->as<OpPushWithScope>(); >+ Node* currentScope = get(bytecode.currentScope); >+ Node* object = get(bytecode.newScope); >+ set(bytecode.dst, addToGraph(PushWithScope, currentScope, object)); > NEXT_OPCODE(op_push_with_scope); > } > > case op_get_parent_scope: { >- Node* currentScope = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpGetParentScope>(); >+ Node* currentScope = get(bytecode.scope); > Node* newScope = addToGraph(SkipScope, currentScope); >- set(VirtualRegister(currentInstruction[1].u.operand), newScope); >+ set(bytecode.dst, newScope); > addToGraph(Phantom, currentScope); > NEXT_OPCODE(op_get_parent_scope); > } >@@ -6282,67 +6315,74 @@ void ByteCodeParser::parseBlock(unsigned limit) > // only helps for the first basic block. It's extremely important not to constant fold > // loads from the scope register later, as that would prevent the DFG from tracking the > // bytecode-level liveness of the scope register. 
>+ auto bytecode = currentInstruction->as<OpGetScope>(); > Node* callee = get(VirtualRegister(CallFrameSlot::callee)); > Node* result; > if (JSFunction* function = callee->dynamicCastConstant<JSFunction*>(*m_vm)) > result = weakJSConstant(function->scope()); > else > result = addToGraph(GetScope, callee); >- set(VirtualRegister(currentInstruction[1].u.operand), result); >+ set(bytecode.dst, result); > NEXT_OPCODE(op_get_scope); > } > > case op_argument_count: { >+ auto bytecode = currentInstruction->as<OpArgumentCount>(); > Node* sub = addToGraph(ArithSub, OpInfo(Arith::Unchecked), OpInfo(SpecInt32Only), getArgumentCount(), addToGraph(JSConstant, OpInfo(m_constantOne))); >- >- set(VirtualRegister(currentInstruction[1].u.operand), sub); >+ set(bytecode.dst, sub); > NEXT_OPCODE(op_argument_count); > } > > case op_create_direct_arguments: { >+ auto bytecode = currentInstruction->as<OpCreateDirectArguments>(); > noticeArgumentsUse(); > Node* createArguments = addToGraph(CreateDirectArguments); >- set(VirtualRegister(currentInstruction[1].u.operand), createArguments); >+ set(bytecode.dst, createArguments); > NEXT_OPCODE(op_create_direct_arguments); > } > > case op_create_scoped_arguments: { >+ auto bytecode = currentInstruction->as<OpCreateScopedArguments>(); > noticeArgumentsUse(); >- Node* createArguments = addToGraph(CreateScopedArguments, get(VirtualRegister(currentInstruction[2].u.operand))); >- set(VirtualRegister(currentInstruction[1].u.operand), createArguments); >+ Node* createArguments = addToGraph(CreateScopedArguments, get(bytecode.scope)); >+ set(bytecode.dst, createArguments); > NEXT_OPCODE(op_create_scoped_arguments); > } > > case op_create_cloned_arguments: { >+ auto bytecode = currentInstruction->as<OpCreateClonedArguments>(); > noticeArgumentsUse(); > Node* createArguments = addToGraph(CreateClonedArguments); >- set(VirtualRegister(currentInstruction[1].u.operand), createArguments); >+ set(bytecode.dst, createArguments); > 
NEXT_OPCODE(op_create_cloned_arguments); > } > > case op_get_from_arguments: { >- set(VirtualRegister(currentInstruction[1].u.operand), >+ auto bytecode = currentInstruction->as<OpGetFromArguments>(); >+ set(bytecode.dst, > addToGraph( > GetFromArguments, >- OpInfo(currentInstruction[3].u.operand), >+ OpInfo(bytecode.index), > OpInfo(getPrediction()), >- get(VirtualRegister(currentInstruction[2].u.operand)))); >+ get(bytecode.arguments))); > NEXT_OPCODE(op_get_from_arguments); > } > > case op_put_to_arguments: { >+ auto bytecode = currentInstruction->as<OpPutToArguments>(); > addToGraph( > PutToArguments, >- OpInfo(currentInstruction[2].u.operand), >- get(VirtualRegister(currentInstruction[1].u.operand)), >- get(VirtualRegister(currentInstruction[3].u.operand))); >+ OpInfo(bytecode.index), >+ get(bytecode.arguments), >+ get(bytecode.value)); > NEXT_OPCODE(op_put_to_arguments); > } > > case op_get_argument: { >+ auto bytecode = currentInstruction->as<OpGetArgument>(); > InlineCallFrame* inlineCallFrame = this->inlineCallFrame(); > Node* argument; >- int32_t argumentIndexIncludingThis = currentInstruction[2].u.operand; >+ int32_t argumentIndexIncludingThis = bytecode.index; > if (inlineCallFrame && !inlineCallFrame->isVarargs()) { > int32_t argumentCountIncludingThisWithFixup = inlineCallFrame->argumentsWithFixup.size(); > if (argumentIndexIncludingThis < argumentCountIncludingThisWithFixup) >@@ -6351,125 +6391,84 @@ void ByteCodeParser::parseBlock(unsigned limit) > argument = addToGraph(JSConstant, OpInfo(m_constantUndefined)); > } else > argument = addToGraph(GetArgument, OpInfo(argumentIndexIncludingThis), OpInfo(getPrediction())); >- set(VirtualRegister(currentInstruction[1].u.operand), argument); >+ set(bytecode.dst, argument); > NEXT_OPCODE(op_get_argument); > } > case op_new_async_generator_func: >+ handleNewFunc(NewAsyncGeneratorFunction, currentInstruction->as<OpNewAsyncGeneratorFunc>()); >+ NEXT_OPCODE(op_new_async_generator_func); > case op_new_func: >- 
case op_new_generator_func: >- case op_new_async_func: { >- FunctionExecutable* decl = m_inlineStackTop->m_profiledBlock->functionDecl(currentInstruction[3].u.operand); >- FrozenValue* frozen = m_graph.freezeStrong(decl); >- NodeType op; >- switch (opcodeID) { >- case op_new_generator_func: >- op = NewGeneratorFunction; >- break; >- case op_new_async_func: >- op = NewAsyncFunction; >- break; >- case op_new_async_generator_func: >- op = NewAsyncGeneratorFunction; >- break; >- default: >- op = NewFunction; >- } >- Node* scope = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(op, OpInfo(frozen), scope)); >- // Ideally we wouldn't have to do this Phantom. But: >- // >- // For the constant case: we must do it because otherwise we would have no way of knowing >- // that the scope is live at OSR here. >- // >- // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation >- // won't be able to handle an Undefined scope. 
>- addToGraph(Phantom, scope); >- static_assert(OPCODE_LENGTH(op_new_func) == OPCODE_LENGTH(op_new_generator_func), "The length of op_new_func should be equal to one of op_new_generator_func"); >- static_assert(OPCODE_LENGTH(op_new_func) == OPCODE_LENGTH(op_new_async_func), "The length of op_new_func should be equal to one of op_new_async_func"); >- static_assert(OPCODE_LENGTH(op_new_func) == OPCODE_LENGTH(op_new_async_generator_func), "The length of op_new_func should be equal to one of op_new_async_generator_func"); >+ handleNewFunc(NewFunction, currentInstruction->as<OpNewFunc>()); > NEXT_OPCODE(op_new_func); >- } >+ case op_new_generator_func: >+ handleNewFunc(NewGeneratorFunction, currentInstruction->as<OpNewGeneratorFunc>()); >+ NEXT_OPCODE(op_new_generator_func); >+ case op_new_async_func: >+ handleNewFunc(NewAsyncFunction, currentInstruction->as<OpNewAsyncFunc>()); >+ NEXT_OPCODE(op_new_async_func); > > case op_new_func_exp: >+ handleNewFuncExp(NewFunction, currentInstruction->as<OpNewFuncExp>()); >+ NEXT_OPCODE(op_new_func_exp); > case op_new_generator_func_exp: >+ handleNewFuncExp(NewGeneratorFunction, currentInstruction->as<OpNewGeneratorFuncExp>()); >+ NEXT_OPCODE(op_new_generator_func_exp); > case op_new_async_generator_func_exp: >- case op_new_async_func_exp: { >- FunctionExecutable* expr = m_inlineStackTop->m_profiledBlock->functionExpr(currentInstruction[3].u.operand); >- FrozenValue* frozen = m_graph.freezeStrong(expr); >- NodeType op; >- switch (opcodeID) { >- case op_new_generator_func_exp: >- op = NewGeneratorFunction; >- break; >- case op_new_async_func_exp: >- op = NewAsyncFunction; >- break; >- case op_new_async_generator_func_exp: >- op = NewAsyncGeneratorFunction; >- break; >- default: >- op = NewFunction; >- } >- Node* scope = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(op, OpInfo(frozen), scope)); >- // Ideally we wouldn't have to do this Phantom. 
But: >- // >- // For the constant case: we must do it because otherwise we would have no way of knowing >- // that the scope is live at OSR here. >- // >- // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation >- // won't be able to handle an Undefined scope. >- addToGraph(Phantom, scope); >- static_assert(OPCODE_LENGTH(op_new_func_exp) == OPCODE_LENGTH(op_new_generator_func_exp), "The length of op_new_func_exp should be equal to one of op_new_generator_func_exp"); >- static_assert(OPCODE_LENGTH(op_new_func_exp) == OPCODE_LENGTH(op_new_async_func_exp), "The length of op_new_func_exp should be equal to one of op_new_async_func_exp"); >- static_assert(OPCODE_LENGTH(op_new_func_exp) == OPCODE_LENGTH(op_new_async_generator_func_exp), "The length of op_new_func_exp should be equal to one of op_new_async_func_exp"); >- NEXT_OPCODE(op_new_func_exp); >- } >+ handleNewFuncExp(NewAsyncGeneratorFunction, currentInstruction->as<OpNewAsyncGeneratorFuncExp>()); >+ NEXT_OPCODE(op_new_async_generator_func_exp); >+ case op_new_async_func_exp: >+ handleNewFuncExp(NewAsyncFunction, currentInstruction->as<OpNewAsyncFuncExp>()); >+ NEXT_OPCODE(op_new_async_func_exp); > > case op_set_function_name: { >- Node* func = get(VirtualRegister(currentInstruction[1].u.operand)); >- Node* name = get(VirtualRegister(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpSetFunctionName>(); >+ Node* func = get(bytecode.function); >+ Node* name = get(bytecode.name); > addToGraph(SetFunctionName, func, name); > NEXT_OPCODE(op_set_function_name); > } > > case op_typeof: { >- set(VirtualRegister(currentInstruction[1].u.operand), >- addToGraph(TypeOf, get(VirtualRegister(currentInstruction[2].u.operand)))); >+ auto bytecode = currentInstruction->as<OpTypeof>(); >+ set(bytecode.dst, addToGraph(TypeOf, get(bytecode.value))); > NEXT_OPCODE(op_typeof); > } > > case op_to_number: { >+ auto bytecode = currentInstruction->as<OpToNumber>(); > 
SpeculatedType prediction = getPrediction(); >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), value)); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), value)); > NEXT_OPCODE(op_to_number); > } > > case op_to_string: { >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToString, value)); >+ auto bytecode = currentInstruction->as<OpToString>(); >+ Node* value = get(bytecode.operand); >+ set(bytecode.dst, addToGraph(ToString, value)); > NEXT_OPCODE(op_to_string); > } > > case op_to_object: { >+ auto bytecode = currentInstruction->as<OpToObject>(); > SpeculatedType prediction = getPrediction(); >- Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToObject, OpInfo(identifierNumber), OpInfo(prediction), value)); >+ Node* value = get(bytecode.operand); >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.message]; >+ set(bytecode.dst, addToGraph(ToObject, OpInfo(identifierNumber), OpInfo(prediction), value)); > NEXT_OPCODE(op_to_object); > } > > case op_in_by_val: { >- ArrayMode arrayMode = getArrayMode(currentInstruction[OPCODE_LENGTH(op_in_by_val) - 1].u.arrayProfile, Array::Read); >- set(VirtualRegister(currentInstruction[1].u.operand), >- addToGraph(InByVal, OpInfo(arrayMode.asWord()), get(VirtualRegister(currentInstruction[2].u.operand)), get(VirtualRegister(currentInstruction[3].u.operand)))); >+ auto bytecode = currentInstruction->as<OpInByVal>(); >+ ArrayMode arrayMode = getArrayMode(bytecode.metadata(m_codeBlock).arrayProfile, Array::Read); >+ set(bytecode.dst, 
addToGraph(InByVal, OpInfo(arrayMode.asWord()), get(bytecode.base), get(bytecode.property))); > NEXT_OPCODE(op_in_by_val); > } > > case op_in_by_id: { >- Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); >- unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; >+ auto bytecode = currentInstruction->as<OpInById>(); >+ Node* base = get(bytecode.base); >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; > UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; > > InByIdStatus status = InByIdStatus::computeFor( >@@ -6498,101 +6497,106 @@ void ByteCodeParser::parseBlock(unsigned limit) > addToGraph(FilterInByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses.addInByIdStatus(currentCodeOrigin(), status)), base); > > Node* match = addToGraph(MatchStructure, OpInfo(data), base); >- set(VirtualRegister(currentInstruction[1].u.operand), match); >+ set(bytecode.dst, match); > NEXT_OPCODE(op_in_by_id); > } > } > >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InById, OpInfo(identifierNumber), base)); >+ set(bytecode.dst, addToGraph(InById, OpInfo(identifierNumber), base)); > NEXT_OPCODE(op_in_by_id); > } > > case op_get_enumerable_length: { >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumerableLength, >- get(VirtualRegister(currentInstruction[2].u.operand)))); >+ auto bytecode = currentInstruction->as<OpGetEnumerableLength>(); >+ set(bytecode.dst, addToGraph(GetEnumerableLength, get(bytecode.base))); > NEXT_OPCODE(op_get_enumerable_length); > } > > case op_has_generic_property: { >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasGenericProperty, >- get(VirtualRegister(currentInstruction[2].u.operand)), >- get(VirtualRegister(currentInstruction[3].u.operand)))); >+ auto bytecode = currentInstruction->as<OpHasGenericProperty>(); >+ set(bytecode.dst, addToGraph(HasGenericProperty, get(bytecode.base), 
get(bytecode.property))); > NEXT_OPCODE(op_has_generic_property); > } > > case op_has_structure_property: { >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasStructureProperty, >- get(VirtualRegister(currentInstruction[2].u.operand)), >- get(VirtualRegister(currentInstruction[3].u.operand)), >- get(VirtualRegister(currentInstruction[4].u.operand)))); >+ auto bytecode = currentInstruction->as<OpHasStructureProperty>(); >+ set(bytecode.dst, addToGraph(HasStructureProperty, >+ get(bytecode.base), >+ get(bytecode.property), >+ get(bytecode.enumerator))); > NEXT_OPCODE(op_has_structure_property); > } > > case op_has_indexed_property: { >- Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); >- ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Read); >- Node* property = get(VirtualRegister(currentInstruction[3].u.operand)); >+ auto bytecode = currentInstruction->as<OpHasIndexedProperty>(); >+ Node* base = get(bytecode.base); >+ ArrayMode arrayMode = getArrayMode(bytecode.metadata(m_codeBlock).arrayProfile, Array::Read); >+ Node* property = get(bytecode.property); > Node* hasIterableProperty = addToGraph(HasIndexedProperty, OpInfo(arrayMode.asWord()), OpInfo(static_cast<uint32_t>(PropertySlot::InternalMethodType::GetOwnProperty)), base, property); >- set(VirtualRegister(currentInstruction[1].u.operand), hasIterableProperty); >+ set(bytecode.dst, hasIterableProperty); > NEXT_OPCODE(op_has_indexed_property); > } > > case op_get_direct_pname: { >+ auto bytecode = currentInstruction->as<OpGetDirectPname>(); > SpeculatedType prediction = getPredictionWithoutOSRExit(); > >- Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); >- Node* property = get(VirtualRegister(currentInstruction[3].u.operand)); >- Node* index = get(VirtualRegister(currentInstruction[4].u.operand)); >- Node* enumerator = get(VirtualRegister(currentInstruction[5].u.operand)); >+ Node* base = get(bytecode.base); >+ Node* property 
= get(bytecode.property); >+ Node* index = get(bytecode.index); >+ Node* enumerator = get(bytecode.enumerator); > > addVarArgChild(base); > addVarArgChild(property); > addVarArgChild(index); > addVarArgChild(enumerator); >- set(VirtualRegister(currentInstruction[1].u.operand), >- addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction))); >+ set(bytecode.dst, addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction))); > > NEXT_OPCODE(op_get_direct_pname); > } > > case op_get_property_enumerator: { >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetPropertyEnumerator, >- get(VirtualRegister(currentInstruction[2].u.operand)))); >+ auto bytecode = currentInstruction->as<OpGetPropertyEnumerator>(); >+ set(bytecode.dst, addToGraph(GetPropertyEnumerator, get(bytecode.base))); > NEXT_OPCODE(op_get_property_enumerator); > } > > case op_enumerator_structure_pname: { >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorStructurePname, >- get(VirtualRegister(currentInstruction[2].u.operand)), >- get(VirtualRegister(currentInstruction[3].u.operand)))); >+ auto bytecode = currentInstruction->as<OpEnumeratorStructurePname>(); >+ set(bytecode.dst, addToGraph(GetEnumeratorStructurePname, >+ get(bytecode.enumerator), >+ get(bytecode.index))); > NEXT_OPCODE(op_enumerator_structure_pname); > } > > case op_enumerator_generic_pname: { >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorGenericPname, >- get(VirtualRegister(currentInstruction[2].u.operand)), >- get(VirtualRegister(currentInstruction[3].u.operand)))); >+ auto bytecode = currentInstruction->as<OpEnumeratorGenericPname>(); >+ set(bytecode.dst, addToGraph(GetEnumeratorGenericPname, >+ get(bytecode.enumerator), >+ get(bytecode.index))); > NEXT_OPCODE(op_enumerator_generic_pname); > } > > case op_to_index_string: { >- set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToIndexString, >- 
get(VirtualRegister(currentInstruction[2].u.operand)))); >+ auto bytecode = currentInstruction->as<OpToIndexString>(); >+ set(bytecode.dst, addToGraph(ToIndexString, get(bytecode.index))); > NEXT_OPCODE(op_to_index_string); > } > > case op_log_shadow_chicken_prologue: { >+ auto bytecode = currentInstruction->as<OpLogShadowChickenPrologue>(); > if (!m_inlineStackTop->m_inlineCallFrame) >- addToGraph(LogShadowChickenPrologue, get(VirtualRegister(currentInstruction[1].u.operand))); >+ addToGraph(LogShadowChickenPrologue, get(bytecode.scope)); > NEXT_OPCODE(op_log_shadow_chicken_prologue); > } > > case op_log_shadow_chicken_tail: { >+ auto bytecode = currentInstruction->as<OpLogShadowChickenTail>(); > if (!m_inlineStackTop->m_inlineCallFrame) { > // FIXME: The right solution for inlining is to elide these whenever the tail call > // ends up being inlined. > // https://bugs.webkit.org/show_bug.cgi?id=155686 >- addToGraph(LogShadowChickenTail, get(VirtualRegister(currentInstruction[1].u.operand)), get(VirtualRegister(currentInstruction[2].u.operand))); >+ addToGraph(LogShadowChickenTail, get(bytecode.thisValue), get(bytecode.scope)); > } > NEXT_OPCODE(op_log_shadow_chicken_tail); > } >@@ -6796,7 +6800,7 @@ void ByteCodeParser::parseCodeBlock() > codeBlock->baselineVersion()->dumpBytecode(); > } > >- Vector<unsigned, 32> jumpTargets; >+ Vector<InstructionStream::Offset, 32> jumpTargets; > computePreciseJumpTargets(codeBlock, jumpTargets); > if (Options::dumpBytecodeAtDFGTime()) { > dataLog("Jump targets: "); >@@ -6853,6 +6857,115 @@ void ByteCodeParser::parseCodeBlock() > VERBOSE_LOG("Done parsing ", *codeBlock, " (fell off end)\n"); > } > >+template <typename Bytecode> >+void ByteCodeParser::handlePutByVal(Bytecode bytecode) >+{ >+ Node* base = get(bytecode.base); >+ Node* property = get(bytecode.property); >+ Node* value = get(bytecode.value); >+ bool isDirect = Bytecode::opcodeID() == op_put_by_val_direct; >+ bool compiledAsPutById = false; >+ { >+ unsigned 
identifierNumber = std::numeric_limits<unsigned>::max(); >+ PutByIdStatus putByIdStatus; >+ { >+ ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); >+ ByValInfo* byValInfo = m_inlineStackTop->m_baselineMap.get(CodeOrigin(currentCodeOrigin().bytecodeIndex)).byValInfo; >+ // FIXME: When the bytecode is not compiled in the baseline JIT, byValInfo becomes null. >+ // At that time, there is no information. >+ if (byValInfo >+ && byValInfo->stubInfo >+ && !byValInfo->tookSlowPath >+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent) >+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType) >+ && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) { >+ compiledAsPutById = true; >+ identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl()); >+ UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; >+ >+ if (Symbol* symbol = byValInfo->cachedSymbol.get()) { >+ FrozenValue* frozen = m_graph.freezeStrong(symbol); >+ addToGraph(CheckCell, OpInfo(frozen), property); >+ } else { >+ ASSERT(!uid->isSymbol()); >+ addToGraph(CheckStringIdent, OpInfo(uid), property); >+ } >+ >+ putByIdStatus = PutByIdStatus::computeForStubInfo( >+ locker, m_inlineStackTop->m_profiledBlock, >+ byValInfo->stubInfo, currentCodeOrigin(), uid); >+ >+ } >+ } >+ >+ if (compiledAsPutById) >+ handlePutById(base, identifierNumber, value, putByIdStatus, isDirect); >+ } >+ >+ if (!compiledAsPutById) { >+ ArrayMode arrayMode = getArrayMode(bytecode.metadata(m_codeBlock).arrayProfile, Array::Write); >+ >+ addVarArgChild(base); >+ addVarArgChild(property); >+ addVarArgChild(value); >+ addVarArgChild(0); // Leave room for property storage. >+ addVarArgChild(0); // Leave room for length. >+ addToGraph(Node::VarArg, isDirect ? 
PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0)); >+ } >+} >+ >+template <typename Bytecode> >+void ByteCodeParser::handlePutAccessorById(NodeType op, Bytecode bytecode) >+{ >+ Node* base = get(bytecode.base); >+ unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; >+ Node* accessor = get(bytecode.accessor); >+ addToGraph(op, OpInfo(identifierNumber), OpInfo(bytecode.attributes), base, accessor); >+} >+ >+template <typename Bytecode> >+void ByteCodeParser::handlePutAccessorByVal(NodeType op, Bytecode bytecode) >+{ >+ Node* base = get(bytecode.base); >+ Node* subscript = get(bytecode.property); >+ Node* accessor = get(bytecode.accessor); >+ addToGraph(op, OpInfo(bytecode.attributes), base, subscript, accessor); >+} >+ >+template <typename Bytecode> >+void ByteCodeParser::handleNewFunc(NodeType op, Bytecode bytecode) >+{ >+ FunctionExecutable* decl = m_inlineStackTop->m_profiledBlock->functionDecl(bytecode.functionDecl); >+ FrozenValue* frozen = m_graph.freezeStrong(decl); >+ Node* scope = get(bytecode.scope); >+ set(bytecode.dst, addToGraph(op, OpInfo(frozen), scope)); >+ // Ideally we wouldn't have to do this Phantom. But: >+ // >+ // For the constant case: we must do it because otherwise we would have no way of knowing >+ // that the scope is live at OSR here. >+ // >+ // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation >+ // won't be able to handle an Undefined scope. >+ addToGraph(Phantom, scope); >+} >+ >+template <typename Bytecode> >+void ByteCodeParser::handleNewFuncExp(NodeType op, Bytecode bytecode) >+{ >+ FunctionExecutable* expr = m_inlineStackTop->m_profiledBlock->functionExpr(bytecode.functionDecl); >+ FrozenValue* frozen = m_graph.freezeStrong(expr); >+ Node* scope = get(bytecode.scope); >+ set(bytecode.dst, addToGraph(op, OpInfo(frozen), scope)); >+ // Ideally we wouldn't have to do this Phantom. 
But: >+ // >+ // For the constant case: we must do it because otherwise we would have no way of knowing >+ // that the scope is live at OSR here. >+ // >+ // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation >+ // won't be able to handle an Undefined scope. >+ addToGraph(Phantom, scope); >+} >+ > void ByteCodeParser::parse() > { > // Set during construction. >@@ -6941,9 +7054,7 @@ void ByteCodeParser::parse() > if (argument.isArgument() && !argument.isHeader()) { > const Vector<ArgumentPosition*>& arguments = m_inlineCallFrameToArgumentPositions.get(inlineCallFrame); > arguments[argument.toArgument()]->addVariable(variable); >- } >- >- insertionSet.insertNode(block->size(), SpecNone, op, endOrigin, OpInfo(variable)); >+ } insertionSet.insertNode(block->size(), SpecNone, op, endOrigin, OpInfo(variable)); > }; > auto addFlushDirect = [&] (InlineCallFrame* inlineCallFrame, VirtualRegister operand) { > insertLivenessPreservingOp(inlineCallFrame, Flush, operand); >diff --git a/Source/JavaScriptCore/dfg/DFGCapabilities.cpp b/Source/JavaScriptCore/dfg/DFGCapabilities.cpp >index dadc92d867e4c44a3462d8d6dab6458075acdd75..916ce20fcf77d69fdd0faeda32d7e2abe3d5de00 100644 >--- a/Source/JavaScriptCore/dfg/DFGCapabilities.cpp >+++ b/Source/JavaScriptCore/dfg/DFGCapabilities.cpp >@@ -103,12 +103,14 @@ inline void debugFail(CodeBlock* codeBlock, OpcodeID opcodeID, CapabilityLevel r > dataLog("DFG rejecting opcode in ", *codeBlock, " because of opcode ", opcodeNames[opcodeID], "\n"); > } > >-CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, Instruction* pc) >+CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, const Instruction* pc) > { > UNUSED_PARAM(codeBlock); // This function does some bytecode parsing. Ordinarily bytecode parsing requires the owning CodeBlock. It's sort of strange that we don't use it here right now. 
> UNUSED_PARAM(pc); > > switch (opcodeID) { >+ case op_wide: >+ ASSERT_NOT_REACHED(); > case op_enter: > case op_to_this: > case op_argument_count: >@@ -164,12 +166,9 @@ CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, Instruc > case op_put_by_val_direct: > case op_try_get_by_id: > case op_get_by_id: >- case op_get_by_id_proto_load: >- case op_get_by_id_unset: > case op_get_by_id_with_this: > case op_get_by_id_direct: > case op_get_by_val_with_this: >- case op_get_array_length: > case op_put_by_id: > case op_put_by_id_with_this: > case op_put_by_val_with_this: >@@ -302,20 +301,17 @@ CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, Instruc > > CapabilityLevel capabilityLevel(CodeBlock* codeBlock) > { >- Instruction* instructionsBegin = codeBlock->instructions().begin(); >- unsigned instructionCount = codeBlock->instructions().size(); > CapabilityLevel result = CanCompileAndInline; > >- for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount; ) { >- switch (Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode)) { >+ for (const auto& instruction : codeBlock->instructions()) { >+ switch (instruction->opcodeID()) { > #define DEFINE_OP(opcode, length) \ > case opcode: { \ >- CapabilityLevel newResult = leastUpperBound(result, capabilityLevel(opcode, codeBlock, instructionsBegin + bytecodeOffset)); \ >+ CapabilityLevel newResult = leastUpperBound(result, capabilityLevel(opcode, codeBlock, instruction.ptr())); \ > if (newResult != result) { \ > debugFail(codeBlock, opcode, newResult); \ > result = newResult; \ > } \ >- bytecodeOffset += length; \ > break; \ > } > FOR_EACH_OPCODE_ID(DEFINE_OP) >diff --git a/Source/JavaScriptCore/dfg/DFGCapabilities.h b/Source/JavaScriptCore/dfg/DFGCapabilities.h >index e03b2471eb23469abc8fe1ca98f042101af4c72d..52145e976ab5ef652ab4a8a4cf5d3f145038ef64 100644 >--- a/Source/JavaScriptCore/dfg/DFGCapabilities.h >+++ b/Source/JavaScriptCore/dfg/DFGCapabilities.h >@@ -45,7 
+45,7 @@ bool mightInlineFunctionForClosureCall(CodeBlock*); > bool mightInlineFunctionForConstruct(CodeBlock*); > bool canUseOSRExitFuzzing(CodeBlock*); > >-inline CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, Instruction* pc); >+inline CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, const Instruction* pc); > > CapabilityLevel capabilityLevel(CodeBlock*); > #else // ENABLE(DFG_JIT) >@@ -58,7 +58,7 @@ inline bool mightInlineFunctionForClosureCall(CodeBlock*) { return false; } > inline bool mightInlineFunctionForConstruct(CodeBlock*) { return false; } > inline bool canUseOSRExitFuzzing(CodeBlock*) { return false; } > >-inline CapabilityLevel capabilityLevel(OpcodeID, CodeBlock*, Instruction*) { return CannotCompile; } >+inline CapabilityLevel capabilityLevel(OpcodeID, CodeBlock*, const Instruction*) { return CannotCompile; } > inline CapabilityLevel capabilityLevel(CodeBlock*) { return CannotCompile; } > #endif // ENABLE(DFG_JIT) > >diff --git a/Source/JavaScriptCore/dfg/DFGGraph.cpp b/Source/JavaScriptCore/dfg/DFGGraph.cpp >index ca4b0e253a9f4bda0c3a879764db637b9d1130d1..99a6952cdac0c1d4729b6a56acd24844129136fb 100644 >--- a/Source/JavaScriptCore/dfg/DFGGraph.cpp >+++ b/Source/JavaScriptCore/dfg/DFGGraph.cpp >@@ -1661,8 +1661,9 @@ MethodOfGettingAValueProfile Graph::methodOfGettingAValueProfileFor(Node* curren > } > } > >- if (node->hasHeapPrediction()) >- return &profiledBlock->valueProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex); >+ // TODO >+ //if (node->hasHeapPrediction()) >+ //return &profiledBlock->valueProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex); > > if (profiledBlock->hasBaselineJITProfiling()) { > if (ArithProfile* result = profiledBlock->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex)) >diff --git a/Source/JavaScriptCore/dfg/DFGOSREntry.cpp b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp >index 
438f8f21dfcfb9af9ddd6bbd4ca8fb459dad2a5f..74b20710636ca5ec664d0f732296f3b6fc5a748f 100644 >--- a/Source/JavaScriptCore/dfg/DFGOSREntry.cpp >+++ b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp >@@ -389,8 +389,9 @@ MacroAssemblerCodePtr<ExceptionHandlerPtrTag> prepareCatchOSREntry(ExecState* ex > if (UNLIKELY(!vm.ensureStackCapacityFor(&exec->registers()[virtualRegisterForLocal(frameSizeForCheck).offset()]))) > return nullptr; > >- ASSERT(Interpreter::getOpcodeID(exec->codeBlock()->instructions()[exec->bytecodeOffset()].u.opcode) == op_catch); >- ValueProfileAndOperandBuffer* buffer = static_cast<ValueProfileAndOperandBuffer*>(exec->codeBlock()->instructions()[exec->bytecodeOffset() + 3].u.pointer); >+ auto instruction = exec->codeBlock()->instructions().at(exec->bytecodeOffset()); >+ ASSERT(instruction->is<OpCatch>()); >+ ValueProfileAndOperandBuffer* buffer = instruction->as<OpCatch>().metadata(exec).buffer; > JSValue* dataBuffer = reinterpret_cast<JSValue*>(dfgCommon->catchOSREntryBuffer->dataBuffer()); > unsigned index = 0; > buffer->forEach([&] (ValueProfileAndOperand& profile) { >diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp >index b845f4066eef2f04ac9798062e05c3d97b535531..be5da0deab992e1f6b8613cfec39e8bff5a99103 100644 >--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp >+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp >@@ -3811,7 +3811,7 @@ void SpeculativeJIT::compileValueAdd(Node* node) > > CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic); > ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex); >- Instruction* instruction = &baselineCodeBlock->instructions()[node->origin.semantic.bytecodeIndex]; >+ const Instruction* instruction = baselineCodeBlock->instructions().at(node->origin.semantic.bytecodeIndex).ptr(); > JITAddIC* addIC = m_jit.codeBlock()->addJITAddIC(arithProfile, instruction); > 
auto repatchingFunction = operationValueAddOptimize; > auto nonRepatchingFunction = operationValueAdd; >@@ -4465,7 +4465,7 @@ void SpeculativeJIT::compileArithSub(Node* node) > > CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic); > ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex); >- Instruction* instruction = &baselineCodeBlock->instructions()[node->origin.semantic.bytecodeIndex]; >+ const Instruction* instruction = baselineCodeBlock->instructions().at(node->origin.semantic.bytecodeIndex).ptr(); > JITSubIC* subIC = m_jit.codeBlock()->addJITSubIC(arithProfile, instruction); > auto repatchingFunction = operationValueSubOptimize; > auto nonRepatchingFunction = operationValueSub; >@@ -4484,7 +4484,7 @@ void SpeculativeJIT::compileValueNegate(Node* node) > { > CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic); > ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex); >- Instruction* instruction = &baselineCodeBlock->instructions()[node->origin.semantic.bytecodeIndex]; >+ const Instruction* instruction = baselineCodeBlock->instructions().at(node->origin.semantic.bytecodeIndex).ptr(); > JITNegIC* negIC = m_jit.codeBlock()->addJITNegIC(arithProfile, instruction); > auto repatchingFunction = operationArithNegateOptimize; > auto nonRepatchingFunction = operationArithNegate; >@@ -4826,7 +4826,7 @@ void SpeculativeJIT::compileArithMul(Node* node) > > CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic); > ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex); >- Instruction* instruction = &baselineCodeBlock->instructions()[node->origin.semantic.bytecodeIndex]; >+ const Instruction* instruction = baselineCodeBlock->instructions().at(node->origin.semantic.bytecodeIndex).ptr(); > 
JITMulIC* mulIC = m_jit.codeBlock()->addJITMulIC(arithProfile, instruction); > auto repatchingFunction = operationValueMulOptimize; > auto nonRepatchingFunction = operationValueMul; >diff --git a/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp b/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp >index 3ba87e17c20a1cb84f95dbb11080e6d67d6a39b4..ac313dc5417d3366e2849ee46a1cf0c67f57905d 100644 >--- a/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp >+++ b/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp >@@ -1846,7 +1846,7 @@ private: > { > CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic); > ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex); >- Instruction* instruction = &baselineCodeBlock->instructions()[m_node->origin.semantic.bytecodeIndex]; >+ const Instruction* instruction = baselineCodeBlock->instructions().at(m_node->origin.semantic.bytecodeIndex).ptr(); > auto repatchingFunction = operationValueAddOptimize; > auto nonRepatchingFunction = operationValueAdd; > compileBinaryMathIC<JITAddGenerator>(arithProfile, instruction, repatchingFunction, nonRepatchingFunction); >@@ -1854,7 +1854,7 @@ private: > > template <typename Generator, typename Func1, typename Func2, > typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename std::remove_pointer<Func2>::type>::value>> >- void compileUnaryMathIC(ArithProfile* arithProfile, Instruction* instruction, Func1 repatchingFunction, Func2 nonRepatchingFunction) >+ void compileUnaryMathIC(ArithProfile* arithProfile, const Instruction* instruction, Func1 repatchingFunction, Func2 nonRepatchingFunction) > { > Node* node = m_node; > >@@ -1940,7 +1940,7 @@ private: > > template <typename Generator, typename Func1, typename Func2, > typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename 
std::remove_pointer<Func2>::type>::value>> >- void compileBinaryMathIC(ArithProfile* arithProfile, Instruction* instruction, Func1 repatchingFunction, Func2 nonRepatchingFunction) >+ void compileBinaryMathIC(ArithProfile* arithProfile, const Instruction* instruction, Func1 repatchingFunction, Func2 nonRepatchingFunction) > { > Node* node = m_node; > >@@ -2107,7 +2107,7 @@ private: > > CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic); > ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex); >- Instruction* instruction = &baselineCodeBlock->instructions()[m_node->origin.semantic.bytecodeIndex]; >+ const Instruction* instruction = baselineCodeBlock->instructions().at(m_node->origin.semantic.bytecodeIndex).ptr(); > auto repatchingFunction = operationValueSubOptimize; > auto nonRepatchingFunction = operationValueSub; > compileBinaryMathIC<JITSubGenerator>(arithProfile, instruction, repatchingFunction, nonRepatchingFunction); >@@ -2203,7 +2203,7 @@ private: > case UntypedUse: { > CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic); > ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex); >- Instruction* instruction = &baselineCodeBlock->instructions()[m_node->origin.semantic.bytecodeIndex]; >+ const Instruction* instruction = baselineCodeBlock->instructions().at(m_node->origin.semantic.bytecodeIndex).ptr(); > auto repatchingFunction = operationValueMulOptimize; > auto nonRepatchingFunction = operationValueMul; > compileBinaryMathIC<JITMulGenerator>(arithProfile, instruction, repatchingFunction, nonRepatchingFunction); >@@ -2740,7 +2740,7 @@ private: > DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse); > CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic); > ArithProfile* arithProfile = 
baselineCodeBlock->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex); >- Instruction* instruction = &baselineCodeBlock->instructions()[m_node->origin.semantic.bytecodeIndex]; >+ const Instruction* instruction = baselineCodeBlock->instructions().at(m_node->origin.semantic.bytecodeIndex).ptr(); > auto repatchingFunction = operationArithNegateOptimize; > auto nonRepatchingFunction = operationArithNegate; > compileUnaryMathIC<JITNegGenerator>(arithProfile, instruction, repatchingFunction, nonRepatchingFunction); >diff --git a/Source/JavaScriptCore/ftl/FTLOperations.cpp b/Source/JavaScriptCore/ftl/FTLOperations.cpp >index 147bb2c12fb53c4e0b79e5bee5bcae6e1669e14b..caa1890e23b2cadbe8232a9095e7859ac288e70c 100644 >--- a/Source/JavaScriptCore/ftl/FTLOperations.cpp >+++ b/Source/JavaScriptCore/ftl/FTLOperations.cpp >@@ -474,10 +474,10 @@ extern "C" JSCell* JIT_OPERATION operationMaterializeObjectInOSR( > // For now, we use array allocation profile in the actual CodeBlock. It is OK since current NewArrayBuffer > // and PhantomNewArrayBuffer are always bound to a specific op_new_array_buffer. 
> CodeBlock* codeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock(materialization->origin(), exec->codeBlock()); >- Instruction* currentInstruction = &codeBlock->instructions()[materialization->origin().bytecodeIndex]; >- RELEASE_ASSERT(Interpreter::getOpcodeID(currentInstruction[0].u.opcode) == op_new_array_buffer); >- auto* newArrayBuffer = bitwise_cast<OpNewArrayBuffer*>(currentInstruction); >- ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile; >+ const Instruction* currentInstruction = codeBlock->instructions().at(materialization->origin().bytecodeIndex).ptr(); >+ RELEASE_ASSERT(currentInstruction->is<OpNewArrayBuffer>()); >+ auto newArrayBuffer = currentInstruction->as<OpNewArrayBuffer>(); >+ ArrayAllocationProfile* profile = &newArrayBuffer.metadata(codeBlock).allocationProfile; > > // FIXME: Share the code with CommonSlowPaths. Currently, codeBlock etc. are slightly different. > IndexingType indexingMode = profile->selectIndexingType(); >@@ -495,7 +495,7 @@ extern "C" JSCell* JIT_OPERATION operationMaterializeObjectInOSR( > // We also cannot allocate a new butterfly from compilation threads since it's invalid to allocate cells from > // a compilation thread. > WTF::storeStoreFence(); >- codeBlock->constantRegister(newArrayBuffer->immutableButterfly()).set(vm, codeBlock, immutableButterfly); >+ codeBlock->constantRegister(newArrayBuffer.immutableButterfly.offset()).set(vm, codeBlock, immutableButterfly); > WTF::storeStoreFence(); > } > >diff --git a/Source/JavaScriptCore/generate-bytecode-files b/Source/JavaScriptCore/generate-bytecode-files >deleted file mode 100644 >index fa25fd2ef31be4c1eb3c3a585be529d67cfed6d8..0000000000000000000000000000000000000000 >--- a/Source/JavaScriptCore/generate-bytecode-files >+++ /dev/null >@@ -1,302 +0,0 @@ >-#! /usr/bin/env python >- >-# Copyright (C) 2014-2017 Apple Inc. All rights reserved. 
>-# >-# Redistribution and use in source and binary forms, with or without >-# modification, are permitted provided that the following conditions >-# are met: >-# >-# 1. Redistributions of source code must retain the above copyright >-# notice, this list of conditions and the following disclaimer. >-# 2. Redistributions in binary form must reproduce the above copyright >-# notice, this list of conditions and the following disclaimer in the >-# documentation and/or other materials provided with the distribution. >-# >-# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY >-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED >-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE >-# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY >-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES >-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; >-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND >-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF >-# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. >- >-# This tool processes the bytecode list to create Bytecodes.h and InitBytecodes.asm >- >-import hashlib >-import json >-import optparse >-import os >-import re >-import sys >- >-cCopyrightMsg = """/* >-* Copyright (C) 2014 Apple Inc. All rights reserved. >-* >-* Redistribution and use in source and binary forms, with or without >-* modification, are permitted provided that the following conditions >-* are met: >-* >-* 1. Redistributions of source code must retain the above copyright >-* notice, this list of conditions and the following disclaimer. >-* 2. 
Redistributions in binary form must reproduce the above copyright >-* notice, this list of conditions and the following disclaimer in the >-* documentation and/or other materials provided with the distribution. >-* >-* THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY >-* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED >-* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE >-* DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY >-* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES >-* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; >-* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND >-* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >-* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF >-* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. >- >-* Autogenerated from %s, do not modify. >-*/ >- >-""" >- >-asmCopyrightMsg = """# Copyright (C) 2014 Apple Inc. All rights reserved. >-# >-# Redistribution and use in source and binary forms, with or without >-# modification, are permitted provided that the following conditions >-# are met: >-# >-# 1. Redistributions of source code must retain the above copyright >-# notice, this list of conditions and the following disclaimer. >-# 2. Redistributions in binary form must reproduce the above copyright >-# notice, this list of conditions and the following disclaimer in the >-# documentation and/or other materials provided with the distribution. >-# >-# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY >-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED >-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE >-# DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY >-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES >-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; >-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND >-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF >-# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. >- >-# Autogenerated from %s, do not modify. >- >-""" >-def openOrExit(path, mode): >- try: >- return open(path, mode) >- except IOError as e: >- print("I/O error opening {0}, ({1}): {2}".format(path, e.errno, e.strerror)) >- exit(1) >- >-def hashFile(file): >- sha1 = hashlib.sha1() >- file.seek(0) >- for line in file: >- sha1.update(line) >- >- file.seek(0) >- >- return sha1.hexdigest() >- >- >-def toCpp(name): >- camelCase = re.sub(r'([^a-z0-9].)', lambda c: c.group(0)[1].upper(), name) >- CamelCase = camelCase[:1].upper() + camelCase[1:] >- return CamelCase >- >- >-def writeInstructionAccessor(bytecodeHFile, typeName, name): >- bytecodeHFile.write(" {0}& {1}() {{ return *bitwise_cast<{0}*>(&m_{1}); }}\n".format(typeName, name)) >- bytecodeHFile.write(" const {0}& {1}() const {{ return *bitwise_cast<const {0}*>(&m_{1}); }}\n".format(typeName, name)) >- >- >-def writeInstructionMember(bytecodeHFile, typeName, name): >- bytecodeHFile.write(" std::aligned_storage<sizeof({0}), sizeof(Instruction)>::type m_{1};\n".format(typeName, name)) >- bytecodeHFile.write(" static_assert(sizeof({0}) <= sizeof(Instruction), \"Size of {0} shouldn't be bigger than an Instruction.\");\n".format(typeName, name)) >- >-def writeStruct(bytecodeHFile, bytecode): >- bytecodeHFile.write("struct {0} {{\n".format(toCpp(bytecode["name"]))) >- bytecodeHFile.write("public:\n") >- >- writeInstructionAccessor(bytecodeHFile, "Opcode", "opcode") >- for offset in 
bytecode["offsets"]: >- for name, typeName in offset.iteritems(): >- writeInstructionAccessor(bytecodeHFile, typeName, name) >- >- bytecodeHFile.write("\nprivate:\n") >- bytecodeHFile.write(" friend class LLIntOffsetsExtractor;\n\n") >- >- writeInstructionMember(bytecodeHFile, "Opcode", "opcode") >- for offset in bytecode["offsets"]: >- for name, typeName in offset.iteritems(): >- writeInstructionMember(bytecodeHFile, typeName, name) >- bytecodeHFile.write("};\n\n") >- >- >-if __name__ == "__main__": >- parser = optparse.OptionParser(usage = "usage: %prog [--bytecodes_h <FILE>] [--init_bytecodes_asm <FILE>] <bytecode-json-file>") >- parser.add_option("-b", "--bytecodes_h", dest = "bytecodesHFileName", help = "generate bytecodes macro .h FILE", metavar = "FILE") >- parser.add_option("-s", "--bytecode_structs_h", dest = "bytecodeStructsHFileName", help = "generate bytecodes macro .h FILE", metavar = "FILE") >- parser.add_option("-a", "--init_bytecodes_asm", dest = "initASMFileName", help="generate ASM bytecodes init FILE", metavar = "FILE") >- (options, args) = parser.parse_args() >- >- if len(args) != 1: >- parser.error("missing <bytecode-json-file>") >- >- bytecodeJSONFile = args[0] >- bytecodeFile = openOrExit(bytecodeJSONFile, "rb") >- sha1Hash = hashFile(bytecodeFile) >- >- hFileHashString = "// SHA1Hash: {0}\n".format(sha1Hash) >- asmFileHashString = "# SHA1Hash: {0}\n".format(sha1Hash) >- >- bytecodeHFilename = options.bytecodesHFileName >- bytecodeStructsHFilename = options.bytecodeStructsHFileName >- initASMFileName = options.initASMFileName >- >- if not bytecodeHFilename and not initASMFileName and not bytecodeStructsHFilename: >- parser.print_help() >- exit(0) >- >- needToGenerate = False >- >- if bytecodeHFilename: >- try: >- bytecodeHReadFile = open(bytecodeHFilename, "rb") >- >- hashLine = bytecodeHReadFile.readline() >- if hashLine != hFileHashString: >- needToGenerate = True >- except: >- needToGenerate = True >- else: >- bytecodeHReadFile.close() >- 
>- if bytecodeStructsHFilename: >- try: >- bytecodeStructsHReadFile = open(bytecodeStructsHFilename, "rb") >- >- hashLine = bytecodeStructsHReadFile.readline() >- if hashLine != hFileHashString: >- needToGenerate = True >- except: >- needToGenerate = True >- else: >- bytecodeStructsHReadFile.close() >- >- if initASMFileName: >- try: >- initBytecodesReadFile = open(initASMFileName, "rb") >- >- hashLine = initBytecodesReadFile.readline() >- if hashLine != asmFileHashString: >- needToGenerate = True >- except: >- needToGenerate = True >- else: >- initBytecodesReadFile.close() >- >- if not needToGenerate: >- exit(0) >- >- if bytecodeHFilename: >- bytecodeHFile = openOrExit(bytecodeHFilename, "wb") >- >- if bytecodeStructsHFilename: >- bytecodeStructsHFile = openOrExit(bytecodeStructsHFilename, "wb") >- >- if initASMFileName: >- initBytecodesFile = openOrExit(initASMFileName, "wb") >- >- try: >- bytecodeSections = json.load(bytecodeFile, encoding = "utf-8") >- except: >- print("Unexpected error parsing {0}: {1}".format(bytecodeJSONFile, sys.exc_info())) >- >- if bytecodeHFilename: >- bytecodeHFile.write(hFileHashString) >- bytecodeHFile.write(cCopyrightMsg % bytecodeJSONFile) >- bytecodeHFile.write("#pragma once\n\n") >- >- if bytecodeStructsHFilename: >- bytecodeStructsHFile.write(hFileHashString) >- bytecodeStructsHFile.write(cCopyrightMsg % bytecodeJSONFile) >- bytecodeStructsHFile.write("#pragma once\n\n") >- bytecodeStructsHFile.write("#include \"Instruction.h\"\n") >- bytecodeStructsHFile.write("\n") >- >- if initASMFileName: >- initBytecodesFile.write(asmFileHashString) >- initBytecodesFile.write(asmCopyrightMsg % bytecodeJSONFile) >- initASMBytecodeNum = 0 >- >- for section in bytecodeSections: >- if bytecodeHFilename and section['emitInHFile']: >- bytecodeHFile.write("#define FOR_EACH_{0}_ID(macro) \\\n".format(section["macroNameComponent"])) >- firstMacro = True >- defaultLength = 1 >- if "defaultLength" in section: >- defaultLength = section["defaultLength"] 
>- >- bytecodeNum = 0 >- for bytecode in section["bytecodes"]: >- if not firstMacro: >- bytecodeHFile.write(" \\\n") >- >- length = defaultLength >- if "length" in bytecode: >- length = bytecode["length"] >- elif "offsets" in bytecode: >- # Add one for the opcode >- length = len(bytecode["offsets"]) + 1 >- >- bytecodeHFile.write(" macro({0}, {1})".format(bytecode["name"], length)) >- firstMacro = False >- bytecodeNum = bytecodeNum + 1 >- >- bytecodeHFile.write("\n\n") >- bytecodeHFile.write("#define NUMBER_OF_{0}_IDS {1}\n\n".format(section["macroNameComponent"], bytecodeNum)) >- >- >- if bytecodeStructsHFilename and section['emitInStructsFile']: >- bytecodeStructsHFile.write("namespace JSC {\n\n") >- >- for bytecode in section["bytecodes"]: >- if not "offsets" in bytecode: >- continue >- writeStruct(bytecodeStructsHFile, bytecode) >- >- bytecodeStructsHFile.write("} // namespace JSC \n") >- >- if bytecodeHFilename and section['emitOpcodeIDStringValuesInHFile']: >- bytecodeNum = 0 >- for bytecode in section["bytecodes"]: >- bytecodeHFile.write("#define {0}_value_string \"{1}\"\n".format(bytecode["name"], bytecodeNum)) >- firstMacro = False >- bytecodeNum = bytecodeNum + 1 >- >- bytecodeHFile.write("\n") >- >- if initASMFileName and section['emitInASMFile']: >- prefix = "" >- if "asmPrefix" in section: >- prefix = section["asmPrefix"] >- for bytecode in section["bytecodes"]: >- initBytecodesFile.write("setEntryAddress({0}, _{1}{2})\n".format(initASMBytecodeNum, prefix, bytecode["name"])) >- initASMBytecodeNum = initASMBytecodeNum + 1 >- >- if bytecodeHFilename: >- bytecodeHFile.close() >- >- if initASMFileName: >- initBytecodesFile.close() >- >- bytecodeFile.close() >- >- exit(0) >diff --git a/Source/JavaScriptCore/generator/Argument.rb b/Source/JavaScriptCore/generator/Argument.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..0a8e8be3abe201a65944d381b83e473a01521b06 >--- /dev/null >+++ b/Source/JavaScriptCore/generator/Argument.rb >@@ -0,0 
+1,63 @@ >+require_relative 'Fits' >+ >+class Argument >+ attr_reader :name >+ >+ def initialize(name, type, index) >+ @optional = name[-1] == "?" >+ @name = @optional ? name[0...-1] : name >+ @type = type >+ @index = index >+ end >+ >+ def field >+ "#{@type.to_s} #{@name};" >+ end >+ >+ def create_param >+ "#{@type.to_s} #{@name}" >+ end >+ >+ def fits_check(size) >+ Fits::check size, @name, @type >+ end >+ >+ def fits_write(size) >+ Fits::write size, @name, @type >+ end >+ >+ def assert_fits(size) >+ "ASSERT((#{fits_check size}));" >+ end >+ >+ def load_from_stream(index, size) >+ "#{@name}(#{Fits::convert(size, "stream[#{index+1}]", @type)})" >+ end >+ >+ def setter >+ <<-EOF >+ void set#{capitalized_name}(#{@type.to_s} value) >+ { >+ if (isWide()) >+ set#{capitalized_name}<OpcodeSize::Wide>(value); >+ else >+ set#{capitalized_name}<OpcodeSize::Narrow>(value); >+ } >+ >+ template <OpcodeSize size> >+ void set#{capitalized_name}(#{@type.to_s} value) >+ { >+ auto* stream = reinterpret_cast<typename TypeBySize<size>::type*>(this + #{@index} * size + PaddingBySize<size>::value); >+ *stream = #{Fits::convert "size", "value", @type}; >+ } >+ EOF >+ end >+ >+ def capitalized_name >+ @capitalized_name ||= @name.to_s.split('_').map do |word| >+ letters = word.split('') >+ letters.first.upcase! 
>+ letters.join >+ end.join >+ end >+end >diff --git a/Source/JavaScriptCore/generator/Assertion.rb b/Source/JavaScriptCore/generator/Assertion.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..a93dd4d9feff9750471fb66d94fd73b2da5faee0 >--- /dev/null >+++ b/Source/JavaScriptCore/generator/Assertion.rb >@@ -0,0 +1,9 @@ >+class AssertionError < RuntimeError >+ def initialize(msg) >+ super >+ end >+end >+ >+def assert(msg, &block) >+ raise AssertionError, msg unless yield >+end >diff --git a/Source/JavaScriptCore/generator/DSL.rb b/Source/JavaScriptCore/generator/DSL.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..facfe449c8a31d8508272d7bc7a8df9da43cb1b6 >--- /dev/null >+++ b/Source/JavaScriptCore/generator/DSL.rb >@@ -0,0 +1,126 @@ >+require_relative 'Assertion' >+require_relative 'Section' >+require_relative 'Template' >+require_relative 'Type' >+require_relative 'GeneratedFile' >+ >+module DSL >+ @sections = [] >+ @current_section = nil >+ @context = binding() >+ @namespaces = [] >+ >+ def self.begin_section(name, config={}) >+ assert("must call `end_section` before beginning a new section") { @current_section.nil? } >+ @current_section = Section.new name, config >+ end >+ >+ def self.end_section(name) >+ assert("current section's name is `#{@current_section.name}`, but end_section was called with `#{name}`") { @current_section.name == name } >+ @sections << @current_section >+ @current_section = nil >+ end >+ >+ def self.op(name, config = {}) >+ assert("`op` can only be called in between `begin_section` and `end_section`") { not @current_section.nil? } >+ @current_section.add_opcode(name, config) >+ end >+ >+ def self.op_group(desc, ops, config) >+ assert("`op_group` can only be called in between `begin_section` and `end_section`") { not @current_section.nil? 
} >+ @current_section.add_opcode_group(desc, ops, config) >+ end >+ >+ def self.types(types) >+ types.map do |type| >+ type = (@namespaces + [type]).join "::" >+ @context.eval("#{type} = Type.new '#{type}'") >+ end >+ end >+ >+ def self.templates(types) >+ types.map do |type| >+ type = (@namespaces + [type]).join "::" >+ @context.eval("#{type} = Template.new '#{type}'") >+ end >+ end >+ >+ def self.namespace(name) >+ @namespaces << name.to_s >+ ctx = @context >+ @context = @context.eval(" >+ module #{name} >+ def self.get_binding >+ binding() >+ end >+ end >+ #{name}.get_binding >+ ") >+ yield >+ @context = ctx >+ @namespaces.pop >+ end >+ >+ def self.run(options) >+ bytecodeListPath = options[:bytecodeList] >+ bytecodeList = File.open(bytecodeListPath) >+ @context.eval(bytecodeList.read, bytecodeListPath) >+ assert("must end last section") { @current_section.nil? } >+ >+ write_bytecodes(bytecodeList, options[:bytecodesFilename]) >+ write_bytecode_structs(bytecodeList, options[:bytecodeStructsFilename]) >+ write_init_asm(bytecodeList, options[:initAsmFilename]) >+ end >+ >+ def self.write_bytecodes(bytecode_list, bytecodes_filename) >+ GeneratedFile::create(bytecodes_filename, bytecode_list) do |template| >+ template.prefix = "#pragma once" >+ num_opcodes = @sections.map(&:opcodes).flatten.size >+ template.body = @sections.map { |s| s.header_helpers(num_opcodes) }.join("\n") >+ end >+ end >+ >+ def self.write_bytecode_structs(bytecode_list, bytecode_structs_filename) >+ GeneratedFile::create(bytecode_structs_filename, bytecode_list) do |template| >+ opcodes = opcodes_for(:emit_in_structs_file) >+ >+ template.prefix = <<-EOF >+ #pragma once >+ >+ #include "ArithProfile.h" >+ #include "BytecodeDumper.h" >+ #include "BytecodeGenerator.h" >+ #include "Fits.h" >+ #include "GetByIdMetadata.h" >+ #include "Instruction.h" >+ #include "Opcode.h" >+ #include "ToThisStatus.h" >+ >+ namespace JSC { >+ EOF >+ >+ template.body = <<-EOF >+ #{opcodes.map(&:cpp_class).join("\n")} 
>+ >+ #{Opcode.dump_bytecode(opcodes)} >+ EOF >+ >+ template.suffix = "} // namespace JSC" >+ end >+ end >+ >+ def self.write_init_asm(bytecode_list, init_asm_filename) >+ opcodes = opcodes_for(:emit_in_asm_file) >+ >+ GeneratedFile::create(init_asm_filename, bytecode_list) do |template| >+ template.multiline_comment = nil >+ template.line_comment = "#" >+ template.body = (opcodes.map.with_index(&:set_entry_address) + opcodes.map.with_index(&:set_entry_address_wide)) .join("\n") >+ end >+ end >+ >+ def self.opcodes_for(file) >+ sections = @sections.select { |s| s.config[file] } >+ sections.map(&:opcodes).flatten >+ end >+end >diff --git a/Source/JavaScriptCore/generator/Fits.rb b/Source/JavaScriptCore/generator/Fits.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..60a0b47635a66ed6361b36337f93d6fa2d2ca968 >--- /dev/null >+++ b/Source/JavaScriptCore/generator/Fits.rb >@@ -0,0 +1,13 @@ >+module Fits >+ def self.convert(size, name, type) >+ "Fits<#{type.to_s}, #{size}>::convert(#{name})" >+ end >+ >+ def self.check(size, name, type) >+ "Fits<#{type.to_s}, #{size}>::check(#{name})" >+ end >+ >+ def self.write(size, name, type) >+ "__generator->write(#{convert(size, name, type)});" >+ end >+end >diff --git a/Source/JavaScriptCore/generator/GeneratedFile.rb b/Source/JavaScriptCore/generator/GeneratedFile.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..69e6657d8f60983c02e759573e2ab4192e03009a >--- /dev/null >+++ b/Source/JavaScriptCore/generator/GeneratedFile.rb >@@ -0,0 +1,79 @@ >+require 'date' >+require 'digest' >+ >+$LICENSE = <<-EOF >+Copyright (C) #{Date.today.year} Apple Inc. All rights reserved. >+ >+Redistribution and use in source and binary forms, with or without >+modification, are permitted provided that the following conditions >+are met: >+ >+1. Redistributions of source code must retain the above copyright >+ notice, this list of conditions and the following disclaimer. >+2. 
Redistributions in binary form must reproduce the above copyright >+ notice, this list of conditions and the following disclaimer in the >+ documentation and/or other materials provided with the distribution. >+ >+THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY >+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED >+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE >+DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY >+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES >+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; >+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND >+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF >+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. >+EOF >+ >+module GeneratedFile >+ class Template < Struct.new(:multiline_comment, :line_comment, :prefix, :suffix, :body) >+ def initialize >+ super(["/*", " *", "*/"], "// ", nil, nil, nil) >+ end >+ end >+ >+ def self.create(filename, dependency) >+ template = Template.new >+ yield template >+ >+ file = File.open(filename, "w") >+ self.sha1(file, template, dependency) >+ self.license(file, template, dependency) >+ >+ unless template.prefix.nil? >+ write(file, template.prefix.to_s, "\n") >+ end >+ unless template.body.nil? >+ write(file, template.body.to_s, "\n") >+ end >+ unless template.suffix.nil? >+ write(file, template.suffix.to_s, "\n") >+ end >+ end >+ >+ def self.sha1(file, template, dependency) >+ write(file, template.line_comment, " SHA1Hash: ", Digest::SHA1.hexdigest(dependency.read), "\n") >+ end >+ >+ def self.license(file, template, dependency) >+ unless template.multiline_comment.nil? >+ write(file, template.multiline_comment[0], "\n") >+ end >+ >+ comment = if template.multiline_comment.nil? 
then template.line_comment else template.multiline_comment[1] end >+ write(file, $LICENSE.strip.split("\n").map { |line| "#{comment} #{line}" }.join("\n"), "\n\n") >+ write(file, comment, " Autogenerated from ", dependency.path, ", do not modify.\n") >+ >+ unless template.multiline_comment.nil? >+ write(file, template.multiline_comment[2], "\n") >+ end >+ >+ write(file, "\n") >+ end >+ >+ def self.write(file, *strings) >+ file.write(strings.map(&:to_s).join) >+ end >+end >+ >diff --git a/Source/JavaScriptCore/generator/Metadata.rb b/Source/JavaScriptCore/generator/Metadata.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..fc7e3f54cbd2ccdd863850114dcdcb900cc2e0bf >--- /dev/null >+++ b/Source/JavaScriptCore/generator/Metadata.rb >@@ -0,0 +1,83 @@ >+require_relative 'Fits' >+ >+class Metadata >+ @@emitter_local = nil >+ >+ def initialize(fields, initializers) >+ @fields = fields >+ @initializers = initializers >+ end >+ >+ def empty? >+ @fields.nil? >+ end >+ >+ def cpp_class(op) >+ return if empty? >+ >+ fields = @fields.map { |field, type| "#{type.to_s} #{field.to_s};" }.join "\n" >+ inits = nil >+ if @initializers && (not @initializers.empty?) >+ inits = ": " + @initializers.map do |metadata, arg| >+ "#{metadata}(__op.#{arg})" >+ end.join(", ") >+ end >+ >+ <<-EOF >+ struct Metadata { >+ Metadata(const #{op.capitalized_name}&#{" __op" if inits}) >+ #{inits} >+ { } >+ >+ #{fields} >+ }; >+ EOF >+ end >+ >+ def accessor >+ return if empty? >+ >+ <<-EOF >+ Metadata& metadata(CodeBlock* codeBlock) const >+ { >+ auto*& it = codeBlock->metadata<Metadata>(opcodeID(), metadataID); >+ if (!it) >+ it = new Metadata { *this }; >+ return *it; >+ } >+ >+ Metadata& metadata(ExecState* exec) const >+ { >+ return metadata(exec->codeBlock()); >+ } >+ EOF >+ end >+ >+ def field >+ return if empty? >+ >+ "unsigned metadataID;" >+ end >+ >+ def load_from_stream(index, size) >+ return if empty? 
>+ >+ "metadataID(#{Fits::convert(size, "stream[#{index}]", :unsigned)})" >+ end >+ >+ def create_emitter_local >+ return if empty? >+ >+ <<-EOF >+ auto #{emitter_local.name} = __generator->addMetadataFor(opcodeID()); >+ EOF >+ end >+ >+ def emitter_local >+ unless @@emitter_local >+ @@emitter_local = Argument.new("__metadataID", :unsigned, -1) >+ end >+ >+ return @@emitter_local >+ end >+end >diff --git a/Source/JavaScriptCore/generator/Opcode.rb b/Source/JavaScriptCore/generator/Opcode.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..2eab66ca669e5ac850d1a7a6c630611a2ed98fb7 >--- /dev/null >+++ b/Source/JavaScriptCore/generator/Opcode.rb >@@ -0,0 +1,198 @@ >+require_relative 'Argument' >+require_relative 'Fits' >+require_relative 'Metadata' >+ >+class Opcode >+ attr_reader :id >+ attr_reader :args >+ attr_reader :metadata >+ >+ module Size >+ Narrow = "OpcodeSize::Narrow" >+ Wide = "OpcodeSize::Wide" >+ end >+ >+ @@id = 0 >+ >+ def self.id >+ tid = @@id >+ @@id = @@id + 1 >+ tid >+ end >+ >+ def initialize(section, name, args, metadata, metadata_initializers) >+ @id = self.class.id >+ @section = section >+ @name = name >+ @metadata = Metadata.new metadata, metadata_initializers >+ @args = args.map.with_index { |(arg_name, type), index| Argument.new arg_name, type, index + 1 } unless args.nil? >+ end >+ >+ def print_args(&block) >+ return if @args.nil? >+ >+ @args.map(&block).join "\n" >+ end >+ >+ def capitalized_name >+ name.split('_').map(&:capitalize).join >+ end >+ >+ def typed_args >+ return if @args.nil? >+ >+ @args.map(&:create_param).unshift("").join(", ") >+ end >+ >+ def map_fields_with_size(size, &block) >+ args = [Argument.new("opcodeID()", :unsigned, 0)] >+ args += @args.dup if @args >+ unless @metadata.empty? 
>+ args << @metadata.emitter_local >+ end >+ args.map { |arg| block.call(arg, size) } >+ end >+ >+ def cpp_class >+ <<-EOF >+ struct #{capitalized_name} : public Instruction { >+ #{opcodeID} >+ >+ #{emitter} >+ >+ #{dumper} >+ >+ #{constructors} >+ >+ #{setters} >+ >+ #{metadata} >+ >+ #{members} >+ }; >+ EOF >+ end >+ >+ def opcodeID >+ "static constexpr OpcodeID opcodeID() { return static_cast<OpcodeID>(#{@id}); }" >+ end >+ >+ def emitter >+ op_wide = Argument.new("op_wide", :unsigned, 0) >+ <<-EOF >+ static void emit(BytecodeGenerator* __generator#{typed_args}) >+ { >+ __generator->recordOpcode(opcodeID()); >+ #{@metadata.create_emitter_local} >+ if (#{map_fields_with_size(Size::Narrow, &:fits_check).join " && "}) { >+ #{map_fields_with_size(Size::Narrow, &:fits_write).join "\n"} >+ } else { >+ #{op_wide.assert_fits Size::Narrow} >+ #{map_fields_with_size(Size::Wide, &:assert_fits).join "\n"} >+ >+ #{op_wide.fits_write Size::Narrow} >+ #{map_fields_with_size(Size::Wide, &:fits_write).join "\n"} >+ } >+ } >+ EOF >+ end >+ >+ def dumper >+ <<-EOF >+ template<typename Block> >+ void dump(BytecodeDumper<Block>* __dumper, InstructionStream::Offset __location) >+ { >+ __dumper->printLocationAndOp(__location, "#{@name}"); >+ #{print_args { |arg| >+ <<-EOF >+ __dumper->dumpOperand(#{arg.name}); >+ EOF >+ }} >+ } >+ EOF >+ end >+ >+ def constructors >+ fields = (@args || []) + (@metadata.empty? ? [] : [@metadata]) >+ init = ->(size) { fields.empty? ? 
"" : ": #{fields.map.with_index { |arg, i| arg.load_from_stream(i, size) }.join ",\n" }" } >+ >+ <<-EOF >+ #{capitalized_name}(const uint8_t* stream) >+ #{init.call("OpcodeSize::Narrow")} >+ { ASSERT(stream[0] == opcodeID()); } >+ >+ #{capitalized_name}(const uint32_t* stream) >+ #{init.call("OpcodeSize::Wide")} >+ { ASSERT(stream[0] == opcodeID()); } >+ >+ static #{capitalized_name} decode(const uint8_t* stream) >+ { >+ if (*stream != op_wide) >+ return { stream }; >+ >+ auto wideStream = reinterpret_cast<const uint32_t*>(stream + 1); >+ return { wideStream }; >+ } >+ >+ EOF >+ end >+ >+ def setters >+ print_args(&:setter) >+ end >+ >+ def metadata >+ <<-EOF >+ #{@metadata.cpp_class(self)} >+ >+ #{@metadata.accessor} >+ EOF >+ end >+ >+ def members >+ <<-EOF >+ #{print_args(&:field)} >+ #{@metadata.field} >+ EOF >+ end >+ >+ def set_entry_address(id) >+ "setEntryAddress(#{id}, _#{full_name})" >+ end >+ >+ def set_entry_address_wide(id) >+ "setEntryAddressWide(#{id}, _#{full_name}_wide)" >+ end >+ >+ def full_name >+ "#{@section.config[:asm_prefix]}#{@section.config[:op_prefix]}#{@name}" >+ end >+ >+ def name >+ "#{@section.config[:op_prefix]}#{@name}" >+ end >+ >+ def length >+ 1 + (@args.nil? ? 0 : @args.length) + (@metadata.empty? ? 
0 : 1) >+ end >+ >+ def self.dump_bytecode(opcodes) >+ <<-EOF >+ template<typename Block> >+ static void dumpBytecode(BytecodeDumper<Block>* __dumper, InstructionStream::Offset __location, const Instruction* __instruction) >+ { >+ switch (__instruction->opcodeID()) { >+ #{opcodes.map { |op| >+ <<-EOF >+ case #{op.name}: >+ __instruction->as<#{op.capitalized_name}>().dump(__dumper, __location); >+ break; >+ EOF >+ }.join "\n"} >+ default: >+ ASSERT_NOT_REACHED(); >+ } >+ } >+ EOF >+ end >+end >diff --git a/Source/JavaScriptCore/generator/OpcodeGroup.rb b/Source/JavaScriptCore/generator/OpcodeGroup.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..0b7971f9a67ba7c66f9906d77cf98dc6d641035e >--- /dev/null >+++ b/Source/JavaScriptCore/generator/OpcodeGroup.rb >@@ -0,0 +1,14 @@ >+require_relative 'Opcode' >+ >+class OpcodeGroup >+ attr_reader :name >+ attr_reader :opcodes >+ attr_reader :config >+ >+ def initialize(section, desc, opcodes, config) >+ @section = section >+ @name = desc >+ @opcodes = opcodes >+ @config = config >+ end >+end >diff --git a/Source/JavaScriptCore/generator/Options.rb b/Source/JavaScriptCore/generator/Options.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..2ca194a17dd25a03dbda9cce58d077eb3270f6da >--- /dev/null >+++ b/Source/JavaScriptCore/generator/Options.rb >@@ -0,0 +1,59 @@ >+require 'optparse' >+ >+$config = { >+ bytecodesFilename: { >+ short: "-b", >+ long: "--bytecodes_h FILE", >+ desc: "generate bytecodes macro .h FILE", >+ }, >+ bytecodeStructsFilename: { >+ short: "-s", >+ long: "--bytecode_structs_h FILE", >+ desc: "generate bytecode structs .h FILE", >+ }, >+ initAsmFilename: { >+ short: "-a", >+ long: "--init_bytecodes_asm FILE", >+ desc: "generate ASM bytecodes init FILE", >+ }, >+}; >+ >+module Options >+ def self.optparser(options) >+ OptionParser.new do |opts| >+ opts.banner = "usage: #{opts.program_name} [options] <bytecode-list-file>" >+ $config.map do |key, option| >+ 
opts.on(option[:short], option[:long], option[:desc]) do |v| >+ options[key] = v >+ end >+ end >+ end >+ end >+ >+ def self.check(argv, options) >+ missing = $config.keys.select{ |param| options[param].nil? } >+ unless missing.empty? >+ raise OptionParser::MissingArgument.new(missing.join(', ')) >+ end >+ unless argv.length == 1 >+ raise OptionParser::MissingArgument.new("<bytecode-list-file>") >+ end >+ end >+ >+ def self.parse(argv) >+ options = {} >+ parser = optparser(options) >+ >+ begin >+ parser.parse!(argv) >+ check(argv, options) >+ rescue OptionParser::MissingArgument, OptionParser::InvalidOption >+ puts $!.to_s >+ puts parser >+ exit 1 >+ end >+ >+ options[:bytecodeList] = argv[0] >+ options >+ end >+end >diff --git a/Source/JavaScriptCore/generator/Section.rb b/Source/JavaScriptCore/generator/Section.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..7b73a63d77991c64079a2c14982ec718ee961b84 >--- /dev/null >+++ b/Source/JavaScriptCore/generator/Section.rb >@@ -0,0 +1,50 @@ >+require_relative 'Opcode' >+require_relative 'OpcodeGroup' >+ >+class Section >+ attr_reader :name >+ attr_reader :config >+ attr_reader :opcodes >+ >+ def initialize(name, config) >+ @name = name >+ @config = config >+ @opcodes = [] >+ @opcode_groups = [] >+ end >+ >+ def add_opcode(name, config) >+ @opcodes << create_opcode(name, config) >+ end >+ >+ def create_opcode(name, config) >+ Opcode.new(self, name, config[:args], config[:metadata], config[:metadata_initializers]) >+ end >+ >+ def add_opcode_group(name, opcodes, config) >+ opcodes = opcodes.map { |opcode| create_opcode(opcode, config) } >+ @opcode_groups << OpcodeGroup.new(self, name, opcodes, config) >+ @opcodes += opcodes >+ end >+ >+ def header_helpers(num_opcodes) >+ out = StringIO.new >+ if config[:emit_in_h_file] >+ out.write("#define FOR_EACH_#{config[:macro_name_component]}_ID(macro) \\\n") >+ opcodes.each { |opcode| out.write("macro(#{opcode.name}, #{opcode.length}) \\\n") } >+ out << "\n" >+ 
out.write("#define NUMBER_OF_#{config[:macro_name_component]}_IDS #{opcodes.length}\n") >+ end >+ >+ if config[:emit_opcode_id_string_values_in_h_file] >+ out << "\n" >+ opcodes.each { |opcode| >+ out.write("#define #{opcode.name}_value_string \"#{opcode.id}\"\n") >+ } >+ opcodes.each { |opcode| >+ out.write("#define #{opcode.name}_wide_value_string \"#{num_opcodes + opcode.id}\"\n") >+ } >+ end >+ out.string >+ end >+end >diff --git a/Source/JavaScriptCore/generator/Template.rb b/Source/JavaScriptCore/generator/Template.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..a4e429ecbc1fc2956df4142c70fadf8c21fb89a7 >--- /dev/null >+++ b/Source/JavaScriptCore/generator/Template.rb >@@ -0,0 +1,7 @@ >+require_relative 'Type' >+ >+class Template < Type >+ def [](*types) >+ Type.new "#{@name}<#{types.map(&:to_s).join ","}>" >+ end >+end >diff --git a/Source/JavaScriptCore/generator/Type.rb b/Source/JavaScriptCore/generator/Type.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..3b148bdcbd8ee70859f05f29bf41edbf1fbec41c >--- /dev/null >+++ b/Source/JavaScriptCore/generator/Type.rb >@@ -0,0 +1,13 @@ >+class Type >+ def initialize(name) >+ @name = name >+ end >+ >+ def * >+ Type.new "#{@name}*" >+ end >+ >+ def to_s >+ @name.to_s >+ end >+end >diff --git a/Source/JavaScriptCore/generator/main.rb b/Source/JavaScriptCore/generator/main.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..dba40135f8f715fd130d30f6eeb5c9cf42ca20dc >--- /dev/null >+++ b/Source/JavaScriptCore/generator/main.rb >@@ -0,0 +1,16 @@ >+require_relative 'DSL' >+require_relative 'Options' >+ >+# for some reason, lower case variables are not accessible until the next invocation of eval >+# so we bind them here, before eval'ing the file >+DSL::types [ >+ :bool, >+ :int, >+ :unsigned, >+ :uintptr_t, >+] >+ >+ >+ >+options = Options::parse(ARGV) >+DSL::run(options) >diff --git a/Source/JavaScriptCore/generator/runtime/Fits.h 
b/Source/JavaScriptCore/generator/runtime/Fits.h >new file mode 100644 >index 0000000000000000000000000000000000000000..77ef6ad43bca167135a31db6b3cd4cb66120cbad >--- /dev/null >+++ b/Source/JavaScriptCore/generator/runtime/Fits.h >@@ -0,0 +1,256 @@ >+/* >+ * Copyright (C) 2018 Apple Inc. All rights reserved. >+ * >+ * Redistribution and use in source and binary forms, with or without >+ * modification, are permitted provided that the following conditions >+ * are met: >+ * 1. Redistributions of source code must retain the above copyright >+ * notice, this list of conditions and the following disclaimer. >+ * 2. Redistributions in binary form must reproduce the above copyright >+ * notice, this list of conditions and the following disclaimer in the >+ * documentation and/or other materials provided with the distribution. >+ * >+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' >+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE >+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS >+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR >+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF >+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS >+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN >+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) >+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF >+ * THE POSSIBILITY OF SUCH DAMAGE. 
>+ */ >+ >+#pragma once >+ >+#include "GetPutInfo.h" >+#include "Interpreter.h" >+#include "ProfileTypeBytecodeFlag.h" >+#include "ResultType.h" >+#include "ScopeOffset.h" >+#include "SpecialPointer.h" >+#include "VirtualRegister.h" >+#include <type_traits> >+ >+namespace JSC { >+ >+enum OpcodeSize { >+ Narrow = 1, >+ Wide = 4, >+}; >+ >+template<OpcodeSize> >+struct TypeBySize; >+ >+template<> >+struct TypeBySize<OpcodeSize::Narrow> { >+ using type = uint8_t; >+}; >+ >+template<> >+struct TypeBySize<OpcodeSize::Wide> { >+ using type = uint32_t; >+}; >+ >+template<OpcodeSize> >+struct PaddingBySize; >+ >+template<> >+struct PaddingBySize<OpcodeSize::Narrow> { >+ static constexpr uint8_t value = 0; >+}; >+ >+template<> >+struct PaddingBySize<OpcodeSize::Wide> { >+ static constexpr uint8_t value = 1; >+}; >+ >+// Fits template >+template<typename, OpcodeSize, typename = std::true_type> >+struct Fits; >+ >+// Implicit conversion for types of the same size >+template<typename T, OpcodeSize size> >+struct Fits<T, size, std::enable_if_t<sizeof(T) == size, std::true_type>> { >+ static bool check(T) { return true; } >+ >+ static typename TypeBySize<size>::type convert(T t) { return *reinterpret_cast<typename TypeBySize<size>::type*>(&t); } >+ >+ template<class T1 = T, OpcodeSize size1 = size, typename = std::enable_if_t<!std::is_same<T1, typename TypeBySize<size1>::type>::value, std::true_type>> >+ static T1 convert(typename TypeBySize<size1>::type t) { return *reinterpret_cast<T1*>(&t); } >+}; >+ >+template<typename T, OpcodeSize size> >+struct Fits<T, size, std::enable_if_t<sizeof(T) < size, std::true_type>> { >+ static bool check(T) { return true; } >+ >+ static typename TypeBySize<size>::type convert(T t) { return static_cast<typename TypeBySize<size>::type>(t); } >+ >+ template<class T1 = T, OpcodeSize size1 = size, typename = std::enable_if_t<!std::is_same<T1, typename TypeBySize<size1>::type>::value, std::true_type>> >+ static T1 convert(typename 
TypeBySize<size1>::type t) { return static_cast<T1>(t); } >+}; >+ >+template<> >+struct Fits<uint32_t, OpcodeSize::Narrow> { >+ static bool check(unsigned u) { return u <= UINT8_MAX; } >+ >+ static uint8_t convert(unsigned u) >+ { >+ assert(check(u)); >+ return static_cast<uint8_t>(u); >+ } >+ static unsigned convert(uint8_t u) >+ { >+ return u; >+ } >+}; >+ >+template<> >+struct Fits<int, OpcodeSize::Narrow> { >+ static bool check(int i) >+ { >+ return i >= INT8_MIN && i <= INT8_MAX; >+ } >+ >+ static uint8_t convert(int i) >+ { >+ return static_cast<uint8_t>(i); >+ } >+ >+ static int convert(uint8_t i) >+ { >+ return static_cast<int8_t>(i); >+ } >+}; >+ >+template<> >+struct Fits<VirtualRegister, OpcodeSize::Narrow> : public Fits<int, OpcodeSize::Narrow> { >+ using Base = Fits<int, OpcodeSize::Narrow>; >+ static bool check(const VirtualRegister& r) { return Base::check(r.offset()); } >+ static uint8_t convert(const VirtualRegister& r) >+ { >+ return Base::convert(r.offset()); >+ } >+ static VirtualRegister convert(uint8_t i) >+ { >+ return VirtualRegister { Base::convert(i) }; >+ } >+}; >+ >+template<> >+struct Fits<Special::Pointer, OpcodeSize::Narrow> : Fits<int, OpcodeSize::Narrow> { >+ using Base = Fits<int, OpcodeSize::Narrow>; >+ static bool check(Special::Pointer sp) { return Base::check(static_cast<int>(sp)); } >+ static uint8_t convert(Special::Pointer sp) >+ { >+ return Base::convert(static_cast<int>(sp)); >+ } >+ static Special::Pointer convert(uint8_t sp) >+ { >+ return static_cast<Special::Pointer>(Base::convert(sp)); >+ } >+}; >+ >+template<> >+struct Fits<ScopeOffset, OpcodeSize::Narrow> : Fits<unsigned, OpcodeSize::Narrow> { >+ using Base = Fits<unsigned, OpcodeSize::Narrow>; >+ static bool check(ScopeOffset so) { return Base::check(so.offsetUnchecked()); } >+ static uint8_t convert(ScopeOffset so) >+ { >+ return Base::convert(so.offsetUnchecked()); >+ } >+ static ScopeOffset convert(uint8_t so) >+ { >+ return ScopeOffset { Base::convert(so) }; >+ 
} >+}; >+ >+template<> >+struct Fits<GetPutInfo, OpcodeSize::Narrow> : Fits<unsigned, OpcodeSize::Narrow> { >+ using Base = Fits<unsigned, OpcodeSize::Narrow>; >+ static bool check(GetPutInfo gpi) { return Base::check(gpi.operand()); } >+ static uint8_t convert(GetPutInfo gpi) >+ { >+ return Base::convert(gpi.operand()); >+ } >+ static GetPutInfo convert(uint8_t gpi) >+ { >+ return GetPutInfo { Base::convert(gpi) }; >+ } >+}; >+ >+template<> >+struct Fits<DebugHookType, OpcodeSize::Narrow> : Fits<int, OpcodeSize::Narrow> { >+ using Base = Fits<int, OpcodeSize::Narrow>; >+ static bool check(DebugHookType dht) { return Base::check(static_cast<int>(dht)); } >+ static uint8_t convert(DebugHookType dht) >+ { >+ return Base::convert(static_cast<int>(dht)); >+ } >+ static DebugHookType convert(uint8_t dht) >+ { >+ return static_cast<DebugHookType>(Base::convert(dht)); >+ } >+}; >+ >+template<> >+struct Fits<ProfileTypeBytecodeFlag, OpcodeSize::Narrow> : Fits<int, OpcodeSize::Narrow> { >+ using Base = Fits<int, OpcodeSize::Narrow>; >+ static bool check(ProfileTypeBytecodeFlag ptbf) { return Base::check(static_cast<int>(ptbf)); } >+ static uint8_t convert(ProfileTypeBytecodeFlag ptbf) >+ { >+ return Base::convert(static_cast<int>(ptbf)); >+ } >+ static ProfileTypeBytecodeFlag convert(uint8_t ptbf) >+ { >+ return static_cast<ProfileTypeBytecodeFlag>(Base::convert(ptbf)); >+ } >+}; >+ >+template<> >+struct Fits<ResolveType, OpcodeSize::Narrow> : Fits<int, OpcodeSize::Narrow> { >+ using Base = Fits<int, OpcodeSize::Narrow>; >+ static bool check(ResolveType rt) { return Base::check(static_cast<int>(rt)); } >+ static uint8_t convert(ResolveType rt) >+ { >+ return Base::convert(static_cast<int>(rt)); >+ } >+ >+ static ResolveType convert(uint8_t rt) >+ { >+ return static_cast<ResolveType>(Base::convert(rt)); >+ } >+}; >+ >+template<> >+struct Fits<OperandTypes, OpcodeSize::Narrow> : Fits<int, OpcodeSize::Narrow> { >+ using Base = Fits<int, OpcodeSize::Narrow>; >+ static bool 
check(OperandTypes types) { return Base::check(types.toInt()); } >+ static uint8_t convert(OperandTypes types) >+ { >+ return Base::convert(types.toInt()); >+ } >+ static OperandTypes convert(uint8_t types) >+ { >+ return OperandTypes::fromInt(Base::convert(types)); >+ } >+}; >+ >+template<> >+struct Fits<PutByIdFlags, OpcodeSize::Narrow> : Fits<int, OpcodeSize::Narrow> { >+ using Base = Fits<int, OpcodeSize::Narrow>; >+ static bool check(PutByIdFlags flags) { return Base::check(static_cast<int>(flags)); } >+ static uint8_t convert(PutByIdFlags flags) >+ { >+ return Base::convert(static_cast<int>(flags)); >+ } >+ >+ static PutByIdFlags convert(uint8_t flags) >+ { >+ return static_cast<PutByIdFlags>(Base::convert(flags)); >+ } >+}; >+ >+} // namespace JSC >diff --git a/Source/JavaScriptCore/generator/runtime/Instruction.h b/Source/JavaScriptCore/generator/runtime/Instruction.h >new file mode 100644 >index 0000000000000000000000000000000000000000..e86b68db63e7e2dcf01a384c0bd86886d42eee52 >--- /dev/null >+++ b/Source/JavaScriptCore/generator/runtime/Instruction.h >@@ -0,0 +1,114 @@ >+/* >+ * Copyright (C) 2018 Apple Inc. All rights reserved. >+ * >+ * Redistribution and use in source and binary forms, with or without >+ * modification, are permitted provided that the following conditions >+ * are met: >+ * 1. Redistributions of source code must retain the above copyright >+ * notice, this list of conditions and the following disclaimer. >+ * 2. Redistributions in binary form must reproduce the above copyright >+ * notice, this list of conditions and the following disclaimer in the >+ * documentation and/or other materials provided with the distribution. >+ * >+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' >+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE >+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS >+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR >+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF >+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS >+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN >+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) >+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF >+ * THE POSSIBILITY OF SUCH DAMAGE. >+ */ >+ >+#pragma once >+ >+#include "Fits.h" >+#include "Opcode.h" >+ >+namespace JSC { >+ >+struct Instruction { >+protected: >+ Instruction() >+ { } >+ >+private: >+ template<OpcodeSize Width> >+ class Impl { >+ public: >+ OpcodeID opcodeID() const { return static_cast<OpcodeID>(m_opcode); } >+ >+ private: >+ typename TypeBySize<Width>::type m_opcode; >+ }; >+ >+public: >+ OpcodeID opcodeID() const >+ { >+ if (isWide()) >+ return wide()->opcodeID(); >+ return narrow()->opcodeID(); >+ } >+ >+ const char* name() const >+ { >+ return opcodeNames[opcodeID()]; >+ } >+ >+ bool isWide() const >+ { >+ return narrow()->opcodeID() == op_wide; >+ } >+ >+ size_t size() const >+ { >+ auto wide = isWide(); >+ auto padding = wide ? 1 : 0; >+ auto size = wide ? 
4 : 1; >+ return opcodeLengths[opcodeID()] * size + padding; >+ } >+ >+ template<class T> >+ bool is() const >+ { >+ return opcodeID() == T::opcodeID(); >+ } >+ >+ template<class T> >+ T as() const >+ { >+ ASSERT(is<T>()); >+ return T::decode(reinterpret_cast<const uint8_t*>(this)); >+ } >+ >+ template<class T> >+ T* cast() >+ { >+ ASSERT(is<T>()); >+ return reinterpret_cast<T*>(this); >+ } >+ >+ template<class T> >+ const T* cast() const >+ { >+ ASSERT(is<T>()); >+ return reinterpret_cast<const T*>(this); >+ } >+ >+ const Impl<OpcodeSize::Narrow>* narrow() const >+ { >+ return reinterpret_cast<const Impl<OpcodeSize::Narrow>*>(this); >+ } >+ >+ const Impl<OpcodeSize::Wide>* wide() const >+ { >+ >+ ASSERT(isWide()); >+ return reinterpret_cast<const Impl<OpcodeSize::Wide>*>((uintptr_t)this + 1); >+ } >+}; >+ >+} // namespace JSC >diff --git a/Source/JavaScriptCore/interpreter/AbstractPC.h b/Source/JavaScriptCore/interpreter/AbstractPC.h >index 877d3d04ac5e4fc76c6343f34c9136937f4f6492..4832d6de8253856cdb0e5125439a9f97946d8f68 100644 >--- a/Source/JavaScriptCore/interpreter/AbstractPC.h >+++ b/Source/JavaScriptCore/interpreter/AbstractPC.h >@@ -62,7 +62,7 @@ public: > > private: > #if ENABLE(JIT) >- void* m_pointer { nullptr }; >+ const void* m_pointer { nullptr }; > #endif > > enum Mode { None, JIT, Interpreter }; >diff --git a/Source/JavaScriptCore/interpreter/CallFrame.cpp b/Source/JavaScriptCore/interpreter/CallFrame.cpp >index 6325dc27c51f62e01420e54bd8774f7b56386dbd..b9f6b49d197b3582e3e646e1ce759ee001249b2f 100644 >--- a/Source/JavaScriptCore/interpreter/CallFrame.cpp >+++ b/Source/JavaScriptCore/interpreter/CallFrame.cpp >@@ -106,12 +106,12 @@ SUPPRESS_ASAN CallSiteIndex CallFrame::unsafeCallSiteIndex() const > } > > #if USE(JSVALUE32_64) >-Instruction* CallFrame::currentVPC() const >+const Instruction* CallFrame::currentVPC() const > { > return bitwise_cast<Instruction*>(callSiteIndex().bits()); > } > >-void CallFrame::setCurrentVPC(Instruction* vpc) >+void 
CallFrame::setCurrentVPC(const Instruction* vpc) > { > CallSiteIndex callSite(vpc); > this[CallFrameSlot::argumentCount].tag() = callSite.bits(); >@@ -125,13 +125,13 @@ unsigned CallFrame::callSiteBitsAsBytecodeOffset() const > } > > #else // USE(JSVALUE32_64) >-Instruction* CallFrame::currentVPC() const >+const Instruction* CallFrame::currentVPC() const > { > ASSERT(callSiteBitsAreBytecodeOffset()); >- return &codeBlock()->instructions()[callSiteBitsAsBytecodeOffset()]; >+ return codeBlock()->instructions().at(callSiteBitsAsBytecodeOffset()).ptr(); > } > >-void CallFrame::setCurrentVPC(Instruction* vpc) >+void CallFrame::setCurrentVPC(const Instruction* vpc) > { > CallSiteIndex callSite(codeBlock()->bytecodeOffset(vpc)); > this[CallFrameSlot::argumentCount].tag() = static_cast<int32_t>(callSite.bits()); >diff --git a/Source/JavaScriptCore/interpreter/CallFrame.h b/Source/JavaScriptCore/interpreter/CallFrame.h >index 6d5d72379b7954b582bcf87edf23616f5c337d0f..7e3c1b89ef4490aa5b1fb406dd1a212debe9fa6e 100644 >--- a/Source/JavaScriptCore/interpreter/CallFrame.h >+++ b/Source/JavaScriptCore/interpreter/CallFrame.h >@@ -53,7 +53,7 @@ namespace JSC { > : m_bits(bits) > { } > #if USE(JSVALUE32_64) >- explicit CallSiteIndex(Instruction* instruction) >+ explicit CallSiteIndex(const Instruction* instruction) > : m_bits(bitwise_cast<uint32_t>(instruction)) > { } > #endif >@@ -69,7 +69,7 @@ namespace JSC { > > struct CallerFrameAndPC { > CallFrame* callerFrame; >- Instruction* pc; >+ const Instruction* pc; > static const int sizeInRegisters = 2 * sizeof(void*) / sizeof(Register); > }; > static_assert(CallerFrameAndPC::sizeInRegisters == sizeof(CallerFrameAndPC) / sizeof(Register), "CallerFrameAndPC::sizeInRegisters is incorrect."); >@@ -187,8 +187,8 @@ namespace JSC { > return topOfFrameInternal(); > } > >- Instruction* currentVPC() const; // This only makes sense in the LLInt and baseline. 
>- void setCurrentVPC(Instruction* vpc); >+ const Instruction* currentVPC() const; // This only makes sense in the LLInt and baseline. >+ void setCurrentVPC(const Instruction* vpc); > > void setCallerFrame(CallFrame* frame) { callerFrameAndPC().callerFrame = frame; } > void setScope(int scopeRegisterOffset, JSScope* scope) { static_cast<Register*>(this)[scopeRegisterOffset] = scope; } >@@ -260,7 +260,7 @@ namespace JSC { > void setArgumentCountIncludingThis(int count) { static_cast<Register*>(this)[CallFrameSlot::argumentCount].payload() = count; } > void setCallee(JSObject* callee) { static_cast<Register*>(this)[CallFrameSlot::callee] = callee; } > void setCodeBlock(CodeBlock* codeBlock) { static_cast<Register*>(this)[CallFrameSlot::codeBlock] = codeBlock; } >- void setReturnPC(void* value) { callerFrameAndPC().pc = reinterpret_cast<Instruction*>(value); } >+ void setReturnPC(void* value) { callerFrameAndPC().pc = reinterpret_cast<const Instruction*>(value); } > > String friendlyFunctionName(); > >diff --git a/Source/JavaScriptCore/interpreter/Interpreter.cpp b/Source/JavaScriptCore/interpreter/Interpreter.cpp >index e8e0b50a391674a61e60aa8e31d4f39b49135f11..098a31bd155cc2d91a818bb3db4f4c0a0537a1e4 100644 >--- a/Source/JavaScriptCore/interpreter/Interpreter.cpp >+++ b/Source/JavaScriptCore/interpreter/Interpreter.cpp >@@ -1361,3 +1361,34 @@ NEVER_INLINE void Interpreter::debug(CallFrame* callFrame, DebugHookType debugHo > } > > } // namespace JSC >+ >+namespace WTF { >+ >+void printInternal(PrintStream& out, JSC::DebugHookType type) >+{ >+ switch (type) { >+ case JSC::WillExecuteProgram: >+ out.print("WillExecuteProgram"); >+ return; >+ case JSC::DidExecuteProgram: >+ out.print("DidExecuteProgram"); >+ return; >+ case JSC::DidEnterCallFrame: >+ out.print("DidEnterCallFrame"); >+ return; >+ case JSC::DidReachBreakpoint: >+ out.print("DidReachBreakpoint"); >+ return; >+ case JSC::WillLeaveCallFrame: >+ out.print("WillLeaveCallFrame"); >+ return; >+ case 
JSC::WillExecuteStatement: >+ out.print("WillExecuteStatement"); >+ return; >+ case JSC::WillExecuteExpression: >+ out.print("WillExecuteExpression"); >+ return; >+ } >+} >+ >+} //namespace WTF >diff --git a/Source/JavaScriptCore/interpreter/Interpreter.h b/Source/JavaScriptCore/interpreter/Interpreter.h >index 49227ebe515663ffde03c9e0a3fcb64967a0f568..33ea3336a6c2cefd98acda6470bbad9a3750c6cd 100644 >--- a/Source/JavaScriptCore/interpreter/Interpreter.h >+++ b/Source/JavaScriptCore/interpreter/Interpreter.h >@@ -62,7 +62,6 @@ namespace JSC { > struct HandlerInfo; > struct Instruction; > struct ProtoCallFrame; >- struct UnlinkedInstruction; > > enum UnwindStart : uint8_t { UnwindFromCurrentFrame, UnwindFromCallerFrame }; > >@@ -102,8 +101,7 @@ namespace JSC { > static inline Opcode getOpcode(OpcodeID); > > static inline OpcodeID getOpcodeID(Opcode); >- static inline OpcodeID getOpcodeID(const Instruction&); >- static inline OpcodeID getOpcodeID(const UnlinkedInstruction&); >+ static inline OpcodeID getOpcodeID(OpcodeID); > > #if !ASSERT_DISABLED > static bool isOpcode(Opcode); >@@ -194,3 +192,11 @@ namespace JSC { > void setupForwardArgumentsFrameAndSetThis(CallFrame* execCaller, CallFrame* execCallee, JSValue thisValue, uint32_t length); > > } // namespace JSC >+ >+namespace WTF { >+ >+class PrintStream; >+ >+void printInternal(PrintStream&, JSC::DebugHookType); >+ >+} //namespace WTF >diff --git a/Source/JavaScriptCore/interpreter/InterpreterInlines.h b/Source/JavaScriptCore/interpreter/InterpreterInlines.h >index fc89a189d6057d8e4e0ab10a8791f856b49f9071..b9f82c92c8230887a84be9042626982ff7279d42 100644 >--- a/Source/JavaScriptCore/interpreter/InterpreterInlines.h >+++ b/Source/JavaScriptCore/interpreter/InterpreterInlines.h >@@ -63,14 +63,9 @@ inline OpcodeID Interpreter::getOpcodeID(Opcode opcode) > #endif > } > >-inline OpcodeID Interpreter::getOpcodeID(const Instruction& instruction) >+inline OpcodeID Interpreter::getOpcodeID(OpcodeID opcode) > { >- return 
getOpcodeID(instruction.u.opcode); >-} >- >-inline OpcodeID Interpreter::getOpcodeID(const UnlinkedInstruction& instruction) >-{ >- return instruction.u.opcode; >+ return opcode; > } > > ALWAYS_INLINE JSValue Interpreter::execute(CallFrameClosure& closure) >diff --git a/Source/JavaScriptCore/interpreter/StackVisitor.cpp b/Source/JavaScriptCore/interpreter/StackVisitor.cpp >index cae3d9a442ecfbed489626f125454eda8f9f092f..615ece7a59dbdca42a598185765bb690ede78280 100644 >--- a/Source/JavaScriptCore/interpreter/StackVisitor.cpp >+++ b/Source/JavaScriptCore/interpreter/StackVisitor.cpp >@@ -443,7 +443,7 @@ void StackVisitor::Frame::dump(PrintStream& out, Indenter indent, WTF::Function< > > CallFrame* callFrame = m_callFrame; > CallFrame* callerFrame = this->callerFrame(); >- void* returnPC = callFrame->hasReturnPC() ? callFrame->returnPC().value() : nullptr; >+ const void* returnPC = callFrame->hasReturnPC() ? callFrame->returnPC().value() : nullptr; > > out.print(indent, "name: ", functionName(), "\n"); > out.print(indent, "sourceURL: ", sourceURL(), "\n"); >diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp >index bb635db256803cf15cf80ad4ada06402aa27b9ed..cf84e82e879f0849724e42ba85ae03e941d7b417 100644 >--- a/Source/JavaScriptCore/jit/JIT.cpp >+++ b/Source/JavaScriptCore/jit/JIT.cpp >@@ -76,6 +76,7 @@ void ctiPatchCallByReturnAddress(ReturnAddressPtr returnAddress, FunctionPtr<CFu > JIT::JIT(VM* vm, CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset) > : JSInterfaceJIT(vm, codeBlock) > , m_interpreter(vm->interpreter) >+ , m_instructions(codeBlock->instructions()) > , m_labels(codeBlock ? 
codeBlock->numberOfInstructions() : 0) > , m_bytecodeOffset(std::numeric_limits<unsigned>::max()) > , m_pcToCodeOriginMapBuilder(*vm) >@@ -169,7 +170,7 @@ void JIT::assertStackPointerOffset() > NEXT_OPCODE(op_##name); \ > } > >-void JIT::emitSlowCaseCall(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, SlowPathFunction stub) >+void JIT::emitSlowCaseCall(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, SlowPathFunction stub) > { > linkAllSlowCases(iter); > >@@ -185,7 +186,7 @@ void JIT::privateCompileMainPass() > jitAssertTagsInPlace(); > jitAssertArgumentCountSane(); > >- Instruction* instructionsBegin = m_codeBlock->instructions().begin(); >+ auto& instructions = m_codeBlock->instructions(); > unsigned instructionCount = m_instructions.size(); > > m_callLinkInfoIndex = 0; >@@ -221,12 +222,11 @@ void JIT::privateCompileMainPass() > // Also add catch blocks for bytecodes that throw. > if (m_codeBlock->numberOfExceptionHandlers()) { > for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) { >- OpcodeID opcodeID = Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode); >+ auto instruction = instructions.at(bytecodeOffset); > if (auto* handler = m_codeBlock->handlerForBytecodeOffset(bytecodeOffset)) > worklist.push(graph.findBasicBlockWithLeaderOffset(handler->target)); > >- unsigned opcodeLength = opcodeLengths[opcodeID]; >- bytecodeOffset += opcodeLength; >+ bytecodeOffset += instruction->size(); > } > } > } >@@ -242,8 +242,9 @@ void JIT::privateCompileMainPass() > > if (m_disassembler) > m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label()); >- Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset; >- ASSERT_WITH_MESSAGE(Interpreter::isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset); >+ const Instruction* currentInstruction = 
instructions.at(m_bytecodeOffset).ptr(); >+ // TODO >+ // ASSERT_WITH_MESSAGE(Interpreter::isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset); > > m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset)); > >@@ -257,7 +258,7 @@ void JIT::privateCompileMainPass() > if (JITInternal::verbose) > dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset()); > >- OpcodeID opcodeID = Interpreter::getOpcodeID(currentInstruction->u.opcode); >+ OpcodeID opcodeID = currentInstruction->opcodeID(); > > if (UNLIKELY(m_compilation)) { > add64( >@@ -337,9 +338,6 @@ void JIT::privateCompileMainPass() > DEFINE_OP(op_beloweq) > DEFINE_OP(op_try_get_by_id) > DEFINE_OP(op_in_by_id) >- case op_get_array_length: >- case op_get_by_id_proto_load: >- case op_get_by_id_unset: > DEFINE_OP(op_get_by_id) > DEFINE_OP(op_get_by_id_with_this) > DEFINE_OP(op_get_by_id_direct) >@@ -469,8 +467,6 @@ void JIT::privateCompileLinkPass() > > void JIT::privateCompileSlowCases() > { >- Instruction* instructionsBegin = m_codeBlock->instructions().begin(); >- > m_getByIdIndex = 0; > m_getByIdWithThisIndex = 0; > m_putByIdIndex = 0; >@@ -494,7 +490,7 @@ void JIT::privateCompileSlowCases() > > unsigned firstTo = m_bytecodeOffset; > >- Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset; >+ const Instruction* currentInstruction = m_codeBlock->instructions().at(m_bytecodeOffset).ptr(); > > RareCaseProfile* rareCaseProfile = 0; > if (shouldEmitProfiling()) >@@ -506,7 +502,7 @@ void JIT::privateCompileSlowCases() > if (m_disassembler) > m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label()); > >- switch (Interpreter::getOpcodeID(currentInstruction->u.opcode)) { >+ switch (currentInstruction->opcodeID()) { > DEFINE_SLOWCASE_OP(op_add) > DEFINE_SLOWCASE_OP(op_call) > DEFINE_SLOWCASE_OP(op_tail_call) >@@ -519,9 +515,6 @@ void JIT::privateCompileSlowCases() > 
DEFINE_SLOWCASE_OP(op_eq) > DEFINE_SLOWCASE_OP(op_try_get_by_id) > DEFINE_SLOWCASE_OP(op_in_by_id) >- case op_get_array_length: >- case op_get_by_id_proto_load: >- case op_get_by_id_unset: > DEFINE_SLOWCASE_OP(op_get_by_id) > DEFINE_SLOWCASE_OP(op_get_by_id_with_this) > DEFINE_SLOWCASE_OP(op_get_by_id_direct) >@@ -615,11 +608,6 @@ void JIT::compileWithoutLinking(JITCompilationEffort effort) > if (UNLIKELY(computeCompileTimes())) > before = MonotonicTime::now(); > >- { >- ConcurrentJSLocker locker(m_codeBlock->m_lock); >- m_instructions = m_codeBlock->instructions().clone(); >- } >- > DFG::CapabilityLevel level = m_codeBlock->capabilityLevel(); > switch (level) { > case DFG::CannotCompile: >diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h >index 593d5bf38be7f86a2ee275cac2a1846a974526c7..fca9a1e6f083d92c384fd1b0b6ff4e9177aedc79 100644 >--- a/Source/JavaScriptCore/jit/JIT.h >+++ b/Source/JavaScriptCore/jit/JIT.h >@@ -42,6 +42,7 @@ > #include "JITDisassembler.h" > #include "JITInlineCacheGenerator.h" > #include "JITMathIC.h" >+#include "JITRightShiftGenerator.h" > #include "JSInterfaceJIT.h" > #include "PCToCodeOriginMap.h" > #include "UnusedPointer.h" >@@ -308,16 +309,35 @@ namespace JSC { > void addJump(const JumpList&, int); > void emitJumpSlowToHot(Jump, int); > >- void compileOpCall(OpcodeID, Instruction*, unsigned callLinkInfoIndex); >- void compileOpCallSlowCase(OpcodeID, Instruction*, Vector<SlowCaseEntry>::iterator&, unsigned callLinkInfoIndex); >- void compileSetupVarargsFrame(OpcodeID, Instruction*, CallLinkInfo*); >- void compileCallEval(Instruction*); >- void compileCallEvalSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitPutCallResult(Instruction*); >+ template<typename Op> >+ void compileOpCall(const Instruction*, unsigned callLinkInfoIndex); >+ template<typename Op> >+ void compileOpCallSlowCase(const Instruction*, Vector<SlowCaseEntry>::iterator&, unsigned callLinkInfoIndex); >+ template<typename Op> 
>+ std::enable_if_t< >+ Op::opcodeID() != op_call_varargs && Op::opcodeID() != op_construct_varargs && >+ Op::opcodeID() != op_tail_call_varargs && Op::opcodeID() != op_tail_call_forward_arguments >+ , void> compileSetupFrame(const Op&, CallLinkInfo*); >+ >+ template<typename Op> >+ std::enable_if_t< >+ Op::opcodeID() == op_call_varargs || Op::opcodeID() == op_construct_varargs || >+ Op::opcodeID() == op_tail_call_varargs || Op::opcodeID() == op_tail_call_forward_arguments >+ , void> compileSetupFrame(const Op&, CallLinkInfo*); >+ >+ template<typename Op> >+ bool compileTailCall(const Op&, CallLinkInfo*, unsigned callLinkInfoIndex); >+ template<typename Op> >+ bool compileCallEval(const Op&); >+ void compileCallEvalSlowCase(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ template<typename Op> >+ void emitPutCallResult(const Op&); > > enum class CompileOpStrictEqType { StrictEq, NStrictEq }; >- void compileOpStrictEq(Instruction*, CompileOpStrictEqType); >- void compileOpStrictEqJump(Instruction*, CompileOpStrictEqType); >+ template<typename Op> >+ void compileOpStrictEq(const Instruction*, CompileOpStrictEqType); >+ template<typename Op> >+ void compileOpStrictEqJump(const Instruction*, CompileOpStrictEqType); > enum class CompileOpEqType { Eq, NEq }; > void compileOpEqJumpSlow(Vector<SlowCaseEntry>::iterator&, CompileOpEqType, int jumpTarget); > bool isOperandConstantDouble(int src); >@@ -335,8 +355,8 @@ namespace JSC { > // This assumes that the value to profile is in regT0 and that regT3 is available for > // scratch. 
> void emitValueProfilingSite(ValueProfile&); >- void emitValueProfilingSite(unsigned bytecodeOffset); >- void emitValueProfilingSite(); >+ //template<typename Op> void emitValueProfilingSite(Op); >+ template<typename Metadata> void emitValueProfilingSite(Metadata&); > void emitArrayProfilingSiteWithCell(RegisterID cell, RegisterID indexingType, ArrayProfile*); > void emitArrayProfilingSiteForBytecodeIndexWithCell(RegisterID cell, RegisterID indexingType, unsigned bytecodeIndex); > void emitArrayProfileStoreToHoleSpecialCase(ArrayProfile*); >@@ -348,47 +368,47 @@ namespace JSC { > // Property is int-checked and zero extended. Base is cell checked. > // Structure is already profiled. Returns the slow cases. Fall-through > // case contains result in regT0, and it is not yet profiled. >- JumpList emitInt32Load(Instruction* instruction, PatchableJump& badType) { return emitContiguousLoad(instruction, badType, Int32Shape); } >- JumpList emitDoubleLoad(Instruction*, PatchableJump& badType); >- JumpList emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape); >- JumpList emitArrayStorageLoad(Instruction*, PatchableJump& badType); >- JumpList emitLoadForArrayMode(Instruction*, JITArrayMode, PatchableJump& badType); >- >- JumpList emitInt32GetByVal(Instruction* instruction, PatchableJump& badType) { return emitContiguousGetByVal(instruction, badType, Int32Shape); } >- JumpList emitDoubleGetByVal(Instruction*, PatchableJump& badType); >- JumpList emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape); >- JumpList emitArrayStorageGetByVal(Instruction*, PatchableJump& badType); >- JumpList emitDirectArgumentsGetByVal(Instruction*, PatchableJump& badType); >- JumpList emitScopedArgumentsGetByVal(Instruction*, PatchableJump& badType); >- JumpList emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType); >- JumpList emitFloatTypedArrayGetByVal(Instruction*, 
PatchableJump& badType, TypedArrayType); >+ JumpList emitInt32Load(const Instruction* instruction, PatchableJump& badType) { return emitContiguousLoad(instruction, badType, Int32Shape); } >+ JumpList emitDoubleLoad(const Instruction*, PatchableJump& badType); >+ JumpList emitContiguousLoad(const Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape); >+ JumpList emitArrayStorageLoad(const Instruction*, PatchableJump& badType); >+ JumpList emitLoadForArrayMode(const Instruction*, JITArrayMode, PatchableJump& badType); >+ >+ JumpList emitInt32GetByVal(const Instruction* instruction, PatchableJump& badType) { return emitContiguousGetByVal(instruction, badType, Int32Shape); } >+ JumpList emitDoubleGetByVal(const Instruction*, PatchableJump& badType); >+ JumpList emitContiguousGetByVal(const Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape); >+ JumpList emitArrayStorageGetByVal(const Instruction*, PatchableJump& badType); >+ JumpList emitDirectArgumentsGetByVal(const Instruction*, PatchableJump& badType); >+ JumpList emitScopedArgumentsGetByVal(const Instruction*, PatchableJump& badType); >+ JumpList emitIntTypedArrayGetByVal(const Instruction*, PatchableJump& badType, TypedArrayType); >+ JumpList emitFloatTypedArrayGetByVal(const Instruction*, PatchableJump& badType, TypedArrayType); > > // Property is in regT1, base is in regT0. regT2 contains indecing type. > // The value to store is not yet loaded. Property is int-checked and > // zero-extended. Base is cell checked. Structure is already profiled. > // returns the slow cases. 
>- JumpList emitInt32PutByVal(Instruction* currentInstruction, PatchableJump& badType) >+ JumpList emitInt32PutByVal(OpPutByVal bytecode, PatchableJump& badType) > { >- return emitGenericContiguousPutByVal(currentInstruction, badType, Int32Shape); >+ return emitGenericContiguousPutByVal(bytecode, badType, Int32Shape); > } >- JumpList emitDoublePutByVal(Instruction* currentInstruction, PatchableJump& badType) >+ JumpList emitDoublePutByVal(OpPutByVal bytecode, PatchableJump& badType) > { >- return emitGenericContiguousPutByVal(currentInstruction, badType, DoubleShape); >+ return emitGenericContiguousPutByVal(bytecode, badType, DoubleShape); > } >- JumpList emitContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType) >+ JumpList emitContiguousPutByVal(OpPutByVal bytecode, PatchableJump& badType) > { >- return emitGenericContiguousPutByVal(currentInstruction, badType); >+ return emitGenericContiguousPutByVal(bytecode, badType); > } >- JumpList emitGenericContiguousPutByVal(Instruction*, PatchableJump& badType, IndexingType indexingShape = ContiguousShape); >- JumpList emitArrayStoragePutByVal(Instruction*, PatchableJump& badType); >- JumpList emitIntTypedArrayPutByVal(Instruction*, PatchableJump& badType, TypedArrayType); >- JumpList emitFloatTypedArrayPutByVal(Instruction*, PatchableJump& badType, TypedArrayType); >+ JumpList emitGenericContiguousPutByVal(OpPutByVal, PatchableJump& badType, IndexingType indexingShape = ContiguousShape); >+ JumpList emitArrayStoragePutByVal(OpPutByVal, PatchableJump& badType); >+ JumpList emitIntTypedArrayPutByVal(OpPutByVal, PatchableJump& badType, TypedArrayType); >+ JumpList emitFloatTypedArrayPutByVal(OpPutByVal, PatchableJump& badType, TypedArrayType); > > // Identifier check helper for GetByVal and PutByVal. 
> void emitByValIdentifierCheck(ByValInfo*, RegisterID cell, RegisterID scratch, const Identifier&, JumpList& slowCases); > >- JITGetByIdGenerator emitGetByValWithCachedId(ByValInfo*, Instruction*, const Identifier&, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases); >- JITPutByIdGenerator emitPutByValWithCachedId(ByValInfo*, Instruction*, PutKind, const Identifier&, JumpList& doneCases, JumpList& slowCases); >+ JITGetByIdGenerator emitGetByValWithCachedId(ByValInfo*, OpGetByVal, const Identifier&, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases); >+ JITPutByIdGenerator emitPutByValWithCachedId(ByValInfo*, OpPutByVal, PutKind, const Identifier&, JumpList& doneCases, JumpList& slowCases); > > enum FinalObjectMode { MayBeFinal, KnownNotFinal }; > >@@ -454,206 +474,210 @@ namespace JSC { > > #endif // USE(JSVALUE32_64) > >- void emit_compareAndJump(OpcodeID, int op1, int op2, unsigned target, RelationalCondition); >- void emit_compareUnsigned(int dst, int op1, int op2, RelationalCondition); >- void emit_compareUnsignedAndJump(int op1, int op2, unsigned target, RelationalCondition); >- void emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator&); >+ template<typename Op> >+ void emit_compareAndJump(const Instruction*, RelationalCondition); >+ template<typename Op> >+ void emit_compareUnsigned(const Instruction*, RelationalCondition); >+ template<typename Op> >+ void emit_compareUnsignedAndJump(const Instruction*, RelationalCondition); >+ template<typename Op> >+ void emit_compareAndJumpSlow(const Instruction*, DoubleCondition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator&); > > void assertStackPointerOffset(); > >- void emit_op_add(Instruction*); >- void emit_op_bitand(Instruction*); >- void emit_op_bitor(Instruction*); >- 
void emit_op_bitxor(Instruction*); >- void emit_op_call(Instruction*); >- void emit_op_tail_call(Instruction*); >- void emit_op_call_eval(Instruction*); >- void emit_op_call_varargs(Instruction*); >- void emit_op_tail_call_varargs(Instruction*); >- void emit_op_tail_call_forward_arguments(Instruction*); >- void emit_op_construct_varargs(Instruction*); >- void emit_op_catch(Instruction*); >- void emit_op_construct(Instruction*); >- void emit_op_create_this(Instruction*); >- void emit_op_to_this(Instruction*); >- void emit_op_get_argument(Instruction*); >- void emit_op_argument_count(Instruction*); >- void emit_op_get_rest_length(Instruction*); >- void emit_op_check_tdz(Instruction*); >- void emit_op_identity_with_profile(Instruction*); >- void emit_op_debug(Instruction*); >- void emit_op_del_by_id(Instruction*); >- void emit_op_del_by_val(Instruction*); >- void emit_op_div(Instruction*); >- void emit_op_end(Instruction*); >- void emit_op_enter(Instruction*); >- void emit_op_get_scope(Instruction*); >- void emit_op_eq(Instruction*); >- void emit_op_eq_null(Instruction*); >- void emit_op_below(Instruction*); >- void emit_op_beloweq(Instruction*); >- void emit_op_try_get_by_id(Instruction*); >- void emit_op_get_by_id(Instruction*); >- void emit_op_get_by_id_with_this(Instruction*); >- void emit_op_get_by_id_direct(Instruction*); >- void emit_op_get_arguments_length(Instruction*); >- void emit_op_get_by_val(Instruction*); >- void emit_op_get_argument_by_val(Instruction*); >- void emit_op_in_by_id(Instruction*); >- void emit_op_init_lazy_reg(Instruction*); >- void emit_op_overrides_has_instance(Instruction*); >- void emit_op_instanceof(Instruction*); >- void emit_op_instanceof_custom(Instruction*); >- void emit_op_is_empty(Instruction*); >- void emit_op_is_undefined(Instruction*); >- void emit_op_is_boolean(Instruction*); >- void emit_op_is_number(Instruction*); >- void emit_op_is_object(Instruction*); >- void emit_op_is_cell_with_type(Instruction*); >- void 
emit_op_jeq_null(Instruction*); >- void emit_op_jfalse(Instruction*); >- void emit_op_jmp(Instruction*); >- void emit_op_jneq_null(Instruction*); >- void emit_op_jneq_ptr(Instruction*); >- void emit_op_jless(Instruction*); >- void emit_op_jlesseq(Instruction*); >- void emit_op_jgreater(Instruction*); >- void emit_op_jgreatereq(Instruction*); >- void emit_op_jnless(Instruction*); >- void emit_op_jnlesseq(Instruction*); >- void emit_op_jngreater(Instruction*); >- void emit_op_jngreatereq(Instruction*); >- void emit_op_jeq(Instruction*); >- void emit_op_jneq(Instruction*); >- void emit_op_jstricteq(Instruction*); >- void emit_op_jnstricteq(Instruction*); >- void emit_op_jbelow(Instruction*); >- void emit_op_jbeloweq(Instruction*); >- void emit_op_jtrue(Instruction*); >- void emit_op_loop_hint(Instruction*); >- void emit_op_check_traps(Instruction*); >- void emit_op_nop(Instruction*); >- void emit_op_super_sampler_begin(Instruction*); >- void emit_op_super_sampler_end(Instruction*); >- void emit_op_lshift(Instruction*); >- void emit_op_mod(Instruction*); >- void emit_op_mov(Instruction*); >- void emit_op_mul(Instruction*); >- void emit_op_negate(Instruction*); >- void emit_op_neq(Instruction*); >- void emit_op_neq_null(Instruction*); >- void emit_op_new_array(Instruction*); >- void emit_op_new_array_with_size(Instruction*); >- void emit_op_new_func(Instruction*); >- void emit_op_new_func_exp(Instruction*); >- void emit_op_new_generator_func(Instruction*); >- void emit_op_new_generator_func_exp(Instruction*); >- void emit_op_new_async_func(Instruction*); >- void emit_op_new_async_func_exp(Instruction*); >- void emit_op_new_async_generator_func(Instruction*); >- void emit_op_new_async_generator_func_exp(Instruction*); >- void emit_op_new_object(Instruction*); >- void emit_op_new_regexp(Instruction*); >- void emit_op_not(Instruction*); >- void emit_op_nstricteq(Instruction*); >- void emit_op_dec(Instruction*); >- void emit_op_inc(Instruction*); >- void 
emit_op_profile_type(Instruction*); >- void emit_op_profile_control_flow(Instruction*); >- void emit_op_get_parent_scope(Instruction*); >- void emit_op_put_by_id(Instruction*); >- void emit_op_put_by_val(Instruction*); >- void emit_op_put_getter_by_id(Instruction*); >- void emit_op_put_setter_by_id(Instruction*); >- void emit_op_put_getter_setter_by_id(Instruction*); >- void emit_op_put_getter_by_val(Instruction*); >- void emit_op_put_setter_by_val(Instruction*); >- void emit_op_ret(Instruction*); >- void emit_op_rshift(Instruction*); >- void emit_op_set_function_name(Instruction*); >- void emit_op_stricteq(Instruction*); >- void emit_op_sub(Instruction*); >- void emit_op_switch_char(Instruction*); >- void emit_op_switch_imm(Instruction*); >- void emit_op_switch_string(Instruction*); >- void emit_op_tear_off_arguments(Instruction*); >- void emit_op_throw(Instruction*); >- void emit_op_to_number(Instruction*); >- void emit_op_to_string(Instruction*); >- void emit_op_to_object(Instruction*); >- void emit_op_to_primitive(Instruction*); >- void emit_op_unexpected_load(Instruction*); >- void emit_op_unsigned(Instruction*); >- void emit_op_urshift(Instruction*); >- void emit_op_has_structure_property(Instruction*); >- void emit_op_has_indexed_property(Instruction*); >- void emit_op_get_direct_pname(Instruction*); >- void emit_op_enumerator_structure_pname(Instruction*); >- void emit_op_enumerator_generic_pname(Instruction*); >- void emit_op_log_shadow_chicken_prologue(Instruction*); >- void emit_op_log_shadow_chicken_tail(Instruction*); >- >- void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_tail_call(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_tail_call_varargs(Instruction*, 
Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_tail_call_forward_arguments(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_construct_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_get_callee(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_try_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_get_by_id_with_this(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_get_by_id_direct(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_get_arguments_length(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_get_argument_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_in_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_instanceof_custom(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_jless(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_jlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_jgreater(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_jgreatereq(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_jnless(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_jnlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_jngreater(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_jngreatereq(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_jeq(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void 
emitSlow_op_jneq(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_jstricteq(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_jnstricteq(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_check_traps(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_negate(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_neq(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_new_object(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_has_indexed_property(Instruction*, Vector<SlowCaseEntry>::iterator&); >- >- void emit_op_resolve_scope(Instruction*); >- void emit_op_get_from_scope(Instruction*); >- void emit_op_put_to_scope(Instruction*); >- void emit_op_get_from_arguments(Instruction*); >- void emit_op_put_to_arguments(Instruction*); >- void emitSlow_op_get_from_scope(Instruction*, Vector<SlowCaseEntry>::iterator&); >- void emitSlow_op_put_to_scope(Instruction*, Vector<SlowCaseEntry>::iterator&); >- >- void emitSlowCaseCall(Instruction*, Vector<SlowCaseEntry>::iterator&, SlowPathFunction); >- >- void emitRightShift(Instruction*, bool isUnsigned); >- void emitRightShiftSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&, bool isUnsigned); >- >- void emitNewFuncCommon(Instruction*); >- void emitNewFuncExprCommon(Instruction*); >+ void emit_op_add(const Instruction*); >+ void emit_op_bitand(const Instruction*); >+ void 
emit_op_bitor(const Instruction*); >+ void emit_op_bitxor(const Instruction*); >+ void emit_op_call(const Instruction*); >+ void emit_op_tail_call(const Instruction*); >+ void emit_op_call_eval(const Instruction*); >+ void emit_op_call_varargs(const Instruction*); >+ void emit_op_tail_call_varargs(const Instruction*); >+ void emit_op_tail_call_forward_arguments(const Instruction*); >+ void emit_op_construct_varargs(const Instruction*); >+ void emit_op_catch(const Instruction*); >+ void emit_op_construct(const Instruction*); >+ void emit_op_create_this(const Instruction*); >+ void emit_op_to_this(const Instruction*); >+ void emit_op_get_argument(const Instruction*); >+ void emit_op_argument_count(const Instruction*); >+ void emit_op_get_rest_length(const Instruction*); >+ void emit_op_check_tdz(const Instruction*); >+ void emit_op_identity_with_profile(const Instruction*); >+ void emit_op_debug(const Instruction*); >+ void emit_op_del_by_id(const Instruction*); >+ void emit_op_del_by_val(const Instruction*); >+ void emit_op_div(const Instruction*); >+ void emit_op_end(const Instruction*); >+ void emit_op_enter(const Instruction*); >+ void emit_op_get_scope(const Instruction*); >+ void emit_op_eq(const Instruction*); >+ void emit_op_eq_null(const Instruction*); >+ void emit_op_below(const Instruction*); >+ void emit_op_beloweq(const Instruction*); >+ void emit_op_try_get_by_id(const Instruction*); >+ void emit_op_get_by_id(const Instruction*); >+ void emit_op_get_by_id_with_this(const Instruction*); >+ void emit_op_get_by_id_direct(const Instruction*); >+ void emit_op_get_by_val(const Instruction*); >+ void emit_op_get_argument_by_val(const Instruction*); >+ void emit_op_in_by_id(const Instruction*); >+ void emit_op_init_lazy_reg(const Instruction*); >+ void emit_op_overrides_has_instance(const Instruction*); >+ void emit_op_instanceof(const Instruction*); >+ void emit_op_instanceof_custom(const Instruction*); >+ void emit_op_is_empty(const Instruction*); >+ void 
emit_op_is_undefined(const Instruction*); >+ void emit_op_is_boolean(const Instruction*); >+ void emit_op_is_number(const Instruction*); >+ void emit_op_is_object(const Instruction*); >+ void emit_op_is_cell_with_type(const Instruction*); >+ void emit_op_jeq_null(const Instruction*); >+ void emit_op_jfalse(const Instruction*); >+ void emit_op_jmp(const Instruction*); >+ void emit_op_jneq_null(const Instruction*); >+ void emit_op_jneq_ptr(const Instruction*); >+ void emit_op_jless(const Instruction*); >+ void emit_op_jlesseq(const Instruction*); >+ void emit_op_jgreater(const Instruction*); >+ void emit_op_jgreatereq(const Instruction*); >+ void emit_op_jnless(const Instruction*); >+ void emit_op_jnlesseq(const Instruction*); >+ void emit_op_jngreater(const Instruction*); >+ void emit_op_jngreatereq(const Instruction*); >+ void emit_op_jeq(const Instruction*); >+ void emit_op_jneq(const Instruction*); >+ void emit_op_jstricteq(const Instruction*); >+ void emit_op_jnstricteq(const Instruction*); >+ void emit_op_jbelow(const Instruction*); >+ void emit_op_jbeloweq(const Instruction*); >+ void emit_op_jtrue(const Instruction*); >+ void emit_op_loop_hint(const Instruction*); >+ void emit_op_check_traps(const Instruction*); >+ void emit_op_nop(const Instruction*); >+ void emit_op_super_sampler_begin(const Instruction*); >+ void emit_op_super_sampler_end(const Instruction*); >+ void emit_op_lshift(const Instruction*); >+ void emit_op_mod(const Instruction*); >+ void emit_op_mov(const Instruction*); >+ void emit_op_mul(const Instruction*); >+ void emit_op_negate(const Instruction*); >+ void emit_op_neq(const Instruction*); >+ void emit_op_neq_null(const Instruction*); >+ void emit_op_new_array(const Instruction*); >+ void emit_op_new_array_with_size(const Instruction*); >+ void emit_op_new_func(const Instruction*); >+ void emit_op_new_func_exp(const Instruction*); >+ void emit_op_new_generator_func(const Instruction*); >+ void emit_op_new_generator_func_exp(const 
Instruction*); >+ void emit_op_new_async_func(const Instruction*); >+ void emit_op_new_async_func_exp(const Instruction*); >+ void emit_op_new_async_generator_func(const Instruction*); >+ void emit_op_new_async_generator_func_exp(const Instruction*); >+ void emit_op_new_object(const Instruction*); >+ void emit_op_new_regexp(const Instruction*); >+ void emit_op_not(const Instruction*); >+ void emit_op_nstricteq(const Instruction*); >+ void emit_op_dec(const Instruction*); >+ void emit_op_inc(const Instruction*); >+ void emit_op_profile_type(const Instruction*); >+ void emit_op_profile_control_flow(const Instruction*); >+ void emit_op_get_parent_scope(const Instruction*); >+ void emit_op_put_by_id(const Instruction*); >+ void emit_op_put_by_val(const Instruction*); >+ void emit_op_put_getter_by_id(const Instruction*); >+ void emit_op_put_setter_by_id(const Instruction*); >+ void emit_op_put_getter_setter_by_id(const Instruction*); >+ void emit_op_put_getter_by_val(const Instruction*); >+ void emit_op_put_setter_by_val(const Instruction*); >+ void emit_op_ret(const Instruction*); >+ void emit_op_rshift(const Instruction*); >+ void emit_op_set_function_name(const Instruction*); >+ void emit_op_stricteq(const Instruction*); >+ void emit_op_sub(const Instruction*); >+ void emit_op_switch_char(const Instruction*); >+ void emit_op_switch_imm(const Instruction*); >+ void emit_op_switch_string(const Instruction*); >+ void emit_op_tear_off_arguments(const Instruction*); >+ void emit_op_throw(const Instruction*); >+ void emit_op_to_number(const Instruction*); >+ void emit_op_to_string(const Instruction*); >+ void emit_op_to_object(const Instruction*); >+ void emit_op_to_primitive(const Instruction*); >+ void emit_op_unexpected_load(const Instruction*); >+ void emit_op_unsigned(const Instruction*); >+ void emit_op_urshift(const Instruction*); >+ void emit_op_has_structure_property(const Instruction*); >+ void emit_op_has_indexed_property(const Instruction*); >+ void 
emit_op_get_direct_pname(const Instruction*); >+ void emit_op_enumerator_structure_pname(const Instruction*); >+ void emit_op_enumerator_generic_pname(const Instruction*); >+ void emit_op_log_shadow_chicken_prologue(const Instruction*); >+ void emit_op_log_shadow_chicken_tail(const Instruction*); >+ >+ void emitSlow_op_add(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_call(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_tail_call(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_call_eval(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_call_varargs(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_tail_call_varargs(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_tail_call_forward_arguments(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_construct_varargs(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_construct(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_eq(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_get_callee(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_try_get_by_id(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_get_by_id(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_get_by_id_with_this(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_get_by_id_direct(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_get_by_val(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_get_argument_by_val(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_in_by_id(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_instanceof(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void 
emitSlow_op_instanceof_custom(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_jless(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_jlesseq(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_jgreater(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_jgreatereq(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_jnless(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_jnlesseq(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_jngreater(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_jngreatereq(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_jeq(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_jneq(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_jstricteq(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_jnstricteq(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_jtrue(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_loop_hint(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_check_traps(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_mod(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_mul(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_negate(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_neq(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_new_object(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_put_by_id(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_put_by_val(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_sub(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ 
void emitSlow_op_has_indexed_property(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ >+ void emit_op_resolve_scope(const Instruction*); >+ void emit_op_get_from_scope(const Instruction*); >+ void emit_op_put_to_scope(const Instruction*); >+ void emit_op_get_from_arguments(const Instruction*); >+ void emit_op_put_to_arguments(const Instruction*); >+ void emitSlow_op_get_from_scope(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_put_to_scope(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ >+ void emitSlowCaseCall(const Instruction*, Vector<SlowCaseEntry>::iterator&, SlowPathFunction); >+ >+ void emitRightShift(const Instruction*, bool isUnsigned); >+ void emitRightShiftSlowCase(const Instruction*, Vector<SlowCaseEntry>::iterator&, bool isUnsigned); >+ >+ template<typename Op> >+ void emitNewFuncCommon(const Instruction*); >+ template<typename Op> >+ void emitNewFuncExprCommon(const Instruction*); > void emitVarInjectionCheck(bool needsVarInjectionChecks); > void emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth); > void emitLoadWithStructureCheck(int scope, Structure** structureSlot); >@@ -679,15 +703,15 @@ namespace JSC { > bool isOperandConstantInt(int src); > bool isOperandConstantChar(int src); > >- template <typename Generator, typename ProfiledFunction, typename NonProfiledFunction> >- void emitMathICFast(JITUnaryMathIC<Generator>*, Instruction*, ProfiledFunction, NonProfiledFunction); >- template <typename Generator, typename ProfiledFunction, typename NonProfiledFunction> >- void emitMathICFast(JITBinaryMathIC<Generator>*, Instruction*, ProfiledFunction, NonProfiledFunction); >+ template <typename Op, typename Generator, typename ProfiledFunction, typename NonProfiledFunction> >+ void emitMathICFast(JITUnaryMathIC<Generator>*, const Instruction*, ProfiledFunction, NonProfiledFunction); >+ template <typename Op, typename Generator, typename ProfiledFunction, typename 
NonProfiledFunction> >+ void emitMathICFast(JITBinaryMathIC<Generator>*, const Instruction*, ProfiledFunction, NonProfiledFunction); > >- template <typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction> >- void emitMathICSlow(JITBinaryMathIC<Generator>*, Instruction*, ProfiledRepatchFunction, ProfiledFunction, RepatchFunction); >- template <typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction> >- void emitMathICSlow(JITUnaryMathIC<Generator>*, Instruction*, ProfiledRepatchFunction, ProfiledFunction, RepatchFunction); >+ template <typename Op, typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction> >+ void emitMathICSlow(JITBinaryMathIC<Generator>*, const Instruction*, ProfiledRepatchFunction, ProfiledFunction, RepatchFunction); >+ template <typename Op, typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction> >+ void emitMathICSlow(JITUnaryMathIC<Generator>*, const Instruction*, ProfiledRepatchFunction, ProfiledFunction, RepatchFunction); > > Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter) > { >@@ -719,7 +743,8 @@ namespace JSC { > #endif > MacroAssembler::Call appendCallWithCallFrameRollbackOnException(const FunctionPtr<CFunctionPtrTag>); > MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResult(const FunctionPtr<CFunctionPtrTag>, int); >- MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResultWithProfile(const FunctionPtr<CFunctionPtrTag>, int); >+ template<typename Metadata> >+ MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResultWithProfile(Metadata&, const FunctionPtr<CFunctionPtrTag>, int); > > template<typename OperationType, typename... 
Args> > std::enable_if_t<FunctionTraits<OperationType>::hasResult, MacroAssembler::Call> >@@ -761,12 +786,12 @@ namespace JSC { > } > #endif // OS(WINDOWS) && CPU(X86_64) > >- template<typename OperationType, typename... Args> >+ template<typename Metadata, typename OperationType, typename... Args> > std::enable_if_t<FunctionTraits<OperationType>::hasResult, MacroAssembler::Call> >- callOperationWithProfile(OperationType operation, int result, Args... args) >+ callOperationWithProfile(Metadata& metadata, OperationType operation, int result, Args... args) > { > setupArguments<OperationType>(args...); >- return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, result); >+ return appendCallWithExceptionCheckSetJSValueResultWithProfile(metadata, operation, result); > } > > template<typename OperationType, typename... Args> >@@ -793,10 +818,13 @@ namespace JSC { > return appendCallWithCallFrameRollbackOnException(operation); > } > >- template<typename SnippetGenerator> >- void emitBitBinaryOpFastPath(Instruction* currentInstruction); >+ template<typename Op, typename SnippetGenerator> >+ void emitBitBinaryOpFastPath(const Instruction* currentInstruction); >+ >+ void emitRightShiftFastPath(const Instruction* currentInstruction, OpcodeID); > >- void emitRightShiftFastPath(Instruction* currentInstruction, OpcodeID); >+ template<typename Op> >+ void emitRightShiftFastPath(const Instruction* currentInstruction, JITRightShiftGenerator::ShiftType); > > void updateTopCallFrame(); > >@@ -826,7 +854,7 @@ namespace JSC { > #endif > > #if ENABLE(OPCODE_SAMPLING) >- void sampleInstruction(Instruction*, bool = false); >+ void sampleInstruction(const Instruction*, bool = false); > #endif > > #if ENABLE(CODEBLOCK_SAMPLING) >@@ -853,11 +881,11 @@ namespace JSC { > // If you need to check the value of an instruction multiple times and the instruction is > // part of a LLInt inline cache, then you want to use this. 
It will give you the value of > // the instruction at the start of JITing. >- Instruction* copiedInstruction(Instruction*); >+ const Instruction* copiedInstruction(const Instruction*); > > Interpreter* m_interpreter; > >- PoisonedRefCountedArray<CodeBlockPoison, Instruction> m_instructions; >+ const InstructionStream& m_instructions; > > Vector<CallRecord> m_calls; > Vector<Label> m_labels; >@@ -894,8 +922,8 @@ namespace JSC { > > PCToCodeOriginMapBuilder m_pcToCodeOriginMapBuilder; > >- HashMap<Instruction*, void*> m_instructionToMathIC; >- HashMap<Instruction*, MathICGenerationState> m_instructionToMathICGenerationState; >+ HashMap<const Instruction*, void*> m_instructionToMathIC; >+ HashMap<const Instruction*, MathICGenerationState> m_instructionToMathICGenerationState; > > bool m_canBeOptimized; > bool m_canBeOptimizedOrInlined; >diff --git a/Source/JavaScriptCore/jit/JITArithmetic.cpp b/Source/JavaScriptCore/jit/JITArithmetic.cpp >index 3981d0388189713f6b5b55de6a550be0024e024a..d8dd7669d33da0b6f466fce4d50ff4dc87fa3d5a 100644 >--- a/Source/JavaScriptCore/jit/JITArithmetic.cpp >+++ b/Source/JavaScriptCore/jit/JITArithmetic.cpp >@@ -41,7 +41,6 @@ > #include "JITMulGenerator.h" > #include "JITNegGenerator.h" > #include "JITOperations.h" >-#include "JITRightShiftGenerator.h" > #include "JITSubGenerator.h" > #include "JSArray.h" > #include "JSFunction.h" >@@ -53,190 +52,113 @@ > > namespace JSC { > >-void JIT::emit_op_jless(Instruction* currentInstruction) >+void JIT::emit_op_jless(const Instruction* currentInstruction) > { >- int op1 = currentInstruction[1].u.operand; >- int op2 = currentInstruction[2].u.operand; >- unsigned target = currentInstruction[3].u.operand; >- >- emit_compareAndJump(op_jless, op1, op2, target, LessThan); >+ emit_compareAndJump<OpJless>(currentInstruction, LessThan); > } > >-void JIT::emit_op_jlesseq(Instruction* currentInstruction) >+void JIT::emit_op_jlesseq(const Instruction* currentInstruction) > { >- int op1 = 
currentInstruction[1].u.operand; >- int op2 = currentInstruction[2].u.operand; >- unsigned target = currentInstruction[3].u.operand; >- >- emit_compareAndJump(op_jlesseq, op1, op2, target, LessThanOrEqual); >+ emit_compareAndJump<OpJlesseq>(currentInstruction, LessThanOrEqual); > } > >-void JIT::emit_op_jgreater(Instruction* currentInstruction) >+void JIT::emit_op_jgreater(const Instruction* currentInstruction) > { >- int op1 = currentInstruction[1].u.operand; >- int op2 = currentInstruction[2].u.operand; >- unsigned target = currentInstruction[3].u.operand; >- >- emit_compareAndJump(op_jgreater, op1, op2, target, GreaterThan); >+ emit_compareAndJump<OpJgreater>(currentInstruction, GreaterThan); > } > >-void JIT::emit_op_jgreatereq(Instruction* currentInstruction) >+void JIT::emit_op_jgreatereq(const Instruction* currentInstruction) > { >- int op1 = currentInstruction[1].u.operand; >- int op2 = currentInstruction[2].u.operand; >- unsigned target = currentInstruction[3].u.operand; >- >- emit_compareAndJump(op_jgreatereq, op1, op2, target, GreaterThanOrEqual); >+ emit_compareAndJump<OpJgreatereq>(currentInstruction, GreaterThanOrEqual); > } > >-void JIT::emit_op_jnless(Instruction* currentInstruction) >+void JIT::emit_op_jnless(const Instruction* currentInstruction) > { >- int op1 = currentInstruction[1].u.operand; >- int op2 = currentInstruction[2].u.operand; >- unsigned target = currentInstruction[3].u.operand; >- >- emit_compareAndJump(op_jnless, op1, op2, target, GreaterThanOrEqual); >+ emit_compareAndJump<OpJnless>(currentInstruction, GreaterThanOrEqual); > } > >-void JIT::emit_op_jnlesseq(Instruction* currentInstruction) >+void JIT::emit_op_jnlesseq(const Instruction* currentInstruction) > { >- int op1 = currentInstruction[1].u.operand; >- int op2 = currentInstruction[2].u.operand; >- unsigned target = currentInstruction[3].u.operand; >- >- emit_compareAndJump(op_jnlesseq, op1, op2, target, GreaterThan); >+ emit_compareAndJump<OpJnlesseq>(currentInstruction, GreaterThan); > } > >-void
JIT::emit_op_jngreater(Instruction* currentInstruction) >+void JIT::emit_op_jngreater(const Instruction* currentInstruction) > { >- int op1 = currentInstruction[1].u.operand; >- int op2 = currentInstruction[2].u.operand; >- unsigned target = currentInstruction[3].u.operand; >- >- emit_compareAndJump(op_jngreater, op1, op2, target, LessThanOrEqual); >+ emit_compareAndJump<OpJngreater>(currentInstruction, LessThanOrEqual); > } > >-void JIT::emit_op_jngreatereq(Instruction* currentInstruction) >+void JIT::emit_op_jngreatereq(const Instruction* currentInstruction) > { >- int op1 = currentInstruction[1].u.operand; >- int op2 = currentInstruction[2].u.operand; >- unsigned target = currentInstruction[3].u.operand; >- >- emit_compareAndJump(op_jngreatereq, op1, op2, target, LessThan); >+ emit_compareAndJump<OpJngreatereq>(currentInstruction, LessThan); > } > >-void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_jless(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { >- int op1 = currentInstruction[1].u.operand; >- int op2 = currentInstruction[2].u.operand; >- unsigned target = currentInstruction[3].u.operand; >- >- emit_compareAndJumpSlow(op1, op2, target, DoubleLessThan, operationCompareLess, false, iter); >+ emit_compareAndJumpSlow<OpJless>(currentInstruction, DoubleLessThan, operationCompareLess, false, iter); > } > >-void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_jlesseq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { >- int op1 = currentInstruction[1].u.operand; >- int op2 = currentInstruction[2].u.operand; >- unsigned target = currentInstruction[3].u.operand; >- >- emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqual, operationCompareLessEq, false, iter); >+ emit_compareAndJumpSlow<OpJlesseq>(currentInstruction, DoubleLessThanOrEqual,
operationCompareLessEq, false, iter); > } > >-void JIT::emitSlow_op_jgreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_jgreater(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { >- int op1 = currentInstruction[1].u.operand; >- int op2 = currentInstruction[2].u.operand; >- unsigned target = currentInstruction[3].u.operand; >- >- emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThan, operationCompareGreater, false, iter); >+ emit_compareAndJumpSlow<OpJgreater>(currentInstruction, DoubleGreaterThan, operationCompareGreater, false, iter); > } > >-void JIT::emitSlow_op_jgreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_jgreatereq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { >- int op1 = currentInstruction[1].u.operand; >- int op2 = currentInstruction[2].u.operand; >- unsigned target = currentInstruction[3].u.operand; >- >- emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqual, operationCompareGreaterEq, false, iter); >+ emit_compareAndJumpSlow<OpJgreatereq>(currentInstruction, DoubleGreaterThanOrEqual, operationCompareGreaterEq, false, iter); > } > >-void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_jnless(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { >- int op1 = currentInstruction[1].u.operand; >- int op2 = currentInstruction[2].u.operand; >- unsigned target = currentInstruction[3].u.operand; >- >- emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqualOrUnordered, operationCompareLess, true, iter); >+ emit_compareAndJumpSlow<OpJnless>(currentInstruction, DoubleGreaterThanOrEqualOrUnordered, operationCompareLess, true, iter); > } > >-void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void 
JIT::emitSlow_op_jnlesseq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { >- int op1 = currentInstruction[1].u.operand; >- int op2 = currentInstruction[2].u.operand; >- unsigned target = currentInstruction[3].u.operand; >- >- emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrUnordered, operationCompareLessEq, true, iter); >+ emit_compareAndJumpSlow<OpJnlesseq>(currentInstruction, DoubleGreaterThanOrUnordered, operationCompareLessEq, true, iter); > } > >-void JIT::emitSlow_op_jngreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_jngreater(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { >- int op1 = currentInstruction[1].u.operand; >- int op2 = currentInstruction[2].u.operand; >- unsigned target = currentInstruction[3].u.operand; >- >- emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqualOrUnordered, operationCompareGreater, true, iter); >+ emit_compareAndJumpSlow<OpJngreater>(currentInstruction, DoubleLessThanOrEqualOrUnordered, operationCompareGreater, true, iter); > } > >-void JIT::emitSlow_op_jngreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_jngreatereq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { >- int op1 = currentInstruction[1].u.operand; >- int op2 = currentInstruction[2].u.operand; >- unsigned target = currentInstruction[3].u.operand; >- >- emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrUnordered, operationCompareGreaterEq, true, iter); >+ emit_compareAndJumpSlow<OpJngreatereq>(currentInstruction, DoubleLessThanOrUnordered, operationCompareGreaterEq, true, iter); > } > >-void JIT::emit_op_below(Instruction* currentInstruction) >+void JIT::emit_op_below(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int op1 = currentInstruction[2].u.operand; >- int op2 = 
currentInstruction[3].u.operand; >- emit_compareUnsigned(dst, op1, op2, Below); >+ emit_compareUnsigned<OpBelow>(currentInstruction, Below); > } > >-void JIT::emit_op_beloweq(Instruction* currentInstruction) >+void JIT::emit_op_beloweq(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int op1 = currentInstruction[2].u.operand; >- int op2 = currentInstruction[3].u.operand; >- emit_compareUnsigned(dst, op1, op2, BelowOrEqual); >+ emit_compareUnsigned<OpBeloweq>(currentInstruction, BelowOrEqual); > } > >-void JIT::emit_op_jbelow(Instruction* currentInstruction) >+void JIT::emit_op_jbelow(const Instruction* currentInstruction) > { >- int op1 = currentInstruction[1].u.operand; >- int op2 = currentInstruction[2].u.operand; >- unsigned target = currentInstruction[3].u.operand; >- >- emit_compareUnsignedAndJump(op1, op2, target, Below); >+ emit_compareUnsignedAndJump<OpJbelow>(currentInstruction, Below); > } > >-void JIT::emit_op_jbeloweq(Instruction* currentInstruction) >+void JIT::emit_op_jbeloweq(const Instruction* currentInstruction) > { >- int op1 = currentInstruction[1].u.operand; >- int op2 = currentInstruction[2].u.operand; >- unsigned target = currentInstruction[3].u.operand; >- >- emit_compareUnsignedAndJump(op1, op2, target, BelowOrEqual); >+ emit_compareUnsignedAndJump<OpJbeloweq>(currentInstruction, BelowOrEqual); > } > > #if USE(JSVALUE64) > >-void JIT::emit_op_unsigned(Instruction* currentInstruction) >+void JIT::emit_op_unsigned(const Instruction* currentInstruction) > { >- int result = currentInstruction[1].u.operand; >- int op1 = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpUnsigned>(); >+ int result = bytecode.dst.offset(); >+ int op1 = bytecode.operand.offset(); > > emitGetVirtualRegister(op1, regT0); > emitJumpSlowCaseIfNotInt(regT0); >@@ -245,13 +167,18 @@ void JIT::emit_op_unsigned(Instruction* currentInstruction) > emitPutVirtualRegister(result, regT0); > } > >-void 
JIT::emit_compareAndJump(OpcodeID, int op1, int op2, unsigned target, RelationalCondition condition) >+template<typename Op> >+void JIT::emit_compareAndJump(const Instruction* instruction, RelationalCondition condition) > { > // We generate inline code for the following cases in the fast path: > // - int immediate to constant int immediate > // - constant int immediate to int immediate > // - int immediate to int immediate > >+ auto bytecode = instruction->as<Op>(); >+ int op1 = bytecode.lhs.offset(); >+ int op2 = bytecode.rhs.offset(); >+ unsigned target = bytecode.target; > if (isOperandConstantChar(op1)) { > emitGetVirtualRegister(op2, regT0); > addSlowCase(branchIfNotCell(regT0)); >@@ -292,8 +219,13 @@ void JIT::emit_compareAndJump(OpcodeID, int op1, int op2, unsigned target, Relat > addJump(branch32(condition, regT0, regT1), target); > } > >-void JIT::emit_compareUnsignedAndJump(int op1, int op2, unsigned target, RelationalCondition condition) >+template<typename Op> >+void JIT::emit_compareUnsignedAndJump(const Instruction* instruction, RelationalCondition condition) > { >+ auto bytecode = instruction->as<Op>(); >+ int op1 = bytecode.lhs.offset(); >+ int op2 = bytecode.rhs.offset(); >+ unsigned target = bytecode.target; > if (isOperandConstantInt(op2)) { > emitGetVirtualRegister(op1, regT0); > int32_t op2imm = getOperandConstantInt(op2); >@@ -308,8 +240,13 @@ void JIT::emit_compareUnsignedAndJump(int op1, int op2, unsigned target, Relatio > } > } > >-void JIT::emit_compareUnsigned(int dst, int op1, int op2, RelationalCondition condition) >+template<typename Op> >+void JIT::emit_compareUnsigned(const Instruction* instruction, RelationalCondition condition) > { >+ auto bytecode = instruction->as<Op>(); >+ int dst = bytecode.dst.offset(); >+ int op1 = bytecode.lhs.offset(); >+ int op2 = bytecode.rhs.offset(); > if (isOperandConstantInt(op2)) { > emitGetVirtualRegister(op1, regT0); > int32_t op2imm = getOperandConstantInt(op2); >@@ -326,16 +263,14 @@ void
JIT::emit_compareUnsigned(int dst, int op1, int op2, RelationalCondition co > emitPutVirtualRegister(dst); > } > >-void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition condition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator& iter) >+template<typename Op> >+void JIT::emit_compareAndJumpSlow(const Instruction* instruction, DoubleCondition condition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator& iter) > { >- COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jlesseq), OPCODE_LENGTH_op_jlesseq_equals_op_jless); >- COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jnless), OPCODE_LENGTH_op_jnless_equals_op_jless); >- COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jnlesseq), OPCODE_LENGTH_op_jnlesseq_equals_op_jless); >- COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jgreater), OPCODE_LENGTH_op_jgreater_equals_op_jless); >- COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jgreatereq), OPCODE_LENGTH_op_jgreatereq_equals_op_jless); >- COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jngreater), OPCODE_LENGTH_op_jngreater_equals_op_jless); >- COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jngreatereq), OPCODE_LENGTH_op_jngreatereq_equals_op_jless); >- >+ auto bytecode = instruction->as<Op>(); >+ int op1 = bytecode.lhs.offset(); >+ int op2 = bytecode.rhs.offset(); >+ unsigned target = bytecode.target; >+ > // We generate inline code for the following cases in the slow path: > // - floating-point number to constant int immediate > // - constant int immediate to floating-point number >@@ -427,9 +362,10 @@ void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondi > emitJumpSlowToHot(branchTest32(invert ? 
Zero : NonZero, returnValueGPR), target); > } > >-void JIT::emit_op_inc(Instruction* currentInstruction) >+void JIT::emit_op_inc(const Instruction* currentInstruction) > { >- int srcDst = currentInstruction[1].u.operand; >+ auto bytecode = currentInstruction->as<OpInc>(); >+ int srcDst = bytecode.srcDst.offset(); > > emitGetVirtualRegister(srcDst, regT0); > emitJumpSlowCaseIfNotInt(regT0); >@@ -438,9 +374,10 @@ void JIT::emit_op_inc(Instruction* currentInstruction) > emitPutVirtualRegister(srcDst); > } > >-void JIT::emit_op_dec(Instruction* currentInstruction) >+void JIT::emit_op_dec(const Instruction* currentInstruction) > { >- int srcDst = currentInstruction[1].u.operand; >+ auto bytecode = currentInstruction->as<OpDec>(); >+ int srcDst = bytecode.srcDst.offset(); > > emitGetVirtualRegister(srcDst, regT0); > emitJumpSlowCaseIfNotInt(regT0); >@@ -453,11 +390,12 @@ void JIT::emit_op_dec(Instruction* currentInstruction) > > #if CPU(X86_64) > >-void JIT::emit_op_mod(Instruction* currentInstruction) >+void JIT::emit_op_mod(const Instruction* currentInstruction) > { >- int result = currentInstruction[1].u.operand; >- int op1 = currentInstruction[2].u.operand; >- int op2 = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpMod>(); >+ int result = bytecode.dst.offset(); >+ int op1 = bytecode.lhs.offset(); >+ int op2 = bytecode.rhs.offset(); > > // Make sure registers are correct for x86 IDIV instructions. 
> ASSERT(regT0 == X86Registers::eax); >@@ -484,7 +422,7 @@ void JIT::emit_op_mod(Instruction* currentInstruction) > emitPutVirtualRegister(result); > } > >-void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_mod(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > >@@ -494,13 +432,13 @@ void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry> > > #else // CPU(X86_64) > >-void JIT::emit_op_mod(Instruction* currentInstruction) >+void JIT::emit_op_mod(const Instruction* currentInstruction) > { > JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod); > slowPathCall.call(); > } > >-void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&) >+void JIT::emitSlow_op_mod(const Instruction*, Vector<SlowCaseEntry>::iterator&) > { > UNREACHABLE_FOR_PLATFORM(); > } >@@ -511,28 +449,29 @@ void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&) > > #endif // USE(JSVALUE64) > >-void JIT::emit_op_negate(Instruction* currentInstruction) >+void JIT::emit_op_negate(const Instruction* currentInstruction) > { > ArithProfile* arithProfile = m_codeBlock->arithProfileForPC(currentInstruction); > JITNegIC* negateIC = m_codeBlock->addJITNegIC(arithProfile, currentInstruction); > m_instructionToMathIC.add(currentInstruction, negateIC); >- emitMathICFast(negateIC, currentInstruction, operationArithNegateProfiled, operationArithNegate); >+ emitMathICFast<OpNegate>(negateIC, currentInstruction, operationArithNegateProfiled, operationArithNegate); > } > >-void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_negate(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > > JITNegIC* negIC = bitwise_cast<JITNegIC*>(m_instructionToMathIC.get(currentInstruction)); >- 
emitMathICSlow(negIC, currentInstruction, operationArithNegateProfiledOptimize, operationArithNegateProfiled, operationArithNegateOptimize); >+ emitMathICSlow<OpNegate>(negIC, currentInstruction, operationArithNegateProfiledOptimize, operationArithNegateProfiled, operationArithNegateOptimize); > } > >-template<typename SnippetGenerator> >-void JIT::emitBitBinaryOpFastPath(Instruction* currentInstruction) >+template<typename Op, typename SnippetGenerator> >+void JIT::emitBitBinaryOpFastPath(const Instruction* currentInstruction) > { >- int result = currentInstruction[1].u.operand; >- int op1 = currentInstruction[2].u.operand; >- int op2 = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<Op>(); >+ int result = bytecode.dst.offset(); >+ int op1 = bytecode.lhs.offset(); >+ int op2 = bytecode.rhs.offset(); > > #if USE(JSVALUE64) > JSValueRegs leftRegs = JSValueRegs(regT0); >@@ -572,36 +511,48 @@ void JIT::emitBitBinaryOpFastPath(Instruction* currentInstruction) > addSlowCase(gen.slowPathJumpList()); > } > >-void JIT::emit_op_bitand(Instruction* currentInstruction) >+void JIT::emit_op_bitand(const Instruction* currentInstruction) > { >- emitBitBinaryOpFastPath<JITBitAndGenerator>(currentInstruction); >+ emitBitBinaryOpFastPath<OpBitand, JITBitAndGenerator>(currentInstruction); > } > >-void JIT::emit_op_bitor(Instruction* currentInstruction) >+void JIT::emit_op_bitor(const Instruction* currentInstruction) > { >- emitBitBinaryOpFastPath<JITBitOrGenerator>(currentInstruction); >+ emitBitBinaryOpFastPath<OpBitor, JITBitOrGenerator>(currentInstruction); > } > >-void JIT::emit_op_bitxor(Instruction* currentInstruction) >+void JIT::emit_op_bitxor(const Instruction* currentInstruction) > { >- emitBitBinaryOpFastPath<JITBitXorGenerator>(currentInstruction); >+ emitBitBinaryOpFastPath<OpBitxor, JITBitXorGenerator>(currentInstruction); > } > >-void JIT::emit_op_lshift(Instruction* currentInstruction) >+void JIT::emit_op_lshift(const Instruction* 
currentInstruction) > { >- emitBitBinaryOpFastPath<JITLeftShiftGenerator>(currentInstruction); >+ emitBitBinaryOpFastPath<OpLshift, JITLeftShiftGenerator>(currentInstruction); > } > >-void JIT::emitRightShiftFastPath(Instruction* currentInstruction, OpcodeID opcodeID) >+void JIT::emitRightShiftFastPath(const Instruction* currentInstruction, OpcodeID opcodeID) > { > ASSERT(opcodeID == op_rshift || opcodeID == op_urshift); >+ switch (opcodeID) { >+ case op_rshift: >+ emitRightShiftFastPath<OpRshift>(currentInstruction, JITRightShiftGenerator::SignedShift); >+ break; >+ case op_urshift: >+ emitRightShiftFastPath<OpUrshift>(currentInstruction, JITRightShiftGenerator::UnsignedShift); >+ break; >+ default: >+ ASSERT_NOT_REACHED(); >+ } >+} > >- JITRightShiftGenerator::ShiftType snippetShiftType = opcodeID == op_rshift ? >- JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift; >- >- int result = currentInstruction[1].u.operand; >- int op1 = currentInstruction[2].u.operand; >- int op2 = currentInstruction[3].u.operand; >+template<typename Op> >+void JIT::emitRightShiftFastPath(const Instruction* currentInstruction, JITRightShiftGenerator::ShiftType snippetShiftType) >+{ >+ auto bytecode = currentInstruction->as<Op>(); >+ int result = bytecode.dst.offset(); >+ int op1 = bytecode.lhs.offset(); >+ int op2 = bytecode.rhs.offset(); > > #if USE(JSVALUE64) > JSValueRegs leftRegs = JSValueRegs(regT0); >@@ -644,42 +595,44 @@ void JIT::emitRightShiftFastPath(Instruction* currentInstruction, OpcodeID opcod > addSlowCase(gen.slowPathJumpList()); > } > >-void JIT::emit_op_rshift(Instruction* currentInstruction) >+void JIT::emit_op_rshift(const Instruction* currentInstruction) > { > emitRightShiftFastPath(currentInstruction, op_rshift); > } > >-void JIT::emit_op_urshift(Instruction* currentInstruction) >+void JIT::emit_op_urshift(const Instruction* currentInstruction) > { > emitRightShiftFastPath(currentInstruction, op_urshift); > } > >-ALWAYS_INLINE static 
OperandTypes getOperandTypes(Instruction* instruction) >+template<typename Metadata> >+ALWAYS_INLINE static OperandTypes getOperandTypes(const Metadata& metadata) > { >- return OperandTypes(ArithProfile::fromInt(instruction[4].u.operand).lhsResultType(), ArithProfile::fromInt(instruction[4].u.operand).rhsResultType()); >+ return OperandTypes(metadata.arithProfile.lhsResultType(), metadata.arithProfile.rhsResultType()); > } > >-void JIT::emit_op_add(Instruction* currentInstruction) >+void JIT::emit_op_add(const Instruction* currentInstruction) > { > ArithProfile* arithProfile = m_codeBlock->arithProfileForPC(currentInstruction); > JITAddIC* addIC = m_codeBlock->addJITAddIC(arithProfile, currentInstruction); > m_instructionToMathIC.add(currentInstruction, addIC); >- emitMathICFast(addIC, currentInstruction, operationValueAddProfiled, operationValueAdd); >+ emitMathICFast<OpAdd>(addIC, currentInstruction, operationValueAddProfiled, operationValueAdd); > } > >-void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_add(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > > JITAddIC* addIC = bitwise_cast<JITAddIC*>(m_instructionToMathIC.get(currentInstruction)); >- emitMathICSlow(addIC, currentInstruction, operationValueAddProfiledOptimize, operationValueAddProfiled, operationValueAddOptimize); >+ emitMathICSlow<OpAdd>(addIC, currentInstruction, operationValueAddProfiledOptimize, operationValueAddProfiled, operationValueAddOptimize); > } > >-template <typename Generator, typename ProfiledFunction, typename NonProfiledFunction> >-void JIT::emitMathICFast(JITUnaryMathIC<Generator>* mathIC, Instruction* currentInstruction, ProfiledFunction profiledFunction, NonProfiledFunction nonProfiledFunction) >+template <typename Op, typename Generator, typename ProfiledFunction, typename NonProfiledFunction> >+void JIT::emitMathICFast(JITUnaryMathIC<Generator>* 
mathIC, const Instruction* currentInstruction, ProfiledFunction profiledFunction, NonProfiledFunction nonProfiledFunction) > { >- int result = currentInstruction[1].u.operand; >- int operand = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<Op>(); >+ int result = bytecode.dst.offset(); >+ int operand = bytecode.operand.offset(); > > #if USE(JSVALUE64) > // ArithNegate benefits from using the same register as src and dst. >@@ -724,22 +677,24 @@ void JIT::emitMathICFast(JITUnaryMathIC<Generator>* mathIC, Instruction* current > emitPutVirtualRegister(result, resultRegs); > } > >-template <typename Generator, typename ProfiledFunction, typename NonProfiledFunction> >-void JIT::emitMathICFast(JITBinaryMathIC<Generator>* mathIC, Instruction* currentInstruction, ProfiledFunction profiledFunction, NonProfiledFunction nonProfiledFunction) >+template <typename Op, typename Generator, typename ProfiledFunction, typename NonProfiledFunction> >+void JIT::emitMathICFast(JITBinaryMathIC<Generator>* mathIC, const Instruction* currentInstruction, ProfiledFunction profiledFunction, NonProfiledFunction nonProfiledFunction) > { >- int result = currentInstruction[1].u.operand; >- int op1 = currentInstruction[2].u.operand; >- int op2 = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<Op>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ int result = bytecode.dst.offset(); >+ int op1 = bytecode.lhs.offset(); >+ int op2 = bytecode.rhs.offset(); > > #if USE(JSVALUE64) >- OperandTypes types = getOperandTypes(copiedInstruction(currentInstruction)); >+ OperandTypes types = getOperandTypes(metadata); > JSValueRegs leftRegs = JSValueRegs(regT1); > JSValueRegs rightRegs = JSValueRegs(regT2); > JSValueRegs resultRegs = JSValueRegs(regT0); > GPRReg scratchGPR = regT3; > FPRReg scratchFPR = fpRegT2; > #else >- OperandTypes types = getOperandTypes(currentInstruction); >+ OperandTypes types = getOperandTypes(metadata); > JSValueRegs 
leftRegs = JSValueRegs(regT1, regT0); > JSValueRegs rightRegs = JSValueRegs(regT3, regT2); > JSValueRegs resultRegs = leftRegs; >@@ -797,13 +752,14 @@ void JIT::emitMathICFast(JITBinaryMathIC<Generator>* mathIC, Instruction* curren > emitPutVirtualRegister(result, resultRegs); > } > >-template <typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction> >-void JIT::emitMathICSlow(JITUnaryMathIC<Generator>* mathIC, Instruction* currentInstruction, ProfiledRepatchFunction profiledRepatchFunction, ProfiledFunction profiledFunction, RepatchFunction repatchFunction) >+template <typename Op, typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction> >+void JIT::emitMathICSlow(JITUnaryMathIC<Generator>* mathIC, const Instruction* currentInstruction, ProfiledRepatchFunction profiledRepatchFunction, ProfiledFunction profiledFunction, RepatchFunction repatchFunction) > { > MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value; > mathICGenerationState.slowPathStart = label(); > >- int result = currentInstruction[1].u.operand; >+ auto bytecode = currentInstruction->as<Op>(); >+ int result = bytecode.dst.offset(); > > #if USE(JSVALUE64) > JSValueRegs srcRegs = JSValueRegs(regT1); >@@ -842,23 +798,25 @@ void JIT::emitMathICSlow(JITUnaryMathIC<Generator>* mathIC, Instruction* current > }); > } > >-template <typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction> >-void JIT::emitMathICSlow(JITBinaryMathIC<Generator>* mathIC, Instruction* currentInstruction, ProfiledRepatchFunction profiledRepatchFunction, ProfiledFunction profiledFunction, RepatchFunction repatchFunction) >+template <typename Op, typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction> >+void JIT::emitMathICSlow(JITBinaryMathIC<Generator>* mathIC, const 
Instruction* currentInstruction, ProfiledRepatchFunction profiledRepatchFunction, ProfiledFunction profiledFunction, RepatchFunction repatchFunction) > { > MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value; > mathICGenerationState.slowPathStart = label(); > >- int result = currentInstruction[1].u.operand; >- int op1 = currentInstruction[2].u.operand; >- int op2 = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<Op>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ int result = bytecode.dst.offset(); >+ int op1 = bytecode.lhs.offset(); >+ int op2 = bytecode.rhs.offset(); > > #if USE(JSVALUE64) >- OperandTypes types = getOperandTypes(copiedInstruction(currentInstruction)); >+ OperandTypes types = getOperandTypes(metadata); > JSValueRegs leftRegs = JSValueRegs(regT1); > JSValueRegs rightRegs = JSValueRegs(regT2); > JSValueRegs resultRegs = JSValueRegs(regT0); > #else >- OperandTypes types = getOperandTypes(currentInstruction); >+ OperandTypes types = getOperandTypes(metadata); > JSValueRegs leftRegs = JSValueRegs(regT1, regT0); > JSValueRegs rightRegs = JSValueRegs(regT3, regT2); > JSValueRegs resultRegs = leftRegs; >@@ -908,20 +866,22 @@ void JIT::emitMathICSlow(JITBinaryMathIC<Generator>* mathIC, Instruction* curren > }); > } > >-void JIT::emit_op_div(Instruction* currentInstruction) >+void JIT::emit_op_div(const Instruction* currentInstruction) > { >- int result = currentInstruction[1].u.operand; >- int op1 = currentInstruction[2].u.operand; >- int op2 = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpDiv>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ int result = bytecode.dst.offset(); >+ int op1 = bytecode.lhs.offset(); >+ int op2 = bytecode.rhs.offset(); > > #if USE(JSVALUE64) >- OperandTypes types = getOperandTypes(copiedInstruction(currentInstruction)); >+ OperandTypes types = getOperandTypes(metadata); > JSValueRegs 
leftRegs = JSValueRegs(regT0); > JSValueRegs rightRegs = JSValueRegs(regT1); > JSValueRegs resultRegs = leftRegs; > GPRReg scratchGPR = regT2; > #else >- OperandTypes types = getOperandTypes(currentInstruction); >+ OperandTypes types = getOperandTypes(metadata); > JSValueRegs leftRegs = JSValueRegs(regT1, regT0); > JSValueRegs rightRegs = JSValueRegs(regT3, regT2); > JSValueRegs resultRegs = leftRegs; >@@ -974,36 +934,36 @@ void JIT::emit_op_div(Instruction* currentInstruction) > } > } > >-void JIT::emit_op_mul(Instruction* currentInstruction) >+void JIT::emit_op_mul(const Instruction* currentInstruction) > { > ArithProfile* arithProfile = m_codeBlock->arithProfileForPC(currentInstruction); > JITMulIC* mulIC = m_codeBlock->addJITMulIC(arithProfile, currentInstruction); > m_instructionToMathIC.add(currentInstruction, mulIC); >- emitMathICFast(mulIC, currentInstruction, operationValueMulProfiled, operationValueMul); >+ emitMathICFast<OpMul>(mulIC, currentInstruction, operationValueMulProfiled, operationValueMul); > } > >-void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_mul(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > > JITMulIC* mulIC = bitwise_cast<JITMulIC*>(m_instructionToMathIC.get(currentInstruction)); >- emitMathICSlow(mulIC, currentInstruction, operationValueMulProfiledOptimize, operationValueMulProfiled, operationValueMulOptimize); >+ emitMathICSlow<OpMul>(mulIC, currentInstruction, operationValueMulProfiledOptimize, operationValueMulProfiled, operationValueMulOptimize); > } > >-void JIT::emit_op_sub(Instruction* currentInstruction) >+void JIT::emit_op_sub(const Instruction* currentInstruction) > { > ArithProfile* arithProfile = m_codeBlock->arithProfileForPC(currentInstruction); > JITSubIC* subIC = m_codeBlock->addJITSubIC(arithProfile, currentInstruction); > m_instructionToMathIC.add(currentInstruction, subIC); >- 
emitMathICFast(subIC, currentInstruction, operationValueSubProfiled, operationValueSub); >+ emitMathICFast<OpSub>(subIC, currentInstruction, operationValueSubProfiled, operationValueSub); > } > >-void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_sub(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > > JITSubIC* subIC = bitwise_cast<JITSubIC*>(m_instructionToMathIC.get(currentInstruction)); >- emitMathICSlow(subIC, currentInstruction, operationValueSubProfiledOptimize, operationValueSubProfiled, operationValueSubOptimize); >+ emitMathICSlow<OpSub>(subIC, currentInstruction, operationValueSubProfiledOptimize, operationValueSubProfiled, operationValueSubOptimize); > } > > /* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL, OP_POW ------------------------------ */ >diff --git a/Source/JavaScriptCore/jit/JITCall.cpp b/Source/JavaScriptCore/jit/JITCall.cpp >index 50ab48b15af6d56cd1ed0c3df2596d2528c21626..5d78c8ac6d6a3dcbada5703ce321231607d6b0d9 100644 >--- a/Source/JavaScriptCore/jit/JITCall.cpp >+++ b/Source/JavaScriptCore/jit/JITCall.cpp >@@ -46,23 +46,52 @@ > > namespace JSC { > >-void JIT::emitPutCallResult(Instruction* instruction) >+template<typename Op> >+void JIT::emitPutCallResult(const Op& bytecode) > { >- int dst = instruction[1].u.operand; >- emitValueProfilingSite(); >- emitPutVirtualRegister(dst); >+ emitValueProfilingSite(bytecode.metadata(m_codeBlock)); >+ emitPutVirtualRegister(bytecode.dst.offset()); > } > >-void JIT::compileSetupVarargsFrame(OpcodeID opcode, Instruction* instruction, CallLinkInfo* info) >+template<typename Op> >+std::enable_if_t< >+ Op::opcodeID() != op_call_varargs && Op::opcodeID() != op_construct_varargs && >+ Op::opcodeID() != op_tail_call_varargs && Op::opcodeID() != op_tail_call_forward_arguments >+, void> >+JIT::compileSetupFrame(const Op& bytecode, CallLinkInfo*) > { >- int thisValue = 
instruction[3].u.operand; >- int arguments = instruction[4].u.operand; >- int firstFreeRegister = instruction[5].u.operand; >- int firstVarArgOffset = instruction[6].u.operand; >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ int argCount = bytecode.argc; >+ int registerOffset = -static_cast<int>(bytecode.argv); >+ >+ if (Op::opcodeID() == op_call && shouldEmitProfiling()) { >+ emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0); >+ Jump done = branchIfNotCell(regT0); >+ load32(Address(regT0, JSCell::structureIDOffset()), regT0); >+ store32(regT0, metadata.arrayProfile.addressOfLastSeenStructureID()); >+ done.link(this); >+ } >+ >+ addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister); >+ store32(TrustedImm32(argCount), Address(stackPointerRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC))); >+} >+ >+ >+template<typename Op> >+std::enable_if_t< >+ Op::opcodeID() == op_call_varargs || Op::opcodeID() == op_construct_varargs || >+ Op::opcodeID() == op_tail_call_varargs || Op::opcodeID() == op_tail_call_forward_arguments >+, void> >+JIT::compileSetupFrame(const Op& bytecode, CallLinkInfo* info) >+{ >+ int thisValue = bytecode.thisValue.offset(); >+ int arguments = bytecode.arguments.offset(); >+ int firstFreeRegister = bytecode.firstFree.offset(); >+ int firstVarArgOffset = bytecode.firstVarArg; > > emitGetVirtualRegister(arguments, regT1); > Z_JITOperation_EJZZ sizeOperation; >- if (opcode == op_tail_call_forward_arguments) >+ if (Op::opcodeID() == op_tail_call_forward_arguments) > sizeOperation = operationSizeFrameForForwardArguments; > else > sizeOperation = operationSizeFrameForVarargs; >@@ -72,7 +101,7 @@ void JIT::compileSetupVarargsFrame(OpcodeID opcode, Instruction* instruction, Ca > addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 
* sizeof(void*)))), regT1, stackPointerRegister); > emitGetVirtualRegister(arguments, regT2); > F_JITOperation_EFJZZ setupOperation; >- if (opcode == op_tail_call_forward_arguments) >+ if (Op::opcodeID() == op_tail_call_forward_arguments) > setupOperation = operationSetupForwardArgumentsFrame; > else > setupOperation = operationSetupVarargsFrame; >@@ -93,7 +122,14 @@ void JIT::compileSetupVarargsFrame(OpcodeID opcode, Instruction* instruction, Ca > addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister); > } > >-void JIT::compileCallEval(Instruction* instruction) >+template<typename Op> >+bool JIT::compileCallEval(const Op&) >+{ >+ return false; >+} >+ >+template<> >+bool JIT::compileCallEval(const OpCallEval& bytecode) > { > addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, regT1); > storePtr(callFrameRegister, Address(regT1, CallFrame::callerFrameOffset())); >@@ -107,17 +143,20 @@ void JIT::compileCallEval(Instruction* instruction) > > sampleCodeBlock(m_codeBlock); > >- emitPutCallResult(instruction); >+ emitPutCallResult(bytecode); >+ >+ return true; > } > >-void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::compileCallEvalSlowCase(const Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > >+ auto bytecode = instruction->as<OpCallEval>(); > CallLinkInfo* info = m_codeBlock->addCallLinkInfo(); > info->setUpCall(CallLinkInfo::Call, CodeOrigin(m_bytecodeOffset), regT0); > >- int registerOffset = -instruction[4].u.operand; >+ int registerOffset = -bytecode.argv; > > addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister); > >@@ -128,12 +167,45 @@ void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry > > sampleCodeBlock(m_codeBlock); > >- emitPutCallResult(instruction); >+ emitPutCallResult(bytecode); > } > 
>-void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex) >+template<typename Op> >+bool JIT::compileTailCall(const Op&, CallLinkInfo*, unsigned) > { >- int callee = instruction[2].u.operand; >+ return false; >+} >+ >+template<> >+bool JIT::compileTailCall(const OpTailCall& bytecode, CallLinkInfo* info, unsigned callLinkInfoIndex) >+{ >+ CallFrameShuffleData shuffleData; >+ shuffleData.numPassedArgs = bytecode.argc; >+ shuffleData.tagTypeNumber = GPRInfo::tagTypeNumberRegister; >+ shuffleData.numLocals = >+ bytecode.argv - sizeof(CallerFrameAndPC) / sizeof(Register); >+ shuffleData.args.resize(bytecode.argc); >+ for (unsigned i = 0; i < bytecode.argc; ++i) { >+ shuffleData.args[i] = >+ ValueRecovery::displacedInJSStack( >+ virtualRegisterForArgument(i) - bytecode.argv, >+ DataFormatJS); >+ } >+ shuffleData.callee = >+ ValueRecovery::inGPR(regT0, DataFormatJS); >+ shuffleData.setupCalleeSaveRegisters(m_codeBlock); >+ info->setFrameShuffleData(shuffleData); >+ CallFrameShuffler(*this, shuffleData).prepareForTailCall(); >+ m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall(); >+ return true; >+} >+ >+template<typename Op> >+void JIT::compileOpCall(const Instruction* instruction, unsigned callLinkInfoIndex) >+{ >+ OpcodeID opcodeID = Op::opcodeID(); >+ auto bytecode = instruction->as<Op>(); >+ int callee = bytecode.callee.offset(); > > /* Caller always: > - Updates callFrameRegister to callee callFrame. >@@ -147,33 +219,11 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca > - Caller initializes ReturnPC; CodeBlock. > - Caller restores callFrameRegister after return. 
> */ >- COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), call_and_construct_opcodes_must_be_same_length); >- COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_varargs), call_and_call_varargs_opcodes_must_be_same_length); >- COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct_varargs), call_and_construct_varargs_opcodes_must_be_same_length); >- COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call), call_and_tail_call_opcodes_must_be_same_length); >- COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_varargs), call_and_tail_call_varargs_opcodes_must_be_same_length); >- COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_forward_arguments), call_and_tail_call_forward_arguments_opcodes_must_be_same_length); >- > CallLinkInfo* info = nullptr; > if (opcodeID != op_call_eval) > info = m_codeBlock->addCallLinkInfo(); >- if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments) >- compileSetupVarargsFrame(opcodeID, instruction, info); >- else { >- int argCount = instruction[3].u.operand; >- int registerOffset = -instruction[4].u.operand; >- >- if (opcodeID == op_call && shouldEmitProfiling()) { >- emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0); >- Jump done = branchIfNotCell(regT0); >- load32(Address(regT0, JSCell::structureIDOffset()), regT0); >- store32(regT0, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID()); >- done.link(this); >- } >- >- addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister); >- store32(TrustedImm32(argCount), Address(stackPointerRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC))); >- } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with 
ArgumentCount initialized. >+ compileSetupFrame(bytecode, info); >+ // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized. > > uint32_t bytecodeOffset = m_codeBlock->bytecodeOffset(instruction); > uint32_t locationBits = CallSiteIndex(bytecodeOffset).bits(); >@@ -182,8 +232,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca > emitGetVirtualRegister(callee, regT0); // regT0 holds callee. > store64(regT0, Address(stackPointerRegister, CallFrameSlot::callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC))); > >- if (opcodeID == op_call_eval) { >- compileCallEval(instruction); >+ if (compileCallEval(bytecode)) { > return; > } > >@@ -197,25 +246,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca > m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck; > m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info; > >- if (opcodeID == op_tail_call) { >- CallFrameShuffleData shuffleData; >- shuffleData.numPassedArgs = instruction[3].u.operand; >- shuffleData.tagTypeNumber = GPRInfo::tagTypeNumberRegister; >- shuffleData.numLocals = >- instruction[4].u.operand - sizeof(CallerFrameAndPC) / sizeof(Register); >- shuffleData.args.resize(instruction[3].u.operand); >- for (int i = 0; i < instruction[3].u.operand; ++i) { >- shuffleData.args[i] = >- ValueRecovery::displacedInJSStack( >- virtualRegisterForArgument(i) - instruction[4].u.operand, >- DataFormatJS); >- } >- shuffleData.callee = >- ValueRecovery::inGPR(regT0, DataFormatJS); >- shuffleData.setupCalleeSaveRegisters(m_codeBlock); >- info->setFrameShuffleData(shuffleData); >- CallFrameShuffler(*this, shuffleData).prepareForTailCall(); >- m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall(); >+ if (compileTailCall(bytecode, info, callLinkInfoIndex)) { > return; > } > >@@ -233,15 +264,14 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, 
unsigned ca > > sampleCodeBlock(m_codeBlock); > >- emitPutCallResult(instruction); >+ emitPutCallResult(bytecode); > } > >-void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex) >+template<typename Op> >+void JIT::compileOpCallSlowCase(const Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex) > { >- if (opcodeID == op_call_eval) { >- compileCallEvalSlowCase(instruction, iter); >- return; >- } >+ OpcodeID opcodeID = Op::opcodeID(); >+ ASSERT(opcodeID != op_call_eval); > > linkAllSlowCases(iter); > >@@ -263,87 +293,88 @@ void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vec > > sampleCodeBlock(m_codeBlock); > >- emitPutCallResult(instruction); >+ auto bytecode = instruction->as<Op>(); >+ emitPutCallResult(bytecode); > } > >-void JIT::emit_op_call(Instruction* currentInstruction) >+void JIT::emit_op_call(const Instruction* currentInstruction) > { >- compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++); >+ compileOpCall<OpCall>(currentInstruction, m_callLinkInfoIndex++); > } > >-void JIT::emit_op_tail_call(Instruction* currentInstruction) >+void JIT::emit_op_tail_call(const Instruction* currentInstruction) > { >- compileOpCall(op_tail_call, currentInstruction, m_callLinkInfoIndex++); >+ compileOpCall<OpTailCall>(currentInstruction, m_callLinkInfoIndex++); > } > >-void JIT::emit_op_call_eval(Instruction* currentInstruction) >+void JIT::emit_op_call_eval(const Instruction* currentInstruction) > { >- compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex); >+ compileOpCall<OpCallEval>(currentInstruction, m_callLinkInfoIndex); > } > >-void JIT::emit_op_call_varargs(Instruction* currentInstruction) >+void JIT::emit_op_call_varargs(const Instruction* currentInstruction) > { >- compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++); >+ 
compileOpCall<OpCallVarargs>(currentInstruction, m_callLinkInfoIndex++); > } > >-void JIT::emit_op_tail_call_varargs(Instruction* currentInstruction) >+void JIT::emit_op_tail_call_varargs(const Instruction* currentInstruction) > { >- compileOpCall(op_tail_call_varargs, currentInstruction, m_callLinkInfoIndex++); >+ compileOpCall<OpTailCallVarargs>(currentInstruction, m_callLinkInfoIndex++); > } > >-void JIT::emit_op_tail_call_forward_arguments(Instruction* currentInstruction) >+void JIT::emit_op_tail_call_forward_arguments(const Instruction* currentInstruction) > { >- compileOpCall(op_tail_call_forward_arguments, currentInstruction, m_callLinkInfoIndex++); >+ compileOpCall<OpTailCallForwardArguments>(currentInstruction, m_callLinkInfoIndex++); > } > >-void JIT::emit_op_construct_varargs(Instruction* currentInstruction) >+void JIT::emit_op_construct_varargs(const Instruction* currentInstruction) > { >- compileOpCall(op_construct_varargs, currentInstruction, m_callLinkInfoIndex++); >+ compileOpCall<OpConstructVarargs>(currentInstruction, m_callLinkInfoIndex++); > } > >-void JIT::emit_op_construct(Instruction* currentInstruction) >+void JIT::emit_op_construct(const Instruction* currentInstruction) > { >- compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++); >+ compileOpCall<OpConstruct>(currentInstruction, m_callLinkInfoIndex++); > } > >-void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_call(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { >- compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++); >+ compileOpCallSlowCase<OpCall>(currentInstruction, iter, m_callLinkInfoIndex++); > } > >-void JIT::emitSlow_op_tail_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_tail_call(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { >- 
compileOpCallSlowCase(op_tail_call, currentInstruction, iter, m_callLinkInfoIndex++); >+ compileOpCallSlowCase<OpTailCall>(currentInstruction, iter, m_callLinkInfoIndex++); > } > >-void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_call_eval(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { >- compileOpCallSlowCase(op_call_eval, currentInstruction, iter, m_callLinkInfoIndex); >+ compileCallEvalSlowCase(currentInstruction, iter); > } > >-void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_call_varargs(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { >- compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++); >+ compileOpCallSlowCase<OpCallVarargs>(currentInstruction, iter, m_callLinkInfoIndex++); > } > >-void JIT::emitSlow_op_tail_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_tail_call_varargs(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { >- compileOpCallSlowCase(op_tail_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++); >+ compileOpCallSlowCase<OpTailCallVarargs>(currentInstruction, iter, m_callLinkInfoIndex++); > } > >-void JIT::emitSlow_op_tail_call_forward_arguments(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_tail_call_forward_arguments(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { >- compileOpCallSlowCase(op_tail_call_forward_arguments, currentInstruction, iter, m_callLinkInfoIndex++); >+ compileOpCallSlowCase<OpTailCallForwardArguments>(currentInstruction, iter, m_callLinkInfoIndex++); > } > >-void JIT::emitSlow_op_construct_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void 
JIT::emitSlow_op_construct_varargs(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { >- compileOpCallSlowCase(op_construct_varargs, currentInstruction, iter, m_callLinkInfoIndex++); >+ compileOpCallSlowCase<OpConstructVarargs>(currentInstruction, iter, m_callLinkInfoIndex++); > } > >-void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_construct(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { >- compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++); >+ compileOpCallSlowCase<OpConstruct>(currentInstruction, iter, m_callLinkInfoIndex++); > } > > } // namespace JSC >diff --git a/Source/JavaScriptCore/jit/JITExceptions.cpp b/Source/JavaScriptCore/jit/JITExceptions.cpp >index 055945f795217c1aff7c259edf2f1f89ad239c76..0b9c2ffba93f42a58c60975a65b573f1b3f3f5e2 100644 >--- a/Source/JavaScriptCore/jit/JITExceptions.cpp >+++ b/Source/JavaScriptCore/jit/JITExceptions.cpp >@@ -66,7 +66,7 @@ void genericUnwind(VM* vm, ExecState* callFrame, UnwindStart unwindStart) > HandlerInfo* handler = vm->interpreter->unwind(*vm, callFrame, exception, unwindStart); // This may update callFrame. > > void* catchRoutine; >- Instruction* catchPCForInterpreter = 0; >+ const Instruction* catchPCForInterpreter = nullptr; > if (handler) { > // handler->target is meaningless for getting a code offset when catching > // the exception in a DFG/FTL frame. This bytecode target offset could be >@@ -75,7 +75,7 @@ void genericUnwind(VM* vm, ExecState* callFrame, UnwindStart unwindStart) > // and can cause an overflow. OSR exit properly exits to handler->target > // in the proper frame. 
> if (!JITCode::isOptimizingJIT(callFrame->codeBlock()->jitType())) >- catchPCForInterpreter = &callFrame->codeBlock()->instructions()[handler->target]; >+ catchPCForInterpreter = callFrame->codeBlock()->instructions().at(handler->target).ptr(); > #if ENABLE(JIT) > catchRoutine = handler->nativeCode.executableAddress(); > #else >diff --git a/Source/JavaScriptCore/jit/JITInlines.h b/Source/JavaScriptCore/jit/JITInlines.h >index 2bb67f13bede04d754b92c576af8a9fa0397c384..cd0c5ba265430ab257ec4a7962fb6a4cf7d05da4 100644 >--- a/Source/JavaScriptCore/jit/JITInlines.h >+++ b/Source/JavaScriptCore/jit/JITInlines.h >@@ -31,7 +31,7 @@ > > namespace JSC { > >-inline MacroAssembler::JumpList JIT::emitDoubleGetByVal(Instruction* instruction, PatchableJump& badType) >+inline MacroAssembler::JumpList JIT::emitDoubleGetByVal(const Instruction* instruction, PatchableJump& badType) > { > #if USE(JSVALUE64) > JSValueRegs result = JSValueRegs(regT0); >@@ -43,7 +43,7 @@ inline MacroAssembler::JumpList JIT::emitDoubleGetByVal(Instruction* instruction > return slowCases; > } > >-ALWAYS_INLINE MacroAssembler::JumpList JIT::emitLoadForArrayMode(Instruction* currentInstruction, JITArrayMode arrayMode, PatchableJump& badType) >+ALWAYS_INLINE MacroAssembler::JumpList JIT::emitLoadForArrayMode(const Instruction* currentInstruction, JITArrayMode arrayMode, PatchableJump& badType) > { > switch (arrayMode) { > case JITInt32: >@@ -61,12 +61,12 @@ ALWAYS_INLINE MacroAssembler::JumpList JIT::emitLoadForArrayMode(Instruction* cu > return MacroAssembler::JumpList(); > } > >-inline MacroAssembler::JumpList JIT::emitContiguousGetByVal(Instruction* instruction, PatchableJump& badType, IndexingType expectedShape) >+inline MacroAssembler::JumpList JIT::emitContiguousGetByVal(const Instruction* instruction, PatchableJump& badType, IndexingType expectedShape) > { > return emitContiguousLoad(instruction, badType, expectedShape); > } > >-inline MacroAssembler::JumpList JIT::emitArrayStorageGetByVal(Instruction* 
instruction, PatchableJump& badType) >+inline MacroAssembler::JumpList JIT::emitArrayStorageGetByVal(const Instruction* instruction, PatchableJump& badType) > { > return emitArrayStorageLoad(instruction, badType); > } >@@ -181,10 +181,11 @@ ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckSetJSValueRe > return call; > } > >-ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckSetJSValueResultWithProfile(const FunctionPtr<CFunctionPtrTag> function, int dst) >+template<typename Metadata> >+ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckSetJSValueResultWithProfile(Metadata& metadata, const FunctionPtr<CFunctionPtrTag> function, int dst) > { > MacroAssembler::Call call = appendCallWithExceptionCheck(function); >- emitValueProfilingSite(); >+ emitValueProfilingSite(metadata); > #if USE(JSVALUE64) > emitPutVirtualRegister(dst, returnValueGPR); > #else >@@ -275,13 +276,13 @@ ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, int32_t coun > > #if ENABLE(OPCODE_SAMPLING) > #if CPU(X86_64) >-ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction) >+ALWAYS_INLINE void JIT::sampleInstruction(const Instruction* instruction, bool inHostFunction) > { > move(TrustedImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx); > storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx); > } > #else >-ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction) >+ALWAYS_INLINE void JIT::sampleInstruction(const Instruction* instruction, bool inHostFunction) > { > storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot()); > } >@@ -328,16 +329,10 @@ inline void JIT::emitValueProfilingSite(ValueProfile& valueProfile) > #endif > } > >-inline void JIT::emitValueProfilingSite(unsigned bytecodeOffset) >+template<typename 
Metadata> >+inline void JIT::emitValueProfilingSite(Metadata& metadata) > { >- if (!shouldEmitProfiling()) >- return; >- emitValueProfilingSite(m_codeBlock->valueProfileForBytecodeOffset(bytecodeOffset)); >-} >- >-inline void JIT::emitValueProfilingSite() >-{ >- emitValueProfilingSite(m_bytecodeOffset); >+ emitValueProfilingSite(metadata.profile); > } > > inline void JIT::emitArrayProfilingSiteWithCell(RegisterID cell, RegisterID indexingType, ArrayProfile* arrayProfile) >@@ -701,9 +696,11 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotNumber(RegisterID reg) > addSlowCase(branchIfNotNumber(reg)); > } > >-inline Instruction* JIT::copiedInstruction(Instruction* inst) >+// TODO: does this still make sense? >+inline const Instruction* JIT::copiedInstruction(const Instruction* inst) > { >- return &m_instructions[m_codeBlock->bytecodeOffset(inst)]; >+ return inst; >+ //return &m_instructions[m_codeBlock->bytecodeOffset(inst)]; > } > > #endif // USE(JSVALUE32_64) >diff --git a/Source/JavaScriptCore/jit/JITMathIC.h b/Source/JavaScriptCore/jit/JITMathIC.h >index 4e00958d19f8f308b390f6ab7cbcde42d04c9baa..d82a2f3d12ceae6ba03fa787d5ada4dff513bba1 100644 >--- a/Source/JavaScriptCore/jit/JITMathIC.h >+++ b/Source/JavaScriptCore/jit/JITMathIC.h >@@ -56,7 +56,7 @@ template <typename GeneratorType, bool(*isProfileEmpty)(ArithProfile&)> > class JITMathIC { > WTF_MAKE_FAST_ALLOCATED; > public: >- JITMathIC(ArithProfile* arithProfile, Instruction* instruction) >+ JITMathIC(ArithProfile* arithProfile, const Instruction* instruction) > : m_arithProfile(arithProfile) > , m_instruction(instruction) > { >@@ -235,7 +235,7 @@ public: > } > > ArithProfile* arithProfile() const { return m_arithProfile; } >- Instruction* instruction() const { return m_instruction; } >+ const Instruction* instruction() const { return m_instruction; } > > #if ENABLE(MATH_IC_STATS) > size_t m_generatedCodeSize { 0 }; >@@ -249,7 +249,7 @@ public: > #endif > > ArithProfile* m_arithProfile; >- Instruction* 
m_instruction; >+ const Instruction* m_instruction; > MacroAssemblerCodeRef<JITStubRoutinePtrTag> m_code; > CodeLocationLabel<JSInternalPtrTag> m_inlineStart; > int32_t m_inlineSize; >@@ -266,7 +266,7 @@ inline bool isBinaryProfileEmpty(ArithProfile& arithProfile) > template <typename GeneratorType> > class JITBinaryMathIC : public JITMathIC<GeneratorType, isBinaryProfileEmpty> { > public: >- JITBinaryMathIC(ArithProfile* arithProfile, Instruction* instruction) >+ JITBinaryMathIC(ArithProfile* arithProfile, const Instruction* instruction) > : JITMathIC<GeneratorType, isBinaryProfileEmpty>(arithProfile, instruction) > { > } >@@ -284,7 +284,7 @@ inline bool isUnaryProfileEmpty(ArithProfile& arithProfile) > template <typename GeneratorType> > class JITUnaryMathIC : public JITMathIC<GeneratorType, isUnaryProfileEmpty> { > public: >- JITUnaryMathIC(ArithProfile* arithProfile, Instruction* instruction) >+ JITUnaryMathIC(ArithProfile* arithProfile, const Instruction* instruction) > : JITMathIC<GeneratorType, isUnaryProfileEmpty>(arithProfile, instruction) > { > } >diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp >index 6c39fce009cd5b3a2c0541615e0cb5e5dd79eee7..b75e3e4273fe1355b6b927c800abcead5aef86c2 100644 >--- a/Source/JavaScriptCore/jit/JITOpcodes.cpp >+++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp >@@ -52,10 +52,11 @@ namespace JSC { > > #if USE(JSVALUE64) > >-void JIT::emit_op_mov(Instruction* currentInstruction) >+void JIT::emit_op_mov(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int src = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpMov>(); >+ int dst = bytecode.dst.offset(); >+ int src = bytecode.src.offset(); > > if (m_codeBlock->isConstantRegisterIndex(src)) { > JSValue value = m_codeBlock->getConstant(src); >@@ -71,24 +72,28 @@ void JIT::emit_op_mov(Instruction* currentInstruction) > } > > >-void JIT::emit_op_end(Instruction* 
currentInstruction) >+void JIT::emit_op_end(const Instruction* currentInstruction) > { >+ auto bytecode = currentInstruction->as<OpEnd>(); > RELEASE_ASSERT(returnValueGPR != callFrameRegister); >- emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR); >+ emitGetVirtualRegister(bytecode.value.offset(), returnValueGPR); > emitRestoreCalleeSaves(); > emitFunctionEpilogue(); > ret(); > } > >-void JIT::emit_op_jmp(Instruction* currentInstruction) >+void JIT::emit_op_jmp(const Instruction* currentInstruction) > { >- unsigned target = currentInstruction[1].u.operand; >+ auto bytecode = currentInstruction->as<OpJmp>(); >+ unsigned target = bytecode.target; > addJump(jump(), target); > } > >-void JIT::emit_op_new_object(Instruction* currentInstruction) >+void JIT::emit_op_new_object(const Instruction* currentInstruction) > { >- Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure(); >+ auto bytecode = currentInstruction->as<OpNewObject>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ Structure* structure = metadata.allocationProfile.structure(); > size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity()); > Allocator allocator = subspaceFor<JSFinalObject>(*m_vm)->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists); > >@@ -104,26 +109,28 @@ void JIT::emit_op_new_object(Instruction* currentInstruction) > emitAllocateJSObject(resultReg, JITAllocator::constant(allocator), allocatorReg, TrustedImmPtr(structure), butterfly, scratchReg, slowCases); > emitInitializeInlineStorage(resultReg, structure->inlineCapacity()); > addSlowCase(slowCases); >- emitPutVirtualRegister(currentInstruction[1].u.operand); >+ emitPutVirtualRegister(bytecode.dst.offset()); > } > } > >-void JIT::emitSlow_op_new_object(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_new_object(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 
> { > linkAllSlowCases(iter); > >- int dst = currentInstruction[1].u.operand; >- Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure(); >+ auto bytecode = currentInstruction->as<OpNewObject>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ int dst = bytecode.dst.offset(); >+ Structure* structure = metadata.allocationProfile.structure(); > callOperation(operationNewObject, structure); > emitStoreCell(dst, returnValueGPR); > } > >-void JIT::emit_op_overrides_has_instance(Instruction* currentInstruction) >+void JIT::emit_op_overrides_has_instance(const Instruction* currentInstruction) > { >- auto& bytecode = *reinterpret_cast<OpOverridesHasInstance*>(currentInstruction); >- int dst = bytecode.dst(); >- int constructor = bytecode.constructor(); >- int hasInstanceValue = bytecode.hasInstanceValue(); >+ auto bytecode = currentInstruction->as<OpOverridesHasInstance>(); >+ int dst = bytecode.dst.offset(); >+ int constructor = bytecode.constructor.offset(); >+ int hasInstanceValue = bytecode.hasInstanceValue.offset(); > > emitGetVirtualRegister(hasInstanceValue, regT0); > >@@ -144,12 +151,12 @@ void JIT::emit_op_overrides_has_instance(Instruction* currentInstruction) > emitPutVirtualRegister(dst); > } > >-void JIT::emit_op_instanceof(Instruction* currentInstruction) >+void JIT::emit_op_instanceof(const Instruction* currentInstruction) > { >- auto& bytecode = *reinterpret_cast<OpInstanceof*>(currentInstruction); >- int dst = bytecode.dst(); >- int value = bytecode.value(); >- int proto = bytecode.prototype(); >+ auto bytecode = currentInstruction->as<OpInstanceof>(); >+ int dst = bytecode.dst.offset(); >+ int value = bytecode.value.offset(); >+ int proto = bytecode.prototype.offset(); > > // Load the operands (baseVal, proto, and value respectively) into registers. > // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result. 
>@@ -173,11 +180,12 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction) > emitPutVirtualRegister(dst); > } > >-void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_instanceof(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > >- int resultVReg = currentInstruction[1].u.operand; >+ auto bytecode = currentInstruction->as<OpInstanceof>(); >+ int resultVReg = bytecode.dst.offset(); > > JITInstanceOfGenerator& gen = m_instanceOfs[m_instanceOfIndex++]; > >@@ -186,16 +194,17 @@ void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCas > gen.reportSlowPathCall(coldPathBegin, call); > } > >-void JIT::emit_op_instanceof_custom(Instruction*) >+void JIT::emit_op_instanceof_custom(const Instruction*) > { > // This always goes to slow path since we expect it to be rare. > addSlowCase(jump()); > } > >-void JIT::emit_op_is_empty(Instruction* currentInstruction) >+void JIT::emit_op_is_empty(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int value = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpIsEmpty>(); >+ int dst = bytecode.dst.offset(); >+ int value = bytecode.operand.offset(); > > emitGetVirtualRegister(value, regT0); > compare64(Equal, regT0, TrustedImm32(JSValue::encode(JSValue())), regT0); >@@ -204,10 +213,11 @@ void JIT::emit_op_is_empty(Instruction* currentInstruction) > emitPutVirtualRegister(dst); > } > >-void JIT::emit_op_is_undefined(Instruction* currentInstruction) >+void JIT::emit_op_is_undefined(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int value = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpIsUndefined>(); >+ int dst = bytecode.dst.offset(); >+ int value = bytecode.operand.offset(); > > emitGetVirtualRegister(value, regT0); > Jump isCell = 
branchIfCell(regT0); >@@ -232,10 +242,11 @@ void JIT::emit_op_is_undefined(Instruction* currentInstruction) > emitPutVirtualRegister(dst); > } > >-void JIT::emit_op_is_boolean(Instruction* currentInstruction) >+void JIT::emit_op_is_boolean(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int value = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpIsBoolean>(); >+ int dst = bytecode.dst.offset(); >+ int value = bytecode.operand.offset(); > > emitGetVirtualRegister(value, regT0); > xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0); >@@ -244,10 +255,11 @@ void JIT::emit_op_is_boolean(Instruction* currentInstruction) > emitPutVirtualRegister(dst); > } > >-void JIT::emit_op_is_number(Instruction* currentInstruction) >+void JIT::emit_op_is_number(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int value = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpIsNumber>(); >+ int dst = bytecode.dst.offset(); >+ int value = bytecode.operand.offset(); > > emitGetVirtualRegister(value, regT0); > test64(NonZero, regT0, tagTypeNumberRegister, regT0); >@@ -255,11 +267,12 @@ void JIT::emit_op_is_number(Instruction* currentInstruction) > emitPutVirtualRegister(dst); > } > >-void JIT::emit_op_is_cell_with_type(Instruction* currentInstruction) >+void JIT::emit_op_is_cell_with_type(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int value = currentInstruction[2].u.operand; >- int type = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpIsCellWithType>(); >+ int dst = bytecode.dst.offset(); >+ int value = bytecode.operand.offset(); >+ int type = bytecode.type; > > emitGetVirtualRegister(value, regT0); > Jump isNotCell = branchIfNotCell(regT0); >@@ -275,10 +288,11 @@ void JIT::emit_op_is_cell_with_type(Instruction* currentInstruction) > emitPutVirtualRegister(dst); > } 
> >-void JIT::emit_op_is_object(Instruction* currentInstruction) >+void JIT::emit_op_is_object(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int value = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpIsObject>(); >+ int dst = bytecode.dst.offset(); >+ int value = bytecode.operand.offset(); > > emitGetVirtualRegister(value, regT0); > Jump isNotCell = branchIfNotCell(regT0); >@@ -294,14 +308,15 @@ void JIT::emit_op_is_object(Instruction* currentInstruction) > emitPutVirtualRegister(dst); > } > >-void JIT::emit_op_ret(Instruction* currentInstruction) >+void JIT::emit_op_ret(const Instruction* currentInstruction) > { > ASSERT(callFrameRegister != regT1); > ASSERT(regT1 != returnValueGPR); > ASSERT(returnValueGPR != callFrameRegister); > > // Return the result in %eax. >- emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR); >+ auto bytecode = currentInstruction->as<OpRet>(); >+ emitGetVirtualRegister(bytecode.value.offset(), returnValueGPR); > > checkStackPointerAlignment(); > emitRestoreCalleeSaves(); >@@ -309,10 +324,11 @@ void JIT::emit_op_ret(Instruction* currentInstruction) > ret(); > } > >-void JIT::emit_op_to_primitive(Instruction* currentInstruction) >+void JIT::emit_op_to_primitive(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int src = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpToPrimitive>(); >+ int dst = bytecode.dst.offset(); >+ int src = bytecode.src.offset(); > > emitGetVirtualRegister(src, regT0); > >@@ -325,16 +341,18 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction) > > } > >-void JIT::emit_op_set_function_name(Instruction* currentInstruction) >+void JIT::emit_op_set_function_name(const Instruction* currentInstruction) > { >- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); >- emitGetVirtualRegister(currentInstruction[2].u.operand, regT1); >+ auto 
bytecode = currentInstruction->as<OpSetFunctionName>(); >+ emitGetVirtualRegister(bytecode.function.offset(), regT0); >+ emitGetVirtualRegister(bytecode.name.offset(), regT1); > callOperation(operationSetFunctionName, regT0, regT1); > } > >-void JIT::emit_op_not(Instruction* currentInstruction) >+void JIT::emit_op_not(const Instruction* currentInstruction) > { >- emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); >+ auto bytecode = currentInstruction->as<OpNot>(); >+ emitGetVirtualRegister(bytecode.operand.offset(), regT0); > > // Invert against JSValue(false); if the value was tagged as a boolean, then all bits will be > // clear other than the low bit (which will be 0 or 1 for false or true inputs respectively). >@@ -343,26 +361,28 @@ void JIT::emit_op_not(Instruction* currentInstruction) > addSlowCase(branchTestPtr(NonZero, regT0, TrustedImm32(static_cast<int32_t>(~1)))); > xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), regT0); > >- emitPutVirtualRegister(currentInstruction[1].u.operand); >+ emitPutVirtualRegister(bytecode.dst.offset()); > } > >-void JIT::emit_op_jfalse(Instruction* currentInstruction) >+void JIT::emit_op_jfalse(const Instruction* currentInstruction) > { >- unsigned target = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpJfalse>(); >+ unsigned target = bytecode.target; > > GPRReg value = regT0; > GPRReg scratch1 = regT1; > GPRReg scratch2 = regT2; > bool shouldCheckMasqueradesAsUndefined = true; > >- emitGetVirtualRegister(currentInstruction[1].u.operand, value); >+ emitGetVirtualRegister(bytecode.condition.offset(), value); > addJump(branchIfFalsey(*vm(), JSValueRegs(value), scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()), target); > } > >-void JIT::emit_op_jeq_null(Instruction* currentInstruction) >+void JIT::emit_op_jeq_null(const Instruction* currentInstruction) > { >- int src = currentInstruction[1].u.operand; >- unsigned target = 
currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpJeqNull>(); >+ int src = bytecode.condition.offset(); >+ unsigned target = bytecode.target; > > emitGetVirtualRegister(src, regT0); > Jump isImmediate = branchIfNotCell(regT0); >@@ -382,10 +402,11 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction) > isNotMasqueradesAsUndefined.link(this); > masqueradesGlobalObjectIsForeign.link(this); > }; >-void JIT::emit_op_jneq_null(Instruction* currentInstruction) >+void JIT::emit_op_jneq_null(const Instruction* currentInstruction) > { >- int src = currentInstruction[1].u.operand; >- unsigned target = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpJneqNull>(); >+ int src = bytecode.condition.offset(); >+ unsigned target = bytecode.target; > > emitGetVirtualRegister(src, regT0); > Jump isImmediate = branchIfNotCell(regT0); >@@ -405,80 +426,90 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction) > wasNotImmediate.link(this); > } > >-void JIT::emit_op_jneq_ptr(Instruction* currentInstruction) >+void JIT::emit_op_jneq_ptr(const Instruction* currentInstruction) > { >- int src = currentInstruction[1].u.operand; >- Special::Pointer ptr = currentInstruction[2].u.specialPointer; >- unsigned target = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpJneqPtr>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ int src = bytecode.condition.offset(); >+ Special::Pointer ptr = bytecode.specialPointer; >+ unsigned target = bytecode.target; > > emitGetVirtualRegister(src, regT0); > CCallHelpers::Jump equal = branchPtr(Equal, regT0, TrustedImmPtr(actualPointerFor(m_codeBlock, ptr))); >- store32(TrustedImm32(1), &currentInstruction[4].u.operand); >+ store32(TrustedImm32(1), &metadata.hasJumped); > addJump(jump(), target); > equal.link(this); > } > >-void JIT::emit_op_eq(Instruction* currentInstruction) >+void JIT::emit_op_eq(const Instruction* currentInstruction) > { >- 
emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1); >+ auto bytecode = currentInstruction->as<OpEq>(); >+ emitGetVirtualRegisters(bytecode.lhs.offset(), regT0, bytecode.rhs.offset(), regT1); > emitJumpSlowCaseIfNotInt(regT0, regT1, regT2); > compare32(Equal, regT1, regT0, regT0); > boxBoolean(regT0, JSValueRegs { regT0 }); >- emitPutVirtualRegister(currentInstruction[1].u.operand); >+ emitPutVirtualRegister(bytecode.dst.offset()); > } > >-void JIT::emit_op_jeq(Instruction* currentInstruction) >+void JIT::emit_op_jeq(const Instruction* currentInstruction) > { >- unsigned target = currentInstruction[3].u.operand; >- emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1); >+ auto bytecode = currentInstruction->as<OpJeq>(); >+ unsigned target = bytecode.target; >+ emitGetVirtualRegisters(bytecode.lhs.offset(), regT0, bytecode.rhs.offset(), regT1); > emitJumpSlowCaseIfNotInt(regT0, regT1, regT2); > addJump(branch32(Equal, regT0, regT1), target); > } > >-void JIT::emit_op_jtrue(Instruction* currentInstruction) >+void JIT::emit_op_jtrue(const Instruction* currentInstruction) > { >- unsigned target = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpJtrue>(); >+ unsigned target = bytecode.target; > > GPRReg value = regT0; > GPRReg scratch1 = regT1; > GPRReg scratch2 = regT2; > bool shouldCheckMasqueradesAsUndefined = true; >- emitGetVirtualRegister(currentInstruction[1].u.operand, value); >+ emitGetVirtualRegister(bytecode.condition.offset(), value); > addJump(branchIfTruthy(*vm(), JSValueRegs(value), scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()), target); > } > >-void JIT::emit_op_neq(Instruction* currentInstruction) >+void JIT::emit_op_neq(const Instruction* currentInstruction) > { >- emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1); 
>+ auto bytecode = currentInstruction->as<OpNeq>(); >+ emitGetVirtualRegisters(bytecode.lhs.offset(), regT0, bytecode.rhs.offset(), regT1); > emitJumpSlowCaseIfNotInt(regT0, regT1, regT2); > compare32(NotEqual, regT1, regT0, regT0); > boxBoolean(regT0, JSValueRegs { regT0 }); > >- emitPutVirtualRegister(currentInstruction[1].u.operand); >+ emitPutVirtualRegister(bytecode.dst.offset()); > } > >-void JIT::emit_op_jneq(Instruction* currentInstruction) >+void JIT::emit_op_jneq(const Instruction* currentInstruction) > { >- unsigned target = currentInstruction[3].u.operand; >- emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1); >+ auto bytecode = currentInstruction->as<OpJneq>(); >+ unsigned target = bytecode.target; >+ emitGetVirtualRegisters(bytecode.lhs.offset(), regT0, bytecode.rhs.offset(), regT1); > emitJumpSlowCaseIfNotInt(regT0, regT1, regT2); > addJump(branch32(NotEqual, regT0, regT1), target); > } > >-void JIT::emit_op_throw(Instruction* currentInstruction) >+void JIT::emit_op_throw(const Instruction* currentInstruction) > { >+ auto bytecode = currentInstruction->as<OpThrow>(); > ASSERT(regT0 == returnValueGPR); > copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame); >- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); >+ emitGetVirtualRegister(bytecode.value.offset(), regT0); > callOperationNoExceptionCheck(operationThrow, regT0); > jumpToExceptionHandler(*vm()); > } > >-void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type) >+template<typename Op> >+void JIT::compileOpStrictEq(const Instruction* currentInstruction, CompileOpStrictEqType type) > { >- int dst = currentInstruction[1].u.operand; >- int src1 = currentInstruction[2].u.operand; >- int src2 = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<Op>(); >+ int dst = bytecode.dst.offset(); >+ int src1 = bytecode.lhs.offset(); >+ int src2 = bytecode.rhs.offset(); > > 
emitGetVirtualRegisters(src1, regT0, src2, regT1); > >@@ -505,21 +536,23 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy > emitPutVirtualRegister(dst); > } > >-void JIT::emit_op_stricteq(Instruction* currentInstruction) >+void JIT::emit_op_stricteq(const Instruction* currentInstruction) > { >- compileOpStrictEq(currentInstruction, CompileOpStrictEqType::StrictEq); >+ compileOpStrictEq<OpStricteq>(currentInstruction, CompileOpStrictEqType::StrictEq); > } > >-void JIT::emit_op_nstricteq(Instruction* currentInstruction) >+void JIT::emit_op_nstricteq(const Instruction* currentInstruction) > { >- compileOpStrictEq(currentInstruction, CompileOpStrictEqType::NStrictEq); >+ compileOpStrictEq<OpNstricteq>(currentInstruction, CompileOpStrictEqType::NStrictEq); > } > >-void JIT::compileOpStrictEqJump(Instruction* currentInstruction, CompileOpStrictEqType type) >+template<typename Op> >+void JIT::compileOpStrictEqJump(const Instruction* currentInstruction, CompileOpStrictEqType type) > { >- int target = currentInstruction[3].u.operand; >- int src1 = currentInstruction[1].u.operand; >- int src2 = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<Op>(); >+ int target = bytecode.target; >+ int src1 = bytecode.lhs.offset(); >+ int src2 = bytecode.rhs.offset(); > > emitGetVirtualRegisters(src1, regT0, src2, regT1); > >@@ -543,74 +576,82 @@ void JIT::compileOpStrictEqJump(Instruction* currentInstruction, CompileOpStrict > addJump(branch64(NotEqual, regT1, regT0), target); > } > >-void JIT::emit_op_jstricteq(Instruction* currentInstruction) >+void JIT::emit_op_jstricteq(const Instruction* currentInstruction) > { >- compileOpStrictEqJump(currentInstruction, CompileOpStrictEqType::StrictEq); >+ compileOpStrictEqJump<OpJstricteq>(currentInstruction, CompileOpStrictEqType::StrictEq); > } > >-void JIT::emit_op_jnstricteq(Instruction* currentInstruction) >+void JIT::emit_op_jnstricteq(const Instruction* currentInstruction) > { >- 
compileOpStrictEqJump(currentInstruction, CompileOpStrictEqType::NStrictEq); >+ compileOpStrictEqJump<OpJnstricteq>(currentInstruction, CompileOpStrictEqType::NStrictEq); > } > >-void JIT::emitSlow_op_jstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_jstricteq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > >- unsigned target = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpJstricteq>(); >+ unsigned target = bytecode.target; > callOperation(operationCompareStrictEq, regT0, regT1); > emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), target); > } > >-void JIT::emitSlow_op_jnstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_jnstricteq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > >- unsigned target = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpJnstricteq>(); >+ unsigned target = bytecode.target; > callOperation(operationCompareStrictEq, regT0, regT1); > emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), target); > } > >-void JIT::emit_op_to_number(Instruction* currentInstruction) >+void JIT::emit_op_to_number(const Instruction* currentInstruction) > { >- int dstVReg = currentInstruction[1].u.operand; >- int srcVReg = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpToNumber>(); >+ int dstVReg = bytecode.dst.offset(); >+ int srcVReg = bytecode.operand.offset(); > emitGetVirtualRegister(srcVReg, regT0); > > addSlowCase(branchIfNotNumber(regT0)); > >- emitValueProfilingSite(); >+ emitValueProfilingSite(bytecode.metadata(m_codeBlock)); > if (srcVReg != dstVReg) > emitPutVirtualRegister(dstVReg); > } > >-void JIT::emit_op_to_string(Instruction* currentInstruction) >+void JIT::emit_op_to_string(const Instruction* currentInstruction) > { >- 
int srcVReg = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpToString>(); >+ int srcVReg = bytecode.operand.offset(); > emitGetVirtualRegister(srcVReg, regT0); > > addSlowCase(branchIfNotCell(regT0)); > addSlowCase(branchIfNotString(regT0)); > >- emitPutVirtualRegister(currentInstruction[1].u.operand); >+ emitPutVirtualRegister(bytecode.dst.offset()); > } > >-void JIT::emit_op_to_object(Instruction* currentInstruction) >+void JIT::emit_op_to_object(const Instruction* currentInstruction) > { >- int dstVReg = currentInstruction[1].u.operand; >- int srcVReg = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpToObject>(); >+ int dstVReg = bytecode.dst.offset(); >+ int srcVReg = bytecode.operand.offset(); > emitGetVirtualRegister(srcVReg, regT0); > > addSlowCase(branchIfNotCell(regT0)); > addSlowCase(branchIfNotObject(regT0)); > >- emitValueProfilingSite(); >+ emitValueProfilingSite(bytecode.metadata(m_codeBlock)); > if (srcVReg != dstVReg) > emitPutVirtualRegister(dstVReg); > } > >-void JIT::emit_op_catch(Instruction* currentInstruction) >+void JIT::emit_op_catch(const Instruction* currentInstruction) > { >+ auto bytecode = currentInstruction->as<OpCatch>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ > restoreCalleeSavesFromEntryFrameCalleeSavesBuffer(vm()->topEntryFrame); > > move(TrustedImmPtr(m_vm), regT3); >@@ -627,17 +668,17 @@ void JIT::emit_op_catch(Instruction* currentInstruction) > move(TrustedImmPtr(m_vm), regT3); > load64(Address(regT3, VM::exceptionOffset()), regT0); > store64(TrustedImm64(JSValue::encode(JSValue())), Address(regT3, VM::exceptionOffset())); >- emitPutVirtualRegister(currentInstruction[1].u.operand); >+ emitPutVirtualRegister(bytecode.exception.offset()); > > load64(Address(regT0, Exception::valueOffset()), regT0); >- emitPutVirtualRegister(currentInstruction[2].u.operand); >+ emitPutVirtualRegister(bytecode.thrownValue.offset()); > > #if ENABLE(DFG_JIT) > // FIXME: 
consider inline caching the process of doing OSR entry, including > // argument type proofs, storing locals to the buffer, etc > // https://bugs.webkit.org/show_bug.cgi?id=175598 > >- ValueProfileAndOperandBuffer* buffer = static_cast<ValueProfileAndOperandBuffer*>(currentInstruction[3].u.pointer); >+ ValueProfileAndOperandBuffer* buffer = metadata.buffer; > if (buffer || !shouldEmitProfiling()) > callOperation(operationTryOSREnterAtCatch, m_bytecodeOffset); > else >@@ -656,24 +697,26 @@ void JIT::emit_op_catch(Instruction* currentInstruction) > #endif // ENABLE(DFG_JIT) > } > >-void JIT::emit_op_identity_with_profile(Instruction*) >+void JIT::emit_op_identity_with_profile(const Instruction*) > { > // We don't need to do anything here... > } > >-void JIT::emit_op_get_parent_scope(Instruction* currentInstruction) >+void JIT::emit_op_get_parent_scope(const Instruction* currentInstruction) > { >- int currentScope = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpGetParentScope>(); >+ int currentScope = bytecode.scope.offset(); > emitGetVirtualRegister(currentScope, regT0); > loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0); >- emitStoreCell(currentInstruction[1].u.operand, regT0); >+ emitStoreCell(bytecode.dst.offset(), regT0); > } > >-void JIT::emit_op_switch_imm(Instruction* currentInstruction) >+void JIT::emit_op_switch_imm(const Instruction* currentInstruction) > { >- size_t tableIndex = currentInstruction[1].u.operand; >- unsigned defaultOffset = currentInstruction[2].u.operand; >- unsigned scrutinee = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpSwitchImm>(); >+ size_t tableIndex = bytecode.tableIndex; >+ unsigned defaultOffset = bytecode.defaultOffset; >+ unsigned scrutinee = bytecode.scrutinee.offset(); > > // create jump table for switch destinations, track this switch statement. 
> SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex); >@@ -685,11 +728,12 @@ void JIT::emit_op_switch_imm(Instruction* currentInstruction) > jump(returnValueGPR, JSSwitchPtrTag); > } > >-void JIT::emit_op_switch_char(Instruction* currentInstruction) >+void JIT::emit_op_switch_char(const Instruction* currentInstruction) > { >- size_t tableIndex = currentInstruction[1].u.operand; >- unsigned defaultOffset = currentInstruction[2].u.operand; >- unsigned scrutinee = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpSwitchChar>(); >+ size_t tableIndex = bytecode.tableIndex; >+ unsigned defaultOffset = bytecode.defaultOffset; >+ unsigned scrutinee = bytecode.scrutinee.offset(); > > // create jump table for switch destinations, track this switch statement. > SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex); >@@ -701,11 +745,12 @@ void JIT::emit_op_switch_char(Instruction* currentInstruction) > jump(returnValueGPR, JSSwitchPtrTag); > } > >-void JIT::emit_op_switch_string(Instruction* currentInstruction) >+void JIT::emit_op_switch_string(const Instruction* currentInstruction) > { >- size_t tableIndex = currentInstruction[1].u.operand; >- unsigned defaultOffset = currentInstruction[2].u.operand; >- unsigned scrutinee = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpSwitchString>(); >+ size_t tableIndex = bytecode.tableIndex; >+ unsigned defaultOffset = bytecode.defaultOffset; >+ unsigned scrutinee = bytecode.scrutinee.offset(); > > // create jump table for switch destinations, track this switch statement. 
> StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex); >@@ -716,18 +761,20 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction) > jump(returnValueGPR, JSSwitchPtrTag); > } > >-void JIT::emit_op_debug(Instruction* currentInstruction) >+void JIT::emit_op_debug(const Instruction* currentInstruction) > { >+ auto bytecode = currentInstruction->as<OpDebug>(); > load32(codeBlock()->debuggerRequestsAddress(), regT0); > Jump noDebuggerRequests = branchTest32(Zero, regT0); >- callOperation(operationDebug, currentInstruction[1].u.operand); >+ callOperation(operationDebug, static_cast<int>(bytecode.debugHookType)); > noDebuggerRequests.link(this); > } > >-void JIT::emit_op_eq_null(Instruction* currentInstruction) >+void JIT::emit_op_eq_null(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int src1 = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpEqNull>(); >+ int dst = bytecode.dst.offset(); >+ int src1 = bytecode.operand.offset(); > > emitGetVirtualRegister(src1, regT0); > Jump isImmediate = branchIfNotCell(regT0); >@@ -756,10 +803,11 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction) > > } > >-void JIT::emit_op_neq_null(Instruction* currentInstruction) >+void JIT::emit_op_neq_null(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int src1 = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpNeqNull>(); >+ int dst = bytecode.dst.offset(); >+ int src1 = bytecode.operand.offset(); > > emitGetVirtualRegister(src1, regT0); > Jump isImmediate = branchIfNotCell(regT0); >@@ -787,7 +835,7 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction) > emitPutVirtualRegister(dst); > } > >-void JIT::emit_op_enter(Instruction*) >+void JIT::emit_op_enter(const Instruction*) > { > // Even though CTI doesn't use them, we initialize our constant > // registers to zap stale pointers, to avoid 
unnecessarily prolonging >@@ -801,18 +849,21 @@ void JIT::emit_op_enter(Instruction*) > emitEnterOptimizationCheck(); > } > >-void JIT::emit_op_get_scope(Instruction* currentInstruction) >+void JIT::emit_op_get_scope(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >+ auto bytecode = currentInstruction->as<OpGetScope>(); >+ int dst = bytecode.dst.offset(); > emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, regT0); > loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0); > emitStoreCell(dst, regT0); > } > >-void JIT::emit_op_to_this(Instruction* currentInstruction) >+void JIT::emit_op_to_this(const Instruction* currentInstruction) > { >- WriteBarrierBase<Structure>* cachedStructure = &currentInstruction[2].u.structure; >- emitGetVirtualRegister(currentInstruction[1].u.operand, regT1); >+ auto bytecode = currentInstruction->as<OpToThis>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ WriteBarrierBase<Structure>* cachedStructure = &metadata.cachedStructure; >+ emitGetVirtualRegister(bytecode.srcDst.offset(), regT1); > > emitJumpSlowCaseIfNotJSCell(regT1); > >@@ -823,10 +874,12 @@ void JIT::emit_op_to_this(Instruction* currentInstruction) > addSlowCase(branch32(NotEqual, Address(regT1, JSCell::structureIDOffset()), regT2)); > } > >-void JIT::emit_op_create_this(Instruction* currentInstruction) >+void JIT::emit_op_create_this(const Instruction* currentInstruction) > { >- int callee = currentInstruction[2].u.operand; >- WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[4].u.jsCell; >+ auto bytecode = currentInstruction->as<OpCreateThis>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ int callee = bytecode.callee.offset(); >+ WriteBarrierBase<JSCell>* cachedFunction = &metadata.cachedCallee; > RegisterID calleeReg = regT0; > RegisterID rareDataReg = regT4; > RegisterID resultReg = regT0; >@@ -857,64 +910,69 @@ void JIT::emit_op_create_this(Instruction* currentInstruction) > 
load32(Address(scratchReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfInlineCapacity()), scratchReg); > emitInitializeInlineStorage(resultReg, scratchReg); > addSlowCase(slowCases); >- emitPutVirtualRegister(currentInstruction[1].u.operand); >+ emitPutVirtualRegister(bytecode.dst.offset()); > } > >-void JIT::emit_op_check_tdz(Instruction* currentInstruction) >+void JIT::emit_op_check_tdz(const Instruction* currentInstruction) > { >- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); >+ auto bytecode = currentInstruction->as<OpCheckTdz>(); >+ emitGetVirtualRegister(bytecode.target.offset(), regT0); > addSlowCase(branchIfEmpty(regT0)); > } > > > // Slow cases > >-void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_eq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > >+ auto bytecode = currentInstruction->as<OpEq>(); > callOperation(operationCompareEq, regT0, regT1); > boxBoolean(returnValueGPR, JSValueRegs { returnValueGPR }); >- emitPutVirtualRegister(currentInstruction[1].u.operand, returnValueGPR); >+ emitPutVirtualRegister(bytecode.dst.offset(), returnValueGPR); > } > >-void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_neq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > >+ auto bytecode = currentInstruction->as<OpNeq>(); > callOperation(operationCompareEq, regT0, regT1); > xor32(TrustedImm32(0x1), regT0); > boxBoolean(returnValueGPR, JSValueRegs { returnValueGPR }); >- emitPutVirtualRegister(currentInstruction[1].u.operand, returnValueGPR); >+ emitPutVirtualRegister(bytecode.dst.offset(), returnValueGPR); > } > >-void JIT::emitSlow_op_jeq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_jeq(const 
Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > >- unsigned target = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpJeq>(); >+ unsigned target = bytecode.target; > callOperation(operationCompareEq, regT0, regT1); > emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), target); > } > >-void JIT::emitSlow_op_jneq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_jneq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > >- unsigned target = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpJneq>(); >+ unsigned target = bytecode.target; > callOperation(operationCompareEq, regT0, regT1); > emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), target); > } > >-void JIT::emitSlow_op_instanceof_custom(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_instanceof_custom(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > >- auto& bytecode = *reinterpret_cast<OpInstanceofCustom*>(currentInstruction); >- int dst = bytecode.dst(); >- int value = bytecode.value(); >- int constructor = bytecode.constructor(); >- int hasInstanceValue = bytecode.hasInstanceValue(); >+ auto bytecode = currentInstruction->as<OpInstanceofCustom>(); >+ int dst = bytecode.dst.offset(); >+ int value = bytecode.value.offset(); >+ int constructor = bytecode.constructor.offset(); >+ int hasInstanceValue = bytecode.hasInstanceValue.offset(); > > emitGetVirtualRegister(value, regT0); > emitGetVirtualRegister(constructor, regT1); >@@ -926,7 +984,7 @@ void JIT::emitSlow_op_instanceof_custom(Instruction* currentInstruction, Vector< > > #endif // USE(JSVALUE64) > >-void JIT::emit_op_loop_hint(Instruction*) >+void JIT::emit_op_loop_hint(const Instruction*) > { > // Emit the JIT optimization check: 
> if (canBeOptimized()) { >@@ -935,7 +993,7 @@ void JIT::emit_op_loop_hint(Instruction*) > } > } > >-void JIT::emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_loop_hint(const Instruction*, Vector<SlowCaseEntry>::iterator& iter) > { > #if ENABLE(DFG_JIT) > // Emit the slow path for the JIT optimization check: >@@ -961,53 +1019,56 @@ void JIT::emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator& i > #endif > } > >-void JIT::emit_op_check_traps(Instruction*) >+void JIT::emit_op_check_traps(const Instruction*) > { > addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->needTrapHandlingAddress()))); > } > >-void JIT::emit_op_nop(Instruction*) >+void JIT::emit_op_nop(const Instruction*) > { > } > >-void JIT::emit_op_super_sampler_begin(Instruction*) >+void JIT::emit_op_super_sampler_begin(const Instruction*) > { > add32(TrustedImm32(1), AbsoluteAddress(bitwise_cast<void*>(&g_superSamplerCount))); > } > >-void JIT::emit_op_super_sampler_end(Instruction*) >+void JIT::emit_op_super_sampler_end(const Instruction*) > { > sub32(TrustedImm32(1), AbsoluteAddress(bitwise_cast<void*>(&g_superSamplerCount))); > } > >-void JIT::emitSlow_op_check_traps(Instruction*, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_check_traps(const Instruction*, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > > callOperation(operationHandleTraps); > } > >-void JIT::emit_op_new_regexp(Instruction* currentInstruction) >+void JIT::emit_op_new_regexp(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int regexp = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpNewRegexp>(); >+ int dst = bytecode.dst.offset(); >+ int regexp = bytecode.regexp.offset(); > callOperation(operationNewRegexp, jsCast<RegExp*>(m_codeBlock->getConstant(regexp))); > emitStoreCell(dst, returnValueGPR); > } > >-void JIT::emitNewFuncCommon(Instruction* 
currentInstruction) >+template<typename Op> >+void JIT::emitNewFuncCommon(const Instruction* currentInstruction) > { > Jump lazyJump; >- int dst = currentInstruction[1].u.operand; >+ auto bytecode = currentInstruction->as<Op>(); >+ int dst = bytecode.dst.offset(); > > #if USE(JSVALUE64) >- emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); >+ emitGetVirtualRegister(bytecode.scope.offset(), regT0); > #else >- emitLoadPayload(currentInstruction[2].u.operand, regT0); >+ emitLoadPayload(bytecode.scope.offset(), regT0); > #endif >- FunctionExecutable* funcExec = m_codeBlock->functionDecl(currentInstruction[3].u.operand); >+ FunctionExecutable* funcExec = m_codeBlock->functionDecl(bytecode.functionDecl); > >- OpcodeID opcodeID = Interpreter::getOpcodeID(currentInstruction->u.opcode); >+ OpcodeID opcodeID = Op::opcodeID(); > if (opcodeID == op_new_func) > callOperation(operationNewFunction, dst, regT0, funcExec); > else if (opcodeID == op_new_generator_func) >@@ -1020,37 +1081,39 @@ void JIT::emitNewFuncCommon(Instruction* currentInstruction) > } > } > >-void JIT::emit_op_new_func(Instruction* currentInstruction) >+void JIT::emit_op_new_func(const Instruction* currentInstruction) > { >- emitNewFuncCommon(currentInstruction); >+ emitNewFuncCommon<OpNewFunc>(currentInstruction); > } > >-void JIT::emit_op_new_generator_func(Instruction* currentInstruction) >+void JIT::emit_op_new_generator_func(const Instruction* currentInstruction) > { >- emitNewFuncCommon(currentInstruction); >+ emitNewFuncCommon<OpNewGeneratorFunc>(currentInstruction); > } > >-void JIT::emit_op_new_async_generator_func(Instruction* currentInstruction) >+void JIT::emit_op_new_async_generator_func(const Instruction* currentInstruction) > { >- emitNewFuncCommon(currentInstruction); >+ emitNewFuncCommon<OpNewAsyncGeneratorFunc>(currentInstruction); > } > >-void JIT::emit_op_new_async_func(Instruction* currentInstruction) >+void JIT::emit_op_new_async_func(const Instruction* currentInstruction) > 
{ >- emitNewFuncCommon(currentInstruction); >+ emitNewFuncCommon<OpNewAsyncFunc>(currentInstruction); > } > >-void JIT::emitNewFuncExprCommon(Instruction* currentInstruction) >+template<typename Op> >+void JIT::emitNewFuncExprCommon(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >+ auto bytecode = currentInstruction->as<Op>(); >+ int dst = bytecode.dst.offset(); > #if USE(JSVALUE64) >- emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); >+ emitGetVirtualRegister(bytecode.scope.offset(), regT0); > #else >- emitLoadPayload(currentInstruction[2].u.operand, regT0); >+ emitLoadPayload(bytecode.scope.offset(), regT0); > #endif > >- FunctionExecutable* function = m_codeBlock->functionExpr(currentInstruction[3].u.operand); >- OpcodeID opcodeID = Interpreter::getOpcodeID(currentInstruction->u.opcode); >+ FunctionExecutable* function = m_codeBlock->functionExpr(bytecode.functionDecl); >+ OpcodeID opcodeID = Op::opcodeID(); > > if (opcodeID == op_new_func_exp) > callOperation(operationNewFunction, dst, regT0, function); >@@ -1064,57 +1127,62 @@ void JIT::emitNewFuncExprCommon(Instruction* currentInstruction) > } > } > >-void JIT::emit_op_new_func_exp(Instruction* currentInstruction) >+void JIT::emit_op_new_func_exp(const Instruction* currentInstruction) > { >- emitNewFuncExprCommon(currentInstruction); >+ emitNewFuncExprCommon<OpNewFuncExp>(currentInstruction); > } > >-void JIT::emit_op_new_generator_func_exp(Instruction* currentInstruction) >+void JIT::emit_op_new_generator_func_exp(const Instruction* currentInstruction) > { >- emitNewFuncExprCommon(currentInstruction); >+ emitNewFuncExprCommon<OpNewGeneratorFuncExp>(currentInstruction); > } > >-void JIT::emit_op_new_async_func_exp(Instruction* currentInstruction) >+void JIT::emit_op_new_async_func_exp(const Instruction* currentInstruction) > { >- emitNewFuncExprCommon(currentInstruction); >+ emitNewFuncExprCommon<OpNewAsyncFuncExp>(currentInstruction); > } > >-void 
JIT::emit_op_new_async_generator_func_exp(Instruction* currentInstruction) >+void JIT::emit_op_new_async_generator_func_exp(const Instruction* currentInstruction) > { >- emitNewFuncExprCommon(currentInstruction); >+ emitNewFuncExprCommon<OpNewAsyncGeneratorFuncExp>(currentInstruction); > } > >-void JIT::emit_op_new_array(Instruction* currentInstruction) >+void JIT::emit_op_new_array(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int valuesIndex = currentInstruction[2].u.operand; >- int size = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpNewArray>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ int dst = bytecode.dst.offset(); >+ int valuesIndex = bytecode.argv.offset(); >+ int size = bytecode.argc; > addPtr(TrustedImm32(valuesIndex * sizeof(Register)), callFrameRegister, regT0); > callOperation(operationNewArrayWithProfile, dst, >- currentInstruction[4].u.arrayAllocationProfile, regT0, size); >+ &metadata.allocationProfile, regT0, size); > } > >-void JIT::emit_op_new_array_with_size(Instruction* currentInstruction) >+void JIT::emit_op_new_array_with_size(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int sizeIndex = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpNewArrayWithSize>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ int dst = bytecode.dst.offset(); >+ int sizeIndex = bytecode.length.offset(); > #if USE(JSVALUE64) > emitGetVirtualRegister(sizeIndex, regT0); > callOperation(operationNewArrayWithSizeAndProfile, dst, >- currentInstruction[3].u.arrayAllocationProfile, regT0); >+ &metadata.allocationProfile, regT0); > #else > emitLoad(sizeIndex, regT1, regT0); > callOperation(operationNewArrayWithSizeAndProfile, dst, >- currentInstruction[3].u.arrayAllocationProfile, JSValueRegs(regT1, regT0)); >+ &metadata.allocationProfile, JSValueRegs(regT1, regT0)); > #endif > } > > #if USE(JSVALUE64) 
>-void JIT::emit_op_has_structure_property(Instruction* currentInstruction) >+void JIT::emit_op_has_structure_property(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int base = currentInstruction[2].u.operand; >- int enumerator = currentInstruction[4].u.operand; >+ auto bytecode = currentInstruction->as<OpHasStructureProperty>(); >+ int dst = bytecode.dst.offset(); >+ int base = bytecode.base.offset(); >+ int enumerator = bytecode.enumerator.offset(); > > emitGetVirtualRegister(base, regT0); > emitGetVirtualRegister(enumerator, regT1); >@@ -1129,7 +1197,7 @@ void JIT::emit_op_has_structure_property(Instruction* currentInstruction) > > void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) > { >- Instruction* currentInstruction = &m_codeBlock->instructions()[byValInfo->bytecodeIndex]; >+ const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr(); > > PatchableJump badType; > >@@ -1154,12 +1222,14 @@ void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPt > MacroAssembler::repatchCall(CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)), FunctionPtr<OperationPtrTag>(operationHasIndexedPropertyGeneric)); > } > >-void JIT::emit_op_has_indexed_property(Instruction* currentInstruction) >+void JIT::emit_op_has_indexed_property(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int base = currentInstruction[2].u.operand; >- int property = currentInstruction[3].u.operand; >- ArrayProfile* profile = currentInstruction[4].u.arrayProfile; >+ auto bytecode = currentInstruction->as<OpHasIndexedProperty>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ int dst = bytecode.dst.offset(); >+ int base = bytecode.base.offset(); >+ int property = bytecode.property.offset(); >+ ArrayProfile* profile = &metadata.arrayProfile; > ByValInfo* 
byValInfo = m_codeBlock->addByValInfo(); > > emitGetVirtualRegisters(base, regT0, property, regT1); >@@ -1197,13 +1267,14 @@ void JIT::emit_op_has_indexed_property(Instruction* currentInstruction) > m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, PatchableJump(), badType, mode, profile, done, nextHotPath)); > } > >-void JIT::emitSlow_op_has_indexed_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_has_indexed_property(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > >- int dst = currentInstruction[1].u.operand; >- int base = currentInstruction[2].u.operand; >- int property = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpHasIndexedProperty>(); >+ int dst = bytecode.dst.offset(); >+ int base = bytecode.base.offset(); >+ int property = bytecode.property.offset(); > ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo; > > Label slowPath = label(); >@@ -1217,12 +1288,13 @@ void JIT::emitSlow_op_has_indexed_property(Instruction* currentInstruction, Vect > m_byValInstructionIndex++; > } > >-void JIT::emit_op_get_direct_pname(Instruction* currentInstruction) >+void JIT::emit_op_get_direct_pname(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int base = currentInstruction[2].u.operand; >- int index = currentInstruction[4].u.operand; >- int enumerator = currentInstruction[5].u.operand; >+ auto bytecode = currentInstruction->as<OpGetDirectPname>(); >+ int dst = bytecode.dst.offset(); >+ int base = bytecode.base.offset(); >+ int index = bytecode.index.offset(); >+ int enumerator = bytecode.enumerator.offset(); > > // Check that base is a cell > emitGetVirtualRegister(base, regT0); >@@ -1253,15 +1325,16 @@ void JIT::emit_op_get_direct_pname(Instruction* currentInstruction) > load64(BaseIndex(regT0, regT1, TimesEight, 
offsetOfFirstProperty), regT0); > > done.link(this); >- emitValueProfilingSite(); >+ emitValueProfilingSite(bytecode.metadata(m_codeBlock)); > emitPutVirtualRegister(dst, regT0); > } > >-void JIT::emit_op_enumerator_structure_pname(Instruction* currentInstruction) >+void JIT::emit_op_enumerator_structure_pname(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int enumerator = currentInstruction[2].u.operand; >- int index = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpEnumeratorStructurePname>(); >+ int dst = bytecode.dst.offset(); >+ int enumerator = bytecode.enumerator.offset(); >+ int index = bytecode.index.offset(); > > emitGetVirtualRegister(index, regT0); > emitGetVirtualRegister(enumerator, regT1); >@@ -1280,11 +1353,12 @@ void JIT::emit_op_enumerator_structure_pname(Instruction* currentInstruction) > emitPutVirtualRegister(dst); > } > >-void JIT::emit_op_enumerator_generic_pname(Instruction* currentInstruction) >+void JIT::emit_op_enumerator_generic_pname(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int enumerator = currentInstruction[2].u.operand; >- int index = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpEnumeratorGenericPname>(); >+ int dst = bytecode.dst.offset(); >+ int enumerator = bytecode.enumerator.offset(); >+ int index = bytecode.index.offset(); > > emitGetVirtualRegister(index, regT0); > emitGetVirtualRegister(enumerator, regT1); >@@ -1303,10 +1377,12 @@ void JIT::emit_op_enumerator_generic_pname(Instruction* currentInstruction) > emitPutVirtualRegister(dst); > } > >-void JIT::emit_op_profile_type(Instruction* currentInstruction) >+void JIT::emit_op_profile_type(const Instruction* currentInstruction) > { >- TypeLocation* cachedTypeLocation = currentInstruction[2].u.location; >- int valueToProfile = currentInstruction[1].u.operand; >+ auto bytecode = currentInstruction->as<OpProfileType>(); >+ 
auto& metadata = bytecode.metadata(m_codeBlock); >+ TypeLocation* cachedTypeLocation = metadata.typeLocation; >+ int valueToProfile = bytecode.target.offset(); > > emitGetVirtualRegister(valueToProfile, regT0); > >@@ -1365,36 +1441,40 @@ void JIT::emit_op_profile_type(Instruction* currentInstruction) > jumpToEnd.link(this); > } > >-void JIT::emit_op_log_shadow_chicken_prologue(Instruction* currentInstruction) >+void JIT::emit_op_log_shadow_chicken_prologue(const Instruction* currentInstruction) > { > updateTopCallFrame(); > static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "we will have problems if this is true."); >+ auto bytecode = currentInstruction->as<OpLogShadowChickenPrologue>(); > GPRReg shadowPacketReg = regT0; > GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register. > GPRReg scratch2Reg = regT2; > ensureShadowChickenPacket(*vm(), shadowPacketReg, scratch1Reg, scratch2Reg); >- emitGetVirtualRegister(currentInstruction[1].u.operand, regT3); >+ emitGetVirtualRegister(bytecode.scope.offset(), regT3); > logShadowChickenProloguePacket(shadowPacketReg, scratch1Reg, regT3); > } > >-void JIT::emit_op_log_shadow_chicken_tail(Instruction* currentInstruction) >+void JIT::emit_op_log_shadow_chicken_tail(const Instruction* currentInstruction) > { > updateTopCallFrame(); > static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "we will have problems if this is true."); >+ auto bytecode = currentInstruction->as<OpLogShadowChickenTail>(); > GPRReg shadowPacketReg = regT0; > GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register. 
> GPRReg scratch2Reg = regT2; > ensureShadowChickenPacket(*vm(), shadowPacketReg, scratch1Reg, scratch2Reg); >- emitGetVirtualRegister(currentInstruction[1].u.operand, regT2); >- emitGetVirtualRegister(currentInstruction[2].u.operand, regT3); >+ emitGetVirtualRegister(bytecode.thisValue.offset(), regT2); >+ emitGetVirtualRegister(bytecode.scope.offset(), regT3); > logShadowChickenTailPacket(shadowPacketReg, JSValueRegs(regT2), regT3, m_codeBlock, CallSiteIndex(m_bytecodeOffset)); > } > > #endif // USE(JSVALUE64) > >-void JIT::emit_op_profile_control_flow(Instruction* currentInstruction) >+void JIT::emit_op_profile_control_flow(const Instruction* currentInstruction) > { >- BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation; >+ auto bytecode = currentInstruction->as<OpProfileControlFlow>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ BasicBlockLocation* basicBlockLocation = metadata.basicBlockLocation; > #if USE(JSVALUE64) > basicBlockLocation->emitExecuteCode(*this); > #else >@@ -1402,9 +1482,10 @@ void JIT::emit_op_profile_control_flow(Instruction* currentInstruction) > #endif > } > >-void JIT::emit_op_argument_count(Instruction* currentInstruction) >+void JIT::emit_op_argument_count(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >+ auto bytecode = currentInstruction->as<OpArgumentCount>(); >+ int dst = bytecode.dst.offset(); > load32(payloadFor(CallFrameSlot::argumentCount), regT0); > sub32(TrustedImm32(1), regT0); > JSValueRegs result = JSValueRegs::withTwoAvailableRegs(regT0, regT1); >@@ -1412,10 +1493,11 @@ void JIT::emit_op_argument_count(Instruction* currentInstruction) > emitPutVirtualRegister(dst, result); > } > >-void JIT::emit_op_get_rest_length(Instruction* currentInstruction) >+void JIT::emit_op_get_rest_length(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- unsigned numParamsToSkip = currentInstruction[2].u.unsignedValue; >+ 
auto bytecode = currentInstruction->as<OpGetRestLength>(); >+ int dst = bytecode.dst.offset(); >+ unsigned numParamsToSkip = bytecode.numParametersToSkip; > load32(payloadFor(CallFrameSlot::argumentCount), regT0); > sub32(TrustedImm32(1), regT0); > Jump zeroLength = branch32(LessThanOrEqual, regT0, Imm32(numParamsToSkip)); >@@ -1441,10 +1523,11 @@ void JIT::emit_op_get_rest_length(Instruction* currentInstruction) > #endif > } > >-void JIT::emit_op_get_argument(Instruction* currentInstruction) >+void JIT::emit_op_get_argument(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int index = currentInstruction[2].u.operand; >+ auto bytecode = currentInstruction->as<OpGetArgument>(); >+ int dst = bytecode.dst.offset(); >+ int index = bytecode.index; > #if USE(JSVALUE64) > JSValueRegs resultRegs(regT0); > #else >@@ -1460,7 +1543,7 @@ void JIT::emit_op_get_argument(Instruction* currentInstruction) > moveValue(jsUndefined(), resultRegs); > > done.link(this); >- emitValueProfilingSite(); >+ emitValueProfilingSite(bytecode.metadata(m_codeBlock)); > emitPutVirtualRegister(dst, resultRegs); > } > >diff --git a/Source/JavaScriptCore/jit/JITOperations.cpp b/Source/JavaScriptCore/jit/JITOperations.cpp >index 226c20e2e4ecff5bba810620684b011564713552..67959c8e61bde128e9a23967830e5b20e0b4ccb9 100644 >--- a/Source/JavaScriptCore/jit/JITOperations.cpp >+++ b/Source/JavaScriptCore/jit/JITOperations.cpp >@@ -1699,8 +1699,9 @@ char* JIT_OPERATION operationTryOSREnterAtCatchAndValueProfile(ExecState* exec, > } > > codeBlock->ensureCatchLivenessIsComputedForBytecodeOffset(bytecodeIndex); >- ValueProfileAndOperandBuffer* buffer = static_cast<ValueProfileAndOperandBuffer*>(codeBlock->instructions()[bytecodeIndex + 3].u.pointer); >- buffer->forEach([&] (ValueProfileAndOperand& profile) { >+ auto bytecode = codeBlock->instructions().at(bytecodeIndex)->as<OpCatch>(); >+ auto& metadata = bytecode.metadata(codeBlock); >+ metadata.buffer->forEach([&] 
(ValueProfileAndOperand& profile) { > profile.m_profile.m_buckets[0] = JSValue::encode(exec->uncheckedR(profile.m_operand).jsValue()); > }); > >@@ -2326,18 +2327,18 @@ char* JIT_OPERATION operationSwitchStringWithUnknownKeyType(ExecState* exec, Enc > return reinterpret_cast<char*>(result); > } > >-EncodedJSValue JIT_OPERATION operationGetFromScope(ExecState* exec, Instruction* bytecodePC) >+EncodedJSValue JIT_OPERATION operationGetFromScope(ExecState* exec, const Instruction* pc) > { > VM& vm = exec->vm(); > NativeCallFrameTracer tracer(&vm, exec); > auto throwScope = DECLARE_THROW_SCOPE(vm); > > CodeBlock* codeBlock = exec->codeBlock(); >- Instruction* pc = bytecodePC; > >- const Identifier& ident = codeBlock->identifier(pc[3].u.operand); >- JSObject* scope = jsCast<JSObject*>(exec->uncheckedR(pc[2].u.operand).jsValue()); >- GetPutInfo getPutInfo(pc[4].u.operand); >+ auto bytecode = pc->as<OpGetFromScope>(); >+ const Identifier& ident = codeBlock->identifier(bytecode.var); >+ JSObject* scope = jsCast<JSObject*>(exec->uncheckedR(bytecode.scope.offset()).jsValue()); >+ GetPutInfo& getPutInfo = bytecode.metadata(codeBlock).getPutInfo; > > // ModuleVar is always converted to ClosureVar for get_from_scope. 
> ASSERT(getPutInfo.resolveType() != ModuleVar); >@@ -2360,7 +2361,7 @@ EncodedJSValue JIT_OPERATION operationGetFromScope(ExecState* exec, Instruction* > } > } > >- CommonSlowPaths::tryCacheGetFromScopeGlobal(exec, vm, pc, scope, slot, ident); >+ CommonSlowPaths::tryCacheGetFromScopeGlobal(exec, vm, bytecode, scope, slot, ident); > > if (!result) > return slot.getValue(exec, ident); >@@ -2368,27 +2369,28 @@ EncodedJSValue JIT_OPERATION operationGetFromScope(ExecState* exec, Instruction* > })); > } > >-void JIT_OPERATION operationPutToScope(ExecState* exec, Instruction* bytecodePC) >+void JIT_OPERATION operationPutToScope(ExecState* exec, const Instruction* pc) > { > VM& vm = exec->vm(); > NativeCallFrameTracer tracer(&vm, exec); > auto throwScope = DECLARE_THROW_SCOPE(vm); > >- Instruction* pc = bytecodePC; >- > CodeBlock* codeBlock = exec->codeBlock(); >- const Identifier& ident = codeBlock->identifier(pc[2].u.operand); >- JSObject* scope = jsCast<JSObject*>(exec->uncheckedR(pc[1].u.operand).jsValue()); >- JSValue value = exec->r(pc[3].u.operand).jsValue(); >- GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand); >+ auto bytecode = pc->as<OpPutToScope>(); >+ auto& metadata = bytecode.metadata(codeBlock); >+ >+ const Identifier& ident = codeBlock->identifier(bytecode.var); >+ JSObject* scope = jsCast<JSObject*>(exec->uncheckedR(bytecode.scope.offset()).jsValue()); >+ JSValue value = exec->r(bytecode.value.offset()).jsValue(); >+ GetPutInfo& getPutInfo = metadata.getPutInfo; > > // ModuleVar does not keep the scope register value alive in DFG. 
> ASSERT(getPutInfo.resolveType() != ModuleVar); > > if (getPutInfo.resolveType() == LocalClosureVar) { > JSLexicalEnvironment* environment = jsCast<JSLexicalEnvironment*>(scope); >- environment->variableAt(ScopeOffset(pc[6].u.operand)).set(vm, environment, value); >- if (WatchpointSet* set = pc[5].u.watchpointSet) >+ environment->variableAt(ScopeOffset(bytecode.offset)).set(vm, environment, value); >+ if (WatchpointSet* set = metadata.watchpointSet) > set->touch(vm, "Executed op_put_scope<LocalClosureVar>"); > return; > } >@@ -2417,7 +2419,7 @@ void JIT_OPERATION operationPutToScope(ExecState* exec, Instruction* bytecodePC) > > RETURN_IF_EXCEPTION(throwScope, void()); > >- CommonSlowPaths::tryCachePutToScopeGlobal(exec, codeBlock, pc, scope, getPutInfo, slot, ident); >+ CommonSlowPaths::tryCachePutToScopeGlobal(exec, codeBlock, bytecode, scope, slot, ident); > } > > void JIT_OPERATION operationThrow(ExecState* exec, EncodedJSValue encodedExceptionValue) >diff --git a/Source/JavaScriptCore/jit/JITOperations.h b/Source/JavaScriptCore/jit/JITOperations.h >index 6071e5dab4d3a256785b54d14b581de1acced266..a12d2abf544491384ae3846902a1b684ff83d6bb 100644 >--- a/Source/JavaScriptCore/jit/JITOperations.h >+++ b/Source/JavaScriptCore/jit/JITOperations.h >@@ -466,8 +466,8 @@ CallFrame* JIT_OPERATION operationSetupVarargsFrame(ExecState*, CallFrame*, Enco > char* JIT_OPERATION operationSwitchCharWithUnknownKeyType(ExecState*, EncodedJSValue key, size_t tableIndex) WTF_INTERNAL; > char* JIT_OPERATION operationSwitchImmWithUnknownKeyType(ExecState*, EncodedJSValue key, size_t tableIndex) WTF_INTERNAL; > char* JIT_OPERATION operationSwitchStringWithUnknownKeyType(ExecState*, EncodedJSValue key, size_t tableIndex) WTF_INTERNAL; >-EncodedJSValue JIT_OPERATION operationGetFromScope(ExecState*, Instruction* bytecodePC) WTF_INTERNAL; >-void JIT_OPERATION operationPutToScope(ExecState*, Instruction* bytecodePC) WTF_INTERNAL; >+EncodedJSValue JIT_OPERATION 
operationGetFromScope(ExecState*, const Instruction* bytecodePC) WTF_INTERNAL; >+void JIT_OPERATION operationPutToScope(ExecState*, const Instruction* bytecodePC) WTF_INTERNAL; > > char* JIT_OPERATION operationReallocateButterflyToHavePropertyStorageWithInitialCapacity(ExecState*, JSObject*) WTF_INTERNAL; > char* JIT_OPERATION operationReallocateButterflyToGrowPropertyStorage(ExecState*, JSObject*, size_t newSize) WTF_INTERNAL; >diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp >index d3c2c2d20bb160e3715d8d751213aabcb8c4dd87..de46ad18b21b735bc9eed2b4908e93819d066ee1 100644 >--- a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp >+++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp >@@ -50,12 +50,14 @@ > namespace JSC { > #if USE(JSVALUE64) > >-void JIT::emit_op_get_by_val(Instruction* currentInstruction) >+void JIT::emit_op_get_by_val(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int base = currentInstruction[2].u.operand; >- int property = currentInstruction[3].u.operand; >- ArrayProfile* profile = currentInstruction[4].u.arrayProfile; >+ auto bytecode = currentInstruction->as<OpGetByVal>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ int dst = bytecode.dst.offset(); >+ int base = bytecode.base.offset(); >+ int property = bytecode.property.offset(); >+ ArrayProfile* profile = &metadata.arrayProfile; > ByValInfo* byValInfo = m_codeBlock->addByValInfo(); > > emitGetVirtualRegister(base, regT0); >@@ -117,7 +119,7 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction) > resultOK.link(this); > } > >- emitValueProfilingSite(); >+ emitValueProfilingSite(metadata); > emitPutVirtualRegister(dst); > > Label nextHotPath = label(); >@@ -125,13 +127,13 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction) > m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, nextHotPath)); 
> } > >-JITGetByIdGenerator JIT::emitGetByValWithCachedId(ByValInfo* byValInfo, Instruction* currentInstruction, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases) >+JITGetByIdGenerator JIT::emitGetByValWithCachedId(ByValInfo* byValInfo, OpGetByVal bytecode, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases) > { > // base: regT0 > // property: regT1 > // scratch: regT3 > >- int dst = currentInstruction[1].u.operand; >+ int dst = bytecode.dst.offset(); > > slowCases.append(branchIfNotCell(regT1)); > emitByValIdentifierCheck(byValInfo, regT1, regT3, propertyName, slowCases); >@@ -146,18 +148,19 @@ JITGetByIdGenerator JIT::emitGetByValWithCachedId(ByValInfo* byValInfo, Instruct > Label coldPathBegin = label(); > gen.slowPathJump().link(this); > >- Call call = callOperationWithProfile(operationGetByIdOptimize, dst, gen.stubInfo(), regT0, propertyName.impl()); >+ Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdOptimize, dst, gen.stubInfo(), regT0, propertyName.impl()); > gen.reportSlowPathCall(coldPathBegin, call); > slowDoneCase = jump(); > > return gen; > } > >-void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_get_by_val(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { >- int dst = currentInstruction[1].u.operand; >- int base = currentInstruction[2].u.operand; >- int property = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpGetByVal>(); >+ int dst = bytecode.dst.offset(); >+ int base = bytecode.base.offset(); >+ int property = bytecode.property.offset(); > ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo; > > linkSlowCaseIfNotJSCell(iter, base); // base cell check >@@ -188,14 +191,16 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas > 
m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call; > m_byValInstructionIndex++; > >- emitValueProfilingSite(); >+ emitValueProfilingSite(bytecode.metadata(m_codeBlock)); > } > >-void JIT::emit_op_put_by_val(Instruction* currentInstruction) >+void JIT::emit_op_put_by_val(const Instruction* currentInstruction) > { >- int base = currentInstruction[1].u.operand; >- int property = currentInstruction[2].u.operand; >- ArrayProfile* profile = currentInstruction[4].u.arrayProfile; >+ auto bytecode = currentInstruction->as<OpPutByVal>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ int base = bytecode.base.offset(); >+ int property = bytecode.property.offset(); >+ ArrayProfile* profile = &metadata.arrayProfile; > ByValInfo* byValInfo = m_codeBlock->addByValInfo(); > > emitGetVirtualRegister(base, regT0); >@@ -225,16 +230,16 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction) > JITArrayMode mode = chooseArrayMode(profile); > switch (mode) { > case JITInt32: >- slowCases = emitInt32PutByVal(currentInstruction, badType); >+ slowCases = emitInt32PutByVal(bytecode, badType); > break; > case JITDouble: >- slowCases = emitDoublePutByVal(currentInstruction, badType); >+ slowCases = emitDoublePutByVal(bytecode, badType); > break; > case JITContiguous: >- slowCases = emitContiguousPutByVal(currentInstruction, badType); >+ slowCases = emitContiguousPutByVal(bytecode, badType); > break; > case JITArrayStorage: >- slowCases = emitArrayStoragePutByVal(currentInstruction, badType); >+ slowCases = emitArrayStoragePutByVal(bytecode, badType); > break; > default: > CRASH(); >@@ -249,10 +254,11 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction) > m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, done)); > } > >-JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape) >+JIT::JumpList 
JIT::emitGenericContiguousPutByVal(OpPutByVal bytecode, PatchableJump& badType, IndexingType indexingShape) > { >- int value = currentInstruction[3].u.operand; >- ArrayProfile* profile = currentInstruction[4].u.arrayProfile; >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ int value = bytecode.value.offset(); >+ ArrayProfile* profile = &metadata.arrayProfile; > > JumpList slowCases; > >@@ -282,7 +288,7 @@ JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction > } > case ContiguousShape: > store64(regT3, BaseIndex(regT2, regT1, TimesEight)); >- emitWriteBarrier(currentInstruction[1].u.operand, value, ShouldFilterValue); >+ emitWriteBarrier(bytecode.base.offset(), value, ShouldFilterValue); > break; > default: > CRASH(); >@@ -305,10 +311,11 @@ JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction > return slowCases; > } > >-JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, PatchableJump& badType) >+JIT::JumpList JIT::emitArrayStoragePutByVal(OpPutByVal bytecode, PatchableJump& badType) > { >- int value = currentInstruction[3].u.operand; >- ArrayProfile* profile = currentInstruction[4].u.arrayProfile; >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ int value = bytecode.value.offset(); >+ ArrayProfile* profile = &metadata.arrayProfile; > > JumpList slowCases; > >@@ -321,7 +328,7 @@ JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, Pat > Label storeResult(this); > emitGetVirtualRegister(value, regT3); > store64(regT3, BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset())); >- emitWriteBarrier(currentInstruction[1].u.operand, value, ShouldFilterValue); >+ emitWriteBarrier(bytecode.base.offset(), value, ShouldFilterValue); > Jump end = jump(); > > empty.link(this); >@@ -339,14 +346,14 @@ JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, Pat > return slowCases; > } > >-JITPutByIdGenerator 
JIT::emitPutByValWithCachedId(ByValInfo* byValInfo, Instruction* currentInstruction, PutKind putKind, const Identifier& propertyName, JumpList& doneCases, JumpList& slowCases) >+JITPutByIdGenerator JIT::emitPutByValWithCachedId(ByValInfo* byValInfo, OpPutByVal bytecode, PutKind putKind, const Identifier& propertyName, JumpList& doneCases, JumpList& slowCases) > { > // base: regT0 > // property: regT1 > // scratch: regT2 > >- int base = currentInstruction[1].u.operand; >- int value = currentInstruction[3].u.operand; >+ int base = bytecode.base.offset(); >+ int value = bytecode.value.offset(); > > slowCases.append(branchIfNotCell(regT1)); > emitByValIdentifierCheck(byValInfo, regT1, regT1, propertyName, slowCases); >@@ -372,11 +379,24 @@ JITPutByIdGenerator JIT::emitPutByValWithCachedId(ByValInfo* byValInfo, Instruct > return gen; > } > >-void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_put_by_val(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { >- int base = currentInstruction[1].u.operand; >- int property = currentInstruction[2].u.operand; >- int value = currentInstruction[3].u.operand; >+ bool isDirect = currentInstruction->opcodeID() == op_put_by_val_direct; >+ int base; >+ int property; >+ int value; >+ >+ auto load = [&](auto bytecode) { >+ base = bytecode.base.offset(); >+ property = bytecode.property.offset(); >+ value = bytecode.value.offset(); >+ }; >+ >+ if (isDirect) >+ load(currentInstruction->as<OpPutByValDirect>()); >+ else >+ load(currentInstruction->as<OpPutByVal>()); >+ > ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo; > > linkAllSlowCases(iter); >@@ -385,7 +405,6 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas > emitGetVirtualRegister(base, regT0); > emitGetVirtualRegister(property, regT1); > emitGetVirtualRegister(value, regT2); >- bool isDirect = 
Interpreter::getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct; > Call call = callOperation(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize, regT0, regT1, regT2, byValInfo); > > m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath; >@@ -393,73 +412,81 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas > m_byValInstructionIndex++; > } > >-void JIT::emit_op_put_getter_by_id(Instruction* currentInstruction) >+void JIT::emit_op_put_getter_by_id(const Instruction* currentInstruction) > { >- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); >- int32_t options = currentInstruction[3].u.operand; >- emitGetVirtualRegister(currentInstruction[4].u.operand, regT1); >- callOperation(operationPutGetterById, regT0, m_codeBlock->identifier(currentInstruction[2].u.operand).impl(), options, regT1); >+ auto bytecode = currentInstruction->as<OpPutGetterById>(); >+ emitGetVirtualRegister(bytecode.base.offset(), regT0); >+ int32_t options = bytecode.attributes; >+ emitGetVirtualRegister(bytecode.accessor.offset(), regT1); >+ callOperation(operationPutGetterById, regT0, m_codeBlock->identifier(bytecode.property).impl(), options, regT1); > } > >-void JIT::emit_op_put_setter_by_id(Instruction* currentInstruction) >+void JIT::emit_op_put_setter_by_id(const Instruction* currentInstruction) > { >- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); >- int32_t options = currentInstruction[3].u.operand; >- emitGetVirtualRegister(currentInstruction[4].u.operand, regT1); >- callOperation(operationPutSetterById, regT0, m_codeBlock->identifier(currentInstruction[2].u.operand).impl(), options, regT1); >+ auto bytecode = currentInstruction->as<OpPutSetterById>(); >+ emitGetVirtualRegister(bytecode.base.offset(), regT0); >+ int32_t options = bytecode.attributes; >+ emitGetVirtualRegister(bytecode.accessor.offset(), regT1); >+ callOperation(operationPutSetterById, regT0, 
m_codeBlock->identifier(bytecode.property).impl(), options, regT1); > } > >-void JIT::emit_op_put_getter_setter_by_id(Instruction* currentInstruction) >+void JIT::emit_op_put_getter_setter_by_id(const Instruction* currentInstruction) > { >- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); >- int32_t attribute = currentInstruction[3].u.operand; >- emitGetVirtualRegister(currentInstruction[4].u.operand, regT1); >- emitGetVirtualRegister(currentInstruction[5].u.operand, regT2); >- callOperation(operationPutGetterSetter, regT0, m_codeBlock->identifier(currentInstruction[2].u.operand).impl(), attribute, regT1, regT2); >+ auto bytecode = currentInstruction->as<OpPutGetterSetterById>(); >+ emitGetVirtualRegister(bytecode.base.offset(), regT0); >+ int32_t attribute = bytecode.attributes; >+ emitGetVirtualRegister(bytecode.getter.offset(), regT1); >+ emitGetVirtualRegister(bytecode.setter.offset(), regT2); >+ callOperation(operationPutGetterSetter, regT0, m_codeBlock->identifier(bytecode.property).impl(), attribute, regT1, regT2); > } > >-void JIT::emit_op_put_getter_by_val(Instruction* currentInstruction) >+void JIT::emit_op_put_getter_by_val(const Instruction* currentInstruction) > { >- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); >- emitGetVirtualRegister(currentInstruction[2].u.operand, regT1); >- int32_t attributes = currentInstruction[3].u.operand; >- emitGetVirtualRegister(currentInstruction[4].u.operand, regT2); >+ auto bytecode = currentInstruction->as<OpPutGetterByVal>(); >+ emitGetVirtualRegister(bytecode.base.offset(), regT0); >+ emitGetVirtualRegister(bytecode.property.offset(), regT1); >+ int32_t attributes = bytecode.attributes; >+ emitGetVirtualRegister(bytecode.accessor, regT2); > callOperation(operationPutGetterByVal, regT0, regT1, attributes, regT2); > } > >-void JIT::emit_op_put_setter_by_val(Instruction* currentInstruction) >+void JIT::emit_op_put_setter_by_val(const Instruction* currentInstruction) > { >- 
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); >- emitGetVirtualRegister(currentInstruction[2].u.operand, regT1); >- int32_t attributes = currentInstruction[3].u.operand; >- emitGetVirtualRegister(currentInstruction[4].u.operand, regT2); >+ auto bytecode = currentInstruction->as<OpPutSetterByVal>(); >+ emitGetVirtualRegister(bytecode.base.offset(), regT0); >+ emitGetVirtualRegister(bytecode.property.offset(), regT1); >+ int32_t attributes = bytecode.attributes; >+ emitGetVirtualRegister(bytecode.accessor.offset(), regT2); > callOperation(operationPutSetterByVal, regT0, regT1, attributes, regT2); > } > >-void JIT::emit_op_del_by_id(Instruction* currentInstruction) >+void JIT::emit_op_del_by_id(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int base = currentInstruction[2].u.operand; >- int property = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpDelById>(); >+ int dst = bytecode.dst.offset(); >+ int base = bytecode.base.offset(); >+ int property = bytecode.property; > emitGetVirtualRegister(base, regT0); > callOperation(operationDeleteByIdJSResult, dst, regT0, m_codeBlock->identifier(property).impl()); > } > >-void JIT::emit_op_del_by_val(Instruction* currentInstruction) >+void JIT::emit_op_del_by_val(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int base = currentInstruction[2].u.operand; >- int property = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpDelByVal>(); >+ int dst = bytecode.dst.offset(); >+ int base = bytecode.base.offset(); >+ int property = bytecode.property.offset(); > emitGetVirtualRegister(base, regT0); > emitGetVirtualRegister(property, regT1); > callOperation(operationDeleteByValJSResult, dst, regT0, regT1); > } > >-void JIT::emit_op_try_get_by_id(Instruction* currentInstruction) >+void JIT::emit_op_try_get_by_id(const Instruction* currentInstruction) > { >- int resultVReg = 
currentInstruction[1].u.operand; >- int baseVReg = currentInstruction[2].u.operand; >- const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); >+ auto bytecode = currentInstruction->as<OpTryGetById>(); >+ int resultVReg = bytecode.dst.offset(); >+ int baseVReg = bytecode.base.offset(); >+ const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); > > emitGetVirtualRegister(baseVReg, regT0); > >@@ -472,16 +499,17 @@ void JIT::emit_op_try_get_by_id(Instruction* currentInstruction) > addSlowCase(gen.slowPathJump()); > m_getByIds.append(gen); > >- emitValueProfilingSite(); >+ emitValueProfilingSite(bytecode.metadata(m_codeBlock)); > emitPutVirtualRegister(resultVReg); > } > >-void JIT::emitSlow_op_try_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_try_get_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > >- int resultVReg = currentInstruction[1].u.operand; >- const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); >+ auto bytecode = currentInstruction->as<OpTryGetById>(); >+ int resultVReg = bytecode.dst.offset(); >+ const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); > > JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++]; > >@@ -492,11 +520,12 @@ void JIT::emitSlow_op_try_get_by_id(Instruction* currentInstruction, Vector<Slow > gen.reportSlowPathCall(coldPathBegin, call); > } > >-void JIT::emit_op_get_by_id_direct(Instruction* currentInstruction) >+void JIT::emit_op_get_by_id_direct(const Instruction* currentInstruction) > { >- int resultVReg = currentInstruction[1].u.operand; >- int baseVReg = currentInstruction[2].u.operand; >- const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); >+ auto bytecode = currentInstruction->as<OpGetByIdDirect>(); >+ int resultVReg = bytecode.dst.offset(); >+ int baseVReg = 
bytecode.base.offset(); >+ const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); > > emitGetVirtualRegister(baseVReg, regT0); > >@@ -509,31 +538,33 @@ void JIT::emit_op_get_by_id_direct(Instruction* currentInstruction) > addSlowCase(gen.slowPathJump()); > m_getByIds.append(gen); > >- emitValueProfilingSite(); >+ emitValueProfilingSite(bytecode.metadata(m_codeBlock)); > emitPutVirtualRegister(resultVReg); > } > >-void JIT::emitSlow_op_get_by_id_direct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_get_by_id_direct(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > >- int resultVReg = currentInstruction[1].u.operand; >- const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); >+ auto bytecode = currentInstruction->as<OpGetByIdDirect>(); >+ int resultVReg = bytecode.dst.offset(); >+ const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); > > JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++]; > > Label coldPathBegin = label(); > >- Call call = callOperationWithProfile(operationGetByIdDirectOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl()); >+ Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdDirectOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl()); > > gen.reportSlowPathCall(coldPathBegin, call); > } > >-void JIT::emit_op_get_by_id(Instruction* currentInstruction) >+void JIT::emit_op_get_by_id(const Instruction* currentInstruction) > { >- int resultVReg = currentInstruction[1].u.operand; >- int baseVReg = currentInstruction[2].u.operand; >- const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); >+ auto bytecode = currentInstruction->as<OpGetById>(); >+ int resultVReg = bytecode.dst.offset(); >+ int baseVReg = bytecode.base.offset(); >+ const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); 
> > emitGetVirtualRegister(baseVReg, regT0); > >@@ -549,16 +580,17 @@ void JIT::emit_op_get_by_id(Instruction* currentInstruction) > addSlowCase(gen.slowPathJump()); > m_getByIds.append(gen); > >- emitValueProfilingSite(); >+ emitValueProfilingSite(bytecode.metadata(m_codeBlock)); > emitPutVirtualRegister(resultVReg); > } > >-void JIT::emit_op_get_by_id_with_this(Instruction* currentInstruction) >+void JIT::emit_op_get_by_id_with_this(const Instruction* currentInstruction) > { >- int resultVReg = currentInstruction[1].u.operand; >- int baseVReg = currentInstruction[2].u.operand; >- int thisVReg = currentInstruction[3].u.operand; >- const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[4].u.operand)); >+ auto bytecode = currentInstruction->as<OpGetByIdWithThis>(); >+ int resultVReg = bytecode.dst.offset(); >+ int baseVReg = bytecode.base.offset(); >+ int thisVReg = bytecode.thisValue.offset(); >+ const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); > > emitGetVirtualRegister(baseVReg, regT0); > emitGetVirtualRegister(thisVReg, regT1); >@@ -572,47 +604,51 @@ void JIT::emit_op_get_by_id_with_this(Instruction* currentInstruction) > addSlowCase(gen.slowPathJump()); > m_getByIdsWithThis.append(gen); > >- emitValueProfilingSite(); >+ emitValueProfilingSite(bytecode.metadata(m_codeBlock)); > emitPutVirtualRegister(resultVReg); > } > >-void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_get_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > >- int resultVReg = currentInstruction[1].u.operand; >- const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); >+ auto bytecode = currentInstruction->as<OpGetById>(); >+ int resultVReg = bytecode.dst.offset(); >+ const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); > > JITGetByIdGenerator& gen = 
m_getByIds[m_getByIdIndex++]; > > Label coldPathBegin = label(); > >- Call call = callOperationWithProfile(operationGetByIdOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl()); >+ Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl()); > > gen.reportSlowPathCall(coldPathBegin, call); > } > >-void JIT::emitSlow_op_get_by_id_with_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_get_by_id_with_this(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > >- int resultVReg = currentInstruction[1].u.operand; >- const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[4].u.operand)); >+ auto bytecode = currentInstruction->as<OpGetByIdWithThis>(); >+ int resultVReg = bytecode.dst.offset(); >+ const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); > > JITGetByIdWithThisGenerator& gen = m_getByIdsWithThis[m_getByIdWithThisIndex++]; > > Label coldPathBegin = label(); > >- Call call = callOperationWithProfile(operationGetByIdWithThisOptimize, resultVReg, gen.stubInfo(), regT0, regT1, ident->impl()); >+ Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdWithThisOptimize, resultVReg, gen.stubInfo(), regT0, regT1, ident->impl()); > > gen.reportSlowPathCall(coldPathBegin, call); > } > >-void JIT::emit_op_put_by_id(Instruction* currentInstruction) >+void JIT::emit_op_put_by_id(const Instruction* currentInstruction) > { >- int baseVReg = currentInstruction[1].u.operand; >- int valueVReg = currentInstruction[3].u.operand; >- unsigned direct = currentInstruction[8].u.putByIdFlags & PutByIdIsDirect; >+ auto bytecode = currentInstruction->as<OpPutById>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ int baseVReg = bytecode.base.offset(); >+ int valueVReg = bytecode.value.offset(); >+ unsigned direct = 
metadata.flags & PutByIdIsDirect; > > // In order to be able to patch both the Structure, and the object offset, we store one pointer, > // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code >@@ -635,11 +671,12 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction) > m_putByIds.append(gen); > } > >-void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_put_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > >- const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand)); >+ auto bytecode = currentInstruction->as<OpPutById>(); >+ const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); > > Label coldPathBegin(this); > >@@ -650,11 +687,12 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase > gen.reportSlowPathCall(coldPathBegin, call); > } > >-void JIT::emit_op_in_by_id(Instruction* currentInstruction) >+void JIT::emit_op_in_by_id(const Instruction* currentInstruction) > { >- int resultVReg = currentInstruction[1].u.operand; >- int baseVReg = currentInstruction[2].u.operand; >- const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); >+ auto bytecode = currentInstruction->as<OpInById>(); >+ int resultVReg = bytecode.dst.offset(); >+ int baseVReg = bytecode.base.offset(); >+ const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); > > emitGetVirtualRegister(baseVReg, regT0); > >@@ -670,12 +708,13 @@ void JIT::emit_op_in_by_id(Instruction* currentInstruction) > emitPutVirtualRegister(resultVReg); > } > >-void JIT::emitSlow_op_in_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_in_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > >- int resultVReg 
= currentInstruction[1].u.operand; >- const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); >+ auto bytecode = currentInstruction->as<OpInById>(); >+ int resultVReg = bytecode.dst.offset(); >+ const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); > > JITInByIdGenerator& gen = m_inByIds[m_inByIdIndex++]; > >@@ -702,12 +741,15 @@ void JIT::emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, u > emitPutVirtualRegister(dst); > } > >-void JIT::emit_op_resolve_scope(Instruction* currentInstruction) >+void JIT::emit_op_resolve_scope(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int scope = currentInstruction[2].u.operand; >- ResolveType resolveType = static_cast<ResolveType>(copiedInstruction(currentInstruction)[4].u.operand); >- unsigned depth = currentInstruction[5].u.operand; >+ auto bytecode = currentInstruction->as<OpResolveScope>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ int dst = bytecode.dst.offset(); >+ int scope = bytecode.scope.offset(); >+ //ResolveType resolveType = static_cast<ResolveType>(copiedInstruction(currentInstruction)[4].u.operand); >+ ResolveType resolveType = metadata.resolveType; >+ unsigned depth = metadata.localScopeDepth; > > auto emitCode = [&] (ResolveType resolveType) { > switch (resolveType) { >@@ -729,7 +771,7 @@ void JIT::emit_op_resolve_scope(Instruction* currentInstruction) > emitResolveClosure(dst, scope, needsVarInjectionChecks(resolveType), depth); > break; > case ModuleVar: >- move(TrustedImmPtr(currentInstruction[6].u.jsCell.get()), regT0); >+ move(TrustedImmPtr(metadata.moduleEnvironment.get()), regT0); > emitPutVirtualRegister(dst); > break; > case Dynamic: >@@ -746,7 +788,7 @@ void JIT::emit_op_resolve_scope(Instruction* currentInstruction) > case UnresolvedProperty: > case UnresolvedPropertyWithVarInjectionChecks: { > JumpList skipToEnd; >- load32(&currentInstruction[4], regT0); >+ 
load32(&bytecode.resolveType, regT0); > > Jump notGlobalProperty = branch32(NotEqual, regT0, TrustedImm32(GlobalProperty)); > emitCode(GlobalProperty); >@@ -805,13 +847,15 @@ void JIT::emitGetClosureVar(int scope, uintptr_t operand) > loadPtr(Address(regT0, JSLexicalEnvironment::offsetOfVariables() + operand * sizeof(Register)), regT0); > } > >-void JIT::emit_op_get_from_scope(Instruction* currentInstruction) >+void JIT::emit_op_get_from_scope(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int scope = currentInstruction[2].u.operand; >- ResolveType resolveType = GetPutInfo(copiedInstruction(currentInstruction)[4].u.operand).resolveType(); >- Structure** structureSlot = currentInstruction[5].u.structure.slot(); >- uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer); >+ auto bytecode = currentInstruction->as<OpGetFromScope>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ int dst = bytecode.dst.offset(); >+ int scope = bytecode.scope.offset(); >+ ResolveType resolveType = metadata.getPutInfo.resolveType(); >+ Structure** structureSlot = metadata.structure.slot(); >+ uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&metadata.operand); > > auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) { > switch (resolveType) { >@@ -871,7 +915,7 @@ void JIT::emit_op_get_from_scope(Instruction* currentInstruction) > case UnresolvedProperty: > case UnresolvedPropertyWithVarInjectionChecks: { > JumpList skipToEnd; >- load32(&currentInstruction[4], regT0); >+ load32(&metadata.getPutInfo, regT0); > and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0 > > Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty)); >@@ -902,15 +946,16 @@ void JIT::emit_op_get_from_scope(Instruction* currentInstruction) > break; > } > emitPutVirtualRegister(dst); >- emitValueProfilingSite(); >+ emitValueProfilingSite(metadata); > } > >-void 
JIT::emitSlow_op_get_from_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_get_from_scope(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > >- int dst = currentInstruction[1].u.operand; >- callOperationWithProfile(operationGetFromScope, dst, currentInstruction); >+ auto bytecode = currentInstruction->as<OpGetFromScope>(); >+ int dst = bytecode.dst.offset(); >+ callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetFromScope, dst, currentInstruction); > } > > void JIT::emitPutGlobalVariable(JSValue* operand, int value, WatchpointSet* set) >@@ -936,14 +981,16 @@ void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointS > storePtr(regT1, Address(regT0, JSLexicalEnvironment::offsetOfVariables() + operand * sizeof(Register))); > } > >-void JIT::emit_op_put_to_scope(Instruction* currentInstruction) >+void JIT::emit_op_put_to_scope(const Instruction* currentInstruction) > { >- int scope = currentInstruction[1].u.operand; >- int value = currentInstruction[3].u.operand; >- GetPutInfo getPutInfo = GetPutInfo(copiedInstruction(currentInstruction)[4].u.operand); >+ auto bytecode = currentInstruction->as<OpPutToScope>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ int scope = bytecode.scope.offset(); >+ int value = bytecode.value.offset(); >+ GetPutInfo getPutInfo = metadata.getPutInfo; > ResolveType resolveType = getPutInfo.resolveType(); >- Structure** structureSlot = currentInstruction[5].u.structure.slot(); >- uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer); >+ Structure** structureSlot = metadata.structure.slot(); >+ uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&metadata.operand); > > auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) { > switch (resolveType) { >@@ -979,9 +1026,9 @@ void JIT::emit_op_put_to_scope(Instruction* currentInstruction) > 
addSlowCase(branchIfEmpty(regT0)); > } > if (indirectLoadForOperand) >- emitPutGlobalVariableIndirect(bitwise_cast<JSValue**>(operandSlot), value, bitwise_cast<WatchpointSet**>(&currentInstruction[5])); >+ emitPutGlobalVariableIndirect(bitwise_cast<JSValue**>(operandSlot), value, &metadata.watchpointSet); > else >- emitPutGlobalVariable(bitwise_cast<JSValue*>(*operandSlot), value, currentInstruction[5].u.watchpointSet); >+ emitPutGlobalVariable(bitwise_cast<JSValue*>(*operandSlot), value, metadata.watchpointSet); > emitWriteBarrier(constantScope, value, ShouldFilterValue); > break; > } >@@ -989,7 +1036,7 @@ void JIT::emit_op_put_to_scope(Instruction* currentInstruction) > case ClosureVar: > case ClosureVarWithVarInjectionChecks: > emitVarInjectionCheck(needsVarInjectionChecks(resolveType)); >- emitPutClosureVar(scope, *operandSlot, value, currentInstruction[5].u.watchpointSet); >+ emitPutClosureVar(scope, *operandSlot, value, metadata.watchpointSet); > emitWriteBarrier(scope, value, ShouldFilterValue); > break; > case ModuleVar: >@@ -1007,7 +1054,7 @@ void JIT::emit_op_put_to_scope(Instruction* currentInstruction) > case UnresolvedProperty: > case UnresolvedPropertyWithVarInjectionChecks: { > JumpList skipToEnd; >- load32(&currentInstruction[4], regT0); >+ load32(&metadata.getPutInfo, regT0); > and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0 > > Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty)); >@@ -1039,11 +1086,13 @@ void JIT::emit_op_put_to_scope(Instruction* currentInstruction) > } > } > >-void JIT::emitSlow_op_put_to_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) >+void JIT::emitSlow_op_put_to_scope(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > linkAllSlowCases(iter); > >- GetPutInfo getPutInfo = GetPutInfo(copiedInstruction(currentInstruction)[4].u.operand); >+ auto bytecode = currentInstruction->as<OpPutToScope>(); >+ auto& metadata = 
bytecode.metadata(m_codeBlock); >+ GetPutInfo getPutInfo = metadata.getPutInfo; > ResolveType resolveType = getPutInfo.resolveType(); > if (resolveType == ModuleVar) { > JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_strict_mode_readonly_property_write_error); >@@ -1052,23 +1101,25 @@ void JIT::emitSlow_op_put_to_scope(Instruction* currentInstruction, Vector<SlowC > callOperation(operationPutToScope, currentInstruction); > } > >-void JIT::emit_op_get_from_arguments(Instruction* currentInstruction) >+void JIT::emit_op_get_from_arguments(const Instruction* currentInstruction) > { >- int dst = currentInstruction[1].u.operand; >- int arguments = currentInstruction[2].u.operand; >- int index = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpGetFromArguments>(); >+ int dst = bytecode.dst.offset(); >+ int arguments = bytecode.arguments.offset(); >+ int index = bytecode.index; > > emitGetVirtualRegister(arguments, regT0); > load64(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>)), regT0); >- emitValueProfilingSite(); >+ emitValueProfilingSite(bytecode.metadata(m_codeBlock)); > emitPutVirtualRegister(dst); > } > >-void JIT::emit_op_put_to_arguments(Instruction* currentInstruction) >+void JIT::emit_op_put_to_arguments(const Instruction* currentInstruction) > { >- int arguments = currentInstruction[1].u.operand; >- int index = currentInstruction[2].u.operand; >- int value = currentInstruction[3].u.operand; >+ auto bytecode = currentInstruction->as<OpPutToArguments>(); >+ int arguments = bytecode.arguments.offset(); >+ int index = bytecode.index; >+ int value = bytecode.value.offset(); > > emitGetVirtualRegister(arguments, regT0); > emitGetVirtualRegister(value, regT1); >@@ -1174,7 +1225,7 @@ void JIT::emitByValIdentifierCheck(ByValInfo* byValInfo, RegisterID cell, Regist > > void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) > 
{ >- Instruction* currentInstruction = &m_codeBlock->instructions()[byValInfo->bytecodeIndex]; >+ const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr(); > > PatchableJump badType; > JumpList slowCases; >@@ -1226,13 +1277,14 @@ void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd > > void JIT::privateCompileGetByValWithCachedId(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, const Identifier& propertyName) > { >- Instruction* currentInstruction = &m_codeBlock->instructions()[byValInfo->bytecodeIndex]; >+ const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr(); >+ auto bytecode = currentInstruction->as<OpGetByVal>(); > > Jump fastDoneCase; > Jump slowDoneCase; > JumpList slowCases; > >- JITGetByIdGenerator gen = emitGetByValWithCachedId(byValInfo, currentInstruction, propertyName, fastDoneCase, slowDoneCase, slowCases); >+ JITGetByIdGenerator gen = emitGetByValWithCachedId(byValInfo, bytecode, propertyName, fastDoneCase, slowDoneCase, slowCases); > > ConcurrentJSLocker locker(m_codeBlock->m_lock); > LinkBuffer patchBuffer(*this, m_codeBlock); >@@ -1259,7 +1311,8 @@ void JIT::privateCompileGetByValWithCachedId(ByValInfo* byValInfo, ReturnAddress > > void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) > { >- Instruction* currentInstruction = &m_codeBlock->instructions()[byValInfo->bytecodeIndex]; >+ const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr(); >+ auto bytecode = currentInstruction->as<OpPutByVal>(); > > PatchableJump badType; > JumpList slowCases; >@@ -1268,25 +1321,25 @@ void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd > > switch (arrayMode) { > case JITInt32: >- slowCases = emitInt32PutByVal(currentInstruction, badType); >+ slowCases = emitInt32PutByVal(bytecode, badType); > break; > case 
JITDouble: >- slowCases = emitDoublePutByVal(currentInstruction, badType); >+ slowCases = emitDoublePutByVal(bytecode, badType); > break; > case JITContiguous: >- slowCases = emitContiguousPutByVal(currentInstruction, badType); >+ slowCases = emitContiguousPutByVal(bytecode, badType); > needsLinkForWriteBarrier = true; > break; > case JITArrayStorage: >- slowCases = emitArrayStoragePutByVal(currentInstruction, badType); >+ slowCases = emitArrayStoragePutByVal(bytecode, badType); > needsLinkForWriteBarrier = true; > break; > default: > TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode); > if (isInt(type)) >- slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, type); >+ slowCases = emitIntTypedArrayPutByVal(bytecode, badType, type); > else >- slowCases = emitFloatTypedArrayPutByVal(currentInstruction, badType, type); >+ slowCases = emitFloatTypedArrayPutByVal(bytecode, badType, type); > break; > } > >@@ -1301,7 +1354,7 @@ void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd > patchBuffer.link(m_calls.last().from, m_calls.last().callee); > } > >- bool isDirect = Interpreter::getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct; >+ bool isDirect = currentInstruction->opcodeID() == op_put_by_val_direct; > if (!isDirect) { > byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB( > m_codeBlock, patchBuffer, JITStubRoutinePtrTag, >@@ -1318,12 +1371,13 @@ void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd > > void JIT::privateCompilePutByValWithCachedId(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, PutKind putKind, const Identifier& propertyName) > { >- Instruction* currentInstruction = &m_codeBlock->instructions()[byValInfo->bytecodeIndex]; >+ const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr(); >+ auto bytecode = currentInstruction->as<OpPutByVal>(); > > JumpList doneCases; > JumpList slowCases; > >- JITPutByIdGenerator gen 
= emitPutByValWithCachedId(byValInfo, currentInstruction, putKind, propertyName, doneCases, slowCases); >+ JITPutByIdGenerator gen = emitPutByValWithCachedId(byValInfo, bytecode, putKind, propertyName, doneCases, slowCases); > > ConcurrentJSLocker locker(m_codeBlock->m_lock); > LinkBuffer patchBuffer(*this, m_codeBlock); >@@ -1347,7 +1401,7 @@ void JIT::privateCompilePutByValWithCachedId(ByValInfo* byValInfo, ReturnAddress > MacroAssembler::repatchCall(CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)), FunctionPtr<OperationPtrTag>(putKind == Direct ? operationDirectPutByValGeneric : operationPutByValGeneric)); > } > >-JIT::JumpList JIT::emitDoubleLoad(Instruction*, PatchableJump& badType) >+JIT::JumpList JIT::emitDoubleLoad(const Instruction*, PatchableJump& badType) > { > #if USE(JSVALUE64) > RegisterID base = regT0; >@@ -1372,7 +1426,7 @@ JIT::JumpList JIT::emitDoubleLoad(Instruction*, PatchableJump& badType) > return slowCases; > } > >-JIT::JumpList JIT::emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape) >+JIT::JumpList JIT::emitContiguousLoad(const Instruction*, PatchableJump& badType, IndexingType expectedShape) > { > #if USE(JSVALUE64) > RegisterID base = regT0; >@@ -1399,7 +1453,7 @@ JIT::JumpList JIT::emitContiguousLoad(Instruction*, PatchableJump& badType, Inde > return slowCases; > } > >-JIT::JumpList JIT::emitArrayStorageLoad(Instruction*, PatchableJump& badType) >+JIT::JumpList JIT::emitArrayStorageLoad(const Instruction*, PatchableJump& badType) > { > #if USE(JSVALUE64) > RegisterID base = regT0; >@@ -1429,7 +1483,7 @@ JIT::JumpList JIT::emitArrayStorageLoad(Instruction*, PatchableJump& badType) > return slowCases; > } > >-JIT::JumpList JIT::emitDirectArgumentsGetByVal(Instruction*, PatchableJump& badType) >+JIT::JumpList JIT::emitDirectArgumentsGetByVal(const Instruction*, PatchableJump& badType) > { > JumpList slowCases; > >@@ -1459,7 +1513,7 @@ JIT::JumpList 
JIT::emitDirectArgumentsGetByVal(Instruction*, PatchableJump& badT > return slowCases; > } > >-JIT::JumpList JIT::emitScopedArgumentsGetByVal(Instruction*, PatchableJump& badType) >+JIT::JumpList JIT::emitScopedArgumentsGetByVal(const Instruction*, PatchableJump& badType) > { > JumpList slowCases; > >@@ -1510,7 +1564,7 @@ JIT::JumpList JIT::emitScopedArgumentsGetByVal(Instruction*, PatchableJump& badT > return slowCases; > } > >-JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType type) >+JIT::JumpList JIT::emitIntTypedArrayGetByVal(const Instruction*, PatchableJump& badType, TypedArrayType type) > { > ASSERT(isInt(type)); > >@@ -1577,7 +1631,7 @@ JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badTyp > return slowCases; > } > >-JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType type) >+JIT::JumpList JIT::emitFloatTypedArrayGetByVal(const Instruction*, PatchableJump& badType, TypedArrayType type) > { > ASSERT(isFloat(type)); > >@@ -1625,12 +1679,13 @@ JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badT > return slowCases; > } > >-JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, TypedArrayType type) >+JIT::JumpList JIT::emitIntTypedArrayPutByVal(OpPutByVal bytecode, PatchableJump& badType, TypedArrayType type) > { >- ArrayProfile* profile = currentInstruction[4].u.arrayProfile; >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ ArrayProfile* profile = &metadata.arrayProfile; > ASSERT(isInt(type)); > >- int value = currentInstruction[3].u.operand; >+ int value = bytecode.value.offset(); > > #if USE(JSVALUE64) > RegisterID base = regT0; >@@ -1698,12 +1753,13 @@ JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, Pa > return slowCases; > } > >-JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& 
badType, TypedArrayType type) >+JIT::JumpList JIT::emitFloatTypedArrayPutByVal(OpPutByVal bytecode, PatchableJump& badType, TypedArrayType type) > { >- ArrayProfile* profile = currentInstruction[4].u.arrayProfile; >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ ArrayProfile* profile = &metadata.arrayProfile; > ASSERT(isFloat(type)); > >- int value = currentInstruction[3].u.operand; >+ int value = bytecode.value.offset(); > > #if USE(JSVALUE64) > RegisterID base = regT0; >diff --git a/Source/JavaScriptCore/jit/SlowPathCall.h b/Source/JavaScriptCore/jit/SlowPathCall.h >index 37c9427c74589b626c9fe3a9c00e8b03e38e3f97..ad784b1acce46b4cf554b9b71270e4fc7e168ce9 100644 >--- a/Source/JavaScriptCore/jit/SlowPathCall.h >+++ b/Source/JavaScriptCore/jit/SlowPathCall.h >@@ -34,7 +34,7 @@ namespace JSC { > > class JITSlowPathCall { > public: >- JITSlowPathCall(JIT* jit, Instruction* pc, SlowPathFunction slowPathFunction) >+ JITSlowPathCall(JIT* jit, const Instruction* pc, SlowPathFunction slowPathFunction) > : m_jit(jit) > , m_slowPathFunction(slowPathFunction) > , m_pc(pc) >@@ -84,7 +84,7 @@ public: > private: > JIT* m_jit; > SlowPathFunction m_slowPathFunction; >- Instruction* m_pc; >+ const Instruction* m_pc; > }; > > } // namespace JS >diff --git a/Source/JavaScriptCore/llint/LLIntData.cpp b/Source/JavaScriptCore/llint/LLIntData.cpp >index c93d15c74c58ff5d96aa0aad943413f085a3d92e..1f83be8de0ae83cba450e1aa6b5e59545a232c15 100644 >--- a/Source/JavaScriptCore/llint/LLIntData.cpp >+++ b/Source/JavaScriptCore/llint/LLIntData.cpp >@@ -43,11 +43,13 @@ > > namespace JSC { namespace LLInt { > >-Instruction Data::s_exceptionInstructions[maxOpcodeLength + 1] = { }; >+// TODO >+// Instruction Data::s_exceptionInstructions[maxOpcodeLength + 1] = { }; > Opcode Data::s_opcodeMap[numOpcodeIDs] = { }; >+Opcode Data::s_opcodeMapWide[numOpcodeIDs] = { }; > > #if ENABLE(JIT) >-extern "C" void llint_entry(void*); >+extern "C" void llint_entry(void*, void*); > #endif > > void initialize() 
>@@ -56,14 +58,16 @@ void initialize() > CLoop::initialize(); > > #else // ENABLE(JIT) >- llint_entry(&Data::s_opcodeMap); >+ llint_entry(&Data::s_opcodeMap, &Data::s_opcodeMapWide); > >- for (int i = 0; i < numOpcodeIDs; ++i) >+ for (int i = 0; i < numOpcodeIDs; ++i) { > Data::s_opcodeMap[i] = tagCodePtr(Data::s_opcodeMap[i], BytecodePtrTag); >+ Data::s_opcodeMapWide[i] = tagCodePtr(Data::s_opcodeMap[i], BytecodePtrTag); >+ } > >- void* handler = Data::s_opcodeMap[llint_throw_from_slow_path_trampoline]; >- for (int i = 0; i < maxOpcodeLength + 1; ++i) >- Data::s_exceptionInstructions[i].u.pointer = handler; >+ //void* handler = Data::s_opcodeMap[llint_throw_from_slow_path_trampoline]; >+ //for (int i = 0; i < maxOpcodeLength + 1; ++i) >+ //Data::s_exceptionInstructions[i].u.pointer = handler; > #endif // ENABLE(JIT) > } > >diff --git a/Source/JavaScriptCore/llint/LLIntData.h b/Source/JavaScriptCore/llint/LLIntData.h >index be58c00ae5c66ac30581ae3d4849428e5bb301d0..df4b8f4b5875f3fa8d3103152f5e1fbcac90cdc9 100644 >--- a/Source/JavaScriptCore/llint/LLIntData.h >+++ b/Source/JavaScriptCore/llint/LLIntData.h >@@ -43,12 +43,14 @@ typedef void (*LLIntCode)(); > namespace LLInt { > > class Data { >+ > public: > static void performAssertions(VM&); > > private: >- static Instruction s_exceptionInstructions[maxOpcodeLength + 1]; >+ //static Instruction s_exceptionInstructions[maxOpcodeLength + 1]; > static Opcode s_opcodeMap[numOpcodeIDs]; >+ static Opcode s_opcodeMapWide[numOpcodeIDs]; > > friend void initialize(); > >@@ -63,7 +65,8 @@ void initialize(); > > inline Instruction* exceptionInstructions() > { >- return Data::s_exceptionInstructions; >+ return nullptr; >+ // return Data::s_exceptionInstructions; > } > > inline Opcode* opcodeMap() >@@ -83,9 +86,7 @@ inline Opcode getOpcode(OpcodeID id) > template<PtrTag tag> > ALWAYS_INLINE MacroAssemblerCodePtr<tag> getCodePtr(OpcodeID opcodeID) > { >- void* address = reinterpret_cast<void*>(getOpcode(opcodeID)); >- address = 
retagCodePtr<BytecodePtrTag, tag>(address); >- return MacroAssemblerCodePtr<tag>::createFromExecutableAddress(address); >+ return MacroAssemblerCodePtr<tag>::createFromExecutableAddress(Data::s_opcodeMap[opcodeID]); > } > > template<PtrTag tag> >@@ -109,7 +110,7 @@ ALWAYS_INLINE LLIntCode getCodeFunctionPtr(OpcodeID opcodeID) > #else > ALWAYS_INLINE void* getCodePtr(OpcodeID id) > { >- return reinterpret_cast<void*>(getOpcode(id)); >+ return Data::s_opcodeMap[id]; > } > #endif > >diff --git a/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp >index 961b27c2f58981990612ded4d74eca9caab14709..d7019f7dd948aea1926fdebb1d15642f03e5d3c6 100644 >--- a/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp >+++ b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp >@@ -20,7 +20,7 @@ > * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY > * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT > * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE >- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. >+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. > */ > > #include "config.h" >@@ -48,6 +48,7 @@ > #include "JSString.h" > #include "JSTypeInfo.h" > #include "JumpTable.h" >+#include "LLIntData.h" > #include "LLIntOfflineAsmConfig.h" > #include "MarkedSpace.h" > #include "NativeExecutable.h" >diff --git a/Source/JavaScriptCore/llint/LLIntSettingsExtractor.cpp b/Source/JavaScriptCore/llint/LLIntSettingsExtractor.cpp >new file mode 100644 >index 0000000000000000000000000000000000000000..d5e53a24d1ed93c711648c61e1af7faf9019f303 >--- /dev/null >+++ b/Source/JavaScriptCore/llint/LLIntSettingsExtractor.cpp >@@ -0,0 +1,39 @@ >+/* >+ * Copyright (C) 2012-2018 Apple Inc. All rights reserved. 
>+ * >+ * Redistribution and use in source and binary forms, with or without >+ * modification, are permitted provided that the following conditions >+ * are met: >+ * 1. Redistributions of source code must retain the above copyright >+ * notice, this list of conditions and the following disclaimer. >+ * 2. Redistributions in binary form must reproduce the above copyright >+ * notice, this list of conditions and the following disclaimer in the >+ * documentation and/or other materials provided with the distribution. >+ * >+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY >+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE >+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR >+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, >+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, >+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR >+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY >+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE >+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. >+ */ >+ >+#include "config.h" >+ >+#include "LLIntOfflineAsmConfig.h" >+ >+int main(int, char**) >+{ >+ // Out of an abundance of caution, make sure that LLIntSettingsExtractor::dummy() is live, >+ // and the extractorTable is live, too. 
>+#include "LLIntDesiredSettings.h" >+ printf("%p\n", extractorTable); >+ return 0; >+} >+ >+ >diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp >index f2e411f8da89cfa10a3832e41cc1c5a650f456d1..0a7608d6fb0c8077481f142b8250a3e5bb434710 100644 >--- a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp >+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp >@@ -57,6 +57,7 @@ > #include "LLIntCommon.h" > #include "LLIntData.h" > #include "LLIntExceptions.h" >+#include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h" > #include "LowLevelInterpreter.h" > #include "ModuleProgramCodeBlock.h" > #include "ObjectConstructor.h" >@@ -93,8 +94,8 @@ namespace JSC { namespace LLInt { > LLINT_BEGIN_NO_SET_PC(); \ > LLINT_SET_PC_FOR_STUBS() > >-#define LLINT_OP(index) (exec->uncheckedR(pc[index].u.operand)) >-#define LLINT_OP_C(index) (exec->r(pc[index].u.operand)) >+#define LLINT_OP(__r) (exec->uncheckedR(__r.offset())) >+#define LLINT_OP_C(__r) (exec->r(__r.offset())) > > #define LLINT_RETURN_TWO(first, second) do { \ > return encodeResult(first, second); \ >@@ -121,42 +122,41 @@ namespace JSC { namespace LLInt { > LLINT_END_IMPL(); \ > } while (false) > >-#define LLINT_BRANCH(opcode, condition) do { \ >+#define LLINT_BRANCH(condition) do { \ > bool __b_condition = (condition); \ > LLINT_CHECK_EXCEPTION(); \ > if (__b_condition) \ >- pc += pc[OPCODE_LENGTH(opcode) - 1].u.operand; \ >+ pc += bytecode.target; \ > else \ >- pc += OPCODE_LENGTH(opcode); \ >+ pc += pc->size(); \ > LLINT_END_IMPL(); \ > } while (false) > > #define LLINT_RETURN(value) do { \ > JSValue __r_returnValue = (value); \ > LLINT_CHECK_EXCEPTION(); \ >- LLINT_OP(1) = __r_returnValue; \ >+ LLINT_OP(bytecode.dst) = __r_returnValue; \ > LLINT_END_IMPL(); \ > } while (false) > > #define LLINT_RETURN_WITH_PC_ADJUSTMENT(value, pcAdjustment) do { \ > JSValue __r_returnValue = (value); \ > LLINT_CHECK_EXCEPTION(); \ >- LLINT_OP(1) = __r_returnValue; \ >+ 
LLINT_OP(bytecode.dst) = __r_returnValue; \ > pc += (pcAdjustment); \ > LLINT_END_IMPL(); \ > } while (false) > >-#define LLINT_RETURN_PROFILED(opcode, value) do { \ >+#define LLINT_RETURN_PROFILED(value) do { \ > JSValue __rp_returnValue = (value); \ > LLINT_CHECK_EXCEPTION(); \ >- LLINT_OP(1) = __rp_returnValue; \ >- LLINT_PROFILE_VALUE(opcode, __rp_returnValue); \ >+ LLINT_OP(bytecode.dst) = __rp_returnValue; \ >+ LLINT_PROFILE_VALUE(__rp_returnValue); \ > LLINT_END_IMPL(); \ > } while (false) > >-#define LLINT_PROFILE_VALUE(opcode, value) do { \ >- pc[OPCODE_LENGTH(opcode) - 1].u.profile->m_buckets[0] = \ >- JSValue::encode(value); \ >+#define LLINT_PROFILE_VALUE(value) do { \ >+ bytecode.metadata(exec).profile.m_buckets[0] = JSValue::encode(value); \ > } while (false) > > #define LLINT_CALL_END_IMPL(exec, callTarget, callTargetTag) \ >@@ -226,51 +226,46 @@ template<typename... Types> void slowPathLogF(const char*, const Types&...) { } > > #endif // LLINT_TRACING > >-extern "C" SlowPathReturnType llint_trace_operand(ExecState* exec, Instruction* pc, int fromWhere, int operand) >+// TODO >+extern "C" SlowPathReturnType llint_trace_operand(ExecState* exec, const Instruction* pc, int fromWhere, int /*operand*/) > { > if (!Options::traceLLIntExecution()) > LLINT_END_IMPL(); > > LLINT_BEGIN(); >- dataLogF("<%p> %p / %p: executing bc#%zu, op#%u: Trace(%d): %d: %d\n", >+ dataLogF("<%p> %p / %p: executing bc#%zu, op#%u: Trace(%d)\n", > &Thread::current(), > exec->codeBlock(), > exec, > static_cast<intptr_t>(exec->codeBlock()->bytecodeOffset(pc)), >- Interpreter::getOpcodeID(pc[0].u.opcode), >- fromWhere, >- operand, >- pc[operand].u.operand); >+ pc->opcodeID(), >+ fromWhere); > LLINT_END(); > } > >-extern "C" SlowPathReturnType llint_trace_value(ExecState* exec, Instruction* pc, int fromWhere, int operand) >+// TODO >+extern "C" SlowPathReturnType llint_trace_value(ExecState* exec, const Instruction* pc, int fromWhere, int /*operand*/) > { > if 
(!Options::traceLLIntExecution()) > LLINT_END_IMPL(); > >- JSValue value = LLINT_OP_C(operand).jsValue(); >- union { >- struct { >- uint32_t tag; >- uint32_t payload; >- } bits; >- EncodedJSValue asValue; >- } u; >- u.asValue = JSValue::encode(value); >+ //JSValue value = LLINT_OP_C(operand).jsValue(); >+ //union { >+ //struct { >+ //uint32_t tag; >+ //uint32_t payload; >+ //} bits; >+ //EncodedJSValue asValue; >+ //} u; >+ //u.asValue = JSValue::encode(value); > dataLogF( >- "<%p> %p / %p: executing bc#%zu, op#%u: Trace(%d): %d: %d: %08x:%08x: %s\n", >+ "<%p> %p / %p: executing bc#%zu, op#%u: Trace(%d)\n", > &Thread::current(), > exec->codeBlock(), > exec, > static_cast<intptr_t>(exec->codeBlock()->bytecodeOffset(pc)), >- Interpreter::getOpcodeID(pc[0].u.opcode), >- fromWhere, >- operand, >- pc[operand].u.operand, >- u.bits.tag, >- u.bits.payload, >- toCString(value).data()); >+ pc->opcodeID(), >+ fromWhere); > LLINT_END_IMPL(); > } > >@@ -327,13 +322,14 @@ LLINT_SLOW_PATH_DECL(trace) > if (!Options::traceLLIntExecution()) > LLINT_END_IMPL(); > >- OpcodeID opcodeID = Interpreter::getOpcodeID(pc[0].u.opcode); >+ OpcodeID opcodeID = pc->opcodeID(); > dataLogF("<%p> %p / %p: executing bc#%zu, %s, pc = %p\n", > &Thread::current(), > exec->codeBlock(), > exec, > static_cast<intptr_t>(exec->codeBlock()->bytecodeOffset(pc)), >- opcodeNames[opcodeID], pc); >+ pc->name(), >+ pc); > if (opcodeID == op_enter) { > dataLogF("Frame will eventually return to %p\n", exec->returnPC().value()); > *removeCodePtrTag<volatile char*>(exec->returnPC().value()); >@@ -403,7 +399,7 @@ inline bool jitCompileAndSetHeuristics(CodeBlock* codeBlock, ExecState* exec, un > } > } > >-static SlowPathReturnType entryOSR(ExecState* exec, Instruction*, CodeBlock* codeBlock, const char *name, EntryKind kind) >+static SlowPathReturnType entryOSR(ExecState* exec, const Instruction*, CodeBlock* codeBlock, const char *name, EntryKind kind) > { > if (Options::verboseOSR()) { > dataLog( >@@ -426,7 +422,7 @@ 
static SlowPathReturnType entryOSR(ExecState* exec, Instruction*, CodeBlock* cod > LLINT_RETURN_TWO(codeBlock->jitCode()->addressForCall(MustCheckArity).executableAddress(), 0); > } > #else // ENABLE(JIT) >-static SlowPathReturnType entryOSR(ExecState* exec, Instruction*, CodeBlock* codeBlock, const char*, EntryKind) >+static SlowPathReturnType entryOSR(ExecState* exec, const Instruction*, CodeBlock* codeBlock, const char*, EntryKind) > { > codeBlock->dontJITAnytimeSoon(); > LLINT_RETURN_TWO(0, exec); >@@ -572,25 +568,32 @@ LLINT_SLOW_PATH_DECL(stack_check) > LLINT_SLOW_PATH_DECL(slow_path_new_object) > { > LLINT_BEGIN(); >- LLINT_RETURN(constructEmptyObject(exec, pc[3].u.objectAllocationProfile->structure())); >+ auto bytecode = pc->as<OpNewObject>(); >+ auto& metadata = bytecode.metadata(exec); >+ LLINT_RETURN(constructEmptyObject(exec, metadata.allocationProfile.structure())); > } > > LLINT_SLOW_PATH_DECL(slow_path_new_array) > { > LLINT_BEGIN(); >- LLINT_RETURN(constructArrayNegativeIndexed(exec, pc[4].u.arrayAllocationProfile, bitwise_cast<JSValue*>(&LLINT_OP(2)), pc[3].u.operand)); >+ auto bytecode = pc->as<OpNewArray>(); >+ auto& metadata = bytecode.metadata(exec); >+ LLINT_RETURN(constructArrayNegativeIndexed(exec, &metadata.allocationProfile, bitwise_cast<JSValue*>(&LLINT_OP(bytecode.argv)), bytecode.argc)); > } > > LLINT_SLOW_PATH_DECL(slow_path_new_array_with_size) > { > LLINT_BEGIN(); >- LLINT_RETURN(constructArrayWithSizeQuirk(exec, pc[3].u.arrayAllocationProfile, exec->lexicalGlobalObject(), LLINT_OP_C(2).jsValue())); >+ auto bytecode = pc->as<OpNewArrayWithSize>(); >+ auto& metadata = bytecode.metadata(exec); >+ LLINT_RETURN(constructArrayWithSizeQuirk(exec, &metadata.allocationProfile, exec->lexicalGlobalObject(), LLINT_OP_C(bytecode.length).jsValue())); > } > > LLINT_SLOW_PATH_DECL(slow_path_new_regexp) > { > LLINT_BEGIN(); >- RegExp* regExp = jsCast<RegExp*>(LLINT_OP_C(2).jsValue()); >+ auto bytecode = pc->as<OpNewRegexp>(); >+ RegExp* regExp = 
jsCast<RegExp*>(LLINT_OP_C(bytecode.regexp).jsValue()); > ASSERT(regExp->isValid()); > LLINT_RETURN(RegExpObject::create(vm, exec->lexicalGlobalObject()->regExpStructure(), regExp)); > } >@@ -598,8 +601,9 @@ LLINT_SLOW_PATH_DECL(slow_path_new_regexp) > LLINT_SLOW_PATH_DECL(slow_path_instanceof) > { > LLINT_BEGIN(); >- JSValue value = LLINT_OP_C(2).jsValue(); >- JSValue proto = LLINT_OP_C(3).jsValue(); >+ auto bytecode = pc->as<OpInstanceof>(); >+ JSValue value = LLINT_OP_C(bytecode.value).jsValue(); >+ JSValue proto = LLINT_OP_C(bytecode.prototype).jsValue(); > LLINT_RETURN(jsBoolean(JSObject::defaultHasInstance(exec, value, proto))); > } > >@@ -607,9 +611,10 @@ LLINT_SLOW_PATH_DECL(slow_path_instanceof_custom) > { > LLINT_BEGIN(); > >- JSValue value = LLINT_OP_C(2).jsValue(); >- JSValue constructor = LLINT_OP_C(3).jsValue(); >- JSValue hasInstanceValue = LLINT_OP_C(4).jsValue(); >+ auto bytecode = pc->as<OpInstanceofCustom>(); >+ JSValue value = LLINT_OP_C(bytecode.value).jsValue(); >+ JSValue constructor = LLINT_OP_C(bytecode.constructor).jsValue(); >+ JSValue hasInstanceValue = LLINT_OP_C(bytecode.hasInstanceValue).jsValue(); > > ASSERT(constructor.isObject()); > ASSERT(hasInstanceValue != exec->lexicalGlobalObject()->functionProtoHasInstanceSymbolFunction() || !constructor.getObject()->structure(vm)->typeInfo().implementsDefaultHasInstance()); >@@ -621,23 +626,25 @@ LLINT_SLOW_PATH_DECL(slow_path_instanceof_custom) > LLINT_SLOW_PATH_DECL(slow_path_try_get_by_id) > { > LLINT_BEGIN(); >+ auto bytecode = pc->as<OpTryGetById>(); > CodeBlock* codeBlock = exec->codeBlock(); >- const Identifier& ident = codeBlock->identifier(pc[3].u.operand); >- JSValue baseValue = LLINT_OP_C(2).jsValue(); >+ const Identifier& ident = codeBlock->identifier(bytecode.property); >+ JSValue baseValue = LLINT_OP_C(bytecode.base).jsValue(); > PropertySlot slot(baseValue, PropertySlot::PropertySlot::InternalMethodType::VMInquiry); > > baseValue.getPropertySlot(exec, ident, slot); > JSValue 
result = slot.getPureResult(); > >- LLINT_RETURN_PROFILED(op_try_get_by_id, result); >+ LLINT_RETURN_PROFILED(result); > } > > LLINT_SLOW_PATH_DECL(slow_path_get_by_id_direct) > { > LLINT_BEGIN(); >+ auto bytecode = pc->as<OpGetByIdDirect>(); > CodeBlock* codeBlock = exec->codeBlock(); >- const Identifier& ident = codeBlock->identifier(pc[3].u.operand); >- JSValue baseValue = LLINT_OP_C(2).jsValue(); >+ const Identifier& ident = codeBlock->identifier(bytecode.property); >+ JSValue baseValue = LLINT_OP_C(bytecode.base).jsValue(); > PropertySlot slot(baseValue, PropertySlot::PropertySlot::InternalMethodType::GetOwnProperty); > > bool found = baseValue.getOwnPropertySlot(exec, ident, slot); >@@ -646,8 +653,9 @@ LLINT_SLOW_PATH_DECL(slow_path_get_by_id_direct) > LLINT_CHECK_EXCEPTION(); > > if (!LLINT_ALWAYS_ACCESS_SLOW && slot.isCacheable()) { >+ auto& metadata = bytecode.metadata(exec); > { >- StructureID oldStructureID = pc[4].u.structureID; >+ StructureID oldStructureID = metadata.structure; > if (oldStructureID) { > Structure* a = vm.heap.structureIDTable().get(oldStructureID); > Structure* b = baseValue.asCell()->structure(vm); >@@ -663,8 +671,8 @@ LLINT_SLOW_PATH_DECL(slow_path_get_by_id_direct) > Structure* structure = baseCell->structure(vm); > if (slot.isValue()) { > // Start out by clearing out the old cache. 
>- pc[4].u.pointer = nullptr; // old structure >- pc[5].u.pointer = nullptr; // offset >+ metadata.structure = 0; >+ metadata.offset = 0; > > if (structure->propertyAccessesAreCacheable() > && !structure->needImpurePropertyWatchpoint()) { >@@ -672,17 +680,17 @@ LLINT_SLOW_PATH_DECL(slow_path_get_by_id_direct) > > ConcurrentJSLocker locker(codeBlock->m_lock); > >- pc[4].u.structureID = structure->id(); >- pc[5].u.operand = slot.cachedOffset(); >+ metadata.structure = structure->id(); >+ metadata.offset = slot.cachedOffset(); > } > } > } > >- LLINT_RETURN_PROFILED(op_get_by_id_direct, result); >+ LLINT_RETURN_PROFILED(result); > } > > >-static void setupGetByIdPrototypeCache(ExecState* exec, VM& vm, Instruction* pc, JSCell* baseCell, PropertySlot& slot, const Identifier& ident) >+static void setupGetByIdPrototypeCache(ExecState* exec, VM& vm, const Instruction* pc, OpGetById::Metadata& metadata, JSCell* baseCell, PropertySlot& slot, const Identifier& ident) > { > CodeBlock* codeBlock = exec->codeBlock(); > Structure* structure = baseCell->structure(vm); >@@ -716,7 +724,7 @@ static void setupGetByIdPrototypeCache(ExecState* exec, VM& vm, Instruction* pc, > return; > if (condition.condition().kind() == PropertyCondition::Presence) > offset = condition.condition().offset(); >- watchpoints.add(condition, pc)->install(vm); >+ watchpoints.add(condition, metadata)->install(vm); > } > > ASSERT((offset == invalidOffset) == slot.isUnset()); >@@ -726,44 +734,46 @@ static void setupGetByIdPrototypeCache(ExecState* exec, VM& vm, Instruction* pc, > ConcurrentJSLocker locker(codeBlock->m_lock); > > if (slot.isUnset()) { >- pc[0].u.opcode = LLInt::getOpcode(op_get_by_id_unset); >- pc[4].u.structureID = structure->id(); >+ metadata.mode = GetByIdMode::Unset; >+ metadata.structure = structure->id(); > return; > } > ASSERT(slot.isValue()); > >- pc[0].u.opcode = LLInt::getOpcode(op_get_by_id_proto_load); >- pc[4].u.structureID = structure->id(); >- pc[5].u.operand = offset; >+ 
metadata.mode = GetByIdMode::ProtoLoad; >+ metadata.structure = structure->id(); >+ metadata.modeMetadata.protoLoadMode.cachedOffset = offset; >+ metadata.modeMetadata.protoLoadMode.cachedSlot = slot.slotBase();; > // We know that this pointer will remain valid because it will be cleared by either a watchpoint fire or > // during GC when we clear the LLInt caches. >- pc[6].u.pointer = slot.slotBase(); >+ metadata.modeMetadata.protoLoadMode.cachedSlot = slot.slotBase(); > } > > > LLINT_SLOW_PATH_DECL(slow_path_get_by_id) > { > LLINT_BEGIN(); >+ auto bytecode = pc->as<OpGetById>(); >+ auto& metadata = bytecode.metadata(exec); > CodeBlock* codeBlock = exec->codeBlock(); >- const Identifier& ident = codeBlock->identifier(pc[3].u.operand); >- JSValue baseValue = LLINT_OP_C(2).jsValue(); >+ const Identifier& ident = codeBlock->identifier(bytecode.property); >+ JSValue baseValue = LLINT_OP_C(bytecode.base).jsValue(); > PropertySlot slot(baseValue, PropertySlot::PropertySlot::InternalMethodType::Get); > > JSValue result = baseValue.get(exec, ident, slot); > LLINT_CHECK_EXCEPTION(); >- LLINT_OP(1) = result; >+ LLINT_OP(bytecode.dst) = result; > > if (!LLINT_ALWAYS_ACCESS_SLOW > && baseValue.isCell() > && slot.isCacheable()) { >- > { >- StructureID oldStructureID = pc[4].u.structureID; >+ StructureID oldStructureID = metadata.structure; > if (oldStructureID) { >- auto opcode = Interpreter::getOpcodeID(pc[0]); >- if (opcode == op_get_by_id >- || opcode == op_get_by_id_unset >- || opcode == op_get_by_id_proto_load) { >+ auto mode = metadata.mode; >+ if (mode == GetByIdMode::Default >+ || mode == GetByIdMode::Unset >+ || mode == GetByIdMode::ProtoLoad) { > Structure* a = vm.heap.structureIDTable().get(oldStructureID); > Structure* b = baseValue.asCell()->structure(vm); > >@@ -779,12 +789,11 @@ LLINT_SLOW_PATH_DECL(slow_path_get_by_id) > Structure* structure = baseCell->structure(vm); > if (slot.isValue() && slot.slotBase() == baseValue) { > // Start out by clearing out the old 
cache. >- pc[0].u.opcode = LLInt::getOpcode(op_get_by_id); >- pc[4].u.pointer = nullptr; // old structure >- pc[5].u.pointer = nullptr; // offset >+ metadata.mode = GetByIdMode::Default; >+ //metadata.modeMetadata.clear(); > > // Prevent the prototype cache from ever happening. >- pc[7].u.operand = 0; >+ metadata.hitCountForLLIntCaching = 0; > > if (structure->propertyAccessesAreCacheable() > && !structure->needImpurePropertyWatchpoint()) { >@@ -792,62 +801,51 @@ LLINT_SLOW_PATH_DECL(slow_path_get_by_id) > > ConcurrentJSLocker locker(codeBlock->m_lock); > >- pc[4].u.structureID = structure->id(); >- pc[5].u.operand = slot.cachedOffset(); >+ metadata.structure = structure->id(); >+ metadata.modeMetadata.defaultMode.cachedOffset = slot.cachedOffset(); > } >- } else if (UNLIKELY(pc[7].u.operand && (slot.isValue() || slot.isUnset()))) { >+ } else if (UNLIKELY(metadata.hitCountForLLIntCaching && (slot.isValue() || slot.isUnset()))) { > ASSERT(slot.slotBase() != baseValue); > >- if (!(--pc[7].u.operand)) >- setupGetByIdPrototypeCache(exec, vm, pc, baseCell, slot, ident); >+ if (!(--metadata.hitCountForLLIntCaching)) >+ setupGetByIdPrototypeCache(exec, vm, pc, metadata, baseCell, slot, ident); > } > } else if (!LLINT_ALWAYS_ACCESS_SLOW > && isJSArray(baseValue) > && ident == vm.propertyNames->length) { >- pc[0].u.opcode = LLInt::getOpcode(op_get_array_length); >- ArrayProfile* arrayProfile = codeBlock->getOrAddArrayProfile(codeBlock->bytecodeOffset(pc)); >- arrayProfile->observeStructure(baseValue.asCell()->structure(vm)); >- pc[4].u.arrayProfile = arrayProfile; >+ metadata.mode = GetByIdMode::ArrayLength; >+ metadata.modeMetadata.arrayLengthMode.arrayProfile.observeStructure(baseValue.asCell()->structure(vm)); > > // Prevent the prototype cache from ever happening. 
>- pc[7].u.operand = 0; >+ metadata.hitCountForLLIntCaching = 0; > } > >- pc[OPCODE_LENGTH(op_get_by_id) - 1].u.profile->m_buckets[0] = JSValue::encode(result); >+ LLINT_PROFILE_VALUE(result); > LLINT_END(); > } > >-LLINT_SLOW_PATH_DECL(slow_path_get_arguments_length) >-{ >- LLINT_BEGIN(); >- CodeBlock* codeBlock = exec->codeBlock(); >- const Identifier& ident = codeBlock->identifier(pc[3].u.operand); >- JSValue baseValue = LLINT_OP(2).jsValue(); >- PropertySlot slot(baseValue, PropertySlot::InternalMethodType::Get); >- LLINT_RETURN(baseValue.get(exec, ident, slot)); >-} >- > LLINT_SLOW_PATH_DECL(slow_path_put_by_id) > { > LLINT_BEGIN(); >+ auto bytecode = pc->as<OpPutById>(); >+ auto& metadata = bytecode.metadata(exec); > CodeBlock* codeBlock = exec->codeBlock(); >- const Identifier& ident = codeBlock->identifier(pc[2].u.operand); >+ const Identifier& ident = codeBlock->identifier(bytecode.property); > >- JSValue baseValue = LLINT_OP_C(1).jsValue(); >+ JSValue baseValue = LLINT_OP_C(bytecode.base).jsValue(); > PutPropertySlot slot(baseValue, codeBlock->isStrictMode(), codeBlock->putByIdContext()); >- if (pc[8].u.putByIdFlags & PutByIdIsDirect) >- CommonSlowPaths::putDirectWithReify(vm, exec, asObject(baseValue), ident, LLINT_OP_C(3).jsValue(), slot); >+ if (metadata.flags & PutByIdIsDirect) >+ CommonSlowPaths::putDirectWithReify(vm, exec, asObject(baseValue), ident, LLINT_OP_C(bytecode.value).jsValue(), slot); > else >- baseValue.putInline(exec, ident, LLINT_OP_C(3).jsValue(), slot); >+ baseValue.putInline(exec, ident, LLINT_OP_C(bytecode.value).jsValue(), slot); > LLINT_CHECK_EXCEPTION(); > > if (!LLINT_ALWAYS_ACCESS_SLOW > && baseValue.isCell() > && slot.isCacheablePut()) { > >- > { >- StructureID oldStructureID = pc[4].u.structureID; >+ StructureID oldStructureID = metadata.oldStructure; > if (oldStructureID) { > Structure* a = vm.heap.structureIDTable().get(oldStructureID); > Structure* b = baseValue.asCell()->structure(vm); >@@ -862,12 +860,11 @@ 
LLINT_SLOW_PATH_DECL(slow_path_put_by_id) > } > > // Start out by clearing out the old cache. >- pc[4].u.pointer = nullptr; // old structure >- pc[5].u.pointer = nullptr; // offset >- pc[6].u.pointer = nullptr; // new structure >- pc[7].u.pointer = nullptr; // structure chain >- pc[8].u.putByIdFlags = >- static_cast<PutByIdFlags>(pc[8].u.putByIdFlags & PutByIdPersistentFlagsMask); >+ metadata.oldStructure = 0; >+ metadata.offset = 0; >+ metadata.newStructure = 0; >+ metadata.structureChain.clear(); >+ metadata.flags = static_cast<PutByIdFlags>(metadata.flags & PutByIdPersistentFlagsMask); > > JSCell* baseCell = baseValue.asCell(); > Structure* structure = baseCell->structure(vm); >@@ -888,25 +885,25 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_id) > auto result = normalizePrototypeChain(exec, baseCell, sawPolyProto); > if (result != InvalidPrototypeChain && !sawPolyProto) { > ASSERT(structure->previousID()->isObject()); >- pc[4].u.structureID = structure->previousID()->id(); >- pc[5].u.operand = slot.cachedOffset(); >- pc[6].u.structureID = structure->id(); >- if (!(pc[8].u.putByIdFlags & PutByIdIsDirect)) { >+ metadata.oldStructure = structure->previousID()->id(); >+ metadata.offset = slot.cachedOffset(); >+ metadata.newStructure = structure->id(); >+ if (!(metadata.flags & PutByIdIsDirect)) { > StructureChain* chain = structure->prototypeChain(exec, asObject(baseCell)); > ASSERT(chain); >- pc[7].u.structureChain.set(vm, codeBlock, chain); >+ metadata.structureChain.set(vm, codeBlock, chain); > } >- pc[8].u.putByIdFlags = static_cast<PutByIdFlags>( >- pc[8].u.putByIdFlags | >+ metadata.flags = static_cast<PutByIdFlags>( >+ metadata.flags | > structure->inferredTypeDescriptorFor(ident.impl()).putByIdFlags()); > } > } > } else { > structure->didCachePropertyReplacement(vm, slot.cachedOffset()); >- pc[4].u.structureID = structure->id(); >- pc[5].u.operand = slot.cachedOffset(); >- pc[8].u.putByIdFlags = static_cast<PutByIdFlags>( >- pc[8].u.putByIdFlags | >+ 
metadata.oldStructure = structure->id(); >+ metadata.offset = slot.cachedOffset(); >+ metadata.flags = static_cast<PutByIdFlags>( >+ metadata.flags | > structure->inferredTypeDescriptorFor(ident.impl()).putByIdFlags()); > } > } >@@ -918,18 +915,21 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_id) > LLINT_SLOW_PATH_DECL(slow_path_del_by_id) > { > LLINT_BEGIN(); >+ auto bytecode = pc->as<OpDelById>(); > CodeBlock* codeBlock = exec->codeBlock(); >- JSObject* baseObject = LLINT_OP_C(2).jsValue().toObject(exec); >+ JSObject* baseObject = LLINT_OP_C(bytecode.base).jsValue().toObject(exec); > LLINT_CHECK_EXCEPTION(); >- bool couldDelete = baseObject->methodTable(vm)->deleteProperty(baseObject, exec, codeBlock->identifier(pc[3].u.operand)); >+ bool couldDelete = baseObject->methodTable(vm)->deleteProperty(baseObject, exec, codeBlock->identifier(bytecode.property)); > LLINT_CHECK_EXCEPTION(); > if (!couldDelete && codeBlock->isStrictMode()) > LLINT_THROW(createTypeError(exec, UnableToDeletePropertyError)); > LLINT_RETURN(jsBoolean(couldDelete)); > } > >-static ALWAYS_INLINE JSValue getByVal(VM& vm, ExecState* exec, Instruction* pc, JSValue baseValue, JSValue subscript) >+static ALWAYS_INLINE JSValue getByVal(VM& vm, ExecState* exec, OpGetByVal bytecode) > { >+ JSValue baseValue = LLINT_OP_C(bytecode.base).jsValue(); >+ JSValue subscript = LLINT_OP_C(bytecode.property).jsValue(); > auto scope = DECLARE_THROW_SCOPE(vm); > > if (LIKELY(baseValue.isCell() && subscript.isString())) { >@@ -944,7 +944,8 @@ static ALWAYS_INLINE JSValue getByVal(VM& vm, ExecState* exec, Instruction* pc, > > if (subscript.isUInt32()) { > uint32_t i = subscript.asUInt32(); >- ArrayProfile* arrayProfile = pc[4].u.arrayProfile; >+ auto& metadata = bytecode.metadata(exec); >+ ArrayProfile* arrayProfile = &metadata.arrayProfile; > > if (isJSString(baseValue)) { > if (asString(baseValue)->canGetIndex(i)) { >@@ -985,16 +986,18 @@ static ALWAYS_INLINE JSValue getByVal(VM& vm, ExecState* exec, Instruction* pc, > 
LLINT_SLOW_PATH_DECL(slow_path_get_by_val) > { > LLINT_BEGIN(); >- LLINT_RETURN_PROFILED(op_get_by_val, getByVal(vm, exec, pc, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue())); >+ auto bytecode = pc->as<OpGetByVal>(); >+ LLINT_RETURN_PROFILED(getByVal(vm, exec, bytecode)); > } > > LLINT_SLOW_PATH_DECL(slow_path_put_by_val) > { > LLINT_BEGIN(); > >- JSValue baseValue = LLINT_OP_C(1).jsValue(); >- JSValue subscript = LLINT_OP_C(2).jsValue(); >- JSValue value = LLINT_OP_C(3).jsValue(); >+ auto bytecode = pc->as<OpPutByVal>(); >+ JSValue baseValue = LLINT_OP_C(bytecode.base).jsValue(); >+ JSValue subscript = LLINT_OP_C(bytecode.property).jsValue(); >+ JSValue value = LLINT_OP_C(bytecode.value).jsValue(); > bool isStrictMode = exec->codeBlock()->isStrictMode(); > > if (LIKELY(subscript.isUInt32())) { >@@ -1022,9 +1025,10 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_val_direct) > { > LLINT_BEGIN(); > >- JSValue baseValue = LLINT_OP_C(1).jsValue(); >- JSValue subscript = LLINT_OP_C(2).jsValue(); >- JSValue value = LLINT_OP_C(3).jsValue(); >+ auto bytecode = pc->as<OpPutByValDirect>(); >+ JSValue baseValue = LLINT_OP_C(bytecode.base).jsValue(); >+ JSValue subscript = LLINT_OP_C(bytecode.property).jsValue(); >+ JSValue value = LLINT_OP_C(bytecode.value).jsValue(); > RELEASE_ASSERT(baseValue.isObject()); > JSObject* baseObject = asObject(baseValue); > bool isStrictMode = exec->codeBlock()->isStrictMode(); >@@ -1061,11 +1065,12 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_val_direct) > LLINT_SLOW_PATH_DECL(slow_path_del_by_val) > { > LLINT_BEGIN(); >- JSValue baseValue = LLINT_OP_C(2).jsValue(); >+ auto bytecode = pc->as<OpDelByVal>(); >+ JSValue baseValue = LLINT_OP_C(bytecode.base).jsValue(); > JSObject* baseObject = baseValue.toObject(exec); > LLINT_CHECK_EXCEPTION(); > >- JSValue subscript = LLINT_OP_C(3).jsValue(); >+ JSValue subscript = LLINT_OP_C(bytecode.property).jsValue(); > > bool couldDelete; > >@@ -1089,58 +1094,62 @@ LLINT_SLOW_PATH_DECL(slow_path_del_by_val) > 
LLINT_SLOW_PATH_DECL(slow_path_put_getter_by_id) > { > LLINT_BEGIN(); >- ASSERT(LLINT_OP(1).jsValue().isObject()); >- JSObject* baseObj = asObject(LLINT_OP(1).jsValue()); >+ auto bytecode = pc->as<OpPutGetterById>(); >+ ASSERT(LLINT_OP(bytecode.base).jsValue().isObject()); >+ JSObject* baseObj = asObject(LLINT_OP(bytecode.base).jsValue()); > >- unsigned options = pc[3].u.operand; >+ unsigned options = bytecode.attributes; > >- JSValue getter = LLINT_OP(4).jsValue(); >+ JSValue getter = LLINT_OP(bytecode.accessor).jsValue(); > ASSERT(getter.isObject()); > >- baseObj->putGetter(exec, exec->codeBlock()->identifier(pc[2].u.operand), asObject(getter), options); >+ baseObj->putGetter(exec, exec->codeBlock()->identifier(bytecode.property), asObject(getter), options); > LLINT_END(); > } > > LLINT_SLOW_PATH_DECL(slow_path_put_setter_by_id) > { > LLINT_BEGIN(); >- ASSERT(LLINT_OP(1).jsValue().isObject()); >- JSObject* baseObj = asObject(LLINT_OP(1).jsValue()); >+ auto bytecode = pc->as<OpPutSetterById>(); >+ ASSERT(LLINT_OP(bytecode.base).jsValue().isObject()); >+ JSObject* baseObj = asObject(LLINT_OP(bytecode.base).jsValue()); > >- unsigned options = pc[3].u.operand; >+ unsigned options = bytecode.attributes; > >- JSValue setter = LLINT_OP(4).jsValue(); >+ JSValue setter = LLINT_OP(bytecode.accessor).jsValue(); > ASSERT(setter.isObject()); > >- baseObj->putSetter(exec, exec->codeBlock()->identifier(pc[2].u.operand), asObject(setter), options); >+ baseObj->putSetter(exec, exec->codeBlock()->identifier(bytecode.property), asObject(setter), options); > LLINT_END(); > } > > LLINT_SLOW_PATH_DECL(slow_path_put_getter_setter_by_id) > { > LLINT_BEGIN(); >- ASSERT(LLINT_OP(1).jsValue().isObject()); >- JSObject* baseObject = asObject(LLINT_OP(1).jsValue()); >+ auto bytecode = pc->as<OpPutGetterSetterById>(); >+ ASSERT(LLINT_OP(bytecode.base).jsValue().isObject()); >+ JSObject* baseObject = asObject(LLINT_OP(bytecode.base).jsValue()); > >- JSValue getter = LLINT_OP(4).jsValue(); >- 
JSValue setter = LLINT_OP(5).jsValue(); >+ JSValue getter = LLINT_OP(bytecode.getter).jsValue(); >+ JSValue setter = LLINT_OP(bytecode.setter).jsValue(); > ASSERT(getter.isObject() || setter.isObject()); > GetterSetter* accessor = GetterSetter::create(vm, exec->lexicalGlobalObject(), getter, setter); > >- CommonSlowPaths::putDirectAccessorWithReify(vm, exec, baseObject, exec->codeBlock()->identifier(pc[2].u.operand), accessor, pc[3].u.operand); >+ CommonSlowPaths::putDirectAccessorWithReify(vm, exec, baseObject, exec->codeBlock()->identifier(bytecode.property), accessor, bytecode.attributes); > LLINT_END(); > } > > LLINT_SLOW_PATH_DECL(slow_path_put_getter_by_val) > { > LLINT_BEGIN(); >- ASSERT(LLINT_OP(1).jsValue().isObject()); >- JSObject* baseObj = asObject(LLINT_OP(1).jsValue()); >- JSValue subscript = LLINT_OP_C(2).jsValue(); >+ auto bytecode = pc->as<OpPutGetterByVal>(); >+ ASSERT(LLINT_OP(bytecode.base).jsValue().isObject()); >+ JSObject* baseObj = asObject(LLINT_OP(bytecode.base).jsValue()); >+ JSValue subscript = LLINT_OP_C(bytecode.property).jsValue(); > >- unsigned options = pc[3].u.operand; >+ unsigned options = bytecode.attributes; > >- JSValue getter = LLINT_OP(4).jsValue(); >+ JSValue getter = LLINT_OP(bytecode.accessor).jsValue(); > ASSERT(getter.isObject()); > > auto property = subscript.toPropertyKey(exec); >@@ -1153,13 +1162,14 @@ LLINT_SLOW_PATH_DECL(slow_path_put_getter_by_val) > LLINT_SLOW_PATH_DECL(slow_path_put_setter_by_val) > { > LLINT_BEGIN(); >- ASSERT(LLINT_OP(1).jsValue().isObject()); >- JSObject* baseObj = asObject(LLINT_OP(1).jsValue()); >- JSValue subscript = LLINT_OP_C(2).jsValue(); >+ auto bytecode = pc->as<OpPutSetterByVal>(); >+ ASSERT(LLINT_OP(bytecode.base).jsValue().isObject()); >+ JSObject* baseObj = asObject(LLINT_OP(bytecode.base).jsValue()); >+ JSValue subscript = LLINT_OP_C(bytecode.property).jsValue(); > >- unsigned options = pc[3].u.operand; >+ unsigned options = bytecode.attributes; > >- JSValue setter = 
LLINT_OP(4).jsValue(); >+ JSValue setter = LLINT_OP(bytecode.accessor).jsValue(); > ASSERT(setter.isObject()); > > auto property = subscript.toPropertyKey(exec); >@@ -1172,98 +1182,113 @@ LLINT_SLOW_PATH_DECL(slow_path_put_setter_by_val) > LLINT_SLOW_PATH_DECL(slow_path_jtrue) > { > LLINT_BEGIN(); >- LLINT_BRANCH(op_jtrue, LLINT_OP_C(1).jsValue().toBoolean(exec)); >+ auto bytecode = pc->as<OpJtrue>(); >+ LLINT_BRANCH(LLINT_OP_C(bytecode.condition).jsValue().toBoolean(exec)); > } > > LLINT_SLOW_PATH_DECL(slow_path_jfalse) > { > LLINT_BEGIN(); >- LLINT_BRANCH(op_jfalse, !LLINT_OP_C(1).jsValue().toBoolean(exec)); >+ auto bytecode = pc->as<OpJfalse>(); >+ LLINT_BRANCH(!LLINT_OP_C(bytecode.condition).jsValue().toBoolean(exec)); > } > > LLINT_SLOW_PATH_DECL(slow_path_jless) > { > LLINT_BEGIN(); >- LLINT_BRANCH(op_jless, jsLess<true>(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue())); >+ auto bytecode = pc->as<OpJless>(); >+ LLINT_BRANCH(jsLess<true>(exec, LLINT_OP_C(bytecode.lhs).jsValue(), LLINT_OP_C(bytecode.rhs).jsValue())); > } > > LLINT_SLOW_PATH_DECL(slow_path_jnless) > { > LLINT_BEGIN(); >- LLINT_BRANCH(op_jnless, !jsLess<true>(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue())); >+ auto bytecode = pc->as<OpJnless>(); >+ LLINT_BRANCH(!jsLess<true>(exec, LLINT_OP_C(bytecode.lhs).jsValue(), LLINT_OP_C(bytecode.rhs).jsValue())); > } > > LLINT_SLOW_PATH_DECL(slow_path_jgreater) > { > LLINT_BEGIN(); >- LLINT_BRANCH(op_jgreater, jsLess<false>(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(1).jsValue())); >+ auto bytecode = pc->as<OpJgreater>(); >+ LLINT_BRANCH(jsLess<false>(exec, LLINT_OP_C(bytecode.rhs).jsValue(), LLINT_OP_C(bytecode.lhs).jsValue())); > } > > LLINT_SLOW_PATH_DECL(slow_path_jngreater) > { > LLINT_BEGIN(); >- LLINT_BRANCH(op_jngreater, !jsLess<false>(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(1).jsValue())); >+ auto bytecode = pc->as<OpJngreater>(); >+ LLINT_BRANCH(!jsLess<false>(exec, LLINT_OP_C(bytecode.rhs).jsValue(), 
LLINT_OP_C(bytecode.lhs).jsValue())); > } > > LLINT_SLOW_PATH_DECL(slow_path_jlesseq) > { > LLINT_BEGIN(); >- LLINT_BRANCH(op_jlesseq, jsLessEq<true>(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue())); >+ auto bytecode = pc->as<OpJlesseq>(); >+ LLINT_BRANCH(jsLessEq<true>(exec, LLINT_OP_C(bytecode.lhs).jsValue(), LLINT_OP_C(bytecode.rhs).jsValue())); > } > > LLINT_SLOW_PATH_DECL(slow_path_jnlesseq) > { > LLINT_BEGIN(); >- LLINT_BRANCH(op_jnlesseq, !jsLessEq<true>(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue())); >+ auto bytecode = pc->as<OpJnlesseq>(); >+ LLINT_BRANCH(!jsLessEq<true>(exec, LLINT_OP_C(bytecode.lhs).jsValue(), LLINT_OP_C(bytecode.rhs).jsValue())); > } > > LLINT_SLOW_PATH_DECL(slow_path_jgreatereq) > { > LLINT_BEGIN(); >- LLINT_BRANCH(op_jgreatereq, jsLessEq<false>(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(1).jsValue())); >+ auto bytecode = pc->as<OpJgreatereq>(); >+ LLINT_BRANCH(jsLessEq<false>(exec, LLINT_OP_C(bytecode.rhs).jsValue(), LLINT_OP_C(bytecode.lhs).jsValue())); > } > > LLINT_SLOW_PATH_DECL(slow_path_jngreatereq) > { > LLINT_BEGIN(); >- LLINT_BRANCH(op_jngreatereq, !jsLessEq<false>(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(1).jsValue())); >+ auto bytecode = pc->as<OpJngreatereq>(); >+ LLINT_BRANCH(!jsLessEq<false>(exec, LLINT_OP_C(bytecode.rhs).jsValue(), LLINT_OP_C(bytecode.lhs).jsValue())); > } > > LLINT_SLOW_PATH_DECL(slow_path_jeq) > { > LLINT_BEGIN(); >- LLINT_BRANCH(op_jeq, JSValue::equal(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue())); >+ auto bytecode = pc->as<OpJeq>(); >+ LLINT_BRANCH(JSValue::equal(exec, LLINT_OP_C(bytecode.lhs).jsValue(), LLINT_OP_C(bytecode.rhs).jsValue())); > } > > LLINT_SLOW_PATH_DECL(slow_path_jneq) > { > LLINT_BEGIN(); >- LLINT_BRANCH(op_jneq, !JSValue::equal(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue())); >+ auto bytecode = pc->as<OpJneq>(); >+ LLINT_BRANCH(!JSValue::equal(exec, LLINT_OP_C(bytecode.lhs).jsValue(), LLINT_OP_C(bytecode.rhs).jsValue())); > } > > 
LLINT_SLOW_PATH_DECL(slow_path_jstricteq) > { > LLINT_BEGIN(); >- LLINT_BRANCH(op_jstricteq, JSValue::strictEqual(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue())); >+ auto bytecode = pc->as<OpJstricteq>(); >+ LLINT_BRANCH(JSValue::strictEqual(exec, LLINT_OP_C(bytecode.lhs).jsValue(), LLINT_OP_C(bytecode.rhs).jsValue())); > } > > LLINT_SLOW_PATH_DECL(slow_path_jnstricteq) > { > LLINT_BEGIN(); >- LLINT_BRANCH(op_jnstricteq, !JSValue::strictEqual(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue())); >+ auto bytecode = pc->as<OpJnstricteq>(); >+ LLINT_BRANCH(!JSValue::strictEqual(exec, LLINT_OP_C(bytecode.lhs).jsValue(), LLINT_OP_C(bytecode.rhs).jsValue())); > } > > LLINT_SLOW_PATH_DECL(slow_path_switch_imm) > { > LLINT_BEGIN(); >- JSValue scrutinee = LLINT_OP_C(3).jsValue(); >+ auto bytecode = pc->as<OpSwitchImm>(); >+ JSValue scrutinee = LLINT_OP_C(bytecode.scrutinee).jsValue(); > ASSERT(scrutinee.isDouble()); > double value = scrutinee.asDouble(); > int32_t intValue = static_cast<int32_t>(value); >- int defaultOffset = pc[2].u.operand; >+ int defaultOffset = bytecode.defaultOffset; > if (value == intValue) { > CodeBlock* codeBlock = exec->codeBlock(); >- pc += codeBlock->switchJumpTable(pc[1].u.operand).offsetForValue(intValue, defaultOffset); >+ pc += codeBlock->switchJumpTable(bytecode.tableIndex).offsetForValue(intValue, defaultOffset); > } else > pc += defaultOffset; > LLINT_END(); >@@ -1272,27 +1297,29 @@ LLINT_SLOW_PATH_DECL(slow_path_switch_imm) > LLINT_SLOW_PATH_DECL(slow_path_switch_char) > { > LLINT_BEGIN(); >- JSValue scrutinee = LLINT_OP_C(3).jsValue(); >+ auto bytecode = pc->as<OpSwitchChar>(); >+ JSValue scrutinee = LLINT_OP_C(bytecode.scrutinee).jsValue(); > ASSERT(scrutinee.isString()); > JSString* string = asString(scrutinee); > ASSERT(string->length() == 1); >- int defaultOffset = pc[2].u.operand; >+ int defaultOffset = bytecode.defaultOffset; > StringImpl* impl = string->value(exec).impl(); > CodeBlock* codeBlock = exec->codeBlock(); 
>- pc += codeBlock->switchJumpTable(pc[1].u.operand).offsetForValue((*impl)[0], defaultOffset); >+ pc += codeBlock->switchJumpTable(bytecode.tableIndex).offsetForValue((*impl)[0], defaultOffset); > LLINT_END(); > } > > LLINT_SLOW_PATH_DECL(slow_path_switch_string) > { > LLINT_BEGIN(); >- JSValue scrutinee = LLINT_OP_C(3).jsValue(); >- int defaultOffset = pc[2].u.operand; >+ auto bytecode = pc->as<OpSwitchString>(); >+ JSValue scrutinee = LLINT_OP_C(bytecode.scrutinee).jsValue(); >+ int defaultOffset = bytecode.defaultOffset; > if (!scrutinee.isString()) > pc += defaultOffset; > else { > CodeBlock* codeBlock = exec->codeBlock(); >- pc += codeBlock->stringSwitchJumpTable(pc[1].u.operand).offsetForValue(asString(scrutinee)->value(exec).impl(), defaultOffset); >+ pc += codeBlock->stringSwitchJumpTable(bytecode.tableIndex).offsetForValue(asString(scrutinee)->value(exec).impl(), defaultOffset); > } > LLINT_END(); > } >@@ -1300,46 +1327,51 @@ LLINT_SLOW_PATH_DECL(slow_path_switch_string) > LLINT_SLOW_PATH_DECL(slow_path_new_func) > { > LLINT_BEGIN(); >+ auto bytecode = pc->as<OpNewFunc>(); > CodeBlock* codeBlock = exec->codeBlock(); >- JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope(); >+ JSScope* scope = exec->uncheckedR(bytecode.scope).Register::scope(); > slowPathLogF("Creating function!\n"); >- LLINT_RETURN(JSFunction::create(vm, codeBlock->functionDecl(pc[3].u.operand), scope)); >+ LLINT_RETURN(JSFunction::create(vm, codeBlock->functionDecl(bytecode.functionDecl), scope)); > } > > LLINT_SLOW_PATH_DECL(slow_path_new_generator_func) > { > LLINT_BEGIN(); >+ auto bytecode = pc->as<OpNewGeneratorFunc>(); > CodeBlock* codeBlock = exec->codeBlock(); >- JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope(); >+ JSScope* scope = exec->uncheckedR(bytecode.scope).Register::scope(); > slowPathLogF("Creating function!\n"); >- LLINT_RETURN(JSGeneratorFunction::create(vm, codeBlock->functionDecl(pc[3].u.operand), scope)); >+ 
LLINT_RETURN(JSGeneratorFunction::create(vm, codeBlock->functionDecl(bytecode.functionDecl), scope)); > } > > LLINT_SLOW_PATH_DECL(slow_path_new_async_func) > { > LLINT_BEGIN(); >+ auto bytecode = pc->as<OpNewAsyncFunc>(); > CodeBlock* codeBlock = exec->codeBlock(); >- JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope(); >+ JSScope* scope = exec->uncheckedR(bytecode.scope).Register::scope(); > slowPathLogF("Creating async function!\n"); >- LLINT_RETURN(JSAsyncFunction::create(vm, codeBlock->functionDecl(pc[3].u.operand), scope)); >+ LLINT_RETURN(JSAsyncFunction::create(vm, codeBlock->functionDecl(bytecode.functionDecl), scope)); > } > > LLINT_SLOW_PATH_DECL(slow_path_new_async_generator_func) > { > LLINT_BEGIN(); >+ auto bytecode = pc->as<OpNewAsyncGeneratorFunc>(); > CodeBlock* codeBlock = exec->codeBlock(); >- JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope(); >+ JSScope* scope = exec->uncheckedR(bytecode.scope).Register::scope(); > slowPathLogF("Creating async generator function!\n"); >- LLINT_RETURN(JSAsyncGeneratorFunction::create(vm, codeBlock->functionDecl(pc[3].u.operand), scope)); >+ LLINT_RETURN(JSAsyncGeneratorFunction::create(vm, codeBlock->functionDecl(bytecode.functionDecl), scope)); > } > > LLINT_SLOW_PATH_DECL(slow_path_new_func_exp) > { > LLINT_BEGIN(); > >+ auto bytecode = pc->as<OpNewFuncExp>(); > CodeBlock* codeBlock = exec->codeBlock(); >- JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope(); >- FunctionExecutable* executable = codeBlock->functionExpr(pc[3].u.operand); >+ JSScope* scope = exec->uncheckedR(bytecode.scope).Register::scope(); >+ FunctionExecutable* executable = codeBlock->functionExpr(bytecode.functionDecl); > > LLINT_RETURN(JSFunction::create(vm, executable, scope)); > } >@@ -1348,9 +1380,10 @@ LLINT_SLOW_PATH_DECL(slow_path_new_generator_func_exp) > { > LLINT_BEGIN(); > >+ auto bytecode = pc->as<OpNewGeneratorFuncExp>(); > CodeBlock* codeBlock = exec->codeBlock(); >- JSScope* 
scope = exec->uncheckedR(pc[2].u.operand).Register::scope(); >- FunctionExecutable* executable = codeBlock->functionExpr(pc[3].u.operand); >+ JSScope* scope = exec->uncheckedR(bytecode.scope).Register::scope(); >+ FunctionExecutable* executable = codeBlock->functionExpr(bytecode.functionDecl); > > LLINT_RETURN(JSGeneratorFunction::create(vm, executable, scope)); > } >@@ -1359,9 +1392,10 @@ LLINT_SLOW_PATH_DECL(slow_path_new_async_func_exp) > { > LLINT_BEGIN(); > >+ auto bytecode = pc->as<OpNewAsyncFuncExp>(); > CodeBlock* codeBlock = exec->codeBlock(); >- JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope(); >- FunctionExecutable* executable = codeBlock->functionExpr(pc[3].u.operand); >+ JSScope* scope = exec->uncheckedR(bytecode.scope).Register::scope(); >+ FunctionExecutable* executable = codeBlock->functionExpr(bytecode.functionDecl); > > LLINT_RETURN(JSAsyncFunction::create(vm, executable, scope)); > } >@@ -1370,9 +1404,10 @@ LLINT_SLOW_PATH_DECL(slow_path_new_async_generator_func_exp) > { > LLINT_BEGIN(); > >+ auto bytecode = pc->as<OpNewAsyncGeneratorFuncExp>(); > CodeBlock* codeBlock = exec->codeBlock(); >- JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope(); >- FunctionExecutable* executable = codeBlock->functionExpr(pc[3].u.operand); >+ JSScope* scope = exec->uncheckedR(bytecode.scope).Register::scope(); >+ FunctionExecutable* executable = codeBlock->functionExpr(bytecode.functionDecl); > > LLINT_RETURN(JSAsyncGeneratorFunction::create(vm, executable, scope)); > } >@@ -1380,16 +1415,15 @@ LLINT_SLOW_PATH_DECL(slow_path_new_async_generator_func_exp) > LLINT_SLOW_PATH_DECL(slow_path_set_function_name) > { > LLINT_BEGIN(); >- JSFunction* func = jsCast<JSFunction*>(LLINT_OP(1).Register::unboxedCell()); >- JSValue name = LLINT_OP_C(2).Register::jsValue(); >+ auto bytecode = pc->as<OpSetFunctionName>(); >+ JSFunction* func = jsCast<JSFunction*>(LLINT_OP(bytecode.function).Register::unboxedCell()); >+ JSValue name = 
LLINT_OP_C(bytecode.name).Register::jsValue(); > func->setFunctionName(exec, name); > LLINT_END(); > } > >-static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc, JSValue callee, CodeSpecializationKind kind) >+static SlowPathReturnType handleHostCall(ExecState* execCallee, JSValue callee, CodeSpecializationKind kind) > { >- UNUSED_PARAM(pc); >- > slowPathLog("Performing host call.\n"); > > ExecState* exec = execCallee->callerFrame(); >@@ -1442,7 +1476,7 @@ static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc, > LLINT_CALL_THROW(exec, createNotAConstructorError(exec, callee)); > } > >-inline SlowPathReturnType setUpCall(ExecState* execCallee, Instruction* pc, CodeSpecializationKind kind, JSValue calleeAsValue, LLIntCallLinkInfo* callLinkInfo = 0) >+inline SlowPathReturnType setUpCall(ExecState* execCallee, CodeSpecializationKind kind, JSValue calleeAsValue, LLIntCallLinkInfo* callLinkInfo = nullptr) > { > ExecState* exec = execCallee->callerFrame(); > VM& vm = exec->vm(); >@@ -1473,7 +1507,7 @@ inline SlowPathReturnType setUpCall(ExecState* execCallee, Instruction* pc, Code > LLINT_CALL_RETURN(exec, execCallee, codePtr.executableAddress(), JSEntryPtrTag); > } > throwScope.release(); >- return handleHostCall(execCallee, pc, calleeAsValue, kind); >+ return handleHostCall(execCallee, calleeAsValue, kind); > } > JSFunction* callee = jsCast<JSFunction*>(calleeAsFunctionCell); > JSScope* scope = callee->scopeUnchecked(); >@@ -1525,7 +1559,8 @@ inline SlowPathReturnType setUpCall(ExecState* execCallee, Instruction* pc, Code > LLINT_CALL_RETURN(exec, execCallee, codePtr.executableAddress(), JSEntryPtrTag); > } > >-inline SlowPathReturnType genericCall(ExecState* exec, Instruction* pc, CodeSpecializationKind kind) >+template<typename Op> >+inline SlowPathReturnType genericCall(ExecState* exec, Op&& bytecode, CodeSpecializationKind kind) > { > // This needs to: > // - Set up a call frame. 
>@@ -1533,30 +1568,30 @@ inline SlowPathReturnType genericCall(ExecState* exec, Instruction* pc, CodeSpec > // - If possible, link the call's inline cache. > // - Return a tuple of machine code address to call and the new call frame. > >- JSValue calleeAsValue = LLINT_OP_C(2).jsValue(); >+ JSValue calleeAsValue = LLINT_OP_C(bytecode.callee).jsValue(); > >- ExecState* execCallee = exec - pc[4].u.operand; >+ ExecState* execCallee = exec - bytecode.argv; > >- execCallee->setArgumentCountIncludingThis(pc[3].u.operand); >+ execCallee->setArgumentCountIncludingThis(bytecode.argc); > execCallee->uncheckedR(CallFrameSlot::callee) = calleeAsValue; > execCallee->setCallerFrame(exec); > >- ASSERT(pc[5].u.callLinkInfo); >- return setUpCall(execCallee, pc, kind, calleeAsValue, pc[5].u.callLinkInfo); >+ auto& metadata = bytecode.metadata(exec); >+ return setUpCall(execCallee, kind, calleeAsValue, &metadata.callLinkInfo); > } > > LLINT_SLOW_PATH_DECL(slow_path_call) > { > LLINT_BEGIN_NO_SET_PC(); > throwScope.release(); >- return genericCall(exec, pc, CodeForCall); >+ return genericCall(exec, pc->as<OpCall>(), CodeForCall); > } > > LLINT_SLOW_PATH_DECL(slow_path_construct) > { > LLINT_BEGIN_NO_SET_PC(); > throwScope.release(); >- return genericCall(exec, pc, CodeForConstruct); >+ return genericCall(exec, pc->as<OpConstruct>(), CodeForConstruct); > } > > LLINT_SLOW_PATH_DECL(slow_path_size_frame_for_varargs) >@@ -1565,9 +1600,36 @@ LLINT_SLOW_PATH_DECL(slow_path_size_frame_for_varargs) > // This needs to: > // - Set up a call frame while respecting the variable arguments. 
> >- unsigned numUsedStackSlots = -pc[5].u.operand; >- unsigned length = sizeFrameForVarargs(exec, vm, >- LLINT_OP_C(4).jsValue(), numUsedStackSlots, pc[6].u.operand); >+ unsigned numUsedStackSlots; >+ JSValue arguments; >+ int firstVarArg; >+ // TODO: simplify this >+ switch (pc->opcodeID()) { >+ case op_call_varargs: { >+ auto bytecode = pc->as<OpCallVarargs>(); >+ numUsedStackSlots = -bytecode.firstFree.offset(); >+ arguments = LLINT_OP_C(bytecode.arguments).jsValue(); >+ firstVarArg = bytecode.firstVarArg; >+ break; >+ } >+ case op_tail_call_varargs: { >+ auto bytecode = pc->as<OpTailCallVarargs>(); >+ numUsedStackSlots = -bytecode.firstFree.offset(); >+ arguments = LLINT_OP_C(bytecode.arguments).jsValue(); >+ firstVarArg = bytecode.firstVarArg; >+ break; >+ } >+ case op_construct_varargs: { >+ auto bytecode = pc->as<OpConstructVarargs>(); >+ numUsedStackSlots = -bytecode.firstFree.offset(); >+ arguments = LLINT_OP_C(bytecode.arguments).jsValue(); >+ firstVarArg = bytecode.firstVarArg; >+ break; >+ } >+ default: >+ ASSERT_NOT_REACHED(); >+ } >+ unsigned length = sizeFrameForVarargs(exec, vm, arguments, numUsedStackSlots, firstVarArg); > LLINT_CALL_CHECK_EXCEPTION(exec, exec); > > ExecState* execCallee = calleeFrameForVarargs(exec, numUsedStackSlots, length + 1); >@@ -1583,7 +1645,8 @@ LLINT_SLOW_PATH_DECL(slow_path_size_frame_for_forward_arguments) > { > LLINT_BEGIN_NO_SET_PC(); > // This needs to: > // - Set up a call frame with the same arguments as the current frame. 
> >- unsigned numUsedStackSlots = -pc[5].u.operand; >+ auto bytecode = pc->as<OpTailCallForwardArguments>(); >+ unsigned numUsedStackSlots = -bytecode.firstFree.offset(); > > unsigned arguments = sizeFrameForForwardArguments(exec, vm, numUsedStackSlots); > LLINT_CALL_CHECK_EXCEPTION(exec, exec); >@@ -1601,55 +1664,58 @@ enum class SetArgumentsWith { > CurrentArguments > }; > >-inline SlowPathReturnType varargsSetup(ExecState* exec, Instruction* pc, CodeSpecializationKind kind, SetArgumentsWith set) >+template<typename Op> >+inline SlowPathReturnType varargsSetup(ExecState* exec, const Instruction* pc, CodeSpecializationKind kind, SetArgumentsWith set) > { > LLINT_BEGIN_NO_SET_PC(); > // This needs to: > // - Figure out what to call and compile it if necessary. > // - Return a tuple of machine code address to call and the new call frame. > >- JSValue calleeAsValue = LLINT_OP_C(2).jsValue(); >+ auto bytecode = pc->as<Op>(); >+ JSValue calleeAsValue = LLINT_OP_C(bytecode.callee).jsValue(); > > ExecState* execCallee = vm.newCallFrameReturnValue; > > if (set == SetArgumentsWith::Object) { >- setupVarargsFrameAndSetThis(exec, execCallee, LLINT_OP_C(3).jsValue(), LLINT_OP_C(4).jsValue(), pc[6].u.operand, vm.varargsLength); >+ setupVarargsFrameAndSetThis(exec, execCallee, LLINT_OP_C(bytecode.thisValue).jsValue(), LLINT_OP_C(bytecode.arguments).jsValue(), bytecode.firstVarArg, vm.varargsLength); > LLINT_CALL_CHECK_EXCEPTION(exec, exec); > } else >- setupForwardArgumentsFrameAndSetThis(exec, execCallee, LLINT_OP_C(3).jsValue(), vm.varargsLength); >+ setupForwardArgumentsFrameAndSetThis(exec, execCallee, LLINT_OP_C(bytecode.thisValue).jsValue(), vm.varargsLength); > > execCallee->setCallerFrame(exec); > execCallee->uncheckedR(CallFrameSlot::callee) = calleeAsValue; > exec->setCurrentVPC(pc); > > throwScope.release(); >- return setUpCall(execCallee, pc, kind, calleeAsValue); >+ return setUpCall(execCallee, kind, calleeAsValue); > } > > 
LLINT_SLOW_PATH_DECL(slow_path_call_varargs) > { >- return varargsSetup(exec, pc, CodeForCall, SetArgumentsWith::Object); >+ return varargsSetup<OpCallVarargs>(exec, pc, CodeForCall, SetArgumentsWith::Object); > } > > LLINT_SLOW_PATH_DECL(slow_path_tail_call_forward_arguments) > { >- return varargsSetup(exec, pc, CodeForCall, SetArgumentsWith::CurrentArguments); >+ return varargsSetup<OpTailCallForwardArguments>(exec, pc, CodeForCall, SetArgumentsWith::CurrentArguments); > } > > LLINT_SLOW_PATH_DECL(slow_path_construct_varargs) > { >- return varargsSetup(exec, pc, CodeForConstruct, SetArgumentsWith::Object); >+ return varargsSetup<OpConstructVarargs>(exec, pc, CodeForConstruct, SetArgumentsWith::Object); > } > > > LLINT_SLOW_PATH_DECL(slow_path_call_eval) > { > LLINT_BEGIN_NO_SET_PC(); >- JSValue calleeAsValue = LLINT_OP(2).jsValue(); >+ auto bytecode = pc->as<OpCallEval>(); >+ JSValue calleeAsValue = LLINT_OP(bytecode.callee).jsValue(); > >- ExecState* execCallee = exec - pc[4].u.operand; >+ ExecState* execCallee = exec - bytecode.argv; > >- execCallee->setArgumentCountIncludingThis(pc[3].u.operand); >+ execCallee->setArgumentCountIncludingThis(bytecode.argc); > execCallee->setCallerFrame(exec); > execCallee->uncheckedR(CallFrameSlot::callee) = calleeAsValue; > execCallee->setReturnPC(LLInt::getCodePtr<JSEntryPtrTag>(llint_generic_return_point).executableAddress()); >@@ -1658,7 +1724,7 @@ LLINT_SLOW_PATH_DECL(slow_path_call_eval) > > if (!isHostFunction(calleeAsValue, globalFuncEval)) { > throwScope.release(); >- return setUpCall(execCallee, pc, CodeForCall, calleeAsValue); >+ return setUpCall(execCallee, CodeForCall, calleeAsValue); > } > > vm.hostCallReturnValue = eval(execCallee); >@@ -1668,19 +1734,22 @@ LLINT_SLOW_PATH_DECL(slow_path_call_eval) > LLINT_SLOW_PATH_DECL(slow_path_strcat) > { > LLINT_BEGIN(); >- LLINT_RETURN(jsStringFromRegisterArray(exec, &LLINT_OP(2), pc[3].u.operand)); >+ auto bytecode = pc->as<OpStrcat>(); >+ 
LLINT_RETURN(jsStringFromRegisterArray(exec, &LLINT_OP(bytecode.src), bytecode.count)); > } > > LLINT_SLOW_PATH_DECL(slow_path_to_primitive) > { > LLINT_BEGIN(); >- LLINT_RETURN(LLINT_OP_C(2).jsValue().toPrimitive(exec)); >+ auto bytecode = pc->as<OpToPrimitive>(); >+ LLINT_RETURN(LLINT_OP_C(bytecode.src).jsValue().toPrimitive(exec)); > } > > LLINT_SLOW_PATH_DECL(slow_path_throw) > { > LLINT_BEGIN(); >- LLINT_THROW(LLINT_OP_C(1).jsValue()); >+ auto bytecode = pc->as<OpThrow>(); >+ LLINT_THROW(LLINT_OP_C(bytecode.value).jsValue()); > } > > LLINT_SLOW_PATH_DECL(slow_path_handle_traps) >@@ -1695,8 +1764,8 @@ LLINT_SLOW_PATH_DECL(slow_path_handle_traps) > LLINT_SLOW_PATH_DECL(slow_path_debug) > { > LLINT_BEGIN(); >- int debugHookType = pc[1].u.operand; >- vm.interpreter->debug(exec, static_cast<DebugHookType>(debugHookType)); >+ auto bytecode = pc->as<OpDebug>(); >+ vm.interpreter->debug(exec, bytecode.debugHookType); > > LLINT_END(); > } >@@ -1712,16 +1781,17 @@ LLINT_SLOW_PATH_DECL(slow_path_handle_exception) > LLINT_SLOW_PATH_DECL(slow_path_get_from_scope) > { > LLINT_BEGIN(); >- const Identifier& ident = exec->codeBlock()->identifier(pc[3].u.operand); >- JSObject* scope = jsCast<JSObject*>(LLINT_OP(2).jsValue()); >- GetPutInfo getPutInfo(pc[4].u.operand); >+ auto bytecode = pc->as<OpGetFromScope>(); >+ auto& metadata = bytecode.metadata(exec); >+ const Identifier& ident = exec->codeBlock()->identifier(bytecode.var); >+ JSObject* scope = jsCast<JSObject*>(LLINT_OP(bytecode.scope).jsValue()); > > // ModuleVar is always converted to ClosureVar for get_from_scope. 
>- ASSERT(getPutInfo.resolveType() != ModuleVar); >+ ASSERT(metadata.getPutInfo.resolveType() != ModuleVar); > > LLINT_RETURN(scope->getPropertySlot(exec, ident, [&] (bool found, PropertySlot& slot) -> JSValue { > if (!found) { >- if (getPutInfo.resolveMode() == ThrowIfNotFound) >+ if (metadata.getPutInfo.resolveMode() == ThrowIfNotFound) > return throwException(exec, throwScope, createUndefinedVariableError(exec, ident)); > return jsUndefined(); > } >@@ -1734,7 +1804,7 @@ LLINT_SLOW_PATH_DECL(slow_path_get_from_scope) > return throwException(exec, throwScope, createTDZError(exec)); > } > >- CommonSlowPaths::tryCacheGetFromScopeGlobal(exec, vm, pc, scope, slot, ident); >+ CommonSlowPaths::tryCacheGetFromScopeGlobal(exec, vm, bytecode, scope, slot, ident); > > if (!result) > return slot.getValue(exec, ident); >@@ -1746,20 +1816,21 @@ LLINT_SLOW_PATH_DECL(slow_path_put_to_scope) > { > LLINT_BEGIN(); > >+ auto bytecode = pc->as<OpPutToScope>(); >+ auto& metadata = bytecode.metadata(exec); > CodeBlock* codeBlock = exec->codeBlock(); >- const Identifier& ident = codeBlock->identifier(pc[2].u.operand); >- JSObject* scope = jsCast<JSObject*>(LLINT_OP(1).jsValue()); >- JSValue value = LLINT_OP_C(3).jsValue(); >- GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand); >- if (getPutInfo.resolveType() == LocalClosureVar) { >+ const Identifier& ident = codeBlock->identifier(bytecode.var); >+ JSObject* scope = jsCast<JSObject*>(LLINT_OP(bytecode.scope).jsValue()); >+ JSValue value = LLINT_OP_C(bytecode.value).jsValue(); >+ if (metadata.getPutInfo.resolveType() == LocalClosureVar) { > JSLexicalEnvironment* environment = jsCast<JSLexicalEnvironment*>(scope); >- environment->variableAt(ScopeOffset(pc[6].u.operand)).set(vm, environment, value); >+ environment->variableAt(ScopeOffset(bytecode.offset)).set(vm, environment, value); > > // Have to do this *after* the write, because if this puts the set into IsWatched, then we need > // to have already changed the value of the variable. 
Otherwise we might watch and constant-fold > // to the Undefined value from before the assignment. >- if (WatchpointSet* set = pc[5].u.watchpointSet) >- set->touch(vm, "Executed op_put_scope<LocalClosureVar>"); >+ if (metadata.watchpointSet) >+ metadata.watchpointSet->touch(vm, "Executed op_put_scope<LocalClosureVar>"); > LLINT_END(); > } > >@@ -1767,7 +1838,7 @@ LLINT_SLOW_PATH_DECL(slow_path_put_to_scope) > LLINT_CHECK_EXCEPTION(); > if (hasProperty > && scope->isGlobalLexicalEnvironment() >- && !isInitialization(getPutInfo.initializationMode())) { >+ && !isInitialization(metadata.getPutInfo.initializationMode())) { > // When we can't statically prove we need a TDZ check, we must perform the check on the slow path. > PropertySlot slot(scope, PropertySlot::InternalMethodType::Get); > JSGlobalLexicalEnvironment::getOwnPropertySlot(scope, exec, ident, slot); >@@ -1775,13 +1846,13 @@ LLINT_SLOW_PATH_DECL(slow_path_put_to_scope) > LLINT_THROW(createTDZError(exec)); > } > >- if (getPutInfo.resolveMode() == ThrowIfNotFound && !hasProperty) >+ if (metadata.getPutInfo.resolveMode() == ThrowIfNotFound && !hasProperty) > LLINT_THROW(createUndefinedVariableError(exec, ident)); > >- PutPropertySlot slot(scope, codeBlock->isStrictMode(), PutPropertySlot::UnknownContext, isInitialization(getPutInfo.initializationMode())); >+ PutPropertySlot slot(scope, codeBlock->isStrictMode(), PutPropertySlot::UnknownContext, isInitialization(metadata.getPutInfo.initializationMode())); > scope->methodTable(vm)->put(scope, exec, ident, value, slot); > >- CommonSlowPaths::tryCachePutToScopeGlobal(exec, codeBlock, pc, scope, getPutInfo, slot, ident); >+ CommonSlowPaths::tryCachePutToScopeGlobal(exec, codeBlock, bytecode, scope, slot, ident); > > LLINT_END(); > } >@@ -1800,7 +1871,8 @@ LLINT_SLOW_PATH_DECL(slow_path_log_shadow_chicken_prologue) > { > LLINT_BEGIN(); > >- JSScope* scope = exec->uncheckedR(pc[1].u.operand).Register::scope(); >+ auto bytecode = pc->as<OpLogShadowChickenPrologue>(); 
>+ JSScope* scope = exec->uncheckedR(bytecode.scope).Register::scope(); > vm.shadowChicken().log(vm, exec, ShadowChicken::Packet::prologue(exec->jsCallee(), exec, exec->callerFrame(), scope)); > > LLINT_END(); >@@ -1810,8 +1882,9 @@ LLINT_SLOW_PATH_DECL(slow_path_log_shadow_chicken_tail) > { > LLINT_BEGIN(); > >- JSValue thisValue = LLINT_OP(1).jsValue(); >- JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope(); >+ auto bytecode = pc->as<OpLogShadowChickenTail>(); >+ JSValue thisValue = LLINT_OP(bytecode.thisValue).jsValue(); >+ JSScope* scope = exec->uncheckedR(bytecode.scope).Register::scope(); > > #if USE(JSVALUE64) > CallSiteIndex callSiteIndex(exec->codeBlock()->bytecodeOffset(pc)); >@@ -1829,8 +1902,9 @@ LLINT_SLOW_PATH_DECL(slow_path_profile_catch) > > exec->codeBlock()->ensureCatchLivenessIsComputedForBytecodeOffset(exec->bytecodeOffset()); > >- ValueProfileAndOperandBuffer* buffer = static_cast<ValueProfileAndOperandBuffer*>(pc[3].u.pointer); >- buffer->forEach([&] (ValueProfileAndOperand& profile) { >+ auto bytecode = pc->as<OpCatch>(); >+ auto& metadata = bytecode.metadata(exec); >+ metadata.buffer->forEach([&] (ValueProfileAndOperand& profile) { > profile.m_profile.m_buckets[0] = JSValue::encode(exec->uncheckedR(profile.m_operand).jsValue()); > }); > >diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.h b/Source/JavaScriptCore/llint/LLIntSlowPaths.h >index 7cfeca7a816d1dc530a347e86fb9d5fd360bcb80..9cbd9ed95879117b49a8720b3443b4ddaa4bea66 100644 >--- a/Source/JavaScriptCore/llint/LLIntSlowPaths.h >+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.h >@@ -36,12 +36,12 @@ struct ProtoCallFrame; > > namespace LLInt { > >-extern "C" SlowPathReturnType llint_trace_operand(ExecState*, Instruction*, int fromWhere, int operand); >-extern "C" SlowPathReturnType llint_trace_value(ExecState*, Instruction*, int fromWhere, int operand); >+extern "C" SlowPathReturnType llint_trace_operand(ExecState*, const Instruction*, int fromWhere, int operand); 
>+extern "C" SlowPathReturnType llint_trace_value(ExecState*, const Instruction*, int fromWhere, int operand); > extern "C" void llint_write_barrier_slow(ExecState*, JSCell*) WTF_INTERNAL; > > #define LLINT_SLOW_PATH_DECL(name) \ >- extern "C" SlowPathReturnType llint_##name(ExecState* exec, Instruction* pc) >+ extern "C" SlowPathReturnType llint_##name(ExecState* exec, const Instruction* pc) > > #define LLINT_SLOW_PATH_HIDDEN_DECL(name) \ > LLINT_SLOW_PATH_DECL(name) WTF_INTERNAL >@@ -69,7 +69,6 @@ LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_instanceof_custom); > LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_try_get_by_id); > LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_by_id_direct); > LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_by_id); >-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_arguments_length); > LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_by_id); > LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_del_by_id); > LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_by_val); >diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm >index 88b80d37720c4251f8235d79cd15026dd5b9f1d5..fe405b1a3783ba79bc2f64b857353f6ba829b165 100644 >--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm >+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm >@@ -352,6 +352,50 @@ else > end > end > >+macro dispatchWide(advance) >+ addi advance * 4, PC >+ loadb [PB, PC, 1], t0 >+ loadp [OpcodeMapWide, t0], t0 >+ jmp t0, BytecodePtrTag >+end >+ >+macro dispatch(advance) >+ addi advance, PC >+ loadb [PB, PC, 1], t0 >+ loadp [OpcodeMap, t0], t0 >+ jmp t0, BytecodePtrTag >+end >+ >+macro dispatchIndirect(offset) >+ dispatch(offset[PB, PC, 1]) >+end >+ >+macro getOperandNarrow(offset, dst) >+ loadb offset[PB, PC, 1], dst >+end >+ >+macro getOperandWide(offset, dst) >+ loadis offset * 4[PB, PC, 1], dst >+end >+ >+macro commonOp(label, op, fn) >+_%label%: >+ traceExecution() >+ fn(getOperandNarrow, macro () dispatch(constexpr %op%_length) end) >+ >+_%label%_wide: >+ 
traceExecution() >+ fn(getOperandWide, macro () dispatch(constexpr %op%_wide_length) end) >+end >+ >+macro op(l, fn) >+ commonOp(l, l, fn) >+end >+ >+macro llintOp(l, fn) >+ commonOp(llint_%l%, l, fn) >+end >+ > if X86_64_WIN > const extraTempReg = t0 > else >@@ -1236,33 +1280,41 @@ else > end > end > >-# The PC base is in t1, as this is what _llint_entry leaves behind through >-# initPCRelative(t1) >+# The PC base is in t2, as this is what _llint_entry leaves behind through >+# initPCRelative(t2) > macro setEntryAddress(index, label) >+ setEntryAddressCommon(index, label, a0) >+end >+ >+macro setEntryAddressWide(index, label) >+ setEntryAddressCommon(index, label, a1) >+end >+ >+macro setEntryAddressCommon(index, label, map) > if X86_64 or X86_64_WIN >- leap (label - _relativePCBase)[t1], t3 >+ leap (label - _relativePCBase)[t2], t3 > move index, t4 >- storep t3, [a0, t4, 8] >+ storep t3, [map, t4, 8] > elsif X86 or X86_WIN >- leap (label - _relativePCBase)[t1], t3 >+ leap (label - _relativePCBase)[t2], t3 > move index, t4 >- storep t3, [a0, t4, 4] >+ storep t3, [map, t4, 4] > elsif ARM64 or ARM64E >- pcrtoaddr label, t1 >+ pcrtoaddr label, t2 > move index, t4 >- storep t1, [a0, t4, 8] >+ storep t2, [map, t4, 8] > elsif ARM or ARMv7 or ARMv7_TRADITIONAL > mvlbl (label - _relativePCBase), t4 >- addp t4, t1, t4 >+ addp t4, t2, t4 > move index, t3 >- storep t4, [a0, t3, 4] >+ storep t4, [map, t3, 4] > elsif MIPS > la label, t4 > la _relativePCBase, t3 > subp t3, t4 >- addp t4, t1, t4 >+ addp t4, t2, t4 > move index, t3 >- storep t4, [a0, t3, 4] >+ storep t4, [map, t3, 4] > end > end > >@@ -1273,8 +1325,13 @@ _llint_entry: > pushCalleeSaves() > if X86 or X86_WIN > loadp 20[sp], a0 >+ loadp 24[sp], a1 > end >- initPCRelative(t1) >+ >+ const OpcodeMap = a0 >+ const OpcodeMapWide = a1 >+ >+ initPCRelative(t2) > > # Include generated bytecode initialization file. 
> include InitBytecodes >@@ -1284,47 +1341,54 @@ _llint_entry: > ret > end > >-_llint_program_prologue: >+op(llint_program_prologue, macro (getOperand, disp__) > prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue) >- dispatch(0) >+ disp__() >+end) > > >-_llint_module_program_prologue: >+op(llint_module_program_prologue, macro (getOperand, disp__) > prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue) >- dispatch(0) >+ disp__() >+end) > > >-_llint_eval_prologue: >+op(llint_eval_prologue, macro (getOperand, disp__) > prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue) >- dispatch(0) >+ disp__() >+end) > > >-_llint_function_for_call_prologue: >+op(llint_function_for_call_prologue, macro (getOperand, disp__) > prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call, _llint_trace_prologue_function_for_call) > functionInitialization(0) >- dispatch(0) >+ disp__() >+end) > > >-_llint_function_for_construct_prologue: >+op(llint_function_for_construct_prologue, macro (getOperand, disp__) > prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct, _llint_trace_prologue_function_for_construct) > functionInitialization(1) >- dispatch(0) >+ disp__() >+end) > > >-_llint_function_for_call_arity_check: >+op(llint_function_for_call_arity_check, macro (getOperand, disp__) > prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call_arityCheck, _llint_trace_arityCheck_for_call) > functionArityCheck(.functionForCallBegin, _slow_path_call_arityCheck) > .functionForCallBegin: > functionInitialization(0) >- dispatch(0) >+ disp__() >+end) > > >-_llint_function_for_construct_arity_check: >+op(llint_function_for_construct_arity_check, macro (getOperand, disp__) > 
prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct_arityCheck, _llint_trace_arityCheck_for_construct) > functionArityCheck(.functionForConstructBegin, _slow_path_construct_arityCheck) > .functionForConstructBegin: > functionInitialization(1) >- dispatch(0) >+ disp__() >+end) > > > # Value-representation-specific code. >@@ -1336,374 +1400,378 @@ end > > > # Value-representation-agnostic code. >-_llint_op_create_direct_arguments: >- traceExecution() >+llintOp(op_create_direct_arguments, macro (getOperand, disp__) > callSlowPath(_slow_path_create_direct_arguments) >- dispatch(constexpr op_create_direct_arguments_length) >+ disp__() >+end) > > >-_llint_op_create_scoped_arguments: >- traceExecution() >+llintOp(op_create_scoped_arguments, macro (getOperand, disp__) > callSlowPath(_slow_path_create_scoped_arguments) >- dispatch(constexpr op_create_scoped_arguments_length) >+ disp__() >+end) > > >-_llint_op_create_cloned_arguments: >- traceExecution() >+llintOp(op_create_cloned_arguments, macro (getOperand, disp__) > callSlowPath(_slow_path_create_cloned_arguments) >- dispatch(constexpr op_create_cloned_arguments_length) >+ disp__() >+end) > > >-_llint_op_create_this: >- traceExecution() >+llintOp(op_create_this, macro (getOperand, disp__) > callSlowPath(_slow_path_create_this) >- dispatch(constexpr op_create_this_length) >+ disp__() >+end) > > >-_llint_op_new_object: >- traceExecution() >+llintOp(op_new_object, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_object) >- dispatch(constexpr op_new_object_length) >+ disp__() >+end) > > >-_llint_op_new_func: >- traceExecution() >+llintOp(op_new_func, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_func) >- dispatch(constexpr op_new_func_length) >+ disp__() >+end) > > >-_llint_op_new_generator_func: >- traceExecution() >+llintOp(op_new_generator_func, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_generator_func) >- 
dispatch(constexpr op_new_generator_func_length) >+ disp__() >+end) > >-_llint_op_new_async_generator_func: >- traceExecution() >+ >+llintOp(op_new_async_generator_func, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_async_generator_func) >- dispatch(constexpr op_new_async_generator_func_length) >+ disp__() >+end) > >-_llint_op_new_async_generator_func_exp: >- traceExecution() >+ >+llintOp(op_new_async_generator_func_exp, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_async_generator_func_exp) >- dispatch(constexpr op_new_async_generator_func_exp_length) >+ disp__() >+end) > >-_llint_op_new_async_func: >- traceExecution() >+ >+llintOp(op_new_async_func, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_async_func) >- dispatch(constexpr op_new_async_func_length) >+ disp__() >+end) > > >-_llint_op_new_array: >- traceExecution() >+llintOp(op_new_array, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_array) >- dispatch(constexpr op_new_array_length) >+ disp__() >+end) > > >-_llint_op_new_array_with_spread: >- traceExecution() >+llintOp(op_new_array_with_spread, macro (getOperand, disp__) > callSlowPath(_slow_path_new_array_with_spread) >- dispatch(constexpr op_new_array_with_spread_length) >+ disp__() >+end) > > >-_llint_op_spread: >- traceExecution() >+llintOp(op_spread, macro (getOperand, disp__) > callSlowPath(_slow_path_spread) >- dispatch(constexpr op_spread_length) >+ disp__() >+end) > > >-_llint_op_new_array_with_size: >- traceExecution() >+llintOp(op_new_array_with_size, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_array_with_size) >- dispatch(constexpr op_new_array_with_size_length) >+ disp__() >+end) > > >-_llint_op_new_array_buffer: >- traceExecution() >+llintOp(op_new_array_buffer, macro (getOperand, disp__) > callSlowPath(_slow_path_new_array_buffer) >- dispatch(constexpr op_new_array_buffer_length) >+ disp__() >+end) > > >-_llint_op_new_regexp: >- traceExecution() 
>+llintOp(op_new_regexp, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_regexp) >- dispatch(constexpr op_new_regexp_length) >+ disp__() >+end) > > >-_llint_op_less: >- traceExecution() >+llintOp(op_less, macro (getOperand, disp__) > callSlowPath(_slow_path_less) >- dispatch(constexpr op_less_length) >+ disp__() >+end) > > >-_llint_op_lesseq: >- traceExecution() >+llintOp(op_lesseq, macro (getOperand, disp__) > callSlowPath(_slow_path_lesseq) >- dispatch(constexpr op_lesseq_length) >+ disp__() >+end) > > >-_llint_op_greater: >- traceExecution() >+llintOp(op_greater, macro (getOperand, disp__) > callSlowPath(_slow_path_greater) >- dispatch(constexpr op_greater_length) >+ disp__() >+end) > > >-_llint_op_greatereq: >- traceExecution() >+llintOp(op_greatereq, macro (getOperand, disp__) > callSlowPath(_slow_path_greatereq) >- dispatch(constexpr op_greatereq_length) >+ disp__() >+end) > > >-_llint_op_eq: >- traceExecution() >+llintOp(op_eq, macro (getOperand, disp__) > equalityComparison( > macro (left, right, result) cieq left, right, result end, > _slow_path_eq) >+end) > > >-_llint_op_neq: >- traceExecution() >+llintOp(op_neq, macro (getOperand, disp__) > equalityComparison( > macro (left, right, result) cineq left, right, result end, > _slow_path_neq) >+end) > > >-_llint_op_below: >- traceExecution() >+llintOp(op_below, macro (getOperand, disp__) > compareUnsigned( > macro (left, right, result) cib left, right, result end) >+end) > > >-_llint_op_beloweq: >- traceExecution() >+llintOp(op_beloweq, macro (getOperand, disp__) > compareUnsigned( > macro (left, right, result) cibeq left, right, result end) >+end) > > >-_llint_op_mod: >- traceExecution() >+llintOp(op_mod, macro (getOperand, disp__) > callSlowPath(_slow_path_mod) >- dispatch(constexpr op_mod_length) >+ disp__() >+end) > > >-_llint_op_pow: >- traceExecution() >+llintOp(op_pow, macro (getOperand, disp__) > callSlowPath(_slow_path_pow) >- dispatch(constexpr op_pow_length) >+ disp__() >+end) > > 
>-_llint_op_typeof: >- traceExecution() >+llintOp(op_typeof, macro (getOperand, disp__) > callSlowPath(_slow_path_typeof) >- dispatch(constexpr op_typeof_length) >+ disp__() >+end) > > >-_llint_op_is_object_or_null: >- traceExecution() >+llintOp(op_is_object_or_null, macro (getOperand, disp__) > callSlowPath(_slow_path_is_object_or_null) >- dispatch(constexpr op_is_object_or_null_length) >+ disp__() >+end) > >-_llint_op_is_function: >- traceExecution() >+ >+llintOp(op_is_function, macro (getOperand, disp__) > callSlowPath(_slow_path_is_function) >- dispatch(constexpr op_is_function_length) >+ disp__() >+end) > > >-_llint_op_in_by_id: >- traceExecution() >+llintOp(op_in_by_id, macro (getOperand, disp__) > callSlowPath(_slow_path_in_by_id) >- dispatch(constexpr op_in_by_id_length) >+ disp__() >+end) > > >-_llint_op_in_by_val: >- traceExecution() >+llintOp(op_in_by_val, macro (getOperand, disp__) > callSlowPath(_slow_path_in_by_val) >- dispatch(constexpr op_in_by_val_length) >+ disp__() >+end) > > >-_llint_op_try_get_by_id: >- traceExecution() >+llintOp(op_try_get_by_id, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_try_get_by_id) >- dispatch(constexpr op_try_get_by_id_length) >+ disp__() >+end) > > >-_llint_op_del_by_id: >- traceExecution() >+llintOp(op_del_by_id, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_del_by_id) >- dispatch(constexpr op_del_by_id_length) >+ disp__() >+end) > > >-_llint_op_del_by_val: >- traceExecution() >+llintOp(op_del_by_val, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_del_by_val) >- dispatch(constexpr op_del_by_val_length) >+ disp__() >+end) > > >-_llint_op_put_getter_by_id: >- traceExecution() >+llintOp(op_put_getter_by_id, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_put_getter_by_id) >- dispatch(constexpr op_put_getter_by_id_length) >+ disp__() >+end) > > >-_llint_op_put_setter_by_id: >- traceExecution() >+llintOp(op_put_setter_by_id, macro (getOperand, disp__) > 
callSlowPath(_llint_slow_path_put_setter_by_id) >- dispatch(constexpr op_put_setter_by_id_length) >+ disp__() >+end) > > >-_llint_op_put_getter_setter_by_id: >- traceExecution() >+llintOp(op_put_getter_setter_by_id, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_put_getter_setter_by_id) >- dispatch(constexpr op_put_getter_setter_by_id_length) >+ disp__() >+end) > > >-_llint_op_put_getter_by_val: >- traceExecution() >+llintOp(op_put_getter_by_val, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_put_getter_by_val) >- dispatch(constexpr op_put_getter_by_val_length) >+ disp__() >+end) > > >-_llint_op_put_setter_by_val: >- traceExecution() >+llintOp(op_put_setter_by_val, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_put_setter_by_val) >- dispatch(constexpr op_put_setter_by_val_length) >+ disp__() >+end) > > >-_llint_op_define_data_property: >- traceExecution() >+llintOp(op_define_data_property, macro (getOperand, disp__) > callSlowPath(_slow_path_define_data_property) >- dispatch(constexpr op_define_data_property_length) >+ disp__() >+end) > > >-_llint_op_define_accessor_property: >- traceExecution() >+llintOp(op_define_accessor_property, macro (getOperand, disp__) > callSlowPath(_slow_path_define_accessor_property) >- dispatch(constexpr op_define_accessor_property_length) >+ disp__() >+end) > > >-_llint_op_jtrue: >- traceExecution() >+llintOp(op_jtrue, macro (getOperand, disp__) > jumpTrueOrFalse( > macro (value, target) btinz value, 1, target end, > _llint_slow_path_jtrue) >+end) > > >-_llint_op_jfalse: >- traceExecution() >+llintOp(op_jfalse, macro (getOperand, disp__) > jumpTrueOrFalse( > macro (value, target) btiz value, 1, target end, > _llint_slow_path_jfalse) >+end) > > >-_llint_op_jless: >- traceExecution() >+llintOp(op_jless, macro (getOperand, disp__) > compareJump( > macro (left, right, target) bilt left, right, target end, > macro (left, right, target) bdlt left, right, target end, > _llint_slow_path_jless) >+end) > > 
>-_llint_op_jnless: >- traceExecution() >+llintOp(op_jnless, macro (getOperand, disp__) > compareJump( > macro (left, right, target) bigteq left, right, target end, > macro (left, right, target) bdgtequn left, right, target end, > _llint_slow_path_jnless) >+end) > > >-_llint_op_jgreater: >- traceExecution() >+llintOp(op_jgreater, macro (getOperand, disp__) > compareJump( > macro (left, right, target) bigt left, right, target end, > macro (left, right, target) bdgt left, right, target end, > _llint_slow_path_jgreater) >+end) > > >-_llint_op_jngreater: >- traceExecution() >+llintOp(op_jngreater, macro (getOperand, disp__) > compareJump( > macro (left, right, target) bilteq left, right, target end, > macro (left, right, target) bdltequn left, right, target end, > _llint_slow_path_jngreater) >+end) > > >-_llint_op_jlesseq: >- traceExecution() >+llintOp(op_jlesseq, macro (getOperand, disp__) > compareJump( > macro (left, right, target) bilteq left, right, target end, > macro (left, right, target) bdlteq left, right, target end, > _llint_slow_path_jlesseq) >+end) > > >-_llint_op_jnlesseq: >- traceExecution() >+llintOp(op_jnlesseq, macro (getOperand, disp__) > compareJump( > macro (left, right, target) bigt left, right, target end, > macro (left, right, target) bdgtun left, right, target end, > _llint_slow_path_jnlesseq) >+end) > > >-_llint_op_jgreatereq: >- traceExecution() >+llintOp(op_jgreatereq, macro (getOperand, disp__) > compareJump( > macro (left, right, target) bigteq left, right, target end, > macro (left, right, target) bdgteq left, right, target end, > _llint_slow_path_jgreatereq) >+end) > > >-_llint_op_jngreatereq: >- traceExecution() >+llintOp(op_jngreatereq, macro (getOperand, disp__) > compareJump( > macro (left, right, target) bilt left, right, target end, > macro (left, right, target) bdltun left, right, target end, > _llint_slow_path_jngreatereq) >+end) > > >-_llint_op_jeq: >- traceExecution() >+llintOp(op_jeq, macro (getOperand, disp__) > equalityJump( 
> macro (left, right, target) bieq left, right, target end, > _llint_slow_path_jeq) >+end) > > >-_llint_op_jneq: >- traceExecution() >+llintOp(op_jneq, macro (getOperand, disp__) > equalityJump( > macro (left, right, target) bineq left, right, target end, > _llint_slow_path_jneq) >+end) > > >-_llint_op_jbelow: >- traceExecution() >+llintOp(op_jbelow, macro (getOperand, disp__) > compareUnsignedJump( > macro (left, right, target) bib left, right, target end) >+end) > > >-_llint_op_jbeloweq: >- traceExecution() >+llintOp(op_jbeloweq, macro (getOperand, disp__) > compareUnsignedJump( > macro (left, right, target) bibeq left, right, target end) >+end) > > >-_llint_op_loop_hint: >- traceExecution() >+llintOp(op_loop_hint, macro (getOperand, disp__) > checkSwitchToJITForLoop() >- dispatch(constexpr op_loop_hint_length) >+ disp__() >+end) > > >-_llint_op_check_traps: >- traceExecution() >+llintOp(op_check_traps, macro (getOperand, disp__) > loadp CodeBlock[cfr], t1 > loadp CodeBlock::m_poisonedVM[t1], t1 > unpoison(_g_CodeBlockPoison, t1, t2) > loadb VM::m_traps+VMTraps::m_needTrapHandling[t1], t0 > btpnz t0, .handleTraps > .afterHandlingTraps: >- dispatch(constexpr op_check_traps_length) >+ disp__() > .handleTraps: > callTrapHandler(.throwHandler) > jmp .afterHandlingTraps > .throwHandler: > jmp _llint_throw_from_slow_path_trampoline >+end) > > > # Returns the packet pointer in t0. 
>@@ -1719,62 +1787,68 @@ macro acquireShadowChickenPacket(slow) > end > > >-_llint_op_nop: >- dispatch(constexpr op_nop_length) >+llintOp(op_nop, macro (getOperand, disp__) >+ disp__() >+end) > > >-_llint_op_super_sampler_begin: >+llintOp(op_super_sampler_begin, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_super_sampler_begin) >- dispatch(constexpr op_super_sampler_begin_length) >+ disp__() >+end) > > >-_llint_op_super_sampler_end: >- traceExecution() >+llintOp(op_super_sampler_end, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_super_sampler_end) >- dispatch(constexpr op_super_sampler_end_length) >+ disp__() >+end) > > >-_llint_op_switch_string: >- traceExecution() >+llintOp(op_switch_string, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_switch_string) >- dispatch(0) >+ disp__() >+end) > > >-_llint_op_new_func_exp: >- traceExecution() >+llintOp(op_new_func_exp, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_func_exp) >- dispatch(constexpr op_new_func_exp_length) >+ disp__() >+end) > >-_llint_op_new_generator_func_exp: >- traceExecution() >+llintOp(op_new_generator_func_exp, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_generator_func_exp) >- dispatch(constexpr op_new_generator_func_exp_length) >+ disp__() >+end) > >-_llint_op_new_async_func_exp: >- traceExecution() >+llintOp(op_new_async_func_exp, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_new_async_func_exp) >- dispatch(constexpr op_new_async_func_exp_length) >+ disp__() >+end) > > >-_llint_op_set_function_name: >- traceExecution() >+llintOp(op_set_function_name, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_set_function_name) >- dispatch(constexpr op_set_function_name_length) >+ disp__() >+end) > >-_llint_op_call: >- traceExecution() >+ >+llintOp(op_call, macro (getOperand, disp__) > arrayProfileForCall() > doCall(_llint_slow_path_call, prepareForRegularCall) >+end) > >-_llint_op_tail_call: >- traceExecution() >+ 
>+llintOp(op_tail_call, macro (getOperand, disp__) > arrayProfileForCall() > checkSwitchToJITForEpilogue() > doCall(_llint_slow_path_call, prepareForTailCall) >+end) > >-_llint_op_construct: >- traceExecution() >+ >+llintOp(op_construct, macro (getOperand, disp__) > doCall(_llint_slow_path_construct, prepareForRegularCall) >+end) >+ > > macro doCallVarargs(frameSlowPath, slowPath, prepareCall) > callSlowPath(frameSlowPath) >@@ -1794,34 +1868,33 @@ macro doCallVarargs(frameSlowPath, slowPath, prepareCall) > slowPathForCall(slowPath, prepareCall) > end > >-_llint_op_call_varargs: >- traceExecution() >+ >+llintOp(op_call_varargs, macro (getOperand, disp__) > doCallVarargs(_llint_slow_path_size_frame_for_varargs, _llint_slow_path_call_varargs, prepareForRegularCall) >+end) > >-_llint_op_tail_call_varargs: >- traceExecution() >+llintOp(op_tail_call_varargs, macro (getOperand, disp__) > checkSwitchToJITForEpilogue() > # We lie and perform the tail call instead of preparing it since we can't > # prepare the frame for a call opcode > doCallVarargs(_llint_slow_path_size_frame_for_varargs, _llint_slow_path_call_varargs, prepareForTailCall) >+end) > > >-_llint_op_tail_call_forward_arguments: >- traceExecution() >+llintOp(op_tail_call_forward_arguments, macro (getOperand, disp__) > checkSwitchToJITForEpilogue() > # We lie and perform the tail call instead of preparing it since we can't > # prepare the frame for a call opcode > doCallVarargs(_llint_slow_path_size_frame_for_forward_arguments, _llint_slow_path_tail_call_forward_arguments, prepareForTailCall) >+end) > > >-_llint_op_construct_varargs: >- traceExecution() >+llintOp(op_construct_varargs, macro (getOperand, disp__) > doCallVarargs(_llint_slow_path_size_frame_for_varargs, _llint_slow_path_construct_varargs, prepareForRegularCall) >+end) > > >-_llint_op_call_eval: >- traceExecution() >- >+llintOp(op_call_eval, macro (getOperand, disp__) > # Eval is executed in one of two modes: > # > # 1) We find that we're really 
invoking eval() in which case the >@@ -1856,162 +1929,169 @@ _llint_op_call_eval: > # returns the JS value that the eval returned. > > slowPathForCall(_llint_slow_path_call_eval, prepareForRegularCall) >+end) > > >-_llint_generic_return_point: >+op(llint_generic_return_point, macro (getOperand, disp__) > dispatchAfterCall() >+end) > > >-_llint_op_strcat: >- traceExecution() >+llintOp(op_strcat, macro (getOperand, disp__) > callSlowPath(_slow_path_strcat) >- dispatch(constexpr op_strcat_length) >+ disp__() >+end) > > >-_llint_op_push_with_scope: >- traceExecution() >+llintOp(op_push_with_scope, macro (getOperand, disp__) > callSlowPath(_slow_path_push_with_scope) >- dispatch(constexpr op_push_with_scope_length) >+ disp__() >+end) > > >-_llint_op_identity_with_profile: >- traceExecution() >- dispatch(constexpr op_identity_with_profile_length) >+llintOp(op_identity_with_profile, macro (getOperand, disp__) >+ disp__() >+end) > > >-_llint_op_unreachable: >- traceExecution() >+llintOp(op_unreachable, macro (getOperand, disp__) > callSlowPath(_slow_path_unreachable) >- dispatch(constexpr op_unreachable_length) >+ disp__() >+end) > > >-_llint_op_yield: >+llintOp(op_yield, macro (getOperand, disp__) > notSupported() >+end) > > >-_llint_op_create_lexical_environment: >- traceExecution() >+llintOp(op_create_lexical_environment, macro (getOperand, disp__) > callSlowPath(_slow_path_create_lexical_environment) >- dispatch(constexpr op_create_lexical_environment_length) >+ disp__() >+end) > > >-_llint_op_throw: >- traceExecution() >+llintOp(op_throw, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_throw) >- dispatch(constexpr op_throw_length) >+ disp__() >+end) > > >-_llint_op_throw_static_error: >- traceExecution() >+llintOp(op_throw_static_error, macro (getOperand, disp__) > callSlowPath(_slow_path_throw_static_error) >- dispatch(constexpr op_throw_static_error_length) >+ disp__() >+end) > > >-_llint_op_debug: >- traceExecution() >+llintOp(op_debug, macro 
(getOperand, disp__) > loadp CodeBlock[cfr], t0 > loadi CodeBlock::m_debuggerRequests[t0], t0 > btiz t0, .opDebugDone > callSlowPath(_llint_slow_path_debug) > .opDebugDone: >- dispatch(constexpr op_debug_length) >+ disp__() >+end) > > >-_llint_native_call_trampoline: >+op(llint_native_call_trampoline, macro (getOperand, disp__) > nativeCallTrampoline(NativeExecutable::m_function) >+end) > > >-_llint_native_construct_trampoline: >+op(llint_native_construct_trampoline, macro (getOperand, disp__) > nativeCallTrampoline(NativeExecutable::m_constructor) >+end) > > >-_llint_internal_function_call_trampoline: >+op(llint_internal_function_call_trampoline, macro (getOperand, disp__) > internalFunctionCallTrampoline(InternalFunction::m_functionForCall) >+end) > > >-_llint_internal_function_construct_trampoline: >+op(llint_internal_function_construct_trampoline, macro (getOperand, disp__) > internalFunctionCallTrampoline(InternalFunction::m_functionForConstruct) >+end) > > >-_llint_op_get_enumerable_length: >- traceExecution() >+llintOp(op_get_enumerable_length, macro (getOperand, disp__) > callSlowPath(_slow_path_get_enumerable_length) >- dispatch(constexpr op_get_enumerable_length_length) >+ disp__() >+end) > >-_llint_op_has_indexed_property: >- traceExecution() >+llintOp(op_has_indexed_property, macro (getOperand, disp__) > callSlowPath(_slow_path_has_indexed_property) >- dispatch(constexpr op_has_indexed_property_length) >+ disp__() >+end) > >-_llint_op_has_structure_property: >- traceExecution() >+llintOp(op_has_structure_property, macro (getOperand, disp__) > callSlowPath(_slow_path_has_structure_property) >- dispatch(constexpr op_has_structure_property_length) >+ disp__() >+end) > >-_llint_op_has_generic_property: >- traceExecution() >+llintOp(op_has_generic_property, macro (getOperand, disp__) > callSlowPath(_slow_path_has_generic_property) >- dispatch(constexpr op_has_generic_property_length) >+ disp__() >+end) > >-_llint_op_get_direct_pname: >- traceExecution() 
>+llintOp(op_get_direct_pname, macro (getOperand, disp__) > callSlowPath(_slow_path_get_direct_pname) >- dispatch(constexpr op_get_direct_pname_length) >+ disp__() >+end) > >-_llint_op_get_property_enumerator: >- traceExecution() >+llintOp(op_get_property_enumerator, macro (getOperand, disp__) > callSlowPath(_slow_path_get_property_enumerator) >- dispatch(constexpr op_get_property_enumerator_length) >+ disp__() >+end) > >-_llint_op_enumerator_structure_pname: >- traceExecution() >+llintOp(op_enumerator_structure_pname, macro (getOperand, disp__) > callSlowPath(_slow_path_next_structure_enumerator_pname) >- dispatch(constexpr op_enumerator_structure_pname_length) >+ disp__() >+end) > >-_llint_op_enumerator_generic_pname: >- traceExecution() >+llintOp(op_enumerator_generic_pname, macro (getOperand, disp__) > callSlowPath(_slow_path_next_generic_enumerator_pname) >- dispatch(constexpr op_enumerator_generic_pname_length) >+ disp__() >+end) > >-_llint_op_to_index_string: >- traceExecution() >+llintOp(op_to_index_string, macro (getOperand, disp__) > callSlowPath(_slow_path_to_index_string) >- dispatch(constexpr op_to_index_string_length) >+ disp__() >+end) > >-_llint_op_create_rest: >- traceExecution() >+llintOp(op_create_rest, macro (getOperand, disp__) > callSlowPath(_slow_path_create_rest) >- dispatch(constexpr op_create_rest_length) >+ disp__() >+end) > >-_llint_op_instanceof: >- traceExecution() >+llintOp(op_instanceof, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_instanceof) >- dispatch(constexpr op_instanceof_length) >+ disp__() >+end) > >-_llint_op_get_by_id_with_this: >- traceExecution() >+llintOp(op_get_by_id_with_this, macro (getOperand, disp__) > callSlowPath(_slow_path_get_by_id_with_this) >- dispatch(constexpr op_get_by_id_with_this_length) >+ disp__() >+end) > >-_llint_op_get_by_val_with_this: >- traceExecution() >+llintOp(op_get_by_val_with_this, macro (getOperand, disp__) > callSlowPath(_slow_path_get_by_val_with_this) >- dispatch(constexpr 
op_get_by_val_with_this_length) >+ disp__() >+end) > >-_llint_op_put_by_id_with_this: >- traceExecution() >+llintOp(op_put_by_id_with_this, macro (getOperand, disp__) > callSlowPath(_slow_path_put_by_id_with_this) >- dispatch(constexpr op_put_by_id_with_this_length) >+ disp__() >+end) > >-_llint_op_put_by_val_with_this: >- traceExecution() >+llintOp(op_put_by_val_with_this, macro (getOperand, disp__) > callSlowPath(_slow_path_put_by_val_with_this) >- dispatch(constexpr op_put_by_val_with_this_length) >+ disp__() >+end) > >-_llint_op_resolve_scope_for_hoisting_func_decl_in_eval: >- traceExecution() >+llintOp(op_resolve_scope_for_hoisting_func_decl_in_eval, macro (getOperand, disp__) > callSlowPath(_slow_path_resolve_scope_for_hoisting_func_decl_in_eval) >- dispatch(constexpr op_resolve_scope_for_hoisting_func_decl_in_eval_length) >+ disp__() >+end) > > # Lastly, make sure that we can link even though we don't support all opcodes. > # These opcodes should never arise when using LLInt or either JIT. 
We assert >diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp >index 78bff0884c4802939a4de860f76b582eaa9a4265..828fc4f85ac7495a75f5cf9b4ae8fb6d68669ebd 100644 >--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp >+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp >@@ -108,13 +108,20 @@ using namespace JSC::LLInt; > > #define OFFLINE_ASM_GLOBAL_LABEL(label) label: USE_LABEL(label); > >+#if ENABLE(LABEL_TRACING) >+#define TRACE_LABEL(prefix, label) dataLog(#prefix, ": ", #label, "\n") >+#else >+#define TRACE_LABEL(prefix, label) do { } while (false); >+#endif >+ >+ > #if ENABLE(COMPUTED_GOTO_OPCODES) >-#define OFFLINE_ASM_GLUE_LABEL(label) label: USE_LABEL(label); >+#define OFFLINE_ASM_GLUE_LABEL(label) label: TRACE_LABEL("OFFLINE_ASM_GLUE_LABEL", label); USE_LABEL(label); > #else > #define OFFLINE_ASM_GLUE_LABEL(label) case label: label: USE_LABEL(label); > #endif > >-#define OFFLINE_ASM_LOCAL_LABEL(label) label: USE_LABEL(label); >+#define OFFLINE_ASM_LOCAL_LABEL(label) label: TRACE_LABEL("OFFLINE_ASM_LOCAL_LABEL", #label); USE_LABEL(label); > > > //============================================================================ >@@ -238,7 +245,7 @@ struct CLoopRegister { > EncodedJSValue encodedJSValue; > double castToDouble; > #endif >- Opcode opcode; >+ OpcodeID opcode; > }; > > operator ExecState*() { return execState; } >@@ -288,8 +295,8 @@ JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, > // can depend on the opcodeMap. 
> Instruction* exceptionInstructions = LLInt::exceptionInstructions(); > for (int i = 0; i < maxOpcodeLength + 1; ++i) >- exceptionInstructions[i].u.pointer = >- LLInt::getCodePtr(llint_throw_from_slow_path_trampoline); >+ exceptionInstructions[i].u.unsignedValue = >+ llint_throw_from_slow_path_trampoline; > > return JSValue(); > } >@@ -353,7 +360,7 @@ JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, > CLoopStack& cloopStack = vm->interpreter->cloopStack(); > StackPointerScope stackPointerScope(cloopStack); > >- lr.opcode = getOpcode(llint_return_to_host); >+ lr.opcode = llint_return_to_host; > sp.vp = cloopStack.currentStackPointer(); > cfr.callFrame = vm->topCallFrame; > #ifndef NDEBUG >@@ -376,7 +383,7 @@ JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, > // Interpreter variables for value passing between opcodes and/or helpers: > NativeFunction nativeFunc = nullptr; > JSValue functionReturnValue; >- Opcode opcode = getOpcode(entryOpcodeID); >+ OpcodeID opcode = entryOpcodeID; > > #define PUSH(cloopReg) \ > do { \ >@@ -399,7 +406,7 @@ JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, > #if USE(JSVALUE32_64) > #define FETCH_OPCODE() pc.opcode > #else // USE(JSVALUE64) >-#define FETCH_OPCODE() *bitwise_cast<Opcode*>(pcBase.i8p + pc.i * 8) >+#define FETCH_OPCODE() *bitwise_cast<OpcodeID*>(pcBase.i8p + pc.i * 8) > #endif // USE(JSVALUE64) > > #define NEXT_INSTRUCTION() \ >@@ -413,7 +420,7 @@ JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, > //======================================================================== > // Loop dispatch mechanism using computed goto statements: > >- #define DISPATCH_OPCODE() goto *opcode >+ #define DISPATCH_OPCODE() goto *getOpcode(opcode); > > #define DEFINE_OPCODE(__opcode) \ > __opcode: \ >diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm 
>index f867597fc46e531a385561f271b871a98422bab0..535c1289df9246581f442251168a10d643734977 100644 >--- a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm >+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm >@@ -23,23 +23,6 @@ > > > # Utilities. >-macro jumpToInstruction() >- jmp [PB, PC, 8], BytecodePtrTag >-end >- >-macro dispatch(advance) >- addp advance, PC >- jumpToInstruction() >-end >- >-macro dispatchInt(advance) >- addi advance, PC >- jumpToInstruction() >-end >- >-macro dispatchIntIndirect(offset) >- dispatchInt(offset * 8[PB, PC, 8]) >-end > > macro dispatchAfterCall() > loadi ArgumentCount + TagOffset[cfr], PC >@@ -225,7 +208,7 @@ macro doVMEntry(makeCall) > > checkStackPointerAlignment(extraTempReg, 0xbad0dc02) > >- makeCall(entry, t3) >+ makeCall(entry, t3, t4) > > # We may have just made a call into a JS function, so we can't rely on sp > # for anything but the fact that our own locals (ie the VMEntryRecord) are >@@ -249,7 +232,7 @@ macro doVMEntry(makeCall) > end > > >-macro makeJavaScriptCall(entry, temp) >+macro makeJavaScriptCall(entry, temp, unused) > addp 16, sp > if C_LOOP > cloopCallJSFunction entry >@@ -259,8 +242,7 @@ macro makeJavaScriptCall(entry, temp) > subp 16, sp > end > >- >-macro makeHostFunctionCall(entry, temp) >+macro makeHostFunctionCall(entry, temp, unused) > move entry, temp > storep cfr, [sp] > move sp, a0 >@@ -277,7 +259,7 @@ macro makeHostFunctionCall(entry, temp) > end > end > >-_handleUncaughtException: >+op(handleUncaughtException, macro (getOperand, disp__) > loadp Callee[cfr], t3 > andp MarkedBlockMask, t3 > loadp MarkedBlockFooterOffset + MarkedBlock::Footer::m_vm[t3], t3 >@@ -299,6 +281,7 @@ _handleUncaughtException: > popCalleeSaves() > functionEpilogue() > ret >+end) > > > macro prepareStateForCCall() >@@ -591,8 +574,15 @@ end > > > # Instruction implementations >-_llint_op_enter: >+_llint_op_wide: >+ traceExecution() >+ dispatchWide(constexpr op_wide_length) >+ >+_llint_op_wide_wide: > traceExecution() 
>+ crash() >+ >+llintOp(op_enter, macro (getOperand, disp__) > checkStackPointerAlignment(t2, 0xdead00e1) > loadp CodeBlock[cfr], t2 // t2<CodeBlock> = cfr.CodeBlock > loadi CodeBlock::m_numVars[t2], t2 // t2<size_t> = t2<CodeBlock>.m_numVars >@@ -609,11 +599,11 @@ _llint_op_enter: > btqnz t2, .opEnterLoop > .opEnterDone: > callSlowPath(_slow_path_enter) >- dispatch(constexpr op_enter_length) >+ disp__() >+end) > > >-_llint_op_get_argument: >- traceExecution() >+llintOp(op_get_argument, macro (getOperand, disp__) > loadisFromInstruction(1, t1) > loadisFromInstruction(2, t2) > loadi PayloadOffset + ArgumentCount[cfr], t0 >@@ -621,35 +611,35 @@ _llint_op_get_argument: > loadq ThisArgumentOffset[cfr, t2, 8], t0 > storeq t0, [cfr, t1, 8] > valueProfile(t0, 3, t2) >- dispatch(constexpr op_get_argument_length) >+ disp__() > > .opGetArgumentOutOfBounds: > storeq ValueUndefined, [cfr, t1, 8] > valueProfile(ValueUndefined, 3, t2) >- dispatch(constexpr op_get_argument_length) >+ disp__() >+end) > > >-_llint_op_argument_count: >- traceExecution() >- loadisFromInstruction(1, t1) >+llintOp(op_argument_count, macro (getOperand, disp__) >+ getOperand(1, t1) > loadi PayloadOffset + ArgumentCount[cfr], t0 > subi 1, t0 > orq TagTypeNumber, t0 > storeq t0, [cfr, t1, 8] >- dispatch(constexpr op_argument_count_length) >+ disp__() >+end) > > >-_llint_op_get_scope: >- traceExecution() >+llintOp(op_get_scope, macro (getOperand, disp__) > loadp Callee[cfr], t0 > loadp JSCallee::m_scope[t0], t0 > loadisFromInstruction(1, t1) > storeq t0, [cfr, t1, 8] >- dispatch(constexpr op_get_scope_length) >+ disp__() >+end) > > >-_llint_op_to_this: >- traceExecution() >+llintOp(op_to_this, macro (getOperand, disp__) > loadisFromInstruction(1, t0) > loadq [cfr, t0, 8], t0 > btqnz t0, tagMask, .opToThisSlow >@@ -657,47 +647,48 @@ _llint_op_to_this: > loadStructureWithScratch(t0, t1, t2, t3) > loadpFromInstruction(2, t2) > bpneq t1, t2, .opToThisSlow >- dispatch(constexpr op_to_this_length) >+ disp__() > > 
.opToThisSlow: > callSlowPath(_slow_path_to_this) >- dispatch(constexpr op_to_this_length) >+ disp__() >+end) > > >-_llint_op_check_tdz: >- traceExecution() >- loadisFromInstruction(1, t0) >+llintOp(op_check_tdz, macro (getOperand, disp__) >+ getOperand(1, t0) > loadConstantOrVariable(t0, t1) > bqneq t1, ValueEmpty, .opNotTDZ > callSlowPath(_slow_path_throw_tdz_error) > > .opNotTDZ: >- dispatch(constexpr op_check_tdz_length) >+ disp__() >+end) > > >-_llint_op_mov: >- traceExecution() >- loadisFromInstruction(2, t1) >- loadisFromInstruction(1, t0) >+llintOp(op_mov, macro (getOperand, disp__) >+ getOperand(2, t1) >+ getOperand(1, t0) > loadConstantOrVariable(t1, t2) > storeq t2, [cfr, t0, 8] >- dispatch(constexpr op_mov_length) >+ disp__() >+end) > > >-_llint_op_not: >- traceExecution() >- loadisFromInstruction(2, t0) >- loadisFromInstruction(1, t1) >+llintOp(op_not, macro (getOperand, disp__) >+ getOperand(2, t0) >+ getOperand(1, t1) > loadConstantOrVariable(t0, t2) > xorq ValueFalse, t2 > btqnz t2, ~1, .opNotSlow > xorq ValueTrue, t2 > storeq t2, [cfr, t1, 8] >- dispatch(constexpr op_not_length) >+ disp__() > > .opNotSlow: > callSlowPath(_slow_path_not) >- dispatch(constexpr op_not_length) >+ disp__() >+end) > > > macro equalityComparison(integerComparison, slowPath) >@@ -726,7 +717,7 @@ macro equalityJump(integerComparison, slowPath) > dispatch(constexpr op_jeq_length) > > .jumpTarget: >- dispatchIntIndirect(3) >+ dispatchIndirect(3) > > .slow: > callSlowPath(slowPath) >@@ -753,22 +744,22 @@ macro equalNullComparison() > .done: > end > >-_llint_op_eq_null: >- traceExecution() >+llintOp(op_eq_null, macro (getOperand, disp__) > equalNullComparison() >- loadisFromInstruction(1, t1) >+ getOperand(1, t1) > orq ValueFalse, t0 > storeq t0, [cfr, t1, 8] >- dispatch(constexpr op_eq_null_length) >+ disp__() >+end) > > >-_llint_op_neq_null: >- traceExecution() >+llintOp(op_neq_null, macro (getOperand, disp__) > equalNullComparison() > loadisFromInstruction(1, t1) > xorq 
ValueTrue, t0 > storeq t0, [cfr, t1, 8] >- dispatch(constexpr op_neq_null_length) >+ disp__() >+end) > > > macro strictEq(equalityOperation, slowPath) >@@ -812,47 +803,46 @@ macro strictEqualityJump(equalityOperation, slowPath) > btqnz t1, tagTypeNumber, .slow > .rightOK: > equalityOperation(t0, t1, .jumpTarget) >- dispatch(constexpr op_jstricteq_length) >+ dispatch(4) > > .jumpTarget: >- dispatchIntIndirect(3) >+ dispatchIndirect(3) > > .slow: > callSlowPath(slowPath) >- dispatch(0) >+ dispatch(4) > end > > >-_llint_op_stricteq: >- traceExecution() >+llintOp(op_stricteq, macro (getOperand, disp__) > strictEq( > macro (left, right, result) cqeq left, right, result end, > _slow_path_stricteq) >+end) > > >-_llint_op_nstricteq: >- traceExecution() >+llintOp(op_nstricteq, macro (getOperand, disp__) > strictEq( > macro (left, right, result) cqneq left, right, result end, > _slow_path_nstricteq) >+end) > > >-_llint_op_jstricteq: >- traceExecution() >+llintOp(op_jstricteq, macro (getOperand, disp__) > strictEqualityJump( > macro (left, right, target) bqeq left, right, target end, > _llint_slow_path_jstricteq) >+end) > > >-_llint_op_jnstricteq: >- traceExecution() >+llintOp(op_jnstricteq, macro (getOperand, disp__) > strictEqualityJump( > macro (left, right, target) bqneq left, right, target end, > _llint_slow_path_jnstricteq) >+end) > > > macro preOp(arithmeticOperation, slowPath) >- traceExecution() > loadisFromInstruction(1, t0) > loadq [cfr, t0, 8], t1 > bqb t1, tagTypeNumber, .slow >@@ -866,20 +856,21 @@ macro preOp(arithmeticOperation, slowPath) > dispatch(2) > end > >-_llint_op_inc: >+llintOp(op_inc, macro (getOperand, disp__) > preOp( > macro (value, slow) baddio 1, value, slow end, > _slow_path_inc) >+end) > > >-_llint_op_dec: >+llintOp(op_dec, macro (getOperand, disp__) > preOp( > macro (value, slow) bsubio 1, value, slow end, > _slow_path_dec) >+end) > > >-_llint_op_to_number: >- traceExecution() >+llintOp(op_to_number, macro (getOperand, disp__) > 
loadisFromInstruction(2, t0) > loadisFromInstruction(1, t1) > loadConstantOrVariable(t0, t2) >@@ -888,15 +879,15 @@ _llint_op_to_number: > .opToNumberIsImmediate: > storeq t2, [cfr, t1, 8] > valueProfile(t2, 3, t0) >- dispatch(constexpr op_to_number_length) >+ disp__() > > .opToNumberSlow: > callSlowPath(_slow_path_to_number) >- dispatch(constexpr op_to_number_length) >+ disp__() >+end) > > >-_llint_op_to_string: >- traceExecution() >+llintOp(op_to_string, macro (getOperand, disp__) > loadisFromInstruction(2, t1) > loadisFromInstruction(1, t2) > loadConstantOrVariable(t1, t0) >@@ -904,15 +895,15 @@ _llint_op_to_string: > bbneq JSCell::m_type[t0], StringType, .opToStringSlow > .opToStringIsString: > storeq t0, [cfr, t2, 8] >- dispatch(constexpr op_to_string_length) >+ disp__() > > .opToStringSlow: > callSlowPath(_slow_path_to_string) >- dispatch(constexpr op_to_string_length) >+ disp__() >+end) > > >-_llint_op_to_object: >- traceExecution() >+llintOp(op_to_object, macro (getOperand, disp__) > loadisFromInstruction(2, t0) > loadisFromInstruction(1, t1) > loadConstantOrVariable(t0, t2) >@@ -920,15 +911,15 @@ _llint_op_to_object: > bbb JSCell::m_type[t2], ObjectType, .opToObjectSlow > storeq t2, [cfr, t1, 8] > valueProfile(t2, 4, t0) >- dispatch(constexpr op_to_object_length) >+ disp__() > > .opToObjectSlow: > callSlowPath(_slow_path_to_object) >- dispatch(constexpr op_to_object_length) >+ disp__() >+end) > > >-_llint_op_negate: >- traceExecution() >+llintOp(op_negate, macro (getOperand, disp__) > loadisFromInstruction(2, t0) > loadisFromInstruction(1, t1) > loadConstantOrVariable(t0, t3) >@@ -940,18 +931,19 @@ _llint_op_negate: > orq tagTypeNumber, t3 > storeisToInstruction(t2, 3) > storeq t3, [cfr, t1, 8] >- dispatch(constexpr op_negate_length) >+ disp__() > .opNegateNotInt: > btqz t3, tagTypeNumber, .opNegateSlow > xorq 0x8000000000000000, t3 > ori ArithProfileNumber, t2 > storeq t3, [cfr, t1, 8] > storeisToInstruction(t2, 3) >- dispatch(constexpr op_negate_length) 
>+ disp__() > > .opNegateSlow: > callSlowPath(_slow_path_negate) >- dispatch(constexpr op_negate_length) >+ disp__() >+end) > > > macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath) >@@ -1025,16 +1017,15 @@ macro binaryOp(integerOperation, doubleOperation, slowPath) > doubleOperation, slowPath) > end > >-_llint_op_add: >- traceExecution() >+llintOp(op_add, macro (getOperand, disp__) > binaryOp( > macro (left, right, slow) baddio left, right, slow end, > macro (left, right) addd left, right end, > _slow_path_add) >+end) > > >-_llint_op_mul: >- traceExecution() >+llintOp(op_mul, macro (getOperand, disp__) > binaryOpCustomStore( > macro (left, right, slow, index) > # Assume t3 is scratchable. >@@ -1049,18 +1040,18 @@ _llint_op_mul: > end, > macro (left, right) muld left, right end, > _slow_path_mul) >+end) > > >-_llint_op_sub: >- traceExecution() >+llintOp(op_sub, macro (getOperand, disp__) > binaryOp( > macro (left, right, slow) bsubio left, right, slow end, > macro (left, right) subd left, right end, > _slow_path_sub) >+end) > > >-_llint_op_div: >- traceExecution() >+llintOp(op_div, macro (getOperand, disp__) > if X86_64 or X86_64_WIN > binaryOpCustomStore( > macro (left, right, slow, index) >@@ -1084,8 +1075,9 @@ _llint_op_div: > _slow_path_div) > else > callSlowPath(_slow_path_div) >- dispatch(constexpr op_div_length) >+ disp__() > end >+end) > > > macro bitOp(operation, slowPath, advance) >@@ -1106,109 +1098,108 @@ macro bitOp(operation, slowPath, advance) > dispatch(advance) > end > >-_llint_op_lshift: >- traceExecution() >+llintOp(op_lshift, macro (getOperand, disp__) > bitOp( > macro (left, right) lshifti left, right end, > _slow_path_lshift, > constexpr op_lshift_length) >+end) > > >-_llint_op_rshift: >- traceExecution() >+llintOp(op_rshift, macro (getOperand, disp__) > bitOp( > macro (left, right) rshifti left, right end, > _slow_path_rshift, > constexpr op_rshift_length) >+end) > > >-_llint_op_urshift: >- traceExecution() 
>+llintOp(op_urshift, macro (getOperand, disp__) > bitOp( > macro (left, right) urshifti left, right end, > _slow_path_urshift, > constexpr op_urshift_length) >+end) > > >-_llint_op_unsigned: >- traceExecution() >+llintOp(op_unsigned, macro (getOperand, disp__) > loadisFromInstruction(1, t0) > loadisFromInstruction(2, t1) > loadConstantOrVariable(t1, t2) > bilt t2, 0, .opUnsignedSlow > storeq t2, [cfr, t0, 8] >- dispatch(constexpr op_unsigned_length) >+ disp__() > .opUnsignedSlow: > callSlowPath(_slow_path_unsigned) >- dispatch(constexpr op_unsigned_length) >+ disp__() >+end) > > >-_llint_op_bitand: >- traceExecution() >+llintOp(op_bitand, macro (getOperand, disp__) > bitOp( > macro (left, right) andi left, right end, > _slow_path_bitand, > constexpr op_bitand_length) >+end) > > >-_llint_op_bitxor: >- traceExecution() >+llintOp(op_bitxor, macro (getOperand, disp__) > bitOp( > macro (left, right) xori left, right end, > _slow_path_bitxor, > constexpr op_bitxor_length) >+end) > > >-_llint_op_bitor: >- traceExecution() >+llintOp(op_bitor, macro (getOperand, disp__) > bitOp( > macro (left, right) ori left, right end, > _slow_path_bitor, > constexpr op_bitor_length) >+end) > > >-_llint_op_overrides_has_instance: >- traceExecution() >- loadisFromStruct(OpOverridesHasInstance::m_dst, t3) >+llintOp(op_overrides_has_instance, macro (getOperand, disp__) >+ loadisFromStruct(OpOverridesHasInstance::dst, t3) > >- loadisFromStruct(OpOverridesHasInstance::m_hasInstanceValue, t1) >+ loadisFromStruct(OpOverridesHasInstance::hasInstanceValue, t1) > loadConstantOrVariable(t1, t0) > loadp CodeBlock[cfr], t2 > loadp CodeBlock::m_globalObject[t2], t2 > loadp JSGlobalObject::m_functionProtoHasInstanceSymbolFunction[t2], t2 > bqneq t0, t2, .opOverridesHasInstanceNotDefaultSymbol > >- loadisFromStruct(OpOverridesHasInstance::m_constructor, t1) >+ loadisFromStruct(OpOverridesHasInstance::constructor, t1) > loadConstantOrVariable(t1, t0) > tbz JSCell::m_flags[t0], 
ImplementsDefaultHasInstance, t1 > orq ValueFalse, t1 > storeq t1, [cfr, t3, 8] >- dispatch(constexpr op_overrides_has_instance_length) >+ disp__() > > .opOverridesHasInstanceNotDefaultSymbol: > storeq ValueTrue, [cfr, t3, 8] >- dispatch(constexpr op_overrides_has_instance_length) >+ disp__() >+end) > > >-_llint_op_instanceof_custom: >- traceExecution() >+llintOp(op_instanceof_custom, macro (getOperand, disp__) > callSlowPath(_llint_slow_path_instanceof_custom) >- dispatch(constexpr op_instanceof_custom_length) >+ disp__() >+end) > > >-_llint_op_is_empty: >- traceExecution() >+llintOp(op_is_empty, macro (getOperand, disp__) > loadisFromInstruction(2, t1) > loadisFromInstruction(1, t2) > loadConstantOrVariable(t1, t0) > cqeq t0, ValueEmpty, t3 > orq ValueFalse, t3 > storeq t3, [cfr, t2, 8] >- dispatch(constexpr op_is_empty_length) >+ disp__() >+end) > > >-_llint_op_is_undefined: >- traceExecution() >+llintOp(op_is_undefined, macro (getOperand, disp__) > loadisFromInstruction(2, t1) > loadisFromInstruction(1, t2) > loadConstantOrVariable(t1, t0) >@@ -1216,12 +1207,12 @@ _llint_op_is_undefined: > cqeq t0, ValueUndefined, t3 > orq ValueFalse, t3 > storeq t3, [cfr, t2, 8] >- dispatch(constexpr op_is_undefined_length) >+ disp__() > .opIsUndefinedCell: > btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined > move ValueFalse, t1 > storeq t1, [cfr, t2, 8] >- dispatch(constexpr op_is_undefined_length) >+ disp__() > .masqueradesAsUndefined: > loadStructureWithScratch(t0, t3, t1, t5) > loadp CodeBlock[cfr], t1 >@@ -1229,11 +1220,11 @@ _llint_op_is_undefined: > cpeq Structure::m_globalObject[t3], t1, t0 > orq ValueFalse, t0 > storeq t0, [cfr, t2, 8] >- dispatch(constexpr op_is_undefined_length) >+ disp__() >+end) > > >-_llint_op_is_boolean: >- traceExecution() >+llintOp(op_is_boolean, macro (getOperand, disp__) > loadisFromInstruction(2, t1) > loadisFromInstruction(1, t2) > loadConstantOrVariable(t1, t0) >@@ -1241,22 +1232,22 @@ _llint_op_is_boolean: > tqz 
t0, ~1, t0 > orq ValueFalse, t0 > storeq t0, [cfr, t2, 8] >- dispatch(constexpr op_is_boolean_length) >+ disp__() >+end) > > >-_llint_op_is_number: >- traceExecution() >+llintOp(op_is_number, macro (getOperand, disp__) > loadisFromInstruction(2, t1) > loadisFromInstruction(1, t2) > loadConstantOrVariable(t1, t0) > tqnz t0, tagTypeNumber, t1 > orq ValueFalse, t1 > storeq t1, [cfr, t2, 8] >- dispatch(constexpr op_is_number_length) >+ disp__() >+end) > > >-_llint_op_is_cell_with_type: >- traceExecution() >+llintOp(op_is_cell_with_type, macro (getOperand, disp__) > loadisFromInstruction(3, t0) > loadisFromInstruction(2, t1) > loadisFromInstruction(1, t2) >@@ -1265,14 +1256,14 @@ _llint_op_is_cell_with_type: > cbeq JSCell::m_type[t3], t0, t1 > orq ValueFalse, t1 > storeq t1, [cfr, t2, 8] >- dispatch(constexpr op_is_cell_with_type_length) >+ disp__() > .notCellCase: > storeq ValueFalse, [cfr, t2, 8] >- dispatch(constexpr op_is_cell_with_type_length) >+ disp__() >+end) > > >-_llint_op_is_object: >- traceExecution() >+llintOp(op_is_object, macro (getOperand, disp__) > loadisFromInstruction(2, t1) > loadisFromInstruction(1, t2) > loadConstantOrVariable(t1, t0) >@@ -1280,10 +1271,11 @@ _llint_op_is_object: > cbaeq JSCell::m_type[t0], ObjectType, t1 > orq ValueFalse, t1 > storeq t1, [cfr, t2, 8] >- dispatch(constexpr op_is_object_length) >+ disp__() > .opIsObjectNotCell: > storeq ValueFalse, [cfr, t2, 8] >- dispatch(constexpr op_is_object_length) >+ disp__() >+end) > > > macro loadPropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value) >@@ -1312,8 +1304,7 @@ macro storePropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value > end > > >-_llint_op_get_by_id_direct: >- traceExecution() >+llintOp(op_get_by_id_direct, macro (getOperand, disp__) > loadisFromInstruction(2, t0) > loadConstantOrVariableCell(t0, t3, .opGetByIdDirectSlow) > loadi JSCell::m_structureID[t3], t1 >@@ -1324,15 +1315,15 @@ _llint_op_get_by_id_direct: > 
loadPropertyAtVariableOffset(t1, t3, t0) > storeq t0, [cfr, t2, 8] > valueProfile(t0, 6, t1) >- dispatch(constexpr op_get_by_id_direct_length) >+ disp__() > > .opGetByIdDirectSlow: > callSlowPath(_llint_slow_path_get_by_id_direct) >- dispatch(constexpr op_get_by_id_direct_length) >+ disp__() >+end) > > >-_llint_op_get_by_id: >- traceExecution() >+llintOp(op_get_by_id, macro (getOperand, disp__) > loadisFromInstruction(2, t0) > loadConstantOrVariableCell(t0, t3, .opGetByIdSlow) > loadi JSCell::m_structureID[t3], t1 >@@ -1343,15 +1334,13 @@ _llint_op_get_by_id: > loadPropertyAtVariableOffset(t1, t3, t0) > storeq t0, [cfr, t2, 8] > valueProfile(t0, 8, t1) >- dispatch(constexpr op_get_by_id_length) >+ disp__() > > .opGetByIdSlow: > callSlowPath(_llint_slow_path_get_by_id) >- dispatch(constexpr op_get_by_id_length) >- >+ disp__() > >-_llint_op_get_by_id_proto_load: >- traceExecution() >+.op_get_by_id_proto_load: > loadisFromInstruction(2, t0) > loadConstantOrVariableCell(t0, t3, .opGetByIdProtoSlow) > loadi JSCell::m_structureID[t3], t1 >@@ -1363,15 +1352,13 @@ _llint_op_get_by_id_proto_load: > loadPropertyAtVariableOffset(t1, t3, t0) > storeq t0, [cfr, t2, 8] > valueProfile(t0, 8, t1) >- dispatch(constexpr op_get_by_id_proto_load_length) >+ disp__() > > .opGetByIdProtoSlow: > callSlowPath(_llint_slow_path_get_by_id) >- dispatch(constexpr op_get_by_id_proto_load_length) >+ disp__() > >- >-_llint_op_get_by_id_unset: >- traceExecution() >+.op_get_by_id_unset: > loadisFromInstruction(2, t0) > loadConstantOrVariableCell(t0, t3, .opGetByIdUnsetSlow) > loadi JSCell::m_structureID[t3], t1 >@@ -1380,15 +1367,13 @@ _llint_op_get_by_id_unset: > loadisFromInstruction(1, t2) > storeq ValueUndefined, [cfr, t2, 8] > valueProfile(ValueUndefined, 8, t1) >- dispatch(constexpr op_get_by_id_unset_length) >+ disp__() > > .opGetByIdUnsetSlow: > callSlowPath(_llint_slow_path_get_by_id) >- dispatch(constexpr op_get_by_id_unset_length) >- >+ disp__() > >-_llint_op_get_array_length: >- 
traceExecution() >+.op_get_array_length: > loadisFromInstruction(2, t0) > loadpFromInstruction(4, t1) > loadConstantOrVariableCell(t0, t3, .opGetArrayLengthSlow) >@@ -1403,15 +1388,15 @@ _llint_op_get_array_length: > orq tagTypeNumber, t0 > valueProfile(t0, 8, t2) > storeq t0, [cfr, t1, 8] >- dispatch(constexpr op_get_array_length_length) >+ disp__() > > .opGetArrayLengthSlow: > callSlowPath(_llint_slow_path_get_by_id) >- dispatch(constexpr op_get_array_length_length) >+ disp__() >+end) > > >-_llint_op_put_by_id: >- traceExecution() >+llintOp(op_put_by_id, macro (getOperand, disp__) > loadisFromInstruction(1, t3) > loadConstantOrVariableCell(t3, t0, .opPutByIdSlow) > loadisFromInstruction(4, t2) >@@ -1546,11 +1531,12 @@ _llint_op_put_by_id: > loadisFromInstruction(5, t1) > storePropertyAtVariableOffset(t1, t0, t2) > writeBarrierOnOperands(1, 3) >- dispatch(constexpr op_put_by_id_length) >+ disp__() > > .opPutByIdSlow: > callSlowPath(_llint_slow_path_put_by_id) >- dispatch(constexpr op_put_by_id_length) >+ disp__() >+end) > > > macro finishGetByVal(result, scratch) >@@ -1571,8 +1557,7 @@ macro finishDoubleGetByVal(result, scratch1, scratch2) > finishGetByVal(scratch1, scratch2) > end > >-_llint_op_get_by_val: >- traceExecution() >+llintOp(op_get_by_val, macro (getOperand, disp__) > loadisFromInstruction(2, t2) > loadConstantOrVariableCell(t2, t0, .opGetByValSlow) > loadpFromInstruction(4, t3) >@@ -1614,7 +1599,7 @@ _llint_op_get_by_val: > .opGetByValDone: > storeq t2, [cfr, t0, 8] > valueProfile(t2, 5, t0) >- dispatch(constexpr op_get_by_val_length) >+ disp__() > > .opGetByValNotIndexedStorage: > # First lets check if we even have a typed array. This lets us do some boilerplate up front. 
>@@ -1711,7 +1696,8 @@ _llint_op_get_by_val: > > .opGetByValSlow: > callSlowPath(_llint_slow_path_get_by_val) >- dispatch(constexpr op_get_by_val_length) >+ disp__() >+end) > > > macro contiguousPutByVal(storeCallback) >@@ -1806,17 +1792,18 @@ macro putByVal(slowPath) > dispatch(5) > end > >-_llint_op_put_by_val: >+llintOp(op_put_by_val, macro (getOperand, disp__) > putByVal(_llint_slow_path_put_by_val) >+end) > >-_llint_op_put_by_val_direct: >+llintOp(op_put_by_val_direct, macro (getOperand, disp__) > putByVal(_llint_slow_path_put_by_val_direct) >+end) > > >-_llint_op_jmp: >- traceExecution() >- dispatchIntIndirect(1) >- >+llintOp(op_jmp, macro (getOperand, disp__) >+ dispatchIndirect(1) >+end) > > macro jumpTrueOrFalse(conditionOp, slow) > loadisFromInstruction(1, t1) >@@ -1826,7 +1813,7 @@ macro jumpTrueOrFalse(conditionOp, slow) > dispatch(3) > > .target: >- dispatchIntIndirect(2) >+ dispatchIndirect(2) > > .slow: > callSlowPath(slow) >@@ -1844,7 +1831,7 @@ macro equalNull(cellHandler, immediateHandler) > dispatch(3) > > .target: >- dispatchIntIndirect(2) >+ dispatchIndirect(2) > > .immediate: > andq ~TagBitUndefined, t0 >@@ -1852,8 +1839,7 @@ macro equalNull(cellHandler, immediateHandler) > dispatch(3) > end > >-_llint_op_jeq_null: >- traceExecution() >+llintOp(op_jeq_null, macro (getOperand, disp__) > equalNull( > macro (structure, value, target) > btbz value, MasqueradesAsUndefined, .notMasqueradesAsUndefined >@@ -1863,10 +1849,10 @@ _llint_op_jeq_null: > .notMasqueradesAsUndefined: > end, > macro (value, target) bqeq value, ValueNull, target end) >+end) > > >-_llint_op_jneq_null: >- traceExecution() >+llintOp(op_jneq_null, macro (getOperand, disp__) > equalNull( > macro (structure, value, target) > btbz value, MasqueradesAsUndefined, target >@@ -1875,21 +1861,22 @@ _llint_op_jneq_null: > bpneq Structure::m_globalObject[structure], t0, target > end, > macro (value, target) bqneq value, ValueNull, target end) >+end) > > >-_llint_op_jneq_ptr: >- 
traceExecution() >+llintOp(op_jneq_ptr, macro (getOperand, disp__) > loadisFromInstruction(1, t0) > loadisFromInstruction(2, t1) > loadp CodeBlock[cfr], t2 > loadp CodeBlock::m_globalObject[t2], t2 > loadp JSGlobalObject::m_specialPointers[t2, t1, 8], t1 > bpneq t1, [cfr, t0, 8], .opJneqPtrTarget >- dispatch(5) >+ disp__() > > .opJneqPtrTarget: > storei 1, 32[PB, PC, 8] >- dispatchIntIndirect(3) >+ dispatchIndirect(3) >+end) > > > macro compareJump(integerCompare, doubleCompare, slowPath) >@@ -1926,7 +1913,7 @@ macro compareJump(integerCompare, doubleCompare, slowPath) > dispatch(4) > > .jumpTarget: >- dispatchIntIndirect(3) >+ dispatchIndirect(3) > > .slow: > callSlowPath(slowPath) >@@ -1943,7 +1930,7 @@ macro compareUnsignedJump(integerCompare) > dispatch(4) > > .jumpTarget: >- dispatchIntIndirect(3) >+ dispatchIndirect(3) > end > > >@@ -1960,8 +1947,7 @@ macro compareUnsigned(integerCompareAndSet) > end > > >-_llint_op_switch_imm: >- traceExecution() >+llintOp(op_switch_imm, macro (getOperand, disp__) > loadisFromInstruction(3, t2) > loadisFromInstruction(1, t3) > loadConstantOrVariable(t2, t1) >@@ -1981,15 +1967,15 @@ _llint_op_switch_imm: > .opSwitchImmNotInt: > btqnz t1, tagTypeNumber, .opSwitchImmSlow # Go slow if it's a double. 
> .opSwitchImmFallThrough: >- dispatchIntIndirect(2) >+ dispatchIndirect(2) > > .opSwitchImmSlow: > callSlowPath(_llint_slow_path_switch_imm) >- dispatch(0) >+ disp__() >+end) > > >-_llint_op_switch_char: >- traceExecution() >+llintOp(op_switch_char, macro (getOperand, disp__) > loadisFromInstruction(3, t2) > loadisFromInstruction(1, t3) > loadConstantOrVariable(t2, t1) >@@ -2018,11 +2004,12 @@ _llint_op_switch_char: > dispatch(t1) > > .opSwitchCharFallThrough: >- dispatchIntIndirect(2) >+ dispatchIndirect(2) > > .opSwitchOnRope: > callSlowPath(_llint_slow_path_switch_char) >- dispatch(0) >+ disp__() >+end) > > > macro arrayProfileForCall() >@@ -2068,16 +2055,15 @@ macro doCall(slowPath, prepareCall) > slowPathForCall(slowPath, prepareCall) > end > >-_llint_op_ret: >- traceExecution() >+llintOp(op_ret, macro (getOperand, disp__) > checkSwitchToJITForEpilogue() > loadisFromInstruction(1, t2) > loadConstantOrVariable(t2, r0) > doReturn() >+end) > > >-_llint_op_to_primitive: >- traceExecution() >+llintOp(op_to_primitive, macro (getOperand, disp__) > loadisFromInstruction(2, t2) > loadisFromInstruction(1, t3) > loadConstantOrVariable(t2, t0) >@@ -2085,14 +2071,15 @@ _llint_op_to_primitive: > bbaeq JSCell::m_type[t0], ObjectType, .opToPrimitiveSlowCase > .opToPrimitiveIsImm: > storeq t0, [cfr, t3, 8] >- dispatch(constexpr op_to_primitive_length) >+ disp__() > > .opToPrimitiveSlowCase: > callSlowPath(_slow_path_to_primitive) >- dispatch(constexpr op_to_primitive_length) >+ disp__() >+end) > > >-_llint_op_catch: >+llintOp(op_catch, macro (getOperand, disp__) > # This is where we end up from the JIT's throw trampoline (because the > # machine code return address will be set to _llint_op_catch), and from > # the interpreter's throw trampoline (see _llint_throw_trampoline). 
>@@ -2135,19 +2122,20 @@ _llint_op_catch: > > callSlowPath(_llint_slow_path_profile_catch) > >- dispatch(constexpr op_catch_length) >+ disp__() >+end) > > >-_llint_op_end: >- traceExecution() >+llintOp(op_end, macro (getOperand, disp__) > checkSwitchToJITForEpilogue() > loadisFromInstruction(1, t0) > assertNotConstant(t0) > loadq [cfr, t0, 8], r0 > doReturn() >+end) > > >-_llint_throw_from_slow_path_trampoline: >+op(llint_throw_from_slow_path_trampoline, macro (getOperand, disp__) > loadp Callee[cfr], t1 > andp MarkedBlockMask, t1 > loadp MarkedBlockFooterOffset + MarkedBlock::Footer::m_vm[t1], t1 >@@ -2162,11 +2150,13 @@ _llint_throw_from_slow_path_trampoline: > andp MarkedBlockMask, t1 > loadp MarkedBlockFooterOffset + MarkedBlock::Footer::m_vm[t1], t1 > jmp VM::targetMachinePCForThrow[t1], ExceptionHandlerPtrTag >+end) > > >-_llint_throw_during_call_trampoline: >+op(llint_throw_during_call_trampoline, macro (getOperand, disp__) > preserveReturnAddressAfterCall(t2) > jmp _llint_throw_from_slow_path_trampoline >+end) > > > macro nativeCallTrampoline(executableOffsetToFunction) >@@ -2288,62 +2278,62 @@ macro resolveScope() > end > > >-_llint_op_resolve_scope: >- traceExecution() >+llintOp(op_resolve_scope, macro (getOperand, disp__) > loadisFromInstruction(4, t0) > > #rGlobalProperty: > bineq t0, GlobalProperty, .rGlobalVar > getConstantScope(1) >- dispatch(constexpr op_resolve_scope_length) >+ disp__() > > .rGlobalVar: > bineq t0, GlobalVar, .rGlobalLexicalVar > getConstantScope(1) >- dispatch(constexpr op_resolve_scope_length) >+ disp__() > > .rGlobalLexicalVar: > bineq t0, GlobalLexicalVar, .rClosureVar > getConstantScope(1) >- dispatch(constexpr op_resolve_scope_length) >+ disp__() > > .rClosureVar: > bineq t0, ClosureVar, .rModuleVar > resolveScope() >- dispatch(constexpr op_resolve_scope_length) >+ disp__() > > .rModuleVar: > bineq t0, ModuleVar, .rGlobalPropertyWithVarInjectionChecks > getConstantScope(1) >- dispatch(constexpr op_resolve_scope_length) >+ 
disp__() > > .rGlobalPropertyWithVarInjectionChecks: > bineq t0, GlobalPropertyWithVarInjectionChecks, .rGlobalVarWithVarInjectionChecks > varInjectionCheck(.rDynamic) > getConstantScope(1) >- dispatch(constexpr op_resolve_scope_length) >+ disp__() > > .rGlobalVarWithVarInjectionChecks: > bineq t0, GlobalVarWithVarInjectionChecks, .rGlobalLexicalVarWithVarInjectionChecks > varInjectionCheck(.rDynamic) > getConstantScope(1) >- dispatch(constexpr op_resolve_scope_length) >+ disp__() > > .rGlobalLexicalVarWithVarInjectionChecks: > bineq t0, GlobalLexicalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks > varInjectionCheck(.rDynamic) > getConstantScope(1) >- dispatch(constexpr op_resolve_scope_length) >+ disp__() > > .rClosureVarWithVarInjectionChecks: > bineq t0, ClosureVarWithVarInjectionChecks, .rDynamic > varInjectionCheck(.rDynamic) > resolveScope() >- dispatch(constexpr op_resolve_scope_length) >+ disp__() > > .rDynamic: > callSlowPath(_slow_path_resolve_scope) >- dispatch(constexpr op_resolve_scope_length) >+ disp__() >+end) > > > macro loadWithStructureCheck(operand, slowPath) >@@ -2379,8 +2369,7 @@ macro getClosureVar() > storeq t0, [cfr, t1, 8] > end > >-_llint_op_get_from_scope: >- traceExecution() >+llintOp(op_get_from_scope, macro (getOperand, disp__) > loadisFromInstruction(4, t0) > andi ResolveTypeMask, t0 > >@@ -2388,12 +2377,12 @@ _llint_op_get_from_scope: > bineq t0, GlobalProperty, .gGlobalVar > loadWithStructureCheck(2, .gDynamic) > getProperty() >- dispatch(constexpr op_get_from_scope_length) >+ disp__() > > .gGlobalVar: > bineq t0, GlobalVar, .gGlobalLexicalVar > getGlobalVar(macro(v) end) >- dispatch(constexpr op_get_from_scope_length) >+ disp__() > > .gGlobalLexicalVar: > bineq t0, GlobalLexicalVar, .gClosureVar >@@ -2401,25 +2390,25 @@ _llint_op_get_from_scope: > macro (value) > bqeq value, ValueEmpty, .gDynamic > end) >- dispatch(constexpr op_get_from_scope_length) >+ disp__() > > .gClosureVar: > bineq t0, ClosureVar, 
.gGlobalPropertyWithVarInjectionChecks > loadVariable(2, t0) > getClosureVar() >- dispatch(constexpr op_get_from_scope_length) >+ disp__() > > .gGlobalPropertyWithVarInjectionChecks: > bineq t0, GlobalPropertyWithVarInjectionChecks, .gGlobalVarWithVarInjectionChecks > loadWithStructureCheck(2, .gDynamic) > getProperty() >- dispatch(constexpr op_get_from_scope_length) >+ disp__() > > .gGlobalVarWithVarInjectionChecks: > bineq t0, GlobalVarWithVarInjectionChecks, .gGlobalLexicalVarWithVarInjectionChecks > varInjectionCheck(.gDynamic) > getGlobalVar(macro(v) end) >- dispatch(constexpr op_get_from_scope_length) >+ disp__() > > .gGlobalLexicalVarWithVarInjectionChecks: > bineq t0, GlobalLexicalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks >@@ -2428,18 +2417,19 @@ _llint_op_get_from_scope: > macro (value) > bqeq value, ValueEmpty, .gDynamic > end) >- dispatch(constexpr op_get_from_scope_length) >+ disp__() > > .gClosureVarWithVarInjectionChecks: > bineq t0, ClosureVarWithVarInjectionChecks, .gDynamic > varInjectionCheck(.gDynamic) > loadVariable(2, t0) > getClosureVar() >- dispatch(constexpr op_get_from_scope_length) >+ disp__() > > .gDynamic: > callSlowPath(_llint_slow_path_get_from_scope) >- dispatch(constexpr op_get_from_scope_length) >+ disp__() >+end) > > > macro putProperty() >@@ -2488,8 +2478,7 @@ macro checkTDZInGlobalPutToScopeIfNecessary() > end > > >-_llint_op_put_to_scope: >- traceExecution() >+llintOp(op_put_to_scope, macro (getOperand, disp__) > loadisFromInstruction(4, t0) > andi ResolveTypeMask, t0 > >@@ -2498,48 +2487,48 @@ _llint_op_put_to_scope: > loadVariable(1, t0) > putLocalClosureVar() > writeBarrierOnOperands(1, 3) >- dispatch(constexpr op_put_to_scope_length) >+ disp__() > > .pGlobalProperty: > bineq t0, GlobalProperty, .pGlobalVar > loadWithStructureCheck(1, .pDynamic) > putProperty() > writeBarrierOnOperands(1, 3) >- dispatch(constexpr op_put_to_scope_length) >+ disp__() > > .pGlobalVar: > bineq t0, GlobalVar, .pGlobalLexicalVar 
> writeBarrierOnGlobalObject(3) > putGlobalVariable() >- dispatch(constexpr op_put_to_scope_length) >+ disp__() > > .pGlobalLexicalVar: > bineq t0, GlobalLexicalVar, .pClosureVar > writeBarrierOnGlobalLexicalEnvironment(3) > checkTDZInGlobalPutToScopeIfNecessary() > putGlobalVariable() >- dispatch(constexpr op_put_to_scope_length) >+ disp__() > > .pClosureVar: > bineq t0, ClosureVar, .pGlobalPropertyWithVarInjectionChecks > loadVariable(1, t0) > putClosureVar() > writeBarrierOnOperands(1, 3) >- dispatch(constexpr op_put_to_scope_length) >+ disp__() > > .pGlobalPropertyWithVarInjectionChecks: > bineq t0, GlobalPropertyWithVarInjectionChecks, .pGlobalVarWithVarInjectionChecks > loadWithStructureCheck(1, .pDynamic) > putProperty() > writeBarrierOnOperands(1, 3) >- dispatch(constexpr op_put_to_scope_length) >+ disp__() > > .pGlobalVarWithVarInjectionChecks: > bineq t0, GlobalVarWithVarInjectionChecks, .pGlobalLexicalVarWithVarInjectionChecks > writeBarrierOnGlobalObject(3) > varInjectionCheck(.pDynamic) > putGlobalVariable() >- dispatch(constexpr op_put_to_scope_length) >+ disp__() > > .pGlobalLexicalVarWithVarInjectionChecks: > bineq t0, GlobalLexicalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks >@@ -2547,7 +2536,7 @@ _llint_op_put_to_scope: > varInjectionCheck(.pDynamic) > checkTDZInGlobalPutToScopeIfNecessary() > putGlobalVariable() >- dispatch(constexpr op_put_to_scope_length) >+ disp__() > > .pClosureVarWithVarInjectionChecks: > bineq t0, ClosureVarWithVarInjectionChecks, .pModuleVar >@@ -2555,51 +2544,51 @@ _llint_op_put_to_scope: > loadVariable(1, t0) > putClosureVar() > writeBarrierOnOperands(1, 3) >- dispatch(constexpr op_put_to_scope_length) >+ disp__() > > .pModuleVar: > bineq t0, ModuleVar, .pDynamic > callSlowPath(_slow_path_throw_strict_mode_readonly_property_write_error) >- dispatch(constexpr op_put_to_scope_length) >+ disp__() > > .pDynamic: > callSlowPath(_llint_slow_path_put_to_scope) >- dispatch(constexpr op_put_to_scope_length) >+ 
disp__() >+end) > > >-_llint_op_get_from_arguments: >- traceExecution() >+llintOp(op_get_from_arguments, macro (getOperand, disp__) > loadVariable(2, t0) > loadi 24[PB, PC, 8], t1 > loadq DirectArguments_storage[t0, t1, 8], t0 > valueProfile(t0, 4, t1) > loadisFromInstruction(1, t1) > storeq t0, [cfr, t1, 8] >- dispatch(constexpr op_get_from_arguments_length) >+ disp__() >+end) > > >-_llint_op_put_to_arguments: >- traceExecution() >+llintOp(op_put_to_arguments, macro (getOperand, disp__) > loadVariable(1, t0) > loadi 16[PB, PC, 8], t1 > loadisFromInstruction(3, t3) > loadConstantOrVariable(t3, t2) > storeq t2, DirectArguments_storage[t0, t1, 8] > writeBarrierOnOperands(1, 3) >- dispatch(constexpr op_put_to_arguments_length) >+ disp__() >+end) > > >-_llint_op_get_parent_scope: >- traceExecution() >+llintOp(op_get_parent_scope, macro (getOperand, disp__) > loadVariable(2, t0) > loadp JSScope::m_next[t0], t0 > loadisFromInstruction(1, t1) > storeq t0, [cfr, t1, 8] >- dispatch(constexpr op_get_parent_scope_length) >+ disp__() >+end) > > >-_llint_op_profile_type: >- traceExecution() >+llintOp(op_profile_type, macro (getOperand, disp__) > loadp CodeBlock[cfr], t1 > loadp CodeBlock::m_poisonedVM[t1], t1 > unpoison(_g_CodeBlockPoison, t1, t3) >@@ -2637,17 +2626,18 @@ _llint_op_profile_type: > callSlowPath(_slow_path_profile_type_clear_log) > > .opProfileTypeDone: >- dispatch(constexpr op_profile_type_length) >+ disp__() >+end) > >-_llint_op_profile_control_flow: >- traceExecution() >+ >+llintOp(op_profile_control_flow, macro (getOperand, disp__) > loadpFromInstruction(1, t0) > addq 1, BasicBlockLocation::m_executionCount[t0] >- dispatch(constexpr op_profile_control_flow_length) >+ disp__() >+end) > > >-_llint_op_get_rest_length: >- traceExecution() >+llintOp(op_get_rest_length, macro (getOperand, disp__) > loadi PayloadOffset + ArgumentCount[cfr], t0 > subi 1, t0 > loadisFromInstruction(2, t1) >@@ -2660,11 +2650,11 @@ _llint_op_get_rest_length: > orq tagTypeNumber, t0 > 
loadisFromInstruction(1, t1) > storeq t0, [cfr, t1, 8] >- dispatch(constexpr op_get_rest_length_length) >+ disp__() >+end) > > >-_llint_op_log_shadow_chicken_prologue: >- traceExecution() >+llintOp(op_log_shadow_chicken_prologue, macro (getOperand, disp__) > acquireShadowChickenPacket(.opLogShadowChickenPrologueSlow) > storep cfr, ShadowChicken::Packet::frame[t0] > loadp CallerFrame[cfr], t1 >@@ -2673,14 +2663,14 @@ _llint_op_log_shadow_chicken_prologue: > storep t1, ShadowChicken::Packet::callee[t0] > loadVariable(1, t1) > storep t1, ShadowChicken::Packet::scope[t0] >- dispatch(constexpr op_log_shadow_chicken_prologue_length) >+ disp__() > .opLogShadowChickenPrologueSlow: > callSlowPath(_llint_slow_path_log_shadow_chicken_prologue) >- dispatch(constexpr op_log_shadow_chicken_prologue_length) >+ disp__() >+end) > > >-_llint_op_log_shadow_chicken_tail: >- traceExecution() >+llintOp(op_log_shadow_chicken_tail, macro (getOperand, disp__) > acquireShadowChickenPacket(.opLogShadowChickenTailSlow) > storep cfr, ShadowChicken::Packet::frame[t0] > storep ShadowChickenTailMarker, ShadowChicken::Packet::callee[t0] >@@ -2691,7 +2681,8 @@ _llint_op_log_shadow_chicken_tail: > loadp CodeBlock[cfr], t1 > storep t1, ShadowChicken::Packet::codeBlock[t0] > storei PC, ShadowChicken::Packet::callSiteIndex[t0] >- dispatch(constexpr op_log_shadow_chicken_tail_length) >+ disp__() > .opLogShadowChickenTailSlow: > callSlowPath(_llint_slow_path_log_shadow_chicken_tail) >- dispatch(constexpr op_log_shadow_chicken_tail_length) >+ disp__() >+end) >diff --git a/Source/JavaScriptCore/offlineasm/asm.rb b/Source/JavaScriptCore/offlineasm/asm.rb >index 06041497423eb4c5767d52fa894f914f53953c2b..3d3112b47c4fc56b74fdcac92dedf3a7d41d4a41 100644 >--- a/Source/JavaScriptCore/offlineasm/asm.rb >+++ b/Source/JavaScriptCore/offlineasm/asm.rb >@@ -371,12 +371,13 @@ File.open(outputFlnm, "w") { > $asm = Assembler.new($output) > > ast = parse(asmFile) >+ settingsCombinations = computeSettingsCombinations(ast) 
> > configurationList.each { > | configuration | > offsetsList = configuration[0] > configIndex = configuration[1] >- forSettings(computeSettingsCombinations(ast)[configIndex], ast) { >+ forSettings(settingsCombinations[configIndex], ast) { > | concreteSettings, lowLevelAST, backend | > > # There could be multiple backends we are generating for, but the C_LOOP is >@@ -386,6 +387,7 @@ File.open(outputFlnm, "w") { > $enableDebugAnnotations = false > end > >+ lowLevelAST = lowLevelAST.demacroify({}) > lowLevelAST = lowLevelAST.resolve(buildOffsetsMap(lowLevelAST, offsetsList)) > lowLevelAST.validate > emitCodeInConfiguration(concreteSettings, lowLevelAST, backend) { >diff --git a/Source/JavaScriptCore/offlineasm/ast.rb b/Source/JavaScriptCore/offlineasm/ast.rb >index 0ccf7b331bbb30ee11c976c08eb6b29660d8de15..ac54644a1fd2d4c36ff3804eb286dcf982153cb9 100644 >--- a/Source/JavaScriptCore/offlineasm/ast.rb >+++ b/Source/JavaScriptCore/offlineasm/ast.rb >@@ -73,6 +73,18 @@ class Node > def filter(type) > flatten.select{|v| v.is_a? type} > end >+ >+ def empty? 
>+ false >+ end >+ >+ def to_json(options={}) >+ hash = {} >+ self.instance_variables.each do |var| >+ hash[var] = self.instance_variable_get var >+ end >+ hash.to_json(options) >+ end > end > > class NoChildren < Node >@@ -910,7 +922,7 @@ class Instruction < Node > end > > def children >- operands >+ @operands > end > > def mapChildren(&proc) >@@ -961,7 +973,7 @@ class Error < NoChildren > end > > class ConstExpr < NoChildren >- attr_reader :variable, :value >+ attr_reader :value > > def initialize(codeOrigin, value) > super(codeOrigin) >@@ -1016,11 +1028,10 @@ $labelMapping = {} > $referencedExternLabels = Array.new > > class Label < NoChildren >- attr_reader :name >- >- def initialize(codeOrigin, name) >+ def initialize(codeOrigin, name, definedInFile = false) > super(codeOrigin) > @name = name >+ @definedInFile = definedInFile > @extern = true > @global = false > end >@@ -1029,7 +1040,7 @@ class Label < NoChildren > if $labelMapping[name] > raise "Label name collision: #{name}" unless $labelMapping[name].is_a? Label > else >- $labelMapping[name] = Label.new(codeOrigin, name) >+ $labelMapping[name] = Label.new(codeOrigin, name, definedInFile) > end > if definedInFile > $labelMapping[name].clearExtern() >@@ -1076,6 +1087,10 @@ class Label < NoChildren > @global > end > >+ def name >+ @name >+ end >+ > def dump > "#{name}:" > end >@@ -1250,6 +1265,10 @@ class Sequence < Node > def dump > list.collect{|v| v.dump}.join("\n") > end >+ >+ def empty? >+ list.all?(&:empty?) >+ end > end > > class True < NoChildren >@@ -1399,6 +1418,10 @@ class Skip < NoChildren > def dump > "\tskip" > end >+ >+ def empty? 
>+ true >+ end > end > > class IfThenElse < Node >@@ -1421,12 +1444,18 @@ class IfThenElse < Node > end > > def mapChildren >- IfThenElse.new(codeOrigin, (yield @predicate), (yield @thenCase), (yield @elseCase)) >+ ifThenElse = IfThenElse.new(codeOrigin, (yield @predicate), (yield @thenCase)) >+ ifThenElse.elseCase = yield @elseCase >+ ifThenElse > end > > def dump > "if #{predicate.dump}\n" + thenCase.dump + "\nelse\n" + elseCase.dump + "\nend" > end >+ >+ def empty? >+ @thenCase.empty? && @elseCase.empty? >+ end > end > > class Macro < Node >diff --git a/Source/JavaScriptCore/offlineasm/cloop.rb b/Source/JavaScriptCore/offlineasm/cloop.rb >index 870525922f02a4447e8732f99a0d8bfe5d186cc4..9dd818dc623d7e7f02e2384e5649a0fa04525324 100644 >--- a/Source/JavaScriptCore/offlineasm/cloop.rb >+++ b/Source/JavaScriptCore/offlineasm/cloop.rb >@@ -222,7 +222,7 @@ class Address > "*CAST<NativeFunction*>(#{pointerExpr})" > end > def opcodeMemRef >- "*CAST<Opcode*>(#{pointerExpr})" >+ "*CAST<OpcodeID*>(#{pointerExpr})" > end > def dblMemRef > "*CAST<double*>(#{pointerExpr})" >@@ -286,7 +286,7 @@ class BaseIndex > "*CAST<uintptr_t*>(#{pointerExpr})" > end > def opcodeMemRef >- "*CAST<Opcode*>(#{pointerExpr})" >+ "*CAST<OpcodeID*>(#{pointerExpr})" > end > def dblMemRef > "*CAST<double*>(#{pointerExpr})" >@@ -1077,7 +1077,7 @@ class Instruction > # as an opcode dispatch. 
> when "cloopCallJSFunction" > uid = $asm.newUID >- $asm.putc "lr.opcode = getOpcode(llint_cloop_did_return_from_js_#{uid});" >+ $asm.putc "lr.opcode = llint_cloop_did_return_from_js_#{uid};" > $asm.putc "opcode = #{operands[0].clValue(:opcode)};" > $asm.putc "DISPATCH_OPCODE();" > $asm.putsLabel("llint_cloop_did_return_from_js_#{uid}", false) >diff --git a/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb b/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb >index fff398255f678dd2db422de2491fb92a7b099c24..7a1caff62cab6064ba9149f6426c840c100db665 100644 >--- a/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb >+++ b/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb >@@ -37,11 +37,21 @@ require "transform" > IncludeFile.processIncludeOptions() > > inputFlnm = ARGV.shift >+settingsFlnm = ARGV.shift > outputFlnm = ARGV.shift > >+$stderr.puts settingsFlnm >+ > validBackends = canonicalizeBackendNames(ARGV.shift.split(/[,\s]+/)) > includeOnlyBackends(validBackends) > >+begin >+ configurationList = configurationIndices(settingsFlnm) >+rescue MissingMagicValuesException >+ $stderr.puts "offlineasm: No magic values found. Skipping assembly file generation." >+ exit 1 >+end >+ > def emitMagicNumber > OFFSET_MAGIC_NUMBERS.each { > | number | >@@ -49,7 +59,8 @@ def emitMagicNumber > } > end > >-inputHash = "// offlineasm input hash: #{parseHash(inputFlnm)} #{selfHash}" >+configurationHash = Digest::SHA1.hexdigest(configurationList.join(' ')) >+inputHash = "// offlineasm input hash: #{parseHash(inputFlnm)} #{configurationHash} #{selfHash}" > > if FileTest.exist? outputFlnm > File.open(outputFlnm, "r") { >@@ -62,108 +73,49 @@ if FileTest.exist? outputFlnm > } > end > >-originalAST = parse(inputFlnm) >- >-# >-# Optimize the AST to make configuration extraction faster. This reduces the AST to a form >-# that only contains the things that matter for our purposes: offsets, sizes, and if >-# statements. 
>-# >- >-class Node >- def offsetsPruneTo(sequence) >- children.each { >- | child | >- child.offsetsPruneTo(sequence) >- } >- end >- >- def offsetsPrune >- result = Sequence.new(codeOrigin, []) >- offsetsPruneTo(result) >- result >- end >-end >- >-class IfThenElse >- def offsetsPruneTo(sequence) >- ifThenElse = IfThenElse.new(codeOrigin, predicate, thenCase.offsetsPrune) >- ifThenElse.elseCase = elseCase.offsetsPrune >- sequence.list << ifThenElse >- end >-end >- >-class StructOffset >- def offsetsPruneTo(sequence) >- sequence.list << self >- end >-end >- >-class Sizeof >- def offsetsPruneTo(sequence) >- sequence.list << self >- end >-end >- >-class ConstExpr >- def offsetsPruneTo(sequence) >- sequence.list << self >- end >-end >- >-prunedAST = originalAST.offsetsPrune >+ast = parse(inputFlnm) >+settingsCombinations = computeSettingsCombinations(ast) > > File.open(outputFlnm, "w") { > | outp | > $output = outp > outp.puts inputHash >- length = 0 >- >- emitCodeInAllConfigurations(prunedAST) { >- | settings, ast, backend, index | >- constsList = ast.filter(ConstExpr).uniq.sort >- >- constsList.each_with_index { >- | const, index | >- outp.puts "constexpr int64_t constValue#{index} = static_cast<int64_t>(#{const.value});" >- } >- } >- >- emitCodeInAllConfigurations(prunedAST) { >- | settings, ast, backend, index | >- offsetsList = ast.filter(StructOffset).uniq.sort >- sizesList = ast.filter(Sizeof).uniq.sort >- constsList = ast.filter(ConstExpr).uniq.sort >- length += OFFSET_HEADER_MAGIC_NUMBERS.size + (OFFSET_MAGIC_NUMBERS.size + 1) * (1 + offsetsList.size + sizesList.size + constsList.size) >- } >- outp.puts "static const int64_t extractorTable[#{length}] = {" >- emitCodeInAllConfigurations(prunedAST) { >- | settings, ast, backend, index | >- OFFSET_HEADER_MAGIC_NUMBERS.each { >- | number | >- $output.puts "unsigned(#{number})," >- } > >- offsetsList = ast.filter(StructOffset).uniq.sort >- sizesList = ast.filter(Sizeof).uniq.sort >- constsList = 
ast.filter(ConstExpr).uniq.sort >- >- emitMagicNumber >- outp.puts "#{index}," >- offsetsList.each { >- | offset | >- emitMagicNumber >- outp.puts "OFFLINE_ASM_OFFSETOF(#{offset.struct}, #{offset.field})," >- } >- sizesList.each { >- | sizeof | >- emitMagicNumber >- outp.puts "sizeof(#{sizeof.struct})," >- } >- constsList.each_index { >- | index | >- emitMagicNumber >- outp.puts "constValue#{index}," >+ outp.puts "static const int64_t extractorTable[] = {" >+ configurationList.each { >+ | configIndex | >+ forSettings(settingsCombinations[configIndex], ast) { >+ | concreteSettings, lowLevelAST, backend | >+ >+ lowLevelAST = lowLevelAST.demacroify({}) >+ offsetsList = offsetsList(lowLevelAST) >+ sizesList = sizesList(lowLevelAST) >+ constsList = constsList(lowLevelAST) >+ >+ emitCodeInConfiguration(concreteSettings, lowLevelAST, backend) { >+ OFFSET_HEADER_MAGIC_NUMBERS.each { >+ | number | >+ outp.puts "unsigned(#{number})," >+ } >+ >+ emitMagicNumber >+ outp.puts "#{configIndex}," >+ offsetsList.each { >+ | offset | >+ emitMagicNumber >+ outp.puts "OFFLINE_ASM_OFFSETOF(#{offset.struct}, #{offset.field})," >+ } >+ sizesList.each { >+ | sizeof | >+ emitMagicNumber >+ outp.puts "sizeof(#{sizeof.struct})," >+ } >+ constsList.each { >+ | const | >+ emitMagicNumber >+ outp.puts "static_cast<int64_t>(#{const.value})," >+ } >+ } > } > } > outp.puts "};" >diff --git a/Source/JavaScriptCore/offlineasm/generate_settings_extractor.rb b/Source/JavaScriptCore/offlineasm/generate_settings_extractor.rb >new file mode 100644 >index 0000000000000000000000000000000000000000..01c7dd5540e23d7da44c4b44ac718633b66db9a7 >--- /dev/null >+++ b/Source/JavaScriptCore/offlineasm/generate_settings_extractor.rb >@@ -0,0 +1,80 @@ >+#!/usr/bin/env ruby >+ >+# Copyright (C) 2011 Apple Inc. All rights reserved. >+# >+# Redistribution and use in source and binary forms, with or without >+# modification, are permitted provided that the following conditions >+# are met: >+# 1. 
Redistributions of source code must retain the above copyright >+# notice, this list of conditions and the following disclaimer. >+# 2. Redistributions in binary form must reproduce the above copyright >+# notice, this list of conditions and the following disclaimer in the >+# documentation and/or other materials provided with the distribution. >+# >+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' >+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, >+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS >+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR >+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF >+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS >+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN >+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) >+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF >+# THE POSSIBILITY OF SUCH DAMAGE. >+ >+$: << File.dirname(__FILE__) >+ >+require "config" >+require "backends" >+require "digest/sha1" >+require "offsets" >+require "parser" >+require "self_hash" >+require "settings" >+require "transform" >+ >+IncludeFile.processIncludeOptions() >+ >+inputFlnm = ARGV.shift >+outputFlnm = ARGV.shift >+ >+validBackends = canonicalizeBackendNames(ARGV.shift.split(/[,\s]+/)) >+includeOnlyBackends(validBackends) >+ >+inputHash = "// offlineasm input hash: #{parseHash(inputFlnm)} #{selfHash}" >+ >+if FileTest.exist? outputFlnm >+ File.open(outputFlnm, "r") { >+ | inp | >+ firstLine = inp.gets >+ if firstLine and firstLine.chomp == inputHash >+ $stderr.puts "offlineasm: Nothing changed." 
>+ exit 0 >+ end >+ } >+end >+ >+originalAST = parse(inputFlnm) >+prunedAST = Sequence.new(originalAST.codeOrigin, originalAST.filter(Setting)) >+ >+File.open(outputFlnm, "w") { >+ | outp | >+ $output = outp >+ outp.puts inputHash >+ >+ settingsCombinations = computeSettingsCombinations(prunedAST) >+ length = settingsCombinations.size * (1 + OFFSET_HEADER_MAGIC_NUMBERS.size) >+ >+ outp.puts "static const int64_t extractorTable[#{length}] = {" >+ emitCodeInAllConfigurations(prunedAST) { >+ | settings, ast, backend, index | >+ OFFSET_HEADER_MAGIC_NUMBERS.each { >+ | number | >+ $output.puts "unsigned(#{number})," >+ } >+ outp.puts "#{index}," >+ } >+ outp.puts "};" >+ >+} >diff --git a/Source/JavaScriptCore/offlineasm/offsets.rb b/Source/JavaScriptCore/offlineasm/offsets.rb >index 2c1c1b773ca0a5875b574b62d711cd69cf527213..fe6b331f8d58835507a4891049bafcc84e64c42e 100644 >--- a/Source/JavaScriptCore/offlineasm/offsets.rb >+++ b/Source/JavaScriptCore/offlineasm/offsets.rb >@@ -56,65 +56,53 @@ def constsList(ast) > ast.filter(ConstExpr).uniq.sort > end > >-# >-# offsetsAndConfigurationIndex(ast, file) -> >-# [[offsets, index], ...] >-# >-# Parses the offsets from a file and returns a list of offsets and the >-# index of the configuration that is valid in this build target. 
>-# >- >-def offsetsAndConfigurationIndex(file) >- endiannessMarkerBytes = nil >- result = {} >- >- def readInt(endianness, bytes) >- if endianness == :little >- # Little endian >- number = (bytes[0] << 0 | >- bytes[1] << 8 | >- bytes[2] << 16 | >- bytes[3] << 24 | >- bytes[4] << 32 | >- bytes[5] << 40 | >- bytes[6] << 48 | >- bytes[7] << 56) >- else >- # Big endian >- number = (bytes[0] << 56 | >- bytes[1] << 48 | >- bytes[2] << 40 | >- bytes[3] << 32 | >- bytes[4] << 24 | >- bytes[5] << 16 | >- bytes[6] << 8 | >- bytes[7] << 0) >- end >- if number > 0x7fffffff_ffffffff >- number -= 1 << 64 >- end >- number >+def readInt(endianness, bytes) >+ if endianness == :little >+ # Little endian >+ number = (bytes[0] << 0 | >+ bytes[1] << 8 | >+ bytes[2] << 16 | >+ bytes[3] << 24 | >+ bytes[4] << 32 | >+ bytes[5] << 40 | >+ bytes[6] << 48 | >+ bytes[7] << 56) >+ else >+ # Big endian >+ number = (bytes[0] << 56 | >+ bytes[1] << 48 | >+ bytes[2] << 40 | >+ bytes[3] << 32 | >+ bytes[4] << 24 | >+ bytes[5] << 16 | >+ bytes[6] << 8 | >+ bytes[7] << 0) > end >- >- def prepareMagic(endianness, numbers) >- magicBytes = [] >- numbers.each { >- | number | >- currentBytes = [] >- 8.times { >- currentBytes << (number & 0xff) >- number >>= 8 >- } >- if endianness == :big >- currentBytes.reverse! >- end >- magicBytes += currentBytes >- } >- magicBytes >+ if number > 0x7fffffff_ffffffff >+ number -= 1 << 64 > end >- >+ number >+end >+ >+def prepareMagic(endianness, numbers) >+ magicBytes = [] >+ numbers.each { >+ | number | >+ currentBytes = [] >+ 8.times { >+ currentBytes << (number & 0xff) >+ number >>= 8 >+ } >+ if endianness == :big >+ currentBytes.reverse! 
>+ end >+ magicBytes += currentBytes >+ } >+ magicBytes >+end >+ >+def fileBytes(file) > fileBytes = [] >- > File.open(file, "rb") { > | inp | > loop { >@@ -123,36 +111,50 @@ def offsetsAndConfigurationIndex(file) > fileBytes << byte > } > } >- >- def sliceByteArrays(byteArray, pattern) >- result = [] >- lastSlicePoint = 0 >- (byteArray.length - pattern.length + 1).times { >- | index | >- foundOne = true >- pattern.length.times { >- | subIndex | >- if byteArray[index + subIndex] != pattern[subIndex] >- foundOne = false >- break >- end >- } >- if foundOne >- result << byteArray[lastSlicePoint...index] >- lastSlicePoint = index + pattern.length >+ fileBytes >+end >+ >+def sliceByteArrays(byteArray, pattern) >+ result = [] >+ lastSlicePoint = 0 >+ (byteArray.length - pattern.length + 1).times { >+ | index | >+ foundOne = true >+ pattern.length.times { >+ | subIndex | >+ if byteArray[index + subIndex] != pattern[subIndex] >+ foundOne = false >+ break > end > } >- >- result << byteArray[lastSlicePoint...(byteArray.length)] >- >- result >- end >- >+ if foundOne >+ result << byteArray[lastSlicePoint...index] >+ lastSlicePoint = index + pattern.length >+ end >+ } >+ >+ result << byteArray[lastSlicePoint...(byteArray.length)] >+ >+ result >+end >+ >+# >+# offsetsAndConfigurationIndex(ast, file) -> >+# [[offsets, index], ...] >+# >+# Parses the offsets from a file and returns a list of offsets and the >+# index of the configuration that is valid in this build target. 
>+# >+ >+def offsetsAndConfigurationIndex(file) >+ fileBytes = fileBytes(file) >+ result = {} >+ > [:little, :big].each { > | endianness | > headerMagicBytes = prepareMagic(endianness, OFFSET_HEADER_MAGIC_NUMBERS) > magicBytes = prepareMagic(endianness, OFFSET_MAGIC_NUMBERS) >- >+ > bigArray = sliceByteArrays(fileBytes, headerMagicBytes) > unless bigArray.size <= 1 > bigArray[1..-1].each { >@@ -168,9 +170,9 @@ def offsetsAndConfigurationIndex(file) > } > end > } >- >+ > raise MissingMagicValuesException unless result.length >= 1 >- >+ > # result is {index1=>offsets1, index2=>offsets2} but we want to return > # [[offsets1, index1], [offsets2, index2]]. > return result.map { >@@ -179,6 +181,28 @@ def offsetsAndConfigurationIndex(file) > } > end > >+def configurationIndices(file) >+ fileBytes = fileBytes(file) >+ result = [] >+ >+ [:little, :big].each { >+ | endianness | >+ headerMagicBytes = prepareMagic(endianness, OFFSET_HEADER_MAGIC_NUMBERS) >+ >+ bigArray = sliceByteArrays(fileBytes, headerMagicBytes) >+ unless bigArray.size <= 1 >+ bigArray[1..-1].each { >+ | configArray | >+ result << readInt(endianness, configArray) >+ } >+ end >+ } >+ >+ raise MissingMagicValuesException unless result.length >= 1 >+ >+ return result >+end >+ > # > # buildOffsetsMap(ast, extractedConstants) -> map > # >diff --git a/Source/JavaScriptCore/offlineasm/parser.rb b/Source/JavaScriptCore/offlineasm/parser.rb >index 3869e6c3fe1ed3c0a7deb0d62aa27736dc8b8adf..580743ade92e8e7c71e3f7a26852a667a999900a 100644 >--- a/Source/JavaScriptCore/offlineasm/parser.rb >+++ b/Source/JavaScriptCore/offlineasm/parser.rb >@@ -177,11 +177,11 @@ def lex(str, file) > end > result << Token.new(CodeOrigin.new(file, lineNumber), $&) > lineNumber += 1 >- when /\A[a-zA-Z]([a-zA-Z0-9_.]*)/ >+ when /\A[a-zA-Z%]([a-zA-Z0-9_.%]*)/ > result << Token.new(CodeOrigin.new(file, lineNumber), $&) > when /\A\.([a-zA-Z0-9_]*)/ > result << Token.new(CodeOrigin.new(file, lineNumber), $&) >- when /\A_([a-zA-Z0-9_]*)/ >+ when 
/\A_([a-zA-Z0-9_%]*)/ > result << Token.new(CodeOrigin.new(file, lineNumber), $&) > when /\A([ \t]+)/ > # whitespace, ignore >@@ -228,11 +228,11 @@ def isKeyword(token) > end > > def isIdentifier(token) >- token =~ /\A[a-zA-Z]([a-zA-Z0-9_.]*)\Z/ and not isKeyword(token) >+ token =~ /\A[a-zA-Z%]([a-zA-Z0-9_.%]*)\Z/ and not isKeyword(token) > end > > def isLabel(token) >- token =~ /\A_([a-zA-Z0-9_]*)\Z/ >+ token =~ /\A_([a-zA-Z0-9_%]*)\Z/ > end > > def isLocalLabel(token) >diff --git a/Source/JavaScriptCore/offlineasm/settings.rb b/Source/JavaScriptCore/offlineasm/settings.rb >index eec092584fecf441619bd0f87de9ffa08e797d05..0647c82bae8f53c22fd115ae27a31073fba53749 100644 >--- a/Source/JavaScriptCore/offlineasm/settings.rb >+++ b/Source/JavaScriptCore/offlineasm/settings.rb >@@ -90,7 +90,6 @@ end > > def forSettings(concreteSettings, ast) > # Check which architectures this combinator claims to support. >- numClaimedBackends = 0 > selectedBackend = nil > BACKENDS.each { > | backend | >diff --git a/Source/JavaScriptCore/offlineasm/transform.rb b/Source/JavaScriptCore/offlineasm/transform.rb >index 2a082555b74a9fc21b5570117f5537ec15affecf..b5328bade1d958d26138903458cd754501dcd9a7 100644 >--- a/Source/JavaScriptCore/offlineasm/transform.rb >+++ b/Source/JavaScriptCore/offlineasm/transform.rb >@@ -118,7 +118,7 @@ class Node > child.demacroify(macros) > } > end >- >+ > def substitute(mapping) > mapChildren { > | child | >@@ -150,9 +150,16 @@ class Macro > end > end > >+ >+$concatenation = /%([a-zA-Z_]+)%/ > class Variable > def substitute(mapping) >- if mapping[self] >+ if @name =~ $concatenation >+ name = @name.gsub($concatenation) { |match| >+ Variable.forName(codeOrigin, match[1...-1]).substitute(mapping).dump >+ } >+ Variable.forName(codeOrigin, name) >+ elsif mapping[self] > mapping[self] > else > self >@@ -160,6 +167,32 @@ class Variable > end > end > >+class Label >+ def substitute(mapping) >+ if @name =~ $concatenation >+ name = @name.gsub($concatenation) { |match| 
>+ Variable.forName(codeOrigin, match[1...-1]).substitute(mapping).dump >+ } >+ Label.forName(codeOrigin, name, @definedInFile) >+ else >+ self >+ end >+ end >+end >+ >+class ConstExpr >+ def substitute(mapping) >+ if @value =~ $concatenation >+ value = @value.gsub($concatenation) { |match| >+ Variable.forName(codeOrigin, match[1...-1]).substitute(mapping).dump >+ } >+ ConstExpr.forName(codeOrigin, value) >+ else >+ self >+ end >+ end >+end >+ > class LocalLabel > def substituteLabels(mapping) > if mapping[self] >@@ -215,7 +248,7 @@ class Sequence > mapping = {} > myMyMacros = myMacros.dup > raise "Could not find macro #{item.name} at #{item.codeOriginString}" unless myMacros[item.name] >- raise "Argument count mismatch for call to #{item.name} at #{item.codeOriginString}" unless item.operands.size == myMacros[item.name].variables.size >+ raise "Argument count mismatch for call to #{item.name} at #{item.codeOriginString} (expected #{myMacros[item.name].variables.size} but got #{item.operands.size} arguments for macro #{item.name} defined at #{myMacros[item.name].codeOrigin})" unless item.operands.size == myMacros[item.name].variables.size > item.operands.size.times { > | idx | > if item.operands[idx].is_a? 
Variable and myMacros[item.operands[idx].name] >@@ -519,4 +552,3 @@ class Skip > def validate > end > end >- >diff --git a/Source/JavaScriptCore/parser/ResultType.h b/Source/JavaScriptCore/parser/ResultType.h >index 03cae2a47f903b6d2dfe7d775205aad88f5202fe..7a3ac37b09428eb5c07d46ab9a80ed748120f56d 100644 >--- a/Source/JavaScriptCore/parser/ResultType.h >+++ b/Source/JavaScriptCore/parser/ResultType.h >@@ -174,6 +174,12 @@ namespace JSC { > > Type bits() const { return m_bits; } > >+ void dump(PrintStream& out) const >+ { >+ // TODO: more meaningful information >+ out.print(bits()); >+ } >+ > private: > Type m_bits; > }; >@@ -197,12 +203,12 @@ namespace JSC { > int i; > } m_u; > >- ResultType first() >+ ResultType first() const > { > return ResultType(m_u.rds.first); > } > >- ResultType second() >+ ResultType second() const > { > return ResultType(m_u.rds.second); > } >@@ -217,6 +223,11 @@ namespace JSC { > types.m_u.i = value; > return types; > } >+ >+ void dump(PrintStream& out) const >+ { >+ out.print("OperandTypes(", first(), ", ", second(), ")"); >+ } > }; > > } // namespace JSC >diff --git a/Source/JavaScriptCore/profiler/ProfilerBytecodeSequence.cpp b/Source/JavaScriptCore/profiler/ProfilerBytecodeSequence.cpp >index 6e93ce810011618e8d4c8b80e670d83e8e18a129..30cafcb70f3e36ebe54a23442354751504ed5323 100644 >--- a/Source/JavaScriptCore/profiler/ProfilerBytecodeSequence.cpp >+++ b/Source/JavaScriptCore/profiler/ProfilerBytecodeSequence.cpp >@@ -55,9 +55,10 @@ BytecodeSequence::BytecodeSequence(CodeBlock* codeBlock) > for (unsigned bytecodeIndex = 0; bytecodeIndex < codeBlock->instructions().size();) { > out.reset(); > codeBlock->dumpBytecode(out, bytecodeIndex, statusMap); >- OpcodeID opcodeID = Interpreter::getOpcodeID(codeBlock->instructions()[bytecodeIndex].u.opcode); >+ auto instruction = codeBlock->instructions().at(bytecodeIndex); >+ OpcodeID opcodeID = instruction->opcodeID(); > m_sequence.append(Bytecode(bytecodeIndex, opcodeID, out.toCString())); >- 
bytecodeIndex += opcodeLength(opcodeID); >+ bytecodeIndex += instruction->size(); > } > } > >diff --git a/Source/JavaScriptCore/runtime/CommonSlowPaths.cpp b/Source/JavaScriptCore/runtime/CommonSlowPaths.cpp >index 24ead4cb97758f91932e374d7cbfc54715490313..0bac71f105feebb188f76613fd49813edd4e3860 100644 >--- a/Source/JavaScriptCore/runtime/CommonSlowPaths.cpp >+++ b/Source/JavaScriptCore/runtime/CommonSlowPaths.cpp >@@ -93,11 +93,8 @@ namespace JSC { > BEGIN_NO_SET_PC(); \ > SET_PC_FOR_STUBS() > >-#define OP(index) (exec->uncheckedR(pc[index].u.operand)) >-#define OP_C(index) (exec->r(pc[index].u.operand)) >- >-#define GET(operand) (exec->uncheckedR(operand)) >-#define GET_C(operand) (exec->r(operand)) >+#define GET(operand) (exec->uncheckedR(operand.offset())) >+#define GET_C(operand) (exec->r(operand.offset())) > > #define RETURN_TWO(first, second) do { \ > return encodeResult(first, second); \ >@@ -124,33 +121,34 @@ namespace JSC { > END_IMPL(); \ > } while (false) > >-#define BRANCH(opcode, condition) do { \ >+#define BRANCH(condition) do { \ > bool bCondition = (condition); \ > CHECK_EXCEPTION(); \ > if (bCondition) \ >- pc += pc[OPCODE_LENGTH(opcode) - 1].u.operand; \ >+ pc += bytecode.target; \ > else \ >- pc += OPCODE_LENGTH(opcode); \ >+ pc += pc->size(); \ > END_IMPL(); \ > } while (false) > >-#define RETURN_WITH_PROFILING(value__, profilingAction__) do { \ >+#define RETURN_WITH_PROFILING_CUSTOM(result__, value__, profilingAction__) do { \ > JSValue returnValue__ = (value__); \ > CHECK_EXCEPTION(); \ >- OP(1) = returnValue__; \ >+ GET(result__) = returnValue__; \ > profilingAction__; \ > END_IMPL(); \ > } while (false) > >+#define RETURN_WITH_PROFILING(...) 
RETURN_WITH_PROFILING_CUSTOM(bytecode.dst, __VA_ARGS__) >+ > #define RETURN(value) \ > RETURN_WITH_PROFILING(value, { }) > >-#define RETURN_PROFILED(opcode__, value__) \ >- RETURN_WITH_PROFILING(value__, PROFILE_VALUE(opcode__, returnValue__)) >+#define RETURN_PROFILED(value__) \ >+ RETURN_WITH_PROFILING(value__, PROFILE_VALUE(returnValue__)) > >-#define PROFILE_VALUE(opcode, value) do { \ >- pc[OPCODE_LENGTH(opcode) - 1].u.profile->m_buckets[0] = \ >- JSValue::encode(value); \ >+#define PROFILE_VALUE(value) do { \ >+ bytecode.metadata(exec).profile.m_buckets[0] = JSValue::encode(value); \ > } while (false) > > #define CALL_END_IMPL(exec, callTarget, callTargetTag) \ >@@ -195,13 +193,15 @@ SLOW_PATH_DECL(slow_path_construct_arityCheck) > SLOW_PATH_DECL(slow_path_create_direct_arguments) > { > BEGIN(); >+ auto bytecode = pc->as<OpCreateDirectArguments>(); > RETURN(DirectArguments::createByCopying(exec)); > } > > SLOW_PATH_DECL(slow_path_create_scoped_arguments) > { > BEGIN(); >- JSLexicalEnvironment* scope = jsCast<JSLexicalEnvironment*>(OP(2).jsValue()); >+ auto bytecode = pc->as<OpCreateScopedArguments>(); >+ JSLexicalEnvironment* scope = jsCast<JSLexicalEnvironment*>(GET(bytecode.scope).jsValue()); > ScopedArgumentsTable* table = scope->symbolTable()->arguments(); > RETURN(ScopedArguments::createByCopying(exec, table, scope)); > } >@@ -209,24 +209,25 @@ SLOW_PATH_DECL(slow_path_create_scoped_arguments) > SLOW_PATH_DECL(slow_path_create_cloned_arguments) > { > BEGIN(); >+ auto bytecode = pc->as<OpCreateClonedArguments>(); > RETURN(ClonedArguments::createWithMachineFrame(exec, exec, ArgumentsMode::Cloned)); > } > > SLOW_PATH_DECL(slow_path_create_this) > { > BEGIN(); >- auto& bytecode = *reinterpret_cast<OpCreateThis*>(pc); >+ auto bytecode = pc->as<OpCreateThis>(); > JSObject* result; >- JSObject* constructorAsObject = asObject(GET(bytecode.callee()).jsValue()); >+ JSObject* constructorAsObject = asObject(GET(bytecode.callee).jsValue()); > if 
(constructorAsObject->type() == JSFunctionType && jsCast<JSFunction*>(constructorAsObject)->canUseAllocationProfile()) { > JSFunction* constructor = jsCast<JSFunction*>(constructorAsObject); >- WriteBarrier<JSCell>& cachedCallee = bytecode.cachedCallee(); >+ WriteBarrier<JSCell>& cachedCallee = bytecode.metadata(exec).cachedCallee; > if (!cachedCallee) > cachedCallee.set(vm, exec->codeBlock(), constructor); > else if (cachedCallee.unvalidatedGet() != JSCell::seenMultipleCalleeObjects() && cachedCallee.get() != constructor) > cachedCallee.setWithoutWriteBarrier(JSCell::seenMultipleCalleeObjects()); > >- size_t inlineCapacity = bytecode.inlineCapacity(); >+ size_t inlineCapacity = bytecode.inlineCapacity; > ObjectAllocationProfile* allocationProfile = constructor->ensureRareDataAndAllocationProfile(exec, inlineCapacity)->objectAllocationProfile(); > Structure* structure = allocationProfile->structure(); > result = constructEmptyObject(exec, structure); >@@ -252,18 +253,20 @@ SLOW_PATH_DECL(slow_path_create_this) > SLOW_PATH_DECL(slow_path_to_this) > { > BEGIN(); >- JSValue v1 = OP(1).jsValue(); >+ auto bytecode = pc->as<OpToThis>(); >+ auto& metadata = bytecode.metadata(exec); >+ JSValue v1 = GET(bytecode.srcDst).jsValue(); > if (v1.isCell()) { > Structure* myStructure = v1.asCell()->structure(vm); >- Structure* otherStructure = pc[2].u.structure.get(); >+ Structure* otherStructure = metadata.cachedStructure.get(); > if (myStructure != otherStructure) { > if (otherStructure) >- pc[3].u.toThisStatus = ToThisConflicted; >- pc[2].u.structure.set(vm, exec->codeBlock(), myStructure); >+ metadata.toThisStatus = ToThisConflicted; >+ metadata.cachedStructure.set(vm, exec->codeBlock(), myStructure); > } > } else { >- pc[3].u.toThisStatus = ToThisConflicted; >- pc[2].u.structure.clear(); >+ metadata.toThisStatus = ToThisConflicted; >+ metadata.cachedStructure.clear(); > } > // Note: We only need to do this value profiling here on the slow path. 
The fast path > // just returns the input to to_this if the structure check succeeds. If the structure >@@ -271,7 +274,8 @@ SLOW_PATH_DECL(slow_path_to_this) > // different object that still has the same structure on the fast path since it'll produce > // the same SpeculatedType. Therefore, we don't need to worry about value profiling on the > // fast path. >- RETURN_PROFILED(op_to_this, v1.toThis(exec, exec->codeBlock()->isStrictMode() ? StrictMode : NotStrictMode)); >+ auto value = v1.toThis(exec, exec->codeBlock()->isStrictMode() ? StrictMode : NotStrictMode); >+ RETURN_WITH_PROFILING_CUSTOM(bytecode.srcDst, value, PROFILE_VALUE(value)); > } > > SLOW_PATH_DECL(slow_path_throw_tdz_error) >@@ -295,79 +299,91 @@ SLOW_PATH_DECL(slow_path_throw_strict_mode_readonly_property_write_error) > SLOW_PATH_DECL(slow_path_not) > { > BEGIN(); >- RETURN(jsBoolean(!OP_C(2).jsValue().toBoolean(exec))); >+ auto bytecode = pc->as<OpNot>(); >+ RETURN(jsBoolean(!GET_C(bytecode.operand).jsValue().toBoolean(exec))); > } > > SLOW_PATH_DECL(slow_path_eq) > { > BEGIN(); >- RETURN(jsBoolean(JSValue::equal(exec, OP_C(2).jsValue(), OP_C(3).jsValue()))); >+ auto bytecode = pc->as<OpEq>(); >+ RETURN(jsBoolean(JSValue::equal(exec, GET_C(bytecode.lhs).jsValue(), GET_C(bytecode.rhs).jsValue()))); > } > > SLOW_PATH_DECL(slow_path_neq) > { > BEGIN(); >- RETURN(jsBoolean(!JSValue::equal(exec, OP_C(2).jsValue(), OP_C(3).jsValue()))); >+ auto bytecode = pc->as<OpNeq>(); >+ RETURN(jsBoolean(!JSValue::equal(exec, GET_C(bytecode.lhs).jsValue(), GET_C(bytecode.rhs).jsValue()))); > } > > SLOW_PATH_DECL(slow_path_stricteq) > { > BEGIN(); >- RETURN(jsBoolean(JSValue::strictEqual(exec, OP_C(2).jsValue(), OP_C(3).jsValue()))); >+ auto bytecode = pc->as<OpStricteq>(); >+ RETURN(jsBoolean(JSValue::strictEqual(exec, GET_C(bytecode.lhs).jsValue(), GET_C(bytecode.rhs).jsValue()))); > } > > SLOW_PATH_DECL(slow_path_nstricteq) > { > BEGIN(); >- RETURN(jsBoolean(!JSValue::strictEqual(exec, OP_C(2).jsValue(), 
OP_C(3).jsValue()))); >+ auto bytecode = pc->as<OpNstricteq>(); >+ RETURN(jsBoolean(!JSValue::strictEqual(exec, GET_C(bytecode.lhs).jsValue(), GET_C(bytecode.rhs).jsValue()))); > } > > SLOW_PATH_DECL(slow_path_less) > { > BEGIN(); >- RETURN(jsBoolean(jsLess<true>(exec, OP_C(2).jsValue(), OP_C(3).jsValue()))); >+ auto bytecode = pc->as<OpLess>(); >+ RETURN(jsBoolean(jsLess<true>(exec, GET_C(bytecode.lhs).jsValue(), GET_C(bytecode.rhs).jsValue()))); > } > > SLOW_PATH_DECL(slow_path_lesseq) > { > BEGIN(); >- RETURN(jsBoolean(jsLessEq<true>(exec, OP_C(2).jsValue(), OP_C(3).jsValue()))); >+ auto bytecode = pc->as<OpLesseq>(); >+ RETURN(jsBoolean(jsLessEq<true>(exec, GET_C(bytecode.lhs).jsValue(), GET_C(bytecode.rhs).jsValue()))); > } > > SLOW_PATH_DECL(slow_path_greater) > { > BEGIN(); >- RETURN(jsBoolean(jsLess<false>(exec, OP_C(3).jsValue(), OP_C(2).jsValue()))); >+ auto bytecode = pc->as<OpGreater>(); >+ RETURN(jsBoolean(jsLess<false>(exec, GET_C(bytecode.rhs).jsValue(), GET_C(bytecode.lhs).jsValue()))); > } > > SLOW_PATH_DECL(slow_path_greatereq) > { > BEGIN(); >- RETURN(jsBoolean(jsLessEq<false>(exec, OP_C(3).jsValue(), OP_C(2).jsValue()))); >+ auto bytecode = pc->as<OpGreatereq>(); >+ RETURN(jsBoolean(jsLessEq<false>(exec, GET_C(bytecode.rhs).jsValue(), GET_C(bytecode.lhs).jsValue()))); > } > > SLOW_PATH_DECL(slow_path_inc) > { > BEGIN(); >- RETURN(jsNumber(OP(1).jsValue().toNumber(exec) + 1)); >+ auto bytecode = pc->as<OpInc>(); >+ RETURN_WITH_PROFILING_CUSTOM(bytecode.srcDst, jsNumber(GET(bytecode.srcDst).jsValue().toNumber(exec) + 1), { }); > } > > SLOW_PATH_DECL(slow_path_dec) > { > BEGIN(); >- RETURN(jsNumber(OP(1).jsValue().toNumber(exec) - 1)); >+ auto bytecode = pc->as<OpInc>(); >+ RETURN_WITH_PROFILING_CUSTOM(bytecode.srcDst, jsNumber(GET(bytecode.srcDst).jsValue().toNumber(exec) - 1), { }); > } > > SLOW_PATH_DECL(slow_path_to_string) > { > BEGIN(); >- RETURN(OP_C(2).jsValue().toString(exec)); >+ auto bytecode = pc->as<OpToString>(); >+ 
RETURN(GET_C(bytecode.operand).jsValue().toString(exec)); > } > > #if ENABLE(JIT) >-static void updateArithProfileForUnaryArithOp(Instruction* pc, JSValue result, JSValue operand) >+static void updateArithProfileForUnaryArithOp(OpNegate::Metadata& metadata, JSValue result, JSValue operand) > { >- ArithProfile& profile = *bitwise_cast<ArithProfile*>(&pc[3].u.operand); >+ ArithProfile& profile = metadata.arithProfile; > profile.observeLHS(operand); > ASSERT(result.isNumber() || result.isBigInt()); > if (result.isNumber()) { >@@ -394,32 +410,34 @@ static void updateArithProfileForUnaryArithOp(Instruction* pc, JSValue result, J > profile.setObservedNonNumber(); > } > #else >-static void updateArithProfileForUnaryArithOp(Instruction*, JSValue, JSValue) { } >+static void updateArithProfileForUnaryArithOp(OpNegate::Metadata&, JSValue, JSValue) { } > #endif > > SLOW_PATH_DECL(slow_path_negate) > { > BEGIN(); >- JSValue operand = OP_C(2).jsValue(); >+ auto bytecode = pc->as<OpNegate>(); >+ auto& metadata = bytecode.metadata(exec); >+ JSValue operand = GET_C(bytecode.operand).jsValue(); > JSValue primValue = operand.toPrimitive(exec, PreferNumber); > CHECK_EXCEPTION(); > > if (primValue.isBigInt()) { > JSBigInt* result = JSBigInt::unaryMinus(vm, asBigInt(primValue)); > RETURN_WITH_PROFILING(result, { >- updateArithProfileForUnaryArithOp(pc, result, operand); >+ updateArithProfileForUnaryArithOp(metadata, result, operand); > }); > } > > JSValue result = jsNumber(-primValue.toNumber(exec)); > CHECK_EXCEPTION(); > RETURN_WITH_PROFILING(result, { >- updateArithProfileForUnaryArithOp(pc, result, operand); >+ updateArithProfileForUnaryArithOp(metadata, result, operand); > }); > } > > #if ENABLE(DFG_JIT) >-static void updateArithProfileForBinaryArithOp(ExecState* exec, Instruction* pc, JSValue result, JSValue left, JSValue right) >+static void updateArithProfileForBinaryArithOp(ExecState* exec, const Instruction* pc, JSValue result, JSValue left, JSValue right) > { > CodeBlock* 
codeBlock = exec->codeBlock(); > ArithProfile& profile = *codeBlock->arithProfileForPC(pc); >@@ -448,35 +466,38 @@ static void updateArithProfileForBinaryArithOp(ExecState* exec, Instruction* pc, > profile.setObservedNonNumber(); > } > #else >-static void updateArithProfileForBinaryArithOp(ExecState*, Instruction*, JSValue, JSValue, JSValue) { } >+static void updateArithProfileForBinaryArithOp(ExecState*, const Instruction*, JSValue, JSValue, JSValue) { } > #endif > > SLOW_PATH_DECL(slow_path_to_number) > { > BEGIN(); >- JSValue argument = OP_C(2).jsValue(); >+ auto bytecode = pc->as<OpToNumber>(); >+ JSValue argument = GET_C(bytecode.operand).jsValue(); > JSValue result = jsNumber(argument.toNumber(exec)); >- RETURN_PROFILED(op_to_number, result); >+ RETURN_PROFILED(result); > } > > SLOW_PATH_DECL(slow_path_to_object) > { > BEGIN(); >- JSValue argument = OP_C(2).jsValue(); >+ auto bytecode = pc->as<OpToObject>(); >+ JSValue argument = GET_C(bytecode.operand).jsValue(); > if (UNLIKELY(argument.isUndefinedOrNull())) { >- const Identifier& ident = exec->codeBlock()->identifier(pc[3].u.operand); >+ const Identifier& ident = exec->codeBlock()->identifier(bytecode.message); > if (!ident.isEmpty()) > THROW(createTypeError(exec, ident.impl())); > } > JSObject* result = argument.toObject(exec); >- RETURN_PROFILED(op_to_object, result); >+ RETURN_PROFILED(result); > } > > SLOW_PATH_DECL(slow_path_add) > { > BEGIN(); >- JSValue v1 = OP_C(2).jsValue(); >- JSValue v2 = OP_C(3).jsValue(); >+ auto bytecode = pc->as<OpAdd>(); >+ JSValue v1 = GET_C(bytecode.lhs).jsValue(); >+ JSValue v2 = GET_C(bytecode.rhs).jsValue(); > JSValue result; > > ArithProfile& arithProfile = *exec->codeBlock()->arithProfileForPC(pc); >@@ -503,8 +524,9 @@ SLOW_PATH_DECL(slow_path_add) > SLOW_PATH_DECL(slow_path_mul) > { > BEGIN(); >- JSValue left = OP_C(2).jsValue(); >- JSValue right = OP_C(3).jsValue(); >+ auto bytecode = pc->as<OpMul>(); >+ JSValue left = GET_C(bytecode.lhs).jsValue(); >+ JSValue right 
= GET_C(bytecode.rhs).jsValue(); > JSValue result = jsMul(exec, left, right); > CHECK_EXCEPTION(); > RETURN_WITH_PROFILING(result, { >@@ -515,8 +537,9 @@ SLOW_PATH_DECL(slow_path_mul) > SLOW_PATH_DECL(slow_path_sub) > { > BEGIN(); >- JSValue left = OP_C(2).jsValue(); >- JSValue right = OP_C(3).jsValue(); >+ auto bytecode = pc->as<OpSub>(); >+ JSValue left = GET_C(bytecode.lhs).jsValue(); >+ JSValue right = GET_C(bytecode.rhs).jsValue(); > auto leftNumeric = left.toNumeric(exec); > CHECK_EXCEPTION(); > auto rightNumeric = right.toNumeric(exec); >@@ -542,8 +565,9 @@ SLOW_PATH_DECL(slow_path_sub) > SLOW_PATH_DECL(slow_path_div) > { > BEGIN(); >- JSValue left = OP_C(2).jsValue(); >- JSValue right = OP_C(3).jsValue(); >+ auto bytecode = pc->as<OpDiv>(); >+ JSValue left = GET_C(bytecode.lhs).jsValue(); >+ JSValue right = GET_C(bytecode.rhs).jsValue(); > auto leftNumeric = left.toNumeric(exec); > CHECK_EXCEPTION(); > auto rightNumeric = right.toNumeric(exec); >@@ -572,8 +596,9 @@ SLOW_PATH_DECL(slow_path_div) > SLOW_PATH_DECL(slow_path_mod) > { > BEGIN(); >- JSValue left = OP_C(2).jsValue(); >- JSValue right = OP_C(3).jsValue(); >+ auto bytecode = pc->as<OpMod>(); >+ JSValue left = GET_C(bytecode.lhs).jsValue(); >+ JSValue right = GET_C(bytecode.rhs).jsValue(); > auto leftNumeric = left.toNumeric(exec); > CHECK_EXCEPTION(); > auto rightNumeric = right.toNumeric(exec); >@@ -597,10 +622,11 @@ SLOW_PATH_DECL(slow_path_mod) > SLOW_PATH_DECL(slow_path_pow) > { > BEGIN(); >- double a = OP_C(2).jsValue().toNumber(exec); >+ auto bytecode = pc->as<OpPow>(); >+ double a = GET_C(bytecode.lhs).jsValue().toNumber(exec); > if (UNLIKELY(throwScope.exception())) > RETURN(JSValue()); >- double b = OP_C(3).jsValue().toNumber(exec); >+ double b = GET_C(bytecode.rhs).jsValue().toNumber(exec); > if (UNLIKELY(throwScope.exception())) > RETURN(JSValue()); > RETURN(jsNumber(operationMathPow(a, b))); >@@ -609,113 +635,127 @@ SLOW_PATH_DECL(slow_path_pow) > SLOW_PATH_DECL(slow_path_lshift) > { > 
BEGIN(); >- int32_t a = OP_C(2).jsValue().toInt32(exec); >+ auto bytecode = pc->as<OpLshift>(); >+ int32_t a = GET_C(bytecode.lhs).jsValue().toInt32(exec); > if (UNLIKELY(throwScope.exception())) > RETURN(JSValue()); >- uint32_t b = OP_C(3).jsValue().toUInt32(exec); >+ uint32_t b = GET_C(bytecode.rhs).jsValue().toUInt32(exec); > RETURN(jsNumber(a << (b & 31))); > } > > SLOW_PATH_DECL(slow_path_rshift) > { > BEGIN(); >- int32_t a = OP_C(2).jsValue().toInt32(exec); >+ auto bytecode = pc->as<OpRshift>(); >+ int32_t a = GET_C(bytecode.lhs).jsValue().toInt32(exec); > if (UNLIKELY(throwScope.exception())) > RETURN(JSValue()); >- uint32_t b = OP_C(3).jsValue().toUInt32(exec); >+ uint32_t b = GET_C(bytecode.rhs).jsValue().toUInt32(exec); > RETURN(jsNumber(a >> (b & 31))); > } > > SLOW_PATH_DECL(slow_path_urshift) > { > BEGIN(); >- uint32_t a = OP_C(2).jsValue().toUInt32(exec); >+ auto bytecode = pc->as<OpUrshift>(); >+ uint32_t a = GET_C(bytecode.lhs).jsValue().toUInt32(exec); > if (UNLIKELY(throwScope.exception())) > RETURN(JSValue()); >- uint32_t b = OP_C(3).jsValue().toUInt32(exec); >+ uint32_t b = GET_C(bytecode.rhs).jsValue().toUInt32(exec); > RETURN(jsNumber(static_cast<int32_t>(a >> (b & 31)))); > } > > SLOW_PATH_DECL(slow_path_unsigned) > { > BEGIN(); >- uint32_t a = OP_C(2).jsValue().toUInt32(exec); >+ auto bytecode = pc->as<OpUnsigned>(); >+ uint32_t a = GET_C(bytecode.operand).jsValue().toUInt32(exec); > RETURN(jsNumber(a)); > } > > SLOW_PATH_DECL(slow_path_bitand) > { > BEGIN(); >- int32_t a = OP_C(2).jsValue().toInt32(exec); >+ auto bytecode = pc->as<OpBitand>(); >+ int32_t a = GET_C(bytecode.lhs).jsValue().toInt32(exec); > if (UNLIKELY(throwScope.exception())) > RETURN(JSValue()); >- int32_t b = OP_C(3).jsValue().toInt32(exec); >+ int32_t b = GET_C(bytecode.rhs).jsValue().toInt32(exec); > RETURN(jsNumber(a & b)); > } > > SLOW_PATH_DECL(slow_path_bitor) > { > BEGIN(); >- int32_t a = OP_C(2).jsValue().toInt32(exec); >+ auto bytecode = pc->as<OpBitor>(); >+ 
int32_t a = GET_C(bytecode.lhs).jsValue().toInt32(exec); > if (UNLIKELY(throwScope.exception())) > RETURN(JSValue()); >- int32_t b = OP_C(3).jsValue().toInt32(exec); >+ int32_t b = GET_C(bytecode.rhs).jsValue().toInt32(exec); > RETURN(jsNumber(a | b)); > } > > SLOW_PATH_DECL(slow_path_bitxor) > { > BEGIN(); >- int32_t a = OP_C(2).jsValue().toInt32(exec); >+ auto bytecode = pc->as<OpBitxor>(); >+ int32_t a = GET_C(bytecode.lhs).jsValue().toInt32(exec); > if (UNLIKELY(throwScope.exception())) > RETURN(JSValue()); >- int32_t b = OP_C(3).jsValue().toInt32(exec); >+ int32_t b = GET_C(bytecode.rhs).jsValue().toInt32(exec); > RETURN(jsNumber(a ^ b)); > } > > SLOW_PATH_DECL(slow_path_typeof) > { > BEGIN(); >- RETURN(jsTypeStringForValue(exec, OP_C(2).jsValue())); >+ auto bytecode = pc->as<OpTypeof>(); >+ RETURN(jsTypeStringForValue(exec, GET_C(bytecode.value).jsValue())); > } > > SLOW_PATH_DECL(slow_path_is_object_or_null) > { > BEGIN(); >- RETURN(jsBoolean(jsIsObjectTypeOrNull(exec, OP_C(2).jsValue()))); >+ auto bytecode = pc->as<OpIsObjectOrNull>(); >+ RETURN(jsBoolean(jsIsObjectTypeOrNull(exec, GET_C(bytecode.operand).jsValue()))); > } > > SLOW_PATH_DECL(slow_path_is_function) > { > BEGIN(); >- RETURN(jsBoolean(OP_C(2).jsValue().isFunction(vm))); >+ auto bytecode = pc->as<OpIsFunction>(); >+ RETURN(jsBoolean(GET_C(bytecode.operand).jsValue().isFunction(vm))); > } > > SLOW_PATH_DECL(slow_path_in_by_val) > { > BEGIN(); >- RETURN(jsBoolean(CommonSlowPaths::opInByVal(exec, OP_C(2).jsValue(), OP_C(3).jsValue(), pc[4].u.arrayProfile))); >+ auto bytecode = pc->as<OpInByVal>(); >+ auto& metadata = bytecode.metadata(exec); >+ RETURN(jsBoolean(CommonSlowPaths::opInByVal(exec, GET_C(bytecode.base).jsValue(), GET_C(bytecode.property).jsValue(), &metadata.arrayProfile))); > } > > SLOW_PATH_DECL(slow_path_in_by_id) > { > BEGIN(); > >- JSValue baseValue = OP_C(2).jsValue(); >+ auto bytecode = pc->as<OpInById>(); >+ JSValue baseValue = GET_C(bytecode.base).jsValue(); > if 
(!baseValue.isObject()) > THROW(createInvalidInParameterError(exec, baseValue)); > >- RETURN(jsBoolean(asObject(baseValue)->hasProperty(exec, exec->codeBlock()->identifier(pc[3].u.operand)))); >+ RETURN(jsBoolean(asObject(baseValue)->hasProperty(exec, exec->codeBlock()->identifier(bytecode.property)))); > } > > SLOW_PATH_DECL(slow_path_del_by_val) > { > BEGIN(); >- JSValue baseValue = OP_C(2).jsValue(); >+ auto bytecode = pc->as<OpDelByVal>(); >+ JSValue baseValue = GET_C(bytecode.base).jsValue(); > JSObject* baseObject = baseValue.toObject(exec); > CHECK_EXCEPTION(); > >- JSValue subscript = OP_C(3).jsValue(); >+ JSValue subscript = GET_C(bytecode.property).jsValue(); > > bool couldDelete; > >@@ -738,13 +778,15 @@ SLOW_PATH_DECL(slow_path_del_by_val) > SLOW_PATH_DECL(slow_path_strcat) > { > BEGIN(); >- RETURN(jsStringFromRegisterArray(exec, &OP(2), pc[3].u.operand)); >+ auto bytecode = pc->as<OpStrcat>(); >+ RETURN(jsStringFromRegisterArray(exec, &GET(bytecode.src), bytecode.count)); > } > > SLOW_PATH_DECL(slow_path_to_primitive) > { > BEGIN(); >- RETURN(OP_C(2).jsValue().toPrimitive(exec)); >+ auto bytecode = pc->as<OpToPrimitive>(); >+ RETURN(GET_C(bytecode.src).jsValue().toPrimitive(exec)); > } > > SLOW_PATH_DECL(slow_path_enter) >@@ -758,7 +800,8 @@ SLOW_PATH_DECL(slow_path_enter) > SLOW_PATH_DECL(slow_path_get_enumerable_length) > { > BEGIN(); >- JSValue enumeratorValue = OP(2).jsValue(); >+ auto bytecode = pc->as<OpGetEnumerableLength>(); >+ JSValue enumeratorValue = GET(bytecode.base).jsValue(); > if (enumeratorValue.isUndefinedOrNull()) > RETURN(jsNumber(0)); > >@@ -770,10 +813,12 @@ SLOW_PATH_DECL(slow_path_get_enumerable_length) > SLOW_PATH_DECL(slow_path_has_indexed_property) > { > BEGIN(); >- JSObject* base = OP(2).jsValue().toObject(exec); >+ auto bytecode = pc->as<OpHasIndexedProperty>(); >+ auto& metadata = bytecode.metadata(exec); >+ JSObject* base = GET(bytecode.base).jsValue().toObject(exec); > CHECK_EXCEPTION(); >- JSValue property = 
OP(3).jsValue(); >- pc[4].u.arrayProfile->observeStructure(base->structure(vm)); >+ JSValue property = GET(bytecode.property).jsValue(); >+ metadata.arrayProfile.observeStructure(base->structure(vm)); > ASSERT(property.isUInt32()); > RETURN(jsBoolean(base->hasPropertyGeneric(exec, property.asUInt32(), PropertySlot::InternalMethodType::GetOwnProperty))); > } >@@ -781,11 +826,12 @@ SLOW_PATH_DECL(slow_path_has_indexed_property) > SLOW_PATH_DECL(slow_path_has_structure_property) > { > BEGIN(); >- JSObject* base = OP(2).jsValue().toObject(exec); >+ auto bytecode = pc->as<OpHasStructureProperty>(); >+ JSObject* base = GET(bytecode.base).jsValue().toObject(exec); > CHECK_EXCEPTION(); >- JSValue property = OP(3).jsValue(); >+ JSValue property = GET(bytecode.property).jsValue(); > ASSERT(property.isString()); >- JSPropertyNameEnumerator* enumerator = jsCast<JSPropertyNameEnumerator*>(OP(4).jsValue().asCell()); >+ JSPropertyNameEnumerator* enumerator = jsCast<JSPropertyNameEnumerator*>(GET(bytecode.enumerator).jsValue().asCell()); > if (base->structure(vm)->id() == enumerator->cachedStructureID()) > RETURN(jsBoolean(true)); > JSString* string = asString(property); >@@ -797,9 +843,10 @@ SLOW_PATH_DECL(slow_path_has_structure_property) > SLOW_PATH_DECL(slow_path_has_generic_property) > { > BEGIN(); >- JSObject* base = OP(2).jsValue().toObject(exec); >+ auto bytecode = pc->as<OpHasGenericProperty>(); >+ JSObject* base = GET(bytecode.base).jsValue().toObject(exec); > CHECK_EXCEPTION(); >- JSValue property = OP(3).jsValue(); >+ JSValue property = GET(bytecode.property).jsValue(); > JSString* string = asString(property); > auto propertyName = string->toIdentifier(exec); > CHECK_EXCEPTION(); >@@ -809,8 +856,9 @@ SLOW_PATH_DECL(slow_path_has_generic_property) > SLOW_PATH_DECL(slow_path_get_direct_pname) > { > BEGIN(); >- JSValue baseValue = OP_C(2).jsValue(); >- JSValue property = OP(3).jsValue(); >+ auto bytecode = pc->as<OpGetDirectPname>(); >+ JSValue baseValue = 
GET_C(bytecode.base).jsValue(); >+ JSValue property = GET(bytecode.property).jsValue(); > JSString* string = asString(property); > auto propertyName = string->toIdentifier(exec); > CHECK_EXCEPTION(); >@@ -820,7 +868,8 @@ SLOW_PATH_DECL(slow_path_get_direct_pname) > SLOW_PATH_DECL(slow_path_get_property_enumerator) > { > BEGIN(); >- JSValue baseValue = OP(2).jsValue(); >+ auto bytecode = pc->as<OpGetPropertyEnumerator>(); >+ JSValue baseValue = GET(bytecode.base).jsValue(); > if (baseValue.isUndefinedOrNull()) > RETURN(JSPropertyNameEnumerator::create(vm)); > >@@ -833,8 +882,9 @@ SLOW_PATH_DECL(slow_path_get_property_enumerator) > SLOW_PATH_DECL(slow_path_next_structure_enumerator_pname) > { > BEGIN(); >- JSPropertyNameEnumerator* enumerator = jsCast<JSPropertyNameEnumerator*>(OP(2).jsValue().asCell()); >- uint32_t index = OP(3).jsValue().asUInt32(); >+ auto bytecode = pc->as<OpEnumeratorStructurePname>(); >+ JSPropertyNameEnumerator* enumerator = jsCast<JSPropertyNameEnumerator*>(GET(bytecode.enumerator).jsValue().asCell()); >+ uint32_t index = GET(bytecode.index).jsValue().asUInt32(); > > JSString* propertyName = nullptr; > if (index < enumerator->endStructurePropertyIndex()) >@@ -845,8 +895,9 @@ SLOW_PATH_DECL(slow_path_next_structure_enumerator_pname) > SLOW_PATH_DECL(slow_path_next_generic_enumerator_pname) > { > BEGIN(); >- JSPropertyNameEnumerator* enumerator = jsCast<JSPropertyNameEnumerator*>(OP(2).jsValue().asCell()); >- uint32_t index = OP(3).jsValue().asUInt32(); >+ auto bytecode = pc->as<OpEnumeratorGenericPname>(); >+ JSPropertyNameEnumerator* enumerator = jsCast<JSPropertyNameEnumerator*>(GET(bytecode.enumerator).jsValue().asCell()); >+ uint32_t index = GET(bytecode.index).jsValue().asUInt32(); > > JSString* propertyName = nullptr; > if (enumerator->endStructurePropertyIndex() <= index && index < enumerator->endGenericPropertyIndex()) >@@ -857,7 +908,8 @@ SLOW_PATH_DECL(slow_path_next_generic_enumerator_pname) > 
SLOW_PATH_DECL(slow_path_to_index_string) > { > BEGIN(); >- RETURN(jsString(exec, Identifier::from(exec, OP(2).jsValue().asUInt32()).string())); >+ auto bytecode = pc->as<OpToIndexString>(); >+ RETURN(jsString(exec, Identifier::from(exec, GET(bytecode.index).jsValue().asUInt32()).string())); > } > > SLOW_PATH_DECL(slow_path_profile_type_clear_log) >@@ -877,10 +929,11 @@ SLOW_PATH_DECL(slow_path_unreachable) > SLOW_PATH_DECL(slow_path_create_lexical_environment) > { > BEGIN(); >- int scopeReg = pc[2].u.operand; >+ auto bytecode = pc->as<OpCreateLexicalEnvironment>(); >+ int scopeReg = bytecode.scope.offset(); > JSScope* currentScope = exec->uncheckedR(scopeReg).Register::scope(); >- SymbolTable* symbolTable = jsCast<SymbolTable*>(OP_C(3).jsValue()); >- JSValue initialValue = OP_C(4).jsValue(); >+ SymbolTable* symbolTable = jsCast<SymbolTable*>(GET_C(bytecode.symbolTable).jsValue()); >+ JSValue initialValue = GET_C(bytecode.initialValue).jsValue(); > ASSERT(initialValue == jsUndefined() || initialValue == jsTDZValue()); > JSScope* newScope = JSLexicalEnvironment::create(vm, exec->lexicalGlobalObject(), currentScope, symbolTable, initialValue); > RETURN(newScope); >@@ -889,10 +942,11 @@ SLOW_PATH_DECL(slow_path_create_lexical_environment) > SLOW_PATH_DECL(slow_path_push_with_scope) > { > BEGIN(); >- JSObject* newScope = OP_C(3).jsValue().toObject(exec); >+ auto bytecode = pc->as<OpPushWithScope>(); >+ JSObject* newScope = GET_C(bytecode.newScope).jsValue().toObject(exec); > CHECK_EXCEPTION(); > >- int scopeReg = pc[2].u.operand; >+ int scopeReg = bytecode.currentScope.offset(); > JSScope* currentScope = exec->uncheckedR(scopeReg).Register::scope(); > RETURN(JSWithScope::create(vm, exec->lexicalGlobalObject(), currentScope, newScope)); > } >@@ -900,8 +954,9 @@ SLOW_PATH_DECL(slow_path_push_with_scope) > SLOW_PATH_DECL(slow_path_resolve_scope_for_hoisting_func_decl_in_eval) > { > BEGIN(); >- const Identifier& ident = exec->codeBlock()->identifier(pc[3].u.operand); >- 
JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope(); >+ auto bytecode = pc->as<OpResolveScopeForHoistingFuncDeclInEval>(); >+ const Identifier& ident = exec->codeBlock()->identifier(bytecode.property); >+ JSScope* scope = exec->uncheckedR(bytecode.scope.offset()).Register::scope(); > JSValue resolvedScope = JSScope::resolveScopeForHoistingFuncDeclInEval(exec, scope, ident); > > CHECK_EXCEPTION(); >@@ -912,13 +967,15 @@ SLOW_PATH_DECL(slow_path_resolve_scope_for_hoisting_func_decl_in_eval) > SLOW_PATH_DECL(slow_path_resolve_scope) > { > BEGIN(); >- const Identifier& ident = exec->codeBlock()->identifier(pc[3].u.operand); >- JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope(); >+ auto bytecode = pc->as<OpResolveScope>(); >+ auto& metadata = bytecode.metadata(exec); >+ const Identifier& ident = exec->codeBlock()->identifier(bytecode.var); >+ JSScope* scope = exec->uncheckedR(bytecode.scope.offset()).Register::scope(); > JSObject* resolvedScope = JSScope::resolve(exec, scope, ident); > // Proxy can throw an error here, e.g. Proxy in with statement's @unscopables. > CHECK_EXCEPTION(); > >- ResolveType resolveType = static_cast<ResolveType>(pc[4].u.operand); >+ ResolveType resolveType = metadata.resolveType; > > // ModuleVar does not keep the scope register value alive in DFG. 
> ASSERT(resolveType != ModuleVar); >@@ -931,20 +988,20 @@ SLOW_PATH_DECL(slow_path_resolve_scope) > if (hasProperty) { > ConcurrentJSLocker locker(exec->codeBlock()->m_lock); > if (resolveType == UnresolvedProperty) >- pc[4].u.operand = GlobalProperty; >+ metadata.resolveType = GlobalProperty; > else >- pc[4].u.operand = GlobalPropertyWithVarInjectionChecks; >+ metadata.resolveType = GlobalPropertyWithVarInjectionChecks; > >- pc[6].u.pointer = globalObject; >+ metadata.globalObject = globalObject; > } > } else if (resolvedScope->isGlobalLexicalEnvironment()) { > JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(resolvedScope); > ConcurrentJSLocker locker(exec->codeBlock()->m_lock); > if (resolveType == UnresolvedProperty) >- pc[4].u.operand = GlobalLexicalVar; >+ metadata.resolveType = GlobalLexicalVar; > else >- pc[4].u.operand = GlobalLexicalVarWithVarInjectionChecks; >- pc[6].u.pointer = globalLexicalEnvironment; >+ metadata.resolveType = GlobalLexicalVarWithVarInjectionChecks; >+ metadata.globalLexicalEnvironment = globalLexicalEnvironment; > } > } > >@@ -954,10 +1011,11 @@ SLOW_PATH_DECL(slow_path_resolve_scope) > SLOW_PATH_DECL(slow_path_create_rest) > { > BEGIN(); >- unsigned arraySize = OP_C(2).jsValue().asUInt32(); >+ auto bytecode = pc->as<OpCreateRest>(); >+ unsigned arraySize = GET_C(bytecode.arraySize).jsValue().asUInt32(); > JSGlobalObject* globalObject = exec->lexicalGlobalObject(); > Structure* structure = globalObject->restParameterStructure(); >- unsigned numParamsToSkip = pc[3].u.unsignedValue; >+ unsigned numParamsToSkip = bytecode.numParametersToSkip; > JSValue* argumentsToCopyRegion = exec->addressOfArgumentsStart() + numParamsToSkip; > RETURN(constructArray(exec, structure, argumentsToCopyRegion, arraySize)); > } >@@ -965,28 +1023,30 @@ SLOW_PATH_DECL(slow_path_create_rest) > SLOW_PATH_DECL(slow_path_get_by_id_with_this) > { > BEGIN(); >- const Identifier& ident = 
exec->codeBlock()->identifier(pc[4].u.operand); >- JSValue baseValue = OP_C(2).jsValue(); >- JSValue thisVal = OP_C(3).jsValue(); >+ auto bytecode = pc->as<OpGetByIdWithThis>(); >+ const Identifier& ident = exec->codeBlock()->identifier(bytecode.property); >+ JSValue baseValue = GET_C(bytecode.base).jsValue(); >+ JSValue thisVal = GET_C(bytecode.thisValue).jsValue(); > PropertySlot slot(thisVal, PropertySlot::PropertySlot::InternalMethodType::Get); > JSValue result = baseValue.get(exec, ident, slot); >- RETURN_PROFILED(op_get_by_id_with_this, result); >+ RETURN_PROFILED(result); > } > > SLOW_PATH_DECL(slow_path_get_by_val_with_this) > { > BEGIN(); > >- JSValue baseValue = OP_C(2).jsValue(); >- JSValue thisValue = OP_C(3).jsValue(); >- JSValue subscript = OP_C(4).jsValue(); >+ auto bytecode = pc->as<OpGetByValWithThis>(); >+ JSValue baseValue = GET_C(bytecode.base).jsValue(); >+ JSValue thisValue = GET_C(bytecode.thisValue).jsValue(); >+ JSValue subscript = GET_C(bytecode.property).jsValue(); > > if (LIKELY(baseValue.isCell() && subscript.isString())) { > Structure& structure = *baseValue.asCell()->structure(vm); > if (JSCell::canUseFastGetOwnProperty(structure)) { > if (RefPtr<AtomicStringImpl> existingAtomicString = asString(subscript)->toExistingAtomicString(exec)) { > if (JSValue result = baseValue.asCell()->fastGetOwnProperty(vm, structure, existingAtomicString.get())) >- RETURN_PROFILED(op_get_by_val_with_this, result); >+ RETURN_PROFILED(result); > } > } > } >@@ -995,26 +1055,27 @@ SLOW_PATH_DECL(slow_path_get_by_val_with_this) > if (subscript.isUInt32()) { > uint32_t i = subscript.asUInt32(); > if (isJSString(baseValue) && asString(baseValue)->canGetIndex(i)) >- RETURN_PROFILED(op_get_by_val_with_this, asString(baseValue)->getIndex(exec, i)); >+ RETURN_PROFILED(asString(baseValue)->getIndex(exec, i)); > >- RETURN_PROFILED(op_get_by_val_with_this, baseValue.get(exec, i, slot)); >+ RETURN_PROFILED(baseValue.get(exec, i, slot)); > } > > 
baseValue.requireObjectCoercible(exec); > CHECK_EXCEPTION(); > auto property = subscript.toPropertyKey(exec); > CHECK_EXCEPTION(); >- RETURN_PROFILED(op_get_by_val_with_this, baseValue.get(exec, property, slot)); >+ RETURN_PROFILED(baseValue.get(exec, property, slot)); > } > > SLOW_PATH_DECL(slow_path_put_by_id_with_this) > { > BEGIN(); >+ auto bytecode = pc->as<OpPutByIdWithThis>(); > CodeBlock* codeBlock = exec->codeBlock(); >- const Identifier& ident = codeBlock->identifier(pc[3].u.operand); >- JSValue baseValue = OP_C(1).jsValue(); >- JSValue thisVal = OP_C(2).jsValue(); >- JSValue putValue = OP_C(4).jsValue(); >+ const Identifier& ident = codeBlock->identifier(bytecode.property); >+ JSValue baseValue = GET_C(bytecode.base).jsValue(); >+ JSValue thisVal = GET_C(bytecode.thisValue).jsValue(); >+ JSValue putValue = GET_C(bytecode.value).jsValue(); > PutPropertySlot slot(thisVal, codeBlock->isStrictMode(), codeBlock->putByIdContext()); > baseValue.putInline(exec, ident, putValue, slot); > END(); >@@ -1023,10 +1084,11 @@ SLOW_PATH_DECL(slow_path_put_by_id_with_this) > SLOW_PATH_DECL(slow_path_put_by_val_with_this) > { > BEGIN(); >- JSValue baseValue = OP_C(1).jsValue(); >- JSValue thisValue = OP_C(2).jsValue(); >- JSValue subscript = OP_C(3).jsValue(); >- JSValue value = OP_C(4).jsValue(); >+ auto bytecode = pc->as<OpPutByValWithThis>(); >+ JSValue baseValue = GET_C(bytecode.base).jsValue(); >+ JSValue thisValue = GET_C(bytecode.thisValue).jsValue(); >+ JSValue subscript = GET_C(bytecode.property).jsValue(); >+ JSValue value = GET_C(bytecode.value).jsValue(); > > auto property = subscript.toPropertyKey(exec); > CHECK_EXCEPTION(); >@@ -1038,10 +1100,11 @@ SLOW_PATH_DECL(slow_path_put_by_val_with_this) > SLOW_PATH_DECL(slow_path_define_data_property) > { > BEGIN(); >- JSObject* base = asObject(OP_C(1).jsValue()); >- JSValue property = OP_C(2).jsValue(); >- JSValue value = OP_C(3).jsValue(); >- JSValue attributes = OP_C(4).jsValue(); >+ auto bytecode = 
pc->as<OpDefineDataProperty>(); >+ JSObject* base = asObject(GET_C(bytecode.base).jsValue()); >+ JSValue property = GET_C(bytecode.property).jsValue(); >+ JSValue value = GET_C(bytecode.value).jsValue(); >+ JSValue attributes = GET_C(bytecode.attributes).jsValue(); > ASSERT(attributes.isInt32()); > > auto propertyName = property.toPropertyKey(exec); >@@ -1055,11 +1118,12 @@ SLOW_PATH_DECL(slow_path_define_data_property) > SLOW_PATH_DECL(slow_path_define_accessor_property) > { > BEGIN(); >- JSObject* base = asObject(OP_C(1).jsValue()); >- JSValue property = OP_C(2).jsValue(); >- JSValue getter = OP_C(3).jsValue(); >- JSValue setter = OP_C(4).jsValue(); >- JSValue attributes = OP_C(5).jsValue(); >+ auto bytecode = pc->as<OpDefineAccessorProperty>(); >+ JSObject* base = asObject(GET_C(bytecode.base).jsValue()); >+ JSValue property = GET_C(bytecode.property).jsValue(); >+ JSValue getter = GET_C(bytecode.getter).jsValue(); >+ JSValue setter = GET_C(bytecode.setter).jsValue(); >+ JSValue attributes = GET_C(bytecode.attributes).jsValue(); > ASSERT(attributes.isInt32()); > > auto propertyName = property.toPropertyKey(exec); >@@ -1073,21 +1137,23 @@ SLOW_PATH_DECL(slow_path_define_accessor_property) > SLOW_PATH_DECL(slow_path_throw_static_error) > { > BEGIN(); >- JSValue errorMessageValue = OP_C(1).jsValue(); >+ auto bytecode = pc->as<OpThrowStaticError>(); >+ JSValue errorMessageValue = GET_C(bytecode.message).jsValue(); > RELEASE_ASSERT(errorMessageValue.isString()); > String errorMessage = asString(errorMessageValue)->value(exec); >- ErrorType errorType = static_cast<ErrorType>(pc[2].u.unsignedValue); >+ ErrorType errorType = bytecode.errorType; > THROW(createError(exec, errorType, errorMessage)); > } > > SLOW_PATH_DECL(slow_path_new_array_with_spread) > { > BEGIN(); >- int numItems = pc[3].u.operand; >+ auto bytecode = pc->as<OpNewArrayWithSpread>(); >+ int numItems = bytecode.argc; > ASSERT(numItems >= 0); >- const BitVector& bitVector = 
exec->codeBlock()->unlinkedCodeBlock()->bitVector(pc[4].u.unsignedValue); >+ const BitVector& bitVector = exec->codeBlock()->unlinkedCodeBlock()->bitVector(bytecode.bitVector); > >- JSValue* values = bitwise_cast<JSValue*>(&OP(2)); >+ JSValue* values = bitwise_cast<JSValue*>(&GET(bytecode.argv)); > > Checked<unsigned, RecordOverflow> checkedArraySize = 0; > for (int i = 0; i < numItems; i++) { >@@ -1136,12 +1202,12 @@ SLOW_PATH_DECL(slow_path_new_array_with_spread) > SLOW_PATH_DECL(slow_path_new_array_buffer) > { > BEGIN(); >- auto* newArrayBuffer = bitwise_cast<OpNewArrayBuffer*>(pc); >- ASSERT(exec->codeBlock()->isConstantRegisterIndex(newArrayBuffer->immutableButterfly())); >- JSImmutableButterfly* immutableButterfly = bitwise_cast<JSImmutableButterfly*>(GET_C(newArrayBuffer->immutableButterfly()).jsValue().asCell()); >- auto* profile = newArrayBuffer->profile(); >+ auto bytecode = pc->as<OpNewArrayBuffer>(); >+ ASSERT(exec->codeBlock()->isConstantRegisterIndex(bytecode.immutableButterfly.offset())); >+ JSImmutableButterfly* immutableButterfly = bitwise_cast<JSImmutableButterfly*>(GET_C(bytecode.immutableButterfly).jsValue().asCell()); >+ auto& profile = bytecode.metadata(exec).allocationProfile; > >- IndexingType indexingMode = profile->selectIndexingType(); >+ IndexingType indexingMode = profile.selectIndexingType(); > Structure* structure = exec->lexicalGlobalObject()->arrayStructureForIndexingTypeDuringAllocation(indexingMode); > ASSERT(isCopyOnWrite(indexingMode)); > ASSERT(!structure->outOfLineCapacity()); >@@ -1157,13 +1223,13 @@ SLOW_PATH_DECL(slow_path_new_array_buffer) > // We also cannot allocate a new butterfly from compilation threads since it's invalid to allocate cells from > // a compilation thread. 
> WTF::storeStoreFence(); >- codeBlock->constantRegister(newArrayBuffer->immutableButterfly()).set(vm, codeBlock, immutableButterfly); >+ codeBlock->constantRegister(bytecode.immutableButterfly.offset()).set(vm, codeBlock, immutableButterfly); > WTF::storeStoreFence(); > } > > JSArray* result = CommonSlowPaths::allocateNewArrayBuffer(vm, structure, immutableButterfly); > ASSERT(isCopyOnWrite(result->indexingMode()) || exec->lexicalGlobalObject()->isHavingABadTime()); >- ArrayAllocationProfile::updateLastAllocationFor(profile, result); >+ ArrayAllocationProfile::updateLastAllocationFor(&profile, result); > RETURN(result); > } > >@@ -1171,7 +1237,8 @@ SLOW_PATH_DECL(slow_path_spread) > { > BEGIN(); > >- JSValue iterable = OP_C(2).jsValue(); >+ auto bytecode = pc->as<OpSpread>(); >+ JSValue iterable = GET_C(bytecode.argument).jsValue(); > > if (iterable.isCell() && isJSArray(iterable.asCell())) { > JSArray* array = jsCast<JSArray*>(iterable); >diff --git a/Source/JavaScriptCore/runtime/CommonSlowPaths.h b/Source/JavaScriptCore/runtime/CommonSlowPaths.h >index 1ece89592cd63118dd9b89f1b96bd008dd0ab5ed..139bbe58c5e7dc34d728695a37199753f6ad76f9 100644 >--- a/Source/JavaScriptCore/runtime/CommonSlowPaths.h >+++ b/Source/JavaScriptCore/runtime/CommonSlowPaths.h >@@ -25,6 +25,7 @@ > > #pragma once > >+#include "BytecodeStructs.h" > #include "CodeBlock.h" > #include "CodeSpecializationKind.h" > #include "DirectArguments.h" >@@ -114,11 +115,12 @@ inline bool opInByVal(ExecState* exec, JSValue baseVal, JSValue propName, ArrayP > } > > inline void tryCachePutToScopeGlobal( >- ExecState* exec, CodeBlock* codeBlock, Instruction* pc, JSObject* scope, >- GetPutInfo getPutInfo, PutPropertySlot& slot, const Identifier& ident) >+ ExecState* exec, CodeBlock* codeBlock, OpPutToScope& bytecode, JSObject* scope, >+ PutPropertySlot& slot, const Identifier& ident) > { > // Covers implicit globals. 
Since they don't exist until they first execute, we didn't know how to cache them at compile time. >- ResolveType resolveType = getPutInfo.resolveType(); >+ auto& metadata = bytecode.metadata(exec); >+ ResolveType resolveType = metadata.getPutInfo.resolveType(); > if (resolveType != GlobalProperty && resolveType != GlobalPropertyWithVarInjectionChecks > && resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks) > return; >@@ -127,18 +129,17 @@ inline void tryCachePutToScopeGlobal( > if (scope->isGlobalObject()) { > ResolveType newResolveType = resolveType == UnresolvedProperty ? GlobalProperty : GlobalPropertyWithVarInjectionChecks; > resolveType = newResolveType; >- getPutInfo = GetPutInfo(getPutInfo.resolveMode(), newResolveType, getPutInfo.initializationMode()); > ConcurrentJSLocker locker(codeBlock->m_lock); >- pc[4].u.operand = getPutInfo.operand(); >+ metadata.getPutInfo = GetPutInfo(metadata.getPutInfo.resolveMode(), newResolveType, metadata.getPutInfo.initializationMode()); > } else if (scope->isGlobalLexicalEnvironment()) { > JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(scope); > ResolveType newResolveType = resolveType == UnresolvedProperty ? 
GlobalLexicalVar : GlobalLexicalVarWithVarInjectionChecks; >- pc[4].u.operand = GetPutInfo(getPutInfo.resolveMode(), newResolveType, getPutInfo.initializationMode()).operand(); >+ metadata.getPutInfo = GetPutInfo(metadata.getPutInfo.resolveMode(), newResolveType, metadata.getPutInfo.initializationMode()); > SymbolTableEntry entry = globalLexicalEnvironment->symbolTable()->get(ident.impl()); > ASSERT(!entry.isNull()); > ConcurrentJSLocker locker(codeBlock->m_lock); >- pc[5].u.watchpointSet = entry.watchpointSet(); >- pc[6].u.pointer = static_cast<void*>(globalLexicalEnvironment->variableAt(entry.scopeOffset()).slot()); >+ metadata.watchpointSet = entry.watchpointSet(); >+ metadata.operand = reinterpret_cast<uintptr_t>(globalLexicalEnvironment->variableAt(entry.scopeOffset()).slot()); > } > } > >@@ -161,32 +162,32 @@ inline void tryCachePutToScopeGlobal( > scope->structure(vm)->didCachePropertyReplacement(vm, slot.cachedOffset()); > > ConcurrentJSLocker locker(codeBlock->m_lock); >- pc[5].u.structure.set(vm, codeBlock, scope->structure(vm)); >- pc[6].u.operand = slot.cachedOffset(); >+ metadata.structure.set(vm, codeBlock, scope->structure(vm)); >+ metadata.operand = slot.cachedOffset(); > } > } > > inline void tryCacheGetFromScopeGlobal( >- ExecState* exec, VM& vm, Instruction* pc, JSObject* scope, PropertySlot& slot, const Identifier& ident) >+ ExecState* exec, VM& vm, OpGetFromScope& bytecode, JSObject* scope, PropertySlot& slot, const Identifier& ident) > { >- GetPutInfo getPutInfo(pc[4].u.operand); >- ResolveType resolveType = getPutInfo.resolveType(); >+ auto& metadata = bytecode.metadata(exec); >+ ResolveType resolveType = metadata.getPutInfo.resolveType(); > > if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) { > if (scope->isGlobalObject()) { > ResolveType newResolveType = resolveType == UnresolvedProperty ? 
GlobalProperty : GlobalPropertyWithVarInjectionChecks; > resolveType = newResolveType; // Allow below caching mechanism to kick in. > ConcurrentJSLocker locker(exec->codeBlock()->m_lock); >- pc[4].u.operand = GetPutInfo(getPutInfo.resolveMode(), newResolveType, getPutInfo.initializationMode()).operand(); >+ metadata.getPutInfo = GetPutInfo(metadata.getPutInfo.resolveMode(), newResolveType, metadata.getPutInfo.initializationMode()); > } else if (scope->isGlobalLexicalEnvironment()) { > JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(scope); > ResolveType newResolveType = resolveType == UnresolvedProperty ? GlobalLexicalVar : GlobalLexicalVarWithVarInjectionChecks; > SymbolTableEntry entry = globalLexicalEnvironment->symbolTable()->get(ident.impl()); > ASSERT(!entry.isNull()); > ConcurrentJSLocker locker(exec->codeBlock()->m_lock); >- pc[4].u.operand = GetPutInfo(getPutInfo.resolveMode(), newResolveType, getPutInfo.initializationMode()).operand(); >- pc[5].u.watchpointSet = entry.watchpointSet(); >- pc[6].u.pointer = static_cast<void*>(globalLexicalEnvironment->variableAt(entry.scopeOffset()).slot()); >+ metadata.getPutInfo = GetPutInfo(metadata.getPutInfo.resolveMode(), newResolveType, metadata.getPutInfo.initializationMode()); >+ metadata.watchpointSet = entry.watchpointSet(); >+ metadata.operand = reinterpret_cast<uintptr_t>(globalLexicalEnvironment->variableAt(entry.scopeOffset()).slot()); > } > } > >@@ -199,8 +200,8 @@ inline void tryCacheGetFromScopeGlobal( > Structure* structure = scope->structure(vm); > { > ConcurrentJSLocker locker(codeBlock->m_lock); >- pc[5].u.structure.set(vm, codeBlock, structure); >- pc[6].u.operand = slot.cachedOffset(); >+ metadata.structure.set(vm, codeBlock, structure); >+ metadata.operand = slot.cachedOffset(); > } > structure->startWatchingPropertyForReplacements(vm, slot.cachedOffset()); > } >@@ -283,7 +284,7 @@ struct Instruction; > #define SLOW_PATH > > #define SLOW_PATH_DECL(name) \ 
>-extern "C" SlowPathReturnType SLOW_PATH name(ExecState* exec, Instruction* pc) >+extern "C" SlowPathReturnType SLOW_PATH name(ExecState* exec, const Instruction* pc) > > #define SLOW_PATH_HIDDEN_DECL(name) \ > SLOW_PATH_DECL(name) WTF_INTERNAL >@@ -365,6 +366,6 @@ SLOW_PATH_HIDDEN_DECL(slow_path_new_array_with_spread); > SLOW_PATH_HIDDEN_DECL(slow_path_new_array_buffer); > SLOW_PATH_HIDDEN_DECL(slow_path_spread); > >-using SlowPathFunction = SlowPathReturnType(SLOW_PATH *)(ExecState*, Instruction*); >+using SlowPathFunction = SlowPathReturnType(SLOW_PATH *)(ExecState*, const Instruction*); > > } // namespace JSC >diff --git a/Source/JavaScriptCore/runtime/ExceptionFuzz.cpp b/Source/JavaScriptCore/runtime/ExceptionFuzz.cpp >index 238d5b5d370745b00c71c5b27d1e6d59d1c193b8..6a148450dfb59148e37af930d963a2da6bb5308a 100644 >--- a/Source/JavaScriptCore/runtime/ExceptionFuzz.cpp >+++ b/Source/JavaScriptCore/runtime/ExceptionFuzz.cpp >@@ -36,7 +36,7 @@ static unsigned s_numberOfExceptionFuzzChecks; > unsigned numberOfExceptionFuzzChecks() { return s_numberOfExceptionFuzzChecks; } > > // Call this only if you know that exception fuzzing is enabled. >-void doExceptionFuzzing(ExecState* exec, ThrowScope& scope, const char* where, void* returnPC) >+void doExceptionFuzzing(ExecState* exec, ThrowScope& scope, const char* where, const void* returnPC) > { > VM& vm = scope.vm(); > ASSERT(Options::useExceptionFuzz()); >diff --git a/Source/JavaScriptCore/runtime/ExceptionFuzz.h b/Source/JavaScriptCore/runtime/ExceptionFuzz.h >index aab07cfb5dfdecb93e7ba4c95dd732bb7084a698..c694b5766c40e1209b746706fb72628b774e21b0 100644 >--- a/Source/JavaScriptCore/runtime/ExceptionFuzz.h >+++ b/Source/JavaScriptCore/runtime/ExceptionFuzz.h >@@ -33,10 +33,10 @@ class ExecState; > class ThrowScope; > > // Call this only if you know that exception fuzzing is enabled. 
>-void doExceptionFuzzing(ExecState*, ThrowScope&, const char* where, void* returnPC); >+void doExceptionFuzzing(ExecState*, ThrowScope&, const char* where, const void* returnPC); > > // This is what you should call if you don't know if fuzzing is enabled. >-ALWAYS_INLINE void doExceptionFuzzingIfEnabled(ExecState* exec, ThrowScope& scope, const char* where, void* returnPC) >+ALWAYS_INLINE void doExceptionFuzzingIfEnabled(ExecState* exec, ThrowScope& scope, const char* where, const void* returnPC) > { > if (LIKELY(!Options::useExceptionFuzz())) > return; >diff --git a/Source/JavaScriptCore/runtime/GetPutInfo.cpp b/Source/JavaScriptCore/runtime/GetPutInfo.cpp >new file mode 100644 >index 0000000000000000000000000000000000000000..aa07c6626a742860e7b07372cf4bf3b1324e8b98 >--- /dev/null >+++ b/Source/JavaScriptCore/runtime/GetPutInfo.cpp >@@ -0,0 +1,48 @@ >+/* >+ * Copyright (C) 2018 Apple Inc. All Rights Reserved. >+ * >+ * Redistribution and use in source and binary forms, with or without >+ * modification, are permitted provided that the following conditions >+ * are met: >+ * 1. Redistributions of source code must retain the above copyright >+ * notice, this list of conditions and the following disclaimer. >+ * 2. Redistributions in binary form must reproduce the above copyright >+ * notice, this list of conditions and the following disclaimer in the >+ * documentation and/or other materials provided with the distribution. >+ * >+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY >+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE >+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR >+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, >+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, >+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR >+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY >+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE >+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. >+ */ >+ >+#include "config.h" >+#include "GetPutInfo.h" >+ >+#include <wtf/PrintStream.h> >+ >+namespace WTF { >+ >+void printInternal(PrintStream& out, JSC::ResolveMode mode) >+{ >+ out.print(resolveModeName(mode)); >+} >+ >+void printInternal(PrintStream& out, JSC::ResolveType type) >+{ >+ out.print(resolveTypeName(type)); >+} >+ >+void printInternal(PrintStream& out, JSC::InitializationMode mode) >+{ >+ out.print(initializationModeName(mode)); >+} >+ >+} // namespace WTF >diff --git a/Source/JavaScriptCore/runtime/GetPutInfo.h b/Source/JavaScriptCore/runtime/GetPutInfo.h >index b7acb18357fe788d0f122a524760d8fc2ba1b1d3..ecc811da2ce5fb64b71cce7d4ea1af24a8a0c40c 100644 >--- a/Source/JavaScriptCore/runtime/GetPutInfo.h >+++ b/Source/JavaScriptCore/runtime/GetPutInfo.h >@@ -1,5 +1,5 @@ > /* >- * Copyright (C) 2015 Apple Inc. All Rights Reserved. >+ * Copyright (C) 2018 Apple Inc. All Rights Reserved. 
> * > * Redistribution and use in source and binary forms, with or without > * modification, are permitted provided that the following conditions >@@ -223,7 +223,13 @@ public: > ResolveType resolveType() const { return static_cast<ResolveType>(m_operand & typeBits); } > InitializationMode initializationMode() const { return static_cast<InitializationMode>((m_operand & initializationBits) >> initializationShift); } > ResolveMode resolveMode() const { return static_cast<ResolveMode>((m_operand & modeBits) >> modeShift); } >- unsigned operand() { return m_operand; } >+ unsigned operand() const { return m_operand; } >+ >+ void dump(PrintStream& out) const >+ { >+ out.print(operand(), "<", resolveMode(), "|", resolveType(), "|", initializationMode(), ">, "); >+ >+ } > > private: > Operand m_operand; >@@ -232,3 +238,13 @@ private: > enum GetOrPut { Get, Put }; > > } // namespace JSC >+ >+namespace WTF { >+ >+class PrintStream; >+ >+void printInternal(PrintStream&, JSC::ResolveMode); >+void printInternal(PrintStream&, JSC::ResolveType); >+void printInternal(PrintStream&, JSC::InitializationMode); >+ >+} // namespace WTF >diff --git a/Source/JavaScriptCore/runtime/JSCPoison.h b/Source/JavaScriptCore/runtime/JSCPoison.h >index cfb52cd6ee5b51f1d9bfce918a61ba8df84332e3..1db4ac6d5463019d9fc6fa11883107c851da840d 100644 >--- a/Source/JavaScriptCore/runtime/JSCPoison.h >+++ b/Source/JavaScriptCore/runtime/JSCPoison.h >@@ -69,7 +69,7 @@ FOR_EACH_JSC_POISON(DECLARE_POISON) > struct ClassInfo; > > using PoisonedClassInfoPtr = Poisoned<GlobalDataPoison, const ClassInfo*>; >-using PoisonedMasmPtr = Poisoned<JITCodePoison, void*>; >+using PoisonedMasmPtr = Poisoned<JITCodePoison, const void*>; > > void initializePoison(); > >diff --git a/Source/JavaScriptCore/runtime/JSType.cpp b/Source/JavaScriptCore/runtime/JSType.cpp >new file mode 100644 >index 0000000000000000000000000000000000000000..ac9f4d8a451009481d996e94328c942b048fe0dc >--- /dev/null >+++ 
b/Source/JavaScriptCore/runtime/JSType.cpp >@@ -0,0 +1,103 @@ >+/* >+ * Copyright (C) 2006-2018 Apple Inc. All rights reserved. >+ * >+ * This library is free software; you can redistribute it and/or >+ * modify it under the terms of the GNU Library General Public >+ * License as published by the Free Software Foundation; either >+ * version 2 of the License, or (at your option) any later version. >+ * >+ * This library is distributed in the hope that it will be useful, >+ * but WITHOUT ANY WARRANTY; without even the implied warranty of >+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU >+ * Library General Public License for more details. >+ * >+ * You should have received a copy of the GNU Library General Public License >+ * along with this library; see the file COPYING.LIB. If not, write to >+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, >+ * Boston, MA 02110-1301, USA. >+ * >+ */ >+ >+#include "config.h" >+#include "JSType.h" >+ >+#include <wtf/PrintStream.h> >+ >+namespace WTF { >+ >+class PrintStream; >+ >+#define CASE(__type) \ >+ case JSC::__type: \ >+ out.print(#__type); \ >+ return; >+ >+void printInternal(PrintStream& out, JSC::JSType type) >+{ >+ switch (type) { >+ CASE(CellType) >+ CASE(StringType) >+ CASE(SymbolType) >+ CASE(BigIntType) >+ CASE(CustomGetterSetterType) >+ CASE(APIValueWrapperType) >+ CASE(ProgramExecutableType) >+ CASE(ModuleProgramExecutableType) >+ CASE(EvalExecutableType) >+ CASE(FunctionExecutableType) >+ CASE(UnlinkedFunctionExecutableType) >+ CASE(UnlinkedProgramCodeBlockType) >+ CASE(UnlinkedModuleProgramCodeBlockType) >+ CASE(UnlinkedEvalCodeBlockType) >+ CASE(UnlinkedFunctionCodeBlockType) >+ CASE(CodeBlockType) >+ CASE(JSFixedArrayType) >+ CASE(JSImmutableButterflyType) >+ CASE(JSSourceCodeType) >+ CASE(JSScriptFetcherType) >+ CASE(JSScriptFetchParametersType) >+ CASE(ObjectType) >+ CASE(FinalObjectType) >+ CASE(JSCalleeType) >+ CASE(JSFunctionType) >+ CASE(InternalFunctionType) >+ 
CASE(NumberObjectType) >+ CASE(ErrorInstanceType) >+ CASE(PureForwardingProxyType) >+ CASE(ImpureProxyType) >+ CASE(DirectArgumentsType) >+ CASE(ScopedArgumentsType) >+ CASE(ClonedArgumentsType) >+ CASE(ArrayType) >+ CASE(DerivedArrayType) >+ CASE(Int8ArrayType) >+ CASE(Uint8ArrayType) >+ CASE(Uint8ClampedArrayType) >+ CASE(Int16ArrayType) >+ CASE(Uint16ArrayType) >+ CASE(Int32ArrayType) >+ CASE(Uint32ArrayType) >+ CASE(Float32ArrayType) >+ CASE(Float64ArrayType) >+ CASE(DataViewType) >+ CASE(GetterSetterType) >+ CASE(GlobalObjectType) >+ CASE(GlobalLexicalEnvironmentType) >+ CASE(LexicalEnvironmentType) >+ CASE(ModuleEnvironmentType) >+ CASE(StrictEvalActivationType) >+ CASE(WithScopeType) >+ CASE(RegExpObjectType) >+ CASE(ProxyObjectType) >+ CASE(JSMapType) >+ CASE(JSSetType) >+ CASE(JSWeakMapType) >+ CASE(JSWeakSetType) >+ CASE(WebAssemblyToJSCalleeType) >+ CASE(MaxJSType) >+ } >+} >+ >+#undef CASE >+ >+} // namespace WTF >diff --git a/Source/JavaScriptCore/runtime/JSType.h b/Source/JavaScriptCore/runtime/JSType.h >index 0cccfacea45af5f08e12066322e994def8ee0ddf..bef1fa3878f26137b5d6a3a0cdcf228a98441882 100644 >--- a/Source/JavaScriptCore/runtime/JSType.h >+++ b/Source/JavaScriptCore/runtime/JSType.h >@@ -129,3 +129,11 @@ static_assert(sizeof(JSType) == sizeof(uint8_t), "sizeof(JSType) is one byte."); > static_assert(LastJSCObjectType < 128, "The highest bit is reserved for embedder's extension."); > > } // namespace JSC >+ >+namespace WTF { >+ >+class PrintStream; >+ >+void printInternal(PrintStream&, JSC::JSType); >+ >+} // namespace WTF >diff --git a/Source/JavaScriptCore/runtime/SamplingProfiler.cpp b/Source/JavaScriptCore/runtime/SamplingProfiler.cpp >index 67fbbbc57400eb5536d688614354a487f4779c57..b54db2065e0f9287c6765ac88695ea6a31476d85 100644 >--- a/Source/JavaScriptCore/runtime/SamplingProfiler.cpp >+++ b/Source/JavaScriptCore/runtime/SamplingProfiler.cpp >@@ -750,7 +750,7 @@ String SamplingProfiler::StackFrame::displayName(VM& vm) > if (frameType == 
FrameType::Unknown || frameType == FrameType::C) { > #if HAVE(DLADDR) > if (frameType == FrameType::C) { >- auto demangled = WTF::StackTrace::demangle(cCodePC); >+ auto demangled = WTF::StackTrace::demangle(const_cast<void*>(cCodePC)); > if (demangled) > return String(demangled->demangledName() ? demangled->demangledName() : demangled->mangledName()); > WTF::dataLog("couldn't get a name"); >diff --git a/Source/JavaScriptCore/runtime/SamplingProfiler.h b/Source/JavaScriptCore/runtime/SamplingProfiler.h >index 65bea9f13e6e3cc0bf5a325b0e27642f1dbb0f3d..f9f406ee6b7a1cc907d27ffde1c30548abc4132d 100644 >--- a/Source/JavaScriptCore/runtime/SamplingProfiler.h >+++ b/Source/JavaScriptCore/runtime/SamplingProfiler.h >@@ -53,13 +53,13 @@ public: > , callSiteIndex(callSiteIndex) > { } > >- UnprocessedStackFrame(void* pc) >+ UnprocessedStackFrame(const void* pc) > : cCodePC(pc) > { } > > UnprocessedStackFrame() = default; > >- void* cCodePC { nullptr }; >+ const void* cCodePC { nullptr }; > CalleeBits unverifiedCallee; > CodeBlock* verifiedCodeBlock { nullptr }; > CallSiteIndex callSiteIndex; >@@ -82,7 +82,7 @@ public: > { } > > FrameType frameType { FrameType::Unknown }; >- void* cCodePC { nullptr }; >+ const void* cCodePC { nullptr }; > ExecutableBase* executable { nullptr }; > JSObject* callee { nullptr }; > >diff --git a/Source/JavaScriptCore/runtime/SlowPathReturnType.h b/Source/JavaScriptCore/runtime/SlowPathReturnType.h >index 4cc59a8f7510f8f280f99115579ace25312d2bcb..1d6042bddf7a2aa4796fb87e2c28e824299da42b 100644 >--- a/Source/JavaScriptCore/runtime/SlowPathReturnType.h >+++ b/Source/JavaScriptCore/runtime/SlowPathReturnType.h >@@ -34,11 +34,11 @@ namespace JSC { > // 'extern "C"') needs to be POD; hence putting any constructors into it could cause either compiler > // warnings, or worse, a change in the ABI used to return these types. 
> struct SlowPathReturnType { >- void* a; >- void* b; >+ const void* a; >+ const void* b; > }; > >-inline SlowPathReturnType encodeResult(void* a, void* b) >+inline SlowPathReturnType encodeResult(const void* a, const void* b) > { > SlowPathReturnType result; > result.a = a; >@@ -46,7 +46,7 @@ inline SlowPathReturnType encodeResult(void* a, void* b) > return result; > } > >-inline void decodeResult(SlowPathReturnType result, void*& a, void*& b) >+inline void decodeResult(SlowPathReturnType result, const void*& a, const void*& b) > { > a = result.a; > b = result.b; >@@ -57,13 +57,13 @@ typedef int64_t SlowPathReturnType; > > typedef union { > struct { >- void* a; >- void* b; >+ const void* a; >+ const void* b; > } pair; > int64_t i; > } SlowPathReturnTypeEncoding; > >-inline SlowPathReturnType encodeResult(void* a, void* b) >+inline SlowPathReturnType encodeResult(const void* a, const void* b) > { > SlowPathReturnTypeEncoding u; > u.pair.a = a; >@@ -71,7 +71,7 @@ inline SlowPathReturnType encodeResult(void* a, void* b) > return u.i; > } > >-inline void decodeResult(SlowPathReturnType result, void*& a, void*& b) >+inline void decodeResult(SlowPathReturnType result, const void*& a, const void*& b) > { > SlowPathReturnTypeEncoding u; > u.i = result; >diff --git a/Source/JavaScriptCore/runtime/VM.h b/Source/JavaScriptCore/runtime/VM.h >index 187c0fc8509193889afbc23043993068e00ac5ad..c05f5b8efad5a920e2fe5fdc64117c233d511f83 100644 >--- a/Source/JavaScriptCore/runtime/VM.h >+++ b/Source/JavaScriptCore/runtime/VM.h >@@ -717,7 +717,7 @@ public: > ExecState* newCallFrameReturnValue; > ExecState* callFrameForCatch; > void* targetMachinePCForThrow; >- Instruction* targetInterpreterPCForThrow; >+ const Instruction* targetInterpreterPCForThrow; > uint32_t osrExitIndex; > void* osrExitJumpDestination; > bool isExecutingInRegExpJIT { false }; >diff --git a/Source/JavaScriptCore/runtime/Watchdog.h b/Source/JavaScriptCore/runtime/Watchdog.h >index 
963a28720ee03d277f1319d64fe06d01665802b2..cc8f91bccf57f170b1e161d504a918bbb5e7d04a 100644 >--- a/Source/JavaScriptCore/runtime/Watchdog.h >+++ b/Source/JavaScriptCore/runtime/Watchdog.h >@@ -26,6 +26,7 @@ > #pragma once > > #include <wtf/Lock.h> >+#include <wtf/MonotonicTime.h> > #include <wtf/Ref.h> > #include <wtf/ThreadSafeRefCounted.h> > #include <wtf/WorkQueue.h> >diff --git a/Source/JavaScriptCore/wip_bytecode/README.md b/Source/JavaScriptCore/wip_bytecode/README.md >new file mode 100644 >index 0000000000000000000000000000000000000000..dfd11654f7b196b89392d674711c5a383a4b74ab >--- /dev/null >+++ b/Source/JavaScriptCore/wip_bytecode/README.md >@@ -0,0 +1,151 @@ >+# Bytecode format >+ >++--------------+ >+| header | >++==============+ >+| instruction0 | >++--------------+ >+| instruction1 | >++--------------+ >+| ... | >++--------------+ >+| instructionN | >++--------------+ >+ >+## Header >+ >++--------------+ >+|num_parameters| >++--------------+ >+| has_metadata | >++--------------+ >+| count_op1 | >++--------------+ >+| ... | >++--------------+ >+| count_opN | >++--------------+ >+| liveness | >++--------------+ >+| global_info | >++--------------+ >+| constants | >++--------------+ >+ >+* `has_metadata` is a BitMap that indicates which opcodes need side table entries >+* `count_opI` is a variable length unsigned number that indicates how many entries are necessary for opcode I. >+ >+Given that we currently have < 256 opcodes, the BitMap should fit in 4 bytes. >+Of all opcodes, ~40 will currently ever need metadata, so that if the bytecode for any CodeBlock uses all of these opcodes, it would add an extra 40~160b, depending on how many instances of each opcode appear in the bytecode. >+ >+## Instruction >+ >+Instructions have variable length, and have the form >+ >++-----------+------+-----+------+------------+ >+| opcode_id | arg0 | ... 
| argN | metadataID | >+-----------+------+-----+------+------------+ >+ >+where N >= 0 and metadataID is optional >+ >+### Narrow Instructions >+ >+By default, we try to encode every instruction in a narrow setting, where every segment has 1 byte. However, we will fall back to a "Wide Instruction" whenever any of the arguments overflows, i.e.: >+ >+* opcode_id: we currently have 167 opcodes, so this won't be a problem for now but, hypothetically, any opcodes beyond id 256 will have to be encoded as a wide instruction. >+* arg: the type of the operand should never be ambiguous, therefore we support: >+ + up to 256 of each of the following: local registers, constants and arguments >+ + up to 8-byte types: we'll attempt to fit integers and unsigned integers in 8 bytes, otherwise fall back to a wide instruction. >+* up to 256 metadata entries per opcode, i.e. if an opcode has metadata, only 256 instances of the same opcode will fit into the same CodeBlock. >+ >+### Wide Instructions >+ >+Wide instructions have 4-byte segments, but are otherwise indistinguishable from narrow instructions. >+ >+We reserve the first opcode for a trampoline that will evaluate the next instruction as a "Wide Instruction", where each segment of the instruction has 4 bytes. This opcode will also be responsible for guaranteeing 4-byte alignment on ARM. >+ >+## API >+ >+A class/struct will be generated for each opcode. The struct will be responsible for: >+* Encoding, e.g. dumping the instruction into a binary format, and choosing between narrow or wide encoding >+* Providing access to each of the instruction's arguments and metadata >+* Potentially allowing dumping of the instruction, simplifying the work done by the BytecodeDumper >+ >+Here's what the API may look like for each of these operations, e.g. for `op_get_argument` (this opcode should be a good example, since it has multiple argument types and metadata). 
Here is its current declaration (syntax may still change) >+ >+```ruby >+op :get_argument, >+ args: { >+ dst: :Register, >+ index: :unsigned, >+ }, >+ metadata: { >+ profile: :ValueProfile, >+ } >+``` >+ >+### Encoding >+ >+```cpp >+static void OpGetArgument::create(BytecodeGenerator& generator, RegisterID* dst, unsigned index); >+``` >+ >+ >+### Field Access >+ >+```cpp >+RegisterID OpGetArgument::dst(); >+unsigned OpGetArgument::index(); >+``` >+ >+### Metadata Access >+```cpp >+ValueProfile* OpGetArgument::profile(ExecState&); >+``` >+ >+### BytecodeDumper >+ >+```cpp >+void OpGetArgument::dump(BytecodeDumper&); >+``` >+ >+### Decoding >+ >+Decoding should be done by the base instruction/reader class. >+ >+```cpp >+Instruction::Unknown* Instruction::read(UnlinkedInstructionStream::Reader&); >+``` >+ >+## "Linking" >+ >+Linking, in its current form, should no longer be necessary. Instead, it will consist of creating the side table for the bytecode metadata and ensuring that the jump table with the offset for each opcode has been initialized. >+ >+### Side table >+ >+A callee-saved register pointing to the current CodeBlock's metadata can be kept at all times to speed up metadata accesses that are necessary, especially for profiling. >+ >+### Jump table >+ >+A mapping from opcode IDs to opcode addresses is already generated in InitBytecodes.asm and loaded by LLIntData. >+ >+## Portability >+ >+Due to different alignment requirements, the bytecode should not be portable across different platforms. >+Does enabling the JIT affect the bytecode? Possibly not, since it may only affect the metadata and not the bytecode itself, but TBC. >+ >+## Performance >+ >+Removing the linking step means that the interpreter will no longer be direct-threaded. Disabling COMPUTED_GOTO in CLoop (in order to disable direct threading) shows a 1% regression on PLT. >+ >+However, CLoop's fallback implementation is a switch statement, which affects branch prediction. 
>+ >+Alternatively, hacking JSC to skip replacing opcodes with their addresses during linking and modifying the dispatch macro in CLoop to fetch opcodes addresses shows a ~1% progression over CLoop with COMPUTED_GOTO enabled. >+ >+### get_by_id >+ >+`get_by_id` is the instruction that will require the most change, since we currently rewrite the bytecode stream to select from multiple implementations that share the same size. We can default to trying the most performance critical version of `get_by_id` first and fallback to loading the metadata field that specifies which version of the opcode should we execute. >+ >+# Current issues >+ >+Forward jumps will always generate wide opcodes: UINT_MAX is used as invalidLocation, which means that the address won't fit into a 1-byte operand. We might need to compact it later. >diff --git a/Source/WTF/wtf/Vector.h b/Source/WTF/wtf/Vector.h >index 306f46249d257eb365efd2cb439d5daebc2d38d9..e5d76423495d3cf4b4208e069aef1662df689fdf 100644 >--- a/Source/WTF/wtf/Vector.h >+++ b/Source/WTF/wtf/Vector.h >@@ -780,7 +780,7 @@ public: > > template<typename U> void insert(size_t position, const U*, size_t); > template<typename U> void insert(size_t position, U&&); >- template<typename U, size_t c> void insertVector(size_t position, const Vector<U, c>&); >+ template<typename U, size_t c, typename OH> void insertVector(size_t position, const Vector<U, c, OH>&); > > void remove(size_t position); > void remove(size_t position, size_t length); >@@ -1436,8 +1436,8 @@ inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::insert(size > } > > template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity> >-template<typename U, size_t c> >-inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::insertVector(size_t position, const Vector<U, c>& val) >+template<typename U, size_t c, typename OH> >+inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::insertVector(size_t position, 
const Vector<U, c, OH>& val) > { > insert(position, val.begin(), val.size()); > }
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Formatted Diff
|
Diff
Attachments on
bug 187373
:
344389
|
344531
|
344635
|
344935
|
345812
|
346138
|
346673
|
346756
|
346862
|
347641
|
347766
|
348149
|
348294
|
348572
|
348792
|
348847
|
348971
|
349051
|
349080
|
349211
|
349307
|
349396
|
349473
|
349594
|
349700
|
349991
|
350040
|
350625
|
350716
|
350743
|
350835
|
350888
|
350987
|
351708
|
351743
|
351841
|
351955
|
351964
|
351995
|
352037
|
352050
|
352126
|
352232
|
352267
|
352268
|
352284
|
352287
|
352288
|
352312
|
352319
|
352322
|
352565
|
352580
|
352600
|
352639
|
352651
|
352664
|
352677
|
352680
|
352689
|
352692
|
352707
|
352719
|
352750
|
352806
|
352809
|
352811
|
352823
|
352843
|
352852
|
352853
|
352861
|
352863
|
352865
|
352866
|
352868
|
352913
|
352926
|
352936
|
352948
|
352981
|
352988
|
352993
|
352999
|
353008
|
353009
|
353033
|
353166
|
353170
|
353199
|
353213
|
353227
|
353235