WebKit Bugzilla
Attachment 370215 Details for
Bug 197979
: [JSC] Implement op_wide16 / op_wide32 and introduce 16bit version bytecode
Home
|
New
|
Browse
|
Search
|
[?]
|
Reports
|
Requests
|
Help
|
New Account
|
Log In
Remember
[x]
|
Forgot Password
Login:
[x]
[patch]
Patch
bug-197979-20190518192057.patch (text/plain), 66.81 KB, created by
Yusuke Suzuki
on 2019-05-18 19:20:58 PDT
(
hide
)
Description:
Patch
Filename:
MIME Type:
Creator:
Yusuke Suzuki
Created:
2019-05-18 19:20:58 PDT
Size:
66.81 KB
patch
obsolete
>Subversion Revision: 245500 >diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog >index 547bd5233c9107bc7b5a9b844d7f9358126209c5..6479c6ab6ac3538179e637d66d494533b0999bae 100644 >--- a/Source/JavaScriptCore/ChangeLog >+++ b/Source/JavaScriptCore/ChangeLog >@@ -1,3 +1,74 @@ >+2019-05-18 Tadeu Zagallo <tzagallo@apple.com> and Yusuke Suzuki <ysuzuki@apple.com> >+ >+ [JSC] Implement op_wide16 / op_wide32 and introduce 16bit version bytecode >+ https://bugs.webkit.org/show_bug.cgi?id=197979 >+ >+ Reviewed by NOBODY (OOPS!). >+ >+ * bytecode/BytecodeDumper.cpp: >+ (JSC::BytecodeDumper<Block>::dumpBlock): >+ * bytecode/BytecodeList.rb: >+ * bytecode/BytecodeUseDef.h: >+ (JSC::computeUsesForBytecodeOffset): >+ (JSC::computeDefsForBytecodeOffset): >+ * bytecode/CodeBlock.cpp: >+ (JSC::CodeBlock::finishCreation): >+ * bytecode/Fits.h: >+ * bytecode/Instruction.h: >+ (JSC::Instruction::opcodeID const): >+ (JSC::Instruction::isWide16 const): >+ (JSC::Instruction::isWide32 const): >+ (JSC::Instruction::hasMetadata const): >+ (JSC::Instruction::width const): >+ (JSC::Instruction::size const): >+ (JSC::Instruction::wide16 const): >+ (JSC::Instruction::wide32 const): >+ (JSC::Instruction::isWide const): Deleted. >+ (JSC::Instruction::wide const): Deleted. >+ * bytecode/InstructionStream.h: >+ (JSC::InstructionStreamWriter::write): >+ * bytecode/Opcode.h: >+ * bytecode/OpcodeSize.h: >+ * bytecompiler/BytecodeGenerator.cpp: >+ (JSC::BytecodeGenerator::alignWideOpcode16): >+ (JSC::BytecodeGenerator::alignWideOpcode32): >+ (JSC::BytecodeGenerator::emitGetByVal): >+ (JSC::BytecodeGenerator::emitYieldPoint): >+ (JSC::StructureForInContext::finalize): >+ (JSC::BytecodeGenerator::alignWideOpcode): Deleted. 
>+ * bytecompiler/BytecodeGenerator.h: >+ (JSC::BytecodeGenerator::write): >+ * dfg/DFGCapabilities.cpp: >+ (JSC::DFG::capabilityLevel): >+ * generator/Argument.rb: >+ * generator/DSL.rb: >+ * generator/Opcode.rb: >+ * generator/Section.rb: >+ * jit/JITExceptions.cpp: >+ (JSC::genericUnwind): >+ * llint/LLIntData.cpp: >+ (JSC::LLInt::initialize): >+ * llint/LLIntData.h: >+ (JSC::LLInt::opcodeMapWide16): >+ (JSC::LLInt::opcodeMapWide32): >+ (JSC::LLInt::getOpcodeWide16): >+ (JSC::LLInt::getOpcodeWide32): >+ (JSC::LLInt::getWide16CodePtr): >+ (JSC::LLInt::getWide32CodePtr): >+ (JSC::LLInt::opcodeMapWide): Deleted. >+ (JSC::LLInt::getOpcodeWide): Deleted. >+ (JSC::LLInt::getWideCodePtr): Deleted. >+ * llint/LLIntSlowPaths.cpp: >+ (JSC::LLInt::LLINT_SLOW_PATH_DECL): >+ * llint/LLIntSlowPaths.h: >+ * llint/LowLevelInterpreter.asm: >+ * llint/LowLevelInterpreter.cpp: >+ (JSC::CLoop::execute): >+ * llint/LowLevelInterpreter32_64.asm: >+ * llint/LowLevelInterpreter64.asm: >+ * offlineasm/arm64.rb: >+ * offlineasm/x86.rb: >+ > 2019-05-18 Tadeu Zagallo <tzagallo@apple.com> > > Add extra information to dumpJITMemory >diff --git a/Source/JavaScriptCore/bytecode/BytecodeDumper.cpp b/Source/JavaScriptCore/bytecode/BytecodeDumper.cpp >index 721d390552c92a8b3a264b08e9525812f8244f8e..ec31ef0e1f24d40e42bb23b993a4c8353e54ebd9 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeDumper.cpp >+++ b/Source/JavaScriptCore/bytecode/BytecodeDumper.cpp >@@ -193,22 +193,26 @@ template<class Block> > void BytecodeDumper<Block>::dumpBlock(Block* block, const InstructionStream& instructions, PrintStream& out, const ICStatusMap& statusMap) > { > size_t instructionCount = 0; >- size_t wideInstructionCount = 0; >+ size_t wide16InstructionCount = 0; >+ size_t wide32InstructionCount = 0; > size_t instructionWithMetadataCount = 0; > > for (const auto& instruction : instructions) { >- if (instruction->isWide()) >- ++wideInstructionCount; >- if (instruction->opcodeID() < 
NUMBER_OF_BYTECODE_WITH_METADATA) >+ if (instruction->isWide16()) >+ ++wide16InstructionCount; >+ else if (instruction->isWide32()) >+ ++wide32InstructionCount; >+ if (instruction->hasMetadata()) > ++instructionWithMetadataCount; > ++instructionCount; > } > > out.print(*block); > out.printf( >- ": %lu instructions (%lu wide instructions, %lu instructions with metadata); %lu bytes (%lu metadata bytes); %d parameter(s); %d callee register(s); %d variable(s)", >+ ": %lu instructions (%lu 16-byte instructions, %lu 32-byte instructions, %lu instructions with metadata); %lu bytes (%lu metadata bytes); %d parameter(s); %d callee register(s); %d variable(s)", > static_cast<unsigned long>(instructionCount), >- static_cast<unsigned long>(wideInstructionCount), >+ static_cast<unsigned long>(wide16InstructionCount), >+ static_cast<unsigned long>(wide32InstructionCount), > static_cast<unsigned long>(instructionWithMetadataCount), > static_cast<unsigned long>(instructions.sizeInBytes() + block->metadataSizeInBytes()), > static_cast<unsigned long>(block->metadataSizeInBytes()), >diff --git a/Source/JavaScriptCore/bytecode/BytecodeList.rb b/Source/JavaScriptCore/bytecode/BytecodeList.rb >index 0695a2507b62397649a87989616755c0939377ab..120e707aa12737c0398cd7473797d00f8f0e6849 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeList.rb >+++ b/Source/JavaScriptCore/bytecode/BytecodeList.rb >@@ -82,7 +82,8 @@ > asm_prefix: "llint_", > op_prefix: "op_" > >-op :wide >+op :wide16 >+op :wide32 > > op :enter > >@@ -1146,6 +1147,17 @@ > op :llint_cloop_did_return_from_js_21 > op :llint_cloop_did_return_from_js_22 > op :llint_cloop_did_return_from_js_23 >+op :llint_cloop_did_return_from_js_24 >+op :llint_cloop_did_return_from_js_25 >+op :llint_cloop_did_return_from_js_26 >+op :llint_cloop_did_return_from_js_27 >+op :llint_cloop_did_return_from_js_28 >+op :llint_cloop_did_return_from_js_29 >+op :llint_cloop_did_return_from_js_30 >+op :llint_cloop_did_return_from_js_31 >+op 
:llint_cloop_did_return_from_js_32 >+op :llint_cloop_did_return_from_js_33 >+op :llint_cloop_did_return_from_js_34 > > end_section :CLoopHelpers > >diff --git a/Source/JavaScriptCore/bytecode/BytecodeUseDef.h b/Source/JavaScriptCore/bytecode/BytecodeUseDef.h >index 5718b5bd31faf54188dfad27cc30c34ea14d0764..4962c7618044bb147a63a118473d13285391f2f1 100644 >--- a/Source/JavaScriptCore/bytecode/BytecodeUseDef.h >+++ b/Source/JavaScriptCore/bytecode/BytecodeUseDef.h >@@ -68,7 +68,8 @@ void computeUsesForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, const Ins > }; > > switch (opcodeID) { >- case op_wide: >+ case op_wide16: >+ case op_wide32: > RELEASE_ASSERT_NOT_REACHED(); > > // No uses. >@@ -289,7 +290,8 @@ template<typename Block, typename Functor> > void computeDefsForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, const Instruction* instruction, const Functor& functor) > { > switch (opcodeID) { >- case op_wide: >+ case op_wide16: >+ case op_wide32: > RELEASE_ASSERT_NOT_REACHED(); > > // These don't define anything. >diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp >index bc9ed129def31a37d4f78451c3a167a71b8dee09..0aa0914da8b2b1101d81bbb241ca47087d4d1efb 100644 >--- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp >+++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp >@@ -445,9 +445,14 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink > const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i); > HandlerInfo& handler = m_rareData->m_exceptionHandlers[i]; > #if ENABLE(JIT) >- MacroAssemblerCodePtr<BytecodePtrTag> codePtr = instructions().at(unlinkedHandler.target)->isWide() >- ? 
LLInt::getWideCodePtr<BytecodePtrTag>(op_catch) >- : LLInt::getCodePtr<BytecodePtrTag>(op_catch); >+ auto instruction = instructions().at(unlinkedHandler.target); >+ MacroAssemblerCodePtr<BytecodePtrTag> codePtr; >+ if (instruction->isWide32()) >+ codePtr = LLInt::getWide32CodePtr<BytecodePtrTag>(op_catch); >+ else if (instruction->isWide16()) >+ codePtr = LLInt::getWide16CodePtr<BytecodePtrTag>(op_catch); >+ else >+ codePtr = LLInt::getCodePtr<BytecodePtrTag>(op_catch); > handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(codePtr.retagged<ExceptionHandlerPtrTag>())); > #else > handler.initialize(unlinkedHandler); >diff --git a/Source/JavaScriptCore/bytecode/Fits.h b/Source/JavaScriptCore/bytecode/Fits.h >index 24d7757c979465ef28018ad3b7c91f339c82a1d7..637eebda852914c78c17a1e7a1f3b7486f4e78ac 100644 >--- a/Source/JavaScriptCore/bytecode/Fits.h >+++ b/Source/JavaScriptCore/bytecode/Fits.h >@@ -51,123 +51,127 @@ struct Fits; > // Implicit conversion for types of the same size > template<typename T, OpcodeSize size> > struct Fits<T, size, std::enable_if_t<sizeof(T) == size, std::true_type>> { >- static bool check(T) { return true; } >- >- static typename TypeBySize<size>::type convert(T t) { return bitwise_cast<typename TypeBySize<size>::type>(t); } >- >- template<class T1 = T, OpcodeSize size1 = size, typename = std::enable_if_t<!std::is_same<T1, typename TypeBySize<size1>::type>::value, std::true_type>> >- static T1 convert(typename TypeBySize<size1>::type t) { return bitwise_cast<T1>(t); } >-}; >+ using TargetType = typename TypeBySize<size>::unsignedType; > >-template<typename T, OpcodeSize size> >-struct Fits<T, size, std::enable_if_t<sizeof(T) < size, std::true_type>> { > static bool check(T) { return true; } > >- static typename TypeBySize<size>::type convert(T t) { return static_cast<typename TypeBySize<size>::type>(t); } >+ static TargetType convert(T t) { return bitwise_cast<TargetType>(t); } > >- template<class T1 = T, OpcodeSize 
size1 = size, typename = std::enable_if_t<!std::is_same<T1, typename TypeBySize<size1>::type>::value, std::true_type>> >- static T1 convert(typename TypeBySize<size1>::type t) { return static_cast<T1>(t); } >+ template<class T1 = T, OpcodeSize size1 = size, typename = std::enable_if_t<!std::is_same<T1, TargetType>::value, std::true_type>> >+ static T1 convert(TargetType t) { return bitwise_cast<T1>(t); } > }; > >-template<> >-struct Fits<uint32_t, OpcodeSize::Narrow> { >- static bool check(unsigned u) { return u <= UINT8_MAX; } >+template<typename T, OpcodeSize size> >+struct Fits<T, size, std::enable_if_t<std::is_integral<T>::value && sizeof(T) != size, std::true_type>> { >+ using TargetType = std::conditional_t<std::is_unsigned<T>::value, typename TypeBySize<size>::unsignedType, typename TypeBySize<size>::signedType>; > >- static uint8_t convert(unsigned u) >- { >- ASSERT(check(u)); >- return static_cast<uint8_t>(u); >- } >- static unsigned convert(uint8_t u) >+ static bool check(T t) > { >- return u; >+ return t >= std::numeric_limits<TargetType>::min() && t <= std::numeric_limits<TargetType>::max(); > } >-}; > >-template<> >-struct Fits<int, OpcodeSize::Narrow> { >- static bool check(int i) >+ static TargetType convert(T t) > { >- return i >= INT8_MIN && i <= INT8_MAX; >+ ASSERT(check(t)); >+ return static_cast<TargetType>(t); > } > >- static uint8_t convert(int i) >- { >- ASSERT(check(i)); >- return static_cast<uint8_t>(i); >- } >+ template<class T1 = T, OpcodeSize size1 = size, typename TargetType1 = TargetType, typename = std::enable_if_t<!std::is_same<T1, TargetType1>::value, std::true_type>> >+ static T1 convert(TargetType1 t) { return static_cast<T1>(t); } >+}; > >- static int convert(uint8_t i) >- { >- return static_cast<int8_t>(i); >- } >+template<OpcodeSize size> >+struct FirstConstant; >+ >+template<> >+struct FirstConstant<OpcodeSize::Narrow> { >+ static constexpr unsigned index = 16; > }; > > template<> >-struct Fits<VirtualRegister, 
OpcodeSize::Narrow> { >+struct FirstConstant<OpcodeSize::Wide16> { >+ static constexpr unsigned index = 64; >+}; >+ >+template<OpcodeSize size> >+struct Fits<VirtualRegister, size, std::enable_if_t<size != OpcodeSize::Wide32, std::true_type>> { >+ // Narrow: > // -128..-1 local variables > // 0..15 arguments > // 16..127 constants >- static constexpr int s_firstConstantIndex = 16; >+ // >+ // Wide16: >+ // -2**15..-1 local variables >+ // 0..64 arguments >+ // 64..2**15-1 constants >+ >+ using TargetType = typename TypeBySize<size>::signedType; >+ >+ static constexpr int s_firstConstantIndex = FirstConstant<size>::index; > static bool check(VirtualRegister r) > { > if (r.isConstant()) >- return (s_firstConstantIndex + r.toConstantIndex()) <= INT8_MAX; >- return r.offset() >= INT8_MIN && r.offset() < s_firstConstantIndex; >+ return (s_firstConstantIndex + r.toConstantIndex()) <= std::numeric_limits<TargetType>::max(); >+ return r.offset() >= std::numeric_limits<TargetType>::min() && r.offset() < s_firstConstantIndex; > } > >- static uint8_t convert(VirtualRegister r) >+ static TargetType convert(VirtualRegister r) > { > ASSERT(check(r)); > if (r.isConstant()) >- return static_cast<int8_t>(s_firstConstantIndex + r.toConstantIndex()); >- return static_cast<int8_t>(r.offset()); >+ return static_cast<TargetType>(s_firstConstantIndex + r.toConstantIndex()); >+ return static_cast<TargetType>(r.offset()); > } > >- static VirtualRegister convert(uint8_t u) >+ static VirtualRegister convert(TargetType u) > { >- int i = static_cast<int>(static_cast<int8_t>(u)); >+ int i = static_cast<int>(static_cast<TargetType>(u)); > if (i >= s_firstConstantIndex) > return VirtualRegister { (i - s_firstConstantIndex) + FirstConstantRegisterIndex }; > return VirtualRegister { i }; > } > }; > >-template<> >-struct Fits<SymbolTableOrScopeDepth, OpcodeSize::Narrow> { >+template<OpcodeSize size> >+struct Fits<SymbolTableOrScopeDepth, size, std::enable_if_t<size != OpcodeSize::Wide32, 
std::true_type>> { >+ using TargetType = typename TypeBySize<size>::unsignedType; >+ > static bool check(SymbolTableOrScopeDepth u) > { >- return u.raw() <= UINT8_MAX; >+ return u.raw() <= std::numeric_limits<TargetType>::max(); > } > >- static uint8_t convert(SymbolTableOrScopeDepth u) >+ static TargetType convert(SymbolTableOrScopeDepth u) > { > ASSERT(check(u)); >- return static_cast<uint8_t>(u.raw()); >+ return static_cast<TargetType>(u.raw()); > } > >- static SymbolTableOrScopeDepth convert(uint8_t u) >+ static SymbolTableOrScopeDepth convert(TargetType u) > { > return SymbolTableOrScopeDepth::raw(u); > } > }; > >-template<> >-struct Fits<Special::Pointer, OpcodeSize::Narrow> : Fits<int, OpcodeSize::Narrow> { >- using Base = Fits<int, OpcodeSize::Narrow>; >+template<OpcodeSize size> >+struct Fits<Special::Pointer, size, std::enable_if_t<size != OpcodeSize::Wide32>> : public Fits<int, size> { >+ using Base = Fits<int, size>; >+ > static bool check(Special::Pointer sp) { return Base::check(static_cast<int>(sp)); } >- static uint8_t convert(Special::Pointer sp) >+ static typename Base::TargetType convert(Special::Pointer sp) > { > return Base::convert(static_cast<int>(sp)); > } >- static Special::Pointer convert(uint8_t sp) >+ static Special::Pointer convert(typename Base::TargetType sp) > { > return static_cast<Special::Pointer>(Base::convert(sp)); > } > }; > >-template<> >-struct Fits<GetPutInfo, OpcodeSize::Narrow> { >+template<OpcodeSize size> >+struct Fits<GetPutInfo, size, std::enable_if_t<size != OpcodeSize::Wide32, std::true_type>> { >+ using TargetType = typename TypeBySize<size>::unsignedType; >+ > // 13 Resolve Types > // 3 Initialization Modes > // 2 Resolve Modes >@@ -197,7 +201,7 @@ struct Fits<GetPutInfo, OpcodeSize::Narrow> { > return resolveType < s_resolveTypeMax && initializationMode < s_initializationModeMax && resolveMode < s_resolveModeMax; > } > >- static uint8_t convert(GetPutInfo gpi) >+ static TargetType convert(GetPutInfo gpi) > { > 
ASSERT(check(gpi)); > auto resolveType = static_cast<uint8_t>(gpi.resolveType()); >@@ -206,7 +210,7 @@ struct Fits<GetPutInfo, OpcodeSize::Narrow> { > return (resolveType << 3) | (initializationMode << 1) | resolveMode; > } > >- static GetPutInfo convert(uint8_t gpi) >+ static GetPutInfo convert(TargetType gpi) > { > auto resolveType = static_cast<ResolveType>((gpi & s_resolveTypeBits) >> 3); > auto initializationMode = static_cast<InitializationMode>((gpi & s_initializationModeBits) >> 1); >@@ -215,54 +219,48 @@ struct Fits<GetPutInfo, OpcodeSize::Narrow> { > } > }; > >-template<> >-struct Fits<DebugHookType, OpcodeSize::Narrow> : Fits<int, OpcodeSize::Narrow> { >- using Base = Fits<int, OpcodeSize::Narrow>; >- static bool check(DebugHookType dht) { return Base::check(static_cast<int>(dht)); } >- static uint8_t convert(DebugHookType dht) >+template<typename E, OpcodeSize size> >+struct Fits<E, size, std::enable_if_t<sizeof(E) != size && std::is_enum<E>::value, std::true_type>> : public Fits<std::underlying_type_t<E>, size> { >+ using Base = Fits<std::underlying_type_t<E>, size>; >+ >+ static bool check(E e) { return Base::check(static_cast<std::underlying_type_t<E>>(e)); } >+ >+ static typename Base::TargetType convert(E e) > { >- return Base::convert(static_cast<int>(dht)); >+ return Base::convert(static_cast<std::underlying_type_t<E>>(e)); > } >- static DebugHookType convert(uint8_t dht) >+ >+ static E convert(typename Base::TargetType e) > { >- return static_cast<DebugHookType>(Base::convert(dht)); >+ return static_cast<E>(Base::convert(e)); > } > }; > >+template<OpcodeSize size> >+struct OperandTypesForSize; >+ > template<> >-struct Fits<ProfileTypeBytecodeFlag, OpcodeSize::Narrow> : Fits<int, OpcodeSize::Narrow> { >- using Base = Fits<int, OpcodeSize::Narrow>; >- static bool check(ProfileTypeBytecodeFlag ptbf) { return Base::check(static_cast<int>(ptbf)); } >- static uint8_t convert(ProfileTypeBytecodeFlag ptbf) >- { >- return 
Base::convert(static_cast<int>(ptbf)); >- } >- static ProfileTypeBytecodeFlag convert(uint8_t ptbf) >- { >- return static_cast<ProfileTypeBytecodeFlag>(Base::convert(ptbf)); >- } >+struct OperandTypesForSize<OpcodeSize::Narrow> { >+ static constexpr unsigned maxType = 0x10; >+ static constexpr unsigned typeWidth = 4; > }; > > template<> >-struct Fits<ResolveType, OpcodeSize::Narrow> : Fits<int, OpcodeSize::Narrow> { >- using Base = Fits<int, OpcodeSize::Narrow>; >- static bool check(ResolveType rt) { return Base::check(static_cast<int>(rt)); } >- static uint8_t convert(ResolveType rt) >- { >- return Base::convert(static_cast<int>(rt)); >- } >- >- static ResolveType convert(uint8_t rt) >- { >- return static_cast<ResolveType>(Base::convert(rt)); >- } >+struct OperandTypesForSize<OpcodeSize::Wide16> { >+ static constexpr unsigned maxType = 0x100; >+ static constexpr unsigned typeWidth = 8; > }; > >-template<> >-struct Fits<OperandTypes, OpcodeSize::Narrow> { >+template<OpcodeSize size> >+struct Fits<OperandTypes, size, std::enable_if_t<size != OpcodeSize::Wide32, std::true_type>> { >+ using TargetType = typename TypeBySize<size>::unsignedType; >+ > // a pair of (ResultType::Type, ResultType::Type) - try to fit each type into 4 bits > // additionally, encode unknown types as 0 rather than the | of all types >- static constexpr int s_maxType = 0x10; >+ static constexpr int s_maxType = OperandTypesForSize<size>::maxType; >+ static constexpr int s_typeWidth = OperandTypesForSize<size>::typeWidth; >+ static constexpr int s_secondTypeMask = s_maxType - 1; >+ static constexpr int s_firstTypeMask = s_secondTypeMask << s_typeWidth; > > static bool check(OperandTypes types) > { >@@ -275,7 +273,7 @@ struct Fits<OperandTypes, OpcodeSize::Narrow> { > return first < s_maxType && second < s_maxType; > } > >- static uint8_t convert(OperandTypes types) >+ static TargetType convert(OperandTypes types) > { > ASSERT(check(types)); > auto first = types.first().bits(); >@@ -284,13 +282,13 
@@ struct Fits<OperandTypes, OpcodeSize::Narrow> { > first = 0; > if (second == ResultType::unknownType().bits()) > second = 0; >- return (first << 4) | second; >+ return (first << s_typeWidth) | second; > } > >- static OperandTypes convert(uint8_t types) >+ static OperandTypes convert(TargetType types) > { >- auto first = (types & (0xf << 4)) >> 4; >- auto second = (types & 0xf); >+ auto first = (types & s_firstTypeMask) >> s_typeWidth; >+ auto second = types & s_secondTypeMask; > if (!first) > first = ResultType::unknownType().bits(); > if (!second) >@@ -299,24 +297,8 @@ struct Fits<OperandTypes, OpcodeSize::Narrow> { > } > }; > >-template<> >-struct Fits<PutByIdFlags, OpcodeSize::Narrow> : Fits<int, OpcodeSize::Narrow> { >- // only ever encoded in the bytecode stream as 0 or 1, so the trivial encoding should be good enough >- using Base = Fits<int, OpcodeSize::Narrow>; >- static bool check(PutByIdFlags flags) { return Base::check(static_cast<int>(flags)); } >- static uint8_t convert(PutByIdFlags flags) >- { >- return Base::convert(static_cast<int>(flags)); >- } >- >- static PutByIdFlags convert(uint8_t flags) >- { >- return static_cast<PutByIdFlags>(Base::convert(flags)); >- } >-}; >- > template<OpcodeSize size> >-struct Fits<BoundLabel, size> : Fits<int, size> { >+struct Fits<BoundLabel, size> : public Fits<int, size> { > // This is a bit hacky: we need to delay computing jump targets, since we > // might have to emit `nop`s to align the instructions stream.
Additionally, > // we have to compute the target before we start writing to the instruction >@@ -330,12 +312,12 @@ struct Fits<BoundLabel, size> : Fits<int, size> { > return Base::check(label.saveTarget()); > } > >- static typename TypeBySize<size>::type convert(BoundLabel& label) >+ static typename Base::TargetType convert(BoundLabel& label) > { > return Base::convert(label.commitTarget()); > } > >- static BoundLabel convert(typename TypeBySize<size>::type target) >+ static BoundLabel convert(typename Base::TargetType target) > { > return BoundLabel(Base::convert(target)); > } >diff --git a/Source/JavaScriptCore/bytecode/Instruction.h b/Source/JavaScriptCore/bytecode/Instruction.h >index fb278e9cad37a01667fe113a41bafd3b9956ce0d..fd6ef7060b68272cdf4db9711f585aa49fc0b882 100644 >--- a/Source/JavaScriptCore/bytecode/Instruction.h >+++ b/Source/JavaScriptCore/bytecode/Instruction.h >@@ -45,14 +45,16 @@ struct Instruction { > OpcodeID opcodeID() const { return static_cast<OpcodeID>(m_opcode); } > > private: >- typename TypeBySize<Width>::type m_opcode; >+ typename TypeBySize<Width>::unsignedType m_opcode; > }; > > public: > OpcodeID opcodeID() const > { >- if (isWide()) >- return wide()->opcodeID(); >+ if (isWide32()) >+ return wide32()->opcodeID(); >+ if (isWide16()) >+ return wide16()->opcodeID(); > return narrow()->opcodeID(); > } > >@@ -61,16 +63,35 @@ struct Instruction { > return opcodeNames[opcodeID()]; > } > >- bool isWide() const >+ bool isWide16() const > { >- return narrow()->opcodeID() == op_wide; >+ return narrow()->opcodeID() == op_wide16; >+ } >+ >+ bool isWide32() const >+ { >+ return narrow()->opcodeID() == op_wide32; >+ } >+ >+ bool hasMetadata() const >+ { >+ return opcodeID() < NUMBER_OF_BYTECODE_WITH_METADATA; >+ } >+ >+ int width() const >+ { >+ if (isWide32()) >+ return 2; >+ if (isWide16()) >+ return 1; >+ return 0; > } > > size_t size() const > { >- auto wide = isWide(); >- auto padding = wide ? 1 : 0; >- auto size = wide ? 
4 : 1; >+ auto width = this->width(); >+ auto padding = width ? 1 : 0; >+ auto size = 1 << width; > return opcodeLengths[opcodeID()] * size + padding; > } > >@@ -106,11 +127,18 @@ struct Instruction { > return reinterpret_cast<const Impl<OpcodeSize::Narrow>*>(this); > } > >- const Impl<OpcodeSize::Wide>* wide() const >+ const Impl<OpcodeSize::Wide16>* wide16() const >+ { >+ >+ ASSERT(isWide16()); >+ return reinterpret_cast<const Impl<OpcodeSize::Wide16>*>(bitwise_cast<uintptr_t>(this) + 1); >+ } >+ >+ const Impl<OpcodeSize::Wide32>* wide32() const > { > >- ASSERT(isWide()); >- return reinterpret_cast<const Impl<OpcodeSize::Wide>*>(bitwise_cast<uintptr_t>(this) + 1); >+ ASSERT(isWide32()); >+ return reinterpret_cast<const Impl<OpcodeSize::Wide32>*>(bitwise_cast<uintptr_t>(this) + 1); > } > }; > >diff --git a/Source/JavaScriptCore/bytecode/InstructionStream.h b/Source/JavaScriptCore/bytecode/InstructionStream.h >index ce9607b372f3bda529a9b5e1fbc74ab60f5c2584..99b5a5a906026cece8b4dd100449289d1ffe6438 100644 >--- a/Source/JavaScriptCore/bytecode/InstructionStream.h >+++ b/Source/JavaScriptCore/bytecode/InstructionStream.h >@@ -210,6 +210,20 @@ class InstructionStreamWriter : public InstructionStream { > m_position++; > } > } >+ >+ void write(uint16_t h) >+ { >+ ASSERT(!m_finalized); >+ uint8_t bytes[2]; >+ std::memcpy(bytes, &h, sizeof(h)); >+ >+ // Though not always obvious, we don't have to invert the order of the >+ // bytes written here for CPU(BIG_ENDIAN). This is because the incoming >+ // h value is already ordered in big endian on CPU(BIG_ENDIAN) platforms.
>+ write(bytes[0]); >+ write(bytes[1]); >+ } >+ > void write(uint32_t i) > { > ASSERT(!m_finalized); >diff --git a/Source/JavaScriptCore/bytecode/Opcode.h b/Source/JavaScriptCore/bytecode/Opcode.h >index 8ac603e3cc5f7b193544bb0cff0848ddb22074ce..106de03f07420cd6e03a7ef4e89a675f1002bdc7 100644 >--- a/Source/JavaScriptCore/bytecode/Opcode.h >+++ b/Source/JavaScriptCore/bytecode/Opcode.h >@@ -66,8 +66,12 @@ const int numOpcodeIDs = NUMBER_OF_BYTECODE_IDS + NUMBER_OF_BYTECODE_HELPER_IDS; > > #if ENABLE(C_LOOP) && !HAVE(COMPUTED_GOTO) > >-#define OPCODE_ID_ENUM(opcode, length) opcode##_wide = numOpcodeIDs + opcode, >- enum OpcodeIDWide : unsigned { FOR_EACH_OPCODE_ID(OPCODE_ID_ENUM) }; >+#define OPCODE_ID_ENUM(opcode, length) opcode##_wide16 = numOpcodeIDs + opcode, >+ enum OpcodeIDWide16 : unsigned { FOR_EACH_OPCODE_ID(OPCODE_ID_ENUM) }; >+#undef OPCODE_ID_ENUM >+ >+#define OPCODE_ID_ENUM(opcode, length) opcode##_wide32 = numOpcodeIDs * 2 + opcode, >+ enum OpcodeIDWide32 : unsigned { FOR_EACH_OPCODE_ID(OPCODE_ID_ENUM) }; > #undef OPCODE_ID_ENUM > #endif > >diff --git a/Source/JavaScriptCore/bytecode/OpcodeSize.h b/Source/JavaScriptCore/bytecode/OpcodeSize.h >index 98943f39d8ef08efa8f876b882fec0ebeac7d786..24b162b93f79e84a0d1864231acdbd73e59aa063 100644 >--- a/Source/JavaScriptCore/bytecode/OpcodeSize.h >+++ b/Source/JavaScriptCore/bytecode/OpcodeSize.h >@@ -29,7 +29,8 @@ namespace JSC { > > enum OpcodeSize { > Narrow = 1, >- Wide = 4, >+ Wide16 = 2, >+ Wide32 = 4, > }; > > template<OpcodeSize> >@@ -37,12 +38,20 @@ struct TypeBySize; > > template<> > struct TypeBySize<OpcodeSize::Narrow> { >- using type = uint8_t; >+ using signedType = int8_t; >+ using unsignedType = uint8_t; > }; > > template<> >-struct TypeBySize<OpcodeSize::Wide> { >- using type = uint32_t; >+struct TypeBySize<OpcodeSize::Wide16> { >+ using signedType = int16_t; >+ using unsignedType = uint16_t; >+}; >+ >+template<> >+struct TypeBySize<OpcodeSize::Wide32> { >+ using signedType = int32_t; >+ using 
unsignedType = uint32_t; > }; > > template<OpcodeSize> >@@ -54,7 +63,12 @@ struct PaddingBySize<OpcodeSize::Narrow> { > }; > > template<> >-struct PaddingBySize<OpcodeSize::Wide> { >+struct PaddingBySize<OpcodeSize::Wide16> { >+ static constexpr uint8_t value = 1; >+}; >+ >+template<> >+struct PaddingBySize<OpcodeSize::Wide32> { > static constexpr uint8_t value = 1; > }; > >diff --git a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp >index 667dac047e187ed7e6db983af30611b4efd7ae39..a33c988cc5d184fd5c42adb0bff0aa405e9e9bb3 100644 >--- a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp >+++ b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp >@@ -1339,10 +1339,18 @@ void BytecodeGenerator::recordOpcode(OpcodeID opcodeID) > m_lastOpcodeID = opcodeID; > } > >-void BytecodeGenerator::alignWideOpcode() >+void BytecodeGenerator::alignWideOpcode16() > { > #if CPU(NEEDS_ALIGNED_ACCESS) >- while ((m_writer.position() + 1) % OpcodeSize::Wide) >+ while ((m_writer.position() + 1) % OpcodeSize::Wide16) >+ OpNop::emit<OpcodeSize::Narrow>(this); >+#endif >+} >+ >+void BytecodeGenerator::alignWideOpcode32() >+{ >+#if CPU(NEEDS_ALIGNED_ACCESS) >+ while ((m_writer.position() + 1) % OpcodeSize::Wide32) > OpNop::emit<OpcodeSize::Narrow>(this); > #endif > } >@@ -2721,13 +2729,13 @@ RegisterID* BytecodeGenerator::emitGetByVal(RegisterID* dst, RegisterID* base, R > > if (context.isIndexedForInContext()) { > auto& indexedContext = context.asIndexedForInContext(); >- OpGetByVal::emit<OpcodeSize::Wide>(this, kill(dst), base, indexedContext.index()); >+ OpGetByVal::emit<OpcodeSize::Wide32>(this, kill(dst), base, indexedContext.index()); > indexedContext.addGetInst(m_lastInstruction.offset(), property->index()); > return dst; > } > > StructureForInContext& structureContext = context.asStructureForInContext(); >- OpGetDirectPname::emit<OpcodeSize::Wide>(this, kill(dst), base, property, structureContext.index(), 
structureContext.enumerator()); >+ OpGetDirectPname::emit<OpcodeSize::Wide32>(this, kill(dst), base, property, structureContext.index(), structureContext.enumerator()); > > structureContext.addGetInst(m_lastInstruction.offset(), property->index()); > return dst; >@@ -4480,7 +4488,7 @@ void BytecodeGenerator::emitYieldPoint(RegisterID* argument, JSAsyncGeneratorFun > #if CPU(NEEDS_ALIGNED_ACCESS) > // conservatively align for the bytecode rewriter: it will delete this yield and > // append a fragment, so we make sure that the start of the fragments is aligned >- while (m_writer.position() % OpcodeSize::Wide) >+ while (m_writer.position() % OpcodeSize::Wide32) > OpNop::emit<OpcodeSize::Narrow>(this); > #endif > OpYield::emit(this, generatorFrameRegister(), yieldPointIndex, argument); >@@ -4983,7 +4991,7 @@ void StructureForInContext::finalize(BytecodeGenerator& generator, UnlinkedCodeB > int propertyRegIndex = std::get<1>(instTuple); > auto instruction = generator.m_writer.ref(instIndex); > auto end = instIndex + instruction->size(); >- ASSERT(instruction->isWide()); >+ ASSERT(instruction->isWide32()); > > generator.m_writer.seek(instIndex); > >@@ -4996,7 +5004,7 @@ void StructureForInContext::finalize(BytecodeGenerator& generator, UnlinkedCodeB > // 1. dst stays the same. > // 2. base stays the same. > // 3. property gets switched to the original property. >- OpGetByVal::emit<OpcodeSize::Wide>(&generator, bytecode.m_dst, bytecode.m_base, VirtualRegister(propertyRegIndex)); >+ OpGetByVal::emit<OpcodeSize::Wide32>(&generator, bytecode.m_dst, bytecode.m_base, VirtualRegister(propertyRegIndex)); > > // 4. 
nop out the remaining bytes > while (generator.m_writer.position() < end) >diff --git a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h >index 1c90313c1affb5950ba5dc69afbffac8ce4aa6d8..e97686aa3edcdd8d3f9b57c4a6d123bac9cb3987 100644 >--- a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h >+++ b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h >@@ -1162,8 +1162,13 @@ namespace JSC { > RegisterID* emitThrowExpressionTooDeepException(); > > void write(uint8_t byte) { m_writer.write(byte); } >+ void write(uint16_t h) { m_writer.write(h); } > void write(uint32_t i) { m_writer.write(i); } >- void alignWideOpcode(); >+ void write(int8_t byte) { m_writer.write(static_cast<uint8_t>(byte)); } >+ void write(int16_t h) { m_writer.write(static_cast<uint16_t>(h)); } >+ void write(int32_t i) { m_writer.write(static_cast<uint32_t>(i)); } >+ void alignWideOpcode16(); >+ void alignWideOpcode32(); > > class PreservedTDZStack { > private: >diff --git a/Source/JavaScriptCore/dfg/DFGCapabilities.cpp b/Source/JavaScriptCore/dfg/DFGCapabilities.cpp >index 20c2340cc00a0c2f4f2100f5ffafedda2867b023..dfe6c16d51a00cab03af98010754a96c7969e345 100644 >--- a/Source/JavaScriptCore/dfg/DFGCapabilities.cpp >+++ b/Source/JavaScriptCore/dfg/DFGCapabilities.cpp >@@ -108,7 +108,8 @@ CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, const I > UNUSED_PARAM(pc); > > switch (opcodeID) { >- case op_wide: >+ case op_wide16: >+ case op_wide32: > RELEASE_ASSERT_NOT_REACHED(); > case op_enter: > case op_to_this: >diff --git a/Source/JavaScriptCore/generator/Argument.rb b/Source/JavaScriptCore/generator/Argument.rb >index 99dcb93455200ebe230d0e163f0526444ba67218..2567576a5865e6895a993b23c26ab5c770a19605 100644 >--- a/Source/JavaScriptCore/generator/Argument.rb >+++ b/Source/JavaScriptCore/generator/Argument.rb >@@ -67,8 +67,10 @@ def setter > template<typename Functor> > void set#{capitalized_name}(#{@type.to_s} 
value, Functor func) > { >- if (isWide()) >- set#{capitalized_name}<OpcodeSize::Wide>(value, func); >+ if (isWide32()) >+ set#{capitalized_name}<OpcodeSize::Wide32>(value, func); >+ else if (isWide16()) >+ set#{capitalized_name}<OpcodeSize::Wide16>(value, func); > else > set#{capitalized_name}<OpcodeSize::Narrow>(value, func); > } >@@ -78,7 +80,7 @@ def setter > { > if (!#{Fits::check "size", "value", @type}) > value = func(); >- auto* stream = bitwise_cast<typename TypeBySize<size>::type*>(reinterpret_cast<uint8_t*>(this) + #{@index} * size + PaddingBySize<size>::value); >+ auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + #{@index} * size + PaddingBySize<size>::value); > *stream = #{Fits::convert "size", "value", @type}; > } > EOF >diff --git a/Source/JavaScriptCore/generator/DSL.rb b/Source/JavaScriptCore/generator/DSL.rb >index 9407aad24fb56c597bf8f2c069484d7655e901cd..92c7f946ceb0eec5b03a94c07b712f491f045662 100644 >--- a/Source/JavaScriptCore/generator/DSL.rb >+++ b/Source/JavaScriptCore/generator/DSL.rb >@@ -144,7 +144,7 @@ def self.write_init_asm(bytecode_list, init_asm_filename) > GeneratedFile::create(init_asm_filename, bytecode_list) do |template| > template.multiline_comment = nil > template.line_comment = "#" >- template.body = (opcodes.map.with_index(&:set_entry_address) + opcodes.map.with_index(&:set_entry_address_wide)) .join("\n") >+ template.body = (opcodes.map.with_index(&:set_entry_address) + opcodes.map.with_index(&:set_entry_address_wide16) + opcodes.map.with_index(&:set_entry_address_wide32)) .join("\n") > end > end > >diff --git a/Source/JavaScriptCore/generator/Opcode.rb b/Source/JavaScriptCore/generator/Opcode.rb >index 05c259595c80c97cc33f3ff8eb7b66b5160b4d1c..27fa285009f6719bcdaac6fb8e609dd923f5c960 100644 >--- a/Source/JavaScriptCore/generator/Opcode.rb >+++ b/Source/JavaScriptCore/generator/Opcode.rb >@@ -32,7 +32,8 @@ class Opcode > > module Size > Narrow = "OpcodeSize::Narrow" >- 
Wide = "OpcodeSize::Wide" >+ Wide16 = "OpcodeSize::Wide16" >+ Wide32 = "OpcodeSize::Wide32" > end > > @@id = 0 >@@ -81,7 +82,7 @@ def untyped_args > end > > def map_fields_with_size(prefix, size, &block) >- args = [Argument.new("opcodeID", :unsigned, 0)] >+ args = [Argument.new("opcodeID", :OpcodeID, 0)] > args += @args.dup if @args > unless @metadata.empty? > args << @metadata.emitter_local >@@ -108,7 +109,8 @@ def opcodeID > end > > def emitter >- op_wide = Argument.new("op_wide", :unsigned, 0) >+ op_wide16 = Argument.new("op_wide16", :OpcodeID, 0) >+ op_wide32 = Argument.new("op_wide32", :OpcodeID, 0) > metadata_param = @metadata.empty? ? "" : ", #{@metadata.emitter_local.create_param}" > metadata_arg = @metadata.empty? ? "" : ", #{@metadata.emitter_local.name}" > <<-EOF.chomp >@@ -116,7 +118,8 @@ def emitter > { > #{@metadata.create_emitter_local} > emit<OpcodeSize::Narrow, NoAssert, true>(gen#{untyped_args}#{metadata_arg}) >- || emit<OpcodeSize::Wide, Assert, true>(gen#{untyped_args}#{metadata_arg}); >+ || emit<OpcodeSize::Wide16, NoAssert, true>(gen#{untyped_args}#{metadata_arg}) >+ || emit<OpcodeSize::Wide32, Assert, true>(gen#{untyped_args}#{metadata_arg}); > } > #{%{ > template<OpcodeSize size, FitsAssertion shouldAssert = Assert> >@@ -138,14 +141,19 @@ def emitter > template<OpcodeSize size, bool recordOpcode> > static bool emitImpl(BytecodeGenerator* gen#{typed_args}#{metadata_param}) > { >- if (size == OpcodeSize::Wide) >- gen->alignWideOpcode(); >+ if (size == OpcodeSize::Wide16) >+ gen->alignWideOpcode16(); >+ else if (size == OpcodeSize::Wide32) >+ gen->alignWideOpcode32(); > if (#{map_fields_with_size("", "size", &:fits_check).join "\n && "} >- && (size == OpcodeSize::Wide ? #{op_wide.fits_check(Size::Narrow)} : true)) { >+ && (size == OpcodeSize::Wide16 ? #{op_wide16.fits_check(Size::Narrow)} : true) >+ && (size == OpcodeSize::Wide32 ? 
#{op_wide32.fits_check(Size::Narrow)} : true)) { > if (recordOpcode) > gen->recordOpcode(opcodeID); >- if (size == OpcodeSize::Wide) >- #{op_wide.fits_write Size::Narrow} >+ if (size == OpcodeSize::Wide16) >+ #{op_wide16.fits_write Size::Narrow} >+ else if (size == OpcodeSize::Wide32) >+ #{op_wide32.fits_write Size::Narrow} > #{map_fields_with_size(" ", "size", &:fits_write).join "\n"} > return true; > } >@@ -159,9 +167,9 @@ def emitter > def dumper > <<-EOF > template<typename Block> >- void dump(BytecodeDumper<Block>* dumper, InstructionStream::Offset __location, bool __isWide) >+ void dump(BytecodeDumper<Block>* dumper, InstructionStream::Offset __location, int __width) > { >- dumper->printLocationAndOp(__location, &"*#{@name}"[!__isWide]); >+ dumper->printLocationAndOp(__location, &"**#{@name}"[2 - __width]); > #{print_args { |arg| > <<-EOF.chomp > dumper->dumpOperand(#{arg.field_name}, #{arg.index == 1}); >@@ -181,20 +189,27 @@ def constructors > { > ASSERT_UNUSED(stream, stream[0] == opcodeID); > } >+ # >+ #{capitalized_name}(const uint16_t* stream) >+ #{init.call("OpcodeSize::Wide16")} >+ { >+ ASSERT_UNUSED(stream, stream[0] == opcodeID); >+ } >+ > > #{capitalized_name}(const uint32_t* stream) >- #{init.call("OpcodeSize::Wide")} >+ #{init.call("OpcodeSize::Wide32")} > { > ASSERT_UNUSED(stream, stream[0] == opcodeID); > } > > static #{capitalized_name} decode(const uint8_t* stream) > { >- if (*stream != op_wide) >- return { stream }; >- >- auto wideStream = bitwise_cast<const uint32_t*>(stream + 1); >- return { wideStream }; >+ if (*stream == op_wide32) >+ return { bitwise_cast<const uint32_t*>(stream + 1) }; >+ if (*stream == op_wide16) >+ return { bitwise_cast<const uint16_t*>(stream + 1) }; >+ return { stream }; > } > EOF > end >@@ -219,8 +234,12 @@ def set_entry_address(id) > "setEntryAddress(#{id}, _#{full_name})" > end > >- def set_entry_address_wide(id) >- "setEntryAddressWide(#{id}, _#{full_name}_wide)" >+ def set_entry_address_wide16(id) >+ 
"setEntryAddressWide16(#{id}, _#{full_name}_wide16)" >+ end >+ >+ def set_entry_address_wide32(id) >+ "setEntryAddressWide32(#{id}, _#{full_name}_wide32)" > end > > def struct_indices >@@ -253,7 +272,7 @@ def self.dump_bytecode(opcodes) > #{opcodes.map { |op| > <<-EOF.chomp > case #{op.name}: >- __instruction->as<#{op.capitalized_name}>().dump(dumper, __location, __instruction->isWide()); >+ __instruction->as<#{op.capitalized_name}>().dump(dumper, __location, __instruction->width()); > break; > EOF > }.join "\n"} >diff --git a/Source/JavaScriptCore/generator/Section.rb b/Source/JavaScriptCore/generator/Section.rb >index 7a6afcc2194d4c7af942613576970e8f13828ecf..8cd21db9168417dac389cca5c742ff7bebe6b12f 100644 >--- a/Source/JavaScriptCore/generator/Section.rb >+++ b/Source/JavaScriptCore/generator/Section.rb >@@ -100,7 +100,10 @@ def header_helpers(num_opcodes) > out.write("#define #{opcode.name}_value_string \"#{opcode.id}\"\n") > } > opcodes.each { |opcode| >- out.write("#define #{opcode.name}_wide_value_string \"#{num_opcodes + opcode.id}\"\n") >+ out.write("#define #{opcode.name}_wide16_value_string \"#{num_opcodes + opcode.id}\"\n") >+ } >+ opcodes.each { |opcode| >+ out.write("#define #{opcode.name}_wide32_value_string \"#{num_opcodes * 2 + opcode.id}\"\n") > } > end > out.string >diff --git a/Source/JavaScriptCore/jit/JITExceptions.cpp b/Source/JavaScriptCore/jit/JITExceptions.cpp >index 7fb225b17199d37b35991a7affb9e92cb2e91e72..95bbe508b7b051065bbad01d71ab9f94b6633900 100644 >--- a/Source/JavaScriptCore/jit/JITExceptions.cpp >+++ b/Source/JavaScriptCore/jit/JITExceptions.cpp >@@ -74,9 +74,12 @@ void genericUnwind(VM* vm, ExecState* callFrame) > #if ENABLE(JIT) > catchRoutine = handler->nativeCode.executableAddress(); > #else >- catchRoutine = catchPCForInterpreter->isWide() >- ? 
LLInt::getWideCodePtr(catchPCForInterpreter->opcodeID()) >- : LLInt::getCodePtr(catchPCForInterpreter->opcodeID()); >+ if (catchPCForInterpreter->isWide32()) >+ catchRoutine = LLInt::getWide32CodePtr(catchPCForInterpreter->opcodeID()); >+ else if (catchPCForInterpreter->isWide16()) >+ catchRoutine = LLInt::getWide16CodePtr(catchPCForInterpreter->opcodeID()); >+ else >+ catchRoutine = LLInt::getCodePtr(catchPCForInterpreter->opcodeID()); > #endif > } else > catchRoutine = LLInt::getCodePtr<ExceptionHandlerPtrTag>(handleUncaughtException).executableAddress(); >diff --git a/Source/JavaScriptCore/llint/LLIntData.cpp b/Source/JavaScriptCore/llint/LLIntData.cpp >index 58f18e47594e7684493479ed3b2121382291fc92..e34a79f58d764d3e94b46d404a928ec86f52344c 100644 >--- a/Source/JavaScriptCore/llint/LLIntData.cpp >+++ b/Source/JavaScriptCore/llint/LLIntData.cpp >@@ -49,10 +49,11 @@ namespace LLInt { > > uint8_t Data::s_exceptionInstructions[maxOpcodeLength + 1] = { }; > Opcode g_opcodeMap[numOpcodeIDs] = { }; >-Opcode g_opcodeMapWide[numOpcodeIDs] = { }; >+Opcode g_opcodeMapWide16[numOpcodeIDs] = { }; >+Opcode g_opcodeMapWide32[numOpcodeIDs] = { }; > > #if !ENABLE(C_LOOP) >-extern "C" void llint_entry(void*, void*); >+extern "C" void llint_entry(void*, void*, void*); > #endif > > void initialize() >@@ -61,11 +62,12 @@ void initialize() > CLoop::initialize(); > > #else // !ENABLE(C_LOOP) >- llint_entry(&g_opcodeMap, &g_opcodeMapWide); >+ llint_entry(&g_opcodeMap, &g_opcodeMapWide16, &g_opcodeMapWide32); > > for (int i = 0; i < numOpcodeIDs; ++i) { > g_opcodeMap[i] = tagCodePtr(g_opcodeMap[i], BytecodePtrTag); >- g_opcodeMapWide[i] = tagCodePtr(g_opcodeMapWide[i], BytecodePtrTag); >+ g_opcodeMapWide16[i] = tagCodePtr(g_opcodeMapWide16[i], BytecodePtrTag); >+ g_opcodeMapWide32[i] = tagCodePtr(g_opcodeMapWide32[i], BytecodePtrTag); > } > > ASSERT(llint_throw_from_slow_path_trampoline < UINT8_MAX); >diff --git a/Source/JavaScriptCore/llint/LLIntData.h 
b/Source/JavaScriptCore/llint/LLIntData.h >index b248abcda43653f3765935db59608679aad2f0ab..de39056636a249f53c5c2572e2a91fd588000bb3 100644 >--- a/Source/JavaScriptCore/llint/LLIntData.h >+++ b/Source/JavaScriptCore/llint/LLIntData.h >@@ -43,7 +43,8 @@ typedef void (*LLIntCode)(); > namespace LLInt { > > extern "C" JS_EXPORT_PRIVATE Opcode g_opcodeMap[numOpcodeIDs]; >-extern "C" JS_EXPORT_PRIVATE Opcode g_opcodeMapWide[numOpcodeIDs]; >+extern "C" JS_EXPORT_PRIVATE Opcode g_opcodeMapWide16[numOpcodeIDs]; >+extern "C" JS_EXPORT_PRIVATE Opcode g_opcodeMapWide32[numOpcodeIDs]; > > class Data { > >@@ -57,11 +58,14 @@ class Data { > > friend Instruction* exceptionInstructions(); > friend Opcode* opcodeMap(); >- friend Opcode* opcodeMapWide(); >+ friend Opcode* opcodeMapWide16(); >+ friend Opcode* opcodeMapWide32(); > friend Opcode getOpcode(OpcodeID); >- friend Opcode getOpcodeWide(OpcodeID); >+ friend Opcode getOpcodeWide16(OpcodeID); >+ friend Opcode getOpcodeWide32(OpcodeID); > template<PtrTag tag> friend MacroAssemblerCodePtr<tag> getCodePtr(OpcodeID); >- template<PtrTag tag> friend MacroAssemblerCodePtr<tag> getWideCodePtr(OpcodeID); >+ template<PtrTag tag> friend MacroAssemblerCodePtr<tag> getWide16CodePtr(OpcodeID); >+ template<PtrTag tag> friend MacroAssemblerCodePtr<tag> getWide32CodePtr(OpcodeID); > template<PtrTag tag> friend MacroAssemblerCodeRef<tag> getCodeRef(OpcodeID); > }; > >@@ -77,9 +81,14 @@ inline Opcode* opcodeMap() > return g_opcodeMap; > } > >-inline Opcode* opcodeMapWide() >+inline Opcode* opcodeMapWide16() > { >- return g_opcodeMapWide; >+ return g_opcodeMapWide16; >+} >+ >+inline Opcode* opcodeMapWide32() >+{ >+ return g_opcodeMapWide32; > } > > inline Opcode getOpcode(OpcodeID id) >@@ -91,10 +100,20 @@ inline Opcode getOpcode(OpcodeID id) > #endif > } > >-inline Opcode getOpcodeWide(OpcodeID id) >+inline Opcode getOpcodeWide16(OpcodeID id) >+{ >+#if ENABLE(COMPUTED_GOTO_OPCODES) >+ return g_opcodeMapWide16[id]; >+#else >+ UNUSED_PARAM(id); >+ 
RELEASE_ASSERT_NOT_REACHED(); >+#endif >+} >+ >+inline Opcode getOpcodeWide32(OpcodeID id) > { > #if ENABLE(COMPUTED_GOTO_OPCODES) >- return g_opcodeMapWide[id]; >+ return g_opcodeMapWide32[id]; > #else > UNUSED_PARAM(id); > RELEASE_ASSERT_NOT_REACHED(); >@@ -110,9 +129,17 @@ ALWAYS_INLINE MacroAssemblerCodePtr<tag> getCodePtr(OpcodeID opcodeID) > } > > template<PtrTag tag> >-ALWAYS_INLINE MacroAssemblerCodePtr<tag> getWideCodePtr(OpcodeID opcodeID) >+ALWAYS_INLINE MacroAssemblerCodePtr<tag> getWide16CodePtr(OpcodeID opcodeID) >+{ >+ void* address = reinterpret_cast<void*>(getOpcodeWide16(opcodeID)); >+ address = retagCodePtr<BytecodePtrTag, tag>(address); >+ return MacroAssemblerCodePtr<tag>::createFromExecutableAddress(address); >+} >+ >+template<PtrTag tag> >+ALWAYS_INLINE MacroAssemblerCodePtr<tag> getWide32CodePtr(OpcodeID opcodeID) > { >- void* address = reinterpret_cast<void*>(getOpcodeWide(opcodeID)); >+ void* address = reinterpret_cast<void*>(getOpcodeWide32(opcodeID)); > address = retagCodePtr<BytecodePtrTag, tag>(address); > return MacroAssemblerCodePtr<tag>::createFromExecutableAddress(address); > } >@@ -141,9 +168,14 @@ ALWAYS_INLINE void* getCodePtr(OpcodeID id) > return reinterpret_cast<void*>(getOpcode(id)); > } > >-ALWAYS_INLINE void* getWideCodePtr(OpcodeID id) >+ALWAYS_INLINE void* getWide16CodePtr(OpcodeID id) >+{ >+ return reinterpret_cast<void*>(getOpcodeWide16(id)); >+} >+ >+ALWAYS_INLINE void* getWide32CodePtr(OpcodeID id) > { >- return reinterpret_cast<void*>(getOpcodeWide(id)); >+ return reinterpret_cast<void*>(getOpcodeWide32(id)); > } > #endif > >diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp >index 2e90c701da1ec75b1822aa6d7b3be7c7426f25b7..780f351b85801ab4f49214386a924c8bbb676951 100644 >--- a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp >+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp >@@ -1745,9 +1745,14 @@ LLINT_SLOW_PATH_DECL(slow_path_call_eval) > return 
commonCallEval(exec, pc, LLInt::getCodePtr<JSEntryPtrTag>(llint_generic_return_point)); > } > >-LLINT_SLOW_PATH_DECL(slow_path_call_eval_wide) >+LLINT_SLOW_PATH_DECL(slow_path_call_eval_wide16) > { >- return commonCallEval(exec, pc, LLInt::getWideCodePtr<JSEntryPtrTag>(llint_generic_return_point)); >+ return commonCallEval(exec, pc, LLInt::getWide16CodePtr<JSEntryPtrTag>(llint_generic_return_point)); >+} >+ >+LLINT_SLOW_PATH_DECL(slow_path_call_eval_wide32) >+{ >+ return commonCallEval(exec, pc, LLInt::getWide32CodePtr<JSEntryPtrTag>(llint_generic_return_point)); > } > > LLINT_SLOW_PATH_DECL(slow_path_strcat) >diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.h b/Source/JavaScriptCore/llint/LLIntSlowPaths.h >index dc357a161ca08bdcc3cf9500718a038e1f88b215..c24c2d861a6de9eabe4fa12bd6f0ba97838f5854 100644 >--- a/Source/JavaScriptCore/llint/LLIntSlowPaths.h >+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.h >@@ -117,7 +117,8 @@ LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_tail_call_varargs); > LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_tail_call_forward_arguments); > LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_construct_varargs); > LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_call_eval); >-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_call_eval_wide); >+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_call_eval_wide16); >+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_call_eval_wide32); > LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_tear_off_arguments); > LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_strcat); > LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_to_primitive); >diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm >index bc419ed617900013344e615dae86fb4611f71902..6b4b4f2be0b5205dfadbb90571979d49f300e3e3 100644 >--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm >+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm >@@ -1,4 +1,4 @@ >-# Copyright (C) 2011-2019 Apple Inc. All rights reserved. >+# Copyright (C) 2011-2019 Apple Inc. All rights reserved.
> # > # Redistribution and use in source and binary forms, with or without > # modification, are permitted provided that the following conditions >@@ -308,31 +308,39 @@ macro dispatchOp(size, opcodeName) > dispatch(constexpr %opcodeName%_length) > end > >- macro dispatchWide() >+ macro dispatchWide16() >+ dispatch(constexpr %opcodeName%_length * 2 + 1) >+ end >+ >+ macro dispatchWide32() > dispatch(constexpr %opcodeName%_length * 4 + 1) > end > >- size(dispatchNarrow, dispatchWide, macro (dispatch) dispatch() end) >+ size(dispatchNarrow, dispatchWide16, dispatchWide32, macro (dispatch) dispatch() end) > end > > macro getu(size, opcodeStruct, fieldName, dst) >- size(getuOperandNarrow, getuOperandWide, macro (getu) >+ size(getuOperandNarrow, getuOperandWide16, getuOperandWide32, macro (getu) > getu(opcodeStruct, fieldName, dst) > end) > end > > macro get(size, opcodeStruct, fieldName, dst) >- size(getOperandNarrow, getOperandWide, macro (get) >+ size(getOperandNarrow, getOperandWide16, getOperandWide32, macro (get) > get(opcodeStruct, fieldName, dst) > end) > end > >-macro narrow(narrowFn, wideFn, k) >+macro narrow(narrowFn, wide16Fn, wide32Fn, k) > k(narrowFn) > end > >-macro wide(narrowFn, wideFn, k) >- k(wideFn) >+macro wide16(narrowFn, wide16Fn, wide32Fn, k) >+ k(wide16Fn) >+end >+ >+macro wide32(narrowFn, wide16Fn, wide32Fn, k) >+ k(wide32Fn) > end > > macro metadata(size, opcode, dst, scratch) >@@ -356,9 +364,13 @@ _%label%: > prologue() > fn(narrow) > >-_%label%_wide: >+_%label%_wide16: >+ prologue() >+ fn(wide16) >+ >+_%label%_wide32: > prologue() >- fn(wide) >+ fn(wide32) > end > > macro op(l, fn) >@@ -470,7 +482,8 @@ const ImplementsDefaultHasInstance = constexpr ImplementsDefaultHasInstance > > # Bytecode operand constants.
> const FirstConstantRegisterIndexNarrow = 16 >-const FirstConstantRegisterIndexWide = constexpr FirstConstantRegisterIndex >+const FirstConstantRegisterIndexWide16 = 64 >+const FirstConstantRegisterIndexWide32 = constexpr FirstConstantRegisterIndex > > # Code type constants. > const GlobalCode = constexpr GlobalCode >@@ -1021,7 +1034,7 @@ macro checkSwitchToJITForEpilogue() > end > > macro assertNotConstant(size, index) >- size(FirstConstantRegisterIndexNarrow, FirstConstantRegisterIndexWide, macro (FirstConstantRegisterIndex) >+ size(FirstConstantRegisterIndexNarrow, FirstConstantRegisterIndexWide16, FirstConstantRegisterIndexWide32, macro (FirstConstantRegisterIndex) > assert(macro (ok) bilt index, FirstConstantRegisterIndex, ok end) > end) > end >@@ -1306,41 +1319,45 @@ else > end > end > >-# The PC base is in t2, as this is what _llint_entry leaves behind through >-# initPCRelative(t2) >+# The PC base is in t3, as this is what _llint_entry leaves behind through >+# initPCRelative(t3) > macro setEntryAddress(index, label) > setEntryAddressCommon(index, label, a0) > end > >-macro setEntryAddressWide(index, label) >+macro setEntryAddressWide16(index, label) > setEntryAddressCommon(index, label, a1) > end > >+macro setEntryAddressWide32(index, label) >+ setEntryAddressCommon(index, label, a2) >+end >+ > macro setEntryAddressCommon(index, label, map) > if X86_64 or X86_64_WIN >- leap (label - _relativePCBase)[t2], t3 >- move index, t4 >- storep t3, [map, t4, 8] >+ leap (label - _relativePCBase)[t3], t4 >+ move index, t5 >+ storep t4, [map, t5, 8] > elsif X86 or X86_WIN >- leap (label - _relativePCBase)[t2], t3 >- move index, t4 >- storep t3, [map, t4, 4] >+ leap (label - _relativePCBase)[t3], t4 >+ move index, t5 >+ storep t4, [map, t5, 4] > elsif ARM64 or ARM64E >- pcrtoaddr label, t2 >+ pcrtoaddr label, t3 > move index, t4 >- storep t2, [map, t4, PtrSize] >+ storep t3, [map, t4, PtrSize] > elsif ARMv7 > mvlbl (label - _relativePCBase), t4 >- addp t4, t2, t4 >- 
move index, t3 >- storep t4, [map, t3, 4] >+ addp t4, t3, t4 >+ move index, t5 >+ storep t4, [map, t5, 4] > elsif MIPS > la label, t4 > la _relativePCBase, t3 > subp t3, t4 >- addp t4, t2, t4 >- move index, t3 >- storep t4, [map, t3, 4] >+ addp t4, t3, t4 >+ move index, t5 >+ storep t4, [map, t5, 4] > end > end > >@@ -1352,9 +1369,10 @@ _llint_entry: > if X86 or X86_WIN > loadp 20[sp], a0 > loadp 24[sp], a1 >+ loadp 28[sp], a2 > end > >- initPCRelative(t2) >+ initPCRelative(t3) > > # Include generated bytecode initialization file. > include InitBytecodes >@@ -1364,14 +1382,23 @@ _llint_entry: > ret > end > >-_llint_op_wide: >- nextInstructionWide() >+_llint_op_wide16: >+ nextInstructionWide16() > >-_llint_op_wide_wide: >+_llint_op_wide32: >+ nextInstructionWide32() >+ >+macro noWide(label) >+_llint_%label%_wide16: > crash() > >-_llint_op_enter_wide: >+_llint_%label%_wide32: > crash() >+end >+ >+noWide(op_wide16) >+noWide(op_wide32) >+noWide(op_enter) > > op(llint_program_prologue, macro () > prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue) >@@ -1772,12 +1799,20 @@ _llint_op_call_eval: > _llint_slow_path_call_eval, > prepareForRegularCall) > >-_llint_op_call_eval_wide: >+_llint_op_call_eval_wide16: > slowPathForCall( >- wide, >+ wide16, > OpCallEval, >- macro () dispatchOp(wide, op_call_eval) end, >- _llint_slow_path_call_eval_wide, >+ macro () dispatchOp(wide16, op_call_eval) end, >+ _llint_slow_path_call_eval_wide16, >+ prepareForRegularCall) >+ >+_llint_op_call_eval_wide32: >+ slowPathForCall( >+ wide32, >+ OpCallEval, >+ macro () dispatchOp(wide32, op_call_eval) end, >+ _llint_slow_path_call_eval_wide32, > prepareForRegularCall) > > _llint_generic_return_point: >@@ -1785,9 +1820,14 @@ _llint_generic_return_point: > dispatchOp(narrow, op_call_eval) > end) > >-_llint_generic_return_point_wide: >- dispatchAfterCall(wide, OpCallEval, macro() >- dispatchOp(wide, op_call_eval) 
>+_llint_generic_return_point_wide16: >+ dispatchAfterCall(wide16, OpCallEval, macro() >+ dispatchOp(wide16, op_call_eval) >+ end) >+ >+_llint_generic_return_point_wide32: >+ dispatchAfterCall(wide32, OpCallEval, macro() >+ dispatchOp(wide32, op_call_eval) > end) > > llintOp(op_identity_with_profile, OpIdentityWithProfile, macro (unused, unused, dispatch) >diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp >index 6c4cee7c539cf047501932d9acdaa5666cf2ade1..6952e21e1a8f22f87eb52c9fbe97f53bf92981c9 100644 >--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp >+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp >@@ -249,12 +249,14 @@ JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, > // are at play. > if (UNLIKELY(isInitializationPass)) { > Opcode* opcodeMap = LLInt::opcodeMap(); >- Opcode* opcodeMapWide = LLInt::opcodeMapWide(); >+ Opcode* opcodeMapWide16 = LLInt::opcodeMapWide16(); >+ Opcode* opcodeMapWide32 = LLInt::opcodeMapWide32(); > > #if ENABLE(COMPUTED_GOTO_OPCODES) > #define OPCODE_ENTRY(__opcode, length) \ > opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode); \ >- opcodeMapWide[__opcode] = bitwise_cast<void*>(&&__opcode##_wide); >+ opcodeMapWide16[__opcode] = bitwise_cast<void*>(&&__opcode##_wide16); \ >+ opcodeMapWide32[__opcode] = bitwise_cast<void*>(&&__opcode##_wide32); > > #define LLINT_OPCODE_ENTRY(__opcode, length) \ > opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode); >@@ -263,7 +265,8 @@ JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, > // narrow opcodes don't need any mapping and wide opcodes just need to add numOpcodeIDs > #define OPCODE_ENTRY(__opcode, length) \ > opcodeMap[__opcode] = __opcode; \ >- opcodeMapWide[__opcode] = static_cast<OpcodeID>(__opcode##_wide); >+ opcodeMapWide16[__opcode] = static_cast<OpcodeID>(__opcode##_wide16); \ >+ opcodeMapWide32[__opcode] = 
static_cast<OpcodeID>(__opcode##_wide32); > > #define LLINT_OPCODE_ENTRY(__opcode, length) \ > opcodeMap[__opcode] = __opcode; >diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm >index c77fb7e36bcde53ea34ade7e483906aec5bf3632..513b9bbbd3c01b39941d601bbca2ee4b0ab557e2 100644 >--- a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm >+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm >@@ -29,9 +29,15 @@ macro nextInstruction() > jmp [t1, t0, 4], BytecodePtrTag > end > >-macro nextInstructionWide() >+macro nextInstructionWide16() >+ loadh 1[PC], t0 >+ leap _g_opcodeMapWide16, t1 >+ jmp [t1, t0, 4], BytecodePtrTag >+end >+ >+macro nextInstructionWide32() > loadi 1[PC], t0 >- leap _g_opcodeMapWide, t1 >+ leap _g_opcodeMapWide32, t1 > jmp [t1, t0, 4], BytecodePtrTag > end > >@@ -43,11 +49,19 @@ macro getOperandNarrow(opcodeStruct, fieldName, dst) > loadbsp constexpr %opcodeStruct%_%fieldName%_index[PC], dst > end > >-macro getuOperandWide(opcodeStruct, fieldName, dst) >+macro getuOperandWide16(opcodeStruct, fieldName, dst) >+ loadh constexpr %opcodeStruct%_%fieldName%_index * 2 + 1[PC], dst >+end >+ >+macro getOperandWide16(opcodeStruct, fieldName, dst) >+ loadhsp constexpr %opcodeStruct%_%fieldName%_index * 2 + 1[PC], dst >+end >+ >+macro getuOperandWide32(opcodeStruct, fieldName, dst) > loadi constexpr %opcodeStruct%_%fieldName%_index * 4 + 1[PC], dst > end > >-macro getOperandWide(opcodeStruct, fieldName, dst) >+macro getOperandWide32(opcodeStruct, fieldName, dst) > loadis constexpr %opcodeStruct%_%fieldName%_index * 4 + 1[PC], dst > end > >@@ -447,7 +461,7 @@ end > # Index, tag, and payload must be different registers. Index is not > # changed. 
> macro loadConstantOrVariable(size, index, tag, payload) >- size(FirstConstantRegisterIndexNarrow, FirstConstantRegisterIndexWide, macro (FirstConstantRegisterIndex) >+ size(FirstConstantRegisterIndexNarrow, FirstConstantRegisterIndexWide16, FirstConstantRegisterIndexWide32, macro (FirstConstantRegisterIndex) > bigteq index, FirstConstantRegisterIndex, .constant > loadi TagOffset[cfr, index, 8], tag > loadi PayloadOffset[cfr, index, 8], payload >@@ -463,7 +477,7 @@ macro loadConstantOrVariable(size, index, tag, payload) > end > > macro loadConstantOrVariableTag(size, index, tag) >- size(FirstConstantRegisterIndexNarrow, FirstConstantRegisterIndexWide, macro (FirstConstantRegisterIndex) >+ size(FirstConstantRegisterIndexNarrow, FirstConstantRegisterIndexWide16, FirstConstantRegisterIndexWide32, macro (FirstConstantRegisterIndex) > bigteq index, FirstConstantRegisterIndex, .constant > loadi TagOffset[cfr, index, 8], tag > jmp .done >@@ -478,7 +492,7 @@ end > > # Index and payload may be the same register. Index may be clobbered. 
macro loadConstantOrVariable2Reg(size, index, tag, payload) >- size(FirstConstantRegisterIndexNarrow, FirstConstantRegisterIndexWide, macro (FirstConstantRegisterIndex) >+ size(FirstConstantRegisterIndexNarrow, FirstConstantRegisterIndexWide16, FirstConstantRegisterIndexWide32, macro (FirstConstantRegisterIndex) > bigteq index, FirstConstantRegisterIndex, .constant > loadi TagOffset[cfr, index, 8], tag > loadi PayloadOffset[cfr, index, 8], payload >@@ -496,7 +510,7 @@ macro loadConstantOrVariable2Reg(size, index, tag, payload) > end > > macro loadConstantOrVariablePayloadTagCustom(size, index, tagCheck, payload) >- size(FirstConstantRegisterIndexNarrow, FirstConstantRegisterIndexWide, macro (FirstConstantRegisterIndex) >+ size(FirstConstantRegisterIndexNarrow, FirstConstantRegisterIndexWide16, FirstConstantRegisterIndexWide32, macro (FirstConstantRegisterIndex) > bigteq index, FirstConstantRegisterIndex, .constant > tagCheck(TagOffset[cfr, index, 8]) > loadi PayloadOffset[cfr, index, 8], payload >diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm >index c80743584509cfee30993a6eb11255d213351c62..29c3aa011029c52f8df19a408316351e883f0772 100644 >--- a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm >+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm >@@ -30,9 +30,15 @@ macro nextInstruction() > jmp [t1, t0, PtrSize], BytecodePtrTag > end > >-macro nextInstructionWide() >+macro nextInstructionWide16() >+ loadh 1[PB, PC, 1], t0 >+ leap _g_opcodeMapWide16, t1 >+ jmp [t1, t0, PtrSize], BytecodePtrTag >+end >+ >+macro nextInstructionWide32() > loadi 1[PB, PC, 1], t0 >- leap _g_opcodeMapWide, t1 >+ leap _g_opcodeMapWide32, t1 > jmp [t1, t0, PtrSize], BytecodePtrTag > end > >@@ -44,11 +50,19 @@ macro getOperandNarrow(opcodeStruct, fieldName, dst) > loadbsp constexpr %opcodeStruct%_%fieldName%_index[PB, PC, 1], dst > end > >-macro getuOperandWide(opcodeStruct, fieldName, dst) >+macro
getuOperandWide16(opcodeStruct, fieldName, dst) >+ loadh constexpr %opcodeStruct%_%fieldName%_index * 2 + 1[PB, PC, 1], dst >+end >+ >+macro getOperandWide16(opcodeStruct, fieldName, dst) >+ loadhsp constexpr %opcodeStruct%_%fieldName%_index * 2 + 1[PB, PC, 1], dst >+end >+ >+macro getuOperandWide32(opcodeStruct, fieldName, dst) > loadi constexpr %opcodeStruct%_%fieldName%_index * 4 + 1[PB, PC, 1], dst > end > >-macro getOperandWide(opcodeStruct, fieldName, dst) >+macro getOperandWide32(opcodeStruct, fieldName, dst) > loadis constexpr %opcodeStruct%_%fieldName%_index * 4 + 1[PB, PC, 1], dst > end > >@@ -450,19 +464,31 @@ macro loadConstantOrVariable(size, index, value) > .done: > end > >- macro loadWide() >- bpgteq index, FirstConstantRegisterIndexWide, .constant >+ macro loadWide16() >+ bpgteq index, FirstConstantRegisterIndexWide16, .constant >+ loadq [cfr, index, 8], value >+ jmp .done >+ .constant: >+ loadp CodeBlock[cfr], value >+ loadp CodeBlock::m_constantRegisters + VectorBufferOffset[value], value >+ subp FirstConstantRegisterIndexWide16, index >+ loadq [value, index, 8], value >+ .done: >+ end >+ >+ macro loadWide32() >+ bpgteq index, FirstConstantRegisterIndexWide32, .constant > loadq [cfr, index, 8], value > jmp .done > .constant: > loadp CodeBlock[cfr], value > loadp CodeBlock::m_constantRegisters + VectorBufferOffset[value], value >- subp FirstConstantRegisterIndexWide, index >+ subp FirstConstantRegisterIndexWide32, index > loadq [value, index, 8], value > .done: > end > >- size(loadNarrow, loadWide, macro (load) load() end) >+ size(loadNarrow, loadWide16, loadWide32, macro (load) load() end) > end > > macro loadConstantOrVariableInt32(size, index, value, slow) >diff --git a/Source/JavaScriptCore/offlineasm/arm64.rb b/Source/JavaScriptCore/offlineasm/arm64.rb >index 9c0cbdca34b01df090af76718557a4bf57f6d9a9..da0da6bbd58465dcbab5b698b0cf31f0c7e4a3a6 100644 >--- a/Source/JavaScriptCore/offlineasm/arm64.rb >+++ b/Source/JavaScriptCore/offlineasm/arm64.rb 
>@@ -710,7 +710,7 @@ def lowerARM64 > when "loadb" > emitARM64Access("ldrb", "ldurb", operands[1], operands[0], :word) > when "loadbs" >- emitARM64Access("ldrsb", "ldursb", operands[1], operands[0], :word) >+ emitARM64Access("ldrsb", "ldursb", operands[1], operands[0], :quad) > when "loadbsp" > emitARM64Access("ldrsb", "ldursb", operands[1], operands[0], :ptr) > when "storeb" >@@ -718,7 +718,7 @@ def lowerARM64 > when "loadh" > emitARM64Access("ldrh", "ldurh", operands[1], operands[0], :word) > when "loadhs" >- emitARM64Access("ldrsh", "ldursh", operands[1], operands[0], :word) >+ emitARM64Access("ldrsh", "ldursh", operands[1], operands[0], :quad) > when "storeh" > emitARM64Unflipped("strh", operands, :word) > when "loadd" >diff --git a/Source/JavaScriptCore/offlineasm/x86.rb b/Source/JavaScriptCore/offlineasm/x86.rb >index f2deba81b76317568d812e6b8dc750ad34245bd8..68715e517e872a570e2fc8a7c94fb48bf2501a7b 100644 >--- a/Source/JavaScriptCore/offlineasm/x86.rb >+++ b/Source/JavaScriptCore/offlineasm/x86.rb >@@ -959,9 +959,23 @@ def lowerX86Common > end > when "loadhs" > if !isIntelSyntax >- $asm.puts "movswl #{x86LoadOperands(:half, :int)}" >+ if isX64 >+ $asm.puts "movswq #{x86LoadOperands(:half, :quad)}" >+ else >+ $asm.puts "movswl #{x86LoadOperands(:half, :int)}" >+ end >+ else >+ if isX64 >+ $asm.puts "movsx #{x86LoadOperands(:half, :quad)}" >+ else >+ $asm.puts "movsx #{x86LoadOperands(:half, :int)}" >+ end >+ end >+ when "loadhsp" >+ if !isIntelSyntax >+ $asm.puts "movsw#{x86Suffix(:ptr)} #{x86LoadOperands(:half, :ptr)}" > else >- $asm.puts "movsx #{x86LoadOperands(:half, :int)}" >+ $asm.puts "movsx #{x86LoadOperands(:half, :ptr)}" > end > when "storeb" > $asm.puts "mov#{x86Suffix(:byte)} #{x86Operands(:byte, :byte)}"
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Formatted Diff
|
Diff
Attachments on
bug 197979
:
370128
|
370215
|
370216
|
370235
|
370308
|
370311
|
370312
|
370313
|
370314
|
370475
|
370476
|
370481
|
370486
|
370496
|
370516
|
370524
|
370529
|
370534
|
370556
|
370607
|
370616
|
370623
|
370626
|
370690
|
370691
|
370714
|
370715
|
370718
|
370783
|
370791
|
370797
|
370798
|
370819
|
370922
|
370929