WebKit Bugzilla
Attachment 369792 Details for Bug 196943: Add a baseline tracelet JIT
Description: WIP
Filename: c-backup.diff
MIME Type: text/plain
Creator: Saam Barati
Created: 2019-05-13 15:41:45 PDT
Size: 157.68 KB
Flags: patch, obsolete
>Index: Source/JavaScriptCore/bytecode/ArithProfile.h >=================================================================== >--- Source/JavaScriptCore/bytecode/ArithProfile.h (revision 245246) >+++ Source/JavaScriptCore/bytecode/ArithProfile.h (working copy) >@@ -310,6 +310,11 @@ private: > friend class JSC::LLIntOffsetsExtractor; > }; > >+struct TraceProfile { >+ unsigned start; >+ unsigned end; // Not inclusive of this instruction. >+}; >+ > } // namespace JSC > > namespace WTF { >Index: Source/JavaScriptCore/bytecode/BytecodeDumper.cpp >=================================================================== >--- Source/JavaScriptCore/bytecode/BytecodeDumper.cpp (revision 245246) >+++ Source/JavaScriptCore/bytecode/BytecodeDumper.cpp (working copy) >@@ -110,6 +110,25 @@ void BytecodeDumper<Block>::dumpIdentifi > } > } > >+template<> >+void BytecodeDumper<UnlinkedCodeBlock>::dumpPreciseJumpTargets() >+{ >+} >+ >+template<> >+void BytecodeDumper<CodeBlock>::dumpPreciseJumpTargets() >+{ >+ m_out.printf("\nPreciseJumpTargets = { "); >+ Vector<InstructionStream::Offset, 32> jumpTargets; >+ computePreciseJumpTargets(m_block, jumpTargets); >+ for (size_t i = 0; i < jumpTargets.size(); ++i) { >+ m_out.print(jumpTargets[i]); >+ if (i + 1 < jumpTargets.size()) >+ m_out.print(", "); >+ } >+ m_out.printf(" }\n"); >+} >+ > template<class Block> > void BytecodeDumper<Block>::dumpConstants() > { >@@ -225,6 +244,7 @@ void BytecodeDumper<Block>::dumpBlock(Bl > dumper.dumpExceptionHandlers(); > dumper.dumpSwitchJumpTables(); > dumper.dumpStringSwitchJumpTables(); >+ dumper.dumpPreciseJumpTargets(); > > out.printf("\n"); > } >Index: Source/JavaScriptCore/bytecode/BytecodeDumper.h >=================================================================== >--- Source/JavaScriptCore/bytecode/BytecodeDumper.h (revision 245246) >+++ Source/JavaScriptCore/bytecode/BytecodeDumper.h (working copy) >@@ -82,6 +82,7 @@ private: > void dumpExceptionHandlers(); > void dumpSwitchJumpTables(); > void dumpStringSwitchJumpTables(); >+ void dumpPreciseJumpTargets(); > > void dumpBytecode(const InstructionStream::Ref& it, const ICStatusMap&); > >Index: Source/JavaScriptCore/bytecode/BytecodeList.rb >=================================================================== >--- Source/JavaScriptCore/bytecode/BytecodeList.rb (revision 245246) >+++ Source/JavaScriptCore/bytecode/BytecodeList.rb (working copy) >@@ -61,6 +61,7 @@ types [ > :ArrayProfile, > :ArrayAllocationProfile, > :ObjectAllocationProfile, >+ :TraceProfile, > ] > > namespace :Special do >@@ -661,6 +662,13 @@ op_group :BinaryJmp, > > op :loop_hint > >+op :trace_hint, >+ metadata: { >+ entrypoint: uintptr_t, >+ traceProfile: TraceProfile, >+ count: int, >+ } >+ > op_group :SwitchValue, > [ > :switch_imm, >Index: Source/JavaScriptCore/bytecode/BytecodeUseDef.h >=================================================================== >--- Source/JavaScriptCore/bytecode/BytecodeUseDef.h (revision 245246) >+++ Source/JavaScriptCore/bytecode/BytecodeUseDef.h (working copy) >@@ -76,6 +76,7 @@ void computeUsesForBytecodeOffset(Block* > case op_debug: > case op_jneq_ptr: > case op_loop_hint: >+ case op_trace_hint: > case op_jmp: > case op_new_object: > case op_enter: >@@ -321,6 +322,7 @@ void computeDefsForBytecodeOffset(Block* > case op_jbelow: > case op_jbeloweq: > case op_loop_hint: >+ case op_trace_hint: > case op_switch_imm: > case op_switch_char: > case op_switch_string: >Index: Source/JavaScriptCore/bytecode/CodeBlock.cpp 
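The new TraceProfile above marks a half-open bytecode range [start, end), and the new op_trace_hint opcode carries one in its metadata: trace hints carve a code block's instruction stream into such ranges. A minimal standalone sketch of how the hint offsets delimit traces (the types here are simplified stand-ins, not the real JSC definitions):

    #include <cstdio>
    #include <vector>

    struct TraceProfile {
        unsigned start;
        unsigned end; // Half-open: the trace covers [start, end).
    };

    // Given the bytecode offsets of every op_trace_hint plus the total
    // instruction stream size, produce the trace ranges they delimit.
    std::vector<TraceProfile> makeTraces(const std::vector<unsigned>& hintOffsets, unsigned instructionsSize)
    {
        std::vector<TraceProfile> traces;
        for (size_t i = 0; i < hintOffsets.size(); ++i) {
            unsigned end = (i + 1 < hintOffsets.size()) ? hintOffsets[i + 1] : instructionsSize;
            traces.push_back({ hintOffsets[i], end });
        }
        return traces;
    }

    int main()
    {
        for (auto trace : makeTraces({ 0, 12, 40 }, 57))
            std::printf("[%u, %u)\n", trace.start, trace.end); // [0,12) [12,40) [40,57)
    }

This mirrors the CodeBlock::finishCreation hunk below, where each op_trace_hint closes the previous profile's end and opens its own start, and the final profile is closed at instructionsSize().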
>=================================================================== >--- Source/JavaScriptCore/bytecode/CodeBlock.cpp (revision 245246) >+++ Source/JavaScriptCore/bytecode/CodeBlock.cpp (working copy) >@@ -190,7 +190,7 @@ void CodeBlock::dumpAssumingJITType(Prin > > if (codeType() == FunctionCode) > out.print(specializationKind()); >- out.print(", ", instructionsSize()); >+ out.print(", ", bytecodeCost()); > if (this->jitType() == JITType::BaselineJIT && m_shouldAlwaysBeInlined) > out.print(" (ShouldAlwaysBeInlined)"); > if (ownerExecutable()->neverInline()) >@@ -522,6 +522,19 @@ bool CodeBlock::finishCreation(VM& vm, S > break; \ > } > >+ TraceProfile* lastTraceProfile = nullptr; >+ OpTraceHint::Metadata* lastTraceHintMetadata = nullptr; >+ >+ auto setTraceCount = [&] { >+ double threshold = lastTraceProfile->end - lastTraceProfile->start; >+ threshold *= Options::traceThresholdMultiplier(); >+ int32_t count = threshold; >+ count = std::max(Options::minimumTraceThreshold(), count); >+ count = std::min(Options::maximumTraceThreshold(), count); >+ count = -count; >+ lastTraceHintMetadata->m_count = count; >+ }; >+ > const InstructionStream& instructionStream = instructions(); > for (const auto& instruction : instructionStream) { > OpcodeID opcodeID = instruction->opcodeID(); >@@ -771,12 +784,33 @@ bool CodeBlock::finishCreation(VM& vm, S > m_numberOfArgumentsToSkip = numberOfArgumentsToSkip; > break; > } >+ >+ case op_trace_hint: { >+ INITIALIZE_METADATA(OpTraceHint) >+ unsigned offset = instruction.offset(); >+ if (lastTraceProfile) { >+ lastTraceProfile->end = offset; >+ setTraceCount(); >+ } >+ >+ metadata.m_traceProfile.start = offset; >+ lastTraceProfile = &metadata.m_traceProfile; >+ lastTraceHintMetadata = &metadata; >+ >+ metadata.m_entrypoint = 0; >+ break; >+ } > > default: > break; > } > } > >+ if (lastTraceProfile) { >+ lastTraceProfile->end = instructionsSize(); >+ setTraceCount(); >+ } >+ > #undef CASE > #undef INITIALIZE_METADATA > #undef LINK_FIELD >@@ -1068,7 +1102,8 @@ void CodeBlock::propagateTransitions(con > > VM& vm = *m_vm; > >- if (jitType() == JITType::InterpreterThunk) { >+ //if (jitType() == JITType::InterpreterThunk) { >+ if (JITCode::couldBeInterpreted(jitType())) { > const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions(); > const InstructionStream& instructionStream = instructions(); > for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) { >@@ -1487,20 +1522,14 @@ CallLinkInfo* CodeBlock::getCallLinkInfo > return nullptr; > } > >-RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset) >-{ >- ConcurrentJSLocker locker(m_lock); >- auto& jitData = ensureJITData(locker); >- jitData.m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset)); >- return &jitData.m_rareCaseProfiles.last(); >-} >- > RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset) > { > if (auto* jitData = m_jitData.get()) { >- return tryBinarySearch<RareCaseProfile, int>( >+ std::unique_ptr<RareCaseProfile>* uniquePtr = tryBinarySearch<std::unique_ptr<RareCaseProfile>, int>( > jitData->m_rareCaseProfiles, jitData->m_rareCaseProfiles.size(), bytecodeOffset, >- getRareCaseProfileBytecodeOffset); >+ [] (std::unique_ptr<RareCaseProfile>* rareCaseProfile) { return rareCaseProfile->get()->m_bytecodeOffset; }); >+ if (uniquePtr) >+ return uniquePtr->get(); > } > return nullptr; > } >@@ -1525,6 +1554,9 @@ void CodeBlock::setCalleeSaveRegisters(s > 
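The setTraceCount lambda above sizes each trace's execution counter by its bytecode length, so longer traces must prove themselves hotter before being compiled, and stores the count negated, presumably so the interpreter can increment toward zero in the usual JSC counter style. A standalone sketch of that clamp (the option values here are illustrative stand-ins for Options::traceThresholdMultiplier() and friends):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Stand-ins for the tunable Options values.
    static const double traceThresholdMultiplier = 2.0;
    static const int32_t minimumTraceThreshold = 10;
    static const int32_t maximumTraceThreshold = 1000;

    int32_t traceCounterFor(unsigned start, unsigned end)
    {
        double threshold = (end - start) * traceThresholdMultiplier;
        int32_t count = static_cast<int32_t>(threshold);
        count = std::max(minimumTraceThreshold, count);
        count = std::min(maximumTraceThreshold, count);
        return -count; // Counter counts up toward zero; zero trips compilation.
    }

    int main()
    {
        std::printf("%d\n", (int)traceCounterFor(12, 40)); // 28 bytecodes * 2.0 -> -56
    }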
ensureJITData(locker).m_calleeSaveRegisters = WTFMove(registerAtOffsetList); > } > >+/* >+ OOPS: Make this temporary per compile until we actually >+ get executable code! > void CodeBlock::resetJITData() > { > RELEASE_ASSERT(!JITCode::isJIT(jitType())); >@@ -1543,6 +1575,36 @@ void CodeBlock::resetJITData() > jitData->m_rareCaseProfiles.clear(); > } > } >+*/ >+ >+void CodeBlock::adoptCompileData(JITData::CompileData&& data) >+{ >+ if (data.isEmpty()) >+ return; >+ >+ ConcurrentJSLocker locker(m_lock); >+ auto& jitData = ensureJITData(locker); >+ >+ jitData.m_stubInfos.adopt(data.stubInfos); >+ jitData.m_addICs.adopt(data.addICs); >+ jitData.m_mulICs.adopt(data.mulICs); >+ jitData.m_negICs.adopt(data.negICs); >+ jitData.m_subICs.adopt(data.subICs); >+ jitData.m_byValInfos.adopt(data.byValInfos); >+ jitData.m_callLinkInfos.adopt(data.callLinkInfos); >+ >+ if (data.rareCaseProfiles.size()) { >+ jitData.m_rareCaseProfiles.reserveCapacity(jitData.m_rareCaseProfiles.size() + data.rareCaseProfiles.size()); >+ for (auto& entry : data.rareCaseProfiles) >+ jitData.m_rareCaseProfiles.append(WTFMove(entry)); >+ >+ std::sort(jitData.m_rareCaseProfiles.begin(), jitData.m_rareCaseProfiles.end(), >+ [&] (const auto& a, const auto& b) -> bool { >+ return a->m_bytecodeOffset < b->m_bytecodeOffset; >+ }); >+ data.rareCaseProfiles.clear(); >+ } >+} > #endif > > void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor) >Index: Source/JavaScriptCore/bytecode/CodeBlock.h >=================================================================== >--- Source/JavaScriptCore/bytecode/CodeBlock.h (revision 245246) >+++ Source/JavaScriptCore/bytecode/CodeBlock.h (working copy) >@@ -249,11 +249,35 @@ public: > > void getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result); > void getICStatusMap(ICStatusMap& result); >+ > > #if ENABLE(JIT) > struct JITData { > WTF_MAKE_STRUCT_FAST_ALLOCATED; > >+ struct CompileData { >+ Bag<StructureStubInfo> stubInfos; >+ Bag<JITAddIC> addICs; >+ Bag<JITMulIC> mulICs; >+ Bag<JITNegIC> negICs; >+ Bag<JITSubIC> subICs; >+ Bag<ByValInfo> byValInfos; >+ Bag<CallLinkInfo> callLinkInfos; >+ Vector<std::unique_ptr<RareCaseProfile>, 8> rareCaseProfiles; >+ >+ bool isEmpty() const >+ { >+ return stubInfos.isEmpty() >+ && addICs.isEmpty() >+ && mulICs.isEmpty() >+ && negICs.isEmpty() >+ && subICs.isEmpty() >+ && byValInfos.isEmpty() >+ && callLinkInfos.isEmpty() >+ && rareCaseProfiles.isEmpty(); >+ } >+ }; >+ > Bag<StructureStubInfo> m_stubInfos; > Bag<JITAddIC> m_addICs; > Bag<JITMulIC> m_mulICs; >@@ -263,12 +287,14 @@ public: > Bag<CallLinkInfo> m_callLinkInfos; > SentinelLinkedList<CallLinkInfo, PackedRawSentinelNode<CallLinkInfo>> m_incomingCalls; > SentinelLinkedList<PolymorphicCallNode, PackedRawSentinelNode<PolymorphicCallNode>> m_incomingPolymorphicCalls; >- SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles; >+ Vector<std::unique_ptr<RareCaseProfile>, 8> m_rareCaseProfiles; > std::unique_ptr<PCToCodeOriginMap> m_pcToCodeOriginMap; > std::unique_ptr<RegisterAtOffsetList> m_calleeSaveRegisters; > JITCodeMap m_jitCodeMap; > }; > >+ void adoptCompileData(JITData::CompileData&&); >+ > JITData& ensureJITData(const ConcurrentJSLocker& locker) > { > if (LIKELY(m_jitData)) >@@ -309,16 +335,16 @@ public: > // looking for a CallLinkInfoMap to amortize the cost of calling this. 
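adoptCompileData above moves a finished compile's side data into the long-lived JITData only once the compile is known to have produced code; rare case profiles from successive tracelet compiles are appended and then re-sorted so rareCaseProfileForBytecodeOffset can keep binary searching. A standalone sketch of that merge (RareCaseProfile is modeled locally):

    #include <algorithm>
    #include <memory>
    #include <vector>

    struct RareCaseProfile {
        explicit RareCaseProfile(int bytecodeOffset) : m_bytecodeOffset(bytecodeOffset) { }
        int m_bytecodeOffset;
        unsigned m_counter { 0 };
    };

    using Profiles = std::vector<std::unique_ptr<RareCaseProfile>>;

    // Append this compile's profiles, then restore sorted order so lookups
    // by bytecode offset can binary search.
    void adoptRareCaseProfiles(Profiles& longLived, Profiles&& fromCompile)
    {
        longLived.reserve(longLived.size() + fromCompile.size());
        for (auto& profile : fromCompile)
            longLived.push_back(std::move(profile));
        fromCompile.clear();
        std::sort(longLived.begin(), longLived.end(),
            [] (const auto& a, const auto& b) { return a->m_bytecodeOffset < b->m_bytecodeOffset; });
    }

    int main()
    {
        Profiles longLived, fresh;
        fresh.push_back(std::make_unique<RareCaseProfile>(42));
        adoptRareCaseProfiles(longLived, std::move(fresh));
    }

This also explains the CodeBlock.h hunk that turns SegmentedVector<RareCaseProfile> into a Vector of unique_ptrs: heap-allocating each profile keeps its address stable across the sort, which matters because slow-path code emitted by an earlier compile already holds direct pointers to the counters.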
> CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex); > >- void setJITCodeMap(JITCodeMap&& jitCodeMap) >- { >- ConcurrentJSLocker locker(m_lock); >- ensureJITData(locker).m_jitCodeMap = WTFMove(jitCodeMap); >- } >- const JITCodeMap& jitCodeMap() >- { >- ConcurrentJSLocker locker(m_lock); >- return ensureJITData(locker).m_jitCodeMap; >- } >+ //void setJITCodeMap(JITCodeMap&& jitCodeMap) >+ //{ >+ // ConcurrentJSLocker locker(m_lock); >+ // ensureJITData(locker).m_jitCodeMap = WTFMove(jitCodeMap); >+ //} >+ //const JITCodeMap& jitCodeMap() >+ //{ >+ // ConcurrentJSLocker locker(m_lock); >+ // return ensureJITData(locker).m_jitCodeMap; >+ //} > > void setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&&); > Optional<CodeOrigin> findPC(void* pc); >@@ -326,7 +352,6 @@ public: > void setCalleeSaveRegisters(RegisterSet); > void setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList>); > >- RareCaseProfile* addRareCaseProfile(int bytecodeOffset); > RareCaseProfile* rareCaseProfileForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset); > unsigned rareCaseProfileCountForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset); > >@@ -348,12 +373,6 @@ public: > return value >= Options::couldTakeSlowCaseMinimumCount(); > } > >- // We call this when we want to reattempt compiling something with the baseline JIT. Ideally >- // the baseline JIT would not add data to CodeBlock, but instead it would put its data into >- // a newly created JITCode, which could be thrown away if we bail on JIT compilation. Then we >- // would be able to get rid of this silly function. >- // FIXME: https://bugs.webkit.org/show_bug.cgi?id=159061 >- void resetJITData(); > #endif // ENABLE(JIT) > > void unlinkIncomingCalls(); >@@ -884,6 +903,10 @@ public: > return m_unlinkedCode->metadataSizeInBytes(); > } > >+ MetadataTable* metadataTable() { return m_metadata.get(); } >+ >+ const void* instructionsRawPointer() { return m_instructionsRawPointer; } >+ > protected: > void finalizeLLIntInlineCaches(); > #if ENABLE(JIT) >Index: Source/JavaScriptCore/bytecode/ValueProfile.h >=================================================================== >--- Source/JavaScriptCore/bytecode/ValueProfile.h (revision 245246) >+++ Source/JavaScriptCore/bytecode/ValueProfile.h (working copy) >@@ -193,11 +193,6 @@ struct RareCaseProfile { > uint32_t m_counter; > }; > >-inline int getRareCaseProfileBytecodeOffset(RareCaseProfile* rareCaseProfile) >-{ >- return rareCaseProfile->m_bytecodeOffset; >-} >- > struct ValueProfileAndOperand { > ValueProfile m_profile; > int m_operand; >Index: Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp >=================================================================== >--- Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp (revision 245246) >+++ Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp (working copy) >@@ -253,10 +253,12 @@ ParserError BytecodeGenerator::generate( > // At this point we would have emitted an unconditional throw followed by some nonsense that's > // just an artifact of how this generator is structured. That code never runs, but it confuses > // bytecode analyses because it constitutes an unterminated basic block. So, we terminate the >- // basic block the strongest way possible. >+ // basic block in the strongest way possible. 
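The BytecodeGenerator hunks just below plant the trace hints: one at function entry, one at every label (that is, every potential jump target, unless the previous instruction already was a hint), and one before the exception-handler stubs, so each trace is single-entry. A standalone sketch of that emission discipline (the generator and opcode types are simplified stand-ins):

    #include <vector>

    enum OpcodeID { op_end, op_enter, op_trace_hint, op_jmp };

    // Sketch of where trace hints get planted: every spot that can become
    // a jump target starts a fresh trace.
    struct GeneratorSketch {
        std::vector<OpcodeID> stream;

        void emitTraceHint() { stream.push_back(op_trace_hint); }

        void emitEnter()
        {
            stream.push_back(op_enter);
            emitTraceHint(); // Function entry begins the first trace.
        }

        void emitLabel()
        {
            // A label is a jump target; start a new trace unless the
            // previous instruction already did.
            if (stream.empty() || stream.back() != op_trace_hint)
                emitTraceHint();
        }
    };

    int main()
    {
        GeneratorSketch generator;
        generator.emitEnter();
        generator.emitLabel(); // No duplicate hint: enter already planted one.
    }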
> emitUnreachable(); > } > >+ if (m_exceptionHandlersToEmit.size()) >+ OpTraceHint::emit(this); > for (auto& handler : m_exceptionHandlersToEmit) { > Ref<Label> realCatchTarget = newLabel(); > TryData* tryData = handler.tryData; >@@ -1363,6 +1365,9 @@ void BytecodeGenerator::emitLabel(Label& > > m_codeBlock->addJumpTarget(newLabelIndex); > >+ if (m_lastInstruction->opcodeID() != op_trace_hint) >+ OpTraceHint::emit(this); >+ > // This disables peephole optimizations when an instruction is a jump target > m_lastOpcodeID = op_end; > } >@@ -1378,6 +1383,8 @@ void BytecodeGenerator::emitEnter() > // This disables peephole optimizations when an instruction is a jump target > m_lastOpcodeID = op_end; > } >+ >+ OpTraceHint::emit(this); > } > > void BytecodeGenerator::emitLoopHint() >@@ -1852,6 +1859,8 @@ void BytecodeGenerator::emitProfileType( > > void BytecodeGenerator::emitProfileControlFlow(int textOffset) > { >+ //OpTraceHint::emit(this); >+ > if (shouldEmitControlFlowProfilerHooks()) { > RELEASE_ASSERT(textOffset >= 0); > >Index: Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp >=================================================================== >--- Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp (revision 245246) >+++ Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp (working copy) >@@ -3356,7 +3356,7 @@ void ReturnNode::emitBytecode(BytecodeGe > generator.emitProfileControlFlow(endOffset()); > // Emitting an unreachable return here is needed in case this op_profile_control_flow is the > // last opcode in a CodeBlock because a CodeBlock's instructions must end with a terminal opcode. >- if (generator.shouldEmitControlFlowProfilerHooks()) >+ if (generator.shouldEmitControlFlowProfilerHooks() || generator.lastOpcodeID() == op_trace_hint) > generator.emitReturn(generator.emitLoad(nullptr, jsUndefined())); > } > >Index: Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp >=================================================================== >--- Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp (revision 245246) >+++ Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp (working copy) >@@ -6599,6 +6599,19 @@ void ByteCodeParser::parseBlock(unsigned > NEXT_OPCODE(op_put_to_scope); > } > >+ case op_trace_hint: { >+ addToGraph(Check); // We add a nop here so that basic block linking doesn't break. >+ >+ //static double compiled; >+ //static double total; >+ //++total; >+ //auto bytecode = currentInstruction->as<OpTraceHint>(); >+ //if (bytecode.metadata(codeBlock).m_entrypoint) >+ // ++compiled; >+ //dataLogLn("percent compiled: ", compiled / total * 100); >+ NEXT_OPCODE(op_trace_hint); >+ } >+ > case op_loop_hint: { > // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG > // OSR can only happen at basic block boundaries. 
Assert that these two statements >Index: Source/JavaScriptCore/dfg/DFGCapabilities.cpp >=================================================================== >--- Source/JavaScriptCore/dfg/DFGCapabilities.cpp (revision 245246) >+++ Source/JavaScriptCore/dfg/DFGCapabilities.cpp (working copy) >@@ -202,6 +202,7 @@ CapabilityLevel capabilityLevel(OpcodeID > case op_jbelow: > case op_jbeloweq: > case op_loop_hint: >+ case op_trace_hint: > case op_check_traps: > case op_nop: > case op_ret: >Index: Source/JavaScriptCore/dfg/DFGOSREntry.cpp >=================================================================== >--- Source/JavaScriptCore/dfg/DFGOSREntry.cpp (revision 245246) >+++ Source/JavaScriptCore/dfg/DFGOSREntry.cpp (working copy) >@@ -97,7 +97,6 @@ void* prepareOSREntry(ExecState* exec, C > ASSERT(JITCode::isOptimizingJIT(codeBlock->jitType())); > ASSERT(codeBlock->alternative()); > ASSERT(codeBlock->alternative()->jitType() == JITType::BaselineJIT); >- ASSERT(!codeBlock->jitCodeMap()); > ASSERT(codeBlock->jitCode()->dfgCommon()->isStillValid); > > if (!Options::useOSREntryToDFG()) >Index: Source/JavaScriptCore/dfg/DFGOSRExit.cpp >=================================================================== >--- Source/JavaScriptCore/dfg/DFGOSRExit.cpp (revision 245246) >+++ Source/JavaScriptCore/dfg/DFGOSRExit.cpp (working copy) >@@ -371,12 +371,12 @@ void OSRExit::executeOSRExit(Context& co > // results will be cached in the OSRExitState record for use of the rest of the > // exit ramp code. > >- // Ensure we have baseline codeBlocks to OSR exit to. >- prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin); >- > CodeBlock* baselineCodeBlock = codeBlock->baselineAlternative(); > ASSERT(baselineCodeBlock->jitType() == JITType::BaselineJIT); > >+ // Ensure we have baseline codeBlocks to OSR exit to. >+ prepareCodeOriginForOSRExit(exec, baselineCodeBlock, exit.m_codeOrigin); >+ > SpeculationRecovery* recovery = nullptr; > if (exit.m_recoveryIndex != UINT_MAX) { > recovery = &dfgJITCode->speculationRecovery[exit.m_recoveryIndex]; >@@ -405,11 +405,10 @@ void OSRExit::executeOSRExit(Context& co > adjustedThreshold = BaselineExecutionCounter::clippedThreshold(codeBlock->globalObject(), adjustedThreshold); > > CodeBlock* codeBlockForExit = baselineCodeBlockForOriginAndBaselineCodeBlock(exit.m_codeOrigin, baselineCodeBlock); >- const JITCodeMap& codeMap = codeBlockForExit->jitCodeMap(); >- CodeLocationLabel<JSEntryPtrTag> codeLocation = codeMap.find(exit.m_codeOrigin.bytecodeIndex()); >- ASSERT(codeLocation); >+ MacroAssemblerCodePtr<JITTraceletPtrTag> codePtr = static_cast<TraceletJITCode*>(codeBlockForExit->jitCode().get())->findCodeLocation(exit.m_codeOrigin.bytecodeIndex()); >+ ASSERT(!!codePtr); > >- void* jumpTarget = codeLocation.executableAddress(); >+ void* jumpTarget = codePtr.executableAddress(); > > // Compute the value recoveries. > Operands<ValueRecovery> operands; >@@ -1047,7 +1046,7 @@ void JIT_OPERATION OSRExit::compileOSREx > ASSERT(!vm->callFrameForCatch || exit.m_kind == GenericUnwind); > EXCEPTION_ASSERT_UNUSED(scope, !!scope.exception() || !exit.isExceptionHandler()); > >- prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin); >+ prepareCodeOriginForOSRExit(exec, codeBlock->baselineAlternative(), exit.m_codeOrigin); > > // Compute the value recoveries. 
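OSR exit used to consult a per-CodeBlock JITCodeMap; in the hunks above, the mapping from bytecode index to machine code now lives on the TraceletJITCode itself, since any given compile may only cover some traces. A standalone sketch of that lookup (findCodeLocation and its containing type are modeled here with a plain hash map; the real code uses tagged code pointers):

    #include <cassert>
    #include <unordered_map>

    using MachineCodePtr = void*; // Stand-in for a tagged code pointer.

    struct TraceletJITCodeSketch {
        // Filled in at link time: one entry per compiled bytecode offset
        // that other code may need to jump to.
        std::unordered_map<unsigned, MachineCodePtr> m_codeLocations;

        MachineCodePtr findCodeLocation(unsigned bytecodeIndex) const
        {
            auto it = m_codeLocations.find(bytecodeIndex);
            return it == m_codeLocations.end() ? nullptr : it->second;
        }
    };

    int main()
    {
        TraceletJITCodeSketch code;
        int stub = 0;
        code.m_codeLocations[40] = &stub;
        assert(code.findCodeLocation(40));  // compiled trace entry
        assert(!code.findCodeLocation(12)); // never compiled
    }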
> Operands<ValueRecovery> operands; >Index: Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp >=================================================================== >--- Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp (revision 245246) >+++ Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp (working copy) >@@ -311,10 +311,11 @@ void adjustAndJumpToTarget(VM& vm, CCall > CodeBlock* codeBlockForExit = jit.baselineCodeBlockFor(exit.m_codeOrigin); > ASSERT(codeBlockForExit == codeBlockForExit->baselineVersion()); > ASSERT(codeBlockForExit->jitType() == JITType::BaselineJIT); >- CodeLocationLabel<JSEntryPtrTag> codeLocation = codeBlockForExit->jitCodeMap().find(exit.m_codeOrigin.bytecodeIndex()); >- ASSERT(codeLocation); >+ RELEASE_ASSERT(codeBlockForExit->jitCode()->isTraceletJITCode()); >+ MacroAssemblerCodePtr<JITTraceletPtrTag> codePtr = static_cast<TraceletJITCode*>(codeBlockForExit->jitCode().get())->findCodeLocation(exit.m_codeOrigin.bytecodeIndex()); >+ ASSERT(!!codePtr); > >- void* jumpTarget = codeLocation.retagged<OSRExitPtrTag>().executableAddress(); >+ void* jumpTarget = codePtr.retagged<OSRExitPtrTag>().executableAddress(); > jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(codeBlockForExit) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister); > if (exit.isExceptionHandler()) { > // Since we're jumping to op_catch, we need to set callFrameForCatch. >Index: Source/JavaScriptCore/dfg/DFGOSRExitPreparation.cpp >=================================================================== >--- Source/JavaScriptCore/dfg/DFGOSRExitPreparation.cpp (revision 245246) >+++ Source/JavaScriptCore/dfg/DFGOSRExitPreparation.cpp (working copy) >@@ -36,15 +36,19 @@ > > namespace JSC { namespace DFG { > >-void prepareCodeOriginForOSRExit(ExecState* exec, CodeOrigin codeOrigin) >+void prepareCodeOriginForOSRExit(ExecState* exec, CodeBlock* codeBlock, CodeOrigin codeOrigin) > { > VM& vm = exec->vm(); > DeferGC deferGC(vm.heap); >- >+ >+ RELEASE_ASSERT(codeBlock->baselineAlternative() == codeBlock); >+ > for (; codeOrigin.inlineCallFrame(); codeOrigin = codeOrigin.inlineCallFrame()->directCaller) { > CodeBlock* codeBlock = codeOrigin.inlineCallFrame()->baselineCodeBlock.get(); >- JITWorklist::ensureGlobalWorklist().compileNow(codeBlock); >+ JITWorklist::ensureGlobalWorklist().compileNow(codeBlock, codeOrigin.bytecodeIndex()); > } >+ >+ JITWorklist::ensureGlobalWorklist().compileNow(codeBlock, codeOrigin.bytecodeIndex()); > } > > } } // namespace JSC::DFG >Index: Source/JavaScriptCore/dfg/DFGOSRExitPreparation.h >=================================================================== >--- Source/JavaScriptCore/dfg/DFGOSRExitPreparation.h (revision 245246) >+++ Source/JavaScriptCore/dfg/DFGOSRExitPreparation.h (working copy) >@@ -41,7 +41,7 @@ namespace JSC { namespace DFG { > // probably it's a good sign that the thing we're exiting into is hot. Even more > // interestingly, since the code was inlined, it may never otherwise get JIT > // compiled since the act of inlining it may ensure that it otherwise never runs. 
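The SpeculativeJIT and FTL hunks that follow all make the same mechanical change: the IC generators stop allocating their StructureStubInfo internally and instead take one from the caller via addStubInfo(accessType). That lets the baseline JIT route allocations into its per-compile CompileData bag (and discard them if the compile fails) while the DFG and FTL keep allocating through the CodeBlock. A standalone sketch of the shape of that change (types simplified; a deque stands in for WTF::Bag's stable addresses):

    #include <deque>

    enum class AccessType { Get, Put, In, InstanceOf, GetWithThis };

    struct StructureStubInfo {
        explicit StructureStubInfo(AccessType type) : accessType(type) { }
        AccessType accessType;
    };

    // Bag-like arena owned by the in-flight compile: stable addresses.
    struct CompileDataSketch {
        std::deque<StructureStubInfo> stubInfos;
        StructureStubInfo* addStubInfo(AccessType type)
        {
            stubInfos.emplace_back(type);
            return &stubInfos.back();
        }
    };

    // Before: the generator allocated the stub info itself.
    // After: the caller allocates and passes it in.
    struct JITGetByIdGeneratorSketch {
        explicit JITGetByIdGeneratorSketch(StructureStubInfo* stubInfo) : m_stubInfo(stubInfo) { }
        StructureStubInfo* m_stubInfo;
    };

    int main()
    {
        CompileDataSketch compileData;
        JITGetByIdGeneratorSketch gen(compileData.addStubInfo(AccessType::Get));
        (void)gen;
    }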
>-void prepareCodeOriginForOSRExit(ExecState*, CodeOrigin); >+void prepareCodeOriginForOSRExit(ExecState*, CodeBlock*, CodeOrigin); > > } } // namespace JSC::DFG > >Index: Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp >=================================================================== >--- Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp (revision 245246) >+++ Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp (working copy) >@@ -199,7 +199,7 @@ void SpeculativeJIT::cachedGetById( > > CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size()); > JITGetByIdGenerator gen( >- m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, identifierUID(identifierNumber), >+ m_jit.codeBlock()->addStubInfo(type), m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, identifierUID(identifierNumber), > JSValueRegs(baseTagGPROrNone, basePayloadGPR), JSValueRegs(resultTagGPR, resultPayloadGPR), type); > > gen.generateFastPath(m_jit); >@@ -234,7 +234,7 @@ void SpeculativeJIT::cachedGetByIdWithTh > > CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size()); > JITGetByIdWithThisGenerator gen( >- m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, identifierUID(identifierNumber), >+ m_jit.codeBlock()->addStubInfo(AccessType::GetWithThis), m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, identifierUID(identifierNumber), > JSValueRegs(resultTagGPR, resultPayloadGPR), JSValueRegs(baseTagGPROrNone, basePayloadGPR), JSValueRegs(thisTagGPR, thisPayloadGPR), AccessType::GetWithThis); > > gen.generateFastPath(m_jit); >Index: Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp >=================================================================== >--- Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp (revision 245246) >+++ Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp (working copy) >@@ -171,7 +171,7 @@ void SpeculativeJIT::cachedGetById(CodeO > usedRegisters.set(resultGPR, false); > } > JITGetByIdGenerator gen( >- m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, identifierUID(identifierNumber), >+ m_jit.codeBlock()->addStubInfo(type), m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, identifierUID(identifierNumber), > JSValueRegs(baseGPR), JSValueRegs(resultGPR), type); > gen.generateFastPath(m_jit); > >@@ -198,7 +198,7 @@ void SpeculativeJIT::cachedGetByIdWithTh > usedRegisters.set(resultGPR, false); > > JITGetByIdWithThisGenerator gen( >- m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, identifierUID(identifierNumber), >+ m_jit.codeBlock()->addStubInfo(AccessType::GetWithThis), m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, identifierUID(identifierNumber), > JSValueRegs(resultGPR), JSValueRegs(baseGPR), JSValueRegs(thisGPR), AccessType::GetWithThis); > gen.generateFastPath(m_jit); > >Index: Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp >=================================================================== >--- Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp (revision 245246) >+++ Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp (working copy) >@@ -1082,7 +1082,7 @@ void SpeculativeJIT::compileInById(Node* > CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size()); > RegisterSet usedRegisters = this->usedRegisters(); > JITInByIdGenerator gen( >- m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, identifierUID(node->identifierNumber()), >+ 
m_jit.codeBlock()->addStubInfo(AccessType::In), m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, identifierUID(node->identifierNumber()), > JSValueRegs::payloadOnly(baseGPR), resultRegs); > gen.generateFastPath(m_jit); > >@@ -3464,7 +3464,7 @@ void SpeculativeJIT::compileInstanceOfFo > CallSiteIndex callSiteIndex = m_jit.addCallSite(node->origin.semantic); > > JITInstanceOfGenerator gen( >- m_jit.codeBlock(), node->origin.semantic, callSiteIndex, usedRegisters(), resultGPR, >+ m_jit.codeBlock()->addStubInfo(AccessType::InstanceOf), m_jit.codeBlock(), node->origin.semantic, callSiteIndex, usedRegisters(), resultGPR, > valueRegs.payloadGPR(), prototypeRegs.payloadGPR(), scratchGPR, scratch2GPR, > m_state.forNode(node->child2()).isType(SpecObject | ~SpecCell)); > gen.generateFastPath(m_jit); >@@ -13352,7 +13352,7 @@ void SpeculativeJIT::cachedPutById(CodeO > } > CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size()); > JITPutByIdGenerator gen( >- m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, >+ m_jit.codeBlock()->addStubInfo(AccessType::Put), m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, > JSValueRegs::payloadOnly(baseGPR), valueRegs, > scratchGPR, m_jit.ecmaModeFor(codeOrigin), putKind); > >Index: Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp >=================================================================== >--- Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp (revision 245246) >+++ Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp (working copy) >@@ -3793,7 +3793,7 @@ private: > exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex); > > auto generator = Box<JITPutByIdGenerator>::create( >- jit.codeBlock(), node->origin.semantic, callSiteIndex, >+ jit.codeBlock()->addStubInfo(AccessType::Put), jit.codeBlock(), node->origin.semantic, callSiteIndex, > params.unavailableRegisters(), JSValueRegs(params[0].gpr()), > JSValueRegs(params[1].gpr()), GPRInfo::patchpointScratchRegister, ecmaMode, > node->op() == PutByIdDirect ? 
Direct : NotDirect); >@@ -10373,7 +10373,7 @@ private: > exceptionHandle->scheduleExitCreation(params)->jumps(jit); > > auto generator = Box<JITInByIdGenerator>::create( >- jit.codeBlock(), node->origin.semantic, callSiteIndex, >+ jit.codeBlock()->addStubInfo(AccessType::In), jit.codeBlock(), node->origin.semantic, callSiteIndex, > params.unavailableRegisters(), uid, JSValueRegs(params[1].gpr()), > JSValueRegs(params[0].gpr())); > >@@ -10632,7 +10632,7 @@ private: > exceptionHandle->scheduleExitCreation(params)->jumps(jit); > > auto generator = Box<JITInstanceOfGenerator>::create( >- jit.codeBlock(), node->origin.semantic, callSiteIndex, >+ jit.codeBlock()->addStubInfo(AccessType::InstanceOf), jit.codeBlock(), node->origin.semantic, callSiteIndex, > params.unavailableRegisters(), resultGPR, valueGPR, prototypeGPR, scratchGPR, > scratch2GPR, prototypeIsObject); > generator->generateFastPath(jit); >@@ -12023,7 +12023,7 @@ private: > exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex); > > auto generator = Box<JITGetByIdGenerator>::create( >- jit.codeBlock(), node->origin.semantic, callSiteIndex, >+ jit.codeBlock()->addStubInfo(type), jit.codeBlock(), node->origin.semantic, callSiteIndex, > params.unavailableRegisters(), uid, JSValueRegs(params[1].gpr()), > JSValueRegs(params[0].gpr()), type); > >@@ -12091,7 +12091,7 @@ private: > exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex); > > auto generator = Box<JITGetByIdWithThisGenerator>::create( >- jit.codeBlock(), node->origin.semantic, callSiteIndex, >+ jit.codeBlock()->addStubInfo(AccessType::GetWithThis), jit.codeBlock(), node->origin.semantic, callSiteIndex, > params.unavailableRegisters(), uid, JSValueRegs(params[0].gpr()), > JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), AccessType::GetWithThis); > >Index: Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp >=================================================================== >--- Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp (revision 245246) >+++ Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp (working copy) >@@ -532,7 +532,7 @@ extern "C" void* compileFTLOSRExit(ExecS > } > } > >- prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin); >+ prepareCodeOriginForOSRExit(exec, codeBlock->baselineAlternative(), exit.m_codeOrigin); > > compileStub(exitID, jitCode, exit, &vm, codeBlock); > >Index: Source/JavaScriptCore/heap/Heap.cpp >=================================================================== >--- Source/JavaScriptCore/heap/Heap.cpp (revision 245246) >+++ Source/JavaScriptCore/heap/Heap.cpp (working copy) >@@ -58,6 +58,7 @@ > #include "MarkStackMergingConstraint.h" > #include "MarkedSpaceInlines.h" > #include "MarkingConstraintSet.h" >+#include "OpcodeInlines.h" > #include "PreventCollectionScope.h" > #include "SamplingProfiler.h" > #include "ShadowChicken.h" >@@ -85,6 +86,7 @@ > #include <wtf/SimpleStats.h> > #include <wtf/Threading.h> > >+ > #if PLATFORM(IOS_FAMILY) > #include <bmalloc/bmalloc.h> > #endif >@@ -2993,4 +2995,33 @@ void Heap::runTaskInParallel(RefPtr<Shar > } > } > >+void Heap::dumpAllCodeBlockTraces() >+{ >+ HeapIterationScope iterationScope(*this); >+ double total = 0.0; >+ double compiled = 0.0; >+ m_objectSpace.forEachLiveCell(iterationScope, [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus { >+ if (!isJSCellKind(kind)) >+ return IterationStatus::Continue; >+ >+ if (CodeBlock* codeBlock = jsDynamicCast<CodeBlock*>(*m_vm, static_cast<JSCell*>(cell))) { >+ const InstructionStream& instructionStream = 
codeBlock->instructions(); >+ for (const auto& instruction : instructionStream) { >+ OpcodeID opcodeID = instruction->opcodeID(); >+ if (opcodeID != op_trace_hint) >+ continue; >+ >+ ++total; >+ auto bytecode = instruction->as<OpTraceHint>(); >+ auto& metadata = bytecode.metadata(codeBlock); >+ if (metadata.m_entrypoint) >+ ++compiled; >+ } >+ } >+ >+ return IterationStatus::Continue; >+ }); >+ dataLogLn("total compiled: ", compiled/total); >+} >+ > } // namespace JSC >Index: Source/JavaScriptCore/heap/Heap.h >=================================================================== >--- Source/JavaScriptCore/heap/Heap.h (revision 245246) >+++ Source/JavaScriptCore/heap/Heap.h (working copy) >@@ -112,6 +112,9 @@ class HeapUtil; > class Heap { > WTF_MAKE_NONCOPYABLE(Heap); > public: >+ >+ void dumpAllCodeBlockTraces(); >+ > friend class JIT; > friend class DFG::SpeculativeJIT; > static Heap* heap(const JSValue); // 0 for immediate values >Index: Source/JavaScriptCore/jit/AssemblyHelpers.h >=================================================================== >--- Source/JavaScriptCore/jit/AssemblyHelpers.h (revision 245246) >+++ Source/JavaScriptCore/jit/AssemblyHelpers.h (working copy) >@@ -52,7 +52,7 @@ class AssemblyHelpers : public MacroAsse > public: > AssemblyHelpers(CodeBlock* codeBlock) > : m_codeBlock(codeBlock) >- , m_baselineCodeBlock(codeBlock ? codeBlock->baselineAlternative() : 0) >+ , m_baselineCodeBlock(codeBlock ? codeBlock->baselineAlternative() : nullptr) > { > if (m_codeBlock) { > ASSERT(m_baselineCodeBlock); >Index: Source/JavaScriptCore/jit/JIT.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JIT.cpp (revision 245246) >+++ Source/JavaScriptCore/jit/JIT.cpp (working copy) >@@ -41,6 +41,7 @@ > #include "JSCInlines.h" > #include "JSFunction.h" > #include "LinkBuffer.h" >+#include "LLIntData.h" > #include "MaxFrameExtentForSlowPathCall.h" > #include "ModuleProgramCodeBlock.h" > #include "PCToCodeOriginMap.h" >@@ -56,17 +57,23 @@ > #include <wtf/GraphNodeWorklist.h> > #include <wtf/SimpleStats.h> > >+#include "MacroAssemblerPrinter.h" >+ > namespace JSC { > namespace JITInternal { > static constexpr const bool verbose = false; > } > >+static constexpr bool verboseProbes = false; >+ > Seconds totalBaselineCompileTime; > Seconds totalDFGCompileTime; > Seconds totalFTLCompileTime; > Seconds totalFTLDFGCompileTime; > Seconds totalFTLB3CompileTime; > >+Seconds timeMakingTraces; >+ > void ctiPatchCallByReturnAddress(ReturnAddressPtr returnAddress, FunctionPtr<CFunctionPtrTag> newCalleeFunction) > { > MacroAssembler::repatchCall( >@@ -74,21 +81,25 @@ void ctiPatchCallByReturnAddress(ReturnA > newCalleeFunction.retagged<OperationPtrTag>()); > } > >-JIT::JIT(VM* vm, CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset) >+JIT::JIT(VM* vm, CodeBlock* codeBlock) > : JSInterfaceJIT(vm, codeBlock) > , m_interpreter(vm->interpreter) >- , m_labels(codeBlock ? 
codeBlock->instructions().size() : 0) > , m_bytecodeOffset(std::numeric_limits<unsigned>::max()) > , m_pcToCodeOriginMapBuilder(*vm) > , m_canBeOptimized(false) > , m_shouldEmitProfiling(false) > , m_shouldUseIndexMasking(Options::enableSpectreMitigations()) >- , m_loopOSREntryBytecodeOffset(loopOSREntryBytecodeOffset) > { >+ RefPtr<JITCode> jitCode = codeBlock->jitCode(); >+ if (jitCode && jitCode->isTraceletJITCode()) >+ m_priorCode = static_cast<TraceletJITCode*>(jitCode.get()); > } > > JIT::~JIT() > { >+ // OOPS: debug assert >+ if (!m_linkBuffer || !m_linkBuffer->didFailToAllocate()) >+ RELEASE_ASSERT(m_compileData.isEmpty()); > } > > #if ENABLE(DFG_JIT) >@@ -143,18 +154,14 @@ void JIT::assertStackPointerOffset() > > #define DEFINE_SLOW_OP(name) \ > case op_##name: { \ >- if (m_bytecodeOffset >= startBytecodeOffset) { \ >- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \ >- slowPathCall.call(); \ >- } \ >+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \ >+ slowPathCall.call(); \ > NEXT_OPCODE(op_##name); \ > } > > #define DEFINE_OP(name) \ > case name: { \ >- if (m_bytecodeOffset >= startBytecodeOffset) { \ >- emit_##name(currentInstruction); \ >- } \ >+ emit_##name(currentInstruction); \ > NEXT_OPCODE(name); \ > } > >@@ -187,275 +194,246 @@ void JIT::privateCompileMainPass() > jitAssertArgumentCountSane(); > > auto& instructions = m_codeBlock->instructions(); >- unsigned instructionCount = m_codeBlock->instructions().size(); > > m_callLinkInfoIndex = 0; > >- VM& vm = *m_codeBlock->vm(); >- unsigned startBytecodeOffset = 0; >- if (m_loopOSREntryBytecodeOffset && (m_codeBlock->inherits<ProgramCodeBlock>(vm) || m_codeBlock->inherits<ModuleProgramCodeBlock>(vm))) { >- // We can only do this optimization because we execute ProgramCodeBlock's exactly once. >- // This optimization would be invalid otherwise. When the LLInt determines it wants to >- // do OSR entry into the baseline JIT in a loop, it will pass in the bytecode offset it >- // was executing at when it kicked off our compilation. We only need to compile code for >- // anything reachable from that bytecode offset. >- >- // We only bother building the bytecode graph if it could save time and executable >- // memory. We pick an arbitrary offset where we deem this is profitable. >- if (m_loopOSREntryBytecodeOffset >= 200) { >- // As a simplification, we don't find all bytecode ranges that are unreachable. >- // Instead, we just find the minimum bytecode offset that is reachable, and >- // compile code from that bytecode offset onwards. >- >- BytecodeGraph graph(m_codeBlock, m_codeBlock->instructions()); >- BytecodeBasicBlock* block = graph.findBasicBlockForBytecodeOffset(m_loopOSREntryBytecodeOffset); >- RELEASE_ASSERT(block); >- >- GraphNodeWorklist<BytecodeBasicBlock*> worklist; >- startBytecodeOffset = UINT_MAX; >- worklist.push(block); >- >- while (BytecodeBasicBlock* block = worklist.pop()) { >- startBytecodeOffset = std::min(startBytecodeOffset, block->leaderOffset()); >- worklist.pushAll(block->successors()); >- >- // Also add catch blocks for bytecodes that throw. 
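The rewritten main pass below no longer walks the whole instruction stream from a single start offset (the old loop-OSR-entry pruning is deleted); it compiles each profiled trace independently, so the dense m_labels vector becomes a map keyed by the bytecode offsets that were actually compiled. A standalone sketch of the loop's new shape (Label and the instruction stream are stand-ins):

    #include <map>
    #include <vector>

    struct TraceProfile { unsigned start, end; };
    struct Label { unsigned machineOffset; };

    struct MainPassSketch {
        std::vector<TraceProfile> m_traces;
        std::map<unsigned, Label> m_labels; // Sparse: only compiled offsets.
        unsigned m_machineOffset { 0 };

        unsigned sizeOfInstructionAt(unsigned) const { return 1; } // stand-in
        void emitCodeFor(unsigned) { m_machineOffset += 4; }       // stand-in

        void privateCompileMainPass()
        {
            for (const TraceProfile& trace : m_traces) {
                for (unsigned offset = trace.start; offset < trace.end;) {
                    m_labels.emplace(offset, Label { m_machineOffset });
                    emitCodeFor(offset);
                    offset += sizeOfInstructionAt(offset);
                }
            }
        }
    };

    int main()
    {
        MainPassSketch jit;
        jit.m_traces = { { 0, 3 }, { 10, 12 } };
        jit.privateCompileMainPass(); // Offsets 3..9 are never compiled.
    }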
>- if (m_codeBlock->numberOfExceptionHandlers()) { >- for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) { >- auto instruction = instructions.at(bytecodeOffset); >- if (auto* handler = m_codeBlock->handlerForBytecodeOffset(bytecodeOffset)) >- worklist.push(graph.findBasicBlockWithLeaderOffset(handler->target)); >+ for (unsigned traceIndex = 0; traceIndex < m_traces.size(); ++traceIndex) { >+ const TraceProfile& trace = m_traces[traceIndex]; > >- bytecodeOffset += instruction->size(); >- } >- } >- } >- } >- } >+ if (verboseProbes) >+ dataLogLn("Compiling trace: [", trace.start, ", ", trace.end, ")"); > >- for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) { >- if (m_bytecodeOffset == startBytecodeOffset && startBytecodeOffset > 0) { >- // We've proven all bytecode instructions up until here are unreachable. >- // Let's ensure that by crashing if it's ever hit. >- breakpoint(); >- } >+ m_numBytecodesCompiled += trace.end - trace.start; > >- if (m_disassembler) >- m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label()); >- const Instruction* currentInstruction = instructions.at(m_bytecodeOffset).ptr(); >- ASSERT_WITH_MESSAGE(currentInstruction->size(), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset); >+ for (m_bytecodeOffset = trace.start; m_bytecodeOffset < trace.end; ) { >+ if (m_disassembler) >+ m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label()); >+ const Instruction* currentInstruction = instructions.at(m_bytecodeOffset).ptr(); >+ ASSERT_WITH_MESSAGE(currentInstruction->size(), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset); > >- m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset)); >+ m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset)); > > #if ENABLE(OPCODE_SAMPLING) >- if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice. >- sampleInstruction(currentInstruction); >+ if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice. 
>+ sampleInstruction(currentInstruction); > #endif > >- m_labels[m_bytecodeOffset] = label(); >+ m_labels.add(m_bytecodeOffset, label()); >+ if (m_bytecodeOffset == trace.start && verboseProbes) >+ print("Started running trace in: ", m_codeBlock->inferredName().data(), "#", m_codeBlock->hashAsStringIfPossible().data(), " [", trace.start, ", ", trace.end, ")\n"); > >- if (JITInternal::verbose) >- dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset()); >+ if (JITInternal::verbose) >+ dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset()); > >- OpcodeID opcodeID = currentInstruction->opcodeID(); >+ OpcodeID opcodeID = currentInstruction->opcodeID(); > >- if (UNLIKELY(m_compilation)) { >- add64( >- TrustedImm32(1), >- AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin( >- m_compilation->bytecodes(), m_bytecodeOffset)))->address())); >- } >- >- if (Options::eagerlyUpdateTopCallFrame()) >- updateTopCallFrame(); >+ if (UNLIKELY(m_compilation)) { >+ add64( >+ TrustedImm32(1), >+ AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin( >+ m_compilation->bytecodes(), m_bytecodeOffset)))->address())); >+ } >+ >+ if (Options::eagerlyUpdateTopCallFrame()) >+ updateTopCallFrame(); > >- unsigned bytecodeOffset = m_bytecodeOffset; >+ unsigned bytecodeOffset = m_bytecodeOffset; > #if ENABLE(MASM_PROBE) >- if (UNLIKELY(Options::traceBaselineJITExecution())) { >- CodeBlock* codeBlock = m_codeBlock; >- probe([=] (Probe::Context& ctx) { >- dataLogLn("JIT [", bytecodeOffset, "] ", opcodeNames[opcodeID], " cfr ", RawPointer(ctx.fp()), " @ ", codeBlock); >- }); >- } >+ if (UNLIKELY(Options::traceBaselineJITExecution())) { >+ CodeBlock* codeBlock = m_codeBlock; >+ probe([=] (Probe::Context& ctx) { >+ dataLogLn("JIT [", bytecodeOffset, "] ", opcodeNames[opcodeID], " cfr ", RawPointer(ctx.fp()), " @ ", codeBlock); >+ }); >+ } > #endif > >- switch (opcodeID) { >- DEFINE_SLOW_OP(in_by_val) >- DEFINE_SLOW_OP(less) >- DEFINE_SLOW_OP(lesseq) >- DEFINE_SLOW_OP(greater) >- DEFINE_SLOW_OP(greatereq) >- DEFINE_SLOW_OP(is_function) >- DEFINE_SLOW_OP(is_object_or_null) >- DEFINE_SLOW_OP(typeof) >- DEFINE_SLOW_OP(strcat) >- DEFINE_SLOW_OP(push_with_scope) >- DEFINE_SLOW_OP(create_lexical_environment) >- DEFINE_SLOW_OP(get_by_val_with_this) >- DEFINE_SLOW_OP(put_by_id_with_this) >- DEFINE_SLOW_OP(put_by_val_with_this) >- DEFINE_SLOW_OP(resolve_scope_for_hoisting_func_decl_in_eval) >- DEFINE_SLOW_OP(define_data_property) >- DEFINE_SLOW_OP(define_accessor_property) >- DEFINE_SLOW_OP(unreachable) >- DEFINE_SLOW_OP(throw_static_error) >- DEFINE_SLOW_OP(new_array_with_spread) >- DEFINE_SLOW_OP(new_array_buffer) >- DEFINE_SLOW_OP(spread) >- DEFINE_SLOW_OP(get_enumerable_length) >- DEFINE_SLOW_OP(has_generic_property) >- DEFINE_SLOW_OP(get_property_enumerator) >- DEFINE_SLOW_OP(to_index_string) >- DEFINE_SLOW_OP(create_direct_arguments) >- DEFINE_SLOW_OP(create_scoped_arguments) >- DEFINE_SLOW_OP(create_cloned_arguments) >- DEFINE_SLOW_OP(create_rest) >- DEFINE_SLOW_OP(pow) >- >- DEFINE_OP(op_add) >- DEFINE_OP(op_bitnot) >- DEFINE_OP(op_bitand) >- DEFINE_OP(op_bitor) >- DEFINE_OP(op_bitxor) >- DEFINE_OP(op_call) >- DEFINE_OP(op_tail_call) >- DEFINE_OP(op_call_eval) >- DEFINE_OP(op_call_varargs) >- DEFINE_OP(op_tail_call_varargs) >- DEFINE_OP(op_tail_call_forward_arguments) >- DEFINE_OP(op_construct_varargs) >- DEFINE_OP(op_catch) >- DEFINE_OP(op_construct) >- 
DEFINE_OP(op_create_this) >- DEFINE_OP(op_to_this) >- DEFINE_OP(op_get_argument) >- DEFINE_OP(op_argument_count) >- DEFINE_OP(op_get_rest_length) >- DEFINE_OP(op_check_tdz) >- DEFINE_OP(op_identity_with_profile) >- DEFINE_OP(op_debug) >- DEFINE_OP(op_del_by_id) >- DEFINE_OP(op_del_by_val) >- DEFINE_OP(op_div) >- DEFINE_OP(op_end) >- DEFINE_OP(op_enter) >- DEFINE_OP(op_get_scope) >- DEFINE_OP(op_eq) >- DEFINE_OP(op_eq_null) >- DEFINE_OP(op_below) >- DEFINE_OP(op_beloweq) >- DEFINE_OP(op_try_get_by_id) >- DEFINE_OP(op_in_by_id) >- DEFINE_OP(op_get_by_id) >- DEFINE_OP(op_get_by_id_with_this) >- DEFINE_OP(op_get_by_id_direct) >- DEFINE_OP(op_get_by_val) >- DEFINE_OP(op_overrides_has_instance) >- DEFINE_OP(op_instanceof) >- DEFINE_OP(op_instanceof_custom) >- DEFINE_OP(op_is_empty) >- DEFINE_OP(op_is_undefined) >- DEFINE_OP(op_is_undefined_or_null) >- DEFINE_OP(op_is_boolean) >- DEFINE_OP(op_is_number) >- DEFINE_OP(op_is_object) >- DEFINE_OP(op_is_cell_with_type) >- DEFINE_OP(op_jeq_null) >- DEFINE_OP(op_jfalse) >- DEFINE_OP(op_jmp) >- DEFINE_OP(op_jneq_null) >- DEFINE_OP(op_jneq_ptr) >- DEFINE_OP(op_jless) >- DEFINE_OP(op_jlesseq) >- DEFINE_OP(op_jgreater) >- DEFINE_OP(op_jgreatereq) >- DEFINE_OP(op_jnless) >- DEFINE_OP(op_jnlesseq) >- DEFINE_OP(op_jngreater) >- DEFINE_OP(op_jngreatereq) >- DEFINE_OP(op_jeq) >- DEFINE_OP(op_jneq) >- DEFINE_OP(op_jstricteq) >- DEFINE_OP(op_jnstricteq) >- DEFINE_OP(op_jbelow) >- DEFINE_OP(op_jbeloweq) >- DEFINE_OP(op_jtrue) >- DEFINE_OP(op_loop_hint) >- DEFINE_OP(op_check_traps) >- DEFINE_OP(op_nop) >- DEFINE_OP(op_super_sampler_begin) >- DEFINE_OP(op_super_sampler_end) >- DEFINE_OP(op_lshift) >- DEFINE_OP(op_mod) >- DEFINE_OP(op_mov) >- DEFINE_OP(op_mul) >- DEFINE_OP(op_negate) >- DEFINE_OP(op_neq) >- DEFINE_OP(op_neq_null) >- DEFINE_OP(op_new_array) >- DEFINE_OP(op_new_array_with_size) >- DEFINE_OP(op_new_func) >- DEFINE_OP(op_new_func_exp) >- DEFINE_OP(op_new_generator_func) >- DEFINE_OP(op_new_generator_func_exp) >- DEFINE_OP(op_new_async_func) >- DEFINE_OP(op_new_async_func_exp) >- DEFINE_OP(op_new_async_generator_func) >- DEFINE_OP(op_new_async_generator_func_exp) >- DEFINE_OP(op_new_object) >- DEFINE_OP(op_new_regexp) >- DEFINE_OP(op_not) >- DEFINE_OP(op_nstricteq) >- DEFINE_OP(op_dec) >- DEFINE_OP(op_inc) >- DEFINE_OP(op_profile_type) >- DEFINE_OP(op_profile_control_flow) >- DEFINE_OP(op_get_parent_scope) >- DEFINE_OP(op_put_by_id) >- DEFINE_OP(op_put_by_val_direct) >- DEFINE_OP(op_put_by_val) >- DEFINE_OP(op_put_getter_by_id) >- DEFINE_OP(op_put_setter_by_id) >- DEFINE_OP(op_put_getter_setter_by_id) >- DEFINE_OP(op_put_getter_by_val) >- DEFINE_OP(op_put_setter_by_val) >- >- DEFINE_OP(op_ret) >- DEFINE_OP(op_rshift) >- DEFINE_OP(op_unsigned) >- DEFINE_OP(op_urshift) >- DEFINE_OP(op_set_function_name) >- DEFINE_OP(op_stricteq) >- DEFINE_OP(op_sub) >- DEFINE_OP(op_switch_char) >- DEFINE_OP(op_switch_imm) >- DEFINE_OP(op_switch_string) >- DEFINE_OP(op_throw) >- DEFINE_OP(op_to_number) >- DEFINE_OP(op_to_string) >- DEFINE_OP(op_to_object) >- DEFINE_OP(op_to_primitive) >- >- DEFINE_OP(op_resolve_scope) >- DEFINE_OP(op_get_from_scope) >- DEFINE_OP(op_put_to_scope) >- DEFINE_OP(op_get_from_arguments) >- DEFINE_OP(op_put_to_arguments) >- >- DEFINE_OP(op_has_structure_property) >- DEFINE_OP(op_has_indexed_property) >- DEFINE_OP(op_get_direct_pname) >- DEFINE_OP(op_enumerator_structure_pname) >- DEFINE_OP(op_enumerator_generic_pname) >- >- DEFINE_OP(op_log_shadow_chicken_prologue) >- DEFINE_OP(op_log_shadow_chicken_tail) >- default: >- RELEASE_ASSERT_NOT_REACHED(); 
>+ switch (opcodeID) { >+ DEFINE_SLOW_OP(in_by_val) >+ DEFINE_SLOW_OP(less) >+ DEFINE_SLOW_OP(lesseq) >+ DEFINE_SLOW_OP(greater) >+ DEFINE_SLOW_OP(greatereq) >+ DEFINE_SLOW_OP(is_function) >+ DEFINE_SLOW_OP(is_object_or_null) >+ DEFINE_SLOW_OP(typeof) >+ DEFINE_SLOW_OP(strcat) >+ DEFINE_SLOW_OP(push_with_scope) >+ DEFINE_SLOW_OP(create_lexical_environment) >+ DEFINE_SLOW_OP(get_by_val_with_this) >+ DEFINE_SLOW_OP(put_by_id_with_this) >+ DEFINE_SLOW_OP(put_by_val_with_this) >+ DEFINE_SLOW_OP(resolve_scope_for_hoisting_func_decl_in_eval) >+ DEFINE_SLOW_OP(define_data_property) >+ DEFINE_SLOW_OP(define_accessor_property) >+ DEFINE_SLOW_OP(unreachable) >+ DEFINE_SLOW_OP(throw_static_error) >+ DEFINE_SLOW_OP(new_array_with_spread) >+ DEFINE_SLOW_OP(new_array_buffer) >+ DEFINE_SLOW_OP(spread) >+ DEFINE_SLOW_OP(get_enumerable_length) >+ DEFINE_SLOW_OP(has_generic_property) >+ DEFINE_SLOW_OP(get_property_enumerator) >+ DEFINE_SLOW_OP(to_index_string) >+ DEFINE_SLOW_OP(create_direct_arguments) >+ DEFINE_SLOW_OP(create_scoped_arguments) >+ DEFINE_SLOW_OP(create_cloned_arguments) >+ DEFINE_SLOW_OP(create_rest) >+ DEFINE_SLOW_OP(pow) >+ >+ DEFINE_OP(op_add) >+ DEFINE_OP(op_bitnot) >+ DEFINE_OP(op_bitand) >+ DEFINE_OP(op_bitor) >+ DEFINE_OP(op_bitxor) >+ DEFINE_OP(op_call) >+ DEFINE_OP(op_tail_call) >+ DEFINE_OP(op_call_eval) >+ DEFINE_OP(op_call_varargs) >+ DEFINE_OP(op_tail_call_varargs) >+ DEFINE_OP(op_tail_call_forward_arguments) >+ DEFINE_OP(op_construct_varargs) >+ DEFINE_OP(op_catch) >+ DEFINE_OP(op_construct) >+ DEFINE_OP(op_create_this) >+ DEFINE_OP(op_to_this) >+ DEFINE_OP(op_get_argument) >+ DEFINE_OP(op_argument_count) >+ DEFINE_OP(op_get_rest_length) >+ DEFINE_OP(op_check_tdz) >+ DEFINE_OP(op_identity_with_profile) >+ DEFINE_OP(op_debug) >+ DEFINE_OP(op_del_by_id) >+ DEFINE_OP(op_del_by_val) >+ DEFINE_OP(op_div) >+ DEFINE_OP(op_end) >+ DEFINE_OP(op_enter) >+ DEFINE_OP(op_get_scope) >+ DEFINE_OP(op_eq) >+ DEFINE_OP(op_eq_null) >+ DEFINE_OP(op_below) >+ DEFINE_OP(op_beloweq) >+ DEFINE_OP(op_try_get_by_id) >+ DEFINE_OP(op_in_by_id) >+ DEFINE_OP(op_get_by_id) >+ DEFINE_OP(op_get_by_id_with_this) >+ DEFINE_OP(op_get_by_id_direct) >+ DEFINE_OP(op_get_by_val) >+ DEFINE_OP(op_overrides_has_instance) >+ DEFINE_OP(op_instanceof) >+ DEFINE_OP(op_instanceof_custom) >+ DEFINE_OP(op_is_empty) >+ DEFINE_OP(op_is_undefined) >+ DEFINE_OP(op_is_undefined_or_null) >+ DEFINE_OP(op_is_boolean) >+ DEFINE_OP(op_is_number) >+ DEFINE_OP(op_is_object) >+ DEFINE_OP(op_is_cell_with_type) >+ DEFINE_OP(op_jeq_null) >+ DEFINE_OP(op_jfalse) >+ DEFINE_OP(op_jmp) >+ DEFINE_OP(op_jneq_null) >+ DEFINE_OP(op_jneq_ptr) >+ DEFINE_OP(op_jless) >+ DEFINE_OP(op_jlesseq) >+ DEFINE_OP(op_jgreater) >+ DEFINE_OP(op_jgreatereq) >+ DEFINE_OP(op_jnless) >+ DEFINE_OP(op_jnlesseq) >+ DEFINE_OP(op_jngreater) >+ DEFINE_OP(op_jngreatereq) >+ DEFINE_OP(op_jeq) >+ DEFINE_OP(op_jneq) >+ DEFINE_OP(op_jstricteq) >+ DEFINE_OP(op_jnstricteq) >+ DEFINE_OP(op_jbelow) >+ DEFINE_OP(op_jbeloweq) >+ DEFINE_OP(op_jtrue) >+ DEFINE_OP(op_loop_hint) >+ DEFINE_OP(op_trace_hint) >+ DEFINE_OP(op_check_traps) >+ DEFINE_OP(op_nop) >+ DEFINE_OP(op_super_sampler_begin) >+ DEFINE_OP(op_super_sampler_end) >+ DEFINE_OP(op_lshift) >+ DEFINE_OP(op_mod) >+ DEFINE_OP(op_mov) >+ DEFINE_OP(op_mul) >+ DEFINE_OP(op_negate) >+ DEFINE_OP(op_neq) >+ DEFINE_OP(op_neq_null) >+ DEFINE_OP(op_new_array) >+ DEFINE_OP(op_new_array_with_size) >+ DEFINE_OP(op_new_func) >+ DEFINE_OP(op_new_func_exp) >+ DEFINE_OP(op_new_generator_func) >+ DEFINE_OP(op_new_generator_func_exp) >+ 
DEFINE_OP(op_new_async_func) >+ DEFINE_OP(op_new_async_func_exp) >+ DEFINE_OP(op_new_async_generator_func) >+ DEFINE_OP(op_new_async_generator_func_exp) >+ DEFINE_OP(op_new_object) >+ DEFINE_OP(op_new_regexp) >+ DEFINE_OP(op_not) >+ DEFINE_OP(op_nstricteq) >+ DEFINE_OP(op_dec) >+ DEFINE_OP(op_inc) >+ DEFINE_OP(op_profile_type) >+ DEFINE_OP(op_profile_control_flow) >+ DEFINE_OP(op_get_parent_scope) >+ DEFINE_OP(op_put_by_id) >+ DEFINE_OP(op_put_by_val_direct) >+ DEFINE_OP(op_put_by_val) >+ DEFINE_OP(op_put_getter_by_id) >+ DEFINE_OP(op_put_setter_by_id) >+ DEFINE_OP(op_put_getter_setter_by_id) >+ DEFINE_OP(op_put_getter_by_val) >+ DEFINE_OP(op_put_setter_by_val) >+ >+ DEFINE_OP(op_ret) >+ DEFINE_OP(op_rshift) >+ DEFINE_OP(op_unsigned) >+ DEFINE_OP(op_urshift) >+ DEFINE_OP(op_set_function_name) >+ DEFINE_OP(op_stricteq) >+ DEFINE_OP(op_sub) >+ DEFINE_OP(op_switch_char) >+ DEFINE_OP(op_switch_imm) >+ DEFINE_OP(op_switch_string) >+ DEFINE_OP(op_throw) >+ DEFINE_OP(op_to_number) >+ DEFINE_OP(op_to_string) >+ DEFINE_OP(op_to_object) >+ DEFINE_OP(op_to_primitive) >+ >+ DEFINE_OP(op_resolve_scope) >+ DEFINE_OP(op_get_from_scope) >+ DEFINE_OP(op_put_to_scope) >+ DEFINE_OP(op_get_from_arguments) >+ DEFINE_OP(op_put_to_arguments) >+ >+ DEFINE_OP(op_has_structure_property) >+ DEFINE_OP(op_has_indexed_property) >+ DEFINE_OP(op_get_direct_pname) >+ DEFINE_OP(op_enumerator_structure_pname) >+ DEFINE_OP(op_enumerator_generic_pname) >+ >+ DEFINE_OP(op_log_shadow_chicken_prologue) >+ DEFINE_OP(op_log_shadow_chicken_tail) >+ default: >+ RELEASE_ASSERT_NOT_REACHED(); >+ } >+ >+ if (JITInternal::verbose) >+ dataLog("At ", bytecodeOffset, ": ", m_slowCases.size(), "\n"); > } > >- if (JITInternal::verbose) >- dataLog("At ", bytecodeOffset, ": ", m_slowCases.size(), "\n"); >+ if (m_isTracelet) { >+ if (verboseProbes) >+ dataLogLn("end of trace going to: ", trace.end); >+ bool nextTraceIsAdjacent = traceIndex + 1 < m_traces.size() && trace.end == m_traces[traceIndex + 1].start; >+ if (!nextTraceIsAdjacent) >+ m_jmpTable.append(JumpTable(jump(), trace.end)); >+ } > } > > RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size()); >@@ -468,14 +446,165 @@ void JIT::privateCompileMainPass() > > void JIT::privateCompileLinkPass() > { >- unsigned jmpTableCount = m_jmpTable.size(); >- for (unsigned i = 0; i < jmpTableCount; ++i) >- m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this); >+ // OOPS: concurrency issue here when we look it up in JIT code! 
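When one compiled trace ends exactly where the next begins, control simply falls through; otherwise the code above records a jump to the following bytecode offset and leaves it to the link pass to resolve. A standalone sketch of the adjacency test:

    #include <vector>

    struct TraceProfile { unsigned start, end; };

    // True when trace i falls through directly into trace i + 1, so no
    // explicit jump needs to be planted at its end.
    bool nextTraceIsAdjacent(const std::vector<TraceProfile>& traces, size_t i)
    {
        return i + 1 < traces.size() && traces[i].end == traces[i + 1].start;
    }

    int main()
    {
        std::vector<TraceProfile> traces = { { 0, 12 }, { 12, 40 }, { 50, 57 } };
        bool fallThrough = nextTraceIsAdjacent(traces, 0); // true: 12 == 12
        (void)fallThrough; // Trace at index 1 would need a jump to bc#50.
    }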
>+ >+ auto emitJumpToLLInt = [&] (unsigned bytecodeOffset) { >+ if (verboseProbes) >+ dataLogLn("compiling jump to LLInt bc#", bytecodeOffset); >+ const Instruction& currentInstruction = *m_codeBlock->instructions().at(bytecodeOffset).ptr(); >+ MacroAssemblerCodePtr<JSEntryPtrTag> destination; >+ if (currentInstruction.isWide()) >+ destination = LLInt::getWideCodePtr<JSEntryPtrTag>(currentInstruction.opcodeID()); >+ else >+ destination = LLInt::getCodePtr<JSEntryPtrTag>(currentInstruction.opcodeID()); >+ >+ auto start = label(); >+ move(TrustedImmPtr(m_codeBlock->metadataTable()), LLInt::Registers::metadataTableGPR); >+ move(TrustedImmPtr(m_codeBlock->instructionsRawPointer()), LLInt::Registers::pbGPR); >+ move(TrustedImm32(bytecodeOffset), LLInt::Registers::pcGPR); >+ if (verboseProbes) >+ print("Exiting trace to LLInt: bc#", bytecodeOffset, " to: ", RawPointer(destination.executableAddress()), " is wide: ", currentInstruction.isWide(), " in codeblock: ", m_codeBlock->inferredName().data(), "#", m_codeBlock->hashAsStringIfPossible().data(), "\n"); >+ move(TrustedImmPtr(destination.executableAddress()), GPRInfo::regT0); >+ jump(GPRInfo::regT0, JSEntryPtrTag); >+ auto end = label(); >+ >+ m_locationsOfJumpToLLInt.add(bytecodeOffset, Vector<std::pair<Label, Label>>()).iterator->value.append({ start, end }); >+ }; >+ >+ auto getLocalBytecode = [&] (unsigned bytecodeOffset) -> Optional<Label> { >+ auto iter = m_labels.find(bytecodeOffset); >+ if (iter != m_labels.end()) >+ return iter->value; >+ return WTF::nullopt; >+ }; >+ >+ auto getPriorBytecode = [&] (unsigned bytecodeOffset) -> Optional<CodeLocationLabel<JITTraceletPtrTag>> { >+ if (!m_priorCode) >+ return WTF::nullopt; >+ >+ // OOPS: Same concurrency dependency as described above. Should we allow for it? >+ auto iter = m_priorCode->m_codeLocations.find(bytecodeOffset); >+ if (iter != m_priorCode->m_codeLocations.end()) >+ return CodeLocationLabel<JITTraceletPtrTag>(iter->value); >+ >+ return WTF::nullopt; >+ }; >+ >+ for (const JumpTable& entry : m_jmpTable) { >+ unsigned bytecodeOffset = entry.toBytecodeOffset; >+ if (verboseProbes) >+ dataLogLn("Have jump table entry to: bc#", bytecodeOffset); >+ >+ if (auto label = getLocalBytecode(bytecodeOffset)) { >+ entry.from.linkTo(*label, this); >+ continue; >+ } >+ >+ if (auto priorCode = getPriorBytecode(bytecodeOffset)) { >+ Jump from = entry.from; >+ addLinkTask([=] (LinkBuffer& linkBuffer) { >+ linkBuffer.link(from, *priorCode); >+ }); >+ continue; >+ } >+ >+ RELEASE_ASSERT(m_isTracelet); >+ entry.from.linkTo(label(), this); >+ >+ if (bytecodeOffset >= m_codeBlock->instructionsSize()) { >+ if (verboseProbes) >+ dataLogLn("Have jump table entry exceeding instructionsSize() bc#", bytecodeOffset); >+ // This is the ending trace. We should never get here in bytecode, e.g, >+ // we should have returned, jumped, or done something to terminate execution >+ // of this code. >+ breakpoint(); >+ continue; >+ } >+ >+ emitJumpToLLInt(bytecodeOffset); >+ } >+ >+ // Translate vPC offsets into addresses in JIT generated code, for switch tables. 
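The link pass above resolves every pending jump in a fixed order: a label compiled in this very pass, else an entrypoint recorded by a prior tracelet compile, else a freshly emitted trampoline that restores the LLInt's metadata-table, bytecode-base, and PC registers and jumps to the interpreter's handler for that opcode. A standalone sketch of that three-way resolution (the code-pointer types and maps are stand-ins):

    #include <unordered_map>

    using CodePtr = void*;

    struct LinkPassSketch {
        std::unordered_map<unsigned, CodePtr> m_labels;    // this compile
        std::unordered_map<unsigned, CodePtr> m_priorCode; // earlier tracelet compiles

        CodePtr emitJumpToLLInt(unsigned) { return nullptr; /* emit trampoline */ }

        CodePtr resolveJumpTarget(unsigned bytecodeOffset)
        {
            if (auto it = m_labels.find(bytecodeOffset); it != m_labels.end())
                return it->second;                  // 1. local label
            if (auto it = m_priorCode.find(bytecodeOffset); it != m_priorCode.end())
                return it->second;                  // 2. prior compile's code
            return emitJumpToLLInt(bytecodeOffset); // 3. exit to the LLInt
        }
    };

    int main()
    {
        LinkPassSketch pass;
        int local = 0;
        pass.m_labels[12] = &local;
        pass.resolveJumpTarget(12); // resolved locally
        pass.resolveJumpTarget(99); // falls back to an LLInt exit stub
    }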
>+ for (auto& record : m_switches) { >+ Vector<Label> jumpDestinations; >+ auto appendDestination = [&] (CodeLocationLabel<JSSwitchPtrTag>& ctiOffset, unsigned bytecodeOffset) { >+ if (auto label = getLocalBytecode(bytecodeOffset)) { >+ jumpDestinations.append(*label); >+ return; >+ } >+ >+ if (auto priorCode = getPriorBytecode(bytecodeOffset)) { >+ jumpDestinations.append(Label()); >+ ctiOffset = priorCode->retagged<JSSwitchPtrTag>(); >+ return; >+ } >+ >+ jumpDestinations.append(label()); >+ emitJumpToLLInt(bytecodeOffset); >+ }; >+ >+ unsigned bytecodeOffset = record.bytecodeOffset; >+ >+ if (record.type != SwitchRecord::String) { >+ ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character); >+ ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size()); >+ >+ auto* simpleJumpTable = record.jumpTable.simpleJumpTable; >+ appendDestination(simpleJumpTable->ctiDefault, bytecodeOffset + record.defaultOffset); // First is the 'default' case. >+ >+ for (unsigned i = 0; i < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++i) { >+ unsigned offset = simpleJumpTable->branchOffsets[i]; >+ if (offset) >+ appendDestination(simpleJumpTable->ctiOffsets[i], bytecodeOffset + offset); >+ else >+ jumpDestinations.append(jumpDestinations[0]); >+ } >+ >+ addLinkTask([=, jumpDestinations = WTFMove(jumpDestinations)] (LinkBuffer& linkBuffer) { >+ if (jumpDestinations[0].isSet()) >+ simpleJumpTable->ctiDefault = linkBuffer.locationOf<JSSwitchPtrTag>(jumpDestinations[0]); >+ >+ for (unsigned i = 0; i < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++i) { >+ if (jumpDestinations[i + 1].isSet()) >+ simpleJumpTable->ctiOffsets[i] = linkBuffer.locationOf<JSSwitchPtrTag>(jumpDestinations[i + 1]); >+ } >+ }); >+ } else { >+ ASSERT(record.type == SwitchRecord::String); >+ >+ auto* stringJumpTable = record.jumpTable.stringJumpTable; >+ >+ appendDestination(stringJumpTable->ctiDefault, bytecodeOffset + record.defaultOffset); // First is the 'default' case. 
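>+ // jumpDestinations[0] always holds the default target; entries whose branch
>+ // offset is zero fall through to the default and reuse that slot below.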
>+ >+ for (auto& location : stringJumpTable->offsetTable.values()) { >+ unsigned offset = location.branchOffset; >+ if (offset) >+ appendDestination(location.ctiOffset, bytecodeOffset + offset); >+ else >+ jumpDestinations.append(jumpDestinations[0]); >+ } >+ >+ addLinkTask([=, jumpDestinations = WTFMove(jumpDestinations)] (LinkBuffer& linkBuffer) { >+ if (jumpDestinations[0].isSet()) >+ stringJumpTable->ctiDefault = linkBuffer.locationOf<JSSwitchPtrTag>(jumpDestinations[0]); >+ unsigned i = 1; >+ for (auto& location : stringJumpTable->offsetTable.values()) { >+ if (jumpDestinations[i].isSet()) >+ location.ctiOffset = linkBuffer.locationOf<JSSwitchPtrTag>(jumpDestinations[i]); >+ ++i; >+ } >+ }); >+ } >+ } >+ > m_jmpTable.clear(); > } > >+static uint64_t compileID = 0; >+ > void JIT::privateCompileSlowCases() > { >+ ++compileID; > m_getByIdIndex = 0; > m_getByIdWithThisIndex = 0; > m_putByIdIndex = 0; >@@ -494,8 +623,10 @@ void JIT::privateCompileSlowCases() > const Instruction* currentInstruction = m_codeBlock->instructions().at(m_bytecodeOffset).ptr(); > > RareCaseProfile* rareCaseProfile = 0; >- if (shouldEmitProfiling()) >- rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset); >+ if (shouldEmitProfiling()) { >+ m_compileData.rareCaseProfiles.append(std::make_unique<RareCaseProfile>(m_bytecodeOffset)); >+ rareCaseProfile = m_compileData.rareCaseProfiles.last().get(); >+ } > > if (JITInternal::verbose) > dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset()); >@@ -653,6 +784,7 @@ void JIT::compileWithoutLinking(JITCompi > > if (UNLIKELY(Options::dumpDisassembly() || (m_vm->m_perBytecodeProfiler && Options::disassembleBaselineForProfiler()))) > m_disassembler = std::make_unique<JITDisassembler>(m_codeBlock); >+ > if (UNLIKELY(m_vm->m_perBytecodeProfiler)) { > m_compilation = adoptRef( > new Profiler::Compilation( >@@ -660,7 +792,102 @@ void JIT::compileWithoutLinking(JITCompi > Profiler::Baseline)); > m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock); > } >- >+ >+ unsigned instructionCount = m_codeBlock->instructions().size(); >+ if (m_isTracelet) { >+ //if (m_requiredBytecodeToCompile) >+ // dataLogLn("Required bytecode to compile = bc#", *m_requiredBytecodeToCompile); >+ const Instruction* firstTraceHint = nullptr; >+ auto start = MonotonicTime::now(); >+ for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount; ) { >+ const Instruction* currentInstruction = m_codeBlock->instructions().at(bytecodeOffset).ptr(); >+ if (currentInstruction->opcodeID() == op_trace_hint) { >+ auto bytecode = currentInstruction->as<OpTraceHint>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ >+ //dataLogLn("Looking at trace: [", metadata.m_traceProfile.start, ",", metadata.m_traceProfile.end, ")"); >+ bool shouldCompile = [&] { >+ if (!firstTraceHint && m_requiredBytecodeToCompile && *m_requiredBytecodeToCompile <= metadata.m_traceProfile.start) >+ metadata.m_entrypoint = 1; >+ >+ if (m_requiredBytecodeToCompile && metadata.m_traceProfile.start <= *m_requiredBytecodeToCompile && *m_requiredBytecodeToCompile < metadata.m_traceProfile.end) { >+ //dataLogLn("compiling trace that contains m_requiredBytecodeToCompile bc#", *m_requiredBytecodeToCompile); >+ metadata.m_entrypoint = 1; >+ } >+ >+ // Haven't tripped tier up yet. >+ if (metadata.m_entrypoint != 1) { >+ //dataLogLn("\t! should compile"); >+ return false; >+ } >+ >+ // Already compiled. 
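>+ // m_entrypoint appears to act as a small state machine: anything but 1 means the
>+ // trace hint has not tripped tier-up, 1 means a compile of this trace is requested,
>+ // and >= 2 means it is already compiled (set after the LinkBuffer is created, below).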
>+ if (metadata.m_entrypoint >= 2) { >+ //dataLogLn("\thas entrypoint already"); >+ //dataLogLn("\tin hash table: ", m_priorCode->m_codeLocations.contains(bytecodeOffset)); >+ return false; >+ } >+ >+ // OOPS: This is only safe to do because we: >+ // - Link on the main thread. >+ // - Never compile the same CodeBlock* concurrently. >+ // Is this OK to rely on? >+ if (m_priorCode && m_priorCode->m_codeLocations.contains(bytecodeOffset)) { >+ //dataLogLn("\talready compiled in code locs"); >+ // Already compiled. >+ return false; >+ } >+ >+ return true; >+ }(); >+ >+ if (!firstTraceHint) { >+ firstTraceHint = currentInstruction; >+ if (shouldCompile) { >+ m_traces.append(TraceProfile { 0, metadata.m_traceProfile.start }); >+ m_isCompilingPrologue = true; >+ } >+ } >+ >+ if (shouldCompile) { >+ //dataLogLn("compiling trace: [", metadata.m_traceProfile.start, ", ", metadata.m_traceProfile.end, ")"); >+ m_traces.append(metadata.m_traceProfile); >+ //dataLogLn("Compiling trace: ", RawPointer(m_codeBlock), " bc#", m_traces.last().start, " traceID: ", compileID); >+ } >+ } >+ >+ // OOPS: Would be good to not unconditionally compile catches. >+ if (currentInstruction->opcodeID() == op_catch) { >+ if (!m_priorCode || !m_priorCode->m_codeLocations.contains(bytecodeOffset)) >+ m_traces.append(TraceProfile { bytecodeOffset, bytecodeOffset + static_cast<unsigned>(currentInstruction->size()) }); >+ } >+ >+ bytecodeOffset += currentInstruction->size(); >+ } >+ auto end = MonotonicTime::now(); >+ timeMakingTraces += end - start; >+ //dataLogLn("time making traces: ", timeMakingTraces.milliseconds()); >+ } else { >+ TraceProfile wholeTrace; >+ wholeTrace.start = 0; >+ wholeTrace.end = instructionCount; >+ m_traces.append(wholeTrace); >+ m_isCompilingPrologue = true; >+ } >+ >+ //MonotonicTime after { }; >+ //if (UNLIKELY(computeCompileTimes())) { >+ // after = MonotonicTime::now(); >+ >+ // if (Options::reportTotalCompileTimes()) >+ // totalBaselineCompileTime += after - before; >+ //} >+ >+ if (!m_traces.size()) { >+ //dataLogLn("No traces to compile!"); >+ // OOPS: Make eager options really affect tier up threshold.
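>+ // Nothing tripped a trace hint, so there is nothing to emit; in the tracelet
>+ // case JIT::link() reports CompilationDeferred when m_traces stays empty.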
>+ } >+ > m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(0, nullptr)); > > Label entryLabel(this); >@@ -671,108 +898,130 @@ void JIT::compileWithoutLinking(JITCompi > if (random() & 1) > nop(); > >- emitFunctionPrologue(); >- emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock); >+ if (m_isCompilingPrologue) { >+ emitFunctionPrologue(); >+ emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock); >+ } > > Label beginLabel(this); >+ if (verboseProbes) >+ print("executing JIT prologue for: ", m_codeBlock->inferredName().data(), "#", m_codeBlock->hashAsStringIfPossible().data(), "\n"); > >- sampleCodeBlock(m_codeBlock); >+ JumpList stackOverflow; >+ if (m_isCompilingPrologue) { >+ sampleCodeBlock(m_codeBlock); > #if ENABLE(OPCODE_SAMPLING) >- sampleInstruction(m_codeBlock->instructions().begin()); >+ sampleInstruction(m_codeBlock->instructions().begin()); > #endif > >- int frameTopOffset = stackPointerOffsetFor(m_codeBlock) * sizeof(Register); >- unsigned maxFrameSize = -frameTopOffset; >- addPtr(TrustedImm32(frameTopOffset), callFrameRegister, regT1); >- JumpList stackOverflow; >- if (UNLIKELY(maxFrameSize > Options::reservedZoneSize())) >- stackOverflow.append(branchPtr(Above, regT1, callFrameRegister)); >- stackOverflow.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfSoftStackLimit()), regT1)); >- >- move(regT1, stackPointerRegister); >- checkStackPointerAlignment(); >- if (Options::zeroStackFrame()) >- clearStackFrame(callFrameRegister, stackPointerRegister, regT0, maxFrameSize); >- >- emitSaveCalleeSaves(); >- emitMaterializeTagCheckRegisters(); >- >- if (m_codeBlock->codeType() == FunctionCode) { >- ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max()); >- if (shouldEmitProfiling()) { >- for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) { >- // If this is a constructor, then we want to put in a dummy profiling site (to >- // keep things consistent) but we don't actually want to record the dummy value. >- if (m_codeBlock->isConstructor() && !argument) >- continue; >- int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register)); >+ int frameTopOffset = stackPointerOffsetFor(m_codeBlock) * sizeof(Register); >+ unsigned maxFrameSize = -frameTopOffset; >+ addPtr(TrustedImm32(frameTopOffset), callFrameRegister, regT1); >+ if (UNLIKELY(maxFrameSize > Options::reservedZoneSize())) >+ stackOverflow.append(branchPtr(Above, regT1, callFrameRegister)); >+ stackOverflow.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfSoftStackLimit()), regT1)); >+ >+ move(regT1, stackPointerRegister); >+ checkStackPointerAlignment(); >+ if (Options::zeroStackFrame()) >+ clearStackFrame(callFrameRegister, stackPointerRegister, regT0, maxFrameSize); >+ >+ emitSaveCalleeSaves(); >+ emitMaterializeTagCheckRegisters(); >+ >+ if (m_codeBlock->codeType() == FunctionCode) { >+ ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max()); >+ if (shouldEmitProfiling()) { >+ for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) { >+ // If this is a constructor, then we want to put in a dummy profiling site (to >+ // keep things consistent) but we don't actually want to record the dummy value. 
>+ if (m_codeBlock->isConstructor() && !argument) >+ continue; >+ int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register)); > #if USE(JSVALUE64) >- load64(Address(callFrameRegister, offset), regT0); >+ load64(Address(callFrameRegister, offset), regT0); > #elif USE(JSVALUE32_64) >- load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); >- load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); >+ load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); >+ load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); > #endif >- emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument)); >+ emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument)); >+ } > } > } > } > >- RELEASE_ASSERT(!JITCode::isJIT(m_codeBlock->jitType())); >+ //RELEASE_ASSERT(!JITCode::isJIT(m_codeBlock->jitType())); > > privateCompileMainPass(); >- privateCompileLinkPass(); >+ >+ //privateCompileLinkPass(); >+ if (m_disassembler) >+ m_disassembler->setStartOfSlowPath(label()); > privateCompileSlowCases(); > > if (m_disassembler) > m_disassembler->setEndOfSlowPath(label()); > m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin()); > >- stackOverflow.link(this); >- m_bytecodeOffset = 0; >- if (maxFrameExtentForSlowPathCall) >- addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister); >- callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock); >- >- // If the number of parameters is 1, we never require arity fixup. >- bool requiresArityFixup = m_codeBlock->m_numParameters != 1; >- if (m_codeBlock->codeType() == FunctionCode && requiresArityFixup) { >- m_arityCheck = label(); >- store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined); >- emitFunctionPrologue(); >- emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock); >- >- load32(payloadFor(CallFrameSlot::argumentCount), regT1); >- branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this); >- >+ if (m_isCompilingPrologue) { >+ stackOverflow.link(this); > m_bytecodeOffset = 0; >- > if (maxFrameExtentForSlowPathCall) > addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister); >- callOperationWithCallFrameRollbackOnException(m_codeBlock->isConstructor() ? operationConstructArityCheck : operationCallArityCheck); >- if (maxFrameExtentForSlowPathCall) >- addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister); >- branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this); >- move(returnValueGPR, GPRInfo::argumentGPR0); >- emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).retaggedCode<NoPtrTag>()); >+ callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock); >+ >+ // If the number of parameters is 1, we never require arity fixup. 
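>+ // (m_numParameters counts the implicit |this|, which every call provides,
>+ // so a declared parameter count of 1 can never come up short.)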
>+ bool requiresArityFixup = m_codeBlock->m_numParameters != 1; >+ if (m_codeBlock->codeType() == FunctionCode && requiresArityFixup) { >+ m_arityCheck = label(); >+ if (verboseProbes) >+ print("executing JIT arity check prologue for: ", m_codeBlock->inferredName().data(), "\n"); >+ store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined); >+ emitFunctionPrologue(); >+ emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock); >+ >+ load32(payloadFor(CallFrameSlot::argumentCount), regT1); >+ branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this); >+ >+ m_bytecodeOffset = 0; >+ >+ if (maxFrameExtentForSlowPathCall) >+ addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister); >+ callOperationWithCallFrameRollbackOnException(m_codeBlock->isConstructor() ? operationConstructArityCheck : operationCallArityCheck); >+ if (maxFrameExtentForSlowPathCall) >+ addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister); >+ branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this); >+ move(returnValueGPR, GPRInfo::argumentGPR0); >+ emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).retaggedCode<NoPtrTag>()); > > #if !ASSERT_DISABLED >- m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs. >+ m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs. > #endif > >- jump(beginLabel); >- } else >- m_arityCheck = entryLabel; // Never require arity fixup. >+ jump(beginLabel); >+ } else >+ m_arityCheck = entryLabel; // Never require arity fixup. >+ } > >- ASSERT(m_jmpTable.isEmpty()); >- >+ privateCompileLinkPass(); > privateCompileExceptionHandlers(); > > if (m_disassembler) > m_disassembler->setEndOfCode(label()); > m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin()); > >+ // OOPS: Need to clear stale codeblock data on failure! > m_linkBuffer = std::unique_ptr<LinkBuffer>(new LinkBuffer(*this, m_codeBlock, effort)); > >+ for (auto& trace : m_traces) { >+ const Instruction& instruction = *m_codeBlock->instructions().at(trace.start).ptr(); >+ if (instruction.opcodeID() != op_trace_hint) >+ continue; >+ auto bytecode = instruction.as<OpTraceHint>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ metadata.m_entrypoint = 2; >+ } >+ > MonotonicTime after { }; > if (UNLIKELY(computeCompileTimes())) { > after = MonotonicTime::now(); >@@ -780,6 +1029,7 @@ void JIT::compileWithoutLinking(JITCompi > if (Options::reportTotalCompileTimes()) > totalBaselineCompileTime += after - before; > } >+ > if (UNLIKELY(reportCompileTimes())) { > CString codeBlockName = toCString(*m_codeBlock); > >@@ -789,48 +1039,25 @@ > > CompilationResult JIT::link() > { >+ if (m_isTracelet && m_traces.isEmpty()) >+ return CompilationDeferred; >+ >+ MonotonicTime before { }; >+ if (UNLIKELY(Options::reportTotalCompileTimes())) >+ before = MonotonicTime::now(); >+ > LinkBuffer& patchBuffer = *m_linkBuffer; > > if (patchBuffer.didFailToAllocate()) > return CompilationFailed; > >- // Translate vPC offsets into addresses in JIT generated code, for switch tables.
>- for (auto& record : m_switches) { >- unsigned bytecodeOffset = record.bytecodeOffset; >- >- if (record.type != SwitchRecord::String) { >- ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character); >- ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size()); >- >- auto* simpleJumpTable = record.jumpTable.simpleJumpTable; >- simpleJumpTable->ctiDefault = patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + record.defaultOffset]); >- >- for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) { >- unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j]; >- simpleJumpTable->ctiOffsets[j] = offset >- ? patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + offset]) >- : simpleJumpTable->ctiDefault; >- } >- } else { >- ASSERT(record.type == SwitchRecord::String); >- >- auto* stringJumpTable = record.jumpTable.stringJumpTable; >- stringJumpTable->ctiDefault = >- patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + record.defaultOffset]); >- >- for (auto& location : stringJumpTable->offsetTable.values()) { >- unsigned offset = location.branchOffset; >- location.ctiOffset = offset >- ? patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + offset]) >- : stringJumpTable->ctiDefault; >- } >- } >- } >- > for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) { > HandlerInfo& handler = m_codeBlock->exceptionHandler(i); >+ > // FIXME: <rdar://problem/39433318>. >- handler.nativeCode = patchBuffer.locationOf<ExceptionHandlerPtrTag>(m_labels[handler.target]); >+ auto iter = m_labels.find(handler.target); >+ if (iter != m_labels.end()) >+ handler.nativeCode = patchBuffer.locationOf<ExceptionHandlerPtrTag>(iter->value); > } > > for (auto& record : m_calls) { >@@ -845,7 +1072,7 @@ CompilationResult JIT::link() > finalizeInlineCaches(m_instanceOfs, patchBuffer); > > if (m_byValCompilationInfo.size()) { >- CodeLocationLabel<ExceptionHandlerPtrTag> exceptionHandler = patchBuffer.locationOf<ExceptionHandlerPtrTag>(m_exceptionHandler); >+ CodeLocationLabel<ExceptionHandlerPtrTag> exceptionHandler = m_exceptionHandler.isSet() ? 
patchBuffer.locationOf<ExceptionHandlerPtrTag>(m_exceptionHandler) : m_priorCode->m_exceptionHandler; > > for (const auto& byValCompilationInfo : m_byValCompilationInfo) { > PatchableJump patchableNotIndexJump = byValCompilationInfo.notIndexJump; >@@ -878,15 +1105,12 @@ CompilationResult JIT::link() > patchBuffer.locationOfNearCall<JSInternalPtrTag>(compilationInfo.hotPathOther)); > } > >- JITCodeMap jitCodeMap; >- for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) { >- if (m_labels[bytecodeOffset].isSet()) >- jitCodeMap.append(bytecodeOffset, patchBuffer.locationOf<JSEntryPtrTag>(m_labels[bytecodeOffset])); >- } >- jitCodeMap.finish(); >- m_codeBlock->setJITCodeMap(WTFMove(jitCodeMap)); >- >- MacroAssemblerCodePtr<JSEntryPtrTag> withArityCheck = patchBuffer.locationOf<JSEntryPtrTag>(m_arityCheck); >+ //JITCodeMap jitCodeMap; >+ //for (auto entry : m_labels) { >+ // jitCodeMap.append(entry.key, patchBuffer.locationOf<JSEntryPtrTag>(entry.value)); >+ //} >+ //jitCodeMap.finish(); >+ //m_codeBlock->setJITCodeMap(WTFMove(jitCodeMap)); > > if (Options::dumpDisassembly()) { > m_disassembler->dump(patchBuffer); >@@ -907,15 +1131,94 @@ CompilationResult JIT::link() > > m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->add( > static_cast<double>(result.size()) / >- static_cast<double>(m_codeBlock->instructionsSize())); >+ static_cast<double>(m_numBytecodesCompiled)); >+ >+ TraceletJITCode* traceletJITCode; >+ RefPtr<JITCode> jitCode = m_codeBlock->jitCode(); >+ if (!jitCode) { >+ // OOPS: should shrinkToFit perhaps for all of these? This code path is only taken when useLLInt=0 >+ //m_codeBlock->shrinkToFit(CodeBlock::LateShrink); >+ RELEASE_ASSERT(m_isCompilingPrologue); >+ MacroAssemblerCodePtr<JSEntryPtrTag> withArityCheck = patchBuffer.locationOf<JSEntryPtrTag>(m_arityCheck); >+ traceletJITCode = new TraceletJITCode(result, withArityCheck, JITType::BaselineJIT); >+ m_codeBlock->setJITCode( >+ adoptRef(*traceletJITCode)); >+ m_shouldInstallCode = true; >+ } else if (jitCode->isTraceletJITCode()) >+ traceletJITCode = static_cast<TraceletJITCode*>(jitCode.get()); >+ else { >+ RELEASE_ASSERT(jitCode->isJITCodeWithCodeRef()); >+ JITCodeWithCodeRef* jitCodeWithCodeRef = static_cast<JITCodeWithCodeRef*>(jitCode.get()); >+ MacroAssemblerCodeRef<JSEntryPtrTag> codeRef = jitCodeWithCodeRef->codeRef(); >+ MacroAssemblerCodePtr<JSEntryPtrTag> arityCheck = jitCodeWithCodeRef->addressForCall(MustCheckArity); >+ traceletJITCode = new TraceletJITCode(codeRef, arityCheck, JITType::BaselineJIT); >+ //dataLogLn("Allocated tracelet JIT code: ", RawPointer(traceletJITCode)); >+ m_codeBlock->setJITCode(adoptRef(*traceletJITCode)); >+ m_shouldInstallCode = true; >+ } >+ >+ //traceletJITCode->m_codeRefs.append(WTFMove(result)); >+ traceletJITCode->m_codeRefs.append(result.retagged<JITTraceletPtrTag>()); >+ >+ for (const auto& entry : m_locationsOfJumpToLLInt) { >+ for (auto pair : entry.value) { >+ Label start = pair.first; >+ Label end = pair.second; >+ MacroAssemblerCodePtr<JITTraceletPtrTag> startPtr = patchBuffer.locationOf<JITTraceletPtrTag>(start); >+ MacroAssemblerCodePtr<JITTraceletPtrTag> endPtr = patchBuffer.locationOf<JITTraceletPtrTag>(end); >+ RELEASE_ASSERT(endPtr.dataLocation<uintptr_t>() - startPtr.dataLocation<uintptr_t>() >= static_cast<uintptr_t>(MacroAssembler::maxJumpReplacementSize())); >+ >+ traceletJITCode->m_locationsOfJumpsToLLIntBytecode.add(entry.key, Vector<MacroAssemblerCodePtr<JITTraceletPtrTag>>()).iterator->value.append(startPtr); >+ } >+ } > >- 
m_codeBlock->shrinkToFit(CodeBlock::LateShrink); >- m_codeBlock->setJITCode( >- adoptRef(*new DirectJITCode(result, withArityCheck, JITType::BaselineJIT))); >+ if (m_isCompilingPrologue) { >+ if (verboseProbes) >+ dataLogLn("compiling prologue: ", RawPointer(result.code().executableAddress())); >+ traceletJITCode->installPrologue(result, patchBuffer.locationOf<JSEntryPtrTag>(m_arityCheck)); >+ m_shouldInstallCode = true; >+ } >+ >+ for (auto entry : m_labels) { >+ unsigned bytecodeOffset = entry.key; >+ auto codeLabel = patchBuffer.locationOf<JSEntryPtrTag>(entry.value); >+ traceletJITCode->m_codeLocations.add(bytecodeOffset, codeLabel.retagged<JITTraceletPtrTag>()); >+ >+ auto iter = traceletJITCode->m_locationsOfJumpsToLLIntBytecode.find(bytecodeOffset); >+ if (iter != traceletJITCode->m_locationsOfJumpsToLLIntBytecode.end()) { >+ if (verboseProbes) >+ dataLogLn("repatching prior jump to LLInt to go to new JIT code: bc#", bytecodeOffset); >+ for (auto codePtr : iter->value) { >+ CCallHelpers jit; >+ auto jump = jit.jump(); >+ >+ LinkBuffer linkBuffer(jit, codePtr, MacroAssembler::maxJumpReplacementSize()); >+ RELEASE_ASSERT(linkBuffer.isValid()); >+ if (verboseProbes) >+ dataLogLn("\trepatching from: ", RawPointer(codePtr.dataLocation())); >+ linkBuffer.link(jump, codeLabel); >+ FINALIZE_CODE(linkBuffer, NoPtrTag, "TraceletJIT: linking constant jump away from LLInt to newly allocated JIT code for bc#%d", bytecodeOffset); >+ } >+ >+ traceletJITCode->m_locationsOfJumpsToLLIntBytecode.remove(iter); >+ } >+ } >+ >+ if (m_exceptionChecksWithCallFrameRollbackLabel.isSet()) >+ traceletJITCode->m_exceptionCheckWithCallFrameRollback = patchBuffer.locationOf<ExceptionHandlerPtrTag>(m_exceptionChecksWithCallFrameRollbackLabel); >+ if (m_exceptionHandler.isSet()) >+ traceletJITCode->m_exceptionHandler = patchBuffer.locationOf<ExceptionHandlerPtrTag>(m_exceptionHandler); > > if (JITInternal::verbose) > dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start().untaggedPtr(), result.executableMemory()->end().untaggedPtr()); > >+ registerCompileData(); >+ >+ if (UNLIKELY(Options::reportTotalCompileTimes())) { >+ MonotonicTime after = MonotonicTime::now(); >+ totalBaselineCompileTime += after - before; >+ } >+ > return CompilationSuccessful; > } > >@@ -926,44 +1229,64 @@ CompilationResult JIT::privateCompile(JI > return link(); > } > >+void JIT::registerCompileData() >+{ >+ m_codeBlock->adoptCompileData(WTFMove(m_compileData)); >+} >+ > void JIT::privateCompileExceptionHandlers() > { > if (!m_exceptionChecksWithCallFrameRollback.empty()) { >- m_exceptionChecksWithCallFrameRollback.link(this); >+ if (m_priorCode && m_priorCode->m_exceptionCheckWithCallFrameRollback) { >+ addLinkTask([=] (LinkBuffer& linkBuffer) { >+ linkBuffer.link(m_exceptionChecksWithCallFrameRollback, m_priorCode->m_exceptionCheckWithCallFrameRollback); >+ }); >+ } else { >+ m_exceptionChecksWithCallFrameRollback.link(this); >+ m_exceptionChecksWithCallFrameRollbackLabel = label(); > >- copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame); >+ copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame); > >- // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*). >+ // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
> >- move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0); >- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); >+ move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0); >+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); > > #if CPU(X86) >- // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer! >- poke(GPRInfo::argumentGPR0); >- poke(GPRInfo::argumentGPR1, 1); >+ // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer! >+ poke(GPRInfo::argumentGPR0); >+ poke(GPRInfo::argumentGPR1, 1); > #endif >- m_calls.append(CallRecord(call(OperationPtrTag), std::numeric_limits<unsigned>::max(), FunctionPtr<OperationPtrTag>(lookupExceptionHandlerFromCallerFrame))); >- jumpToExceptionHandler(*vm()); >+ m_calls.append(CallRecord(call(OperationPtrTag), std::numeric_limits<unsigned>::max(), FunctionPtr<OperationPtrTag>(lookupExceptionHandlerFromCallerFrame))); >+ jumpToExceptionHandler(*vm()); >+ } > } > > if (!m_exceptionChecks.empty() || m_byValCompilationInfo.size()) { >- m_exceptionHandler = label(); >- m_exceptionChecks.link(this); >+ if (m_priorCode && m_priorCode->m_exceptionHandler) { >+ if (!m_exceptionChecks.empty()) { >+ addLinkTask([=] (LinkBuffer& linkBuffer) { >+ linkBuffer.link(m_exceptionChecks, m_priorCode->m_exceptionHandler); >+ }); >+ } >+ } else { >+ m_exceptionChecks.link(this); >+ m_exceptionHandler = label(); > >- copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame); >+ copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame); > >- // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*). >- move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0); >- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); >+ // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*). >+ move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0); >+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); > > #if CPU(X86) >- // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer! >- poke(GPRInfo::argumentGPR0); >- poke(GPRInfo::argumentGPR1, 1); >+ // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer! 
>+ poke(GPRInfo::argumentGPR0); >+ poke(GPRInfo::argumentGPR1, 1); > #endif >- m_calls.append(CallRecord(call(OperationPtrTag), std::numeric_limits<unsigned>::max(), FunctionPtr<OperationPtrTag>(lookupExceptionHandler))); >- jumpToExceptionHandler(*vm()); >+ m_calls.append(CallRecord(call(OperationPtrTag), std::numeric_limits<unsigned>::max(), FunctionPtr<OperationPtrTag>(lookupExceptionHandler))); >+ jumpToExceptionHandler(*vm()); >+ } > } > } > >Index: Source/JavaScriptCore/jit/JIT.h >=================================================================== >--- Source/JavaScriptCore/jit/JIT.h (revision 245246) >+++ Source/JavaScriptCore/jit/JIT.h (working copy) >@@ -192,7 +192,7 @@ namespace JSC { > static const int patchPutByIdDefaultOffset = 256; > > public: >- JIT(VM*, CodeBlock* = 0, unsigned loopOSREntryBytecodeOffset = 0); >+ JIT(VM*, CodeBlock* = 0); > ~JIT(); > > void compileWithoutLinking(JITCompilationEffort); >@@ -200,9 +200,12 @@ namespace JSC { > > void doMainThreadPreparationBeforeCompile(); > >- static CompilationResult compile(VM* vm, CodeBlock* codeBlock, JITCompilationEffort effort, unsigned bytecodeOffset = 0) >+ static CompilationResult compileNow(VM* vm, CodeBlock* codeBlock, JITCompilationEffort effort, unsigned requiredBytecodeOffset, bool isTracelet) > { >- return JIT(vm, codeBlock, bytecodeOffset).privateCompile(effort); >+ JIT jit(vm, codeBlock); >+ jit.m_isTracelet = isTracelet; >+ jit.m_requiredBytecodeToCompile = requiredBytecodeOffset; >+ return jit.privateCompile(effort); > } > > static void compileGetByVal(const ConcurrentJSLocker& locker, VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) >@@ -210,6 +213,7 @@ namespace JSC { > JIT jit(vm, codeBlock); > jit.m_bytecodeOffset = byValInfo->bytecodeIndex; > jit.privateCompileGetByVal(locker, byValInfo, returnAddress, arrayMode); >+ jit.registerCompileData(); > } > > static void compileGetByValWithCachedId(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, const Identifier& propertyName) >@@ -217,6 +221,7 @@ namespace JSC { > JIT jit(vm, codeBlock); > jit.m_bytecodeOffset = byValInfo->bytecodeIndex; > jit.privateCompileGetByValWithCachedId(byValInfo, returnAddress, propertyName); >+ jit.registerCompileData(); > } > > static void compilePutByVal(const ConcurrentJSLocker& locker, VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) >@@ -224,6 +229,7 @@ namespace JSC { > JIT jit(vm, codeBlock); > jit.m_bytecodeOffset = byValInfo->bytecodeIndex; > jit.privateCompilePutByVal<OpPutByVal>(locker, byValInfo, returnAddress, arrayMode); >+ jit.registerCompileData(); > } > > static void compileDirectPutByVal(const ConcurrentJSLocker& locker, VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) >@@ -231,6 +237,7 @@ namespace JSC { > JIT jit(vm, codeBlock); > jit.m_bytecodeOffset = byValInfo->bytecodeIndex; > jit.privateCompilePutByVal<OpPutByValDirect>(locker, byValInfo, returnAddress, arrayMode); >+ jit.registerCompileData(); > } > > template<typename Op> >@@ -239,6 +246,7 @@ namespace JSC { > JIT jit(vm, codeBlock); > jit.m_bytecodeOffset = byValInfo->bytecodeIndex; > jit.privateCompilePutByValWithCachedId<Op>(byValInfo, returnAddress, putKind, propertyName); >+ jit.registerCompileData(); > } > > static void compileHasIndexedProperty(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode 
arrayMode) >@@ -246,6 +254,7 @@ namespace JSC { > JIT jit(vm, codeBlock); > jit.m_bytecodeOffset = byValInfo->bytecodeIndex; > jit.privateCompileHasIndexedProperty(byValInfo, returnAddress, arrayMode); >+ jit.registerCompileData(); > } > > static unsigned frameRegisterCountFor(CodeBlock*); >@@ -571,6 +580,7 @@ namespace JSC { > void emit_op_jbeloweq(const Instruction*); > void emit_op_jtrue(const Instruction*); > void emit_op_loop_hint(const Instruction*); >+ void emit_op_trace_hint(const Instruction*); > void emit_op_check_traps(const Instruction*); > void emit_op_nop(const Instruction*); > void emit_op_super_sampler_begin(const Instruction*); >@@ -669,6 +679,7 @@ namespace JSC { > void emitSlow_op_jnstricteq(const Instruction*, Vector<SlowCaseEntry>::iterator&); > void emitSlow_op_jtrue(const Instruction*, Vector<SlowCaseEntry>::iterator&); > void emitSlow_op_loop_hint(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_trace_hint(const Instruction*, Vector<SlowCaseEntry>::iterator&); > void emitSlow_op_check_traps(const Instruction*, Vector<SlowCaseEntry>::iterator&); > void emitSlow_op_mod(const Instruction*, Vector<SlowCaseEntry>::iterator&); > void emitSlow_op_mul(const Instruction*, Vector<SlowCaseEntry>::iterator&); >@@ -915,10 +926,13 @@ namespace JSC { > template<typename BinaryOp> > ArithProfile copiedArithProfile(BinaryOp); > >+ void registerCompileData(); >+ > Interpreter* m_interpreter; > > Vector<CallRecord> m_calls; >- Vector<Label> m_labels; >+ //Vector<Label> m_labels; >+ HashMap<unsigned, Label, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> m_labels; > Vector<JITGetByIdGenerator> m_getByIds; > Vector<JITGetByIdWithThisGenerator> m_getByIdsWithThis; > Vector<JITPutByIdGenerator> m_putByIds; >@@ -962,7 +976,17 @@ namespace JSC { > bool m_canBeOptimizedOrInlined; > bool m_shouldEmitProfiling; > bool m_shouldUseIndexMasking; >- unsigned m_loopOSREntryBytecodeOffset { 0 }; >+ public: >+ bool m_isTracelet { false }; >+ Optional<unsigned> m_requiredBytecodeToCompile; >+ bool m_isCompilingPrologue { false }; >+ bool m_shouldInstallCode { false }; >+ Vector<TraceProfile> m_traces; >+ RefPtr<TraceletJITCode> m_priorCode; >+ HashMap<unsigned, Vector<std::pair<Label, Label>>, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> m_locationsOfJumpToLLInt; >+ Label m_exceptionChecksWithCallFrameRollbackLabel; >+ unsigned m_numBytecodesCompiled { 0 }; >+ CodeBlock::JITData::CompileData m_compileData; > }; > > } // namespace JSC >Index: Source/JavaScriptCore/jit/JITArithmetic.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITArithmetic.cpp (revision 245246) >+++ Source/JavaScriptCore/jit/JITArithmetic.cpp (working copy) >@@ -453,7 +453,7 @@ void JIT::emitSlow_op_mod(const Instruct > void JIT::emit_op_negate(const Instruction* currentInstruction) > { > ArithProfile* arithProfile = &currentInstruction->as<OpNegate>().metadata(m_codeBlock).m_arithProfile; >- JITNegIC* negateIC = m_codeBlock->addJITNegIC(arithProfile); >+ JITNegIC* negateIC = m_compileData.negICs.add(arithProfile); > m_instructionToMathIC.add(currentInstruction, negateIC); > emitMathICFast<OpNegate>(negateIC, currentInstruction, operationArithNegateProfiled, operationArithNegate); > } >@@ -641,7 +641,7 @@ ALWAYS_INLINE static OperandTypes getOpe > void JIT::emit_op_add(const Instruction* currentInstruction) > { > ArithProfile* arithProfile = &currentInstruction->as<OpAdd>().metadata(m_codeBlock).m_arithProfile; >- 
JITAddIC* addIC = m_codeBlock->addJITAddIC(arithProfile); >+ JITAddIC* addIC = m_compileData.addICs.add(arithProfile); > m_instructionToMathIC.add(currentInstruction, addIC); > emitMathICFast<OpAdd>(addIC, currentInstruction, operationValueAddProfiled, operationValueAdd); > } >@@ -960,7 +960,7 @@ void JIT::emit_op_div(const Instruction* > void JIT::emit_op_mul(const Instruction* currentInstruction) > { > ArithProfile* arithProfile = &currentInstruction->as<OpMul>().metadata(m_codeBlock).m_arithProfile; >- JITMulIC* mulIC = m_codeBlock->addJITMulIC(arithProfile); >+ JITMulIC* mulIC = m_compileData.mulICs.add(arithProfile); > m_instructionToMathIC.add(currentInstruction, mulIC); > emitMathICFast<OpMul>(mulIC, currentInstruction, operationValueMulProfiled, operationValueMul); > } >@@ -976,7 +976,7 @@ void JIT::emitSlow_op_mul(const Instruct > void JIT::emit_op_sub(const Instruction* currentInstruction) > { > ArithProfile* arithProfile = &currentInstruction->as<OpSub>().metadata(m_codeBlock).m_arithProfile; >- JITSubIC* subIC = m_codeBlock->addJITSubIC(arithProfile); >+ JITSubIC* subIC = m_compileData.subICs.add(arithProfile); > m_instructionToMathIC.add(currentInstruction, subIC); > emitMathICFast<OpSub>(subIC, currentInstruction, operationValueSubProfiled, operationValueSub); > } >Index: Source/JavaScriptCore/jit/JITCall32_64.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITCall32_64.cpp (revision 245246) >+++ Source/JavaScriptCore/jit/JITCall32_64.cpp (working copy) >@@ -245,7 +245,7 @@ void JIT::compileCallEvalSlowCase(const > linkAllSlowCases(iter); > > auto bytecode = instruction->as<OpCallEval>(); >- CallLinkInfo* info = m_codeBlock->addCallLinkInfo(); >+ CallLinkInfo* info = m_compileData.callLinkInfos.add(); > info->setUpCall(CallLinkInfo::Call, CodeOrigin(m_bytecodeOffset), regT0); > > int registerOffset = -bytecode.m_argv; >@@ -284,7 +284,7 @@ void JIT::compileOpCall(const Instructio > */ > CallLinkInfo* info = nullptr; > if (opcodeID != op_call_eval) >- info = m_codeBlock->addCallLinkInfo(); >+ info = m_compileData.callLinkInfos.add(); > compileSetupFrame(bytecode, info); > // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized. > >Index: Source/JavaScriptCore/jit/JITCall.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITCall.cpp (revision 245246) >+++ Source/JavaScriptCore/jit/JITCall.cpp (working copy) >@@ -154,7 +154,7 @@ void JIT::compileCallEvalSlowCase(const > linkAllSlowCases(iter); > > auto bytecode = instruction->as<OpCallEval>(); >- CallLinkInfo* info = m_codeBlock->addCallLinkInfo(); >+ CallLinkInfo* info = m_compileData.callLinkInfos.add(); > info->setUpCall(CallLinkInfo::Call, CodeOrigin(m_bytecodeOffset), regT0); > > int registerOffset = -bytecode.m_argv; >@@ -222,7 +222,7 @@ void JIT::compileOpCall(const Instructio > */ > CallLinkInfo* info = nullptr; > if (opcodeID != op_call_eval) >- info = m_codeBlock->addCallLinkInfo(); >+ info = m_compileData.callLinkInfos.add(); > compileSetupFrame(bytecode, info); > > // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.
>Index: Source/JavaScriptCore/jit/JITCode.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITCode.cpp (revision 245246) >+++ Source/JavaScriptCore/jit/JITCode.cpp (working copy) >@@ -36,10 +36,18 @@ JITCode::JITCode(JITType jitType, ShareA > : m_jitType(jitType) > , m_shareAttribute(shareAttribute) > { >+ //dataLogLn("Allocated JITCode: ", RawPointer(this)); >+ //WTFReportBacktrace(); >+ //dataLogLn(); >+ //dataLogLn(); > } > > JITCode::~JITCode() > { >+ //dataLogLn("Deallocated JITCode: ", RawPointer(this)); >+ //WTFReportBacktrace(); >+ //dataLogLn(); >+ //dataLogLn(); > } > > const char* JITCode::typeName(JITType jitType) >@@ -237,6 +245,12 @@ RegisterSet JITCode::liveRegistersToPres > } > #endif > >+void TraceletJITCode::installPrologue(CodeRef<JSEntryPtrTag> entry, CodePtr<JSEntryPtrTag> withArityCheck) >+{ >+ m_ref = WTFMove(entry); >+ m_withArityCheck = withArityCheck; >+} >+ > } // namespace JSC > > namespace WTF { >Index: Source/JavaScriptCore/jit/JITCode.h >=================================================================== >--- Source/JavaScriptCore/jit/JITCode.h (revision 245246) >+++ Source/JavaScriptCore/jit/JITCode.h (working copy) >@@ -48,6 +48,7 @@ class Signature; > } > > struct ProtoCallFrame; >+class TraceletJITCode; > class TrackedReferences; > class VM; > >@@ -61,6 +62,7 @@ enum class JITType : uint8_t { > }; > > class JITCode : public ThreadSafeRefCounted<JITCode> { >+ using Base = ThreadSafeRefCounted<JITCode>; > public: > template<PtrTag tag> using CodePtr = MacroAssemblerCodePtr<tag>; > template<PtrTag tag> using CodeRef = MacroAssemblerCodeRef<tag>; >@@ -163,6 +165,8 @@ public: > Shared > }; > >+ TraceletJITCode* asTracelet(); >+ > protected: > JITCode(JITType, JITCode::ShareAttribute = JITCode::ShareAttribute::NotShared); > >@@ -203,6 +207,9 @@ public: > > virtual bool contains(void*) = 0; > >+ virtual bool isTraceletJITCode() const { return false; } >+ virtual bool isJITCodeWithCodeRef() const { return false; } >+ > #if ENABLE(JIT) > virtual RegisterSet liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock*, CallSiteIndex); > virtual Optional<CodeOrigin> findPC(CodeBlock*, void* pc) { UNUSED_PARAM(pc); return WTF::nullopt; } >@@ -233,6 +240,9 @@ public: > size_t size() override; > bool contains(void*) override; > >+ bool isJITCodeWithCodeRef() const override { return true; } >+ CodeRef<JSEntryPtrTag> codeRef() { return m_ref; } >+ > protected: > CodeRef<JSEntryPtrTag> m_ref; > }; >@@ -248,11 +258,40 @@ public: > > protected: > void initializeCodeRefForDFG(CodeRef<JSEntryPtrTag>, CodePtr<JSEntryPtrTag> withArityCheck); >- >-private: > CodePtr<JSEntryPtrTag> m_withArityCheck; > }; > >+class TraceletJITCode : public DirectJITCode { >+ using Base = DirectJITCode; >+public: >+ TraceletJITCode(CodeRef<JSEntryPtrTag> entry, CodePtr<JSEntryPtrTag> withArityCheck, JITType type, JITCode::ShareAttribute shareAttribute = JITCode::ShareAttribute::NotShared) >+ : Base(WTFMove(entry), WTFMove(withArityCheck), type, shareAttribute) >+ { } >+ >+ bool isTraceletJITCode() const override { return true; } >+ >+ CodeLocationLabel<ExceptionHandlerPtrTag> exceptionCheckWithCallFrameRollback() { return m_exceptionCheckWithCallFrameRollback; } >+ CodeLocationLabel<ExceptionHandlerPtrTag> exceptionHandler() { return m_exceptionHandler; } >+ >+ void installPrologue(CodeRef<JSEntryPtrTag> entry, CodePtr<JSEntryPtrTag> withArityCheck); >+ >+ MacroAssemblerCodePtr<JITTraceletPtrTag> findCodeLocation(unsigned bytecodeOffset) >+ { 
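>+ // Returns a null code pointer when no tracelet has been compiled for this
>+ // bytecode offset yet, so callers can fall back to another tier.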
>+ auto iter = m_codeLocations.find(bytecodeOffset); >+ if (iter != m_codeLocations.end()) >+ return iter->value; >+ return { }; >+ } >+ >+public: >+//private: >+ HashMap<unsigned, MacroAssemblerCodePtr<JITTraceletPtrTag>, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> m_codeLocations; >+ HashMap<unsigned, Vector<MacroAssemblerCodePtr<JITTraceletPtrTag>>, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> m_locationsOfJumpsToLLIntBytecode; >+ Vector<CodeRef<JITTraceletPtrTag>> m_codeRefs; >+ CodeLocationLabel<ExceptionHandlerPtrTag> m_exceptionCheckWithCallFrameRollback; >+ CodeLocationLabel<ExceptionHandlerPtrTag> m_exceptionHandler; >+}; >+ > class NativeJITCode : public JITCodeWithCodeRef { > public: > NativeJITCode(JITType); >@@ -273,6 +312,13 @@ private: > const DOMJIT::Signature* m_signature; > }; > >+ALWAYS_INLINE TraceletJITCode* JITCode::asTracelet() >+{ >+ if (isTraceletJITCode()) >+ return static_cast<TraceletJITCode*>(this); >+ return nullptr; >+} >+ > } // namespace JSC > > namespace WTF { >Index: Source/JavaScriptCore/jit/JITCodeMap.h >=================================================================== >--- Source/JavaScriptCore/jit/JITCodeMap.h (revision 245246) >+++ Source/JavaScriptCore/jit/JITCodeMap.h (working copy) >@@ -35,6 +35,7 @@ namespace JSC { > > class JITCodeMap { > private: >+ /* > struct Entry { > Entry() { } > >@@ -44,16 +45,33 @@ private: > { } > > inline unsigned bytecodeIndex() const { return m_bytecodeIndex; } >- inline CodeLocationLabel<JSEntryPtrTag> codeLocation() { return m_codeLocation; } >+ inline CodeLocationLabel<JSEntryPtrTag> codeLocation() const { return m_codeLocation; } > > private: > unsigned m_bytecodeIndex; > CodeLocationLabel<JSEntryPtrTag> m_codeLocation; > }; >+ */ > > public: > void append(unsigned bytecodeIndex, CodeLocationLabel<JSEntryPtrTag> codeLocation) > { >+ m_entries.add(bytecodeIndex, codeLocation); >+ } >+ void finish() {} >+ >+ CodeLocationLabel<JSEntryPtrTag> find(unsigned bytecodeIndex) const >+ { >+ auto iter = m_entries.find(bytecodeIndex); >+ if (iter == m_entries.end()) >+ return CodeLocationLabel<JSEntryPtrTag>(); >+ return iter->value; >+ } >+ >+ /* >+ >+ void append(unsigned bytecodeIndex, CodeLocationLabel<JSEntryPtrTag> codeLocation) >+ { > m_entries.append({ bytecodeIndex, codeLocation }); > } > >@@ -70,11 +88,13 @@ public: > return CodeLocationLabel<JSEntryPtrTag>(); > return entry->codeLocation(); > } >+ */ > > explicit operator bool() const { return m_entries.size(); } > > private: >- Vector<Entry> m_entries; >+ //Vector<Entry> m_entries; >+ HashMap<unsigned, CodeLocationLabel<JSEntryPtrTag>, DefaultHash<unsigned>::Hash, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> m_entries; > }; > > } // namespace JSC >Index: Source/JavaScriptCore/jit/JITDisassembler.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITDisassembler.cpp (revision 245246) >+++ Source/JavaScriptCore/jit/JITDisassembler.cpp (working copy) >@@ -53,7 +53,7 @@ JITDisassembler::~JITDisassembler() > void JITDisassembler::dump(PrintStream& out, LinkBuffer& linkBuffer) > { > dumpHeader(out, linkBuffer); >- dumpDisassembly(out, linkBuffer, m_startOfCode, m_labelForBytecodeIndexInMainPath[0]); >+ dumpDisassembly(out, linkBuffer, m_startOfCode, firstFastLabel()); > > dumpForInstructions(out, linkBuffer, " ", m_labelForBytecodeIndexInMainPath, firstSlowLabel()); > out.print(" (End Of Main Path)\n"); >@@ -75,7 +75,7 @@ void JITDisassembler::reportToProfiler(P > 
dumpHeader(out, linkBuffer); > compilation->addDescription(Profiler::CompiledBytecode(Profiler::OriginStack(), out.toCString())); > out.reset(); >- dumpDisassembly(out, linkBuffer, m_startOfCode, m_labelForBytecodeIndexInMainPath[0]); >+ dumpDisassembly(out, linkBuffer, m_startOfCode, firstFastLabel()); > compilation->addDescription(Profiler::CompiledBytecode(Profiler::OriginStack(), out.toCString())); > > reportInstructions(compilation, linkBuffer, " ", m_labelForBytecodeIndexInMainPath, firstSlowLabel()); >@@ -106,6 +106,15 @@ MacroAssembler::Label JITDisassembler::f > return firstSlowLabel.isSet() ? firstSlowLabel : m_endOfSlowPath; > } > >+MacroAssembler::Label JITDisassembler::firstFastLabel() >+{ >+ for (unsigned i = 0; i < m_labelForBytecodeIndexInMainPath.size(); ++i) { >+ if (m_labelForBytecodeIndexInMainPath[i].isSet()) >+ return m_labelForBytecodeIndexInMainPath[i]; >+ } >+ return m_startOfSlowPath; >+} >+ > Vector<JITDisassembler::DumpedOp> JITDisassembler::dumpVectorForInstructions(LinkBuffer& linkBuffer, const char* prefix, Vector<MacroAssembler::Label>& labels, MacroAssembler::Label endLabel) > { > StringPrintStream out; >Index: Source/JavaScriptCore/jit/JITDisassembler.h >=================================================================== >--- Source/JavaScriptCore/jit/JITDisassembler.h (revision 245246) >+++ Source/JavaScriptCore/jit/JITDisassembler.h (working copy) >@@ -55,6 +55,8 @@ public: > { > m_labelForBytecodeIndexInSlowPath[bytecodeIndex] = label; > } >+ >+ void setStartOfSlowPath(MacroAssembler::Label label) { m_startOfSlowPath = label; } > void setEndOfSlowPath(MacroAssembler::Label label) { m_endOfSlowPath = label; } > void setEndOfCode(MacroAssembler::Label label) { m_endOfCode = label; } > >@@ -64,6 +66,7 @@ public: > > private: > void dumpHeader(PrintStream&, LinkBuffer&); >+ MacroAssembler::Label firstFastLabel(); > MacroAssembler::Label firstSlowLabel(); > > struct DumpedOp { >@@ -81,6 +84,7 @@ private: > MacroAssembler::Label m_startOfCode; > Vector<MacroAssembler::Label> m_labelForBytecodeIndexInMainPath; > Vector<MacroAssembler::Label> m_labelForBytecodeIndexInSlowPath; >+ MacroAssembler::Label m_startOfSlowPath; > MacroAssembler::Label m_endOfSlowPath; > MacroAssembler::Label m_endOfCode; > }; >Index: Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp (revision 245246) >+++ Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp (working copy) >@@ -36,18 +36,16 @@ > > namespace JSC { > >-static StructureStubInfo* garbageStubInfo() >-{ >- static StructureStubInfo* stubInfo = new StructureStubInfo(AccessType::Get); >- return stubInfo; >-} >- > JITInlineCacheGenerator::JITInlineCacheGenerator( >- CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, AccessType accessType, >+ StructureStubInfo* stubInfo, CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, AccessType accessType, > const RegisterSet& usedRegisters) > : m_codeBlock(codeBlock) > { >- m_stubInfo = m_codeBlock ? m_codeBlock->addStubInfo(accessType) : garbageStubInfo(); >+ UNUSED_PARAM(accessType); // OOPS! 
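>+ // Now that the caller allocates the StructureStubInfo up front, accessType is
>+ // only consulted by the sanity check below; presumably the parameter can be
>+ // dropped from these constructors once this is no longer a WIP.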
>+ >+ RELEASE_ASSERT(accessType == stubInfo->accessType); >+ >+ m_stubInfo = stubInfo; > m_stubInfo->codeOrigin = codeOrigin; > m_stubInfo->callSiteIndex = callSite; > >@@ -66,9 +64,9 @@ void JITInlineCacheGenerator::finalize( > } > > JITByIdGenerator::JITByIdGenerator( >- CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, AccessType accessType, >+ StructureStubInfo* stubInfo, CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, AccessType accessType, > const RegisterSet& usedRegisters, JSValueRegs base, JSValueRegs value) >- : JITInlineCacheGenerator(codeBlock, codeOrigin, callSite, accessType, usedRegisters) >+ : JITInlineCacheGenerator(stubInfo, codeBlock, codeOrigin, callSite, accessType, usedRegisters) > , m_base(base) > , m_value(value) > { >@@ -102,9 +100,9 @@ void JITByIdGenerator::generateFastCommo > } > > JITGetByIdGenerator::JITGetByIdGenerator( >- CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters, >+ StructureStubInfo* stubInfo, CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters, > UniquedStringImpl* propertyName, JSValueRegs base, JSValueRegs value, AccessType accessType) >- : JITByIdGenerator(codeBlock, codeOrigin, callSite, accessType, usedRegisters, base, value) >+ : JITByIdGenerator(stubInfo, codeBlock, codeOrigin, callSite, accessType, usedRegisters, base, value) > , m_isLengthAccess(propertyName == codeBlock->vm()->propertyNames->length.impl()) > { > RELEASE_ASSERT(base.payloadGPR() != value.tagGPR()); >@@ -116,9 +114,9 @@ void JITGetByIdGenerator::generateFastPa > } > > JITGetByIdWithThisGenerator::JITGetByIdWithThisGenerator( >- CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters, >+ StructureStubInfo* stubInfo, CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters, > UniquedStringImpl*, JSValueRegs value, JSValueRegs base, JSValueRegs thisRegs, AccessType accessType) >- : JITByIdGenerator(codeBlock, codeOrigin, callSite, accessType, usedRegisters, base, value) >+ : JITByIdGenerator(stubInfo, codeBlock, codeOrigin, callSite, accessType, usedRegisters, base, value) > { > RELEASE_ASSERT(thisRegs.payloadGPR() != thisRegs.tagGPR()); > >@@ -134,11 +132,10 @@ void JITGetByIdWithThisGenerator::genera > } > > JITPutByIdGenerator::JITPutByIdGenerator( >- CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters, >+ StructureStubInfo* stubInfo, CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters, > JSValueRegs base, JSValueRegs value, GPRReg scratch, > ECMAMode ecmaMode, PutKind putKind) >- : JITByIdGenerator( >- codeBlock, codeOrigin, callSite, AccessType::Put, usedRegisters, base, value) >+ : JITByIdGenerator(stubInfo, codeBlock, codeOrigin, callSite, AccessType::Put, usedRegisters, base, value) > , m_ecmaMode(ecmaMode) > , m_putKind(putKind) > { >@@ -163,9 +160,9 @@ V_JITOperation_ESsiJJI JITPutByIdGenerat > } > > JITInByIdGenerator::JITInByIdGenerator( >- CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters, >+ StructureStubInfo* stubInfo, CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters, > UniquedStringImpl* propertyName, JSValueRegs base, JSValueRegs value) >- : JITByIdGenerator(codeBlock, codeOrigin, callSite, AccessType::In, usedRegisters, base, 
value) >+ : JITByIdGenerator(stubInfo, codeBlock, codeOrigin, callSite, AccessType::In, usedRegisters, base, value) > { > // FIXME: We are not supporting fast path for "length" property. > UNUSED_PARAM(propertyName); >@@ -178,11 +175,10 @@ void JITInByIdGenerator::generateFastPat > } > > JITInstanceOfGenerator::JITInstanceOfGenerator( >- CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSiteIndex, >+ StructureStubInfo* stubInfo, CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSiteIndex, > const RegisterSet& usedRegisters, GPRReg result, GPRReg value, GPRReg prototype, > GPRReg scratch1, GPRReg scratch2, bool prototypeIsKnownObject) >- : JITInlineCacheGenerator( >- codeBlock, codeOrigin, callSiteIndex, AccessType::InstanceOf, usedRegisters) >+ : JITInlineCacheGenerator(stubInfo, codeBlock, codeOrigin, callSiteIndex, AccessType::InstanceOf, usedRegisters) > { > m_stubInfo->patch.baseGPR = value; > m_stubInfo->patch.valueGPR = result; >Index: Source/JavaScriptCore/jit/JITInlineCacheGenerator.h >=================================================================== >--- Source/JavaScriptCore/jit/JITInlineCacheGenerator.h (revision 245246) >+++ Source/JavaScriptCore/jit/JITInlineCacheGenerator.h (working copy) >@@ -46,7 +46,7 @@ class JITInlineCacheGenerator { > protected: > JITInlineCacheGenerator() { } > JITInlineCacheGenerator( >- CodeBlock*, CodeOrigin, CallSiteIndex, AccessType, const RegisterSet& usedRegisters); >+ StructureStubInfo*, CodeBlock*, CodeOrigin, CallSiteIndex, AccessType, const RegisterSet& usedRegisters); > > public: > StructureStubInfo* stubInfo() const { return m_stubInfo; } >@@ -77,7 +77,7 @@ protected: > JITByIdGenerator() { } > > JITByIdGenerator( >- CodeBlock*, CodeOrigin, CallSiteIndex, AccessType, const RegisterSet& usedRegisters, >+ StructureStubInfo*, CodeBlock*, CodeOrigin, CallSiteIndex, AccessType, const RegisterSet& usedRegisters, > JSValueRegs base, JSValueRegs value); > > public: >@@ -106,7 +106,7 @@ public: > JITGetByIdGenerator() { } > > JITGetByIdGenerator( >- CodeBlock*, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, UniquedStringImpl* propertyName, >+ StructureStubInfo*, CodeBlock*, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, UniquedStringImpl* propertyName, > JSValueRegs base, JSValueRegs value, AccessType); > > void generateFastPath(MacroAssembler&); >@@ -120,7 +120,7 @@ public: > JITGetByIdWithThisGenerator() { } > > JITGetByIdWithThisGenerator( >- CodeBlock*, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, UniquedStringImpl* propertyName, >+ StructureStubInfo*, CodeBlock*, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, UniquedStringImpl* propertyName, > JSValueRegs value, JSValueRegs base, JSValueRegs thisRegs, AccessType); > > void generateFastPath(MacroAssembler&); >@@ -131,7 +131,7 @@ public: > JITPutByIdGenerator() { } > > JITPutByIdGenerator( >- CodeBlock*, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, JSValueRegs base, >+ StructureStubInfo*, CodeBlock*, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, JSValueRegs base, > JSValueRegs value, GPRReg scratch, ECMAMode, PutKind); > > void generateFastPath(MacroAssembler&); >@@ -148,7 +148,7 @@ public: > JITInByIdGenerator() { } > > JITInByIdGenerator( >- CodeBlock*, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, UniquedStringImpl* propertyName, >+ StructureStubInfo*, CodeBlock*, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, UniquedStringImpl* propertyName, > 
JSValueRegs base, JSValueRegs value); > > void generateFastPath(MacroAssembler&); >@@ -159,7 +159,7 @@ public: > JITInstanceOfGenerator() { } > > JITInstanceOfGenerator( >- CodeBlock*, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, GPRReg result, >+ StructureStubInfo*, CodeBlock*, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, GPRReg result, > GPRReg value, GPRReg prototype, GPRReg scratch1, GPRReg scratch2, > bool prototypeIsKnownObject = false); > >Index: Source/JavaScriptCore/jit/JITInlines.h >=================================================================== >--- Source/JavaScriptCore/jit/JITInlines.h (revision 245246) >+++ Source/JavaScriptCore/jit/JITInlines.h (working copy) >@@ -247,7 +247,8 @@ ALWAYS_INLINE void JIT::emitJumpSlowToHo > { > ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. > >- jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this); >+ //jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this); >+ m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset)); > } > > #if ENABLE(SAMPLING_FLAGS) >Index: Source/JavaScriptCore/jit/JITOpcodes32_64.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITOpcodes32_64.cpp (revision 245246) >+++ Source/JavaScriptCore/jit/JITOpcodes32_64.cpp (working copy) >@@ -159,7 +159,7 @@ void JIT::emit_op_instanceof(const Instr > emitJumpSlowCaseIfNotJSCell(proto); > > JITInstanceOfGenerator gen( >- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), >+ m_compileData.stubInfos.add(AccessType::InstanceOf), m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), > RegisterSet::stubUnavailableRegisters(), > regT0, // result > regT2, // value >@@ -1116,7 +1116,7 @@ void JIT::emit_op_has_indexed_property(c > int base = bytecode.m_base.offset(); > int property = bytecode.m_property.offset(); > ArrayProfile* profile = &metadata.m_arrayProfile; >- ByValInfo* byValInfo = m_codeBlock->addByValInfo(); >+ ByValInfo* byValInfo = m_compileData.byValInfos.add(); > > emitLoadPayload(base, regT0); > emitJumpSlowCaseIfNotJSCell(base); >Index: Source/JavaScriptCore/jit/JITOpcodes.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITOpcodes.cpp (revision 245246) >+++ Source/JavaScriptCore/jit/JITOpcodes.cpp (working copy) >@@ -169,7 +169,7 @@ void JIT::emit_op_instanceof(const Instr > emitJumpSlowCaseIfNotJSCell(regT1, proto); > > JITInstanceOfGenerator gen( >- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), >+ m_compileData.stubInfos.add(AccessType::InstanceOf), m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), > RegisterSet::stubUnavailableRegisters(), > regT0, // result > regT2, // value >@@ -1034,6 +1034,10 @@ void JIT::emitSlow_op_loop_hint(const In > #endif > } > >+void JIT::emit_op_trace_hint(const Instruction*) >+{ >+} >+ > void JIT::emit_op_check_traps(const Instruction*) > { > addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->needTrapHandlingAddress()))); >@@ -1245,7 +1249,7 @@ void JIT::emit_op_has_indexed_property(c > int base = bytecode.m_base.offset(); > int property = bytecode.m_property.offset(); > ArrayProfile* profile = &metadata.m_arrayProfile; >- ByValInfo* byValInfo = m_codeBlock->addByValInfo(); >+ ByValInfo* byValInfo = m_compileData.byValInfos.add(); > > 
emitGetVirtualRegisters(base, regT0, property, regT1); > >Index: Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp (revision 245246) >+++ Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp (working copy) >@@ -144,7 +144,7 @@ void JIT::emit_op_get_by_val(const Instr > int base = bytecode.m_base.offset(); > int property = bytecode.m_property.offset(); > ArrayProfile* profile = &metadata.m_arrayProfile; >- ByValInfo* byValInfo = m_codeBlock->addByValInfo(); >+ ByValInfo* byValInfo = m_compileData.byValInfos.add(); > > emitLoad2(base, regT1, regT0, property, regT3, regT2); > >@@ -207,7 +207,7 @@ JITGetByIdGenerator JIT::emitGetByValWit > > const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr(); > JITGetByIdGenerator gen( >- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(), >+ m_compileData.stubInfos.add(AccessType::Get), m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(), > propertyName.impl(), JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), AccessType::Get); > gen.generateFastPath(*this); > >@@ -274,7 +274,7 @@ void JIT::emit_op_put_by_val(const Instr > int base = bytecode.m_base.offset(); > int property = bytecode.m_property.offset(); > ArrayProfile* profile = &metadata.m_arrayProfile; >- ByValInfo* byValInfo = m_codeBlock->addByValInfo(); >+ ByValInfo* byValInfo = m_compileData.byValInfos.add(); > > emitLoad2(base, regT1, regT0, property, regT3, regT2); > >@@ -437,7 +437,7 @@ JITPutByIdGenerator JIT::emitPutByValWit > > const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr(); > JITPutByIdGenerator gen( >- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(), >+ m_compileData.stubInfos.add(AccessType::Put), m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(), > JSValueRegs::payloadOnly(regT0), JSValueRegs(regT3, regT2), regT1, m_codeBlock->ecmaMode(), putKind); > gen.generateFastPath(*this); > doneCases.append(jump()); >@@ -501,7 +501,7 @@ void JIT::emit_op_try_get_by_id(const In > emitJumpSlowCaseIfNotJSCell(base, regT1); > > JITGetByIdGenerator gen( >- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(), >+ m_compileData.stubInfos.add(AccessType::TryGet), m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(), > ident->impl(), JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), AccessType::TryGet); > gen.generateFastPath(*this); > addSlowCase(gen.slowPathJump()); >@@ -540,7 +540,7 @@ void JIT::emit_op_get_by_id_direct(const > emitJumpSlowCaseIfNotJSCell(base, regT1); > > JITGetByIdGenerator gen( >- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(), >+ m_compileData.stubInfos.add(AccessType::GetDirect), m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(), > ident->impl(), JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), AccessType::GetDirect); > gen.generateFastPath(*this); > addSlowCase(gen.slowPathJump()); >@@ 
-586,7 +586,7 @@ void JIT::emit_op_get_by_id(const Instru > } > > JITGetByIdGenerator gen( >- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(), >+ m_compileData.stubInfos.add(AccessType::Get), m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(), > ident->impl(), JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), AccessType::Get); > gen.generateFastPath(*this); > addSlowCase(gen.slowPathJump()); >@@ -670,7 +670,7 @@ void JIT::emit_op_put_by_id(const Instru > emitJumpSlowCaseIfNotJSCell(base, regT1); > > JITPutByIdGenerator gen( >- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(), >+ m_compileData.stubInfos.add(AccessType::Put), m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(), > JSValueRegs::payloadOnly(regT0), JSValueRegs(regT3, regT2), > regT1, m_codeBlock->ecmaMode(), direct ? Direct : NotDirect); > >@@ -714,7 +714,7 @@ void JIT::emit_op_in_by_id(const Instruc > emitJumpSlowCaseIfNotJSCell(base, regT1); > > JITInByIdGenerator gen( >- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(), >+ m_compileData.stubInfos.add(AccessType::In), m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(), > ident->impl(), JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0)); > gen.generateFastPath(*this); > addSlowCase(gen.slowPathJump()); >Index: Source/JavaScriptCore/jit/JITPropertyAccess.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITPropertyAccess.cpp (revision 245246) >+++ Source/JavaScriptCore/jit/JITPropertyAccess.cpp (working copy) >@@ -60,7 +60,7 @@ void JIT::emit_op_get_by_val(const Instr > int base = bytecode.m_base.offset(); > int property = bytecode.m_property.offset(); > ArrayProfile* profile = &metadata.m_arrayProfile; >- ByValInfo* byValInfo = m_codeBlock->addByValInfo(); >+ ByValInfo* byValInfo = m_compileData.byValInfos.add(); > > emitGetVirtualRegister(base, regT0); > bool propertyNameIsIntegerConstant = isOperandConstantInt(property); >@@ -141,7 +141,7 @@ JITGetByIdGenerator JIT::emitGetByValWit > emitByValIdentifierCheck(byValInfo, regT1, regT3, propertyName, slowCases); > > JITGetByIdGenerator gen( >- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), >+ m_compileData.stubInfos.add(AccessType::Get), m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), > propertyName.impl(), JSValueRegs(regT0), JSValueRegs(regT0), AccessType::Get); > gen.generateFastPath(*this); > >@@ -209,7 +209,7 @@ void JIT::emit_op_put_by_val(const Instr > int base = bytecode.m_base.offset(); > int property = bytecode.m_property.offset(); > ArrayProfile* profile = &metadata.m_arrayProfile; >- ByValInfo* byValInfo = m_codeBlock->addByValInfo(); >+ ByValInfo* byValInfo = m_compileData.byValInfos.add(); > > emitGetVirtualRegister(base, regT0); > bool propertyNameIsIntegerConstant = isOperandConstantInt(property); >@@ -374,7 +374,7 @@ JITPutByIdGenerator JIT::emitPutByValWit > emitGetVirtualRegisters(base, regT0, value, regT1); > > JITPutByIdGenerator gen( >- m_codeBlock, CodeOrigin(m_bytecodeOffset), 
CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), >+ m_compileData.stubInfos.add(AccessType::Put), m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), > JSValueRegs(regT0), JSValueRegs(regT1), regT2, m_codeBlock->ecmaMode(), putKind); > gen.generateFastPath(*this); > emitWriteBarrier(base, value, ShouldFilterBase); >@@ -504,7 +504,7 @@ void JIT::emit_op_try_get_by_id(const In > emitJumpSlowCaseIfNotJSCell(regT0, baseVReg); > > JITGetByIdGenerator gen( >- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), >+ m_compileData.stubInfos.add(AccessType::TryGet), m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), > ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0), AccessType::TryGet); > gen.generateFastPath(*this); > addSlowCase(gen.slowPathJump()); >@@ -543,7 +543,7 @@ void JIT::emit_op_get_by_id_direct(const > emitJumpSlowCaseIfNotJSCell(regT0, baseVReg); > > JITGetByIdGenerator gen( >- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), >+ m_compileData.stubInfos.add(AccessType::GetDirect), m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), > ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0), AccessType::GetDirect); > gen.generateFastPath(*this); > addSlowCase(gen.slowPathJump()); >@@ -589,7 +589,7 @@ void JIT::emit_op_get_by_id(const Instru > } > > JITGetByIdGenerator gen( >- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), >+ m_compileData.stubInfos.add(AccessType::Get), m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), > ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0), AccessType::Get); > gen.generateFastPath(*this); > addSlowCase(gen.slowPathJump()); >@@ -613,7 +613,7 @@ void JIT::emit_op_get_by_id_with_this(co > emitJumpSlowCaseIfNotJSCell(regT1, thisVReg); > > JITGetByIdWithThisGenerator gen( >- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), >+ m_compileData.stubInfos.add(AccessType::GetWithThis), m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), > ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0), JSValueRegs(regT1), AccessType::GetWithThis); > gen.generateFastPath(*this); > addSlowCase(gen.slowPathJump()); >@@ -673,7 +673,7 @@ void JIT::emit_op_put_by_id(const Instru > emitJumpSlowCaseIfNotJSCell(regT0, baseVReg); > > JITPutByIdGenerator gen( >- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), >+ m_compileData.stubInfos.add(AccessType::Put), m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), > JSValueRegs(regT0), JSValueRegs(regT1), regT2, m_codeBlock->ecmaMode(), > direct ? 
Direct : NotDirect); > >@@ -713,7 +713,7 @@ void JIT::emit_op_in_by_id(const Instruc > emitJumpSlowCaseIfNotJSCell(regT0, baseVReg); > > JITInByIdGenerator gen( >- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), >+ m_compileData.stubInfos.add(AccessType::In), m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), > ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0)); > gen.generateFastPath(*this); > addSlowCase(gen.slowPathJump()); >Index: Source/JavaScriptCore/jit/JITWorklist.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITWorklist.cpp (revision 245246) >+++ Source/JavaScriptCore/jit/JITWorklist.cpp (working copy) >@@ -36,10 +36,12 @@ namespace JSC { > > class JITWorklist::Plan : public ThreadSafeRefCounted<JITWorklist::Plan> { > public: >- Plan(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset) >+ Plan(CodeBlock* codeBlock, bool isTracelet) > : m_codeBlock(codeBlock) >- , m_jit(codeBlock->vm(), codeBlock, loopOSREntryBytecodeOffset) >+ , m_jit(codeBlock->vm(), codeBlock) > { >+ m_jit.m_isTracelet = isTracelet; >+ m_jit.m_isTracelet = true; // WIP: force tracelet compilation for now; this overrides the isTracelet parameter just set above. > m_jit.doMainThreadPreparationBeforeCompile(); > } > >@@ -54,6 +56,11 @@ public: > void finalize() > { > CompilationResult result = m_jit.link(); >+ >+ //static uint64_t counter; >+ //if (++counter % 5 == 0) >+ // vm()->heap.dumpAllCodeBlockTraces(); >+ > switch (result) { > case CompilationFailed: > CODEBLOCK_LOG_EVENT(m_codeBlock, "delayJITCompile", ("compilation failed")); >@@ -65,9 +72,15 @@ public: > case CompilationSuccessful: > if (Options::verboseOSR()) > dataLogF(" JIT compilation successful.\n"); >- m_codeBlock->ownerExecutable()->installCode(m_codeBlock); >+ if (m_jit.m_shouldInstallCode) >+ m_codeBlock->ownerExecutable()->installCode(m_codeBlock); > m_codeBlock->jitSoon(); > return; >+ case CompilationDeferred: >+ dataLogLn("Bogus compile!"); >+ // Nothing to compile!
>+ //m_codeBlock->jitSoon(); >+ return; > default: > RELEASE_ASSERT_NOT_REACHED(); > return; >@@ -83,9 +96,9 @@ public: > return m_isFinishedCompiling; > } > >- static void compileNow(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset) >+ static void compileNow(CodeBlock* codeBlock, bool isTracelet) > { >- Plan plan(codeBlock, loopOSREntryBytecodeOffset); >+ Plan plan(codeBlock, isTracelet); > plan.compileInThread(); > plan.finalize(); > } >@@ -159,7 +172,10 @@ JITWorklist::JITWorklist() > , m_condition(AutomaticThreadCondition::create()) > { > LockHolder locker(*m_lock); >- m_thread = new Thread(locker, *this); >+ m_threads.append(new Thread(locker, *this)); >+ m_threads.append(new Thread(locker, *this)); >+ //m_threads.append(new Thread(locker, *this)); >+ //m_threads.append(new Thread(locker, *this)); > } > > JITWorklist::~JITWorklist() >@@ -228,10 +244,10 @@ void JITWorklist::poll(VM& vm) > finalizePlans(myPlans); > } > >-void JITWorklist::compileLater(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset) >+void JITWorklist::compileLater(CodeBlock* codeBlock, bool isTracelet) > { > DeferGC deferGC(codeBlock->vm()->heap); >- RELEASE_ASSERT(codeBlock->jitType() == JITType::InterpreterThunk); >+ //RELEASE_ASSERT(codeBlock->jitType() == JITType::InterpreterThunk); > > if (codeBlock->m_didFailJITCompilation) { > codeBlock->dontJITAnytimeSoon(); >@@ -239,7 +255,7 @@ void JITWorklist::compileLater(CodeBlock > } > > if (!Options::useConcurrentJIT()) { >- Plan::compileNow(codeBlock, loopOSREntryBytecodeOffset); >+ Plan::compileNow(codeBlock, isTracelet); > return; > } > >@@ -253,7 +269,7 @@ void JITWorklist::compileLater(CodeBlock > > if (m_numAvailableThreads) { > m_planned.add(codeBlock); >- RefPtr<Plan> plan = adoptRef(new Plan(codeBlock, loopOSREntryBytecodeOffset)); >+ RefPtr<Plan> plan = adoptRef(new Plan(codeBlock, isTracelet)); > m_plans.append(plan); > m_queue.append(plan); > m_condition->notifyAll(locker); >@@ -277,14 +293,24 @@ void JITWorklist::compileLater(CodeBlock > // This works around the issue. If the concurrent JIT thread is convoyed, we revert to main > // thread compiles. This is probably not as good as if we had multiple JIT threads. Maybe we > // can do that someday. >- Plan::compileNow(codeBlock, loopOSREntryBytecodeOffset); >+ Plan::compileNow(codeBlock, isTracelet); > } > >-void JITWorklist::compileNow(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset) >+void JITWorklist::compileNow(CodeBlock* codeBlock, unsigned bytecodeIndexToCompile) > { > VM* vm = codeBlock->vm(); > DeferGC deferGC(vm->heap); >- if (codeBlock->jitType() != JITType::InterpreterThunk) >+ >+ auto didCompileBytecode = [&] { >+ if (TraceletJITCode* tracelet = codeBlock->jitCode()->asTracelet()) { >+ if (tracelet->m_codeLocations.find(bytecodeIndexToCompile) != tracelet->m_codeLocations.end()) >+ return true; >+ } >+ >+ return false; >+ }; >+ >+ if (didCompileBytecode()) > return; > > bool isPlanned; >@@ -300,15 +326,14 @@ void JITWorklist::compileNow(CodeBlock* > } > > // Now it might be compiled! >- if (codeBlock->jitType() != JITType::InterpreterThunk) >+ if (didCompileBytecode()) > return; > >- // We do this in case we had previously attempted, and then failed, to compile with the >- // baseline JIT. >- codeBlock->resetJITData(); >- > // OK, just compile it. >- JIT::compile(vm, codeBlock, JITCompilationMustSucceed, loopOSREntryBytecodeOffset); >+ bool isTracelet = Options::useLLInt() ? 
true : false; >+ JIT::compileNow(vm, codeBlock, JITCompilationMustSucceed, bytecodeIndexToCompile, isTracelet); >+ ASSERT(didCompileBytecode()); >+ // OOPS: change how we installCode(). > codeBlock->ownerExecutable()->installCode(codeBlock); > } > >Index: Source/JavaScriptCore/jit/JITWorklist.h >=================================================================== >--- Source/JavaScriptCore/jit/JITWorklist.h (revision 245246) >+++ Source/JavaScriptCore/jit/JITWorklist.h (working copy) >@@ -53,9 +53,8 @@ public: > bool completeAllForVM(VM&); // Return true if any JIT work happened. > void poll(VM&); > >- void compileLater(CodeBlock*, unsigned loopOSREntryBytecodeOffset = 0); >- >- void compileNow(CodeBlock*, unsigned loopOSREntryBytecodeOffset = 0); >+ void compileLater(CodeBlock*, bool isTracelet = false); >+ void compileNow(CodeBlock*, unsigned bytecodeIndexToCompile); > > static JITWorklist& ensureGlobalWorklist(); > static JITWorklist* existingGlobalWorklistOrNull(); >@@ -74,7 +73,7 @@ private: > > Box<Lock> m_lock; > Ref<AutomaticThreadCondition> m_condition; // We use One True Condition for everything because that's easier. >- RefPtr<AutomaticThread> m_thread; >+ Vector<RefPtr<AutomaticThread>> m_threads; > > unsigned m_numAvailableThreads { 0 }; > }; >Index: Source/JavaScriptCore/llint/LLIntData.h >=================================================================== >--- Source/JavaScriptCore/llint/LLIntData.h (revision 245246) >+++ Source/JavaScriptCore/llint/LLIntData.h (working copy) >@@ -25,6 +25,7 @@ > > #pragma once > >+#include "GPRInfo.h" > #include "JSCJSValue.h" > #include "MacroAssemblerCodeRef.h" > #include "Opcode.h" >@@ -152,4 +153,21 @@ ALWAYS_INLINE void* getCodePtr(JSC::Enco > return bitwise_cast<void*>(glueHelper); > } > >+#if ENABLE(JIT) >+struct Registers { >+ static const GPRReg pcGPR = GPRInfo::regT4; >+ >+#if CPU(X86_64) && !OS(WINDOWS) >+ static const GPRReg metadataTableGPR = GPRInfo::regCS1; >+ static const GPRReg pbGPR = GPRInfo::regCS2; >+#elif CPU(X86_64) && OS(WINDOWS) >+ static const GPRReg metadataTableGPR = GPRInfo::regCS3; >+ static const GPRReg pbGPR = GPRInfo::regCS4; >+#elif CPU(ARM64) >+ static const GPRReg metadataTableGPR = GPRInfo::regCS6; >+ static const GPRReg pbGPR = GPRInfo::regCS7; >+#endif >+}; >+#endif >+ > } } // namespace JSC::LLInt >Index: Source/JavaScriptCore/llint/LLIntEntrypoint.cpp >=================================================================== >--- Source/JavaScriptCore/llint/LLIntEntrypoint.cpp (revision 245246) >+++ Source/JavaScriptCore/llint/LLIntEntrypoint.cpp (working copy) >@@ -180,6 +180,8 @@ void setEntrypoint(CodeBlock* codeBlock) > > unsigned frameRegisterCountFor(CodeBlock* codeBlock) > { >+ // OOPS: Combine this with JIT's function to ensure they're always the same! >+ > ASSERT(static_cast<unsigned>(codeBlock->numCalleeLocals()) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->numCalleeLocals()))); > > return roundLocalRegisterCountForFramePointerOffset(codeBlock->numCalleeLocals() + maxFrameExtentForSlowPathCallInRegisters); >Index: Source/JavaScriptCore/llint/LLIntSlowPaths.cpp >=================================================================== >--- Source/JavaScriptCore/llint/LLIntSlowPaths.cpp (revision 245246) >+++ Source/JavaScriptCore/llint/LLIntSlowPaths.cpp (working copy) >@@ -365,7 +365,7 @@ inline bool shouldJIT(CodeBlock* codeBlo > } > > // Returns true if we should try to OSR. 
>-inline bool jitCompileAndSetHeuristics(CodeBlock* codeBlock, ExecState* exec, unsigned loopOSREntryBytecodeOffset = 0) >+inline bool jitCompileAndSetHeuristics(CodeBlock* codeBlock, ExecState* exec) > { > VM& vm = exec->vm(); > DeferGCForAWhile deferGC(vm.heap); // My callers don't set top callframe, so we don't want to GC here at all. >@@ -390,7 +390,7 @@ inline bool jitCompileAndSetHeuristics(C > return true; > } > case JITType::InterpreterThunk: { >- JITWorklist::ensureGlobalWorklist().compileLater(codeBlock, loopOSREntryBytecodeOffset); >+ JITWorklist::ensureGlobalWorklist().compileLater(codeBlock); > return codeBlock->jitType() == JITType::BaselineJIT; > } > default: >@@ -400,6 +400,13 @@ inline bool jitCompileAndSetHeuristics(C > } > } > >+static ALWAYS_INLINE MacroAssemblerCodePtr<JITTraceletPtrTag> traceletPC(CodeBlock* codeBlock, unsigned bytecodeOffset) >+{ >+ if (TraceletJITCode* tracelet = codeBlock->jitCode()->asTracelet()) >+ return tracelet->findCodeLocation(bytecodeOffset); >+ return { }; >+} >+ > static SlowPathReturnType entryOSR(ExecState* exec, const Instruction*, CodeBlock* codeBlock, const char *name, EntryKind kind) > { > if (Options::verboseOSR()) { >@@ -412,10 +419,20 @@ static SlowPathReturnType entryOSR(ExecS > codeBlock->dontJITAnytimeSoon(); > LLINT_RETURN_TWO(0, 0); > } >- if (!jitCompileAndSetHeuristics(codeBlock, exec)) >- LLINT_RETURN_TWO(0, 0); >- >+ >+ if (!traceletPC(codeBlock, 0)) { >+ if (!jitCompileAndSetHeuristics(codeBlock, exec)) { >+ //dataLogLn("Entry OSR fail: ", RawPointer(codeBlock)); >+ LLINT_RETURN_TWO(0, 0); >+ } >+ if (!traceletPC(codeBlock, 0)) { >+ //dataLogLn("Entry OSR fail: ", RawPointer(codeBlock)); >+ LLINT_RETURN_TWO(0, 0); >+ } >+ } >+ > CODEBLOCK_LOG_EVENT(codeBlock, "OSR entry", ("in prologue")); >+ > > if (kind == Prologue) > LLINT_RETURN_TWO(codeBlock->jitCode()->executableAddress(), 0); >@@ -474,19 +491,26 @@ LLINT_SLOW_PATH_DECL(loop_osr) > codeBlock->dontJITAnytimeSoon(); > LLINT_RETURN_TWO(0, 0); > } >+ >+ MacroAssemblerCodePtr<JITTraceletPtrTag> codePtr = traceletPC(codeBlock, loopOSREntryBytecodeOffset); >+ if (codePtr) >+ LLINT_RETURN_TWO(codePtr.retagged<JSEntryPtrTag>().executableAddress(), exec->topOfFrame()); > >- if (!jitCompileAndSetHeuristics(codeBlock, exec, loopOSREntryBytecodeOffset)) >+ if (!jitCompileAndSetHeuristics(codeBlock, exec)) { >+ //dataLogLn("loop_osr fail: ", RawPointer(codeBlock)); > LLINT_RETURN_TWO(0, 0); >+ } > > CODEBLOCK_LOG_EVENT(codeBlock, "osrEntry", ("at bc#", loopOSREntryBytecodeOffset)); > > ASSERT(codeBlock->jitType() == JITType::BaselineJIT); >+ codePtr = static_cast<TraceletJITCode*>(codeBlock->jitCode().get())->findCodeLocation(loopOSREntryBytecodeOffset); >+ if (!codePtr) { >+ //dataLogLn("loop_osr fail: ", RawPointer(codeBlock)); >+ LLINT_RETURN_TWO(0, 0); >+ } > >- const JITCodeMap& codeMap = codeBlock->jitCodeMap(); >- CodeLocationLabel<JSEntryPtrTag> codeLocation = codeMap.find(loopOSREntryBytecodeOffset); >- ASSERT(codeLocation); >- >- void* jumpTarget = codeLocation.executableAddress(); >+ void* jumpTarget = codePtr.retagged<JSEntryPtrTag>().executableAddress(); > ASSERT(jumpTarget); > > LLINT_RETURN_TWO(jumpTarget, exec->topOfFrame()); >@@ -1959,6 +1983,56 @@ LLINT_SLOW_PATH_DECL(slow_path_out_of_li > LLINT_END_IMPL(); > } > >+LLINT_SLOW_PATH_DECL(trace_hint) >+{ >+ LLINT_BEGIN_NO_SET_PC(); >+ UNUSED_PARAM(throwScope); >+ >+ auto bytecode = pc->as<OpTraceHint>(); >+ auto& metadata = bytecode.metadata(exec); >+ >+ CodeBlock* codeBlock = exec->codeBlock(); >+ if 
(!shouldJIT(codeBlock)) >+ LLINT_RETURN_TWO(0, 0); >+ >+ auto returnPC = [&] () -> void* { >+ if (MacroAssemblerCodePtr<JITTraceletPtrTag> codePtr = traceletPC(codeBlock, codeBlock->bytecodeOffset(pc))) { >+ void* result = codePtr.executableAddress(); >+ metadata.m_entrypoint = bitwise_cast<uintptr_t>(result); >+ metadata.m_count = 0; >+ return result; >+ } >+ >+ return nullptr; >+ }; >+ >+ JITWorklist::ensureGlobalWorklist().poll(vm); >+ >+ if (auto* ret = returnPC()) >+ LLINT_RETURN_TWO(ret, 0); >+ >+ if (metadata.m_entrypoint == 1) { >+ metadata.m_entrypoint = 1; >+ codeBlock->updateAllValueProfilePredictions(); >+ JITWorklist::ensureGlobalWorklist().compileLater(codeBlock, true); >+ >+ if (auto* ret = returnPC()) >+ LLINT_RETURN_TWO(ret, 0); >+ >+ //dataLogLn("trace_hint slow path: ", RawPointer(codeBlock), " bc#", codeBlock->bytecodeOffset(pc)); >+ >+ metadata.m_count = std::numeric_limits<int32_t>::min(); >+ LLINT_RETURN_TWO(0, 0); >+ } >+ >+ //dataLogLn("trace_hint slow path: ", RawPointer(codeBlock), " bc#", codeBlock->bytecodeOffset(pc)); >+ // Setting m_entrypoint to 1 means we should compile. >+ // Setting m_entrypoint to 2 means we have compiled it. >+ metadata.m_entrypoint = 1; >+ metadata.m_count = -Options::traceWaveThreshold(); >+ LLINT_RETURN_TWO(0, 0); >+} >+ > extern "C" SlowPathReturnType llint_throw_stack_overflow_error(VM* vm, ProtoCallFrame* protoFrame) > { > ExecState* exec = vm->topCallFrame; >Index: Source/JavaScriptCore/llint/LLIntSlowPaths.h >=================================================================== >--- Source/JavaScriptCore/llint/LLIntSlowPaths.h (revision 245246) >+++ Source/JavaScriptCore/llint/LLIntSlowPaths.h (working copy) >@@ -134,6 +134,7 @@ LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_lo > LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_super_sampler_begin); > LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_super_sampler_end); > LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_out_of_line_jump_target); >+LLINT_SLOW_PATH_HIDDEN_DECL(trace_hint); > extern "C" SlowPathReturnType llint_throw_stack_overflow_error(VM*, ProtoCallFrame*) WTF_INTERNAL; > #if ENABLE(C_LOOP) > extern "C" SlowPathReturnType llint_stack_check_at_vm_entry(VM*, Register*) WTF_INTERNAL; >Index: Source/JavaScriptCore/llint/LowLevelInterpreter.asm >=================================================================== >--- Source/JavaScriptCore/llint/LowLevelInterpreter.asm (revision 245246) >+++ Source/JavaScriptCore/llint/LowLevelInterpreter.asm (working copy) >@@ -247,6 +247,7 @@ const ArithProfileNumberNumber = constex > # Pointer Tags > const BytecodePtrTag = constexpr BytecodePtrTag > const JSEntryPtrTag = constexpr JSEntryPtrTag >+const JITTraceletPtrTag = constexpr JITTraceletPtrTag > const ExceptionHandlerPtrTag = constexpr ExceptionHandlerPtrTag > const NoPtrTag = constexpr NoPtrTag > const SlowPathPtrTag = constexpr SlowPathPtrTag >@@ -1632,6 +1633,31 @@ llintOp(op_loop_hint, OpLoopHint, macro > dispatch() > end) > >+llintOpWithMetadata(op_trace_hint, OpTraceHint, macro (size, get, dispatch, metadata, return) >+ metadata(t2, t0) >+ loadp OpTraceHint::Metadata::m_entrypoint[t2], t1 >+ bpbeq t1, 2, .noEntrypoint >+ jmp t1, JITTraceletPtrTag >+ >+.noEntrypoint: >+ bpeq t1, 2, .doCompile >+ baddis 1, OpTraceHint::Metadata::m_count[t2], .continue >+ >+.doCompile: >+ storei PC, ArgumentCount + TagOffset[cfr] >+ prepareStateForCCall() >+ move cfr, a0 >+ move PC, a1 >+ cCall2(_llint_trace_hint) >+ btpz r0, .recover >+ jmp r0, JITTraceletPtrTag >+.recover: >+ loadi ArgumentCount + TagOffset[cfr], PC >+ >+.continue: 
>+ dispatch() >+end) >+ > > llintOp(op_check_traps, OpCheckTraps, macro (unused, unused, dispatch) > loadp CodeBlock[cfr], t1 >Index: Source/JavaScriptCore/runtime/JSCPtrTag.h >=================================================================== >--- Source/JavaScriptCore/runtime/JSCPtrTag.h (revision 245246) >+++ Source/JavaScriptCore/runtime/JSCPtrTag.h (working copy) >@@ -39,6 +39,7 @@ using PtrTag = WTF::PtrTag; > v(ExceptionHandlerPtrTag) \ > v(ExecutableMemoryPtrTag) \ > v(JITThunkPtrTag) \ >+ v(JITTraceletPtrTag) \ > v(JITStubRoutinePtrTag) \ > v(JSEntryPtrTag) \ > v(JSInternalPtrTag) \ >Index: Source/JavaScriptCore/runtime/Options.cpp >=================================================================== >--- Source/JavaScriptCore/runtime/Options.cpp (revision 245246) >+++ Source/JavaScriptCore/runtime/Options.cpp (working copy) >@@ -386,6 +386,10 @@ static void correctOptions() > > static void recomputeDependentOptions() > { >+ Options::minimumTraceThreshold() = 20; >+ Options::maximumTraceThreshold() = 20; >+ Options::traceWaveThreshold() = 5; >+ > #if !defined(NDEBUG) > Options::validateDFGExceptionHandling() = true; > #endif >Index: Source/JavaScriptCore/runtime/Options.h >=================================================================== >--- Source/JavaScriptCore/runtime/Options.h (revision 245246) >+++ Source/JavaScriptCore/runtime/Options.h (working copy) >@@ -336,6 +336,11 @@ constexpr bool enableWebAssemblyStreamin > \ > v(int32, thresholdForFTLOptimizeAfterWarmUp, 100000, Normal, nullptr) \ > v(int32, thresholdForFTLOptimizeSoon, 1000, Normal, nullptr) \ >+ v(int32, minimumTraceThreshold, 2, Normal, nullptr) \ >+ v(int32, maximumTraceThreshold, 2, Normal, nullptr) \ >+ v(int32, traceWaveThreshold, 2, Normal, nullptr) \ >+ v(int32, traceJITSoonThreshold, 1000, Normal, nullptr) \ >+ v(double, traceThresholdMultiplier, 0.5, Normal, nullptr) \ > v(int32, ftlTierUpCounterIncrementForLoop, 1, Normal, nullptr) \ > v(int32, ftlTierUpCounterIncrementForReturn, 15, Normal, nullptr) \ > v(unsigned, ftlOSREntryFailureCountForReoptimization, 15, Normal, nullptr) \ >Index: Source/JavaScriptCore/runtime/ScriptExecutable.cpp >=================================================================== >--- Source/JavaScriptCore/runtime/ScriptExecutable.cpp (revision 245246) >+++ Source/JavaScriptCore/runtime/ScriptExecutable.cpp (working copy) >@@ -186,6 +186,11 @@ void ScriptExecutable::installCode(VM& v > break; > } > >+ //dataLogLn("Install code on executable: ", RawPointer(this), " m_jitCodeForConstruct=", RawPointer(m_jitCodeForConstruct.get()), " m_jitCodeForCall=", RawPointer(m_jitCodeForCall.get())); >+ //WTFReportBacktrace(); >+ //dataLogLn(); >+ //dataLogLn(); >+ > auto& clearableCodeSet = VM::SpaceAndSet::setFor(*subspace()); > if (hasClearableCode(vm)) > clearableCodeSet.add(this); >@@ -397,7 +402,7 @@ static void setupLLInt(CodeBlock* codeBl > static void setupJIT(VM& vm, CodeBlock* codeBlock) > { > #if ENABLE(JIT) >- CompilationResult result = JIT::compile(&vm, codeBlock, JITCompilationMustSucceed); >+ CompilationResult result = JIT::compileNow(&vm, codeBlock, JITCompilationMustSucceed, 0, false); > RELEASE_ASSERT(result == CompilationSuccessful); > #else > UNUSED_PARAM(vm); >Index: Source/WTF/wtf/Bag.h >=================================================================== >--- Source/WTF/wtf/Bag.h (revision 245246) >+++ Source/WTF/wtf/Bag.h (working copy) >@@ -93,6 +93,19 @@ public: > m_head = newNode; > return &newNode->m_item; > } >+ >+ void adopt(Bag& other) >+ { >+ if (m_head) { 
>+ Node* tail = unwrappedHead(); >+ while (tail->m_next) >+ tail = tail->m_next; >+ tail->m_next = other.unwrappedHead(); >+ } else >+ m_head = other.unwrappedHead(); >+ >+ other.m_head = nullptr; >+ } > > class iterator { > public:
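
Taken together, the op_trace_hint fast path in LowLevelInterpreter.asm and the llint_trace_hint slow path above implement a small per-site state machine over the metadata's m_entrypoint and m_count fields: an m_entrypoint above 2 is jumped to as tracelet code, 1 means a compile has been requested, and m_count counts up from a negative threshold toward the slow path. Below is a minimal stand-alone sketch of that control flow; compiledEntrypoint, executeTraceHint, and the driver loop are illustrative names, not patch code, and it assumes (as the slow path suggests, but this excerpt does not show) that a finished compile is observed the next time the slow path runs.

    // Sketch of the trace_hint counting scheme; C++17, single-threaded.
    #include <cstdint>
    #include <cstdio>
    #include <limits>

    struct TraceHintMetadata {
        uintptr_t entrypoint { 0 }; // 0: cold, 1: compile requested, > 2: tracelet code address
        int32_t count { 0 };        // counts up from a negative threshold toward the slow path
    };

    static const uintptr_t compiledEntrypoint = 0x1000; // any value > 2 reads as code

    // Returns the address to "jump" to, or 0 to keep interpreting.
    uintptr_t executeTraceHint(TraceHintMetadata& md, bool compileFinished, int32_t traceWaveThreshold)
    {
        // Fast path (op_trace_hint in LowLevelInterpreter.asm).
        if (md.entrypoint > 2)
            return md.entrypoint;
        if (md.entrypoint != 2 && ++md.count < 0)
            return 0;

        // Slow path (llint_trace_hint in LLIntSlowPaths.cpp).
        if (compileFinished) { // i.e. traceletPC() found code for this bytecode offset
            md.entrypoint = compiledEntrypoint;
            md.count = 0;
            return md.entrypoint;
        }
        if (md.entrypoint == 1) {
            // Already requested: the real slow path re-polls the worklist,
            // calls compileLater(codeBlock, true), then backs off hard.
            md.count = std::numeric_limits<int32_t>::min();
            return 0;
        }
        // First time the counter crosses zero: mark the site and start a new wave.
        md.entrypoint = 1;
        md.count = -traceWaveThreshold;
        return 0;
    }

    int main()
    {
        TraceHintMetadata md;
        for (int i = 0; i < 8; ++i) {
            uintptr_t target = executeTraceHint(md, /* compileFinished */ i >= 4, /* threshold */ 5);
            std::printf("execution %d: entrypoint=%#llx count=%d -> %s\n", i,
                (unsigned long long)md.entrypoint, md.count, target ? "enter tracelet" : "interpret");
            if (target)
                break;
        }
    }

Note the backoff: once a compile has been requested, m_count is set to INT32_MIN so the site stays in the interpreter rather than re-entering the slow path every wave. The in-tree comment mentions 2 as a "have compiled" state, but as written returnPC() stores the raw entrypoint pointer; presumably the JIT can also publish the entrypoint into the metadata at link time, since a backed-off site would otherwise take about 2^31 executions before polling again.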
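JITWorklist::compileNow() also stops using jitType() != JITType::InterpreterThunk as its "already compiled" test: with tracelets, a CodeBlock's JIT code covers only some bytecode offsets, so it instead asks whether the TraceletJITCode already has a code location for the requested offset. Here is a sketch of that lookup, assuming m_codeLocations is a hash map keyed by bytecode offset (the excerpt only shows find()/end() and findCodeLocation()):

    // Sketch of the per-bytecode-offset entrypoint table queried by
    // didCompileBytecode() and traceletPC(). Map type and void* payload are
    // assumptions; the real code returns MacroAssemblerCodePtr<JITTraceletPtrTag>.
    #include <unordered_map>
    #include <cstdio>

    struct TraceletJITCodeSketch {
        std::unordered_map<unsigned, void*> m_codeLocations; // bc offset -> tracelet entrypoint

        void* findCodeLocation(unsigned bytecodeOffset) const
        {
            auto it = m_codeLocations.find(bytecodeOffset);
            return it == m_codeLocations.end() ? nullptr : it->second;
        }

        bool didCompileBytecode(unsigned bytecodeOffset) const
        {
            return m_codeLocations.find(bytecodeOffset) != m_codeLocations.end();
        }
    };

    int main()
    {
        TraceletJITCodeSketch code;
        int stub; // stands in for emitted machine code
        code.m_codeLocations[42] = &stub;

        // compileNow() can return early when the requested tracelet already
        // exists, instead of checking the CodeBlock's overall jitType().
        std::printf("bc#42 compiled: %d\n", code.didCompileBytecode(42));
        std::printf("bc#7 compiled: %d\n", code.didCompileBytecode(7));
    }

A null result from findCodeLocation() is what traceletPC() in LLIntSlowPaths.cpp propagates as an empty MacroAssemblerCodePtr, sending both loop_osr and trace_hint back to the interpreter.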
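Finally, the new WTF::Bag::adopt() splices every node of another bag onto the end of this one and empties the donor. That fits the m_compileData pattern above: the JIT now allocates StructureStubInfos and ByValInfos into per-compile bags rather than directly into the CodeBlock, which can then presumably adopt them wholesale when the compile is installed (the adopting call site is outside this excerpt). A minimal model, omitting Bag's allocator and pointer-packing details:

    // Sketch of Bag::adopt() splicing semantics. MiniBag is illustrative.
    #include <cassert>

    template<typename T>
    struct MiniBag {
        struct Node { T item; Node* next; };
        Node* head { nullptr };

        ~MiniBag()
        {
            while (head) {
                Node* next = head->next;
                delete head;
                head = next;
            }
        }

        T* add(T value)
        {
            Node* node = new Node { value, head };
            head = node;
            return &node->item;
        }

        // Mirrors Bag::adopt(): walk to our tail, then attach the other list.
        void adopt(MiniBag& other)
        {
            if (head) {
                Node* tail = head;
                while (tail->next)
                    tail = tail->next;
                tail->next = other.head;
            } else
                head = other.head;
            other.head = nullptr;
        }
    };

    int main()
    {
        MiniBag<int> compileData, codeBlockBag;
        compileData.add(1);
        compileData.add(2);
        codeBlockBag.adopt(compileData); // e.g. a CodeBlock taking ownership of per-compile infos
        assert(!compileData.head);
        int count = 0;
        for (auto* n = codeBlockBag.head; n; n = n->next)
            ++count;
        assert(count == 2);
    }

Since Bag keeps no tail pointer, adopt() is O(n) in the destination's size; that is fine here because the per-compile bags are small and adopted once per compile.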