WebKit Bugzilla
Attachment 369453 Details for Bug 196943: Add a baseline tracelet JIT
Attachment: c-backup.diff (text/plain), 108.37 KB
Description: WIP
Creator: Saam Barati
Created: 2019-05-08 18:14:46 PDT
Flags: patch, obsolete
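Note before the diff: the patch introduces an op_trace_hint bytecode whose metadata carries a TraceProfile { start, end } and an execution count. In CodeBlock::finishCreation, each trace's counter is seeded from the tracelet's bytecode length scaled by a multiplier, clamped between two new options, and stored negated (presumably so the counter can be incremented toward zero before tiering up, as JSC's other executing counters do). A minimal standalone sketch of that seeding, with assumed option defaults since the diff does not show them (the free function itself is illustrative, not the patch's API):

#include <algorithm>
#include <cstdint>

// Illustrative stand-ins for the new options this patch adds; the
// default values here are assumptions, not taken from the diff.
namespace Options {
static double traceThresholdMultiplier() { return 2.0; }
static int32_t minimumTraceThreshold() { return 100; }
static int32_t maximumTraceThreshold() { return 10000; }
}

// Mirrors the setTraceCount lambda in CodeBlock::finishCreation:
// threshold scales with tracelet length, is clamped, and is negated
// before being stored in the OpTraceHint metadata's m_count.
static int32_t traceHintSeedCount(unsigned start, unsigned end)
{
    double threshold = end - start; // tracelet length in bytecode words
    threshold *= Options::traceThresholdMultiplier();
    int32_t count = static_cast<int32_t>(threshold);
    count = std::max(Options::minimumTraceThreshold(), count);
    count = std::min(Options::maximumTraceThreshold(), count);
    return -count;
}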
>Index: Source/JavaScriptCore/bytecode/ArithProfile.h >=================================================================== >--- Source/JavaScriptCore/bytecode/ArithProfile.h (revision 244813) >+++ Source/JavaScriptCore/bytecode/ArithProfile.h (working copy) >@@ -310,6 +310,11 @@ private: > friend class JSC::LLIntOffsetsExtractor; > }; > >+struct TraceProfile { >+ unsigned start; >+ unsigned end; // Not inclusive of this instruction. >+}; >+ > } // namespace JSC > > namespace WTF { >Index: Source/JavaScriptCore/bytecode/BytecodeDumper.cpp >=================================================================== >--- Source/JavaScriptCore/bytecode/BytecodeDumper.cpp (revision 244813) >+++ Source/JavaScriptCore/bytecode/BytecodeDumper.cpp (working copy) >@@ -110,6 +110,25 @@ void BytecodeDumper<Block>::dumpIdentifi > } > } > >+template<> >+void BytecodeDumper<UnlinkedCodeBlock>::dumpPreciseJumpTargets() >+{ >+} >+ >+template<> >+void BytecodeDumper<CodeBlock>::dumpPreciseJumpTargets() >+{ >+ m_out.printf("\nPreciseJumpTargets = { "); >+ Vector<InstructionStream::Offset, 32> jumpTargets; >+ computePreciseJumpTargets(m_block, jumpTargets); >+ for (size_t i = 0; i < jumpTargets.size(); ++i) { >+ m_out.print(jumpTargets[i]); >+ if (i + 1 < jumpTargets.size()) >+ m_out.print(", "); >+ } >+ m_out.printf(" }\n"); >+} >+ > template<class Block> > void BytecodeDumper<Block>::dumpConstants() > { >@@ -225,6 +244,7 @@ void BytecodeDumper<Block>::dumpBlock(Bl > dumper.dumpExceptionHandlers(); > dumper.dumpSwitchJumpTables(); > dumper.dumpStringSwitchJumpTables(); >+ dumper.dumpPreciseJumpTargets(); > > out.printf("\n"); > } >Index: Source/JavaScriptCore/bytecode/BytecodeDumper.h >=================================================================== >--- Source/JavaScriptCore/bytecode/BytecodeDumper.h (revision 244813) >+++ Source/JavaScriptCore/bytecode/BytecodeDumper.h (working copy) >@@ -82,6 +82,7 @@ private: > void dumpExceptionHandlers(); > void dumpSwitchJumpTables(); > void dumpStringSwitchJumpTables(); >+ void dumpPreciseJumpTargets(); > > void dumpBytecode(const InstructionStream::Ref& it, const ICStatusMap&); > >Index: Source/JavaScriptCore/bytecode/BytecodeList.rb >=================================================================== >--- Source/JavaScriptCore/bytecode/BytecodeList.rb (revision 244813) >+++ Source/JavaScriptCore/bytecode/BytecodeList.rb (working copy) >@@ -60,6 +60,7 @@ types [ > :ArrayProfile, > :ArrayAllocationProfile, > :ObjectAllocationProfile, >+ :TraceProfile, > ] > > namespace :Special do >@@ -660,6 +661,14 @@ op_group :BinaryJmp, > > op :loop_hint > >+op :trace_hint, >+ metadata: { >+ entrypoint: uintptr_t, >+ traceProfile: TraceProfile, >+ count: int, >+ shouldCompile: bool, >+ } >+ > op_group :SwitchValue, > [ > :switch_imm, >Index: Source/JavaScriptCore/bytecode/BytecodeUseDef.h >=================================================================== >--- Source/JavaScriptCore/bytecode/BytecodeUseDef.h (revision 244813) >+++ Source/JavaScriptCore/bytecode/BytecodeUseDef.h (working copy) >@@ -76,6 +76,7 @@ void computeUsesForBytecodeOffset(Block* > case op_debug: > case op_jneq_ptr: > case op_loop_hint: >+ case op_trace_hint: > case op_jmp: > case op_new_object: > case op_enter: >@@ -321,6 +322,7 @@ void computeDefsForBytecodeOffset(Block* > case op_jbelow: > case op_jbeloweq: > case op_loop_hint: >+ case op_trace_hint: > case op_switch_imm: > case op_switch_char: > case op_switch_string: >Index: Source/JavaScriptCore/bytecode/CodeBlock.cpp 
>=================================================================== >--- Source/JavaScriptCore/bytecode/CodeBlock.cpp (revision 244813) >+++ Source/JavaScriptCore/bytecode/CodeBlock.cpp (working copy) >@@ -190,7 +190,7 @@ void CodeBlock::dumpAssumingJITType(Prin > > if (codeType() == FunctionCode) > out.print(specializationKind()); >- out.print(", ", instructionsSize()); >+ out.print(", ", bytecodeCost()); > if (this->jitType() == JITType::BaselineJIT && m_shouldAlwaysBeInlined) > out.print(" (ShouldAlwaysBeInlined)"); > if (ownerExecutable()->neverInline()) >@@ -521,6 +521,19 @@ bool CodeBlock::finishCreation(VM& vm, S > break; \ > } > >+ TraceProfile* lastTraceProfile = nullptr; >+ OpTraceHint::Metadata* lastTraceHintMetadata = nullptr; >+ >+ auto setTraceCount = [&] { >+ double threshold = lastTraceProfile->end - lastTraceProfile->start; >+ threshold *= Options::traceThresholdMultiplier(); >+ int32_t count = threshold; >+ count = std::max(Options::minimumTraceThreshold(), count); >+ count = std::min(Options::maximumTraceThreshold(), count); >+ count = -count; >+ lastTraceHintMetadata->m_count = count; >+ }; >+ > const InstructionStream& instructionStream = instructions(); > for (const auto& instruction : instructionStream) { > OpcodeID opcodeID = instruction->opcodeID(); >@@ -770,12 +783,33 @@ bool CodeBlock::finishCreation(VM& vm, S > m_numberOfArgumentsToSkip = numberOfArgumentsToSkip; > break; > } >+ >+ case op_trace_hint: { >+ INITIALIZE_METADATA(OpTraceHint) >+ unsigned offset = instruction.offset(); >+ if (lastTraceProfile) { >+ lastTraceProfile->end = offset; >+ setTraceCount(); >+ } >+ >+ metadata.m_traceProfile.start = offset; >+ lastTraceProfile = &metadata.m_traceProfile; >+ lastTraceHintMetadata = &metadata; >+ >+ metadata.m_entrypoint = 0; >+ break; >+ } > > default: > break; > } > } > >+ if (lastTraceProfile) { >+ lastTraceProfile->end = instructionsSize(); >+ setTraceCount(); >+ } >+ > #undef CASE > #undef INITIALIZE_METADATA > #undef LINK_FIELD >@@ -1068,7 +1102,8 @@ void CodeBlock::propagateTransitions(con > > VM& vm = *m_vm; > >- if (jitType() == JITType::InterpreterThunk) { >+ //if (jitType() == JITType::InterpreterThunk) { >+ if (JITCode::couldBeInterpreted(jitType())) { > const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions(); > const InstructionStream& instructionStream = instructions(); > for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) { >@@ -1525,6 +1560,9 @@ void CodeBlock::setCalleeSaveRegisters(s > ensureJITData(locker).m_calleeSaveRegisters = WTFMove(registerAtOffsetList); > } > >+/* >+ OOPS: Make this temporary per compile until we actually >+ get executable code! > void CodeBlock::resetJITData() > { > RELEASE_ASSERT(!JITCode::isJIT(jitType())); >@@ -1543,6 +1581,7 @@ void CodeBlock::resetJITData() > jitData->m_rareCaseProfiles.clear(); > } > } >+*/ > #endif > > void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor) >Index: Source/JavaScriptCore/bytecode/CodeBlock.h >=================================================================== >--- Source/JavaScriptCore/bytecode/CodeBlock.h (revision 244813) >+++ Source/JavaScriptCore/bytecode/CodeBlock.h (working copy) >@@ -309,16 +309,16 @@ public: > // looking for a CallLinkInfoMap to amortize the cost of calling this. 
> CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex); > >- void setJITCodeMap(JITCodeMap&& jitCodeMap) >- { >- ConcurrentJSLocker locker(m_lock); >- ensureJITData(locker).m_jitCodeMap = WTFMove(jitCodeMap); >- } >- const JITCodeMap& jitCodeMap() >- { >- ConcurrentJSLocker locker(m_lock); >- return ensureJITData(locker).m_jitCodeMap; >- } >+ //void setJITCodeMap(JITCodeMap&& jitCodeMap) >+ //{ >+ // ConcurrentJSLocker locker(m_lock); >+ // ensureJITData(locker).m_jitCodeMap = WTFMove(jitCodeMap); >+ //} >+ //const JITCodeMap& jitCodeMap() >+ //{ >+ // ConcurrentJSLocker locker(m_lock); >+ // return ensureJITData(locker).m_jitCodeMap; >+ //} > > void setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&&); > Optional<CodeOrigin> findPC(void* pc); >@@ -348,12 +348,6 @@ public: > return value >= Options::couldTakeSlowCaseMinimumCount(); > } > >- // We call this when we want to reattempt compiling something with the baseline JIT. Ideally >- // the baseline JIT would not add data to CodeBlock, but instead it would put its data into >- // a newly created JITCode, which could be thrown away if we bail on JIT compilation. Then we >- // would be able to get rid of this silly function. >- // FIXME: https://bugs.webkit.org/show_bug.cgi?id=159061 >- void resetJITData(); > #endif // ENABLE(JIT) > > void unlinkIncomingCalls(); >@@ -884,6 +878,10 @@ public: > return m_unlinkedCode->metadataSizeInBytes(); > } > >+ MetadataTable* metadataTable() { return m_metadata.get(); } >+ >+ const void* instructionsRawPointer() { return m_instructionsRawPointer; } >+ > protected: > void finalizeLLIntInlineCaches(); > #if ENABLE(JIT) >Index: Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp >=================================================================== >--- Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp (revision 244813) >+++ Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp (working copy) >@@ -1369,6 +1369,9 @@ void BytecodeGenerator::emitLabel(Label& > > m_codeBlock->addJumpTarget(newLabelIndex); > >+ //if (m_lastInstruction->opcodeID() != op_trace_hint) >+ // OpTraceHint::emit(this); >+ > // This disables peephole optimizations when an instruction is a jump target > m_lastOpcodeID = op_end; > } >@@ -1384,6 +1387,8 @@ void BytecodeGenerator::emitEnter() > // This disables peephole optimizations when an instruction is a jump target > m_lastOpcodeID = op_end; > } >+ >+ OpTraceHint::emit(this); > } > > void BytecodeGenerator::emitLoopHint() >@@ -1850,6 +1855,8 @@ void BytecodeGenerator::emitProfileType( > > void BytecodeGenerator::emitProfileControlFlow(int textOffset) > { >+ OpTraceHint::emit(this); >+ > if (vm()->controlFlowProfiler()) { > RELEASE_ASSERT(textOffset >= 0); > >Index: Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp >=================================================================== >--- Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp (revision 244813) >+++ Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp (working copy) >@@ -3356,7 +3356,7 @@ void ReturnNode::emitBytecode(BytecodeGe > generator.emitProfileControlFlow(endOffset()); > // Emitting an unreachable return here is needed in case this op_profile_control_flow is the > // last opcode in a CodeBlock because a CodeBlock's instructions must end with a terminal opcode. 
>- if (generator.vm()->controlFlowProfiler()) >+ if (generator.vm()->controlFlowProfiler() || generator.lastOpcodeID() == op_trace_hint) > generator.emitReturn(generator.emitLoad(nullptr, jsUndefined())); > } > >Index: Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp >=================================================================== >--- Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp (revision 244813) >+++ Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp (working copy) >@@ -6554,6 +6554,19 @@ void ByteCodeParser::parseBlock(unsigned > NEXT_OPCODE(op_put_to_scope); > } > >+ case op_trace_hint: { >+ addToGraph(Check); // We add a nop here so that basic block linking doesn't break. >+ >+ //static double compiled; >+ //static double total; >+ //++total; >+ //auto bytecode = currentInstruction->as<OpTraceHint>(); >+ //if (bytecode.metadata(codeBlock).m_entrypoint) >+ // ++compiled; >+ //dataLogLn("percent compiled: ", compiled / total * 100); >+ NEXT_OPCODE(op_trace_hint); >+ } >+ > case op_loop_hint: { > // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG > // OSR can only happen at basic block boundaries. Assert that these two statements >Index: Source/JavaScriptCore/dfg/DFGCapabilities.cpp >=================================================================== >--- Source/JavaScriptCore/dfg/DFGCapabilities.cpp (revision 244813) >+++ Source/JavaScriptCore/dfg/DFGCapabilities.cpp (working copy) >@@ -202,6 +202,7 @@ CapabilityLevel capabilityLevel(OpcodeID > case op_jbelow: > case op_jbeloweq: > case op_loop_hint: >+ case op_trace_hint: > case op_check_traps: > case op_nop: > case op_ret: >Index: Source/JavaScriptCore/dfg/DFGOSREntry.cpp >=================================================================== >--- Source/JavaScriptCore/dfg/DFGOSREntry.cpp (revision 244813) >+++ Source/JavaScriptCore/dfg/DFGOSREntry.cpp (working copy) >@@ -97,7 +97,6 @@ void* prepareOSREntry(ExecState* exec, C > ASSERT(JITCode::isOptimizingJIT(codeBlock->jitType())); > ASSERT(codeBlock->alternative()); > ASSERT(codeBlock->alternative()->jitType() == JITType::BaselineJIT); >- ASSERT(!codeBlock->jitCodeMap()); > > if (!Options::useOSREntryToDFG()) > return nullptr; >Index: Source/JavaScriptCore/dfg/DFGOSRExit.cpp >=================================================================== >--- Source/JavaScriptCore/dfg/DFGOSRExit.cpp (revision 244813) >+++ Source/JavaScriptCore/dfg/DFGOSRExit.cpp (working copy) >@@ -371,12 +371,12 @@ void OSRExit::executeOSRExit(Context& co > // results will be cached in the OSRExitState record for use of the rest of the > // exit ramp code. > >- // Ensure we have baseline codeBlocks to OSR exit to. >- prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin); >- > CodeBlock* baselineCodeBlock = codeBlock->baselineAlternative(); > ASSERT(baselineCodeBlock->jitType() == JITType::BaselineJIT); > >+ // Ensure we have baseline codeBlocks to OSR exit to. 
>+ prepareCodeOriginForOSRExit(exec, baselineCodeBlock, exit.m_codeOrigin); >+ > SpeculationRecovery* recovery = nullptr; > if (exit.m_recoveryIndex != UINT_MAX) { > recovery = &dfgJITCode->speculationRecovery[exit.m_recoveryIndex]; >@@ -405,11 +405,10 @@ void OSRExit::executeOSRExit(Context& co > adjustedThreshold = BaselineExecutionCounter::clippedThreshold(codeBlock->globalObject(), adjustedThreshold); > > CodeBlock* codeBlockForExit = baselineCodeBlockForOriginAndBaselineCodeBlock(exit.m_codeOrigin, baselineCodeBlock); >- const JITCodeMap& codeMap = codeBlockForExit->jitCodeMap(); >- CodeLocationLabel<JSEntryPtrTag> codeLocation = codeMap.find(exit.m_codeOrigin.bytecodeIndex()); >- ASSERT(codeLocation); >+ MacroAssemblerCodePtr<JITTraceletPtrTag> codePtr = static_cast<TraceletJITCode*>(codeBlockForExit->jitCode().get())->findCodeLocation(exit.m_codeOrigin.bytecodeIndex()); >+ ASSERT(!!codePtr); > >- void* jumpTarget = codeLocation.executableAddress(); >+ void* jumpTarget = codePtr.executableAddress(); > > // Compute the value recoveries. > Operands<ValueRecovery> operands; >@@ -1047,7 +1046,7 @@ void JIT_OPERATION OSRExit::compileOSREx > ASSERT(!vm->callFrameForCatch || exit.m_kind == GenericUnwind); > EXCEPTION_ASSERT_UNUSED(scope, !!scope.exception() || !exit.isExceptionHandler()); > >- prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin); >+ prepareCodeOriginForOSRExit(exec, codeBlock->baselineAlternative(), exit.m_codeOrigin); > > // Compute the value recoveries. > Operands<ValueRecovery> operands; >Index: Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp >=================================================================== >--- Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp (revision 244813) >+++ Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp (working copy) >@@ -311,10 +311,11 @@ void adjustAndJumpToTarget(VM& vm, CCall > CodeBlock* codeBlockForExit = jit.baselineCodeBlockFor(exit.m_codeOrigin); > ASSERT(codeBlockForExit == codeBlockForExit->baselineVersion()); > ASSERT(codeBlockForExit->jitType() == JITType::BaselineJIT); >- CodeLocationLabel<JSEntryPtrTag> codeLocation = codeBlockForExit->jitCodeMap().find(exit.m_codeOrigin.bytecodeIndex()); >- ASSERT(codeLocation); >+ RELEASE_ASSERT(codeBlockForExit->jitCode()->isTraceletJITCode()); >+ MacroAssemblerCodePtr<JITTraceletPtrTag> codePtr = static_cast<TraceletJITCode*>(codeBlockForExit->jitCode().get())->findCodeLocation(exit.m_codeOrigin.bytecodeIndex()); >+ ASSERT(!!codePtr); > >- void* jumpTarget = codeLocation.retagged<OSRExitPtrTag>().executableAddress(); >+ void* jumpTarget = codePtr.retagged<OSRExitPtrTag>().executableAddress(); > jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(codeBlockForExit) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister); > if (exit.isExceptionHandler()) { > // Since we're jumping to op_catch, we need to set callFrameForCatch. 
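Note on the OSR-exit changes above: with per-tracelet entry points, exits can no longer consult the densely built JITCodeMap (which this patch comments out). Instead they look the target bytecode index up in the tracelet code's location table, and prepareCodeOriginForOSRExit now takes the baseline CodeBlock plus the bytecode index so JITWorklist::compileNow can compile the missing tracelet on demand before the jump. A rough sketch of the lookup side, using plain standard-library types in place of WTF containers and a void* stand-in for MacroAssemblerCodePtr<JITTraceletPtrTag> (TraceletJITCode's real layout is not shown in full in this diff):

#include <unordered_map>

using TraceletCodePtr = void*; // hypothetical stand-in for the tagged code pointer

// Sketch of the per-CodeBlock table the patch consults from OSR exit:
// each compiled tracelet records the machine-code address for every
// bytecode offset it contains.
struct TraceletJITCodeSketch {
    std::unordered_map<unsigned, TraceletCodePtr> m_codeLocations;

    // Analogous to TraceletJITCode::findCodeLocation(bytecodeIndex):
    // a null result means the offset was never compiled, which callers
    // treat as "compile that tracelet first, then retry the lookup".
    TraceletCodePtr findCodeLocation(unsigned bytecodeIndex) const
    {
        auto it = m_codeLocations.find(bytecodeIndex);
        return it == m_codeLocations.end() ? nullptr : it->second;
    }
};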
>Index: Source/JavaScriptCore/dfg/DFGOSRExitPreparation.cpp >=================================================================== >--- Source/JavaScriptCore/dfg/DFGOSRExitPreparation.cpp (revision 244813) >+++ Source/JavaScriptCore/dfg/DFGOSRExitPreparation.cpp (working copy) >@@ -36,15 +36,19 @@ > > namespace JSC { namespace DFG { > >-void prepareCodeOriginForOSRExit(ExecState* exec, CodeOrigin codeOrigin) >+void prepareCodeOriginForOSRExit(ExecState* exec, CodeBlock* codeBlock, CodeOrigin codeOrigin) > { > VM& vm = exec->vm(); > DeferGC deferGC(vm.heap); >- >+ >+ RELEASE_ASSERT(codeBlock->baselineAlternative() == codeBlock); >+ > for (; codeOrigin.inlineCallFrame(); codeOrigin = codeOrigin.inlineCallFrame()->directCaller) { > CodeBlock* codeBlock = codeOrigin.inlineCallFrame()->baselineCodeBlock.get(); >- JITWorklist::ensureGlobalWorklist().compileNow(codeBlock); >+ JITWorklist::ensureGlobalWorklist().compileNow(codeBlock, codeOrigin.bytecodeIndex()); > } >+ >+ JITWorklist::ensureGlobalWorklist().compileNow(codeBlock, codeOrigin.bytecodeIndex()); > } > > } } // namespace JSC::DFG >Index: Source/JavaScriptCore/dfg/DFGOSRExitPreparation.h >=================================================================== >--- Source/JavaScriptCore/dfg/DFGOSRExitPreparation.h (revision 244813) >+++ Source/JavaScriptCore/dfg/DFGOSRExitPreparation.h (working copy) >@@ -41,7 +41,7 @@ namespace JSC { namespace DFG { > // probably it's a good sign that the thing we're exiting into is hot. Even more > // interestingly, since the code was inlined, it may never otherwise get JIT > // compiled since the act of inlining it may ensure that it otherwise never runs. >-void prepareCodeOriginForOSRExit(ExecState*, CodeOrigin); >+void prepareCodeOriginForOSRExit(ExecState*, CodeBlock*, CodeOrigin); > > } } // namespace JSC::DFG > >Index: Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp >=================================================================== >--- Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp (revision 244813) >+++ Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp (working copy) >@@ -532,7 +532,7 @@ extern "C" void* compileFTLOSRExit(ExecS > } > } > >- prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin); >+ prepareCodeOriginForOSRExit(exec, codeBlock->baselineAlternative(), exit.m_codeOrigin); > > compileStub(exitID, jitCode, exit, &vm, codeBlock); > >Index: Source/JavaScriptCore/heap/Heap.cpp >=================================================================== >--- Source/JavaScriptCore/heap/Heap.cpp (revision 244813) >+++ Source/JavaScriptCore/heap/Heap.cpp (working copy) >@@ -58,6 +58,7 @@ > #include "MarkStackMergingConstraint.h" > #include "MarkedSpaceInlines.h" > #include "MarkingConstraintSet.h" >+#include "OpcodeInlines.h" > #include "PreventCollectionScope.h" > #include "SamplingProfiler.h" > #include "ShadowChicken.h" >@@ -85,6 +86,7 @@ > #include <wtf/SimpleStats.h> > #include <wtf/Threading.h> > >+ > #if PLATFORM(IOS_FAMILY) > #include <bmalloc/bmalloc.h> > #endif >@@ -2993,4 +2995,33 @@ void Heap::runTaskInParallel(RefPtr<Shar > } > } > >+void Heap::dumpAllCodeBlockTraces() >+{ >+ HeapIterationScope iterationScope(*this); >+ double total = 0.0; >+ double compiled = 0.0; >+ m_objectSpace.forEachLiveCell(iterationScope, [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus { >+ if (!isJSCellKind(kind)) >+ return IterationStatus::Continue; >+ >+ if (CodeBlock* codeBlock = jsDynamicCast<CodeBlock*>(*m_vm, static_cast<JSCell*>(cell))) { >+ const InstructionStream& instructionStream = 
codeBlock->instructions(); >+ for (const auto& instruction : instructionStream) { >+ OpcodeID opcodeID = instruction->opcodeID(); >+ if (opcodeID != op_trace_hint) >+ continue; >+ >+ ++total; >+ auto bytecode = instruction->as<OpTraceHint>(); >+ auto& metadata = bytecode.metadata(codeBlock); >+ if (metadata.m_entrypoint) >+ ++compiled; >+ } >+ } >+ >+ return IterationStatus::Continue; >+ }); >+ dataLogLn("total compiled: ", compiled/total); >+} >+ > } // namespace JSC >Index: Source/JavaScriptCore/heap/Heap.h >=================================================================== >--- Source/JavaScriptCore/heap/Heap.h (revision 244813) >+++ Source/JavaScriptCore/heap/Heap.h (working copy) >@@ -112,6 +112,9 @@ class HeapUtil; > class Heap { > WTF_MAKE_NONCOPYABLE(Heap); > public: >+ >+ void dumpAllCodeBlockTraces(); >+ > friend class JIT; > friend class DFG::SpeculativeJIT; > static Heap* heap(const JSValue); // 0 for immediate values >Index: Source/JavaScriptCore/jit/AssemblyHelpers.h >=================================================================== >--- Source/JavaScriptCore/jit/AssemblyHelpers.h (revision 244813) >+++ Source/JavaScriptCore/jit/AssemblyHelpers.h (working copy) >@@ -52,7 +52,7 @@ class AssemblyHelpers : public MacroAsse > public: > AssemblyHelpers(CodeBlock* codeBlock) > : m_codeBlock(codeBlock) >- , m_baselineCodeBlock(codeBlock ? codeBlock->baselineAlternative() : 0) >+ , m_baselineCodeBlock(codeBlock ? codeBlock->baselineAlternative() : nullptr) > { > if (m_codeBlock) { > ASSERT(m_baselineCodeBlock); >Index: Source/JavaScriptCore/jit/ExecutableAllocator.cpp >=================================================================== >--- Source/JavaScriptCore/jit/ExecutableAllocator.cpp (revision 244813) >+++ Source/JavaScriptCore/jit/ExecutableAllocator.cpp (working copy) >@@ -159,9 +159,10 @@ public: > return; > > size_t reservationSize; >- if (Options::jitMemoryReservationSize()) >- reservationSize = Options::jitMemoryReservationSize(); >- else >+ // OOPS! >+ //if (Options::jitMemoryReservationSize()) >+ // reservationSize = Options::jitMemoryReservationSize(); >+ //else > reservationSize = fixedExecutableMemoryPoolSize; > reservationSize = std::max(roundUpToMultipleOf(pageSize(), reservationSize), pageSize() * 2); > >Index: Source/JavaScriptCore/jit/JIT.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JIT.cpp (revision 244813) >+++ Source/JavaScriptCore/jit/JIT.cpp (working copy) >@@ -41,6 +41,7 @@ > #include "JSCInlines.h" > #include "JSFunction.h" > #include "LinkBuffer.h" >+#include "LLIntData.h" > #include "MaxFrameExtentForSlowPathCall.h" > #include "ModuleProgramCodeBlock.h" > #include "PCToCodeOriginMap.h" >@@ -56,11 +57,15 @@ > #include <wtf/GraphNodeWorklist.h> > #include <wtf/SimpleStats.h> > >+#include "MacroAssemblerPrinter.h" >+ > namespace JSC { > namespace JITInternal { > static constexpr const bool verbose = false; > } > >+static constexpr bool verboseProbes = false; >+ > Seconds totalBaselineCompileTime; > Seconds totalDFGCompileTime; > Seconds totalFTLCompileTime; >@@ -77,7 +82,6 @@ void ctiPatchCallByReturnAddress(ReturnA > JIT::JIT(VM* vm, CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset) > : JSInterfaceJIT(vm, codeBlock) > , m_interpreter(vm->interpreter) >- , m_labels(codeBlock ? 
codeBlock->instructions().size() : 0) > , m_bytecodeOffset(std::numeric_limits<unsigned>::max()) > , m_pcToCodeOriginMapBuilder(*vm) > , m_canBeOptimized(false) >@@ -85,6 +89,9 @@ JIT::JIT(VM* vm, CodeBlock* codeBlock, u > , m_shouldUseIndexMasking(Options::enableSpectreMitigations()) > , m_loopOSREntryBytecodeOffset(loopOSREntryBytecodeOffset) > { >+ RefPtr<JITCode> jitCode = codeBlock->jitCode(); >+ if (jitCode && jitCode->isTraceletJITCode()) >+ m_priorCode = static_cast<TraceletJITCode*>(jitCode.get()); > } > > JIT::~JIT() >@@ -187,13 +194,16 @@ void JIT::privateCompileMainPass() > jitAssertArgumentCountSane(); > > auto& instructions = m_codeBlock->instructions(); >- unsigned instructionCount = m_codeBlock->instructions().size(); > > m_callLinkInfoIndex = 0; > >- VM& vm = *m_codeBlock->vm(); >+ // OOPS: Don't keep recompiling the same traces! >+ //VM& vm = *m_codeBlock->vm(); > unsigned startBytecodeOffset = 0; >- if (m_loopOSREntryBytecodeOffset && (m_codeBlock->inherits<ProgramCodeBlock>(vm) || m_codeBlock->inherits<ModuleProgramCodeBlock>(vm))) { >+ >+ // OOPS: probs not needed anymore. >+ /* >+ if (!m_isTracelet && m_loopOSREntryBytecodeOffset && (m_codeBlock->inherits<ProgramCodeBlock>(vm) || m_codeBlock->inherits<ModuleProgramCodeBlock>(vm))) { > // We can only do this optimization because we execute ProgramCodeBlock's exactly once. > // This optimization would be invalid otherwise. When the LLInt determines it wants to > // do OSR entry into the baseline JIT in a loop, it will pass in the bytecode offset it >@@ -232,230 +242,245 @@ void JIT::privateCompileMainPass() > } > } > } >+ */ > >- for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) { >- if (m_bytecodeOffset == startBytecodeOffset && startBytecodeOffset > 0) { >- // We've proven all bytecode instructions up until here are unreachable. >- // Let's ensure that by crashing if it's ever hit. >- breakpoint(); >- } >+ for (const TraceProfile& trace : m_traces) { >+ if (verboseProbes) >+ dataLogLn("Compiling trace: [", trace.start, ", ", trace.end, ")"); >+ >+ for (m_bytecodeOffset = trace.start; m_bytecodeOffset < trace.end; ) { >+ if (m_bytecodeOffset == startBytecodeOffset && startBytecodeOffset > 0) { >+ // We've proven all bytecode instructions up until here are unreachable. >+ // Let's ensure that by crashing if it's ever hit. >+ breakpoint(); >+ } > >- if (m_disassembler) >- m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label()); >- const Instruction* currentInstruction = instructions.at(m_bytecodeOffset).ptr(); >- ASSERT_WITH_MESSAGE(currentInstruction->size(), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset); >+ if (m_disassembler) >+ m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label()); >+ const Instruction* currentInstruction = instructions.at(m_bytecodeOffset).ptr(); >+ ASSERT_WITH_MESSAGE(currentInstruction->size(), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset); > >- m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset)); >+ m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset)); > > #if ENABLE(OPCODE_SAMPLING) >- if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice. >- sampleInstruction(currentInstruction); >+ if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice. 
>+ sampleInstruction(currentInstruction); > #endif > >- m_labels[m_bytecodeOffset] = label(); >+ m_labels.add(m_bytecodeOffset, label()); >+ if (m_bytecodeOffset == trace.start && verboseProbes) >+ print("Started running trace in: ", m_codeBlock->inferredName().data(), "#", m_codeBlock->hashAsStringIfPossible().data(), " [", trace.start, ", ", trace.end, ")\n"); > >- if (JITInternal::verbose) >- dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset()); >+ if (JITInternal::verbose) >+ dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset()); > >- OpcodeID opcodeID = currentInstruction->opcodeID(); >+ OpcodeID opcodeID = currentInstruction->opcodeID(); > >- if (UNLIKELY(m_compilation)) { >- add64( >- TrustedImm32(1), >- AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin( >- m_compilation->bytecodes(), m_bytecodeOffset)))->address())); >- } >- >- if (Options::eagerlyUpdateTopCallFrame()) >- updateTopCallFrame(); >+ if (UNLIKELY(m_compilation)) { >+ add64( >+ TrustedImm32(1), >+ AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin( >+ m_compilation->bytecodes(), m_bytecodeOffset)))->address())); >+ } >+ >+ if (Options::eagerlyUpdateTopCallFrame()) >+ updateTopCallFrame(); > >- unsigned bytecodeOffset = m_bytecodeOffset; >+ unsigned bytecodeOffset = m_bytecodeOffset; > #if ENABLE(MASM_PROBE) >- if (UNLIKELY(Options::traceBaselineJITExecution())) { >- CodeBlock* codeBlock = m_codeBlock; >- probe([=] (Probe::Context& ctx) { >- dataLogLn("JIT [", bytecodeOffset, "] ", opcodeNames[opcodeID], " cfr ", RawPointer(ctx.fp()), " @ ", codeBlock); >- }); >- } >+ if (UNLIKELY(Options::traceBaselineJITExecution())) { >+ CodeBlock* codeBlock = m_codeBlock; >+ probe([=] (Probe::Context& ctx) { >+ dataLogLn("JIT [", bytecodeOffset, "] ", opcodeNames[opcodeID], " cfr ", RawPointer(ctx.fp()), " @ ", codeBlock); >+ }); >+ } > #endif > >- switch (opcodeID) { >- DEFINE_SLOW_OP(in_by_val) >- DEFINE_SLOW_OP(less) >- DEFINE_SLOW_OP(lesseq) >- DEFINE_SLOW_OP(greater) >- DEFINE_SLOW_OP(greatereq) >- DEFINE_SLOW_OP(is_function) >- DEFINE_SLOW_OP(is_object_or_null) >- DEFINE_SLOW_OP(typeof) >- DEFINE_SLOW_OP(strcat) >- DEFINE_SLOW_OP(push_with_scope) >- DEFINE_SLOW_OP(create_lexical_environment) >- DEFINE_SLOW_OP(get_by_val_with_this) >- DEFINE_SLOW_OP(put_by_id_with_this) >- DEFINE_SLOW_OP(put_by_val_with_this) >- DEFINE_SLOW_OP(resolve_scope_for_hoisting_func_decl_in_eval) >- DEFINE_SLOW_OP(define_data_property) >- DEFINE_SLOW_OP(define_accessor_property) >- DEFINE_SLOW_OP(unreachable) >- DEFINE_SLOW_OP(throw_static_error) >- DEFINE_SLOW_OP(new_array_with_spread) >- DEFINE_SLOW_OP(new_array_buffer) >- DEFINE_SLOW_OP(spread) >- DEFINE_SLOW_OP(get_enumerable_length) >- DEFINE_SLOW_OP(has_generic_property) >- DEFINE_SLOW_OP(get_property_enumerator) >- DEFINE_SLOW_OP(to_index_string) >- DEFINE_SLOW_OP(create_direct_arguments) >- DEFINE_SLOW_OP(create_scoped_arguments) >- DEFINE_SLOW_OP(create_cloned_arguments) >- DEFINE_SLOW_OP(create_rest) >- DEFINE_SLOW_OP(pow) >- >- DEFINE_OP(op_add) >- DEFINE_OP(op_bitnot) >- DEFINE_OP(op_bitand) >- DEFINE_OP(op_bitor) >- DEFINE_OP(op_bitxor) >- DEFINE_OP(op_call) >- DEFINE_OP(op_tail_call) >- DEFINE_OP(op_call_eval) >- DEFINE_OP(op_call_varargs) >- DEFINE_OP(op_tail_call_varargs) >- DEFINE_OP(op_tail_call_forward_arguments) >- DEFINE_OP(op_construct_varargs) >- DEFINE_OP(op_catch) >- DEFINE_OP(op_construct) >- 
DEFINE_OP(op_create_this) >- DEFINE_OP(op_to_this) >- DEFINE_OP(op_get_argument) >- DEFINE_OP(op_argument_count) >- DEFINE_OP(op_get_rest_length) >- DEFINE_OP(op_check_tdz) >- DEFINE_OP(op_identity_with_profile) >- DEFINE_OP(op_debug) >- DEFINE_OP(op_del_by_id) >- DEFINE_OP(op_del_by_val) >- DEFINE_OP(op_div) >- DEFINE_OP(op_end) >- DEFINE_OP(op_enter) >- DEFINE_OP(op_get_scope) >- DEFINE_OP(op_eq) >- DEFINE_OP(op_eq_null) >- DEFINE_OP(op_below) >- DEFINE_OP(op_beloweq) >- DEFINE_OP(op_try_get_by_id) >- DEFINE_OP(op_in_by_id) >- DEFINE_OP(op_get_by_id) >- DEFINE_OP(op_get_by_id_with_this) >- DEFINE_OP(op_get_by_id_direct) >- DEFINE_OP(op_get_by_val) >- DEFINE_OP(op_overrides_has_instance) >- DEFINE_OP(op_instanceof) >- DEFINE_OP(op_instanceof_custom) >- DEFINE_OP(op_is_empty) >- DEFINE_OP(op_is_undefined) >- DEFINE_OP(op_is_undefined_or_null) >- DEFINE_OP(op_is_boolean) >- DEFINE_OP(op_is_number) >- DEFINE_OP(op_is_object) >- DEFINE_OP(op_is_cell_with_type) >- DEFINE_OP(op_jeq_null) >- DEFINE_OP(op_jfalse) >- DEFINE_OP(op_jmp) >- DEFINE_OP(op_jneq_null) >- DEFINE_OP(op_jneq_ptr) >- DEFINE_OP(op_jless) >- DEFINE_OP(op_jlesseq) >- DEFINE_OP(op_jgreater) >- DEFINE_OP(op_jgreatereq) >- DEFINE_OP(op_jnless) >- DEFINE_OP(op_jnlesseq) >- DEFINE_OP(op_jngreater) >- DEFINE_OP(op_jngreatereq) >- DEFINE_OP(op_jeq) >- DEFINE_OP(op_jneq) >- DEFINE_OP(op_jstricteq) >- DEFINE_OP(op_jnstricteq) >- DEFINE_OP(op_jbelow) >- DEFINE_OP(op_jbeloweq) >- DEFINE_OP(op_jtrue) >- DEFINE_OP(op_loop_hint) >- DEFINE_OP(op_check_traps) >- DEFINE_OP(op_nop) >- DEFINE_OP(op_super_sampler_begin) >- DEFINE_OP(op_super_sampler_end) >- DEFINE_OP(op_lshift) >- DEFINE_OP(op_mod) >- DEFINE_OP(op_mov) >- DEFINE_OP(op_mul) >- DEFINE_OP(op_negate) >- DEFINE_OP(op_neq) >- DEFINE_OP(op_neq_null) >- DEFINE_OP(op_new_array) >- DEFINE_OP(op_new_array_with_size) >- DEFINE_OP(op_new_func) >- DEFINE_OP(op_new_func_exp) >- DEFINE_OP(op_new_generator_func) >- DEFINE_OP(op_new_generator_func_exp) >- DEFINE_OP(op_new_async_func) >- DEFINE_OP(op_new_async_func_exp) >- DEFINE_OP(op_new_async_generator_func) >- DEFINE_OP(op_new_async_generator_func_exp) >- DEFINE_OP(op_new_object) >- DEFINE_OP(op_new_regexp) >- DEFINE_OP(op_not) >- DEFINE_OP(op_nstricteq) >- DEFINE_OP(op_dec) >- DEFINE_OP(op_inc) >- DEFINE_OP(op_profile_type) >- DEFINE_OP(op_profile_control_flow) >- DEFINE_OP(op_get_parent_scope) >- DEFINE_OP(op_put_by_id) >- DEFINE_OP(op_put_by_val_direct) >- DEFINE_OP(op_put_by_val) >- DEFINE_OP(op_put_getter_by_id) >- DEFINE_OP(op_put_setter_by_id) >- DEFINE_OP(op_put_getter_setter_by_id) >- DEFINE_OP(op_put_getter_by_val) >- DEFINE_OP(op_put_setter_by_val) >- >- DEFINE_OP(op_ret) >- DEFINE_OP(op_rshift) >- DEFINE_OP(op_unsigned) >- DEFINE_OP(op_urshift) >- DEFINE_OP(op_set_function_name) >- DEFINE_OP(op_stricteq) >- DEFINE_OP(op_sub) >- DEFINE_OP(op_switch_char) >- DEFINE_OP(op_switch_imm) >- DEFINE_OP(op_switch_string) >- DEFINE_OP(op_throw) >- DEFINE_OP(op_to_number) >- DEFINE_OP(op_to_string) >- DEFINE_OP(op_to_object) >- DEFINE_OP(op_to_primitive) >- >- DEFINE_OP(op_resolve_scope) >- DEFINE_OP(op_get_from_scope) >- DEFINE_OP(op_put_to_scope) >- DEFINE_OP(op_get_from_arguments) >- DEFINE_OP(op_put_to_arguments) >- >- DEFINE_OP(op_has_structure_property) >- DEFINE_OP(op_has_indexed_property) >- DEFINE_OP(op_get_direct_pname) >- DEFINE_OP(op_enumerator_structure_pname) >- DEFINE_OP(op_enumerator_generic_pname) >- >- DEFINE_OP(op_log_shadow_chicken_prologue) >- DEFINE_OP(op_log_shadow_chicken_tail) >- default: >- RELEASE_ASSERT_NOT_REACHED(); 
>+ switch (opcodeID) { >+ DEFINE_SLOW_OP(in_by_val) >+ DEFINE_SLOW_OP(less) >+ DEFINE_SLOW_OP(lesseq) >+ DEFINE_SLOW_OP(greater) >+ DEFINE_SLOW_OP(greatereq) >+ DEFINE_SLOW_OP(is_function) >+ DEFINE_SLOW_OP(is_object_or_null) >+ DEFINE_SLOW_OP(typeof) >+ DEFINE_SLOW_OP(strcat) >+ DEFINE_SLOW_OP(push_with_scope) >+ DEFINE_SLOW_OP(create_lexical_environment) >+ DEFINE_SLOW_OP(get_by_val_with_this) >+ DEFINE_SLOW_OP(put_by_id_with_this) >+ DEFINE_SLOW_OP(put_by_val_with_this) >+ DEFINE_SLOW_OP(resolve_scope_for_hoisting_func_decl_in_eval) >+ DEFINE_SLOW_OP(define_data_property) >+ DEFINE_SLOW_OP(define_accessor_property) >+ DEFINE_SLOW_OP(unreachable) >+ DEFINE_SLOW_OP(throw_static_error) >+ DEFINE_SLOW_OP(new_array_with_spread) >+ DEFINE_SLOW_OP(new_array_buffer) >+ DEFINE_SLOW_OP(spread) >+ DEFINE_SLOW_OP(get_enumerable_length) >+ DEFINE_SLOW_OP(has_generic_property) >+ DEFINE_SLOW_OP(get_property_enumerator) >+ DEFINE_SLOW_OP(to_index_string) >+ DEFINE_SLOW_OP(create_direct_arguments) >+ DEFINE_SLOW_OP(create_scoped_arguments) >+ DEFINE_SLOW_OP(create_cloned_arguments) >+ DEFINE_SLOW_OP(create_rest) >+ DEFINE_SLOW_OP(pow) >+ >+ DEFINE_OP(op_add) >+ DEFINE_OP(op_bitnot) >+ DEFINE_OP(op_bitand) >+ DEFINE_OP(op_bitor) >+ DEFINE_OP(op_bitxor) >+ DEFINE_OP(op_call) >+ DEFINE_OP(op_tail_call) >+ DEFINE_OP(op_call_eval) >+ DEFINE_OP(op_call_varargs) >+ DEFINE_OP(op_tail_call_varargs) >+ DEFINE_OP(op_tail_call_forward_arguments) >+ DEFINE_OP(op_construct_varargs) >+ DEFINE_OP(op_catch) >+ DEFINE_OP(op_construct) >+ DEFINE_OP(op_create_this) >+ DEFINE_OP(op_to_this) >+ DEFINE_OP(op_get_argument) >+ DEFINE_OP(op_argument_count) >+ DEFINE_OP(op_get_rest_length) >+ DEFINE_OP(op_check_tdz) >+ DEFINE_OP(op_identity_with_profile) >+ DEFINE_OP(op_debug) >+ DEFINE_OP(op_del_by_id) >+ DEFINE_OP(op_del_by_val) >+ DEFINE_OP(op_div) >+ DEFINE_OP(op_end) >+ DEFINE_OP(op_enter) >+ DEFINE_OP(op_get_scope) >+ DEFINE_OP(op_eq) >+ DEFINE_OP(op_eq_null) >+ DEFINE_OP(op_below) >+ DEFINE_OP(op_beloweq) >+ DEFINE_OP(op_try_get_by_id) >+ DEFINE_OP(op_in_by_id) >+ DEFINE_OP(op_get_by_id) >+ DEFINE_OP(op_get_by_id_with_this) >+ DEFINE_OP(op_get_by_id_direct) >+ DEFINE_OP(op_get_by_val) >+ DEFINE_OP(op_overrides_has_instance) >+ DEFINE_OP(op_instanceof) >+ DEFINE_OP(op_instanceof_custom) >+ DEFINE_OP(op_is_empty) >+ DEFINE_OP(op_is_undefined) >+ DEFINE_OP(op_is_undefined_or_null) >+ DEFINE_OP(op_is_boolean) >+ DEFINE_OP(op_is_number) >+ DEFINE_OP(op_is_object) >+ DEFINE_OP(op_is_cell_with_type) >+ DEFINE_OP(op_jeq_null) >+ DEFINE_OP(op_jfalse) >+ DEFINE_OP(op_jmp) >+ DEFINE_OP(op_jneq_null) >+ DEFINE_OP(op_jneq_ptr) >+ DEFINE_OP(op_jless) >+ DEFINE_OP(op_jlesseq) >+ DEFINE_OP(op_jgreater) >+ DEFINE_OP(op_jgreatereq) >+ DEFINE_OP(op_jnless) >+ DEFINE_OP(op_jnlesseq) >+ DEFINE_OP(op_jngreater) >+ DEFINE_OP(op_jngreatereq) >+ DEFINE_OP(op_jeq) >+ DEFINE_OP(op_jneq) >+ DEFINE_OP(op_jstricteq) >+ DEFINE_OP(op_jnstricteq) >+ DEFINE_OP(op_jbelow) >+ DEFINE_OP(op_jbeloweq) >+ DEFINE_OP(op_jtrue) >+ DEFINE_OP(op_loop_hint) >+ DEFINE_OP(op_trace_hint) >+ DEFINE_OP(op_check_traps) >+ DEFINE_OP(op_nop) >+ DEFINE_OP(op_super_sampler_begin) >+ DEFINE_OP(op_super_sampler_end) >+ DEFINE_OP(op_lshift) >+ DEFINE_OP(op_mod) >+ DEFINE_OP(op_mov) >+ DEFINE_OP(op_mul) >+ DEFINE_OP(op_negate) >+ DEFINE_OP(op_neq) >+ DEFINE_OP(op_neq_null) >+ DEFINE_OP(op_new_array) >+ DEFINE_OP(op_new_array_with_size) >+ DEFINE_OP(op_new_func) >+ DEFINE_OP(op_new_func_exp) >+ DEFINE_OP(op_new_generator_func) >+ DEFINE_OP(op_new_generator_func_exp) >+ 
DEFINE_OP(op_new_async_func) >+ DEFINE_OP(op_new_async_func_exp) >+ DEFINE_OP(op_new_async_generator_func) >+ DEFINE_OP(op_new_async_generator_func_exp) >+ DEFINE_OP(op_new_object) >+ DEFINE_OP(op_new_regexp) >+ DEFINE_OP(op_not) >+ DEFINE_OP(op_nstricteq) >+ DEFINE_OP(op_dec) >+ DEFINE_OP(op_inc) >+ DEFINE_OP(op_profile_type) >+ DEFINE_OP(op_profile_control_flow) >+ DEFINE_OP(op_get_parent_scope) >+ DEFINE_OP(op_put_by_id) >+ DEFINE_OP(op_put_by_val_direct) >+ DEFINE_OP(op_put_by_val) >+ DEFINE_OP(op_put_getter_by_id) >+ DEFINE_OP(op_put_setter_by_id) >+ DEFINE_OP(op_put_getter_setter_by_id) >+ DEFINE_OP(op_put_getter_by_val) >+ DEFINE_OP(op_put_setter_by_val) >+ >+ DEFINE_OP(op_ret) >+ DEFINE_OP(op_rshift) >+ DEFINE_OP(op_unsigned) >+ DEFINE_OP(op_urshift) >+ DEFINE_OP(op_set_function_name) >+ DEFINE_OP(op_stricteq) >+ DEFINE_OP(op_sub) >+ DEFINE_OP(op_switch_char) >+ DEFINE_OP(op_switch_imm) >+ DEFINE_OP(op_switch_string) >+ DEFINE_OP(op_throw) >+ DEFINE_OP(op_to_number) >+ DEFINE_OP(op_to_string) >+ DEFINE_OP(op_to_object) >+ DEFINE_OP(op_to_primitive) >+ >+ DEFINE_OP(op_resolve_scope) >+ DEFINE_OP(op_get_from_scope) >+ DEFINE_OP(op_put_to_scope) >+ DEFINE_OP(op_get_from_arguments) >+ DEFINE_OP(op_put_to_arguments) >+ >+ DEFINE_OP(op_has_structure_property) >+ DEFINE_OP(op_has_indexed_property) >+ DEFINE_OP(op_get_direct_pname) >+ DEFINE_OP(op_enumerator_structure_pname) >+ DEFINE_OP(op_enumerator_generic_pname) >+ >+ DEFINE_OP(op_log_shadow_chicken_prologue) >+ DEFINE_OP(op_log_shadow_chicken_tail) >+ default: >+ RELEASE_ASSERT_NOT_REACHED(); >+ } >+ >+ if (JITInternal::verbose) >+ dataLog("At ", bytecodeOffset, ": ", m_slowCases.size(), "\n"); > } > >- if (JITInternal::verbose) >- dataLog("At ", bytecodeOffset, ": ", m_slowCases.size(), "\n"); >+ if (m_isTracelet) { >+ if (verboseProbes) >+ dataLogLn("end of trace going to: ", trace.end); >+ m_jmpTable.append(JumpTable(jump(), trace.end)); >+ } > } > > RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size()); >@@ -468,9 +493,158 @@ void JIT::privateCompileMainPass() > > void JIT::privateCompileLinkPass() > { >- unsigned jmpTableCount = m_jmpTable.size(); >- for (unsigned i = 0; i < jmpTableCount; ++i) >- m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this); >+ // OOPS: concurrency issue here when we look it up in JIT code! >+ >+ auto emitJumpToLLInt = [&] (unsigned bytecodeOffset) { >+ if (verboseProbes) >+ dataLogLn("compiling jump to LLInt bc#", bytecodeOffset); >+ const Instruction& currentInstruction = *m_codeBlock->instructions().at(bytecodeOffset).ptr(); >+ MacroAssemblerCodePtr<JSEntryPtrTag> destination; >+ if (currentInstruction.isWide()) >+ destination = LLInt::getWideCodePtr<JSEntryPtrTag>(currentInstruction.opcodeID()); >+ else >+ destination = LLInt::getCodePtr<JSEntryPtrTag>(currentInstruction.opcodeID()); >+ >+ auto start = label(); >+ // OOPS: Abstract LLInt registers since this is x86 specific! 
>+ move(TrustedImmPtr(m_codeBlock->metadataTable()), GPRInfo::regCS1); >+ move(TrustedImmPtr(m_codeBlock->instructionsRawPointer()), GPRInfo::regCS2); >+ move(TrustedImm32(bytecodeOffset), GPRInfo::regT4); >+ if (verboseProbes) >+ print("Exiting trace to LLInt: bc#", bytecodeOffset, " to: ", RawPointer(destination.executableAddress()), " is wide: ", currentInstruction.isWide(), " in codeblock: ", m_codeBlock->inferredName().data(), "#", m_codeBlock->hashAsStringIfPossible().data(), "\n"); >+ move(TrustedImmPtr(destination.executableAddress()), GPRInfo::regT0); >+ jump(GPRInfo::regT0, JSEntryPtrTag); >+ auto end = label(); >+ >+ m_locationsOfJumpToLLInt.add(bytecodeOffset, Vector<std::pair<Label, Label>>()).iterator->value.append({ start, end }); >+ }; >+ >+ auto getLocalBytecode = [&] (unsigned bytecodeOffset) -> Optional<Label> { >+ auto iter = m_labels.find(bytecodeOffset); >+ if (iter != m_labels.end()) >+ return iter->value; >+ return WTF::nullopt; >+ }; >+ >+ auto getPriorBytecode = [&] (unsigned bytecodeOffset) -> Optional<CodeLocationLabel<JITTraceletPtrTag>> { >+ if (!m_priorCode) >+ return WTF::nullopt; >+ >+ // OOPS: Same concurrency dependency as described above. Should we allow for it? >+ auto iter = m_priorCode->m_codeLocations.find(bytecodeOffset); >+ if (iter != m_priorCode->m_codeLocations.end()) >+ return CodeLocationLabel<JITTraceletPtrTag>(iter->value); >+ >+ return WTF::nullopt; >+ }; >+ >+ for (const JumpTable& entry : m_jmpTable) { >+ unsigned bytecodeOffset = entry.toBytecodeOffset; >+ if (verboseProbes) >+ dataLogLn("Have jump table entry to: bc#", bytecodeOffset); >+ >+ if (auto label = getLocalBytecode(bytecodeOffset)) { >+ entry.from.linkTo(*label, this); >+ continue; >+ } >+ >+ if (auto priorCode = getPriorBytecode(bytecodeOffset)) { >+ Jump from = entry.from; >+ addLinkTask([=] (LinkBuffer& linkBuffer) { >+ linkBuffer.link(from, *priorCode); >+ }); >+ continue; >+ } >+ >+ RELEASE_ASSERT(m_isTracelet); >+ entry.from.linkTo(label(), this); >+ >+ if (bytecodeOffset >= m_codeBlock->instructionsSize()) { >+ if (verboseProbes) >+ dataLogLn("Have jump table entry exceeding instructionsSize() bc#", bytecodeOffset); >+ // This is the ending trace. We should never get here in bytecode, e.g, >+ // we should have returned, jumped, or done something to terminate execution >+ // of this code. >+ breakpoint(); >+ continue; >+ } >+ >+ emitJumpToLLInt(bytecodeOffset); >+ } >+ >+ // Translate vPC offsets into addresses in JIT generated code, for switch tables. 
>+ for (auto& record : m_switches) { >+ Vector<Label> jumpDestinations; >+ auto appendDestination = [&] (CodeLocationLabel<JSSwitchPtrTag>& ctiOffset, unsigned bytecodeOffset) { >+ if (auto label = getLocalBytecode(bytecodeOffset)) { >+ jumpDestinations.append(*label); >+ return; >+ } >+ >+ if (auto priorCode = getPriorBytecode(bytecodeOffset)) { >+ jumpDestinations.append(Label()); >+ ctiOffset = priorCode->retagged<JSSwitchPtrTag>(); >+ return; >+ } >+ >+ jumpDestinations.append(label()); >+ emitJumpToLLInt(bytecodeOffset); >+ }; >+ >+ unsigned bytecodeOffset = record.bytecodeOffset; >+ >+ if (record.type != SwitchRecord::String) { >+ ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character); >+ ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size()); >+ >+ auto* simpleJumpTable = record.jumpTable.simpleJumpTable; >+ appendDestination(simpleJumpTable->ctiDefault, bytecodeOffset + record.defaultOffset); // First is the 'default' case. >+ >+ for (unsigned i = 0; i < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++i) { >+ unsigned offset = simpleJumpTable->branchOffsets[i]; >+ if (offset) >+ appendDestination(simpleJumpTable->ctiOffsets[i], bytecodeOffset + offset); >+ else >+ jumpDestinations.append(jumpDestinations[0]); >+ } >+ >+ addLinkTask([=, jumpDestinations = WTFMove(jumpDestinations)] (LinkBuffer& linkBuffer) { >+ if (jumpDestinations[0].isSet()) >+ simpleJumpTable->ctiDefault = linkBuffer.locationOf<JSSwitchPtrTag>(jumpDestinations[0]); >+ >+ for (unsigned i = 0; i < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++i) { >+ if (jumpDestinations[i + 1].isSet()) >+ simpleJumpTable->ctiOffsets[i] = linkBuffer.locationOf<JSSwitchPtrTag>(jumpDestinations[i + 1]); >+ } >+ }); >+ } else { >+ ASSERT(record.type == SwitchRecord::String); >+ >+ auto* stringJumpTable = record.jumpTable.stringJumpTable; >+ >+ appendDestination(stringJumpTable->ctiDefault, bytecodeOffset + record.defaultOffset); // First is the 'default' case. 
>+ >+ for (auto& location : stringJumpTable->offsetTable.values()) { >+ unsigned offset = location.branchOffset; >+ if (offset) >+ appendDestination(location.ctiOffset, bytecodeOffset + offset); >+ else >+ jumpDestinations.append(jumpDestinations[0]); >+ } >+ >+ addLinkTask([=, jumpDestinations = WTFMove(jumpDestinations)] (LinkBuffer& linkBuffer) { >+ if (jumpDestinations[0].isSet()) >+ stringJumpTable->ctiDefault = linkBuffer.locationOf<JSSwitchPtrTag>(jumpDestinations[0]); >+ unsigned i = 1; >+ for (auto& location : stringJumpTable->offsetTable.values()) { >+ if (jumpDestinations[i].isSet()) >+ location.ctiOffset = linkBuffer.locationOf<JSSwitchPtrTag>(jumpDestinations[i]); >+ ++i; >+ } >+ }); >+ } >+ } >+ > m_jmpTable.clear(); > } > >@@ -545,6 +719,7 @@ void JIT::privateCompileSlowCases() > DEFINE_SLOWCASE_OP(op_jneq) > DEFINE_SLOWCASE_OP(op_jstricteq) > DEFINE_SLOWCASE_OP(op_jnstricteq) >+ DEFINE_SLOWCASE_OP(op_trace_hint) > DEFINE_SLOWCASE_OP(op_loop_hint) > DEFINE_SLOWCASE_OP(op_check_traps) > DEFINE_SLOWCASE_OP(op_mod) >@@ -653,6 +828,7 @@ void JIT::compileWithoutLinking(JITCompi > > if (UNLIKELY(Options::dumpDisassembly() || (m_vm->m_perBytecodeProfiler && Options::disassembleBaselineForProfiler()))) > m_disassembler = std::make_unique<JITDisassembler>(m_codeBlock); >+ > if (UNLIKELY(m_vm->m_perBytecodeProfiler)) { > m_compilation = adoptRef( > new Profiler::Compilation( >@@ -660,7 +836,83 @@ void JIT::compileWithoutLinking(JITCompi > Profiler::Baseline)); > m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock); > } >- >+ >+ unsigned instructionCount = m_codeBlock->instructions().size(); >+ if (m_isTracelet) { >+ //if (m_requiredBytecodeToCompile) >+ // dataLogLn("Required bytecode to compile = bc#", *m_requiredBytecodeToCompile); >+ const Instruction* firstTraceHint = nullptr; >+ for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount; ) { >+ const Instruction* currentInstruction = m_codeBlock->instructions().at(bytecodeOffset).ptr(); >+ if (currentInstruction->opcodeID() == op_trace_hint) { >+ auto bytecode = currentInstruction->as<OpTraceHint>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ >+ //dataLogLn("Looking at trace: [", metadata.m_traceProfile.start, ",", metadata.m_traceProfile.end, ")"); >+ bool shouldCompile = [&] { >+ if (!firstTraceHint && m_requiredBytecodeToCompile && *m_requiredBytecodeToCompile <= metadata.m_traceProfile.start) >+ metadata.m_shouldCompile = true; >+ >+ if (m_requiredBytecodeToCompile && metadata.m_traceProfile.start <= *m_requiredBytecodeToCompile && *m_requiredBytecodeToCompile < metadata.m_traceProfile.end) { >+ //dataLogLn("compiling trace that contains m_requiredBytecodeToCompile bc#", *m_requiredBytecodeToCompile); >+ metadata.m_shouldCompile = true; >+ } >+ >+ // Haven't tripped tier up yet. >+ if (!metadata.m_shouldCompile) { >+ //dataLogLn("\t! should compile"); >+ return false; >+ } >+ >+ // Already compiled. >+ if (metadata.m_entrypoint) { >+ //dataLogLn("\thas entrypoint already"); >+ //dataLogLn("\tin hash table: ", m_priorCode->m_codeLocations.contains(bytecodeOffset)); >+ return false; >+ } >+ >+ // OOPS: This is only safe to do because we: >+ // - Link on the main thread. >+ // - Never compile the same CodeBlock* concurrently. >+ // Is this ok to rely on? >+ if (m_priorCode && m_priorCode->m_codeLocations.contains(bytecodeOffset)) { >+ //dataLogLn("\talready compiled in code locs"); >+ // Already compiled. 
>+ return false; >+ } >+ >+ return true; >+ }(); >+ >+ if (!firstTraceHint) { >+ firstTraceHint = currentInstruction; >+ if (shouldCompile) { >+ m_traces.append(TraceProfile { 0, metadata.m_traceProfile.start }); >+ m_isCompilingPrologue = true; >+ } >+ } >+ >+ if (shouldCompile) { >+ //dataLogLn("compiling trace: [", metadata.m_traceProfile.start, ", ", metadata.m_traceProfile.end, ")"); >+ m_traces.append(metadata.m_traceProfile); >+ } >+ } >+ >+ bytecodeOffset += currentInstruction->size(); >+ } >+ } else { >+ TraceProfile wholeTrace; >+ wholeTrace.start = 0; >+ wholeTrace.end = instructionCount; >+ m_traces.append(wholeTrace); >+ m_isCompilingPrologue = true; >+ } >+ >+ if (!m_traces.size()) { >+ //dataLogLn("No traces to compile!"); >+ // OOPS: Make eager options really affect tier up threshold. >+ } >+ > m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(0, nullptr)); > > Label entryLabel(this); >@@ -671,106 +923,120 @@ void JIT::compileWithoutLinking(JITCompi > if (random() & 1) > nop(); > >- emitFunctionPrologue(); >- emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock); >+ if (m_isCompilingPrologue) { >+ emitFunctionPrologue(); >+ emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock); >+ } > > Label beginLabel(this); >+ if (verboseProbes) >+ print("executing JIT prologue for: ", m_codeBlock->inferredName().data(), "#", m_codeBlock->hashAsStringIfPossible().data(), "\n"); > >- sampleCodeBlock(m_codeBlock); >+ JumpList stackOverflow; >+ if (m_isCompilingPrologue) { >+ sampleCodeBlock(m_codeBlock); > #if ENABLE(OPCODE_SAMPLING) >- sampleInstruction(m_codeBlock->instructions().begin()); >+ sampleInstruction(m_codeBlock->instructions().begin()); > #endif > >- if (m_codeBlock->codeType() == FunctionCode) { >- ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max()); >- if (shouldEmitProfiling()) { >- for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) { >- // If this is a constructor, then we want to put in a dummy profiling site (to >- // keep things consistent) but we don't actually want to record the dummy value. >- if (m_codeBlock->isConstructor() && !argument) >- continue; >- int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register)); >+ if (m_codeBlock->codeType() == FunctionCode) { >+ ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max()); >+ if (shouldEmitProfiling()) { >+ for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) { >+ // If this is a constructor, then we want to put in a dummy profiling site (to >+ // keep things consistent) but we don't actually want to record the dummy value. 
>+ if (m_codeBlock->isConstructor() && !argument) >+ continue; >+ int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register)); > #if USE(JSVALUE64) >- load64(Address(callFrameRegister, offset), regT0); >+ load64(Address(callFrameRegister, offset), regT0); > #elif USE(JSVALUE32_64) >- load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); >- load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); >+ load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); >+ load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); > #endif >- emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument)); >+ emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument)); >+ } > } > } >- } > >- int frameTopOffset = stackPointerOffsetFor(m_codeBlock) * sizeof(Register); >- unsigned maxFrameSize = -frameTopOffset; >- addPtr(TrustedImm32(frameTopOffset), callFrameRegister, regT1); >- JumpList stackOverflow; >- if (UNLIKELY(maxFrameSize > Options::reservedZoneSize())) >- stackOverflow.append(branchPtr(Above, regT1, callFrameRegister)); >- stackOverflow.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfSoftStackLimit()), regT1)); >- >- move(regT1, stackPointerRegister); >- checkStackPointerAlignment(); >- if (Options::zeroStackFrame()) >- clearStackFrame(callFrameRegister, stackPointerRegister, regT0, maxFrameSize); >+ int frameTopOffset = stackPointerOffsetFor(m_codeBlock) * sizeof(Register); >+ unsigned maxFrameSize = -frameTopOffset; >+ addPtr(TrustedImm32(frameTopOffset), callFrameRegister, regT1); >+ if (UNLIKELY(maxFrameSize > Options::reservedZoneSize())) >+ stackOverflow.append(branchPtr(Above, regT1, callFrameRegister)); >+ stackOverflow.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfSoftStackLimit()), regT1)); >+ >+ move(regT1, stackPointerRegister); >+ checkStackPointerAlignment(); >+ if (Options::zeroStackFrame()) >+ clearStackFrame(callFrameRegister, stackPointerRegister, regT0, maxFrameSize); > >- emitSaveCalleeSaves(); >- emitMaterializeTagCheckRegisters(); >+ emitSaveCalleeSaves(); >+ emitMaterializeTagCheckRegisters(); >+ } > >- RELEASE_ASSERT(!JITCode::isJIT(m_codeBlock->jitType())); >+ //RELEASE_ASSERT(!JITCode::isJIT(m_codeBlock->jitType())); > > privateCompileMainPass(); >- privateCompileLinkPass(); >+ //privateCompileLinkPass(); >+ if (m_disassembler) >+ m_disassembler->setStartOfSlowPath(label()); > privateCompileSlowCases(); > > if (m_disassembler) > m_disassembler->setEndOfSlowPath(label()); > m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin()); > >- stackOverflow.link(this); >- m_bytecodeOffset = 0; >- if (maxFrameExtentForSlowPathCall) >- addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister); >- callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock); >- >- // If the number of parameters is 1, we never require arity fixup. 
>- bool requiresArityFixup = m_codeBlock->m_numParameters != 1; >- if (m_codeBlock->codeType() == FunctionCode && requiresArityFixup) { >- m_arityCheck = label(); >- store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined); >- emitFunctionPrologue(); >- emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock); >- >- load32(payloadFor(CallFrameSlot::argumentCount), regT1); >- branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this); >- >+ if (m_isCompilingPrologue) { >+ stackOverflow.link(this); > m_bytecodeOffset = 0; >- > if (maxFrameExtentForSlowPathCall) > addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister); >- callOperationWithCallFrameRollbackOnException(m_codeBlock->isConstructor() ? operationConstructArityCheck : operationCallArityCheck); >- if (maxFrameExtentForSlowPathCall) >- addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister); >- branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this); >- move(returnValueGPR, GPRInfo::argumentGPR0); >- emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).retaggedCode<NoPtrTag>()); >+ callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock); >+ } >+ >+ if (m_isCompilingPrologue) { >+ // If the number of parameters is 1, we never require arity fixup. >+ bool requiresArityFixup = m_codeBlock->m_numParameters != 1; >+ if (m_codeBlock->codeType() == FunctionCode && requiresArityFixup) { >+ m_arityCheck = label(); >+ if (verboseProbes) >+ print("executing JIT arity check prologue for: ", m_codeBlock->inferredName().data(), "\n"); >+ store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined); >+ emitFunctionPrologue(); >+ emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock); >+ >+ load32(payloadFor(CallFrameSlot::argumentCount), regT1); >+ branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this); >+ >+ m_bytecodeOffset = 0; >+ >+ if (maxFrameExtentForSlowPathCall) >+ addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister); >+ callOperationWithCallFrameRollbackOnException(m_codeBlock->isConstructor() ? operationConstructArityCheck : operationCallArityCheck); >+ if (maxFrameExtentForSlowPathCall) >+ addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister); >+ branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this); >+ move(returnValueGPR, GPRInfo::argumentGPR0); >+ emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).retaggedCode<NoPtrTag>()); > > #if !ASSERT_DISABLED >- m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs. >+ m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs. > #endif > >- jump(beginLabel); >- } else >- m_arityCheck = entryLabel; // Never require arity fixup. >+ jump(beginLabel); >+ } else >+ m_arityCheck = entryLabel; // Never require arity fixup. >+ } > >- ASSERT(m_jmpTable.isEmpty()); >- >+ privateCompileLinkPass(); > privateCompileExceptionHandlers(); > > if (m_disassembler) > m_disassembler->setEndOfCode(label()); > m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin()); > >+ // OOPS: Need to clear stale codeblock data on fails! 
> m_linkBuffer = std::unique_ptr<LinkBuffer>(new LinkBuffer(*this, m_codeBlock, effort)); > > MonotonicTime after { }; >@@ -789,48 +1055,22 @@ void JIT::compileWithoutLinking(JITCompi > > CompilationResult JIT::link() > { >+ if (m_isTracelet && m_traces.isEmpty()) >+ return CompilationDeferred; >+ > LinkBuffer& patchBuffer = *m_linkBuffer; > > if (patchBuffer.didFailToAllocate()) > return CompilationFailed; > >- // Translate vPC offsets into addresses in JIT generated code, for switch tables. >- for (auto& record : m_switches) { >- unsigned bytecodeOffset = record.bytecodeOffset; >- >- if (record.type != SwitchRecord::String) { >- ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character); >- ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size()); >- >- auto* simpleJumpTable = record.jumpTable.simpleJumpTable; >- simpleJumpTable->ctiDefault = patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + record.defaultOffset]); >- >- for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) { >- unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j]; >- simpleJumpTable->ctiOffsets[j] = offset >- ? patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + offset]) >- : simpleJumpTable->ctiDefault; >- } >- } else { >- ASSERT(record.type == SwitchRecord::String); >- >- auto* stringJumpTable = record.jumpTable.stringJumpTable; >- stringJumpTable->ctiDefault = >- patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + record.defaultOffset]); >- >- for (auto& location : stringJumpTable->offsetTable.values()) { >- unsigned offset = location.branchOffset; >- location.ctiOffset = offset >- ? patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + offset]) >- : stringJumpTable->ctiDefault; >- } >- } >- } >- > for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) { > HandlerInfo& handler = m_codeBlock->exceptionHandler(i); >+ >+ // OOPS: handle this! >+ UNUSED_PARAM(handler); >+ > // FIXME: <rdar://problem/39433318>. >- handler.nativeCode = patchBuffer.locationOf<ExceptionHandlerPtrTag>(m_labels[handler.target]); >+ //handler.nativeCode = patchBuffer.locationOf<ExceptionHandlerPtrTag>(m_labels[handler.target]); > } > > for (auto& record : m_calls) { >@@ -845,7 +1085,7 @@ CompilationResult JIT::link() > finalizeInlineCaches(m_instanceOfs, patchBuffer); > > if (m_byValCompilationInfo.size()) { >- CodeLocationLabel<ExceptionHandlerPtrTag> exceptionHandler = patchBuffer.locationOf<ExceptionHandlerPtrTag>(m_exceptionHandler); >+ CodeLocationLabel<ExceptionHandlerPtrTag> exceptionHandler = m_exceptionHandler.isSet() ? 
patchBuffer.locationOf<ExceptionHandlerPtrTag>(m_exceptionHandler) : m_priorCode->m_exceptionHandler; > > for (const auto& byValCompilationInfo : m_byValCompilationInfo) { > PatchableJump patchableNotIndexJump = byValCompilationInfo.notIndexJump; >@@ -878,15 +1118,12 @@ CompilationResult JIT::link() > patchBuffer.locationOfNearCall<JSInternalPtrTag>(compilationInfo.hotPathOther)); > } > >- JITCodeMap jitCodeMap; >- for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) { >- if (m_labels[bytecodeOffset].isSet()) >- jitCodeMap.append(bytecodeOffset, patchBuffer.locationOf<JSEntryPtrTag>(m_labels[bytecodeOffset])); >- } >- jitCodeMap.finish(); >- m_codeBlock->setJITCodeMap(WTFMove(jitCodeMap)); >- >- MacroAssemblerCodePtr<JSEntryPtrTag> withArityCheck = patchBuffer.locationOf<JSEntryPtrTag>(m_arityCheck); >+ //JITCodeMap jitCodeMap; >+ //for (auto entry : m_labels) { >+ // jitCodeMap.append(entry.key, patchBuffer.locationOf<JSEntryPtrTag>(entry.value)); >+ //} >+ //jitCodeMap.finish(); >+ //m_codeBlock->setJITCodeMap(WTFMove(jitCodeMap)); > > if (Options::dumpDisassembly()) { > m_disassembler->dump(patchBuffer); >@@ -905,13 +1142,86 @@ CompilationResult JIT::link() > patchBuffer, JSEntryPtrTag, > "Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITType::BaselineJIT)).data()); > >+ // OOPS: this is now wrong. Fix to be # insns in trace. > m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->add( > static_cast<double>(result.size()) / > static_cast<double>(m_codeBlock->instructionsSize())); > >- m_codeBlock->shrinkToFit(CodeBlock::LateShrink); >- m_codeBlock->setJITCode( >- adoptRef(*new DirectJITCode(result, withArityCheck, JITType::BaselineJIT))); >+ TraceletJITCode* traceletJITCode; >+ RefPtr<JITCode> jitCode = m_codeBlock->jitCode(); >+ if (!jitCode) { >+ // OOPS: should shrinkToFit perhaps for all of these? 
This code path is only taken when useLLInt=0
>+ //m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
>+ RELEASE_ASSERT(m_isCompilingPrologue);
>+ MacroAssemblerCodePtr<JSEntryPtrTag> withArityCheck = patchBuffer.locationOf<JSEntryPtrTag>(m_arityCheck);
>+ traceletJITCode = new TraceletJITCode(result, withArityCheck, JITType::BaselineJIT);
>+ m_codeBlock->setJITCode(
>+ adoptRef(*traceletJITCode));
>+ m_shouldInstallCode = true;
>+ } else if (jitCode->isTraceletJITCode())
>+ traceletJITCode = static_cast<TraceletJITCode*>(jitCode.get());
>+ else {
>+ RELEASE_ASSERT(jitCode->isJITCodeWithCodeRef());
>+ JITCodeWithCodeRef* jitCodeWithCodeRef = static_cast<JITCodeWithCodeRef*>(jitCode.get());
>+ MacroAssemblerCodeRef<JSEntryPtrTag> codeRef = jitCodeWithCodeRef->codeRef();
>+ MacroAssemblerCodePtr<JSEntryPtrTag> arityCheck = jitCodeWithCodeRef->addressForCall(MustCheckArity);
>+ traceletJITCode = new TraceletJITCode(codeRef, arityCheck, JITType::BaselineJIT);
>+ //dataLogLn("Allocated tracelet JIT code: ", RawPointer(traceletJITCode));
>+ m_codeBlock->setJITCode(adoptRef(*traceletJITCode));
>+ m_shouldInstallCode = true;
>+ }
>+
>+ //traceletJITCode->m_codeRefs.append(WTFMove(result));
>+ traceletJITCode->m_codeRefs.append(result.retagged<JITTraceletPtrTag>());
>+
>+ for (const auto& entry : m_locationsOfJumpToLLInt) {
>+ for (auto pair : entry.value) {
>+ Label start = pair.first;
>+ Label end = pair.second;
>+ MacroAssemblerCodePtr<JITTraceletPtrTag> startPtr = patchBuffer.locationOf<JITTraceletPtrTag>(start);
>+ MacroAssemblerCodePtr<JITTraceletPtrTag> endPtr = patchBuffer.locationOf<JITTraceletPtrTag>(end);
>+ RELEASE_ASSERT(endPtr.dataLocation<uintptr_t>() - startPtr.dataLocation<uintptr_t>() >= static_cast<uintptr_t>(MacroAssembler::maxJumpReplacementSize()));
>+
>+ traceletJITCode->m_locationsOfJumpsToLLIntBytecode.add(entry.key, Vector<MacroAssemblerCodePtr<JITTraceletPtrTag>>()).iterator->value.append(startPtr);
>+ }
>+ }
>+
>+ if (m_isCompilingPrologue) {
>+ if (verboseProbes)
>+ dataLogLn("compiling prologue: ", RawPointer(result.code().executableAddress()));
>+ traceletJITCode->installPrologue(result, patchBuffer.locationOf<JSEntryPtrTag>(m_arityCheck));
>+ m_shouldInstallCode = true;
>+ }
>+
>+ for (auto entry : m_labels) {
>+ unsigned bytecodeOffset = entry.key;
>+ auto codeLabel = patchBuffer.locationOf<JSEntryPtrTag>(entry.value);
>+ traceletJITCode->m_codeLocations.add(bytecodeOffset, codeLabel.retagged<JITTraceletPtrTag>());
>+
>+ auto iter = traceletJITCode->m_locationsOfJumpsToLLIntBytecode.find(bytecodeOffset);
>+ if (iter != traceletJITCode->m_locationsOfJumpsToLLIntBytecode.end()) {
>+ if (verboseProbes)
>+ dataLogLn("repatching prior jump to LLInt to go to new JIT code: bc#", bytecodeOffset);
>+ for (auto codePtr : iter->value) {
>+ CCallHelpers jit;
>+ auto jump = jit.jump();
>+
>+ LinkBuffer linkBuffer(jit, codePtr, MacroAssembler::maxJumpReplacementSize());
>+ RELEASE_ASSERT(linkBuffer.isValid());
>+ if (verboseProbes)
>+ dataLogLn("\trepatching from: ", RawPointer(codePtr.dataLocation()));
>+ linkBuffer.link(jump, codeLabel);
>+ FINALIZE_CODE(linkBuffer, NoPtrTag, "TraceletJIT: linking constant jump away from LLInt to newly allocated JIT code for bc#%d", bytecodeOffset);
>+ }
>+
>+ traceletJITCode->m_locationsOfJumpsToLLIntBytecode.remove(iter);
>+ }
>+ }
>+
>+ if (m_exceptionChecksWithCallFrameRollbackLabel.isSet())
>+ traceletJITCode->m_exceptionCheckWithCallFrameRollback = 
patchBuffer.locationOf<ExceptionHandlerPtrTag>(m_exceptionChecksWithCallFrameRollbackLabel); >+ if (m_exceptionHandler.isSet()) >+ traceletJITCode->m_exceptionHandler = patchBuffer.locationOf<ExceptionHandlerPtrTag>(m_exceptionHandler); > > if (JITInternal::verbose) > dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start().untaggedPtr(), result.executableMemory()->end().untaggedPtr()); >@@ -929,41 +1239,56 @@ CompilationResult JIT::privateCompile(JI > void JIT::privateCompileExceptionHandlers() > { > if (!m_exceptionChecksWithCallFrameRollback.empty()) { >- m_exceptionChecksWithCallFrameRollback.link(this); >+ if (m_priorCode && m_priorCode->m_exceptionCheckWithCallFrameRollback) { >+ addLinkTask([=] (LinkBuffer& linkBuffer) { >+ linkBuffer.link(m_exceptionChecksWithCallFrameRollback, m_priorCode->m_exceptionCheckWithCallFrameRollback); >+ }); >+ } else { >+ m_exceptionChecksWithCallFrameRollback.link(this); >+ m_exceptionChecksWithCallFrameRollbackLabel = label(); > >- copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame); >+ copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame); > >- // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*). >+ // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*). > >- move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0); >- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); >+ move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0); >+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); > > #if CPU(X86) >- // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer! >- poke(GPRInfo::argumentGPR0); >- poke(GPRInfo::argumentGPR1, 1); >+ // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer! >+ poke(GPRInfo::argumentGPR0); >+ poke(GPRInfo::argumentGPR1, 1); > #endif >- m_calls.append(CallRecord(call(OperationPtrTag), std::numeric_limits<unsigned>::max(), FunctionPtr<OperationPtrTag>(lookupExceptionHandlerFromCallerFrame))); >- jumpToExceptionHandler(*vm()); >+ m_calls.append(CallRecord(call(OperationPtrTag), std::numeric_limits<unsigned>::max(), FunctionPtr<OperationPtrTag>(lookupExceptionHandlerFromCallerFrame))); >+ jumpToExceptionHandler(*vm()); >+ } > } > > if (!m_exceptionChecks.empty() || m_byValCompilationInfo.size()) { >- m_exceptionHandler = label(); >- m_exceptionChecks.link(this); >+ if (m_priorCode && m_priorCode->m_exceptionHandler) { >+ if (!m_exceptionChecks.empty()) { >+ addLinkTask([=] (LinkBuffer& linkBuffer) { >+ linkBuffer.link(m_exceptionChecks, m_priorCode->m_exceptionHandler); >+ }); >+ } >+ } else { >+ m_exceptionChecks.link(this); >+ m_exceptionHandler = label(); > >- copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame); >+ copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame); > >- // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*). >- move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0); >- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); >+ // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*). >+ move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0); >+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); > > #if CPU(X86) >- // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer! 
>- poke(GPRInfo::argumentGPR0); >- poke(GPRInfo::argumentGPR1, 1); >+ // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer! >+ poke(GPRInfo::argumentGPR0); >+ poke(GPRInfo::argumentGPR1, 1); > #endif >- m_calls.append(CallRecord(call(OperationPtrTag), std::numeric_limits<unsigned>::max(), FunctionPtr<OperationPtrTag>(lookupExceptionHandler))); >- jumpToExceptionHandler(*vm()); >+ m_calls.append(CallRecord(call(OperationPtrTag), std::numeric_limits<unsigned>::max(), FunctionPtr<OperationPtrTag>(lookupExceptionHandler))); >+ jumpToExceptionHandler(*vm()); >+ } > } > } > >@@ -983,6 +1308,8 @@ unsigned JIT::frameRegisterCountFor(Code > > int JIT::stackPointerOffsetFor(CodeBlock* codeBlock) > { >+ //return - codeBlock->numCalleeLocals(); >+ // OOPS: This sort of disagrees with the LLInt? > return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset(); > } > >Index: Source/JavaScriptCore/jit/JIT.h >=================================================================== >--- Source/JavaScriptCore/jit/JIT.h (revision 244813) >+++ Source/JavaScriptCore/jit/JIT.h (working copy) >@@ -200,9 +200,12 @@ namespace JSC { > > void doMainThreadPreparationBeforeCompile(); > >- static CompilationResult compile(VM* vm, CodeBlock* codeBlock, JITCompilationEffort effort, unsigned bytecodeOffset = 0) >+ static CompilationResult compileNow(VM* vm, CodeBlock* codeBlock, JITCompilationEffort effort, unsigned requiredBytecodeOffset, bool isTracelet) > { >- return JIT(vm, codeBlock, bytecodeOffset).privateCompile(effort); >+ JIT jit(vm, codeBlock); >+ jit.m_isTracelet = isTracelet; >+ jit.m_requiredBytecodeToCompile = requiredBytecodeOffset; >+ return jit.privateCompile(effort); > } > > static void compileGetByVal(const ConcurrentJSLocker& locker, VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) >@@ -571,6 +574,7 @@ namespace JSC { > void emit_op_jbeloweq(const Instruction*); > void emit_op_jtrue(const Instruction*); > void emit_op_loop_hint(const Instruction*); >+ void emit_op_trace_hint(const Instruction*); > void emit_op_check_traps(const Instruction*); > void emit_op_nop(const Instruction*); > void emit_op_super_sampler_begin(const Instruction*); >@@ -669,6 +673,7 @@ namespace JSC { > void emitSlow_op_jnstricteq(const Instruction*, Vector<SlowCaseEntry>::iterator&); > void emitSlow_op_jtrue(const Instruction*, Vector<SlowCaseEntry>::iterator&); > void emitSlow_op_loop_hint(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_trace_hint(const Instruction*, Vector<SlowCaseEntry>::iterator&); > void emitSlow_op_check_traps(const Instruction*, Vector<SlowCaseEntry>::iterator&); > void emitSlow_op_mod(const Instruction*, Vector<SlowCaseEntry>::iterator&); > void emitSlow_op_mul(const Instruction*, Vector<SlowCaseEntry>::iterator&); >@@ -918,7 +923,8 @@ namespace JSC { > Interpreter* m_interpreter; > > Vector<CallRecord> m_calls; >- Vector<Label> m_labels; >+ //Vector<Label> m_labels; >+ HashMap<unsigned, Label, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> m_labels; > Vector<JITGetByIdGenerator> m_getByIds; > Vector<JITGetByIdWithThisGenerator> m_getByIdsWithThis; > Vector<JITPutByIdGenerator> m_putByIds; >@@ -963,6 +969,15 @@ namespace JSC { > bool m_shouldEmitProfiling; > bool m_shouldUseIndexMasking; > unsigned m_loopOSREntryBytecodeOffset { 0 }; >+ public: >+ bool m_isTracelet { false }; >+ Optional<unsigned> m_requiredBytecodeToCompile; >+ bool 
m_isCompilingPrologue { false }; >+ bool m_shouldInstallCode { false }; >+ Vector<TraceProfile> m_traces; >+ RefPtr<TraceletJITCode> m_priorCode; >+ HashMap<unsigned, Vector<std::pair<Label, Label>>, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> m_locationsOfJumpToLLInt; >+ Label m_exceptionChecksWithCallFrameRollbackLabel; > }; > > } // namespace JSC >Index: Source/JavaScriptCore/jit/JITCode.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITCode.cpp (revision 244813) >+++ Source/JavaScriptCore/jit/JITCode.cpp (working copy) >@@ -36,10 +36,18 @@ JITCode::JITCode(JITType jitType, ShareA > : m_jitType(jitType) > , m_shareAttribute(shareAttribute) > { >+ //dataLogLn("Allocated JITCode: ", RawPointer(this)); >+ //WTFReportBacktrace(); >+ //dataLogLn(); >+ //dataLogLn(); > } > > JITCode::~JITCode() > { >+ //dataLogLn("Deallocated JITCode: ", RawPointer(this)); >+ //WTFReportBacktrace(); >+ //dataLogLn(); >+ //dataLogLn(); > } > > const char* JITCode::typeName(JITType jitType) >@@ -237,6 +245,12 @@ RegisterSet JITCode::liveRegistersToPres > } > #endif > >+void TraceletJITCode::installPrologue(CodeRef<JSEntryPtrTag> entry, CodePtr<JSEntryPtrTag> withArityCheck) >+{ >+ m_ref = WTFMove(entry); >+ m_withArityCheck = withArityCheck; >+} >+ > } // namespace JSC > > namespace WTF { >Index: Source/JavaScriptCore/jit/JITCode.h >=================================================================== >--- Source/JavaScriptCore/jit/JITCode.h (revision 244813) >+++ Source/JavaScriptCore/jit/JITCode.h (working copy) >@@ -48,6 +48,7 @@ class Signature; > } > > struct ProtoCallFrame; >+class TraceletJITCode; > class TrackedReferences; > class VM; > >@@ -61,6 +62,7 @@ enum class JITType : uint8_t { > }; > > class JITCode : public ThreadSafeRefCounted<JITCode> { >+ using Base = ThreadSafeRefCounted<JITCode>; > public: > template<PtrTag tag> using CodePtr = MacroAssemblerCodePtr<tag>; > template<PtrTag tag> using CodeRef = MacroAssemblerCodeRef<tag>; >@@ -163,6 +165,8 @@ public: > Shared > }; > >+ TraceletJITCode* asTracelet(); >+ > protected: > JITCode(JITType, JITCode::ShareAttribute = JITCode::ShareAttribute::NotShared); > >@@ -203,6 +207,9 @@ public: > > virtual bool contains(void*) = 0; > >+ virtual bool isTraceletJITCode() const { return false; } >+ virtual bool isJITCodeWithCodeRef() const { return false; } >+ > #if ENABLE(JIT) > virtual RegisterSet liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock*, CallSiteIndex); > virtual Optional<CodeOrigin> findPC(CodeBlock*, void* pc) { UNUSED_PARAM(pc); return WTF::nullopt; } >@@ -233,6 +240,9 @@ public: > size_t size() override; > bool contains(void*) override; > >+ bool isJITCodeWithCodeRef() const override { return true; } >+ CodeRef<JSEntryPtrTag> codeRef() { return m_ref; } >+ > protected: > CodeRef<JSEntryPtrTag> m_ref; > }; >@@ -248,11 +258,40 @@ public: > > protected: > void initializeCodeRefForDFG(CodeRef<JSEntryPtrTag>, CodePtr<JSEntryPtrTag> withArityCheck); >- >-private: > CodePtr<JSEntryPtrTag> m_withArityCheck; > }; > >+class TraceletJITCode : public DirectJITCode { >+ using Base = DirectJITCode; >+public: >+ TraceletJITCode(CodeRef<JSEntryPtrTag> entry, CodePtr<JSEntryPtrTag> withArityCheck, JITType type, JITCode::ShareAttribute shareAttribute = JITCode::ShareAttribute::NotShared) >+ : Base(WTFMove(entry), WTFMove(withArityCheck), type, shareAttribute) >+ { } >+ >+ bool isTraceletJITCode() const override { return true; } >+ >+ 
CodeLocationLabel<ExceptionHandlerPtrTag> exceptionCheckWithCallFrameRollback() { return m_exceptionCheckWithCallFrameRollback; } >+ CodeLocationLabel<ExceptionHandlerPtrTag> exceptionHandler() { return m_exceptionHandler; } >+ >+ void installPrologue(CodeRef<JSEntryPtrTag> entry, CodePtr<JSEntryPtrTag> withArityCheck); >+ >+ MacroAssemblerCodePtr<JITTraceletPtrTag> findCodeLocation(unsigned bytecodeOffset) >+ { >+ auto iter = m_codeLocations.find(bytecodeOffset); >+ if (iter != m_codeLocations.end()) >+ return iter->value; >+ return { }; >+ } >+ >+public: >+//private: >+ HashMap<unsigned, MacroAssemblerCodePtr<JITTraceletPtrTag>, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> m_codeLocations; >+ HashMap<unsigned, Vector<MacroAssemblerCodePtr<JITTraceletPtrTag>>, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> m_locationsOfJumpsToLLIntBytecode; >+ Vector<CodeRef<JITTraceletPtrTag>> m_codeRefs; >+ CodeLocationLabel<ExceptionHandlerPtrTag> m_exceptionCheckWithCallFrameRollback; >+ CodeLocationLabel<ExceptionHandlerPtrTag> m_exceptionHandler; >+}; >+ > class NativeJITCode : public JITCodeWithCodeRef { > public: > NativeJITCode(JITType); >@@ -273,6 +312,13 @@ private: > const DOMJIT::Signature* m_signature; > }; > >+ALWAYS_INLINE TraceletJITCode* JITCode::asTracelet() >+{ >+ if (isTraceletJITCode()) >+ return static_cast<TraceletJITCode*>(this); >+ return nullptr; >+} >+ > } // namespace JSC > > namespace WTF { >Index: Source/JavaScriptCore/jit/JITCodeMap.h >=================================================================== >--- Source/JavaScriptCore/jit/JITCodeMap.h (revision 244813) >+++ Source/JavaScriptCore/jit/JITCodeMap.h (working copy) >@@ -35,6 +35,7 @@ namespace JSC { > > class JITCodeMap { > private: >+ /* > struct Entry { > Entry() { } > >@@ -44,16 +45,33 @@ private: > { } > > inline unsigned bytecodeIndex() const { return m_bytecodeIndex; } >- inline CodeLocationLabel<JSEntryPtrTag> codeLocation() { return m_codeLocation; } >+ inline CodeLocationLabel<JSEntryPtrTag> codeLocation() const { return m_codeLocation; } > > private: > unsigned m_bytecodeIndex; > CodeLocationLabel<JSEntryPtrTag> m_codeLocation; > }; >+ */ > > public: > void append(unsigned bytecodeIndex, CodeLocationLabel<JSEntryPtrTag> codeLocation) > { >+ m_entries.add(bytecodeIndex, codeLocation); >+ } >+ void finish() {} >+ >+ CodeLocationLabel<JSEntryPtrTag> find(unsigned bytecodeIndex) const >+ { >+ auto iter = m_entries.find(bytecodeIndex); >+ if (iter == m_entries.end()) >+ return CodeLocationLabel<JSEntryPtrTag>(); >+ return iter->value; >+ } >+ >+ /* >+ >+ void append(unsigned bytecodeIndex, CodeLocationLabel<JSEntryPtrTag> codeLocation) >+ { > m_entries.append({ bytecodeIndex, codeLocation }); > } > >@@ -70,11 +88,13 @@ public: > return CodeLocationLabel<JSEntryPtrTag>(); > return entry->codeLocation(); > } >+ */ > > explicit operator bool() const { return m_entries.size(); } > > private: >- Vector<Entry> m_entries; >+ //Vector<Entry> m_entries; >+ HashMap<unsigned, CodeLocationLabel<JSEntryPtrTag>, DefaultHash<unsigned>::Hash, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> m_entries; > }; > > } // namespace JSC >Index: Source/JavaScriptCore/jit/JITDisassembler.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITDisassembler.cpp (revision 244813) >+++ Source/JavaScriptCore/jit/JITDisassembler.cpp (working copy) >@@ -53,7 +53,7 @@ JITDisassembler::~JITDisassembler() > void JITDisassembler::dump(PrintStream& out, 
LinkBuffer& linkBuffer) > { > dumpHeader(out, linkBuffer); >- dumpDisassembly(out, linkBuffer, m_startOfCode, m_labelForBytecodeIndexInMainPath[0]); >+ dumpDisassembly(out, linkBuffer, m_startOfCode, firstFastLabel()); > > dumpForInstructions(out, linkBuffer, " ", m_labelForBytecodeIndexInMainPath, firstSlowLabel()); > out.print(" (End Of Main Path)\n"); >@@ -75,7 +75,7 @@ void JITDisassembler::reportToProfiler(P > dumpHeader(out, linkBuffer); > compilation->addDescription(Profiler::CompiledBytecode(Profiler::OriginStack(), out.toCString())); > out.reset(); >- dumpDisassembly(out, linkBuffer, m_startOfCode, m_labelForBytecodeIndexInMainPath[0]); >+ dumpDisassembly(out, linkBuffer, m_startOfCode, firstFastLabel()); > compilation->addDescription(Profiler::CompiledBytecode(Profiler::OriginStack(), out.toCString())); > > reportInstructions(compilation, linkBuffer, " ", m_labelForBytecodeIndexInMainPath, firstSlowLabel()); >@@ -106,6 +106,15 @@ MacroAssembler::Label JITDisassembler::f > return firstSlowLabel.isSet() ? firstSlowLabel : m_endOfSlowPath; > } > >+MacroAssembler::Label JITDisassembler::firstFastLabel() >+{ >+ for (unsigned i = 0; i < m_labelForBytecodeIndexInMainPath.size(); ++i) { >+ if (m_labelForBytecodeIndexInMainPath[i].isSet()) >+ return m_labelForBytecodeIndexInMainPath[i]; >+ } >+ return m_startOfSlowPath; >+} >+ > Vector<JITDisassembler::DumpedOp> JITDisassembler::dumpVectorForInstructions(LinkBuffer& linkBuffer, const char* prefix, Vector<MacroAssembler::Label>& labels, MacroAssembler::Label endLabel) > { > StringPrintStream out; >Index: Source/JavaScriptCore/jit/JITDisassembler.h >=================================================================== >--- Source/JavaScriptCore/jit/JITDisassembler.h (revision 244813) >+++ Source/JavaScriptCore/jit/JITDisassembler.h (working copy) >@@ -55,6 +55,8 @@ public: > { > m_labelForBytecodeIndexInSlowPath[bytecodeIndex] = label; > } >+ >+ void setStartOfSlowPath(MacroAssembler::Label label) { m_startOfSlowPath = label; } > void setEndOfSlowPath(MacroAssembler::Label label) { m_endOfSlowPath = label; } > void setEndOfCode(MacroAssembler::Label label) { m_endOfCode = label; } > >@@ -64,6 +66,7 @@ public: > > private: > void dumpHeader(PrintStream&, LinkBuffer&); >+ MacroAssembler::Label firstFastLabel(); > MacroAssembler::Label firstSlowLabel(); > > struct DumpedOp { >@@ -81,6 +84,7 @@ private: > MacroAssembler::Label m_startOfCode; > Vector<MacroAssembler::Label> m_labelForBytecodeIndexInMainPath; > Vector<MacroAssembler::Label> m_labelForBytecodeIndexInSlowPath; >+ MacroAssembler::Label m_startOfSlowPath; > MacroAssembler::Label m_endOfSlowPath; > MacroAssembler::Label m_endOfCode; > }; >Index: Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp (revision 244813) >+++ Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp (working copy) >@@ -47,6 +47,7 @@ JITInlineCacheGenerator::JITInlineCacheG > const RegisterSet& usedRegisters) > : m_codeBlock(codeBlock) > { >+ // OOPS: If we fail a compilation, we need a way to remove all this from the CodeBlock! > m_stubInfo = m_codeBlock ? 
m_codeBlock->addStubInfo(accessType) : garbageStubInfo();
> m_stubInfo->codeOrigin = codeOrigin;
> m_stubInfo->callSiteIndex = callSite;
>Index: Source/JavaScriptCore/jit/JITInlines.h
>===================================================================
>--- Source/JavaScriptCore/jit/JITInlines.h (revision 244813)
>+++ Source/JavaScriptCore/jit/JITInlines.h (working copy)
>@@ -247,7 +247,8 @@ ALWAYS_INLINE void JIT::emitJumpSlowToHo
> {
> ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
>
>- jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
>+ //jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
>+ m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
> }
>
> #if ENABLE(SAMPLING_FLAGS)
>Index: Source/JavaScriptCore/jit/JITOpcodes.cpp
>===================================================================
>--- Source/JavaScriptCore/jit/JITOpcodes.cpp (revision 244813)
>+++ Source/JavaScriptCore/jit/JITOpcodes.cpp (working copy)
>@@ -1034,6 +1034,13 @@ void JIT::emitSlow_op_loop_hint(const In
> #endif
> }
>
>+void JIT::emit_op_trace_hint(const Instruction*)
>+{
>+}
>+void JIT::emitSlow_op_trace_hint(const Instruction*, Vector<SlowCaseEntry>::iterator&)
>+{
>+}
>+
> void JIT::emit_op_check_traps(const Instruction*)
> {
> addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->needTrapHandlingAddress())));
>Index: Source/JavaScriptCore/jit/JITWorklist.cpp
>===================================================================
>--- Source/JavaScriptCore/jit/JITWorklist.cpp (revision 244813)
>+++ Source/JavaScriptCore/jit/JITWorklist.cpp (working copy)
>@@ -36,10 +36,12 @@ namespace JSC {
>
> class JITWorklist::Plan : public ThreadSafeRefCounted<JITWorklist::Plan> {
> public:
>- Plan(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset)
>+ Plan(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset, bool isTracelet)
> : m_codeBlock(codeBlock)
> , m_jit(codeBlock->vm(), codeBlock, loopOSREntryBytecodeOffset)
> {
>+ m_jit.m_isTracelet = isTracelet;
>+ m_jit.m_isTracelet = true; // OOPS: debugging override; this leaves the isTracelet parameter above dead.
> m_jit.doMainThreadPreparationBeforeCompile();
> }
>
>@@ -54,6 +56,11 @@ public:
> void finalize()
> {
> CompilationResult result = m_jit.link();
>+
>+ //static uint64_t counter;
>+ //if (++counter % 400 == 0)
>+ // vm()->heap.dumpAllCodeBlockTraces();
>+
> switch (result) {
> case CompilationFailed:
> CODEBLOCK_LOG_EVENT(m_codeBlock, "delayJITCompile", ("compilation failed"));
>@@ -65,9 +72,15 @@ public:
> case CompilationSuccessful:
> if (Options::verboseOSR())
> dataLogF(" JIT compilation successful.\n");
>- m_codeBlock->ownerExecutable()->installCode(m_codeBlock);
>+ if (m_jit.m_shouldInstallCode)
>+ m_codeBlock->ownerExecutable()->installCode(m_codeBlock);
> m_codeBlock->jitSoon();
> return;
>+ case CompilationDeferred:
>+ //dataLogLn("Bogus compile!");
>+ // Nothing to compile! 
>+ //m_codeBlock->jitSoon(); >+ return; > default: > RELEASE_ASSERT_NOT_REACHED(); > return; >@@ -83,9 +96,9 @@ public: > return m_isFinishedCompiling; > } > >- static void compileNow(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset) >+ static void compileNow(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset, bool isTracelet) > { >- Plan plan(codeBlock, loopOSREntryBytecodeOffset); >+ Plan plan(codeBlock, loopOSREntryBytecodeOffset, isTracelet); > plan.compileInThread(); > plan.finalize(); > } >@@ -159,7 +172,10 @@ JITWorklist::JITWorklist() > , m_condition(AutomaticThreadCondition::create()) > { > LockHolder locker(*m_lock); >- m_thread = new Thread(locker, *this); >+ //m_threads.append(new Thread(locker, *this)); >+ //m_threads.append(new Thread(locker, *this)); >+ //m_threads.append(new Thread(locker, *this)); >+ //m_threads.append(new Thread(locker, *this)); > } > > JITWorklist::~JITWorklist() >@@ -228,10 +244,10 @@ void JITWorklist::poll(VM& vm) > finalizePlans(myPlans); > } > >-void JITWorklist::compileLater(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset) >+void JITWorklist::compileLater(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset, bool isTracelet) > { > DeferGC deferGC(codeBlock->vm()->heap); >- RELEASE_ASSERT(codeBlock->jitType() == JITType::InterpreterThunk); >+ //RELEASE_ASSERT(codeBlock->jitType() == JITType::InterpreterThunk); > > if (codeBlock->m_didFailJITCompilation) { > codeBlock->dontJITAnytimeSoon(); >@@ -239,7 +255,7 @@ void JITWorklist::compileLater(CodeBlock > } > > if (!Options::useConcurrentJIT()) { >- Plan::compileNow(codeBlock, loopOSREntryBytecodeOffset); >+ Plan::compileNow(codeBlock, loopOSREntryBytecodeOffset, isTracelet); > return; > } > >@@ -253,7 +269,7 @@ void JITWorklist::compileLater(CodeBlock > > if (m_numAvailableThreads) { > m_planned.add(codeBlock); >- RefPtr<Plan> plan = adoptRef(new Plan(codeBlock, loopOSREntryBytecodeOffset)); >+ RefPtr<Plan> plan = adoptRef(new Plan(codeBlock, loopOSREntryBytecodeOffset, isTracelet)); > m_plans.append(plan); > m_queue.append(plan); > m_condition->notifyAll(locker); >@@ -277,14 +293,26 @@ void JITWorklist::compileLater(CodeBlock > // This works around the issue. If the concurrent JIT thread is convoyed, we revert to main > // thread compiles. This is probably not as good as if we had multiple JIT threads. Maybe we > // can do that someday. >- Plan::compileNow(codeBlock, loopOSREntryBytecodeOffset); >+ Plan::compileNow(codeBlock, loopOSREntryBytecodeOffset, isTracelet); > } > >-void JITWorklist::compileNow(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset) >+void JITWorklist::compileNow(CodeBlock* codeBlock, unsigned bytecodeIndexToCompile) > { > VM* vm = codeBlock->vm(); > DeferGC deferGC(vm->heap); >- if (codeBlock->jitType() != JITType::InterpreterThunk) >+ >+ auto didCompileBytecode = [&] { >+ RefPtr<JITCode> jitCode = codeBlock->jitCode(); >+ if (jitCode->isTraceletJITCode()) { >+ TraceletJITCode* tracelet = static_cast<TraceletJITCode*>(jitCode.get()); >+ if (tracelet->m_codeLocations.find(bytecodeIndexToCompile) != tracelet->m_codeLocations.end()) >+ return true; >+ } >+ >+ return false; >+ }; >+ >+ if (didCompileBytecode()) > return; > > bool isPlanned; >@@ -300,15 +328,14 @@ void JITWorklist::compileNow(CodeBlock* > } > > // Now it might be compiled! >- if (codeBlock->jitType() != JITType::InterpreterThunk) >+ if (didCompileBytecode()) > return; > >- // We do this in case we had previously attempted, and then failed, to compile with the >- // baseline JIT. 
>- codeBlock->resetJITData();
>-
> // OK, just compile it.
>- JIT::compile(vm, codeBlock, JITCompilationMustSucceed, loopOSREntryBytecodeOffset);
>+ bool isTracelet = Options::useLLInt();
>+ JIT::compileNow(vm, codeBlock, JITCompilationMustSucceed, bytecodeIndexToCompile, isTracelet);
>+ ASSERT(didCompileBytecode());
>+ // OOPS: change how we installCode().
> codeBlock->ownerExecutable()->installCode(codeBlock);
> }
>
>Index: Source/JavaScriptCore/jit/JITWorklist.h
>===================================================================
>--- Source/JavaScriptCore/jit/JITWorklist.h (revision 244813)
>+++ Source/JavaScriptCore/jit/JITWorklist.h (working copy)
>@@ -53,9 +53,8 @@ public:
> bool completeAllForVM(VM&); // Return true if any JIT work happened.
> void poll(VM&);
>
>- void compileLater(CodeBlock*, unsigned loopOSREntryBytecodeOffset = 0);
>-
>- void compileNow(CodeBlock*, unsigned loopOSREntryBytecodeOffset = 0);
>+ void compileLater(CodeBlock*, unsigned loopOSREntryBytecodeOffset = 0, bool isTracelet = false);
>+ void compileNow(CodeBlock*, unsigned bytecodeIndexToCompile);
>
> static JITWorklist& ensureGlobalWorklist();
> static JITWorklist* existingGlobalWorklistOrNull();
>@@ -74,7 +73,7 @@ private:
>
> Box<Lock> m_lock;
> Ref<AutomaticThreadCondition> m_condition; // We use One True Condition for everything because that's easier.
>- RefPtr<AutomaticThread> m_thread;
>+ Vector<RefPtr<AutomaticThread>> m_threads;
>
> unsigned m_numAvailableThreads { 0 };
> };
>Index: Source/JavaScriptCore/llint/LLIntEntrypoint.cpp
>===================================================================
>--- Source/JavaScriptCore/llint/LLIntEntrypoint.cpp (revision 244813)
>+++ Source/JavaScriptCore/llint/LLIntEntrypoint.cpp (working copy)
>@@ -180,6 +180,8 @@ void setEntrypoint(CodeBlock* codeBlock)
>
> unsigned frameRegisterCountFor(CodeBlock* codeBlock)
> {
>+ // OOPS: Combine this with JIT's function to ensure they're always the same! 
>+ > ASSERT(static_cast<unsigned>(codeBlock->numCalleeLocals()) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->numCalleeLocals()))); > > return roundLocalRegisterCountForFramePointerOffset(codeBlock->numCalleeLocals() + maxFrameExtentForSlowPathCallInRegisters); >Index: Source/JavaScriptCore/llint/LLIntSlowPaths.cpp >=================================================================== >--- Source/JavaScriptCore/llint/LLIntSlowPaths.cpp (revision 244813) >+++ Source/JavaScriptCore/llint/LLIntSlowPaths.cpp (working copy) >@@ -400,6 +400,13 @@ inline bool jitCompileAndSetHeuristics(C > } > } > >+static ALWAYS_INLINE MacroAssemblerCodePtr<JITTraceletPtrTag> traceletPC(CodeBlock* codeBlock, unsigned bytecodeOffset) >+{ >+ if (TraceletJITCode* tracelet = codeBlock->jitCode()->asTracelet()) >+ return tracelet->findCodeLocation(bytecodeOffset); >+ return { }; >+} >+ > static SlowPathReturnType entryOSR(ExecState* exec, const Instruction*, CodeBlock* codeBlock, const char *name, EntryKind kind) > { > if (Options::verboseOSR()) { >@@ -412,9 +419,14 @@ static SlowPathReturnType entryOSR(ExecS > codeBlock->dontJITAnytimeSoon(); > LLINT_RETURN_TWO(0, 0); > } >- if (!jitCompileAndSetHeuristics(codeBlock, exec)) >- LLINT_RETURN_TWO(0, 0); >- >+ >+ if (!traceletPC(codeBlock, 0)) { >+ if (!jitCompileAndSetHeuristics(codeBlock, exec)) >+ LLINT_RETURN_TWO(0, 0); >+ if (!traceletPC(codeBlock, 0)) >+ LLINT_RETURN_TWO(0, 0); >+ } >+ > CODEBLOCK_LOG_EVENT(codeBlock, "OSR entry", ("in prologue")); > > if (kind == Prologue) >@@ -474,6 +486,10 @@ LLINT_SLOW_PATH_DECL(loop_osr) > codeBlock->dontJITAnytimeSoon(); > LLINT_RETURN_TWO(0, 0); > } >+ >+ MacroAssemblerCodePtr<JITTraceletPtrTag> codePtr = traceletPC(codeBlock, loopOSREntryBytecodeOffset); >+ if (codePtr) >+ LLINT_RETURN_TWO(codePtr.retagged<JSEntryPtrTag>().executableAddress(), exec->topOfFrame()); > > if (!jitCompileAndSetHeuristics(codeBlock, exec, loopOSREntryBytecodeOffset)) > LLINT_RETURN_TWO(0, 0); >@@ -481,12 +497,13 @@ LLINT_SLOW_PATH_DECL(loop_osr) > CODEBLOCK_LOG_EVENT(codeBlock, "osrEntry", ("at bc#", loopOSREntryBytecodeOffset)); > > ASSERT(codeBlock->jitType() == JITType::BaselineJIT); >+ codePtr = static_cast<TraceletJITCode*>(codeBlock->jitCode().get())->findCodeLocation(loopOSREntryBytecodeOffset); >+ if (!codePtr) { >+ //dataLogLn("loop_hint not compiled yet!"); >+ LLINT_RETURN_TWO(0, 0); >+ } > >- const JITCodeMap& codeMap = codeBlock->jitCodeMap(); >- CodeLocationLabel<JSEntryPtrTag> codeLocation = codeMap.find(loopOSREntryBytecodeOffset); >- ASSERT(codeLocation); >- >- void* jumpTarget = codeLocation.executableAddress(); >+ void* jumpTarget = codePtr.retagged<JSEntryPtrTag>().executableAddress(); > ASSERT(jumpTarget); > > LLINT_RETURN_TWO(jumpTarget, exec->topOfFrame()); >@@ -1956,6 +1973,45 @@ LLINT_SLOW_PATH_DECL(slow_path_out_of_li > LLINT_END_IMPL(); > } > >+LLINT_SLOW_PATH_DECL(trace_hint) >+{ >+ LLINT_BEGIN_NO_SET_PC(); >+ UNUSED_PARAM(throwScope); >+ //dataLogLn("trace_hint slow path!"); >+ >+ auto bytecode = pc->as<OpTraceHint>(); >+ auto& metadata = bytecode.metadata(exec); >+ >+ CodeBlock* codeBlock = exec->codeBlock(); >+ if (!shouldJIT(codeBlock)) >+ LLINT_RETURN_TWO(0, 0); >+ >+ auto returnPC = [&] () -> void* { >+ if (MacroAssemblerCodePtr<JITTraceletPtrTag> codePtr = traceletPC(codeBlock, codeBlock->bytecodeOffset(pc))) { >+ void* result = codePtr.executableAddress(); >+ metadata.m_entrypoint = bitwise_cast<uintptr_t>(result); >+ return result; >+ } >+ >+ return nullptr; >+ }; >+ >+ 
if (auto* ret = returnPC())
>+ LLINT_RETURN_TWO(ret, 0);
>+
>+ if (metadata.m_shouldCompile) {
>+ metadata.m_shouldCompile = true; // OOPS: redundant; m_shouldCompile is already true on this path.
>+ JITWorklist::ensureGlobalWorklist().compileLater(codeBlock, 0, true);
>+ if (auto* ret = returnPC())
>+ LLINT_RETURN_TWO(ret, 0);
>+ }
>+
>+ metadata.m_shouldCompile = true;
>+ metadata.m_count = -Options::traceJITSoonThreshold();
>+
>+ LLINT_RETURN_TWO(0, 0);
>+}
>+
> extern "C" SlowPathReturnType llint_throw_stack_overflow_error(VM* vm, ProtoCallFrame* protoFrame)
> {
> ExecState* exec = vm->topCallFrame;
>Index: Source/JavaScriptCore/llint/LLIntSlowPaths.h
>===================================================================
>--- Source/JavaScriptCore/llint/LLIntSlowPaths.h (revision 244813)
>+++ Source/JavaScriptCore/llint/LLIntSlowPaths.h (working copy)
>@@ -134,6 +134,7 @@ LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_lo
> LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_super_sampler_begin);
> LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_super_sampler_end);
> LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_out_of_line_jump_target);
>+LLINT_SLOW_PATH_HIDDEN_DECL(trace_hint);
> extern "C" SlowPathReturnType llint_throw_stack_overflow_error(VM*, ProtoCallFrame*) WTF_INTERNAL;
> #if ENABLE(C_LOOP)
> extern "C" SlowPathReturnType llint_stack_check_at_vm_entry(VM*, Register*) WTF_INTERNAL;
>Index: Source/JavaScriptCore/llint/LowLevelInterpreter.asm
>===================================================================
>--- Source/JavaScriptCore/llint/LowLevelInterpreter.asm (revision 244813)
>+++ Source/JavaScriptCore/llint/LowLevelInterpreter.asm (working copy)
>@@ -247,6 +247,7 @@ const ArithProfileNumberNumber = constex
> # Pointer Tags
> const BytecodePtrTag = constexpr BytecodePtrTag
> const JSEntryPtrTag = constexpr JSEntryPtrTag
>+const JITTraceletPtrTag = constexpr JITTraceletPtrTag
> const ExceptionHandlerPtrTag = constexpr ExceptionHandlerPtrTag
> const NoPtrTag = constexpr NoPtrTag
> const SlowPathPtrTag = constexpr SlowPathPtrTag
>@@ -1632,6 +1633,28 @@ llintOp(op_loop_hint, OpLoopHint, macro
> dispatch()
> end)
>
>+llintOpWithMetadata(op_trace_hint, OpTraceHint, macro (size, get, dispatch, metadata, return)
>+ metadata(t2, t0)
>+ baddis 1, OpTraceHint::Metadata::m_count[t2], .continue
>+ loadp OpTraceHint::Metadata::m_entrypoint[t2], t1
>+ btpz t1, .doCompile
>+ jmp t1, JITTraceletPtrTag
>+
>+.doCompile:
>+ storei PC, ArgumentCount + TagOffset[cfr]
>+ prepareStateForCCall()
>+ move cfr, a0
>+ move PC, a1
>+ cCall2(_llint_trace_hint)
>+ btpz r0, .recover
>+ jmp r0, JITTraceletPtrTag
>+.recover:
>+ loadi ArgumentCount + TagOffset[cfr], PC
>+
>+.continue:
>+ dispatch()
>+end)
>+
>
> llintOp(op_check_traps, OpCheckTraps, macro (unused, unused, dispatch)
> loadp CodeBlock[cfr], t1
>Index: Source/JavaScriptCore/runtime/JSCPtrTag.h
>===================================================================
>--- Source/JavaScriptCore/runtime/JSCPtrTag.h (revision 244813)
>+++ Source/JavaScriptCore/runtime/JSCPtrTag.h (working copy)
>@@ -39,6 +39,7 @@ using PtrTag = WTF::PtrTag;
> v(ExceptionHandlerPtrTag) \
> v(ExecutableMemoryPtrTag) \
> v(JITThunkPtrTag) \
>+ v(JITTraceletPtrTag) \
> v(JITStubRoutinePtrTag) \
> v(JSEntryPtrTag) \
> v(JSInternalPtrTag) \
>Index: Source/JavaScriptCore/runtime/Options.h
>===================================================================
>--- Source/JavaScriptCore/runtime/Options.h (revision 244813)
>+++ Source/JavaScriptCore/runtime/Options.h (working copy)
>@@ -336,6 +336,10 @@ constexpr bool enableWebAssemblyStreamin
> \
> v(int32, 
thresholdForFTLOptimizeAfterWarmUp, 100000, Normal, nullptr) \ > v(int32, thresholdForFTLOptimizeSoon, 1000, Normal, nullptr) \ >+ v(int32, minimumTraceThreshold, 2, Normal, nullptr) \ >+ v(int32, maximumTraceThreshold, 20, Normal, nullptr) \ >+ v(int32, traceJITSoonThreshold, 40, Normal, nullptr) \ >+ v(double, traceThresholdMultiplier, 0.5, Normal, nullptr) \ > v(int32, ftlTierUpCounterIncrementForLoop, 1, Normal, nullptr) \ > v(int32, ftlTierUpCounterIncrementForReturn, 15, Normal, nullptr) \ > v(unsigned, ftlOSREntryFailureCountForReoptimization, 15, Normal, nullptr) \ >Index: Source/JavaScriptCore/runtime/ScriptExecutable.cpp >=================================================================== >--- Source/JavaScriptCore/runtime/ScriptExecutable.cpp (revision 244813) >+++ Source/JavaScriptCore/runtime/ScriptExecutable.cpp (working copy) >@@ -184,6 +184,11 @@ void ScriptExecutable::installCode(VM& v > break; > } > >+ //dataLogLn("Install code on executable: ", RawPointer(this), " m_jitCodeForConstruct=", RawPointer(m_jitCodeForConstruct.get()), " m_jitCodeForCall=", RawPointer(m_jitCodeForCall.get())); >+ //WTFReportBacktrace(); >+ //dataLogLn(); >+ //dataLogLn(); >+ > auto& clearableCodeSet = VM::SpaceAndSet::setFor(*subspace()); > if (hasClearableCode(vm)) > clearableCodeSet.add(this); >@@ -386,7 +391,7 @@ static void setupLLInt(CodeBlock* codeBl > static void setupJIT(VM& vm, CodeBlock* codeBlock) > { > #if ENABLE(JIT) >- CompilationResult result = JIT::compile(&vm, codeBlock, JITCompilationMustSucceed); >+ CompilationResult result = JIT::compileNow(&vm, codeBlock, JITCompilationMustSucceed, 0, false); > RELEASE_ASSERT(result == CompilationSuccessful); > #else > UNUSED_PARAM(vm);
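
The three op_trace_hint pieces above cooperate as one tiering state machine: the LLInt fast path adds 1 to m_count and keeps dispatching while the counter is still negative; once it crosses zero, it jumps through the cached m_entrypoint if one exists, and otherwise calls the llint_trace_hint slow path, which returns either JIT code to jump to or null to keep interpreting. Below is a minimal standalone C++ sketch of that slow-path logic, not JSC source; lookUpCompiledTracelet() and requestTraceletCompile() are hypothetical stand-ins for the patch's traceletPC() and JITWorklist::compileLater(), and the threshold corresponds to the new traceJITSoonThreshold option.

    struct TraceHintMetadata {
        void* entrypoint { nullptr }; // cached JIT code for this trace_hint site
        int count { 0 };              // counts up from a negative threshold
        bool shouldCompile { false };
    };

    void* lookUpCompiledTracelet() { return nullptr; } // stand-in for traceletPC()
    void requestTraceletCompile() { }                  // stand-in for compileLater()

    // Returns JIT code to jump to, or nullptr to keep interpreting (the asm
    // reloads PC and dispatches in that case).
    void* traceHintSlowPath(TraceHintMetadata& metadata, int traceJITSoonThreshold)
    {
        if (void* code = lookUpCompiledTracelet()) {
            metadata.entrypoint = code; // future executions jump here from the fast path
            return code;
        }
        if (metadata.shouldCompile) {
            // Second hot visit: the site was already marked, so actually queue
            // the compile, then check whether it completed synchronously.
            requestTraceletCompile();
            if (void* code = lookUpCompiledTracelet())
                return code;
        }
        metadata.shouldCompile = true;
        metadata.count = -traceJITSoonThreshold; // back off before retrying the slow path
        return nullptr;
    }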
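The JITCodeMap rewrite follows from the same design: a whole-method baseline compile could append entries in bytecode order and binary-search them, but tracelets record labels for a sparse, growing subset of offsets, so the map becomes keyed lookup with incremental insertion. The patch uses WTF::HashMap with UnsignedWithZeroKeyHashTraits because WTF hash tables reserve key 0 and bytecode offset 0 is a valid index. A sketch of the reduced interface, using std::unordered_map in place of WTF::HashMap:

    #include <unordered_map>

    using CodeLocation = const void*; // stands in for CodeLocationLabel<JSEntryPtrTag>

    class JITCodeMap {
    public:
        void append(unsigned bytecodeIndex, CodeLocation codeLocation)
        {
            m_entries.emplace(bytecodeIndex, codeLocation); // no ordering requirement anymore
        }
        void finish() { } // kept only for source compatibility with the old sorted-vector API

        CodeLocation find(unsigned bytecodeIndex) const
        {
            auto iter = m_entries.find(bytecodeIndex);
            return iter == m_entries.end() ? nullptr : iter->second;
        }

        explicit operator bool() const { return !m_entries.empty(); }

    private:
        std::unordered_map<unsigned, CodeLocation> m_entries;
    };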
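Finally, m_locationsOfJumpToLLInt is what lets separately compiled tracelets be stitched together after the fact. When a tracelet exits back to the LLInt, the emitted exit sequence is padded to at least MacroAssembler::maxJumpReplacementSize() (the RELEASE_ASSERT over the start/end labels in JIT::link() checks exactly this), so that once a later tracelet covers that bytecode offset, a constant jump can be written over the old exit in place. A high-level model of that bookkeeping, using a table of function pointers where the real patch rewrites machine code with CCallHelpers and a LinkBuffer:

    #include <unordered_map>
    #include <vector>

    using Entrypoint = void (*)();

    struct TraceletCode {
        // bytecodeOffset -> exit sites still jumping back to the interpreter
        std::unordered_map<unsigned, std::vector<Entrypoint*>> jumpsToLLInt;
        // bytecodeOffset -> compiled entrypoint, once one exists
        std::unordered_map<unsigned, Entrypoint> codeLocations;
    };

    // Mirrors the repatch loop in JIT::link(): once a tracelet covering
    // bytecodeOffset is linked, redirect every recorded exit site to it.
    void installTracelet(TraceletCode& code, unsigned bytecodeOffset, Entrypoint entry)
    {
        code.codeLocations.emplace(bytecodeOffset, entry);
        auto iter = code.jumpsToLLInt.find(bytecodeOffset);
        if (iter == code.jumpsToLLInt.end())
            return;
        for (Entrypoint* exitSite : iter->second)
            *exitSite = entry; // the patch plants a real machine-code jump here instead
        code.jumpsToLLInt.erase(iter);
    }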