WebKit Bugzilla
Attachment 360907 Details for Bug 194036: [WebAssembly] Write a new register allocator for Air O0 and make BBQ use it
WIP
b-backup.diff (text/plain), 48.31 KB, created by Saam Barati on 2019-02-01 14:49:17 PST

Description: WIP
Filename: b-backup.diff
MIME Type: text/plain
Creator: Saam Barati
Created: 2019-02-01 14:49:17 PST
Size: 48.31 KB
Flags: patch, obsolete
>Index: PerformanceTests/JetStream2/JetStreamDriver.js >=================================================================== >--- PerformanceTests/JetStream2/JetStreamDriver.js (revision 240697) >+++ PerformanceTests/JetStream2/JetStreamDriver.js (working copy) >@@ -1538,7 +1538,7 @@ let runSeaMonster = true; > let runCodeLoad = true; > let runWasm = true; > >-if (false) { >+if (true) { > runOctane = false; > runARES = false; > runWSL = false; >@@ -1550,7 +1550,7 @@ if (false) { > runWorkerTests = false; > runSeaMonster = false; > runCodeLoad = false; >- runWasm = false; >+ //runWasm = false; > } > > if (typeof testList !== "undefined") { >Index: Source/JavaScriptCore/b3/air/AirAllocateRegistersAndStackByLinearScan.cpp >=================================================================== >--- Source/JavaScriptCore/b3/air/AirAllocateRegistersAndStackByLinearScan.cpp (revision 240697) >+++ Source/JavaScriptCore/b3/air/AirAllocateRegistersAndStackByLinearScan.cpp (working copy) >@@ -44,10 +44,22 @@ > #include <wtf/ListDump.h> > #include <wtf/Range.h> > >+#include "SuperSampler.h" >+ > namespace JSC { namespace B3 { namespace Air { > > namespace { > >+static size_t totalMemOps = 0; >+ >+ALWAYS_INLINE size_t rdtsc() >+{ >+ unsigned high; >+ unsigned low; >+ asm volatile ("rdtsc" : "=a"(low), "=d"(high)); >+ return low; >+} >+ > NO_RETURN_DUE_TO_CRASH NEVER_INLINE void crash() > { > CRASH(); >@@ -61,6 +73,353 @@ NO_RETURN_DUE_TO_CRASH NEVER_INLINE void > } \ > } while (0) > >+ >+class DumbRegAlloc { >+ struct TmpData { >+ StackSlot* spillSlot; >+ Reg reg; >+ }; >+ >+public: >+ NEVER_INLINE DumbRegAlloc(Code& code) >+ : m_code(code) >+ , m_map(code) >+ , m_currentAllocation(Reg::maxIndex() + 1) >+ { >+ RegisterSet allowedRegisters; >+ >+ m_code.forEachTmp([&] (Tmp tmp) { >+ RELEASE_ASSERT(!tmp.isReg()); >+ TmpData data; >+ data.spillSlot = m_code.addStackSlot(8, StackSlotKind::Spill); >+ data.reg = Reg(); >+ m_map[tmp] = data; >+ m_allTmps[tmp.bank()].append(tmp); >+ }); >+ >+ forEachBank([&] (Bank bank) { >+ m_registers[bank] = m_code.regsInPriorityOrder(bank); >+ for (Reg reg : m_registers[bank]) { >+ allowedRegisters.set(reg); >+ TmpData data; >+ data.spillSlot = m_code.addStackSlot(8, StackSlotKind::Spill); >+ data.reg = Reg(); >+ m_map[Tmp(reg)] = data; >+ m_allTmps[bank].append(Tmp(reg)); >+ } >+ }); >+ >+ UnifiedTmpLiveness liveness(m_code); >+ >+ TmpMap<size_t> liveRangeEnd(m_code, 0); // OOPS: How to handle named Tmps? 
>+ >+ { >+ size_t globalIndex = 0; >+ for (BasicBlock* block : m_code) { >+ for (Tmp tmp : liveness.liveAtHead(block)) { >+ if (tmp.isReg()) >+ continue; >+ liveRangeEnd[tmp] = std::max(globalIndex, liveRangeEnd[tmp]); >+ } >+ for (size_t instIndex = 0; instIndex < block->size(); ++instIndex) { >+ Inst& inst = block->at(instIndex); >+ inst.forEachTmpFast([&] (Tmp tmp) { >+ if (tmp.isReg()) >+ return; >+ liveRangeEnd[tmp] = std::max(globalIndex, liveRangeEnd[tmp]); >+ }); >+ ++globalIndex; >+ } >+ for (Tmp tmp : liveness.liveAtTail(block)) { >+ if (tmp.isReg()) >+ continue; >+ liveRangeEnd[tmp] = std::max(globalIndex, liveRangeEnd[tmp]); >+ } >+ } >+ } >+ >+ static double flushAtTail = 0.0; >+ static double totalFlush = 0.0; >+ >+ static size_t section = 0; >+ UNUSED_PARAM(section); >+ // forEachTmp = ~32654956 >+ // actual alloc loop = ~196500447 >+ // last actual alloc loop = 162908252 >+ // alloc lambda = 264039289 >+ // alloc spill = 139815925 >+ // insertionSet.insert = 204328737 >+ // entire reg alloc = 703087528 >+ >+ >+ size_t globalIndex = 0; >+ >+ //auto start = rdtsc(); >+ for (BasicBlock* block : m_code) { >+ >+ forEachBank([&] (Bank bank) { >+ m_availableRegs[bank] = RegisterSet(); >+ }); >+ >+ m_currentAllocation.clear(); >+ >+ forEachBank([&] (Bank bank) { >+ for (Tmp tmp : m_allTmps[bank]) >+ m_map[tmp].reg = Reg(); // Everything is spilled at block boundaries. >+ for (Reg reg : m_registers[bank]) >+ m_availableRegs[Tmp(reg).bank()].set(reg); >+ }); >+ >+ if (block == m_code[0]) { >+ for (Tmp tmp : liveness.liveAtHead(block)) { >+ if (!tmp.isReg()) >+ continue; >+ Reg reg = tmp.reg(); >+ m_map[tmp].reg = reg; >+ m_availableRegs[tmp.bank()].clear(reg); >+ m_currentAllocation[reg] = tmp; >+ } >+ } >+ >+ InsertionSet insertionSet(code); >+ //Vector<Tmp*, 8> tmpsToAlloc[numBanks]; >+ for (size_t instIndex = 0; instIndex < block->size(); ++instIndex) { >+ Inst& inst = block->at(instIndex); >+ >+ auto flush = [&] (Tmp tmp, bool atTail = false) { >+ if (atTail) >+ ++flushAtTail; >+ ++totalFlush; >+ if (Reg reg = m_map[tmp].reg) { >+ Opcode move = tmp.bank() == GP ? Move : MoveDouble; >+ //auto start = rdtsc(); >+ insertionSet.insert(instIndex, move, inst.origin, reg, Arg::stack(m_map[tmp].spillSlot)); >+ if (atTail) >+ ++totalMemOps; >+ //auto end = rdtsc(); >+ //section += end - start; >+ } >+ }; >+ >+ auto spill = [&] (Tmp tmp) { >+ if (Reg reg = m_map[tmp].reg) { >+ m_availableRegs[tmp.bank()].set(reg); >+ m_currentAllocation[reg] = Tmp(); >+ flush(tmp); >+ } else { >+ ASSERT(!m_currentAllocation[reg]); >+ } >+ >+ m_map[tmp].reg = Reg(); >+ }; >+ >+ // OOPS: This does dumb things for defs like loads a value right before we def! >+ auto alloc = [&] (Tmp tmp, Reg reg, bool isDef) { >+ if (Tmp occupyingTmp = m_currentAllocation[reg]) { >+ auto start = rdtsc(); >+ spill(occupyingTmp); >+ auto end = rdtsc(); >+ section += end - start; >+ } >+ >+ m_map[tmp].reg = reg; >+ m_availableRegs[tmp.bank()].clear(reg); >+ m_currentAllocation[reg] = tmp; >+ >+ if (!isDef) { >+ //auto start = rdtsc(); >+ Opcode move = tmp.bank() == GP ? Move : MoveDouble; >+ insertionSet.insert(instIndex, move, inst.origin, Arg::stack(m_map[tmp].spillSlot), reg); >+ ++totalMemOps; >+ //auto end = rdtsc(); >+ //section += end - start; >+ } >+ }; >+ >+ if (inst.isTerminal() && block->numSuccessors()) { >+ // We spill everything between block boundaries. 
>+ // >+ for (Tmp tmp : liveness.liveAtTail(block)) { >+ if (tmp.isReg() && !allowedRegisters.contains(tmp.reg())) >+ continue; >+ if (m_map[tmp].reg) >+ flush(tmp, true); >+ } >+ } >+ >+ for (size_t i = 0; i < m_currentAllocation.size(); ++i) { >+ Tmp tmp = m_currentAllocation[i]; >+ if (!tmp) >+ continue; >+ if (tmp.isReg()) >+ continue; >+ if (liveRangeEnd[tmp] >= globalIndex) >+ continue; >+ >+ Reg reg = Reg::fromIndex(i); >+ m_map[tmp].reg = Reg(); >+ m_availableRegs[tmp.bank()].set(reg); >+ m_currentAllocation[i] = Tmp(); >+ } >+ >+ RegisterSet namedUsedRegs; >+ RegisterSet namedDefdRegs; >+ RegisterSet clobberedRegs; >+ >+ //Vector<Tmp, 8> insnUses; >+ >+ //forEachBank([&] (Bank bank) { >+ // tmpsToAlloc[bank].resize(0); >+ //}); >+ >+ { >+ inst.forEachTmp([&] (Tmp& tmp, Arg::Role role, Bank, Width) { >+ //if (Arg::isAnyUse(role)) >+ // insnUses.append(tmp); >+ if (tmp.isReg()) { >+ if (!allowedRegisters.get(tmp.reg())) >+ return; >+ >+ if (Arg::isAnyUse(role)) >+ namedUsedRegs.set(tmp.reg()); >+ if (Arg::isAnyDef(role)) >+ namedDefdRegs.set(tmp.reg()); >+ return; >+ } >+ >+ //tmpsToAlloc[bank].append(&tmp); >+ }); >+ } >+ >+ if (inst.kind.opcode == Patch) >+ clobberedRegs.merge(inst.extraClobberedRegs()); >+ >+ if (Inst* nextInst = block->get(instIndex + 1)) { >+ if (nextInst->kind.opcode == Patch) >+ clobberedRegs.merge(nextInst->extraEarlyClobberedRegs()); >+ } >+ >+ clobberedRegs.filter(allowedRegisters); >+ >+ auto allocNamed = [&] (const RegisterSet& named, bool isDef) { >+ for (Reg reg : named) { >+ if (Tmp occupyingTmp = m_currentAllocation[reg]) { >+ // Something is in this register. >+ if (occupyingTmp == Tmp(reg)) >+ continue; >+ } >+ >+ alloc(Tmp(reg), reg, isDef); >+ } >+ }; >+ >+ allocNamed(namedUsedRegs, false); >+ allocNamed(namedDefdRegs, true); >+ allocNamed(clobberedRegs, true); >+ >+ { >+ auto handle = [&] (Tmp& tmp, Bank bank, bool isDef) { >+ if (Reg reg = m_map[tmp].reg) { >+ ASSERT(!namedDefdRegs.contains(reg)); >+ tmp = Tmp(reg); >+ namedUsedRegs.set(reg); >+ ASSERT(!m_availableRegs[bank].get(reg)); >+ return; >+ } >+ >+ if (m_availableRegs[bank].numberOfSetRegisters()) { >+ // We first try to take any available register. >+ for (Reg reg : m_registers[bank]) { >+ if (namedUsedRegs.contains(reg) || namedDefdRegs.contains(reg) || clobberedRegs.contains(reg)) >+ continue; >+ if (!m_availableRegs[bank].contains(reg)) >+ continue; >+ namedUsedRegs.set(reg); >+ alloc(tmp, reg, isDef); >+ tmp = Tmp(reg); >+ return; >+ } >+ RELEASE_ASSERT_NOT_REACHED(); >+ } >+ >+ // Nothing was available, let's make some room. >+ for (unsigned i = 0; i < m_registers[bank].size(); ++i) { >+ // OOPS: Super inefficient, but logically what we want. >+ Reg reg = m_registers[bank][i]; >+ if (namedUsedRegs.contains(reg) || namedDefdRegs.contains(reg) || clobberedRegs.contains(reg)) >+ continue; >+ >+ namedUsedRegs.set(reg); >+ >+ alloc(tmp, reg, isDef); >+ tmp = Tmp(reg); >+ //m_registers[bank].remove(i); >+ //m_registers[bank].append(reg); >+ return; >+ } >+ RELEASE_ASSERT_NOT_REACHED(); >+ }; >+ >+ inst.forEachTmp([&] (Tmp& tmp, Arg::Role role, Bank bank, Width) { >+ if (tmp.isReg()) >+ return; >+ if (Arg::isAnyUse(role)) { >+ bool isDef = false; >+ handle(tmp, bank, isDef); >+ // We do defs below since we may also use the same temp as we def it, and we need to be sure it's only a def and no use >+ return; >+ } >+ }); >+ >+ inst.forEachTmpFast([&] (Tmp& tmp) { >+ if (tmp.isReg()) >+ return; >+ // If we're not yet allocated, we must be a def. 
>+ bool isDef = true; >+ handle(tmp, tmp.bank(), isDef); >+ }); >+ } >+ >+ ++globalIndex; >+ } >+ >+ insertionSet.execute(block); >+ } >+ //auto end = rdtsc(); >+ //section += end - start; >+ >+ //dataLogLn("Flush at tail rate: ", flushAtTail/totalFlush); >+ >+ SuperSamplerScope superSamplerScope; >+ handleCalleeSaves(m_code); >+ allocateEscapedStackSlots(m_code); >+ >+ { >+ unsigned index = 0; >+ forEachBank([&] (Bank bank) { >+ for (Tmp tmp : m_allTmps[bank]) { >+ ptrdiff_t offset = -static_cast<ptrdiff_t>(m_code.frameSize()) - static_cast<ptrdiff_t>(index) * 8 - 8; >+ m_map[tmp].spillSlot->setOffsetFromFP(offset); >+ ++index; >+ } >+ }); >+ } >+ >+ updateFrameSizeBasedOnStackSlots(m_code); >+ m_code.setStackIsAllocated(true); >+ >+ //dataLogLn("rdtsc: ", section); >+ } >+ >+private: >+ Code& m_code; >+ Vector<Tmp> m_allTmps[numBanks]; >+ TmpMap<TmpData> m_map; >+ Vector<Reg> m_registers[numBanks]; >+ RegisterSet m_availableRegs[numBanks]; >+ IndexMap<Reg, Tmp> m_currentAllocation; // OOPS: Use an indexed mapping if possible. >+}; >+ > bool verbose() { return Options::airLinearScanVerbose(); } > > // Phase constants we use for the PhaseInsertionSet. >@@ -69,6 +428,7 @@ const unsigned secondPhase = 1; > > typedef Range<size_t> Interval; > >+ > struct TmpData { > void dump(PrintStream& out) const > { >@@ -551,10 +911,14 @@ private: > tmp = addSpillTmpWithInterval(bank, interval(indexOfEarly, Arg::timing(role))); > if (role == Arg::Scratch) > return; >- if (Arg::isAnyUse(role)) >+ if (Arg::isAnyUse(role)) { > m_insertionSets[block].insert(instIndex, secondPhase, move, inst.origin, Arg::stack(spilled), tmp); >- if (Arg::isAnyDef(role)) >+ ++totalMemOps; >+ } >+ if (Arg::isAnyDef(role)) { > m_insertionSets[block].insert(instIndex + 1, firstPhase, move, inst.origin, tmp, Arg::stack(spilled)); >+ ++totalMemOps; >+ } > }); > } > } >@@ -659,10 +1023,23 @@ void allocateRegistersAndStackByLinearSc > PhaseScope phaseScope(code, "allocateRegistersAndStackByLinearScan"); > if (verbose()) > dataLog("Air before linear scan:\n", code); >- LinearScan linearScan(code); >- linearScan.run(); >+ >+ //if (true || code.size() <= 1) { >+ // DumbRegAlloc regAlloc(code); >+ //} else { >+ LinearScan linearScan(code); >+ linearScan.run(); >+ //} >+ > if (verbose()) > dataLog("Air after linear scan:\n", code); >+ >+ // 719576 >+ // shitty = 476220 >+ // less shitty = 276981 >+ // linear scan = 13068 >+ // >+ //dataLogLn("Total mem ops: ", totalMemOps); > } > > } } } // namespace JSC::B3::Air >Index: Source/JavaScriptCore/b3/air/AirGenerate.cpp >=================================================================== >--- Source/JavaScriptCore/b3/air/AirGenerate.cpp (revision 240697) >+++ Source/JavaScriptCore/b3/air/AirGenerate.cpp (working copy) >@@ -36,6 +36,8 @@ > #include "AirFixObviousSpills.h" > #include "AirFixPartialRegisterStalls.h" > #include "AirGenerationContext.h" >+#include "AirHandleCalleeSaves.h" >+#include "AirLiveness.h" > #include "AirLogRegisterPressure.h" > #include "AirLowerAfterRegAlloc.h" > #include "AirLowerEntrySwitch.h" >@@ -45,6 +47,8 @@ > #include "AirOptimizeBlockOrder.h" > #include "AirReportUsedRegisters.h" > #include "AirSimplifyCFG.h" >+#include "AirStackAllocation.h" >+#include "AirTmpMap.h" > #include "AirValidate.h" > #include "B3Common.h" > #include "B3Procedure.h" >@@ -57,6 +61,14 @@ > > namespace JSC { namespace B3 { namespace Air { > >+ALWAYS_INLINE size_t rdtsc() >+{ >+ unsigned high; >+ unsigned low; >+ asm volatile ("rdtsc" : "=a"(low), "=d"(high)); >+ return low; >+} >+ > void 
prepareForGeneration(Code& code) > { > TimingScope timingScope("Air::prepareForGeneration"); >@@ -73,6 +85,36 @@ void prepareForGeneration(Code& code) > if (shouldValidateIR()) > validate(code); > >+ if (code.optLevel() == 0) { >+ lowerMacros(code); >+ >+ // We may still need to do post-allocation lowering. Doing it after both register and >+ // stack allocation is less optimal, but it works fine. >+ lowerAfterRegAlloc(code); >+ >+ // Actually create entrypoints. >+ lowerEntrySwitch(code); >+ >+ /* >+ // The control flow graph can be simplified further after we have lowered EntrySwitch. >+ simplifyCFG(code); >+ */ >+ >+ // This sorts the basic blocks in Code to achieve an ordering that maximizes the likelihood that a high >+ // frequency successor is also the fall-through target. >+ optimizeBlockOrder(code); >+ >+ if (shouldValidateIR()) >+ validate(code); >+ >+ if (shouldDumpIR(AirMode)) { >+ dataLog("Air after ", code.lastPhaseName(), ", before generation:\n"); >+ dataLog(code); >+ } >+ >+ return; >+ } >+ > simplifyCFG(code); > > lowerMacros(code); >@@ -161,7 +203,7 @@ void prepareForGeneration(Code& code) > } > } > >-void generate(Code& code, CCallHelpers& jit) >+NEVER_INLINE static void generateAlreadyAlloced(Code& code, CCallHelpers& jit) > { > TimingScope timingScope("Air::generate"); > >@@ -305,6 +347,532 @@ void generate(Code& code, CCallHelpers& > pcToOriginMap.appendItem(jit.labelIgnoringWatchpoints(), Origin()); > } > >+NEVER_INLINE static void generateAndAllocateRegisters(Code& code, CCallHelpers& jit) >+{ >+ struct TmpData { >+ StackSlot* spillSlot; >+ Reg reg; >+ }; >+ >+ TimingScope timingScope("Air::generateAndAllocateRegisters"); >+ >+ DisallowMacroScratchRegisterUsage disallowScratch(jit); >+ >+ // And now, we generate code. >+ GenerationContext context; >+ context.code = &code; >+ context.blockLabels.resize(code.size()); >+ for (BasicBlock* block : code) { >+ if (block) >+ context.blockLabels[block] = Box<CCallHelpers::Label>::create(); >+ } >+ IndexMap<BasicBlock*, CCallHelpers::JumpList> blockJumps(code.size()); >+ >+ auto link = [&] (CCallHelpers::Jump jump, BasicBlock* target) { >+ if (context.blockLabels[target]->isSet()) { >+ jump.linkTo(*context.blockLabels[target], &jit); >+ return; >+ } >+ >+ blockJumps[target].append(jump); >+ }; >+ >+ /* >+ PCToOriginMap& pcToOriginMap = code.proc().pcToOriginMap(); >+ auto addItem = [&] (Inst& inst) { >+ if (!inst.origin) { >+ pcToOriginMap.appendItem(jit.labelIgnoringWatchpoints(), Origin()); >+ return; >+ } >+ pcToOriginMap.appendItem(jit.labelIgnoringWatchpoints(), inst.origin->origin()); >+ }; >+ */ >+ >+ Disassembler* disassembler = code.disassembler(); >+ >+ RegisterSet allowedRegisters; >+ >+ TmpMap<TmpData> m_map(code); >+ Vector<Tmp> m_allTmps[numBanks]; >+ Vector<Reg> m_registers[numBanks]; >+ RegisterSet m_availableRegs[numBanks]; >+ >+ >+ // Each Tmp gets its own stack slot. 
>+ code.forEachTmp([&] (Tmp tmp) { >+ RELEASE_ASSERT(!tmp.isReg()); >+ TmpData data; >+ data.spillSlot = code.addStackSlot(8, StackSlotKind::Spill); >+ data.reg = Reg(); >+ m_map[tmp] = data; >+ m_allTmps[tmp.bank()].append(tmp); >+ }); >+ >+ forEachBank([&] (Bank bank) { >+ m_registers[bank] = code.regsInPriorityOrder(bank); >+ for (Reg reg : m_registers[bank]) { >+ allowedRegisters.set(reg); >+ TmpData data; >+ data.spillSlot = code.addStackSlot(8, StackSlotKind::Spill); >+ data.reg = Reg(); >+ m_map[Tmp(reg)] = data; >+ m_allTmps[bank].append(Tmp(reg)); >+ } >+ }); >+ >+ { >+ handleCalleeSaves(code, RegisterSet::calleeSaveRegisters()); >+ allocateEscapedStackSlots(code); >+ unsigned index = 0; >+ forEachBank([&] (Bank bank) { >+ for (Tmp tmp : m_allTmps[bank]) { >+ ptrdiff_t offset = -static_cast<ptrdiff_t>(code.frameSize()) - static_cast<ptrdiff_t>(index) * 8 - 8; >+ m_map[tmp].spillSlot->setOffsetFromFP(offset); >+ ++index; >+ } >+ }); >+ updateFrameSizeBasedOnStackSlots(code); >+ code.setStackIsAllocated(true); >+ >+ lowerStackArgs(code); >+ } >+ >+ UnifiedTmpLiveness liveness(code); >+ >+ TmpMap<size_t> liveRangeEnd(code, 0); // OOPS: How to handle named Tmps? >+ >+ // Build live ranges. >+ { >+ size_t globalIndex = 0; >+ for (BasicBlock* block : code) { >+ for (Tmp tmp : liveness.liveAtHead(block)) { >+ if (tmp.isReg()) >+ continue; >+ liveRangeEnd[tmp] = std::max(globalIndex, liveRangeEnd[tmp]); >+ } >+ for (size_t instIndex = 0; instIndex < block->size(); ++instIndex) { >+ Inst& inst = block->at(instIndex); >+ inst.forEachTmpFast([&] (Tmp tmp) { >+ if (tmp.isReg()) >+ return; >+ liveRangeEnd[tmp] = std::max(globalIndex, liveRangeEnd[tmp]); >+ }); >+ ++globalIndex; >+ } >+ for (Tmp tmp : liveness.liveAtTail(block)) { >+ if (tmp.isReg()) >+ continue; >+ liveRangeEnd[tmp] = std::max(globalIndex, liveRangeEnd[tmp]); >+ } >+ } >+ } >+ >+ static double flushAtTail = 0.0; >+ static double totalFlush = 0.0; >+ >+ static size_t section = 0; >+ UNUSED_PARAM(section); >+ // forEachTmp = ~32654956 >+ // actual alloc loop = ~196500447 >+ // last actual alloc loop = 162908252 >+ // alloc lambda = 264039289 >+ // alloc spill = 139815925 >+ // insertionSet.insert = 204328737 >+ // entire reg alloc = 703087528 >+ >+ >+ size_t globalIndex = 0; >+ >+ static size_t totalMemOps = 0; >+ >+ IndexMap<BasicBlock*, IndexMap<Reg, Tmp>> currentAllocationMap(code.size()); >+ { >+ IndexMap<Reg, Tmp> defaultCurrentAllocation(Reg::maxIndex() + 1); >+ for (BasicBlock* block : code) { >+ if (block == code[0]) // Handled below. 
>+ continue; >+ currentAllocationMap[block] = defaultCurrentAllocation; >+ } >+ >+ for (Tmp tmp : liveness.liveAtHead(code[0])) { >+ if (!tmp.isReg()) >+ continue; >+ defaultCurrentAllocation[tmp.reg()] = tmp; >+ } >+ currentAllocationMap[code[0]] = defaultCurrentAllocation; >+ } >+ >+ //auto start = rdtsc(); >+ for (BasicBlock* block : code) { >+ context.currentBlock = block; >+ context.indexInBlock = UINT_MAX; >+ blockJumps[block].link(&jit); >+ CCallHelpers::Label label = jit.label(); >+ *context.blockLabels[block] = label; >+ >+ if (disassembler) >+ disassembler->startBlock(block, jit); >+ >+ if (Optional<unsigned> entrypointIndex = code.entrypointIndex(block)) { >+ ASSERT(code.isEntrypoint(block)); >+ >+ if (disassembler) >+ disassembler->startEntrypoint(jit); >+ >+ code.prologueGeneratorForEntrypoint(*entrypointIndex)->run(jit, code); >+ >+ if (disassembler) >+ disassembler->endEntrypoint(jit); >+ } else >+ ASSERT(!code.isEntrypoint(block)); >+ >+ >+ forEachBank([&] (Bank bank) { >+ m_availableRegs[bank] = RegisterSet(); >+ }); >+ >+ forEachBank([&] (Bank bank) { >+ if (!ASSERT_DISABLED) { >+ // Everything is spilled at block boundaries. We do this after we process each block so we don't >+ // have to walk all Tmps, since #Tmps >> #Available regs. Instead, we walk the register file at >+ // each block boundary and clear entries in this map. >+ for (Tmp tmp : m_allTmps[bank]) >+ RELEASE_ASSERT(m_map[tmp].reg == Reg()); >+ } >+ for (Reg reg : m_registers[bank]) >+ m_availableRegs[Tmp(reg).bank()].set(reg); >+ }); >+ >+ IndexMap<Reg, Tmp>& currentAllocation = currentAllocationMap[block]; >+ for (unsigned i = 0; i < currentAllocation.size(); ++i) { >+ Tmp tmp = currentAllocation[i]; >+ if (!tmp) >+ continue; >+ Reg reg = Reg::fromIndex(i); >+ m_map[tmp].reg = reg; >+ m_availableRegs[tmp.bank()].clear(reg); >+ } >+ >+ for (size_t instIndex = 0; instIndex < block->size(); ++instIndex) { >+ context.indexInBlock = instIndex; >+ Inst& inst = block->at(instIndex); >+ // OOPS: deal w/ addItem >+ >+ auto startLabel = jit.labelIgnoringWatchpoints(); >+ >+ /* >+ auto stackAddr = [&] (StackSlot* slot) { >+ Arg result = Arg::addr(Air::Tmp(GPRInfo::callFrameRegister), slot->offsetFromFP()); >+ RELEASE_ASSERT(result.isValidForm(Width64)); // OOPS: not correct on arm64. >+ return result; >+ }; >+ */ >+ >+ auto flush = [&] (Tmp tmp, bool atTail = false) { >+ if (atTail) >+ ++flushAtTail; >+ ++totalFlush; >+ if (Reg reg = m_map[tmp].reg) { >+ // OOPS: scratch reg on arm issue w/ offset size being too big. >+ ptrdiff_t offset = m_map[tmp].spillSlot->offsetFromFP(); >+ if (tmp.bank() == GP) { >+ jit.store64(reg.gpr(), CCallHelpers::Address(GPRInfo::callFrameRegister, offset)); >+ } else { >+ jit.storeDouble(reg.fpr(), CCallHelpers::Address(GPRInfo::callFrameRegister, offset)); >+ } >+ ++totalMemOps; >+ } >+ }; >+ >+ auto spill = [&] (Tmp tmp) { >+ if (Reg reg = m_map[tmp].reg) { >+ m_availableRegs[tmp.bank()].set(reg); >+ currentAllocation[reg] = Tmp(); >+ flush(tmp); >+ } else { >+ ASSERT(!currentAllocation[reg]); >+ } >+ >+ m_map[tmp].reg = Reg(); >+ }; >+ >+ // OOPS: This does dumb things for defs like loads a value right before we def! 
>+ auto alloc = [&] (Tmp tmp, Reg reg, bool isDef) { >+ if (Tmp occupyingTmp = currentAllocation[reg]) { >+ auto start = rdtsc(); >+ spill(occupyingTmp); >+ auto end = rdtsc(); >+ section += end - start; >+ } >+ >+ m_map[tmp].reg = reg; >+ m_availableRegs[tmp.bank()].clear(reg); >+ currentAllocation[reg] = tmp; >+ >+ if (!isDef) { >+ //auto start = rdtsc(); >+ ptrdiff_t offset = m_map[tmp].spillSlot->offsetFromFP(); >+ if (tmp.bank() == GP) { >+ jit.load64(CCallHelpers::Address(GPRInfo::callFrameRegister, offset), reg.gpr()); >+ } else { >+ jit.loadDouble(CCallHelpers::Address(GPRInfo::callFrameRegister, offset), reg.fpr()); >+ } >+ >+ ++totalMemOps; >+ //auto end = rdtsc(); >+ //section += end - start; >+ } >+ }; >+ >+ for (size_t i = 0; i < currentAllocation.size(); ++i) { >+ Tmp tmp = currentAllocation[i]; >+ if (!tmp) >+ continue; >+ if (tmp.isReg()) >+ continue; >+ if (liveRangeEnd[tmp] >= globalIndex) >+ continue; >+ >+ Reg reg = Reg::fromIndex(i); >+ m_map[tmp].reg = Reg(); >+ m_availableRegs[tmp.bank()].set(reg); >+ currentAllocation[i] = Tmp(); >+ } >+ >+ RegisterSet namedUsedRegs; >+ RegisterSet namedDefdRegs; >+ RegisterSet clobberedRegs; >+ >+ inst.forEachTmp([&] (Tmp& tmp, Arg::Role role, Bank, Width) { >+ if (tmp.isReg()) { >+ if (!allowedRegisters.get(tmp.reg())) >+ return; >+ >+ if (Arg::isAnyUse(role)) >+ namedUsedRegs.set(tmp.reg()); >+ if (Arg::isAnyDef(role)) >+ namedDefdRegs.set(tmp.reg()); >+ >+ return; >+ } >+ }); >+ >+ if (inst.kind.opcode == Patch) >+ clobberedRegs.merge(inst.extraClobberedRegs()); >+ >+ if (Inst* nextInst = block->get(instIndex + 1)) { >+ if (nextInst->kind.opcode == Patch) >+ clobberedRegs.merge(nextInst->extraEarlyClobberedRegs()); >+ } >+ >+ clobberedRegs.filter(allowedRegisters); >+ >+ auto allocNamed = [&] (const RegisterSet& named, bool isDef) { >+ for (Reg reg : named) { >+ if (Tmp occupyingTmp = currentAllocation[reg]) { >+ // Something is in this register. >+ if (occupyingTmp == Tmp(reg)) >+ continue; >+ } >+ >+ alloc(Tmp(reg), reg, isDef); >+ } >+ }; >+ >+ allocNamed(namedUsedRegs, false); // Must come before the defd registers since we may use and def the same register. >+ allocNamed(namedDefdRegs, true); >+ allocNamed(clobberedRegs, true); >+ >+ { >+ auto handle = [&] (Tmp& tmp, Bank bank, bool isDef) { >+ if (Reg reg = m_map[tmp].reg) { >+ ASSERT(!namedDefdRegs.contains(reg)); >+ tmp = Tmp(reg); >+ namedUsedRegs.set(reg); >+ ASSERT(!m_availableRegs[bank].get(reg)); >+ return; >+ } >+ >+ if (m_availableRegs[bank].numberOfSetRegisters()) { >+ // We first take an available register. >+ for (Reg reg : m_registers[bank]) { >+ if (namedUsedRegs.contains(reg) || namedDefdRegs.contains(reg) || clobberedRegs.contains(reg)) >+ continue; >+ if (!m_availableRegs[bank].contains(reg)) >+ continue; >+ namedUsedRegs.set(reg); >+ alloc(tmp, reg, isDef); >+ tmp = Tmp(reg); >+ return; >+ } >+ >+ RELEASE_ASSERT_NOT_REACHED(); >+ } >+ >+ // Nothing was available, let's make some room. >+ for (unsigned i = 0; i < m_registers[bank].size(); ++i) { >+ // OOPS: Super inefficient, but logically what we want. 
>+ Reg reg = m_registers[bank][i]; >+ if (namedUsedRegs.contains(reg) || namedDefdRegs.contains(reg) || clobberedRegs.contains(reg)) >+ continue; >+ >+ namedUsedRegs.set(reg); >+ >+ alloc(tmp, reg, isDef); >+ tmp = Tmp(reg); >+ return; >+ } >+ >+ RELEASE_ASSERT_NOT_REACHED(); >+ }; >+ >+ inst.forEachTmp([&] (Tmp& tmp, Arg::Role role, Bank bank, Width) { >+ if (tmp.isReg()) >+ return; >+ if (Arg::isAnyUse(role)) { >+ // We do defs below since we may also use the same temp as we def it, and we need to be sure it's only a def and no use >+ bool isDef = false; >+ handle(tmp, bank, isDef); >+ } >+ }); >+ >+ inst.forEachTmpFast([&] (Tmp& tmp) { >+ if (tmp.isReg()) >+ return; >+ // If we're not yet allocated, we must be a def since we handled uses above. >+ bool isDef = true; >+ handle(tmp, tmp.bank(), isDef); >+ }); >+ } >+ >+ // OOPS: Assert not terminal w/ def. If this isn't true, the below >+ // isn't our current regalloc state. If it is true, then this is our >+ // reg alloc state... >+ if (inst.isTerminal() && block->numSuccessors()) { >+ // We spill everything between block boundaries. >+ >+ bool goodToGo = true; >+ for (unsigned i = 0; i < block->numSuccessors(); ++i) { >+ BasicBlock* succ = block->successorBlock(i); >+ goodToGo &= succ->numPredecessors() == 1 && !context.blockLabels[succ]->isSet(); >+ } >+ if (goodToGo) { >+ for (unsigned i = 0; i < block->numSuccessors(); ++i) { >+ BasicBlock* succ = block->successorBlock(i); >+ currentAllocationMap[succ] = currentAllocation; >+ } >+ } else { >+ for (Tmp tmp : liveness.liveAtTail(block)) { >+ if (tmp.isReg() && !allowedRegisters.contains(tmp.reg())) >+ continue; >+ if (m_map[tmp].reg) { >+ flush(tmp, true); >+ } >+ } >+ } >+ } >+ >+ >+ if (!inst.isTerminal()) { >+ CCallHelpers::Jump jump = inst.generate(jit, context); >+ ASSERT_UNUSED(jump, !jump.isSet()); >+ // OOPS: figure this out for terminals >+ auto endLabel = jit.labelIgnoringWatchpoints(); >+ if (disassembler) >+ disassembler->addInst(&inst, startLabel, endLabel); >+ } else { >+ bool needsToGenerate = true; >+ if (inst.kind.opcode == Jump && block->successorBlock(0) == code.findNextBlock(block)) >+ needsToGenerate = false; >+ >+ if (isReturn(inst.kind.opcode)) { >+ needsToGenerate = false; >+ >+ // We currently don't represent the full prologue/epilogue in Air, so we need to >+ // have this override. >+ auto start = jit.labelIgnoringWatchpoints(); >+ if (code.frameSize()) { >+ jit.emitRestore(code.calleeSaveRegisterAtOffsetList()); >+ jit.emitFunctionEpilogue(); >+ } else >+ jit.emitFunctionEpilogueWithEmptyFrame(); >+ jit.ret(); >+ auto end = jit.labelIgnoringWatchpoints(); >+ if (disassembler) >+ disassembler->addInst(&block->last(), start, end); >+ } >+ >+ if (needsToGenerate) { >+ CCallHelpers::Jump jump = block->last().generate(jit, context); >+ >+ // The jump won't be set for patchpoints. It won't be set for Oops because then it won't have >+ // any successors. >+ if (jump.isSet()) { >+ switch (block->numSuccessors()) { >+ case 1: >+ link(jump, block->successorBlock(0)); >+ break; >+ case 2: >+ link(jump, block->successorBlock(0)); >+ if (block->successorBlock(1) != code.findNextBlock(block)) >+ link(jit.jump(), block->successorBlock(1)); >+ break; >+ default: >+ RELEASE_ASSERT_NOT_REACHED(); >+ break; >+ } >+ } >+ } >+ } >+ >+ >+ ++globalIndex; >+ } >+ >+ // Registers usually get spilled at block boundaries. We do it this way since we don't >+ // want to iterate the entire TmpMap, since #Tmps >> #Regs. 
We may not actually spill >+ // all registers, but at the top of this loop we handle that case by prepulating register >+ // state. Here, we just clear this map. After this loop, this map should contain only >+ // null entries. >+ for (size_t i = 0; i < currentAllocation.size(); ++i) { >+ if (Tmp tmp = currentAllocation[i]) >+ m_map[tmp].reg = Reg(); >+ } >+ } >+ >+ context.currentBlock = nullptr; >+ context.indexInBlock = UINT_MAX; >+ >+ Vector<CCallHelpers::Label> entrypointLabels(code.numEntrypoints()); >+ for (unsigned i = code.numEntrypoints(); i--;) >+ entrypointLabels[i] = *context.blockLabels[code.entrypoint(i).block()]; >+ code.setEntrypointLabels(WTFMove(entrypointLabels)); >+ >+ //pcToOriginMap.appendItem(jit.label(), Origin()); >+ // FIXME: Make late paths have Origins: https://bugs.webkit.org/show_bug.cgi?id=153689 >+ if (disassembler) >+ disassembler->startLatePath(jit); >+ >+ for (auto& latePath : context.latePaths) >+ latePath->run(jit, context); >+ >+ if (disassembler) >+ disassembler->endLatePath(jit); >+ //pcToOriginMap.appendItem(jit.labelIgnoringWatchpoints(), Origin()); >+ >+ //auto end = rdtsc(); >+ //section += end - start; >+ //dataLogLn("Flush at tail rate: ", flushAtTail/totalFlush); >+} >+ >+void generate(Code& code, CCallHelpers& jit) >+{ >+ if (code.optLevel() > 0) >+ generateAlreadyAlloced(code, jit); >+ else >+ generateAndAllocateRegisters(code, jit); >+} >+ > } } } // namespace JSC::B3::Air > > #endif // ENABLE(B3_JIT) >Index: Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.cpp >=================================================================== >--- Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.cpp (revision 240697) >+++ Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.cpp (working copy) >@@ -50,7 +50,12 @@ void handleCalleeSaves(Code& code) > } > } > >- // Now we filter to really get the callee saves. >+ handleCalleeSaves(code, WTFMove(usedCalleeSaves)); >+} >+ >+void handleCalleeSaves(Code& code, RegisterSet usedCalleeSaves) >+{ >+ // We filter to really get the callee saves. > usedCalleeSaves.filter(RegisterSet::calleeSaveRegisters()); > usedCalleeSaves.filter(code.mutableRegs()); > usedCalleeSaves.exclude(RegisterSet::stackRegisters()); // We don't need to save FP here. 
>Index: Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.h >=================================================================== >--- Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.h (revision 240697) >+++ Source/JavaScriptCore/b3/air/AirHandleCalleeSaves.h (working copy) >@@ -41,6 +41,7 @@ class Code; > // We should make this interact with the client: https://bugs.webkit.org/show_bug.cgi?id=150459 > > void handleCalleeSaves(Code&); >+void handleCalleeSaves(Code&, RegisterSet); > > } } } // namespace JSC::B3::Air > >Index: Source/JavaScriptCore/wasm/WasmAirIRGenerator.cpp >=================================================================== >--- Source/JavaScriptCore/wasm/WasmAirIRGenerator.cpp (revision 240752) >+++ Source/JavaScriptCore/wasm/WasmAirIRGenerator.cpp (working copy) >@@ -284,6 +284,17 @@ public: > return result; > } > >+ ALWAYS_INLINE void didDie(const ExpressionType& typedTmp) >+ { >+ Tmp tmp = typedTmp.tmp(); >+ if (!tmp) >+ return; >+ if (tmp.isGP()) >+ m_freeGPs.append(tmp); >+ else >+ m_freeFPs.append(tmp); >+ } >+ > private: > ALWAYS_INLINE void validateInst(Inst& inst) > { >@@ -323,6 +334,16 @@ private: > > Tmp newTmp(B3::Bank bank) > { >+ switch (bank) { >+ case B3::GP: >+ if (m_freeGPs.size()) >+ return m_freeGPs.takeLast(); >+ break; >+ case B3::FP: >+ if (m_freeFPs.size()) >+ return m_freeFPs.takeLast(); >+ break; >+ } > return m_code.newTmp(bank); > } > >@@ -573,6 +594,9 @@ private: > GPRReg m_wasmContextInstanceGPR { InvalidGPRReg }; > bool m_makesCalls { false }; > >+ Vector<Tmp, 8> m_freeGPs; >+ Vector<Tmp, 8> m_freeFPs; >+ > TypedTmp m_instanceValue; // Always use the accessor below to ensure the instance value is materialized when used. > bool m_usesInstanceValue { false }; > TypedTmp instanceValue() >@@ -1895,6 +1919,9 @@ Expected<std::unique_ptr<InternalFunctio > // optLevel=1. > procedure.setNeedsUsedRegisters(false); > >+ //procedure.setOptLevel(compilationMode == CompilationMode::BBQMode >+ // ? Options::webAssemblyBBQOptimizationLevel() >+ // : Options::webAssemblyOMGOptimizationLevel()); > procedure.setOptLevel(compilationMode == CompilationMode::BBQMode > ? 
Options::webAssemblyBBQOptimizationLevel() > : Options::webAssemblyOMGOptimizationLevel()); >Index: Source/JavaScriptCore/wasm/WasmB3IRGenerator.cpp >=================================================================== >--- Source/JavaScriptCore/wasm/WasmB3IRGenerator.cpp (revision 240752) >+++ Source/JavaScriptCore/wasm/WasmB3IRGenerator.cpp (working copy) >@@ -230,6 +230,8 @@ public: > Value* constant(B3::Type, uint64_t bits, Optional<Origin> = WTF::nullopt); > void insertConstants(); > >+ ALWAYS_INLINE void didDie(ExpressionType) { } >+ > private: > void emitExceptionCheck(CCallHelpers&, ExceptionType); > >Index: Source/JavaScriptCore/wasm/WasmBBQPlan.h >=================================================================== >--- Source/JavaScriptCore/wasm/WasmBBQPlan.h (revision 240697) >+++ Source/JavaScriptCore/wasm/WasmBBQPlan.h (working copy) >@@ -147,6 +147,7 @@ private: > HashMap<uint32_t, std::unique_ptr<InternalFunction>, typename DefaultHash<uint32_t>::Hash, WTF::UnsignedWithZeroKeyHashTraits<uint32_t>> m_embedderToWasmInternalFunctions; > Vector<CompilationContext> m_compilationContexts; > Vector<TierUpCount> m_tierUpCounts; >+ Vector<std::pair<size_t, unsigned>> m_sortedFunctions; > > Vector<Vector<UnlinkedWasmToWasmCall>> m_unlinkedWasmToWasmCalls; > State m_state; >Index: Source/JavaScriptCore/wasm/WasmFunctionParser.h >=================================================================== >--- Source/JavaScriptCore/wasm/WasmFunctionParser.h (revision 240752) >+++ Source/JavaScriptCore/wasm/WasmFunctionParser.h (working copy) >@@ -168,6 +168,8 @@ auto FunctionParser<Context>::binaryCase > WASM_TRY_POP_EXPRESSION_STACK_INTO(right, "binary right"); > WASM_TRY_POP_EXPRESSION_STACK_INTO(left, "binary left"); > WASM_TRY_ADD_TO_CONTEXT(template addOp<op>(left, right, result)); >+ m_context.didDie(left); >+ m_context.didDie(right); > > m_expressionStack.append(result); > return { }; >@@ -182,6 +184,7 @@ auto FunctionParser<Context>::unaryCase( > > WASM_TRY_POP_EXPRESSION_STACK_INTO(value, "unary"); > WASM_TRY_ADD_TO_CONTEXT(template addOp<op>(value, result)); >+ m_context.didDie(value); > > m_expressionStack.append(result); > return { }; >@@ -211,6 +214,10 @@ auto FunctionParser<Context>::parseExpre > ExpressionType result; > WASM_TRY_ADD_TO_CONTEXT(addSelect(condition, nonZero, zero, result)); > >+ m_context.didDie(condition); >+ m_context.didDie(zero); >+ m_context.didDie(nonZero); >+ > m_expressionStack.append(result); > return { }; > } >@@ -226,6 +233,7 @@ auto FunctionParser<Context>::parseExpre > WASM_PARSER_FAIL_IF(!parseVarUInt32(offset), "can't get load offset"); > WASM_TRY_POP_EXPRESSION_STACK_INTO(pointer, "load pointer"); > WASM_TRY_ADD_TO_CONTEXT(load(static_cast<LoadOpType>(m_currentOpcode), pointer, result, offset)); >+ m_context.didDie(pointer); > m_expressionStack.append(result); > return { }; > } >@@ -241,6 +249,8 @@ auto FunctionParser<Context>::parseExpre > WASM_TRY_POP_EXPRESSION_STACK_INTO(value, "store value"); > WASM_TRY_POP_EXPRESSION_STACK_INTO(pointer, "store pointer"); > WASM_TRY_ADD_TO_CONTEXT(store(static_cast<StoreOpType>(m_currentOpcode), pointer, value, offset)); >+ m_context.didDie(value); >+ m_context.didDie(pointer); > return { }; > } > #undef CREATE_CASE >@@ -288,6 +298,7 @@ auto FunctionParser<Context>::parseExpre > WASM_PARSER_FAIL_IF(!parseVarUInt32(index), "can't get index for set_local"); > WASM_TRY_POP_EXPRESSION_STACK_INTO(value, "set_local"); > WASM_TRY_ADD_TO_CONTEXT(setLocal(index, value)); >+ m_context.didDie(value); > return { }; > } > >@@ 
-314,6 +325,7 @@ auto FunctionParser<Context>::parseExpre > WASM_PARSER_FAIL_IF(!parseVarUInt32(index), "can't get set_global's index"); > WASM_TRY_POP_EXPRESSION_STACK_INTO(value, "set_global value"); > WASM_TRY_ADD_TO_CONTEXT(setGlobal(index, value)); >+ m_context.didDie(value); > return { }; > } > >@@ -396,6 +408,7 @@ auto FunctionParser<Context>::parseExpre > WASM_TRY_ADD_TO_CONTEXT(addIf(condition, inlineSignature, control)); > m_controlStack.append({ WTFMove(m_expressionStack), control }); > m_expressionStack = ExpressionList(); >+ m_context.didDie(condition); > return { }; > } > >@@ -420,6 +433,9 @@ auto FunctionParser<Context>::parseExpre > ControlType& data = m_controlStack[m_controlStack.size() - 1 - target].controlData; > > WASM_TRY_ADD_TO_CONTEXT(addBranch(data, condition, m_expressionStack)); >+ >+ m_context.didDie(condition); >+ > return { }; > } > >@@ -446,6 +462,8 @@ auto FunctionParser<Context>::parseExpre > WASM_TRY_POP_EXPRESSION_STACK_INTO(condition, "br_table condition"); > WASM_TRY_ADD_TO_CONTEXT(addSwitch(condition, targets, m_controlStack[m_controlStack.size() - 1 - defaultTarget].controlData, m_expressionStack)); > >+ m_context.didDie(condition); >+ > m_unreachableBlocks = 1; > return { }; > } >@@ -503,6 +521,8 @@ auto FunctionParser<Context>::parseExpre > WASM_TRY_ADD_TO_CONTEXT(addGrowMemory(delta, result)); > m_expressionStack.append(result); > >+ m_context.didDie(delta); >+ > return { }; > } > >Index: Source/JavaScriptCore/wasm/WasmValidate.cpp >=================================================================== >--- Source/JavaScriptCore/wasm/WasmValidate.cpp (revision 240752) >+++ Source/JavaScriptCore/wasm/WasmValidate.cpp (working copy) >@@ -141,6 +141,8 @@ public: > Result WARN_UNUSED_RETURN addCall(unsigned calleeIndex, const Signature&, const Vector<ExpressionType>& args, ExpressionType& result); > Result WARN_UNUSED_RETURN addCallIndirect(const Signature&, const Vector<ExpressionType>& args, ExpressionType& result); > >+ ALWAYS_INLINE void didDie(ExpressionType) { } >+ > bool hasMemory() const { return !!m_module.memory; } > > Validate(const ModuleInformation& module)
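The heart of the patch is a deliberately simple O0 allocator: every Tmp gets its own spill slot up front, registers are handed out greedily instruction by instruction, and live registers are written back to their slots at block boundaries so each block can start from a clean "everything lives in memory" state. Below is a minimal standalone sketch of that discipline; Tmp, Reg, and the emit helpers are simplified stand-ins, not JSC's real classes.

```cpp
#include <cstddef>
#include <vector>

// Simplified stand-ins for Air's Tmp and Reg; index -1 means "none".
struct Reg {
    int index = -1;
    explicit operator bool() const { return index >= 0; }
};

struct Tmp {
    int index = -1;
    explicit operator bool() const { return index >= 0; }
};

struct O0Allocator {
    std::vector<Reg> registers;          // usable registers, in priority order
    std::vector<Tmp> currentAllocation;  // register index -> occupying Tmp
    std::vector<Reg> tmpToReg;           // Tmp index -> register it lives in
    std::vector<int> spillSlotOffset;    // Tmp index -> its dedicated slot's FP offset

    void emitStore(Reg, int /*offset*/) { /* e.g. store64(reg, Address(fp, offset)) */ }
    void emitLoad(int /*offset*/, Reg) { /* e.g. load64(Address(fp, offset), reg) */ }

    // Write a Tmp's register back to its spill slot without freeing the register.
    void flush(Tmp tmp)
    {
        if (Reg reg = tmpToReg[tmp.index])
            emitStore(reg, spillSlotOffset[tmp.index]);
    }

    // Flush and free: after this, the Tmp lives only in memory.
    void spill(Tmp tmp)
    {
        if (Reg reg = tmpToReg[tmp.index]) {
            flush(tmp);
            currentAllocation[reg.index] = Tmp();
        }
        tmpToReg[tmp.index] = Reg();
    }

    // Bind tmp to reg, evicting any occupant. A use must reload the value
    // from the spill slot; a pure def can skip the load.
    void alloc(Tmp tmp, Reg reg, bool isDef)
    {
        if (Tmp occupying = currentAllocation[reg.index])
            spill(occupying);
        tmpToReg[tmp.index] = reg;
        currentAllocation[reg.index] = tmp;
        if (!isDef)
            emitLoad(spillSlotOffset[tmp.index], reg);
    }

    // At a terminal with successors, spill everything so every block can
    // begin with the invariant "all Tmps live in their slots".
    void spillEverything()
    {
        for (size_t i = 0; i < currentAllocation.size(); ++i) {
            if (Tmp tmp = currentAllocation[i])
                spill(tmp);
        }
    }
};
```

The second copy of the allocator, fused into code generation in AirGenerate.cpp, relaxes the block-boundary rule: when every successor has exactly one predecessor and has not been emitted yet, the current register assignment is copied into the successor's entry in currentAllocationMap instead of being flushed.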
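Both copies of the allocator avoid a full interference analysis. They number instructions with a single global counter and record, per Tmp, the largest index at which it is referenced; once the counter passes that end point, the Tmp's register can be recycled. A sketch of that pass under the same simplified types; the real patch also folds in liveness at block heads and tails via UnifiedTmpLiveness, which this sketch omits.

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// Each instruction just names the Tmps it reads or writes.
struct Inst {
    std::vector<int> tmps; // Tmp indices
};

using Block = std::vector<Inst>;

// For every Tmp, the last global instruction index at which it is seen.
std::vector<size_t> computeLiveRangeEnds(const std::vector<Block>& blocks, size_t numTmps)
{
    std::vector<size_t> liveRangeEnd(numTmps, 0);
    size_t globalIndex = 0;
    for (const Block& block : blocks) {
        for (const Inst& inst : block) {
            for (int tmp : inst.tmps)
                liveRangeEnd[tmp] = std::max(globalIndex, liveRangeEnd[tmp]);
            ++globalIndex;
        }
    }
    return liveRangeEnd;
}
```

During allocation, any register whose Tmp satisfies liveRangeEnd[tmp] < globalIndex is returned to the available set before the instruction's own operands are assigned.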
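On the Wasm side, the patch keeps the Tmp count (and therefore the number of spill slots) small by letting FunctionParser report when a popped value has been consumed for the last time. WasmAirIRGenerator then recycles the Tmp through a per-bank free list consulted by newTmp(). A self-contained sketch of that pool; the real code hangs m_freeGPs/m_freeFPs off the generator, and the other generators implement didDie() as a no-op.

```cpp
#include <vector>

enum Bank { GP, FP };

struct Tmp {
    int index;
    Bank bank;
};

class TmpPool {
public:
    // Prefer a recycled Tmp of the right bank; otherwise mint a fresh one.
    Tmp newTmp(Bank bank)
    {
        std::vector<Tmp>& freeList = bank == GP ? m_freeGPs : m_freeFPs;
        if (!freeList.empty()) {
            Tmp tmp = freeList.back();
            freeList.pop_back();
            return tmp;
        }
        return Tmp { m_nextIndex++, bank };
    }

    // Called when a value dies, e.g. the operands of a binary op once the
    // result has been produced.
    void didDie(Tmp tmp)
    {
        (tmp.bank == GP ? m_freeGPs : m_freeFPs).push_back(tmp);
    }

private:
    int m_nextIndex { 0 };
    std::vector<Tmp> m_freeGPs;
    std::vector<Tmp> m_freeFPs;
};
```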
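One nit in the patch's instrumentation: both copies of the rdtsc() helper ask the instruction for the high and low halves of the timestamp counter but then return only low, so measured deltas wrap every 2^32 cycles. If 64-bit counts are intended, the halves need to be combined, as in this sketch (x86-64 only; this is profiling scaffolding, not part of the allocator itself):

```cpp
#include <cstdint>

static inline uint64_t rdtsc()
{
    unsigned high;
    unsigned low;
    asm volatile ("rdtsc" : "=a"(low), "=d"(high));
    return (static_cast<uint64_t>(high) << 32) | low;
}
```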