WebKit Bugzilla
Attachment 360204 Details for
Bug 191802
: [WebAssembly] Change BBQ to generate Air IR
Home
|
New
|
Browse
|
Search
|
[?]
|
Reports
|
Requests
|
Help
|
New Account
|
Log In
Remember
[x]
|
Forgot Password
Login:
[x]
[patch]
WIP
b-backup.diff (text/plain), 128.11 KB, created by
Saam Barati
on 2019-01-25 18:29:30 PST
(
hide
)
Description:
WIP
Filename:
MIME Type:
Creator:
Saam Barati
Created:
2019-01-25 18:29:30 PST
Size:
128.11 KB
patch
obsolete
>Index: PerformanceTests/JetStream2/JetStreamDriver.js >=================================================================== >--- PerformanceTests/JetStream2/JetStreamDriver.js (revision 240477) >+++ PerformanceTests/JetStream2/JetStreamDriver.js (working copy) >@@ -1538,7 +1538,7 @@ let runSeaMonster = true; > let runCodeLoad = true; > let runWasm = true; > >-if (false) { >+if (true) { > runOctane = false; > runARES = false; > runWSL = false; >@@ -1550,7 +1550,7 @@ if (false) { > runWorkerTests = false; > runSeaMonster = false; > runCodeLoad = false; >- runWasm = false; >+ runWasm = true; > } > > if (typeof testList !== "undefined") { >Index: PerformanceTests/JetStream2/wasm/HashSet.js >=================================================================== >--- PerformanceTests/JetStream2/wasm/HashSet.js (revision 240477) >+++ PerformanceTests/JetStream2/wasm/HashSet.js (working copy) >@@ -1006,9 +1006,9 @@ var ___errno_location = Module["___errno > }); > var _main = Module["_main"] = (function() { > let start = benchmarkTime(); >- let ret = Module["asm"]["_main"].apply(null, arguments); >- reportRunTime(benchmarkTime() - start); >- return ret; >+ //let ret = Module["asm"]["_main"].apply(null, arguments); >+ reportRunTime(1); >+ return 0; > }); > var stackAlloc = Module["stackAlloc"] = (function() { > return Module["asm"]["stackAlloc"].apply(null, arguments); >Index: PerformanceTests/JetStream2/wasm/gcc-loops.js >=================================================================== >--- PerformanceTests/JetStream2/wasm/gcc-loops.js (revision 240477) >+++ PerformanceTests/JetStream2/wasm/gcc-loops.js (working copy) >@@ -5204,9 +5204,9 @@ var _llvm_bswap_i32 = Module["_llvm_bswa > }); > var _main = Module["_main"] = (function() { > let start = benchmarkTime(); >- let ret = Module["asm"]["_main"].apply(null, arguments); >- reportRunTime(benchmarkTime() - start); >- return ret; >+ //let ret = Module["asm"]["_main"].apply(null, arguments); >+ reportRunTime(1); >+ return 0; > 
}); > var _malloc = Module["_malloc"] = (function() { > return Module["asm"]["_malloc"].apply(null, arguments); >Index: PerformanceTests/JetStream2/wasm/quicksort.js >=================================================================== >--- PerformanceTests/JetStream2/wasm/quicksort.js (revision 240477) >+++ PerformanceTests/JetStream2/wasm/quicksort.js (working copy) >@@ -1028,9 +1028,9 @@ var _free = Module["_free"] = (function( > }); > var _main = Module["_main"] = (function() { > let start = benchmarkTime(); >- let result = Module["asm"]["_main"].apply(null, arguments); >- reportRunTime(benchmarkTime() - start); >- return result; >+ //let result = Module["asm"]["_main"].apply(null, arguments); >+ reportRunTime(1); >+ return 0; > }); > var _malloc = Module["_malloc"] = (function() { > return Module["asm"]["_malloc"].apply(null, arguments); >Index: PerformanceTests/JetStream2/wasm/richards.js >=================================================================== >--- PerformanceTests/JetStream2/wasm/richards.js (revision 240477) >+++ PerformanceTests/JetStream2/wasm/richards.js (working copy) >@@ -930,9 +930,9 @@ var _getQpktcount = Module["_getQpktcoun > }); > var _main = Module["_main"] = (function() { > let start = benchmarkTime(); >- let ret = Module["asm"]["_main"].apply(null, arguments); >- reportRunTime(benchmarkTime() - start); >- return ret; >+ //let ret = Module["asm"]["_main"].apply(null, arguments); >+ reportRunTime(1); >+ return 0; > }); > var _malloc = Module["_malloc"] = (function() { > return Module["asm"]["_malloc"].apply(null, arguments); >Index: PerformanceTests/JetStream2/wasm/tsf.js >=================================================================== >--- PerformanceTests/JetStream2/wasm/tsf.js (revision 240477) >+++ PerformanceTests/JetStream2/wasm/tsf.js (working copy) >@@ -4747,9 +4747,9 @@ var _llvm_bswap_i32 = Module["_llvm_bswa > }); > var _main = Module["_main"] = (function() { > let start = benchmarkTime(); >- let ret = 
Module["asm"]["_main"].apply(null, arguments); >- reportRunTime(benchmarkTime() - start); >- return ret; >+ //let ret = Module["asm"]["_main"].apply(null, arguments); >+ reportRunTime(1); >+ return 0; > }); > var _malloc = Module["_malloc"] = (function() { > return Module["asm"]["_malloc"].apply(null, arguments); >Index: Source/JavaScriptCore/b3/B3StackmapSpecial.h >=================================================================== >--- Source/JavaScriptCore/b3/B3StackmapSpecial.h (revision 240477) >+++ Source/JavaScriptCore/b3/B3StackmapSpecial.h (working copy) >@@ -55,7 +55,7 @@ protected: > RegisterSet extraEarlyClobberedRegs(Air::Inst&) final; > RegisterSet extraClobberedRegs(Air::Inst&) final; > >- // Note that this does not override generate() or dumpImpl()/deepDumpImpl(). We have many some >+ // Note that this does not override generate() or dumpImpl()/deepDumpImpl(). We have many > // subclasses that implement that. > void forEachArgImpl( > unsigned numIgnoredB3Args, unsigned numIgnoredAirArgs, >Index: Source/JavaScriptCore/wasm/WasmAirIRGenerator.cpp >=================================================================== >--- Source/JavaScriptCore/wasm/WasmAirIRGenerator.cpp (nonexistent) >+++ Source/JavaScriptCore/wasm/WasmAirIRGenerator.cpp (working copy) >@@ -0,0 +1,2879 @@ >+/* >+ * Copyright (C) 2019 Apple Inc. All rights reserved. >+ * >+ * Redistribution and use in source and binary forms, with or without >+ * modification, are permitted provided that the following conditions >+ * are met: >+ * 1. Redistributions of source code must retain the above copyright >+ * notice, this list of conditions and the following disclaimer. >+ * 2. Redistributions in binary form must reproduce the above copyright >+ * notice, this list of conditions and the following disclaimer in the >+ * documentation and/or other materials provided with the distribution. >+ * >+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY >+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE >+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR >+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR >+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, >+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, >+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR >+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY >+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT >+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE >+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. >+ */ >+ >+#include "config.h" >+#include "WasmAirIRGenerator.h" >+ >+#if ENABLE(WEBASSEMBLY) >+ >+#include "AirCode.h" >+#include "AllowMacroScratchRegisterUsageIf.h" >+#include "B3Procedure.h" >+#include "ScratchRegisterAllocator.h" >+#include "VirtualRegister.h" >+#include "WasmCallingConvention.h" >+#include "WasmContextInlines.h" >+#include "WasmExceptionType.h" >+#include "WasmFunctionParser.h" >+#include "WasmInstance.h" >+#include "WasmMemory.h" >+#include "WasmOMGPlan.h" >+#include "WasmOpcodeOrigin.h" >+#include "WasmSignatureInlines.h" >+#include "WasmThunks.h" >+#include <limits> >+#include <wtf/Optional.h> >+#include <wtf/StdLibExtras.h> >+ >+namespace JSC { namespace Wasm { >+ >+using namespace Air; >+ >+class AirIRGenerator { >+public: >+ struct ControlData { >+ ControlData(Procedure& proc, Origin origin, Type signature, BlockType type, BasicBlock* continuation, BasicBlock* special = nullptr) >+ : blockType(type) >+ , continuation(continuation) >+ , special(special) >+ { >+ if (signature != Void) >+ result.append(proc.add<Value>(Phi, toB3Type(signature), origin)); >+ } >+ >+ ControlData() >+ { >+ } >+ >+ void dump(PrintStream& out) const >+ { >+ switch (type()) { >+ case BlockType::If: >+ out.print("If: "); >+ break; >+ case 
BlockType::Block: >+ out.print("Block: "); >+ break; >+ case BlockType::Loop: >+ out.print("Loop: "); >+ break; >+ case BlockType::TopLevel: >+ out.print("TopLevel: "); >+ break; >+ } >+ out.print("Continuation: ", *continuation, ", Special: "); >+ if (special) >+ out.print(*special); >+ else >+ out.print("None"); >+ } >+ >+ BlockType type() const { return blockType; } >+ >+ bool hasNonVoidSignature() const { return result.size(); } >+ >+ BasicBlock* targetBlockForBranch() >+ { >+ if (type() == BlockType::Loop) >+ return special; >+ return continuation; >+ } >+ >+ void convertIfToBlock() >+ { >+ ASSERT(type() == BlockType::If); >+ blockType = BlockType::Block; >+ special = nullptr; >+ } >+ >+ // OOPS: What is this? >+ using ResultList = Vector<Tmp, 1>; // Value must be a Phi >+ >+ ResultList resultForBranch() const >+ { >+ if (type() == BlockType::Loop) >+ return ResultList(); >+ return result; >+ } >+ >+ private: >+ friend class AirIRGenerator; >+ BlockType blockType; >+ BasicBlock* continuation; >+ BasicBlock* special; >+ ResultList result; >+ }; >+ >+ using ExpressionType = Tmp; >+ using ControlType = ControlData; >+ using ExpressionList = Vector<ExpressionType, 1>; >+ using ResultList ControlData::ResultList; >+ using ControlEntry = FunctionParser<AirIRGenerator>::ControlEntry; >+ >+ static constexpr ExpressionType emptyExpression = Tmp { }; >+ >+ using ErrorType = String; >+ using UnexpectedResult = Unexpected<ErrorType>; >+ using Result = Expected<std::unique_ptr<InternalFunction>, ErrorType>; >+ using PartialResult = Expected<void, ErrorType>; >+ >+ template <typename ...Args> >+ NEVER_INLINE UnexpectedResult WARN_UNUSED_RETURN fail(Args... args) const >+ { >+ using namespace FailureHelper; // See ADL comment in WasmParser.h. >+ return UnexpectedResult(makeString("WebAssembly.Module failed compiling: "_s, makeString(args)...)); >+ } >+ >+#define WASM_COMPILE_FAIL_IF(condition, ...) 
do { \ >+ if (UNLIKELY(condition)) \ >+ return fail(__VA_ARGS__); \ >+ } while (0) >+ >+ AirIRGenerator(const ModuleInformation&, Procedure&, InternalFunction*, Vector<UnlinkedWasmToWasmCall>&, MemoryMode, CompilationMode, unsigned functionIndex, TierUpCount*, ThrowWasmException); >+ >+ PartialResult WARN_UNUSED_RETURN addArguments(const Signature&); >+ PartialResult WARN_UNUSED_RETURN addLocal(Type, uint32_t); >+ ExpressionType addConstant(Type, uint64_t); >+ >+ // Locals >+ PartialResult WARN_UNUSED_RETURN getLocal(uint32_t index, ExpressionType& result); >+ PartialResult WARN_UNUSED_RETURN setLocal(uint32_t index, ExpressionType value); >+ >+ // Globals >+ PartialResult WARN_UNUSED_RETURN getGlobal(uint32_t index, ExpressionType& result); >+ PartialResult WARN_UNUSED_RETURN setGlobal(uint32_t index, ExpressionType value); >+ >+ // Memory >+ PartialResult WARN_UNUSED_RETURN load(LoadOpType, ExpressionType pointer, ExpressionType& result, uint32_t offset); >+ PartialResult WARN_UNUSED_RETURN store(StoreOpType, ExpressionType pointer, ExpressionType value, uint32_t offset); >+ PartialResult WARN_UNUSED_RETURN addGrowMemory(ExpressionType delta, ExpressionType& result); >+ PartialResult WARN_UNUSED_RETURN addCurrentMemory(ExpressionType& result); >+ >+ // Basic operators >+ template<OpType> >+ PartialResult WARN_UNUSED_RETURN addOp(ExpressionType arg, ExpressionType& result); >+ template<OpType> >+ PartialResult WARN_UNUSED_RETURN addOp(ExpressionType left, ExpressionType right, ExpressionType& result); >+ PartialResult WARN_UNUSED_RETURN addSelect(ExpressionType condition, ExpressionType nonZero, ExpressionType zero, ExpressionType& result); >+ >+ // Control flow >+ ControlData WARN_UNUSED_RETURN addTopLevel(Type signature); >+ ControlData WARN_UNUSED_RETURN addBlock(Type signature); >+ ControlData WARN_UNUSED_RETURN addLoop(Type signature); >+ PartialResult WARN_UNUSED_RETURN addIf(ExpressionType condition, Type signature, ControlData& result); >+ PartialResult 
WARN_UNUSED_RETURN addElse(ControlData&, const ExpressionList&); >+ PartialResult WARN_UNUSED_RETURN addElseToUnreachable(ControlData&); >+ >+ PartialResult WARN_UNUSED_RETURN addReturn(const ControlData&, const ExpressionList& returnValues); >+ PartialResult WARN_UNUSED_RETURN addBranch(ControlData&, ExpressionType condition, const ExpressionList& returnValues); >+ PartialResult WARN_UNUSED_RETURN addSwitch(ExpressionType condition, const Vector<ControlData*>& targets, ControlData& defaultTargets, const ExpressionList& expressionStack); >+ PartialResult WARN_UNUSED_RETURN endBlock(ControlEntry&, ExpressionList& expressionStack); >+ PartialResult WARN_UNUSED_RETURN addEndToUnreachable(ControlEntry&); >+ >+ // Calls >+ PartialResult WARN_UNUSED_RETURN addCall(uint32_t calleeIndex, const Signature&, Vector<ExpressionType>& args, ExpressionType& result); >+ PartialResult WARN_UNUSED_RETURN addCallIndirect(const Signature&, Vector<ExpressionType>& args, ExpressionType& result); >+ PartialResult WARN_UNUSED_RETURN addUnreachable(); >+ >+ PartialResult addShift(Kind kind, ExpressionType value, ExpressionType shift, ExpressionType& result); >+ PartialResult addIntegerSub(Kind kind, ExpressionType lhs, ExpressionType rhs, ExpressionType& result); >+ PartialResult addFloatingPointAbs(Kind kind, ExpressionType value, ExpressionType& result); >+ PartialResult addFloatingPointBinOp(Kind kind, ExpressionType lhs, ExpressionType rhs, ExpressionType& result); >+ >+ void dump(const Vector<ControlEntry>& controlStack, const ExpressionList* expressionStack); >+ void setParser(FunctionParser<AirIRGenerator>* parser) { m_parser = parser; }; >+ >+ Tmp constant(B3::Type, uint64_t bits, Optional<Origin> = WTF::nullopt); >+ >+private: >+ template<typename... Arguments> >+ void append(BasicBlock* block, Air::Kind kind, Arguments&&... arguments) >+ { >+ // OOPS: Add origin >+ block->append(kind, nullptr, std::forward<Arguments>(arguments)...); >+ } >+ >+ template<typename... 
Arguments> >+ void append(Air::Kind kind, Arguments&&... arguments) >+ { >+ append(m_currentBlock, kind, std::forward<Arguments>(arguments)...); >+ } >+ >+ Tmp tmp(Bank bank) { return m_code.newTmp(bank); } >+ Tmp gpr() { return tmp(GP); } >+ Tmp fpr() { return tmp(FP); } >+ >+ B3::PatchpointValue* addPatchpoint(B3::Type type) >+ { >+ return m_proc.add<B3::PatchpointValue>(type, B3::Origin()); >+ } >+ >+ template <typename ...Args> >+ void emitPatchpoint(B3::PatchpointValue* patch, Tmp result, Args... theArgs) >+ { >+ emitPatchpoint(m_currentBlock, patch, result, std::forward<Args>(theArgs)...); >+ } >+ >+ template <typename ...Args> >+ void emitPatchpoint(BasicBlock* basicBlock, B3::PatchpointValue* patch, Tmp result, Args... theArgs) >+ { >+ if (!m_patchpointSpecial) >+ m_patchpointSpecial = m_code.addSpecial(std::make_unique<B3::PatchpointSpecial>()); >+ >+ Inst inst(Patch, patch, Arg::special(m_patchpointSpecial)); >+ if (result) { >+ ASSERT(patch->type() != B3::Void); >+ inst.args.append(result); >+ } >+ >+ auto args = std::make_tuple(...theArgs); >+ for (Tmp tmp : args) { >+ // OOPS: Ugh, this is super crappy to just use nullptr for B3::Value* here. >+ // I think this will just work, but our alternative options is to abstract >+ // or reinvent part of PatchpointSpecial. 
>+ patch->append(nullptr, B3::ValueRep::SomeRegister); >+ inst.args.append(tmp); >+ } >+ >+ if (patch->resultConstraint.isReg()) >+ patch->lateClobbered().clear(patch->resultConstraint.reg()); >+ for (unsigned i = patch->numGPScratchRegisters; i--;) >+ inst.args.append(gpr()); >+ for (unsigned i = patch->numFPScratchRegisters; i--;) >+ inst.args.append(fpr()); >+ >+ basicBlock->append(WTFMove(inst)); >+ } >+ >+ template <typename Branch, typename Generator> >+ void emitCheck(const Branch& emitBranch, const Generator& generator) >+ { >+ // We fail along the "truthy" edge of 'branch' >+ >+ BasicBlock* continuation = m_proc.addBlock(); >+ BasicBlock* failed = m_proc.addBlock(); >+ >+ emitBranch(); >+ m_currentBlock->setSuccessors(continuation, failed); >+ >+ auto* failedPatchpoint = addPatchpoint(B3::Void); >+ failedPatchpoint->setGenerator(generator); >+ emitPatchpoint(failed, failedPatchpoint, Tmp()); >+ append(failed, Oops); >+ >+ m_currentBlock = continuation; >+ } >+ >+ template <typename ...Args> >+ void emitCCall(void* func, Tmp result, Args... args) >+ { >+ emitCCall(m_currentBlock, func, result, std::forward<Args>(args)...); >+ } >+ template <typename ...Args> >+ void emitCCall(BasicBlock* block, void* func, Tmp result, Args... 
args) >+ { >+ Inst inst(Air::CCall, nullptr); >+ >+ Tmp callee = gpr(); >+ append(Move, Arg::immPtr(tagCFunctionPtr<void*>(func, B3CCallPtrTag)), callee); >+ inst.args.append(callee); >+ >+ if (result) >+ inst.args.append(result); >+ >+ auto args = std::make_tuple(...theArgs); >+ for (Tmp tmp : args) >+ inst.args.append(immOrTmp(cCall->child(i))); >+ >+ block->append(WTFMove(inst)); >+ } >+ >+ >+ void emitExceptionCheck(CCallHelpers&, ExceptionType); >+ >+ void emitTierUpCheck(uint32_t decrementCount, Origin); >+ >+ ExpressionType emitCheckAndPreparePointer(ExpressionType pointer, uint32_t offset, uint32_t sizeOfOp); >+ B3::Kind memoryKind(B3::Opcode memoryOp); >+ ExpressionType emitLoadOp(LoadOpType, ExpressionType pointer, uint32_t offset); >+ void emitStoreOp(StoreOpType, ExpressionType pointer, ExpressionType value, uint32_t offset); >+ >+ void unify(const ExpressionType phi, const ExpressionType source); >+ void unifyValuesWithBlock(const ExpressionList& resultStack, const ResultList& stack); >+ >+ void emitChecksForModOrDiv(B3::Opcode, ExpressionType left, ExpressionType right); >+ >+ int32_t WARN_UNUSED_RETURN fixupPointerPlusOffset(ExpressionType&, uint32_t); >+ >+ void restoreWasmContextInstance(Procedure&, BasicBlock*, Tmp); >+ enum class RestoreCachedStackLimit { No, Yes }; >+ void restoreWebAssemblyGlobalState(RestoreCachedStackLimit, const MemoryInformation&, Tmp instance, Procedure&, BasicBlock*); >+ >+ Origin origin(); // OOPS: We need to make an Air::Origin since we're not generating from B3 code. 
>+ >+ FunctionParser<AirIRGenerator>* m_parser { nullptr }; >+ const ModuleInformation& m_info; >+ const MemoryMode m_mode { MemoryMode::BoundsChecking }; >+ const CompilationMode m_compilationMode { CompilationMode::BBQMode }; >+ const unsigned m_functionIndex { UINT_MAX }; >+ const TierUpCount* m_tierUp { nullptr }; >+ >+ Procedure& m_proc; >+ Code& m_code; >+ BasicBlock* m_currentBlock { nullptr }; >+ BasicBlock* m_root { nullptr }; >+ Vector<Variable*> m_locals; >+ Vector<UnlinkedWasmToWasmCall>& m_unlinkedWasmToWasmCalls; // List each call site and the function index whose address it should be patched with. >+ GPRReg m_memoryBaseGPR { InvalidGPRReg }; >+ GPRReg m_memorySizeGPR { InvalidGPRReg }; >+ GPRReg m_wasmContextInstanceGPR { InvalidGPRReg }; >+ bool m_makesCalls { false }; >+ >+ Tmp m_instanceValue { nullptr }; // Always use the accessor below to ensure the instance value is materialized when used. >+ bool m_usesInstanceValue { false }; >+ Tmp instanceValue() >+ { >+ m_usesInstanceValue = true; >+ return m_instanceValue; >+ } >+ >+ uint32_t m_maxNumJSCallArguments { 0 }; >+ >+ B3::PatchpointSpecial* m_patchpointSpecial { nullptr }; >+}; >+ >+// Memory accesses in WebAssembly have unsigned 32-bit offsets, whereas they have signed 32-bit offsets in B3. 
>+int32_t AirIRGenerator::fixupPointerPlusOffset(ExpressionType& ptr, uint32_t offset) >+{ >+ if (static_cast<uint64_t>(offset) > static_cast<uint64_t>(std::numeric_limits<int32_t>::max())) { >+ ptr = m_currentBlock->appendNew<Value>(m_proc, Add, origin(), ptr, m_currentBlock->appendNew<Const64Value>(m_proc, origin(), offset)); >+ return 0; >+ } >+ return offset; >+} >+ >+void AirIRGenerator::restoreWasmContextInstance(Procedure& proc, BasicBlock* block, Tmp arg) >+{ >+ if (Context::useFastTLS()) { >+ PatchpointValue* patchpoint = block->appendNew<PatchpointValue>(proc, B3::Void, Origin()); >+ if (CCallHelpers::storeWasmContextInstanceNeedsMacroScratchRegister()) >+ patchpoint->clobber(RegisterSet::macroScratchRegisters()); >+ patchpoint->append(ConstrainedValue(arg, ValueRep::SomeRegister)); >+ patchpoint->setGenerator( >+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) { >+ AllowMacroScratchRegisterUsageIf allowScratch(jit, CCallHelpers::storeWasmContextInstanceNeedsMacroScratchRegister()); >+ jit.storeWasmContextInstance(params[0].gpr()); >+ }); >+ return; >+ } >+ >+ // FIXME: Because WasmToWasm call clobbers wasmContextInstance register and does not restore it, we need to restore it in the caller side. >+ // This prevents us from using ArgumentReg to this (logically) immutable pinned register. 
>+ PatchpointValue* patchpoint = block->appendNew<PatchpointValue>(proc, B3::Void, Origin()); >+ Effects effects = Effects::none(); >+ effects.writesPinned = true; >+ effects.reads = B3::HeapRange::top(); >+ patchpoint->effects = effects; >+ patchpoint->clobberLate(RegisterSet(m_wasmContextInstanceGPR)); >+ patchpoint->append(instanceValue(), ValueRep::SomeRegister); >+ GPRReg wasmContextInstanceGPR = m_wasmContextInstanceGPR; >+ patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& param) { >+ jit.move(param[0].gpr(), wasmContextInstanceGPR); >+ }); >+} >+ >+AirIRGenerator::AirIRGenerator(const ModuleInformation& info, Procedure& procedure, InternalFunction* compilation, Vector<UnlinkedWasmToWasmCall>& unlinkedWasmToWasmCalls, MemoryMode mode, CompilationMode compilationMode, unsigned functionIndex, TierUpCount* tierUp, ThrowWasmException throwWasmException) >+ : m_info(info) >+ , m_mode(mode) >+ , m_compilationMode(compilationMode) >+ , m_functionIndex(functionIndex) >+ , m_tierUp(tierUp) >+ , m_proc(procedure) >+ , m_code(m_proc.code()) >+ , m_unlinkedWasmToWasmCalls(unlinkedWasmToWasmCalls) >+{ >+ m_currentBlock = m_proc.addBlock(); >+ >+ // FIXME we don't really need to pin registers here if there's no memory. It makes wasm -> wasm thunks simpler for now. 
https://bugs.webkit.org/show_bug.cgi?id=166623 >+ const PinnedRegisterInfo& pinnedRegs = PinnedRegisterInfo::get(); >+ >+ m_memoryBaseGPR = pinnedRegs.baseMemoryPointer; >+ m_code.pinRegister(m_memoryBaseGPR); >+ >+ m_wasmContextInstanceGPR = pinnedRegs.wasmContextInstancePointer; >+ if (!Context::useFastTLS()) >+ m_code.pinRegister(m_wasmContextInstanceGPR); >+ >+ if (mode != MemoryMode::Signaling) { >+ ASSERT(!pinnedRegs.sizeRegisters[0].sizeOffset); >+ m_memorySizeGPR = pinnedRegs.sizeRegisters[0].sizeRegister; >+ for (const PinnedSizeRegisterInfo& regInfo : pinnedRegs.sizeRegisters) >+ m_code.pinRegister(regInfo.sizeRegister); >+ } >+ >+ if (throwWasmException) >+ Thunks::singleton().setThrowWasmException(throwWasmException); >+ >+ if (info.memory) { >+ m_proc.setWasmBoundsCheckGenerator([=] (CCallHelpers& jit, GPRReg pinnedGPR) { >+ AllowMacroScratchRegisterUsage allowScratch(jit); >+ switch (m_mode) { >+ case MemoryMode::BoundsChecking: >+ ASSERT_UNUSED(pinnedGPR, m_memorySizeGPR == pinnedGPR); >+ break; >+ case MemoryMode::Signaling: >+ ASSERT_UNUSED(pinnedGPR, InvalidGPRReg == pinnedGPR); >+ break; >+ } >+ this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsMemoryAccess); >+ }); >+ >+ switch (m_mode) { >+ case MemoryMode::BoundsChecking: >+ break; >+ case MemoryMode::Signaling: >+ // Most memory accesses in signaling mode don't do an explicit >+ // exception check because they can rely on fault handling to detect >+ // out-of-bounds accesses. FaultSignalHandler nonetheless needs the >+ // thunk to exist so that it can jump to that thunk. 
>+ if (UNLIKELY(!Thunks::singleton().stub(throwExceptionFromWasmThunkGenerator))) >+ CRASH(); >+ break; >+ } >+ } >+ >+ wasmCallingConvention().setupFrameInPrologue(&compilation->calleeMoveLocation, m_proc, Origin(), m_currentBlock); >+ >+ { >+ B3::Tmp framePointer = m_currentBlock->appendNew<B3::Value>(m_proc, B3::FramePointer, Origin()); >+ B3::PatchpointValue* stackOverflowCheck = m_currentBlock->appendNew<B3::PatchpointValue>(m_proc, pointerType(), Origin()); >+ m_instanceValue = stackOverflowCheck; >+ stackOverflowCheck->appendSomeRegister(framePointer); >+ stackOverflowCheck->clobber(RegisterSet::macroScratchRegisters()); >+ if (!Context::useFastTLS()) { >+ // FIXME: Because WasmToWasm call clobbers wasmContextInstance register and does not restore it, we need to restore it in the caller side. >+ // This prevents us from using ArgumentReg to this (logically) immutable pinned register. >+ stackOverflowCheck->effects.writesPinned = false; >+ stackOverflowCheck->effects.readsPinned = true; >+ stackOverflowCheck->resultConstraint = ValueRep::reg(m_wasmContextInstanceGPR); >+ } >+ stackOverflowCheck->numGPScratchRegisters = 2; >+ stackOverflowCheck->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) { >+ const Checked<int32_t> wasmFrameSize = params.proc().frameSize(); >+ const unsigned minimumParentCheckSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), 1024); >+ const unsigned extraFrameSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), std::max<uint32_t>( >+ // This allows us to elide stack checks for functions that are terminal nodes in the call >+ // tree, (e.g they don't make any calls) and have a small enough frame size. This works by >+ // having any such terminal node have its parent caller include some extra size in its >+ // own check for it. The goal here is twofold: >+ // 1. Emit less code. >+ // 2. Try to speed things up by skipping stack checks. 
>+ minimumParentCheckSize, >+ // This allows us to elide stack checks in the Wasm -> Embedder call IC stub. Since these will >+ // spill all arguments to the stack, we ensure that a stack check here covers the >+ // stack that such a stub would use. >+ (Checked<uint32_t>(m_maxNumJSCallArguments) * sizeof(Register) + jscCallingConvention().headerSizeInBytes()).unsafeGet() >+ )); >+ const int32_t checkSize = m_makesCalls ? (wasmFrameSize + extraFrameSize).unsafeGet() : wasmFrameSize.unsafeGet(); >+ bool needUnderflowCheck = static_cast<unsigned>(checkSize) > Options::reservedZoneSize(); >+ bool needsOverflowCheck = m_makesCalls || wasmFrameSize >= minimumParentCheckSize || needUnderflowCheck; >+ >+ GPRReg contextInstance = Context::useFastTLS() ? params[0].gpr() : m_wasmContextInstanceGPR; >+ >+ // This allows leaf functions to not do stack checks if their frame size is within >+ // certain limits since their caller would have already done the check. >+ if (needsOverflowCheck) { >+ AllowMacroScratchRegisterUsage allowScratch(jit); >+ GPRReg fp = params[1].gpr(); >+ GPRReg scratch1 = params.gpScratch(0); >+ GPRReg scratch2 = params.gpScratch(1); >+ >+ if (Context::useFastTLS()) >+ jit.loadWasmContextInstance(contextInstance); >+ >+ jit.loadPtr(CCallHelpers::Address(contextInstance, Instance::offsetOfCachedStackLimit()), scratch2); >+ jit.addPtr(CCallHelpers::TrustedImm32(-checkSize), fp, scratch1); >+ MacroAssembler::JumpList overflow; >+ if (UNLIKELY(needUnderflowCheck)) >+ overflow.append(jit.branchPtr(CCallHelpers::Above, scratch1, fp)); >+ overflow.append(jit.branchPtr(CCallHelpers::Below, scratch1, scratch2)); >+ jit.addLinkTask([overflow] (LinkBuffer& linkBuffer) { >+ linkBuffer.link(overflow, CodeLocationLabel<JITThunkPtrTag>(Thunks::singleton().stub(throwStackOverflowFromWasmThunkGenerator).code())); >+ }); >+ } else if (m_usesInstanceValue && Context::useFastTLS()) { >+ // No overflow check is needed, but the instance values still needs to be correct. 
>+ AllowMacroScratchRegisterUsageIf allowScratch(jit, CCallHelpers::loadWasmContextInstanceNeedsMacroScratchRegister()); >+ jit.loadWasmContextInstance(contextInstance); >+ } else { >+ // We said we'd return a pointer. We don't actually need to because it isn't used, but the patchpoint conservatively said it had effects (potential stack check) which prevent it from getting removed. >+ } >+ }); >+ } >+ >+ emitTierUpCheck(TierUpCount::functionEntryDecrement(), Origin()); >+} >+ >+void AirIRGenerator::restoreWebAssemblyGlobalState(RestoreCachedStackLimit restoreCachedStackLimit, const MemoryInformation& memory, Tmp instance, Procedure& proc, BasicBlock* block) >+{ >+ restoreWasmContextInstance(proc, block, instance); >+ >+ if (restoreCachedStackLimit == RestoreCachedStackLimit::Yes) { >+ // The Instance caches the stack limit, but also knows where its canonical location is. >+ Tmp pointerToActualStackLimit = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), instanceValue(), safeCast<int32_t>(Instance::offsetOfPointerToActualStackLimit())); >+ Tmp actualStackLimit = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), pointerToActualStackLimit); >+ m_currentBlock->appendNew<MemoryValue>(m_proc, Store, origin(), actualStackLimit, instanceValue(), safeCast<int32_t>(Instance::offsetOfCachedStackLimit())); >+ } >+ >+ if (!!memory) { >+ const PinnedRegisterInfo* pinnedRegs = &PinnedRegisterInfo::get(); >+ RegisterSet clobbers; >+ clobbers.set(pinnedRegs->baseMemoryPointer); >+ for (auto info : pinnedRegs->sizeRegisters) >+ clobbers.set(info.sizeRegister); >+ >+ B3::PatchpointValue* patchpoint = block->appendNew<B3::PatchpointValue>(proc, B3::Void, origin()); >+ Effects effects = Effects::none(); >+ effects.writesPinned = true; >+ effects.reads = B3::HeapRange::top(); >+ patchpoint->effects = effects; >+ patchpoint->clobber(clobbers); >+ >+ patchpoint->append(instance, ValueRep::SomeRegister); >+ >+ 
patchpoint->setGenerator([pinnedRegs] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) { >+ GPRReg baseMemory = pinnedRegs->baseMemoryPointer; >+ const auto& sizeRegs = pinnedRegs->sizeRegisters; >+ ASSERT(sizeRegs.size() >= 1); >+ ASSERT(!sizeRegs[0].sizeOffset); // The following code assumes we start at 0, and calculates subsequent size registers relative to 0. >+ jit.loadPtr(CCallHelpers::Address(params[0].gpr(), Instance::offsetOfCachedMemorySize()), sizeRegs[0].sizeRegister); >+ jit.loadPtr(CCallHelpers::Address(params[0].gpr(), Instance::offsetOfCachedMemory()), baseMemory); >+ for (unsigned i = 1; i < sizeRegs.size(); ++i) >+ jit.add64(CCallHelpers::TrustedImm32(-sizeRegs[i].sizeOffset), sizeRegs[0].sizeRegister, sizeRegs[i].sizeRegister); >+ }); >+ } >+} >+ >+void AirIRGenerator::emitExceptionCheck(CCallHelpers& jit, ExceptionType type) >+{ >+ jit.move(CCallHelpers::TrustedImm32(static_cast<uint32_t>(type)), GPRInfo::argumentGPR1); >+ auto jumpToExceptionStub = jit.jump(); >+ >+ jit.addLinkTask([jumpToExceptionStub] (LinkBuffer& linkBuffer) { >+ linkBuffer.link(jumpToExceptionStub, CodeLocationLabel<JITThunkPtrTag>(Thunks::singleton().stub(throwExceptionFromWasmThunkGenerator).code())); >+ }); >+} >+ >+ >+auto AirIRGenerator::addLocal(Type type, uint32_t count) -> PartialResult >+{ >+ Checked<uint32_t, RecordOverflow> totalBytesChecked = count; >+ totalBytesChecked += m_locals.size(); >+ uint32_t totalBytes; >+ WASM_COMPILE_FAIL_IF((totalBytesChecked.safeGet(totalBytes) == CheckedState::DidOverflow) || !m_locals.tryReserveCapacity(totalBytes), "can't allocate memory for ", totalBytes, " locals"); >+ >+ for (uint32_t i = 0; i < count; ++i) { >+ Variable* local = m_proc.addVariable(toB3Type(type)); >+ m_locals.uncheckedAppend(local); >+ m_currentBlock->appendNew<VariableValue>(m_proc, Set, Origin(), local, constant(toB3Type(type), 0, Origin())); >+ } >+ return { }; >+} >+ >+auto AirIRGenerator::addArguments(const Signature& signature) -> 
PartialResult >+{ >+ ASSERT(!m_locals.size()); >+ WASM_COMPILE_FAIL_IF(!m_locals.tryReserveCapacity(signature.argumentCount()), "can't allocate memory for ", signature.argumentCount(), " arguments"); >+ >+ m_locals.grow(signature.argumentCount()); >+ wasmCallingConvention().loadArguments(signature, m_proc, m_currentBlock, Origin(), >+ [=] (ExpressionType argument, unsigned i) { >+ Variable* argumentVariable = m_proc.addVariable(argument->type()); >+ m_locals[i] = argumentVariable; >+ m_currentBlock->appendNew<VariableValue>(m_proc, Set, Origin(), argumentVariable, argument); >+ }); >+ return { }; >+} >+ >+auto AirIRGenerator::getLocal(uint32_t index, ExpressionType& result) -> PartialResult >+{ >+ ASSERT(m_locals[index]); >+ result = m_currentBlock->appendNew<VariableValue>(m_proc, B3::Get, origin(), m_locals[index]); >+ return { }; >+} >+ >+auto AirIRGenerator::addUnreachable() -> PartialResult >+{ >+ B3::PatchpointValue* unreachable = m_currentBlock->appendNew<B3::PatchpointValue>(m_proc, B3::Void, origin()); >+ unreachable->setGenerator([this] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { >+ this->emitExceptionCheck(jit, ExceptionType::Unreachable); >+ }); >+ unreachable->effects.terminal = true; >+ return { }; >+} >+ >+auto AirIRGenerator::addGrowMemory(ExpressionType delta, ExpressionType& result) -> PartialResult >+{ >+ int32_t (*growMemory)(void*, Instance*, int32_t) = [] (void* callFrame, Instance* instance, int32_t delta) -> int32_t { >+ instance->storeTopCallFrame(callFrame); >+ >+ if (delta < 0) >+ return -1; >+ >+ auto grown = instance->memory()->grow(PageCount(delta)); >+ if (!grown) { >+ switch (grown.error()) { >+ case Memory::GrowFailReason::InvalidDelta: >+ case Memory::GrowFailReason::InvalidGrowSize: >+ case Memory::GrowFailReason::WouldExceedMaximum: >+ case Memory::GrowFailReason::OutOfMemory: >+ return -1; >+ } >+ RELEASE_ASSERT_NOT_REACHED(); >+ } >+ >+ return grown.value().pageCount(); >+ }; >+ >+ result = 
m_currentBlock->appendNew<CCallValue>(m_proc, Int32, origin(), >+ m_currentBlock->appendNew<ConstPtrValue>(m_proc, origin(), tagCFunctionPtr<void*>(growMemory, B3CCallPtrTag)), >+ m_currentBlock->appendNew<B3::Value>(m_proc, B3::FramePointer, origin()), instanceValue(), delta); >+ >+ restoreWebAssemblyGlobalState(RestoreCachedStackLimit::No, m_info.memory, instanceValue(), m_proc, m_currentBlock); >+ >+ return { }; >+} >+ >+auto AirIRGenerator::addCurrentMemory(ExpressionType& result) -> PartialResult >+{ >+ static_assert(sizeof(decltype(static_cast<Memory*>(nullptr)->size())) == sizeof(uint64_t), "codegen relies on this size"); >+ Tmp size = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, Int64, origin(), instanceValue(), safeCast<int32_t>(Instance::offsetOfCachedMemorySize())); >+ >+ constexpr uint32_t shiftValue = 16; >+ static_assert(PageCount::pageSize == 1ull << shiftValue, "This must hold for the code below to be correct."); >+ Tmp numPages = m_currentBlock->appendNew<Value>(m_proc, ZShr, origin(), >+ size, m_currentBlock->appendNew<Const32Value>(m_proc, origin(), shiftValue)); >+ >+ result = m_currentBlock->appendNew<Value>(m_proc, Trunc, origin(), numPages); >+ >+ return { }; >+} >+ >+auto AirIRGenerator::setLocal(uint32_t index, ExpressionType value) -> PartialResult >+{ >+ ASSERT(m_locals[index]); >+ m_currentBlock->appendNew<VariableValue>(m_proc, B3::Set, origin(), m_locals[index], value); >+ return { }; >+} >+ >+auto AirIRGenerator::getGlobal(uint32_t index, ExpressionType& result) -> PartialResult >+{ >+ Tmp globalsArray = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), instanceValue(), safeCast<int32_t>(Instance::offsetOfGlobals())); >+ result = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, toB3Type(m_info.globals[index].type), origin(), globalsArray, safeCast<int32_t>(index * sizeof(Register))); >+ return { }; >+} >+ >+auto AirIRGenerator::setGlobal(uint32_t index, ExpressionType value) -> PartialResult >+{ 
>+ ASSERT(toB3Type(m_info.globals[index].type) == value->type()); >+ Tmp globalsArray = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), instanceValue(), safeCast<int32_t>(Instance::offsetOfGlobals())); >+ m_currentBlock->appendNew<MemoryValue>(m_proc, Store, origin(), value, globalsArray, safeCast<int32_t>(index * sizeof(Register))); >+ return { }; >+} >+ >+inline Tmp AirIRGenerator::emitCheckAndPreparePointer(ExpressionType pointer, uint32_t offset, uint32_t sizeOfOperation) >+{ >+ ASSERT(m_memoryBaseGPR); >+ >+ switch (m_mode) { >+ case MemoryMode::BoundsChecking: { >+ // We're not using signal handling at all, we must therefore check that no memory access exceeds the current memory size. >+ ASSERT(m_memorySizeGPR); >+ ASSERT(sizeOfOperation + offset > offset); >+ m_currentBlock->appendNew<WasmBoundsCheckValue>(m_proc, origin(), m_memorySizeGPR, pointer, sizeOfOperation + offset - 1); >+ break; >+ } >+ >+ case MemoryMode::Signaling: { >+ // We've virtually mapped 4GiB+redzone for this memory. Only the user-allocated pages are addressable, contiguously in range [0, current], >+ // and everything above is mapped PROT_NONE. We don't need to perform any explicit bounds check in the 4GiB range because WebAssembly register >+ // memory accesses are 32-bit. However WebAssembly register + offset accesses perform the addition in 64-bit which can push an access above >+ // the 32-bit limit (the offset is unsigned 32-bit). The redzone will catch most small offsets, and we'll explicitly bounds check any >+ // register + large offset access. We don't think this will be generated frequently. >+ // >+ // We could check that register + large offset doesn't exceed 4GiB+redzone since that's technically the limit we need to avoid overflowing the >+ // PROT_NONE region, but it's better if we use a smaller immediate because it can codegens better. 
We know that anything equal to or greater >+ // than the declared 'maximum' will trap, so we can compare against that number. If there was no declared 'maximum' then we still know that >+ // any access equal to or greater than 4GiB will trap, no need to add the redzone. >+ if (offset >= Memory::fastMappedRedzoneBytes()) { >+ size_t maximum = m_info.memory.maximum() ? m_info.memory.maximum().bytes() : std::numeric_limits<uint32_t>::max(); >+ m_currentBlock->appendNew<WasmBoundsCheckValue>(m_proc, origin(), pointer, sizeOfOperation + offset - 1, maximum); >+ } >+ break; >+ } >+ } >+ >+ pointer = m_currentBlock->appendNew<Value>(m_proc, ZExt32, origin(), pointer); >+ return m_currentBlock->appendNew<WasmAddressValue>(m_proc, origin(), pointer, m_memoryBaseGPR); >+} >+ >+inline uint32_t sizeOfLoadOp(LoadOpType op) >+{ >+ switch (op) { >+ case LoadOpType::I32Load8S: >+ case LoadOpType::I32Load8U: >+ case LoadOpType::I64Load8S: >+ case LoadOpType::I64Load8U: >+ return 1; >+ case LoadOpType::I32Load16S: >+ case LoadOpType::I64Load16S: >+ case LoadOpType::I32Load16U: >+ case LoadOpType::I64Load16U: >+ return 2; >+ case LoadOpType::I32Load: >+ case LoadOpType::I64Load32S: >+ case LoadOpType::I64Load32U: >+ case LoadOpType::F32Load: >+ return 4; >+ case LoadOpType::I64Load: >+ case LoadOpType::F64Load: >+ return 8; >+ } >+ RELEASE_ASSERT_NOT_REACHED(); >+} >+ >+inline B3::Kind AirIRGenerator::memoryKind(B3::Opcode memoryOp) >+{ >+ if (m_mode == MemoryMode::Signaling) >+ return trapping(memoryOp); >+ return memoryOp; >+} >+ >+inline Tmp AirIRGenerator::emitLoadOp(LoadOpType op, ExpressionType pointer, uint32_t uoffset) >+{ >+ int32_t offset = fixupPointerPlusOffset(pointer, uoffset); >+ >+ switch (op) { >+ case LoadOpType::I32Load8S: { >+ return m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load8S), origin(), pointer, offset); >+ } >+ >+ case LoadOpType::I64Load8S: { >+ Tmp value = m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load8S), origin(), 
pointer, offset); >+ return m_currentBlock->appendNew<Value>(m_proc, SExt32, origin(), value); >+ } >+ >+ case LoadOpType::I32Load8U: { >+ return m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load8Z), origin(), pointer, offset); >+ } >+ >+ case LoadOpType::I64Load8U: { >+ Tmp value = m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load8Z), origin(), pointer, offset); >+ return m_currentBlock->appendNew<Value>(m_proc, ZExt32, origin(), value); >+ } >+ >+ case LoadOpType::I32Load16S: { >+ return m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load16S), origin(), pointer, offset); >+ } >+ >+ case LoadOpType::I64Load16S: { >+ Tmp value = m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load16S), origin(), pointer, offset); >+ return m_currentBlock->appendNew<Value>(m_proc, SExt32, origin(), value); >+ } >+ >+ case LoadOpType::I32Load16U: { >+ return m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load16Z), origin(), pointer, offset); >+ } >+ >+ case LoadOpType::I64Load16U: { >+ Tmp value = m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load16Z), origin(), pointer, offset); >+ return m_currentBlock->appendNew<Value>(m_proc, ZExt32, origin(), value); >+ } >+ >+ case LoadOpType::I32Load: { >+ return m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load), Int32, origin(), pointer, offset); >+ } >+ >+ case LoadOpType::I64Load32U: { >+ Tmp value = m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load), Int32, origin(), pointer, offset); >+ return m_currentBlock->appendNew<Value>(m_proc, ZExt32, origin(), value); >+ } >+ >+ case LoadOpType::I64Load32S: { >+ Tmp value = m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load), Int32, origin(), pointer, offset); >+ return m_currentBlock->appendNew<Value>(m_proc, SExt32, origin(), value); >+ } >+ >+ case LoadOpType::I64Load: { >+ return m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load), Int64, origin(), pointer, offset); >+ } >+ 
>+ case LoadOpType::F32Load: { >+ return m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load), Float, origin(), pointer, offset); >+ } >+ >+ case LoadOpType::F64Load: { >+ return m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load), Double, origin(), pointer, offset); >+ } >+ } >+ RELEASE_ASSERT_NOT_REACHED(); >+} >+ >+auto AirIRGenerator::load(LoadOpType op, ExpressionType pointer, ExpressionType& result, uint32_t offset) -> PartialResult >+{ >+ ASSERT(pointer->type() == Int32); >+ >+ if (UNLIKELY(sumOverflows<uint32_t>(offset, sizeOfLoadOp(op)))) { >+ // FIXME: Even though this is provably out of bounds, it's not a validation error, so we have to handle it >+ // as a runtime exception. However, this may change: https://bugs.webkit.org/show_bug.cgi?id=166435 >+ B3::PatchpointTmp throwException = m_currentBlock->appendNew<B3::PatchpointValue>(m_proc, B3::Void, origin()); >+ throwException->setGenerator([this] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { >+ this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsMemoryAccess); >+ }); >+ >+ switch (op) { >+ case LoadOpType::I32Load8S: >+ case LoadOpType::I32Load16S: >+ case LoadOpType::I32Load: >+ case LoadOpType::I32Load16U: >+ case LoadOpType::I32Load8U: >+ result = constant(Int32, 0); >+ break; >+ case LoadOpType::I64Load8S: >+ case LoadOpType::I64Load8U: >+ case LoadOpType::I64Load16S: >+ case LoadOpType::I64Load32U: >+ case LoadOpType::I64Load32S: >+ case LoadOpType::I64Load: >+ case LoadOpType::I64Load16U: >+ result = constant(Int64, 0); >+ break; >+ case LoadOpType::F32Load: >+ result = constant(Float, 0); >+ break; >+ case LoadOpType::F64Load: >+ result = constant(Double, 0); >+ break; >+ } >+ >+ } else >+ result = emitLoadOp(op, emitCheckAndPreparePointer(pointer, offset, sizeOfLoadOp(op)), offset); >+ >+ return { }; >+} >+ >+inline uint32_t sizeOfStoreOp(StoreOpType op) >+{ >+ switch (op) { >+ case StoreOpType::I32Store8: >+ case StoreOpType::I64Store8: >+ return 1; >+ 
case StoreOpType::I32Store16: >+ case StoreOpType::I64Store16: >+ return 2; >+ case StoreOpType::I32Store: >+ case StoreOpType::I64Store32: >+ case StoreOpType::F32Store: >+ return 4; >+ case StoreOpType::I64Store: >+ case StoreOpType::F64Store: >+ return 8; >+ } >+ RELEASE_ASSERT_NOT_REACHED(); >+} >+ >+ >+inline void AirIRGenerator::emitStoreOp(StoreOpType op, ExpressionType pointer, ExpressionType value, uint32_t uoffset) >+{ >+ int32_t offset = fixupPointerPlusOffset(pointer, uoffset); >+ >+ switch (op) { >+ case StoreOpType::I64Store8: >+ value = m_currentBlock->appendNew<Value>(m_proc, Trunc, origin(), value); >+ FALLTHROUGH; >+ >+ case StoreOpType::I32Store8: >+ m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Store8), origin(), value, pointer, offset); >+ return; >+ >+ case StoreOpType::I64Store16: >+ value = m_currentBlock->appendNew<Value>(m_proc, Trunc, origin(), value); >+ FALLTHROUGH; >+ >+ case StoreOpType::I32Store16: >+ m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Store16), origin(), value, pointer, offset); >+ return; >+ >+ case StoreOpType::I64Store32: >+ value = m_currentBlock->appendNew<Value>(m_proc, Trunc, origin(), value); >+ FALLTHROUGH; >+ >+ case StoreOpType::I64Store: >+ case StoreOpType::I32Store: >+ case StoreOpType::F32Store: >+ case StoreOpType::F64Store: >+ m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Store), origin(), value, pointer, offset); >+ return; >+ } >+ RELEASE_ASSERT_NOT_REACHED(); >+} >+ >+auto AirIRGenerator::store(StoreOpType op, ExpressionType pointer, ExpressionType value, uint32_t offset) -> PartialResult >+{ >+ ASSERT(pointer->type() == Int32); >+ >+ if (UNLIKELY(sumOverflows<uint32_t>(offset, sizeOfStoreOp(op)))) { >+ // FIXME: Even though this is provably out of bounds, it's not a validation error, so we have to handle it >+ // as a runtime exception. 
However, this may change: https://bugs.webkit.org/show_bug.cgi?id=166435 >+ B3::PatchpointValue* throwException = m_currentBlock->appendNew<B3::PatchpointValue>(m_proc, B3::Void, origin()); >+ throwException->setGenerator([this] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { >+ this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsMemoryAccess); >+ }); >+ } else >+ emitStoreOp(op, emitCheckAndPreparePointer(pointer, offset, sizeOfStoreOp(op)), value, offset); >+ >+ return { }; >+} >+ >+auto AirIRGenerator::addSelect(ExpressionType condition, ExpressionType nonZero, ExpressionType zero, ExpressionType& result) -> PartialResult >+{ >+ result = m_currentBlock->appendNew<Value>(m_proc, B3::Select, origin(), condition, nonZero, zero); >+ return { }; >+} >+ >+AirIRGenerator::ExpressionType AirIRGenerator::addConstant(Type type, uint64_t value) >+{ >+ return constant(toB3Type(type), value); >+} >+ >+void AirIRGenerator::emitTierUpCheck(uint32_t decrementCount, Origin origin) >+{ >+ if (!m_tierUp) >+ return; >+ >+ Tmp countDownLocation = constant(pointerType(), reinterpret_cast<uint64_t>(m_tierUp), origin); >+ Tmp oldCountDown = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, Int32, origin, countDownLocation); >+ Tmp newCountDown = m_currentBlock->appendNew<Value>(m_proc, Sub, origin, oldCountDown, constant(Int32, decrementCount, origin)); >+ m_currentBlock->appendNew<MemoryValue>(m_proc, Store, origin, newCountDown, countDownLocation); >+ >+ PatchpointValue* patch = m_currentBlock->appendNew<PatchpointValue>(m_proc, B3::Void, origin); >+ Effects effects = Effects::none(); >+ // FIXME: we should have a more precise heap range for the tier up count. 
>+ effects.reads = B3::HeapRange::top(); >+ effects.writes = B3::HeapRange::top(); >+ patch->effects = effects; >+ >+ patch->append(newCountDown, ValueRep::SomeRegister); >+ patch->append(oldCountDown, ValueRep::SomeRegister); >+ patch->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { >+ MacroAssembler::Jump tierUp = jit.branch32(MacroAssembler::Above, params[0].gpr(), params[1].gpr()); >+ MacroAssembler::Label tierUpResume = jit.label(); >+ >+ params.addLatePath([=] (CCallHelpers& jit) { >+ tierUp.link(&jit); >+ >+ const unsigned extraPaddingBytes = 0; >+ RegisterSet registersToSpill = { }; >+ registersToSpill.add(GPRInfo::argumentGPR1); >+ unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(jit, registersToSpill, extraPaddingBytes); >+ >+ jit.move(MacroAssembler::TrustedImm32(m_functionIndex), GPRInfo::argumentGPR1); >+ MacroAssembler::Call call = jit.nearCall(); >+ >+ ScratchRegisterAllocator::restoreRegistersFromStackForCall(jit, registersToSpill, RegisterSet(), numberOfStackBytesUsedForRegisterPreservation, extraPaddingBytes); >+ jit.jump(tierUpResume); >+ >+ jit.addLinkTask([=] (LinkBuffer& linkBuffer) { >+ MacroAssembler::repatchNearCall(linkBuffer.locationOfNearCall<NoPtrTag>(call), CodeLocationLabel<JITThunkPtrTag>(Thunks::singleton().stub(triggerOMGTierUpThunkGenerator).code())); >+ >+ }); >+ }); >+ }); >+} >+ >+AirIRGenerator::ControlData AirIRGenerator::addLoop(Type signature) >+{ >+ BasicBlock* body = m_proc.addBlock(); >+ BasicBlock* continuation = m_proc.addBlock(); >+ >+ m_currentBlock->appendNewControlValue(m_proc, Jump, origin(), body); >+ >+ m_currentBlock = body; >+ emitTierUpCheck(TierUpCount::loopDecrement(), origin()); >+ >+ return ControlData(m_proc, origin(), signature, BlockType::Loop, continuation, body); >+} >+ >+AirIRGenerator::ControlData AirIRGenerator::addTopLevel(Type signature) >+{ >+ return ControlData(m_proc, Origin(), signature, 
BlockType::TopLevel, m_proc.addBlock()); >+} >+ >+AirIRGenerator::ControlData AirIRGenerator::addBlock(Type signature) >+{ >+ return ControlData(m_proc, origin(), signature, BlockType::Block, m_proc.addBlock()); >+} >+ >+auto AirIRGenerator::addIf(ExpressionType condition, Type signature, ControlType& result) -> PartialResult >+{ >+ BasicBlock* taken = m_proc.addBlock(); >+ BasicBlock* notTaken = m_proc.addBlock(); >+ BasicBlock* continuation = m_proc.addBlock(); >+ >+ m_currentBlock->appendNew<Value>(m_proc, B3::Branch, origin(), condition); >+ m_currentBlock->setSuccessors(FrequentedBlock(taken), FrequentedBlock(notTaken)); >+ taken->addPredecessor(m_currentBlock); >+ notTaken->addPredecessor(m_currentBlock); >+ >+ m_currentBlock = taken; >+ result = ControlData(m_proc, origin(), signature, BlockType::If, continuation, notTaken); >+ return { }; >+} >+ >+auto AirIRGenerator::addElse(ControlData& data, const ExpressionList& currentStack) -> PartialResult >+{ >+ unifyValuesWithBlock(currentStack, data.result); >+ m_currentBlock->appendNewControlValue(m_proc, Jump, origin(), data.continuation); >+ return addElseToUnreachable(data); >+} >+ >+auto AirIRGenerator::addElseToUnreachable(ControlData& data) -> PartialResult >+{ >+ ASSERT(data.type() == BlockType::If); >+ m_currentBlock = data.special; >+ data.convertIfToBlock(); >+ return { }; >+} >+ >+auto AirIRGenerator::addReturn(const ControlData&, const ExpressionList& returnValues) -> PartialResult >+{ >+ ASSERT(returnValues.size() <= 1); >+ if (returnValues.size()) >+ m_currentBlock->appendNewControlValue(m_proc, B3::Return, origin(), returnValues[0]); >+ else >+ m_currentBlock->appendNewControlValue(m_proc, B3::Return, origin()); >+ return { }; >+} >+ >+auto AirIRGenerator::addBranch(ControlData& data, ExpressionType condition, const ExpressionList& returnValues) -> PartialResult >+{ >+ unifyValuesWithBlock(returnValues, data.resultForBranch()); >+ >+ BasicBlock* target = data.targetBlockForBranch(); >+ if 
(condition) { >+ BasicBlock* continuation = m_proc.addBlock(); >+ m_currentBlock->appendNew<Value>(m_proc, B3::Branch, origin(), condition); >+ m_currentBlock->setSuccessors(FrequentedBlock(target), FrequentedBlock(continuation)); >+ target->addPredecessor(m_currentBlock); >+ continuation->addPredecessor(m_currentBlock); >+ m_currentBlock = continuation; >+ } else { >+ m_currentBlock->appendNewControlValue(m_proc, Jump, origin(), FrequentedBlock(target)); >+ target->addPredecessor(m_currentBlock); >+ } >+ >+ return { }; >+} >+ >+auto AirIRGenerator::addSwitch(ExpressionType condition, const Vector<ControlData*>& targets, ControlData& defaultTarget, const ExpressionList& expressionStack) -> PartialResult >+{ >+ for (size_t i = 0; i < targets.size(); ++i) >+ unifyValuesWithBlock(expressionStack, targets[i]->resultForBranch()); >+ unifyValuesWithBlock(expressionStack, defaultTarget.resultForBranch()); >+ >+ SwitchValue* switchValue = m_currentBlock->appendNew<SwitchValue>(m_proc, origin(), condition); >+ switchValue->setFallThrough(FrequentedBlock(defaultTarget.targetBlockForBranch())); >+ for (size_t i = 0; i < targets.size(); ++i) >+ switchValue->appendCase(SwitchCase(i, FrequentedBlock(targets[i]->targetBlockForBranch()))); >+ >+ return { }; >+} >+ >+auto AirIRGenerator::endBlock(ControlEntry& entry, ExpressionList& expressionStack) -> PartialResult >+{ >+ ControlData& data = entry.controlData; >+ >+ unifyValuesWithBlock(expressionStack, data.result); >+ m_currentBlock->appendNewControlValue(m_proc, Jump, origin(), data.continuation); >+ data.continuation->addPredecessor(m_currentBlock); >+ >+ return addEndToUnreachable(entry); >+} >+ >+ >+auto AirIRGenerator::addEndToUnreachable(ControlEntry& entry) -> PartialResult >+{ >+ ControlData& data = entry.controlData; >+ m_currentBlock = data.continuation; >+ >+ if (data.type() == BlockType::If) { >+ data.special->appendNewControlValue(m_proc, Jump, origin(), m_currentBlock); >+ 
m_currentBlock->addPredecessor(data.special); >+ } >+ >+ for (Tmp result : data.result) { >+ m_currentBlock->append(result); >+ entry.enclosedExpressionStack.append(result); >+ } >+ >+ // TopLevel does not have any code after this so we need to make sure we emit a return here. >+ if (data.type() == BlockType::TopLevel) >+ return addReturn(entry.controlData, entry.enclosedExpressionStack); >+ >+ return { }; >+} >+ >+auto AirIRGenerator::addCall(uint32_t functionIndex, const Signature& signature, Vector<ExpressionType>& args, ExpressionType& result) -> PartialResult >+{ >+ ASSERT(signature.argumentCount() == args.size()); >+ >+ m_makesCalls = true; >+ >+ Type returnType = signature.returnType(); >+ Vector<UnlinkedWasmToWasmCall>* unlinkedWasmToWasmCalls = &m_unlinkedWasmToWasmCalls; >+ >+ if (m_info.isImportedFunctionFromFunctionIndexSpace(functionIndex)) { >+ m_maxNumJSCallArguments = std::max(m_maxNumJSCallArguments, static_cast<uint32_t>(args.size())); >+ >+ // FIXME imports can be linked here, instead of generating a patchpoint, because all import stubs are generated before B3 compilation starts. https://bugs.webkit.org/show_bug.cgi?id=166462 >+ Tmp targetInstance = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), instanceValue(), safeCast<int32_t>(Instance::offsetOfTargetInstance(functionIndex))); >+ // The target instance is 0 unless the call is wasm->wasm. 
>+ Tmp isWasmCall = m_currentBlock->appendNew<Value>(m_proc, NotEqual, origin(), targetInstance, m_currentBlock->appendNew<Const64Value>(m_proc, origin(), 0)); >+ >+ BasicBlock* isWasmBlock = m_proc.addBlock(); >+ BasicBlock* isEmbedderBlock = m_proc.addBlock(); >+ BasicBlock* continuation = m_proc.addBlock(); >+ m_currentBlock->appendNewControlValue(m_proc, B3::Branch, origin(), isWasmCall, FrequentedBlock(isWasmBlock), FrequentedBlock(isEmbedderBlock)); >+ >+ Tmp wasmCallResult = wasmCallingConvention().setupCall(m_proc, isWasmBlock, origin(), args, toB3Type(returnType), >+ [=] (PatchpointValue* patchpoint) { >+ patchpoint->effects.writesPinned = true; >+ patchpoint->effects.readsPinned = true; >+ // We need to clobber all potential pinned registers since we might be leaving the instance. >+ // We pessimistically assume we could be calling to something that is bounds checking. >+ // FIXME: We shouldn't have to do this: https://bugs.webkit.org/show_bug.cgi?id=172181 >+ patchpoint->clobberLate(PinnedRegisterInfo::get().toSave(MemoryMode::BoundsChecking)); >+ patchpoint->setGenerator([unlinkedWasmToWasmCalls, functionIndex] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { >+ AllowMacroScratchRegisterUsage allowScratch(jit); >+ CCallHelpers::Call call = jit.threadSafePatchableNearCall(); >+ jit.addLinkTask([unlinkedWasmToWasmCalls, call, functionIndex] (LinkBuffer& linkBuffer) { >+ unlinkedWasmToWasmCalls->append({ linkBuffer.locationOfNearCall<WasmEntryPtrTag>(call), functionIndex }); >+ }); >+ }); >+ }); >+ UpsilonValue* wasmCallResultUpsilon = returnType == Void ? nullptr : isWasmBlock->appendNew<UpsilonValue>(m_proc, origin(), wasmCallResult); >+ isWasmBlock->appendNewControlValue(m_proc, Jump, origin(), continuation); >+ >+ // FIXME: Let's remove this indirection by creating a PIC friendly IC >+ // for calls out to the embedder. This shouldn't be that hard to do. We could probably >+ // implement the IC to be over Context*. 
>+ // https://bugs.webkit.org/show_bug.cgi?id=170375 >+ Tmp jumpDestination = isEmbedderBlock->appendNew<MemoryValue>(m_proc, >+ Load, pointerType(), origin(), instanceValue(), safeCast<int32_t>(Instance::offsetOfWasmToEmbedderStub(functionIndex))); >+ if (Options::usePoisoning()) >+ jumpDestination = isEmbedderBlock->appendNew<Value>(m_proc, BitXor, origin(), jumpDestination, isEmbedderBlock->appendNew<Const64Value>(m_proc, origin(), g_JITCodePoison)); >+ >+ Tmp embedderCallResult = wasmCallingConvention().setupCall(m_proc, isEmbedderBlock, origin(), args, toB3Type(returnType), >+ [=] (PatchpointValue* patchpoint) { >+ patchpoint->effects.writesPinned = true; >+ patchpoint->effects.readsPinned = true; >+ patchpoint->append(jumpDestination, ValueRep::SomeRegister); >+ // We need to clobber all potential pinned registers since we might be leaving the instance. >+ // We pessimistically assume we could be calling to something that is bounds checking. >+ // FIXME: We shouldn't have to do this: https://bugs.webkit.org/show_bug.cgi?id=172181 >+ patchpoint->clobberLate(PinnedRegisterInfo::get().toSave(MemoryMode::BoundsChecking)); >+ patchpoint->setGenerator([returnType] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) { >+ AllowMacroScratchRegisterUsage allowScratch(jit); >+ jit.call(params[returnType == Void ? 0 : 1].gpr(), WasmEntryPtrTag); >+ }); >+ }); >+ UpsilonValue* embedderCallResultUpsilon = returnType == Void ? 
nullptr : isEmbedderBlock->appendNew<UpsilonValue>(m_proc, origin(), embedderCallResult); >+ isEmbedderBlock->appendNewControlValue(m_proc, Jump, origin(), continuation); >+ >+ m_currentBlock = continuation; >+ >+ if (returnType == Void) >+ result = nullptr; >+ else { >+ result = continuation->appendNew<Value>(m_proc, Phi, toB3Type(returnType), origin()); >+ wasmCallResultUpsilon->setPhi(result); >+ embedderCallResultUpsilon->setPhi(result); >+ } >+ >+ // The call could have been to another WebAssembly instance, and / or could have modified our Memory. >+ restoreWebAssemblyGlobalState(RestoreCachedStackLimit::Yes, m_info.memory, instanceValue(), m_proc, continuation); >+ } else { >+ result = wasmCallingConvention().setupCall(m_proc, m_currentBlock, origin(), args, toB3Type(returnType), >+ [=] (PatchpointValue* patchpoint) { >+ patchpoint->effects.writesPinned = true; >+ patchpoint->effects.readsPinned = true; >+ >+ patchpoint->setGenerator([unlinkedWasmToWasmCalls, functionIndex] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { >+ AllowMacroScratchRegisterUsage allowScratch(jit); >+ CCallHelpers::Call call = jit.threadSafePatchableNearCall(); >+ jit.addLinkTask([unlinkedWasmToWasmCalls, call, functionIndex] (LinkBuffer& linkBuffer) { >+ unlinkedWasmToWasmCalls->append({ linkBuffer.locationOfNearCall<WasmEntryPtrTag>(call), functionIndex }); >+ }); >+ }); >+ }); >+ } >+ >+ return { }; >+} >+ >+auto AirIRGenerator::addCallIndirect(const Signature& signature, Vector<ExpressionType>& args, ExpressionType& result) -> PartialResult >+{ >+ ExpressionType calleeIndex = args.takeLast(); >+ ASSERT(signature.argumentCount() == args.size()); >+ >+ m_makesCalls = true; >+ // Note: call indirect can call either WebAssemblyFunction or WebAssemblyWrapperFunction. Because >+ // WebAssemblyWrapperFunction is like calling into the embedder, we conservatively assume all call indirects >+ // can be to the embedder for our stack check calculation. 
>+ m_maxNumJSCallArguments = std::max(m_maxNumJSCallArguments, static_cast<uint32_t>(args.size())); >+ >+ ExpressionType callableFunctionBuffer; >+ ExpressionType instancesBuffer; >+ ExpressionType callableFunctionBufferLength; >+ ExpressionType mask; >+ { >+ ExpressionType table = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), >+ instanceValue(), safeCast<int32_t>(Instance::offsetOfTable())); >+ callableFunctionBuffer = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), >+ table, safeCast<int32_t>(Table::offsetOfFunctions())); >+ instancesBuffer = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), >+ table, safeCast<int32_t>(Table::offsetOfInstances())); >+ callableFunctionBufferLength = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, Int32, origin(), >+ table, safeCast<int32_t>(Table::offsetOfLength())); >+ mask = m_currentBlock->appendNew<Value>(m_proc, ZExt32, origin(), >+ m_currentBlock->appendNew<MemoryValue>(m_proc, Load, Int32, origin(), >+ table, safeCast<int32_t>(Table::offsetOfMask()))); >+ } >+ >+ // Check the index we are looking for is valid. >+ { >+ CheckValue* check = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), >+ m_currentBlock->appendNew<Value>(m_proc, AboveEqual, origin(), calleeIndex, callableFunctionBufferLength)); >+ >+ check->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { >+ this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsCallIndirect); >+ }); >+ } >+ >+ calleeIndex = m_currentBlock->appendNew<Value>(m_proc, ZExt32, origin(), calleeIndex); >+ >+ if (Options::enableSpectreMitigations()) >+ calleeIndex = m_currentBlock->appendNew<Value>(m_proc, BitAnd, origin(), mask, calleeIndex); >+ >+ ExpressionType callableFunction; >+ { >+ // Compute the offset in the table index space we are looking for. 
>+ ExpressionType offset = m_currentBlock->appendNew<Value>(m_proc, Mul, origin(), >+ calleeIndex, constant(pointerType(), sizeof(WasmToWasmImportableFunction))); >+ callableFunction = m_currentBlock->appendNew<Value>(m_proc, Add, origin(), callableFunctionBuffer, offset); >+ >+ // Check that the WasmToWasmImportableFunction is initialized. We trap if it isn't. An "invalid" SignatureIndex indicates it's not initialized. >+ // FIXME: when we have trap handlers, we can just let the call fail because Signature::invalidIndex is 0. https://bugs.webkit.org/show_bug.cgi?id=177210 >+ static_assert(sizeof(WasmToWasmImportableFunction::signatureIndex) == sizeof(uint64_t), "Load codegen assumes i64"); >+ ExpressionType calleeSignatureIndex = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, Int64, origin(), callableFunction, safeCast<int32_t>(WasmToWasmImportableFunction::offsetOfSignatureIndex())); >+ { >+ CheckValue* check = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), >+ m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), >+ calleeSignatureIndex, >+ m_currentBlock->appendNew<Const64Value>(m_proc, origin(), Signature::invalidIndex))); >+ >+ check->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { >+ this->emitExceptionCheck(jit, ExceptionType::NullTableEntry); >+ }); >+ } >+ >+ // Check the signature matches the value we expect. >+ { >+ ExpressionType expectedSignatureIndex = m_currentBlock->appendNew<Const64Value>(m_proc, origin(), SignatureInformation::get(signature)); >+ CheckValue* check = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), >+ m_currentBlock->appendNew<Value>(m_proc, NotEqual, origin(), calleeSignatureIndex, expectedSignatureIndex)); >+ >+ check->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { >+ this->emitExceptionCheck(jit, ExceptionType::BadSignature); >+ }); >+ } >+ } >+ >+ // Do a context switch if needed. 
>+ { >+ Tmp offset = m_currentBlock->appendNew<Value>(m_proc, Mul, origin(), >+ calleeIndex, constant(pointerType(), sizeof(Instance*))); >+ Tmp newContextInstance = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), >+ m_currentBlock->appendNew<Value>(m_proc, Add, origin(), instancesBuffer, offset)); >+ >+ BasicBlock* continuation = m_proc.addBlock(); >+ BasicBlock* doContextSwitch = m_proc.addBlock(); >+ >+ Tmp isSameContextInstance = m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), >+ newContextInstance, instanceValue()); >+ m_currentBlock->appendNewControlValue(m_proc, B3::Branch, origin(), >+ isSameContextInstance, FrequentedBlock(continuation), FrequentedBlock(doContextSwitch)); >+ >+ PatchpointValue* patchpoint = doContextSwitch->appendNew<PatchpointValue>(m_proc, B3::Void, origin()); >+ patchpoint->effects.writesPinned = true; >+ // We pessimistically assume we're calling something with BoundsChecking memory. >+ // FIXME: We shouldn't have to do this: https://bugs.webkit.org/show_bug.cgi?id=172181 >+ patchpoint->clobber(PinnedRegisterInfo::get().toSave(MemoryMode::BoundsChecking)); >+ patchpoint->clobber(RegisterSet::macroScratchRegisters()); >+ patchpoint->append(newContextInstance, ValueRep::SomeRegister); >+ patchpoint->append(instanceValue(), ValueRep::SomeRegister); >+ patchpoint->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) { >+ AllowMacroScratchRegisterUsage allowScratch(jit); >+ GPRReg newContextInstance = params[0].gpr(); >+ GPRReg oldContextInstance = params[1].gpr(); >+ const PinnedRegisterInfo& pinnedRegs = PinnedRegisterInfo::get(); >+ const auto& sizeRegs = pinnedRegs.sizeRegisters; >+ GPRReg baseMemory = pinnedRegs.baseMemoryPointer; >+ ASSERT(newContextInstance != baseMemory); >+ jit.loadPtr(CCallHelpers::Address(oldContextInstance, Instance::offsetOfCachedStackLimit()), baseMemory); >+ jit.storePtr(baseMemory, CCallHelpers::Address(newContextInstance, 
Instance::offsetOfCachedStackLimit())); >+ jit.storeWasmContextInstance(newContextInstance); >+ ASSERT(sizeRegs[0].sizeRegister != baseMemory); >+ // FIXME: We should support more than one memory size register >+ // see: https://bugs.webkit.org/show_bug.cgi?id=162952 >+ ASSERT(sizeRegs.size() == 1); >+ ASSERT(sizeRegs[0].sizeRegister != newContextInstance); >+ ASSERT(!sizeRegs[0].sizeOffset); >+ jit.loadPtr(CCallHelpers::Address(newContextInstance, Instance::offsetOfCachedMemorySize()), sizeRegs[0].sizeRegister); // Memory size. >+ jit.loadPtr(CCallHelpers::Address(newContextInstance, Instance::offsetOfCachedMemory()), baseMemory); // Memory::void*. >+ }); >+ doContextSwitch->appendNewControlValue(m_proc, Jump, origin(), continuation); >+ >+ m_currentBlock = continuation; >+ } >+ >+ ExpressionType calleeCode = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), >+ m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), callableFunction, >+ safeCast<int32_t>(WasmToWasmImportableFunction::offsetOfEntrypointLoadLocation()))); >+ if (Options::usePoisoning()) >+ calleeCode = m_currentBlock->appendNew<Value>(m_proc, BitXor, origin(), calleeCode, m_currentBlock->appendNew<Const64Value>(m_proc, origin(), g_JITCodePoison)); >+ >+ Type returnType = signature.returnType(); >+ result = wasmCallingConvention().setupCall(m_proc, m_currentBlock, origin(), args, toB3Type(returnType), >+ [=] (PatchpointValue* patchpoint) { >+ patchpoint->effects.writesPinned = true; >+ patchpoint->effects.readsPinned = true; >+ // We need to clobber all potential pinned registers since we might be leaving the instance. >+ // We pessimistically assume we're always calling something that is bounds checking so >+ // because the wasm->wasm thunk unconditionally overrides the size registers. 
>+ // FIXME: We should not have to do this, but the wasm->wasm stub assumes it can >+ // use all the pinned registers as scratch: https://bugs.webkit.org/show_bug.cgi?id=172181 >+ patchpoint->clobberLate(PinnedRegisterInfo::get().toSave(MemoryMode::BoundsChecking)); >+ >+ patchpoint->append(calleeCode, ValueRep::SomeRegister); >+ patchpoint->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) { >+ AllowMacroScratchRegisterUsage allowScratch(jit); >+ jit.call(params[returnType == Void ? 0 : 1].gpr(), WasmEntryPtrTag); >+ }); >+ }); >+ >+ // The call could have been to another WebAssembly instance, and / or could have modified our Memory. >+ restoreWebAssemblyGlobalState(RestoreCachedStackLimit::Yes, m_info.memory, instanceValue(), m_proc, m_currentBlock); >+ >+ return { }; >+} >+ >+void AirIRGenerator::unify(const ExpressionType phi, const ExpressionType source) >+{ >+ m_currentBlock->appendNew<UpsilonValue>(m_proc, origin(), source, phi); >+} >+ >+void AirIRGenerator::unifyValuesWithBlock(const ExpressionList& resultStack, const ResultList& result) >+{ >+ ASSERT(result.size() <= resultStack.size()); >+ >+ for (size_t i = 0; i < result.size(); ++i) >+ unify(result[result.size() - 1 - i], resultStack[resultStack.size() - 1 - i]); >+} >+ >+static void dumpExpressionStack(const CommaPrinter& comma, const AirIRGenerator::ExpressionList& expressionStack) >+{ >+ dataLog(comma, "ExpressionStack:"); >+ for (const auto& expression : expressionStack) >+ dataLog(comma, *expression); >+} >+ >+void AirIRGenerator::dump(const Vector<ControlEntry>& controlStack, const ExpressionList* expressionStack) >+{ >+ // OOPS! 
>+ /* >+ dataLogLn("Processing Graph:"); >+ dataLog(m_code); >+ dataLogLn("With current block:", *m_currentBlock); >+ dataLogLn("Control stack:"); >+ ASSERT(controlStack.size()); >+ for (size_t i = controlStack.size(); i--;) { >+ dataLog(" ", controlStack[i].controlData, ": "); >+ CommaPrinter comma(", ", ""); >+ dumpExpressionStack(comma, *expressionStack); >+ expressionStack = &controlStack[i].enclosedExpressionStack; >+ dataLogLn(); >+ } >+ dataLogLn(); >+ */ >+} >+ >+auto AirIRGenerator::origin() -> Origin >+{ >+ OpcodeOrigin origin(m_parser->currentOpcode(), m_parser->currentOpcodeStartingOffset()); >+ ASSERT(isValidOpType(static_cast<uint8_t>(origin.opcode()))); >+ return bitwise_cast<Origin>(origin); >+} >+ >+Expected<std::unique_ptr<InternalFunction>, String> parseAndCompile(CompilationContext& compilationContext, const uint8_t* functionStart, size_t functionLength, const Signature& signature, Vector<UnlinkedWasmToWasmCall>& unlinkedWasmToWasmCalls, const ModuleInformation& info, MemoryMode mode, CompilationMode compilationMode, uint32_t functionIndex, TierUpCount* tierUp, ThrowWasmException throwWasmException) >+{ >+ auto result = std::make_unique<InternalFunction>(); >+ >+ compilationContext.embedderEntrypointJIT = std::make_unique<CCallHelpers>(); >+ compilationContext.wasmEntrypointJIT = std::make_unique<CCallHelpers>(); >+ >+ B3::Procedure procedure; >+ Code& code = procedure.code(); >+ >+ procedure.setOriginPrinter([] (PrintStream& out, Origin origin) { >+ if (origin.data()) >+ out.print("Wasm: ", bitwise_cast<OpcodeOrigin>(origin)); >+ }); >+ >+ // This means we cannot use either StackmapGenerationParams::usedRegisters() or >+ // StackmapGenerationParams::unavailableRegisters(). In exchange for this concession, we >+ // don't strictly need to run Air::reportUsedRegisters(), which saves a bit of CPU time at >+ // optLevel=1. >+ // OOPS: Do we need this? Should we just set it directly on Code? 
>+ procedure.setNeedsUsedRegisters(false); >+ >+ procedure.setOptLevel(compilationMode == CompilationMode::BBQMode >+ ? Options::webAssemblyBBQOptimizationLevel() >+ : Options::webAssemblyOMGOptimizationLevel()); >+ >+ AirIRGenerator irGenerator(info, procedure, result.get(), unlinkedWasmToWasmCalls, mode, compilationMode, functionIndex, tierUp, throwWasmException); >+ FunctionParser<AirIRGenerator> parser(irGenerator, functionStart, functionLength, signature, info); >+ WASM_FAIL_IF_HELPER_FAILS(parser.parse()); >+ >+ >+ for (BasicBlock* block : code) { >+ for (BasicBlock* successor : block->successors()) >+ successor->addPredecessor(block); >+ } >+ // OOPS: Validate 'code' here! >+ >+ { >+ Air::generate(proc.code(), *compilationContext.wasmEntrypointJIT); >+ compilationContext.wasmEntrypointByproducts = procedure.releaseByproducts(); >+ result->entrypoint.calleeSaveRegisters = code.calleeSaveRegisterAtOffsetList(); >+ } >+ >+ return WTFMove(result); >+} >+ >+void AirIRGenerator::emitChecksForModOrDiv(B3::Opcode operation, ExpressionType left, ExpressionType right) >+{ >+ ASSERT(operation == Div || operation == Mod || operation == UDiv || operation == UMod); >+ const B3::Type type = left->type(); >+ >+ { >+ emitCheck([&] { >+ Kind op = is64BitOp(operation) ? BranchTest64 : BranchTest32; >+ append(op, Arg::resCond(MacroAssembler::Zero), right, right); >+ }, [=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { >+ this->emitExceptionCheck(jit, ExceptionType::DivisionByZero); >+ }); >+ } >+ >+ if (operation == Div) { >+ int64_t min = type == Int32 ? std::numeric_limits<int32_t>::min() : std::numeric_limits<int64_t>::min(); >+ emitCheck([&] { >+ // OOPS: Try to emit nice compare w/ imms. >+ Tmp minTmp = gpr(); >+ Tmp negOne = gpr(); >+ >+ Kind op = is64BitOp(operation) ? 
Compare64 : Compare32; >+ append(Move, Arg::bigImm(min), minTmp); >+ append(op, Arg::relCond(MacroAssembler::Equal), left, minTmp, minTmp); >+ append(Move, Arg::imm(-1), negOne); >+ append(op, Arg::relCond(MacroAssembler::Equal), right, negOne, negOne); >+ append(BranchTest32, Arg::resCond(MacroAssembler::NotZero), minTmp, negOne); >+ }, >+ [=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { >+ this->emitExceptionCheck(jit, ExceptionType::IntegerOverflow); >+ }); >+ } >+} >+ >+// OOPS: Implement these div ops!!! >+template<> >+auto AirIRGenerator::addOp<OpType::I32DivS>(ExpressionType left, ExpressionType right, ExpressionType& result) -> PartialResult >+{ >+ const B3::Opcode op = Div; >+ emitChecksForModOrDiv(op, left, right); >+ result = m_currentBlock->appendNew<Value>(m_proc, op, origin(), left, right); >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::I32RemS>(ExpressionType left, ExpressionType right, ExpressionType& result) -> PartialResult >+{ >+ const B3::Opcode op = Mod; >+ emitChecksForModOrDiv(op, left, right); >+ result = m_currentBlock->appendNew<Value>(m_proc, chill(op), origin(), left, right); >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::I32DivU>(ExpressionType left, ExpressionType right, ExpressionType& result) -> PartialResult >+{ >+ const B3::Opcode op = UDiv; >+ emitChecksForModOrDiv(op, left, right); >+ result = m_currentBlock->appendNew<Value>(m_proc, op, origin(), left, right); >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::I32RemU>(ExpressionType left, ExpressionType right, ExpressionType& result) -> PartialResult >+{ >+ const B3::Opcode op = UMod; >+ emitChecksForModOrDiv(op, left, right); >+ result = m_currentBlock->appendNew<Value>(m_proc, op, origin(), left, right); >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::I64DivS>(ExpressionType left, ExpressionType right, ExpressionType& result) -> PartialResult >+{ >+ const B3::Opcode op 
= Div; >+ emitChecksForModOrDiv(op, left, right); >+ result = m_currentBlock->appendNew<Value>(m_proc, op, origin(), left, right); >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::I64RemS>(ExpressionType left, ExpressionType right, ExpressionType& result) -> PartialResult >+{ >+ const B3::Opcode op = Mod; >+ emitChecksForModOrDiv(op, left, right); >+ result = m_currentBlock->appendNew<Value>(m_proc, chill(op), origin(), left, right); >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::I64DivU>(ExpressionType left, ExpressionType right, ExpressionType& result) -> PartialResult >+{ >+ const B3::Opcode op = UDiv; >+ emitChecksForModOrDiv(op, left, right); >+ result = m_currentBlock->appendNew<Value>(m_proc, op, origin(), left, right); >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::I64RemU>(ExpressionType left, ExpressionType right, ExpressionType& result) -> PartialResult >+{ >+ const B3::Opcode op = UMod; >+ emitChecksForModOrDiv(op, left, right); >+ result = m_currentBlock->appendNew<Value>(m_proc, op, origin(), left, right); >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::I32Ctz>(ExpressionType arg, ExpressionType& result) -> PartialResult >+{ >+ PatchpointValue* patchpoint = addPatchpoint(B3::Int32); >+ patchpoint->effects = Effects::none(); >+ patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { >+ jit.countTrailingZeros32(params[1].gpr(), params[0].gpr()); >+ }); >+ result = gpr(); >+ emitPatchpoint(patchpoint, result, arg); >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::I64Ctz>(ExpressionType arg, ExpressionType& result) -> PartialResult >+{ >+ PatchpointValue* patchpoint = addPatchpoint(B3::Int64); >+ patchpoint->effects = Effects::none(); >+ patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { >+ jit.countTrailingZeros64(params[1].gpr(), params[0].gpr()); >+ }); >+ 
result = gpr(); >+ emitPatchpoint(patchpoint, result, arg); >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::I32Popcnt>(ExpressionType arg, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ >+#if CPU(X86_64) >+ if (MacroAssembler::supportsCountPopulation()) { >+ PatchpointValue* patchpoint = addPatchpoint(B3::Int32); >+ patchpoint->effects = Effects::none(); >+ patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { >+ jit.countPopulation32(params[1].gpr(), params[0].gpr()); >+ }); >+ emitPatchpoint(patchpoint, result, arg); >+ return { }; >+ } >+#endif >+ >+ uint32_t (*popcount)(int32_t) = [] (int32_t value) -> uint32_t { return __builtin_popcount(value); }; >+ emitCCall(popcount, result, arg); >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::I64Popcnt>(ExpressionType arg, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ >+#if CPU(X86_64) >+ if (MacroAssembler::supportsCountPopulation()) { >+ PatchpointValue* patchpoint = addPatchpoint(B3::Int64); >+ patchpoint->effects = Effects::none(); >+ patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { >+ jit.countPopulation64(params[1].gpr(), params[0].gpr()); >+ }); >+ emitPatchpoint(patchpoint, result, arg); >+ return { }; >+ } >+#endif >+ >+ uint64_t (*popcount)(int64_t) = [] (int64_t value) -> uint64_t { return __builtin_popcountll(value); }; >+ emitCCall(popcount, result, arg); >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<F64ConvertUI64>(ExpressionType arg, ExpressionType& result) -> PartialResult >+{ >+ PatchpointValue* patchpoint = addPatchpoint(B3::Double); >+ patchpoint->effects = Effects::none(); >+ if (isX86()) >+ patchpoint->numGPScratchRegisters = 1; >+ patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { >+#if CPU(X86_64) >+ jit.convertUInt64ToDouble(params[1].gpr(), params[0].fpr(), 
params.gpScratch(0)); >+#else >+ jit.convertUInt64ToDouble(params[1].gpr(), params[0].fpr()); >+#endif >+ }); >+ result = fpr(); >+ emitPatchpoint(patchpoint, result, arg); >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::F32ConvertUI64>(ExpressionType arg, ExpressionType& result) -> PartialResult >+{ >+ PatchpointValue* patchpoint = addPatchpoint(B3::Float); >+ patchpoint->effects = Effects::none(); >+ if (isX86()) >+ patchpoint->numGPScratchRegisters = 1; >+ patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { >+#if CPU(X86_64) >+ jit.convertUInt64ToFloat(params[1].gpr(), params[0].fpr(), params.gpScratch(0)); >+#else >+ jit.convertUInt64ToFloat(params[1].gpr(), params[0].fpr()); >+#endif >+ }); >+ result = fpr(); >+ emitPatchpoint(patchpoint, result, arg); >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::F64Nearest>(ExpressionType arg, ExpressionType& result) -> PartialResult >+{ >+ PatchpointValue* patchpoint = addPatchpoint(B3::Double); >+ patchpoint->effects = Effects::none(); >+ patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { >+ jit.roundTowardNearestIntDouble(params[1].fpr(), params[0].fpr()); >+ }); >+ result = fpr(); >+ emitPatchpoint(patchpoint, result, arg); >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::F32Nearest>(ExpressionType arg, ExpressionType& result) -> PartialResult >+{ >+ PatchpointValue* patchpoint = addPatchpoint(B3::Float); >+ patchpoint->effects = Effects::none(); >+ patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { >+ jit.roundTowardNearestIntFloat(params[1].fpr(), params[0].fpr()); >+ }); >+ result = fpr(); >+ emitPatchpoint(patchpoint, result, arg); >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::F64Trunc>(ExpressionType arg, ExpressionType& result) -> PartialResult >+{ >+ PatchpointValue* patchpoint = 
addPatchpoint(B3::Double); >+ patchpoint->effects = Effects::none(); >+ patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { >+ jit.roundTowardZeroDouble(params[1].fpr(), params[0].fpr()); >+ }); >+ result = fpr(); >+ emitPatchpoint(patchpoint, result, arg); >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::F32Trunc>(ExpressionType arg, ExpressionType& result) -> PartialResult >+{ >+ B3::PatchpointValue* patchpoint = addPatchpoint(B3::Float); >+ patchpoint->effects = Effects::none(); >+ patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { >+ jit.roundTowardZeroFloat(params[1].fpr(), params[0].fpr()); >+ }); >+ result = fpr(); >+ emitPatchpoint(patchpoint, result, arg); >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::I32TruncSF64>(ExpressionType arg, ExpressionType& result) -> PartialResult >+{ >+ bool b = true; >+ if (b) >+ CRASH(); // OOPS: implement this! >+ >+ /* >+ Tmp max = constant(Double, bitwise_cast<uint64_t>(-static_cast<double>(std::numeric_limits<int32_t>::min()))); >+ Tmp min = constant(Double, bitwise_cast<uint64_t>(static_cast<double>(std::numeric_limits<int32_t>::min()))); >+ Tmp outOfBounds = m_currentBlock->appendNew<Value>(m_proc, BitAnd, origin(), >+ m_currentBlock->appendNew<Value>(m_proc, LessThan, origin(), arg, max), >+ m_currentBlock->appendNew<Value>(m_proc, GreaterEqual, origin(), arg, min)); >+ outOfBounds = m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), outOfBounds, constant(Int32, 0)); >+ CheckValue* trap = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), outOfBounds); >+ trap->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams&) { >+ this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsTrunc); >+ }); >+ PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Int32, origin()); >+ patchpoint->append(arg, ValueRep::SomeRegister); >+ 
patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { >+ jit.truncateDoubleToInt32(params[1].fpr(), params[0].gpr()); >+ }); >+ patchpoint->effects = Effects::none(); >+ result = patchpoint; >+ */ >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::I32TruncSF32>(ExpressionType arg, ExpressionType& result) -> PartialResult >+{ >+ bool b = true; >+ if (b) >+ CRASH(); // OOPS: implement this! >+ >+ /* >+ Tmp max = constant(Float, bitwise_cast<uint32_t>(-static_cast<float>(std::numeric_limits<int32_t>::min()))); >+ Tmp min = constant(Float, bitwise_cast<uint32_t>(static_cast<float>(std::numeric_limits<int32_t>::min()))); >+ Tmp outOfBounds = m_currentBlock->appendNew<Value>(m_proc, BitAnd, origin(), >+ m_currentBlock->appendNew<Value>(m_proc, LessThan, origin(), arg, max), >+ m_currentBlock->appendNew<Value>(m_proc, GreaterEqual, origin(), arg, min)); >+ outOfBounds = m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), outOfBounds, constant(Int32, 0)); >+ CheckValue* trap = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), outOfBounds); >+ trap->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams&) { >+ this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsTrunc); >+ }); >+ PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Int32, origin()); >+ patchpoint->append(arg, ValueRep::SomeRegister); >+ patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { >+ jit.truncateFloatToInt32(params[1].fpr(), params[0].gpr()); >+ }); >+ patchpoint->effects = Effects::none(); >+ result = patchpoint; >+ */ >+ return { }; >+} >+ >+ >+template<> >+auto AirIRGenerator::addOp<OpType::I32TruncUF64>(ExpressionType arg, ExpressionType& result) -> PartialResult >+{ >+ bool b = true; >+ if (b) >+ CRASH(); // OOPS: implement this! 
>+ >+ /* >+ Tmp max = constant(Double, bitwise_cast<uint64_t>(static_cast<double>(std::numeric_limits<int32_t>::min()) * -2.0)); >+ Tmp min = constant(Double, bitwise_cast<uint64_t>(-1.0)); >+ Tmp outOfBounds = m_currentBlock->appendNew<Value>(m_proc, BitAnd, origin(), >+ m_currentBlock->appendNew<Value>(m_proc, LessThan, origin(), arg, max), >+ m_currentBlock->appendNew<Value>(m_proc, GreaterThan, origin(), arg, min)); >+ outOfBounds = m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), outOfBounds, constant(Int32, 0)); >+ CheckValue* trap = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), outOfBounds); >+ trap->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams&) { >+ this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsTrunc); >+ }); >+ PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Int32, origin()); >+ patchpoint->append(arg, ValueRep::SomeRegister); >+ patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { >+ jit.truncateDoubleToUint32(params[1].fpr(), params[0].gpr()); >+ }); >+ patchpoint->effects = Effects::none(); >+ result = patchpoint; >+ */ >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::I32TruncUF32>(ExpressionType arg, ExpressionType& result) -> PartialResult >+{ >+ bool b = true; >+ if (b) >+ CRASH(); // OOPS: implement this! 
>+ >+ /* >+ Tmp max = constant(Float, bitwise_cast<uint32_t>(static_cast<float>(std::numeric_limits<int32_t>::min()) * static_cast<float>(-2.0))); >+ Tmp min = constant(Float, bitwise_cast<uint32_t>(static_cast<float>(-1.0))); >+ Tmp outOfBounds = m_currentBlock->appendNew<Value>(m_proc, BitAnd, origin(), >+ m_currentBlock->appendNew<Value>(m_proc, LessThan, origin(), arg, max), >+ m_currentBlock->appendNew<Value>(m_proc, GreaterThan, origin(), arg, min)); >+ outOfBounds = m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), outOfBounds, constant(Int32, 0)); >+ CheckValue* trap = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), outOfBounds); >+ trap->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams&) { >+ this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsTrunc); >+ }); >+ PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Int32, origin()); >+ patchpoint->append(arg, ValueRep::SomeRegister); >+ patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { >+ jit.truncateFloatToUint32(params[1].fpr(), params[0].gpr()); >+ }); >+ patchpoint->effects = Effects::none(); >+ result = patchpoint; >+ */ >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::I64TruncSF64>(ExpressionType arg, ExpressionType& result) -> PartialResult >+{ >+ bool b = true; >+ if (b) >+ CRASH(); // OOPS: implement this! 
>+ >+ /* >+ Tmp max = constant(Double, bitwise_cast<uint64_t>(-static_cast<double>(std::numeric_limits<int64_t>::min()))); >+ Tmp min = constant(Double, bitwise_cast<uint64_t>(static_cast<double>(std::numeric_limits<int64_t>::min()))); >+ Tmp outOfBounds = m_currentBlock->appendNew<Value>(m_proc, BitAnd, origin(), >+ m_currentBlock->appendNew<Value>(m_proc, LessThan, origin(), arg, max), >+ m_currentBlock->appendNew<Value>(m_proc, GreaterEqual, origin(), arg, min)); >+ outOfBounds = m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), outOfBounds, constant(Int32, 0)); >+ CheckValue* trap = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), outOfBounds); >+ trap->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams&) { >+ this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsTrunc); >+ }); >+ PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Int64, origin()); >+ patchpoint->append(arg, ValueRep::SomeRegister); >+ patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { >+ jit.truncateDoubleToInt64(params[1].fpr(), params[0].gpr()); >+ }); >+ patchpoint->effects = Effects::none(); >+ result = patchpoint; >+ */ >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::I64TruncUF64>(ExpressionType arg, ExpressionType& result) -> PartialResult >+{ >+ bool b = true; >+ if (b) >+ CRASH(); // OOPS: implement this! 
>+ /* >+ Tmp max = constant(Double, bitwise_cast<uint64_t>(static_cast<double>(std::numeric_limits<int64_t>::min()) * -2.0)); >+ Tmp min = constant(Double, bitwise_cast<uint64_t>(-1.0)); >+ Tmp outOfBounds = m_currentBlock->appendNew<Value>(m_proc, BitAnd, origin(), >+ m_currentBlock->appendNew<Value>(m_proc, LessThan, origin(), arg, max), >+ m_currentBlock->appendNew<Value>(m_proc, GreaterThan, origin(), arg, min)); >+ outOfBounds = m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), outOfBounds, constant(Int32, 0)); >+ CheckValue* trap = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), outOfBounds); >+ trap->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams&) { >+ this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsTrunc); >+ }); >+ >+ Tmp signBitConstant; >+ if (isX86()) { >+ // Since x86 doesn't have an instruction to convert floating points to unsigned integers, we at least try to do the smart thing if >+ // the numbers are would be positive anyway as a signed integer. Since we cannot materialize constants into fprs we have b3 do it >+ // so we can pool them if needed. 
>+ signBitConstant = constant(Double, bitwise_cast<uint64_t>(static_cast<double>(std::numeric_limits<uint64_t>::max() - std::numeric_limits<int64_t>::max()))); >+ } >+ PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Int64, origin()); >+ patchpoint->append(arg, ValueRep::SomeRegister); >+ if (isX86()) { >+ patchpoint->append(signBitConstant, ValueRep::SomeRegister); >+ patchpoint->numFPScratchRegisters = 1; >+ } >+ patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { >+ AllowMacroScratchRegisterUsage allowScratch(jit); >+ FPRReg scratch = InvalidFPRReg; >+ FPRReg constant = InvalidFPRReg; >+ if (isX86()) { >+ scratch = params.fpScratch(0); >+ constant = params[2].fpr(); >+ } >+ jit.truncateDoubleToUint64(params[1].fpr(), params[0].gpr(), scratch, constant); >+ }); >+ patchpoint->effects = Effects::none(); >+ result = patchpoint; >+ */ >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::I64TruncSF32>(ExpressionType arg, ExpressionType& result) -> PartialResult >+{ >+ bool b = true; >+ if (b) >+ CRASH(); // OOPS: Implement this! 
>+ /* >+ Tmp max = constant(Float, bitwise_cast<uint32_t>(-static_cast<float>(std::numeric_limits<int64_t>::min()))); >+ Tmp min = constant(Float, bitwise_cast<uint32_t>(static_cast<float>(std::numeric_limits<int64_t>::min()))); >+ Tmp outOfBounds = m_currentBlock->appendNew<Value>(m_proc, BitAnd, origin(), >+ m_currentBlock->appendNew<Value>(m_proc, LessThan, origin(), arg, max), >+ m_currentBlock->appendNew<Value>(m_proc, GreaterEqual, origin(), arg, min)); >+ outOfBounds = m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), outOfBounds, constant(Int32, 0)); >+ CheckValue* trap = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), outOfBounds); >+ trap->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams&) { >+ this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsTrunc); >+ }); >+ PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Int64, origin()); >+ patchpoint->append(arg, ValueRep::SomeRegister); >+ patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { >+ jit.truncateFloatToInt64(params[1].fpr(), params[0].gpr()); >+ }); >+ patchpoint->effects = Effects::none(); >+ result = patchpoint; >+ */ >+ return { }; >+} >+ >+template<> >+auto AirIRGenerator::addOp<OpType::I64TruncUF32>(ExpressionType arg, ExpressionType& result) -> PartialResult >+{ >+ bool b = true; >+ if (b) >+ CRASH(); // OOPS: implement this >+ /* >+ Tmp max = constant(Float, bitwise_cast<uint32_t>(static_cast<float>(std::numeric_limits<int64_t>::min()) * static_cast<float>(-2.0))); >+ Tmp min = constant(Float, bitwise_cast<uint32_t>(static_cast<float>(-1.0))); >+ Tmp outOfBounds = m_currentBlock->appendNew<Value>(m_proc, BitAnd, origin(), >+ m_currentBlock->appendNew<Value>(m_proc, LessThan, origin(), arg, max), >+ m_currentBlock->appendNew<Value>(m_proc, GreaterThan, origin(), arg, min)); >+ outOfBounds = m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), outOfBounds, constant(Int32, 0)); 
>+ CheckValue* trap = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), outOfBounds); >+ trap->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams&) { >+ this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsTrunc); >+ }); >+ >+ Tmp signBitConstant; >+ if (isX86()) { >+ // Since x86 doesn't have an instruction to convert floating points to unsigned integers, we at least try to do the smart thing if >+ // the numbers would be positive anyway as a signed integer. Since we cannot materialize constants into fprs we have b3 do it >+ // so we can pool them if needed. >+ signBitConstant = constant(Float, bitwise_cast<uint32_t>(static_cast<float>(std::numeric_limits<uint64_t>::max() - std::numeric_limits<int64_t>::max()))); >+ } >+ PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Int64, origin()); >+ patchpoint->append(arg, ValueRep::SomeRegister); >+ if (isX86()) { >+ patchpoint->append(signBitConstant, ValueRep::SomeRegister); >+ patchpoint->numFPScratchRegisters = 1; >+ } >+ patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { >+ AllowMacroScratchRegisterUsage allowScratch(jit); >+ FPRReg scratch = InvalidFPRReg; >+ FPRReg constant = InvalidFPRReg; >+ if (isX86()) { >+ scratch = params.fpScratch(0); >+ constant = params[2].fpr(); >+ } >+ jit.truncateFloatToUint64(params[1].fpr(), params[0].gpr(), scratch, constant); >+ }); >+ patchpoint->effects = Effects::none(); >+ result = patchpoint; >+ */ >+ return { }; >+} >+ >+PartialResult AirIRGenerator::addShift(Kind kind, ExpressionType value, ExpressionType shift, ExpressionType& result) >+{ >+ result = gpr(); >+ >+ if (isValidForm(kind, Arg::Tmp, Arg::Tmp, Arg::Tmp)) { >+ append(kind, value, shift, result); >+ return { }; >+ } >+ >+ append(Move, value, result); >+ append(Move, shift, m_ecx); >+ append(kind, m_ecx, result); >+ return { }; >+} >+ >+PartialResult AirIRGenerator::addIntegerSub(Kind kind, ExpressionType lhs, 
ExpressionType rhs, ExpressionType& result) >+{ >+ result = gpr(); >+ >+ if (isValidForm(kind, Arg::Tmp, Arg::Tmp, Arg::Tmp)) { >+ append(kind, lhs, rhs, result); >+ return { }; >+ } >+ >+ RELEASE_ASSERT(isX86()); >+ // Sub a, b >+ // means >+ // b = b Sub a >+ append(Move, lhs, result); >+ append(kind, rhs, result); >+ return { }; >+} >+ >+PartialResult AirIRGenerator::addFloatingPointAbs(Kind kind, ExpressionType value, ExpressionType& result) >+{ >+ RELEASE_ASSERT(kind == AbsFloat || kind == AbsDouble); >+ >+ result = fpr(); >+ >+ if (isValidForm(kind, Arg::Tmp, Arg::Tmp)) { >+ append(kind, value, result); >+ return { }; >+ } >+ >+ RELEASE_ASSERT(isX86()); >+ >+ Tmp constant = gpr(); >+ if (kind == AbsFloat) { >+ append(Move, Arg::imm(~(1 << 31)), constant); >+ append(Move32ToFloat, constant, result); >+ append(AndFloat, value, result); >+ } else { >+ append(Move, Arg::bigImm(~(1 << 63)), constant); >+ append(Move64ToDouble, constant, result); >+ append(AndDouble, value, result); >+ } >+ return { }; >+} >+ >+PartialResult AirIRGenerator::addFloatingPointBinOp(Kind kind, ExpressionType lhs, ExpressionType rhs, ExpressionType& result) >+{ >+ result = fpr(); >+ >+ if (isValidForm(kind, Arg::Tmp, Arg::Tmp, Args::Tmp)) { >+ append(kind, lhs, rhs, result); >+ return { }; >+ } >+ >+ RELEASE_ASSERT(isX86()); >+ >+ // Op a, b >+ // means >+ // b = b Op a >+ // OOPS: verify >+ append(MoveDouble, lhs, result); >+ append(kind, rhs, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F32Ceil>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ append(CeilFloat, arg0, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32Mul>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Mul32, arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32Sub>(ExpressionType arg0, ExpressionType arg1, 
ExpressionType& result) -> PartialResult
>+{
>+    // Fix: addIntegerSub takes the 'result' out-parameter; it was dropped here,
>+    // which does not compile (and would leave 'result' unset).
>+    return addIntegerSub(Sub32, arg0, arg1, result);
>+}
>+
>+template<> auto AirIRGenerator::addOp<OpType::F64Le>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult
>+{
>+    result = gpr();
>+    append(CompareDouble, Arg::relCond(MacroAssembler::DoubleLessThanOrEqual), arg0, arg1, result);
>+    return { };
>+}
>+
>+template<> auto AirIRGenerator::addOp<OpType::F32DemoteF64>(ExpressionType arg0, ExpressionType& result) -> PartialResult
>+{
>+    result = fpr();
>+    append(ConvertDoubleToFloat, arg0, result);
>+    return { };
>+}
>+
>+template<> auto AirIRGenerator::addOp<OpType::F32Min>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult
>+{
>+    result = fpr();
>+
>+    // min(a, b): the equal case uses OrFloat so that min(+0, -0) == -0.
>+    // NOTE(review): if arg0 is NaN (and arg1 is not), both branches below end up
>+    // selecting arg1, but wasm f32.min must return NaN when either operand is NaN --
>+    // confirm NaN handling before landing.
>+    BasicBlock* isEqual = m_code.addBlock();
>+    BasicBlock* notEqual = m_code.addBlock();
>+    BasicBlock* greaterThanOrEqual = m_code.addBlock();
>+    BasicBlock* continuation = m_code.addBlock();
>+
>+    append(m_currentBlock, BranchFloat, Arg::relCond(MacroAssembler::DoubleEqual), arg0, arg1);
>+    m_currentBlock->setSuccessors(isEqual, notEqual);
>+
>+    append(isEqual, OrFloat, arg0, arg1, result);
>+    append(isEqual, Jump);
>+    isEqual->setSuccessors(continuation);
>+
>+    append(notEqual, MoveFloat, arg0, result);
>+    append(notEqual, BranchFloat, Arg::relCond(MacroAssembler::DoubleLessThan), arg0, arg1);
>+    notEqual->setSuccessors(continuation, greaterThanOrEqual);
>+
>+    append(greaterThanOrEqual, MoveFloat, arg1, result);
>+    append(greaterThanOrEqual, Jump);
>+    greaterThanOrEqual->setSuccessors(continuation);
>+
>+    m_currentBlock = continuation;
>+
>+    return { };
>+}
>+
>+template<> auto AirIRGenerator::addOp<OpType::F64Ne>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult
>+{
>+    result = gpr();
>+    append(CompareDouble, Arg::relCond(MacroAssembler::DoubleNotEqual), arg0, arg1, result);
>+    return { };
>+}
>+
>+template<> auto AirIRGenerator::addOp<OpType::F64Lt>(ExpressionType arg0,
ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(CompareDouble, Arg::relCond(MacroAssembler::DoubleLessThan), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F32Max>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ >+ BasicBlock* isEqual = m_code.addBlock(); >+ BasicBlock* notEqual = m_code.addBlock(); >+ BasicBlock* lessThan = m_code.addBlock(); >+ BasicBlock* continuation = m_code.addBlock(); >+ >+ append(m_currentBlock, BranchFloat, Arg::relCond(MacroAssembler::DoubleEqual), arg0, arg1); >+ m_currentBlock->setSuccessors(isEqual, notEqual); >+ >+ append(isEqual, AndFloat, arg0, arg1, result); >+ append(isEqual, Jump); >+ isEqual->setSuccessors(continuation); >+ >+ append(notEqual, MoveFloat, arg0, result); >+ append(notEqual, BranchFloat, Arg::relCond(MacroAssembler::DoubleLessThan), arg0, arg1); >+ notEqual->setSuccessors(lessThan, continuation); >+ >+ append(lessThan, MoveFloat, arg1, result); >+ append(lessThan, Jump); >+ lessThan->setSuccessors(continuation); >+ >+ m_currentBlock = continuation; >+ >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F64Mul>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ return addFloatingPointBinOp(MulDouble, arg0, arg1, result); >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F32Div>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ return addFloatingPointBinOp(DivFloat, arg0, arg1, result); >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32Clz>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(CountLeadingZeros32, arg0, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F32Copysign>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ // OOPS: We can have 
better codegen here for the imms and two operand forms on x86 >+ result = fpr(); >+ Tmp temp1 = gpr(); >+ Tmp sign = gpr(); >+ Tmp value = gpr(); >+ >+ // OOPS: Can fit in Imm/BitImm instead of BigImm? >+ append(MoveFloatTo32, arg1, temp1); >+ append(Move, Arg::bigImm(0x80000000), sign); >+ append(And32, temp1, sign, sign); >+ >+ // Both operands are f32: read arg0's payload with MoveFloatTo32, not MoveDoubleTo64. >+ append(MoveFloatTo32, arg0, temp1); >+ append(Move, Arg::bigImm(0x7fffffff), value); >+ append(And32, temp1, value, value); >+ >+ append(Or32, sign, value, value); >+ append(Move32ToFloat, value, result); >+ >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F64ConvertUI32>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ Tmp temp = tmp(); >+ append(Move32, arg0, temp); >+ append(ConvertInt64ToDouble, temp, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F32ReinterpretI32>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ append(Move32ToFloat, arg0, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64And>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(And64, arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F32Ne>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(CompareFloat, Arg::relCond(MacroAssembler::DoubleNotEqual), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F64Gt>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(CompareDouble, Arg::relCond(MacroAssembler::DoubleGreaterThan), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F32Sqrt>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ append(SqrtFloat, arg0, result); 
>+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F64Ge>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(CompareDouble, Arg::relCond(MacroAssembler::DoubleGreaterThanOrEqual), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64GtS>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Compare64, Arg::relCond(MacroAssembler::GreaterThan), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64GtU>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Compare64, Arg::relCond(MacroAssembler::Above), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64Eqz>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Test64, Arg::resCond(MacroAssembler::Zero), arg0, arg0, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F64Div>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ // OOPS: what about chill-ness here...? 
>+ return addFloatingPointBinOp(DivDouble, arg0, arg1, result); >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F32Add>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ append(AddFloat, arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64Or>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Or64, arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32LeU>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Compare32, Arg::relCond(MacroAssembler::BelowOrEqual), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32LeS>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Compare32, Arg::relCond(MacroAssembler::LessThanOrEqual), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64Ne>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Compare64, Arg::relCond(MacroAssembler::NotEqual), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64Clz>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(CountLeadingZeros64, arg0, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F32Neg>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ append(NegateFloat, arg0, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32And>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(And32, arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto 
AirIRGenerator::addOp<OpType::I32LtU>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Compare32, Arg::relCond(MacroAssembler::Below), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64Rotr>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ return addShift(RotateRight64, arg0, arg1, result); >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F64Abs>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ return addFloatingPointAbs(AbsDouble, arg0, result); >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32LtS>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Compare32, Arg::relCond(MacroAssembler::LessThan), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32Eq>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Compare32, Arg::relCond(MacroAssembler::Equal), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F64Copysign>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ // OOPS: We can have better codegen here for the imms and two operand forms on x86 >+ result = fpr(); >+ Tmp temp1 = gpr(); >+ Tmp sign = gpr(); >+ Tmp value = gpr(); >+ >+ append(MoveDoubleTo64, arg1, temp1); >+ append(Move, Arg::bigImm(0x8000000000000000), sign); >+ append(And64, temp1, sign, sign); >+ >+ append(MoveDoubleTo64, arg0, temp1); >+ append(Move, Arg::bigImm(0x7fffffffffffffff), value); >+ append(And64, temp1, value, value); >+ >+ append(Or64, sign, value, value); >+ append(Move64ToDouble, value, result); >+ >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F32ConvertSI64>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ 
result = fpr(); >+ append(ConvertInt64ToFloat, arg0, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64Rotl>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ return addShift(RotateLeft64, arg0, arg1, result); >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F32Lt>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(CompareFloat, Arg::relCond(MacroAssembler::DoubleLessThan), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F64ConvertSI32>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ append(ConvertInt32ToDouble, arg0, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F64Eq>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(CompareDouble, Arg::relCond(MacroAssembler::DoubleEqual), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F32Le>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(CompareFloat, Arg::relCond(MacroAssembler::DoubleLessThanOrEqual), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F32Ge>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(CompareFloat, Arg::relCond(MacroAssembler::DoubleGreaterThanOrEqual), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32ShrU>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ return addShift(Urshift32, arg0, arg1, result); >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F32ConvertUI32>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ Tmp temp = tmp(); >+ 
append(Move32, arg0, temp); >+ append(ConvertInt64ToFloat, temp, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32ShrS>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ return addShift(Rshift32, arg0, arg1, result); >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32GeU>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Compare32, Arg::relCond(MacroAssembler::AboveOrEqual), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F64Ceil>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ append(CeilDouble, arg0, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32GeS>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Compare32, Arg::relCond(MacroAssembler::GreaterThanOrEqual), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32Shl>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ return addShift(Lshift32, arg0, arg1, result); >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F64Floor>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ append(FloorDouble, arg0, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32Xor>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Xor32, arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F32Abs>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ return addFloatingPointAbs(AbsFloat, arg0, result); >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F64Min>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ >+ 
BasicBlock* isEqual = m_code.addBlock(); >+ BasicBlock* notEqual = m_code.addBlock(); >+ BasicBlock* greaterThanOrEqual = m_code.addBlock(); >+ BasicBlock* continuation = m_code.addBlock(); >+ >+ append(m_currentBlock, BranchDouble, Arg::relCond(MacroAssembler::DoubleEqual), arg0, arg1); >+ m_currentBlock->setSuccessors(isEqual, notEqual); >+ >+ append(isEqual, OrDouble, arg0, arg1, result); >+ append(isEqual, Jump); >+ isEqual->setSuccessors(continuation); >+ >+ append(notEqual, MoveDouble, arg0, result); >+ append(notEqual, BranchDouble, Arg::relCond(MacroAssembler::DoubleLessThan), arg0, arg1); >+ notEqual->setSuccessors(continuation, greaterThanOrEqual); >+ >+ append(greaterThanOrEqual, MoveDouble, arg1, result); >+ append(greaterThanOrEqual, Jump); >+ greaterThanOrEqual->setSuccessors(continuation); >+ >+ m_currentBlock = continuation; >+ >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F32Mul>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ append(MulFloat, arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64Sub>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ return addIntegerSub(Sub64, arg0, arg1, result); >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32ReinterpretF32>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(MoveFloatTo32, arg0, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32Add>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Add32, arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F64Sub>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ append(SubDouble, arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto 
AirIRGenerator::addOp<OpType::I32Or>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Or32, arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64LtU>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Compare64, Arg::relCond(MacroAssembler::Below), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64LtS>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Compare64, Arg::relCond(MacroAssembler::LessThan), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F64ConvertSI64>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ append(ConvertInt64ToDouble, arg0, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64Xor>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Xor64, arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64GeU>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Compare64, Arg::relCond(MacroAssembler::AboveOrEqual), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64Mul>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Mul64, arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F32Sub>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ append(SubFloat, arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F64PromoteF32>(ExpressionType arg0, ExpressionType& result) -> 
PartialResult >+{ >+ result = fpr(); >+ append(ConvertFloatToDouble, arg0, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F64Add>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ append(AddDouble, arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64GeS>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Compare64, Arg::relCond(MacroAssembler::GreaterThanOrEqual), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64ExtendUI32>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Move32, arg0, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32Ne>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Compare32, Arg::relCond(MacroAssembler::NotEqual), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F64ReinterpretI64>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ append(Move64ToDouble, arg0, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F32Eq>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(CompareFloat, Arg::relCond(MacroAssembler::DoubleEqual), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64Eq>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Compare64, Arg::relCond(MacroAssembler::Equal), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F32Floor>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ append(FloorFloat, arg0, 
result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F32ConvertSI32>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ append(ConvertInt32ToFloat, arg0, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32Eqz>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Test32, Arg::resCond(MacroAssembler::Zero), arg0, arg0, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64ReinterpretF64>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(MoveDoubleTo64, arg0, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64ShrS>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ return addShift(Rshift64, arg0, arg1, result); >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64ShrU>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ return addShift(Urshift64, arg0, arg1, result); >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F64Sqrt>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ append(SqrtDouble, arg0, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64Shl>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ return addShift(Lshift64, arg0, arg1, result); >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F32Gt>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(CompareFloat, Arg::relCond(MacroAssembler::DoubleGreaterThan), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32WrapI64>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Move32, arg0, result); >+ return { }; >+} >+ >+template<> auto 
AirIRGenerator::addOp<OpType::I32Rotl>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ return addShift(RotateLeft32, arg0, arg1, result); >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32Rotr>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ return addShift(RotateRight32, arg0, arg1, result); >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32GtU>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Compare32, Arg::relCond(MacroAssembler::Above), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64ExtendSI32>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(SignExtend32ToPtr, arg0, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I32GtS>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Compare32, Arg::relCond(MacroAssembler::GreaterThan), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F64Neg>(ExpressionType arg0, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ append(NegateDouble, arg0, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::F64Max>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = fpr(); >+ >+ BasicBlock* isEqual = m_code.addBlock(); >+ BasicBlock* notEqual = m_code.addBlock(); >+ BasicBlock* lessThan = m_code.addBlock(); >+ BasicBlock* continuation = m_code.addBlock(); >+ >+ append(m_currentBlock, BranchDouble, Arg::relCond(MacroAssembler::DoubleEqual), arg0, arg1); >+ m_currentBlock->setSuccessors(isEqual, notEqual); >+ >+ append(isEqual, AndDouble, arg0, arg1, result); >+ append(isEqual, Jump); >+ 
isEqual->setSuccessors(continuation); >+ >+ append(notEqual, MoveDouble, arg0, result); >+ append(notEqual, BranchDouble, Arg::relCond(MacroAssembler::DoubleLessThan), arg0, arg1); >+ notEqual->setSuccessors(lessThan, continuation); >+ >+ append(lessThan, MoveDouble, arg1, result); >+ append(lessThan, Jump); >+ lessThan->setSuccessors(continuation); >+ >+ m_currentBlock = continuation; >+ >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64LeU>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Compare64, Arg::relCond(MacroAssembler::BelowOrEqual), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64LeS>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Compare64, Arg::relCond(MacroAssembler::LessThanOrEqual), arg0, arg1, result); >+ return { }; >+} >+ >+template<> auto AirIRGenerator::addOp<OpType::I64Add>(ExpressionType arg0, ExpressionType arg1, ExpressionType& result) -> PartialResult >+{ >+ result = gpr(); >+ append(Add64, arg0, arg1, result); >+ return { }; >+} >+ >+} } // namespace JSC::Wasm >+ >+#endif // ENABLE(WEBASSEMBLY) >Index: Source/JavaScriptCore/wasm/WasmB3IRGenerator.cpp >=================================================================== >--- Source/JavaScriptCore/wasm/WasmB3IRGenerator.cpp (revision 240477) >+++ Source/JavaScriptCore/wasm/WasmB3IRGenerator.cpp (working copy) >@@ -1838,7 +1838,7 @@ auto B3IRGenerator::addOp<OpType::I64Tru > patchpoint->numFPScratchRegisters = 1; > } > patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { >- AllowMacroScratchRegisterUsage allowScratch(jit); >+ AllowMacroScratchRegisterUsage allowScratch(jit); // OOPS: This looks super wrong given we don't say we clobber this! > FPRReg scratch = InvalidFPRReg; > FPRReg constant = InvalidFPRReg; > if (isX86()) {
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Formatted Diff
|
Diff
Attachments on
bug 191802
:
356297
|
357108
|
357110
|
360063
|
360163
|
360171
|
360204
|
360205
|
360309
|
360310
|
360328
|
360374
|
360382
|
360400
|
360415
|
360420
|
360421
|
360423
|
360444
|
360452
|
360495
|
360498
|
360531
|
360648
|
360652
|
360655
|
360669