WebKit Bugzilla
Attachment 361240 Details for
Bug 194252
: B3ReduceStrength: missing peephole optimizations for binary operations
Home
|
New
|
Browse
|
Search
|
[?]
|
Reports
|
Requests
|
Help
|
New Account
|
Log In
Remember
[x]
|
Forgot Password
Login:
[x]
[patch]
WIP_missing_tests
bug-194252-20190205161426.patch (text/plain), 11.31 KB, created by
Robin Morisset
on 2019-02-05 16:14:27 PST
(
hide
)
Description:
WIP_missing_tests
Filename:
MIME Type:
Creator:
Robin Morisset
Created:
2019-02-05 16:14:27 PST
Size:
11.31 KB
patch
obsolete
>Index: Source/JavaScriptCore/ChangeLog >=================================================================== >--- Source/JavaScriptCore/ChangeLog (revision 240999) >+++ Source/JavaScriptCore/ChangeLog (working copy) >@@ -1,3 +1,13 @@ >+2019-02-05 Robin Morisset <rmorisset@apple.com> >+ >+ B3ReduceStrength: missing peephole optimizations for binary operations >+ https://bugs.webkit.org/show_bug.cgi?id=194252 >+ >+ Reviewed by NOBODY (OOPS!). >+ >+ * b3/B3ReduceStrength.cpp: >+ * b3/testb3.cpp: >+ > 2019-02-05 Mark Lam <mark.lam@apple.com> > > Fix DFG's doesGC() for a few more nodes. >Index: Source/JavaScriptCore/b3/B3ReduceStrength.cpp >=================================================================== >--- Source/JavaScriptCore/b3/B3ReduceStrength.cpp (revision 240957) >+++ Source/JavaScriptCore/b3/B3ReduceStrength.cpp (working copy) >@@ -948,6 +948,7 @@ private: > && !(m_value->child(1)->asInt32() & 0xffffff00)) { > m_value->child(0) = m_value->child(0)->child(0); > m_changed = true; >+ break; > } > > // Turn this: BitAnd(SExt16(value), mask) where (mask & 0xffff0000) == 0 >@@ -956,6 +957,7 @@ private: > && !(m_value->child(1)->asInt32() & 0xffff0000)) { > m_value->child(0) = m_value->child(0)->child(0); > m_changed = true; >+ break; > } > > // Turn this: BitAnd(SExt32(value), mask) where (mask & 0xffffffff00000000) == 0 >@@ -966,6 +968,7 @@ private: > m_index, ZExt32, m_value->origin(), > m_value->child(0)->child(0), m_value->child(0)->child(1)); > m_changed = true; >+ break; > } > > // Turn this: BitAnd(Op(value, constant1), constant2) >@@ -987,7 +990,40 @@ private: > default: > break; > } >+ break; >+ } >+ >+ // Turn this: BitAnd(BitXor(x1, allOnes), BitXor(x2, allOnes)) >+ // Into this: BitXor(BitOr(x1, x2), allOnes) >+ // By applying De Morgan's laws >+ if (m_value->child(0)->opcode() == BitXor >+ && m_value->child(1)->opcode() == BitXor >+ && ((m_value->type() == Int64 >+ && m_value->child(0)->child(1)->isInt(0xffffffffffffffff) >+ && 
m_value->child(1)->child(1)->isInt(0xffffffffffffffff)) || >+ (m_value->type() == Int32 >+ && m_value->child(0)->child(1)->isInt(0xffffffff) >+ && m_value->child(1)->child(1)->isInt(0xffffffff)))) { >+ Value* bitOr = m_insertionSet.insert<Value>(m_index, BitOr, m_value->origin(), m_value->child(0)->child(0), m_value->child(1)->child(0)); >+ replaceWithNew<Value>(BitXor, m_value->origin(), bitOr, m_value->child(1)->child(1)); >+ break; > } >+ >+ // Turn this: BitAnd(BitXor(x, allOnes), c) >+ // Into this: BitXor(BitOr(x, ~c), allOnes) >+ // This is a variation on the previous optimization, treating c as if it were BitXor(~c, allOnes) >+ // It does not reduce the number of operations, but provides some normalization (we try to get BitXor by allOnes at the outermost point), and some chance to float Xors to a place where they might get eliminated. >+ if (m_value->child(0)->opcode() == BitXor >+ && m_value->child(1)->hasInt() >+ && ((m_value->type() == Int64 >+ && m_value->child(0)->child(1)->isInt(0xffffffffffffffff)) || >+ (m_value->type() == Int32 >+ && m_value->child(0)->child(1)->isInt(0xffffffff)))) { >+ Value* bitOr = m_insertionSet.insert<Value>(m_index, BitOr, m_value->origin(),m_value->child(0)->child(0), m_value->child(1)->bitXorConstant(m_proc, m_value->child(0)->child(1))); >+ replaceWithNew<Value>(BitXor, m_value->origin(), bitOr, m_value->child(0)->child(1)); >+ break; >+ } >+ > break; > > case BitOr: >@@ -1034,6 +1070,40 @@ private: > break; > } > >+ // Turn this: BitOr(BitXor(x1, allOnes), BitXor(x2, allOnes)) >+ // Into this: BitXor(BitAnd(x1, x2), allOnes) >+ // By applying De Morgan's laws >+ if (m_value->child(0)->opcode() == BitXor >+ && m_value->child(1)->opcode() == BitXor >+ && ((m_value->type() == Int64 >+ && m_value->child(0)->child(1)->isInt(0xffffffffffffffff) >+ && m_value->child(1)->child(1)->isInt(0xffffffffffffffff)) || >+ (m_value->type() == Int32 >+ && m_value->child(0)->child(1)->isInt(0xffffffff) >+ && 
m_value->child(1)->child(1)->isInt(0xffffffff)))) { >+ Value* bitAnd = m_insertionSet.insert<Value>(m_index, BitAnd, m_value->origin(), m_value->child(0)->child(0), m_value->child(1)->child(0)); >+ replaceWithNew<Value>(BitXor, m_value->origin(), bitAnd, m_value->child(1)->child(1)); >+ break; >+ } >+ >+ // Turn this: BitOr(BitXor(x, allOnes), c) >+ // Into this: BitXor(BitAnd(x, ~c), allOnes) >+ // This is a variation on the previous optimization, treating c as if it were BitXor(~c, allOnes) >+ // It does not reduce the number of operations, but provides some normalization (we try to get BitXor by allOnes at the outermost point), and some chance to float Xors to a place where they might get eliminated. >+ if (m_value->child(0)->opcode() == BitXor >+ && m_value->child(1)->hasInt() >+ && ((m_value->type() == Int64 >+ && m_value->child(0)->child(1)->isInt(0xffffffffffffffff)) || >+ (m_value->type() == Int32 >+ && m_value->child(0)->child(1)->isInt(0xffffffff)))) { >+ Value* bitAnd = m_insertionSet.insert<Value>(m_index, BitAnd, m_value->origin(),m_value->child(0)->child(0), m_value->child(1)->bitXorConstant(m_proc, m_value->child(0)->child(1))); >+ replaceWithNew<Value>(BitXor, m_value->origin(), bitAnd, m_value->child(0)->child(1)); >+ break; >+ } >+ >+ if (handleBitAndDistributivity()) >+ break; >+ > break; > > case BitXor: >@@ -1080,6 +1150,9 @@ private: > replaceWithIdentity(m_value->child(0)); > break; > } >+ >+ if (handleBitAndDistributivity()) >+ break; > > break; > >@@ -2166,6 +2239,69 @@ private: > } > } > >+ // Turn any of these: >+ // Op(BitAnd(x1, x2), BitAnd(x1, x3)) >+ // Op(BitAnd(x2, x1), BitAnd(x1, x3)) >+ // Op(BitAnd(x1, x2), BitAnd(x3, x1)) >+ // Op(BitAnd(x2, x1), BitAnd(x3, x1)) >+ // Into this: BitAnd(Op(x2, x3), x1) >+ // And any of these: >+ // Op(BitAnd(x1, x2), x1) >+ // Op(BitAnd(x2, x1), x1) >+ // Op(x1, BitAnd(x1, x2)) >+ // Op(x1, BitAnd(x2, x1)) >+ // Into this: BitAnd(Op(x2, x1), x1) >+ // This second set is equivalent to doing x1 => 
BitAnd(x1, x1), and then applying the first set. >+ // It does not reduce the number of operations executed, but provides some useful normalization: we prefer to have BitAnd at the outermost, then BitXor, and finally BitOr at the innermost >+ bool handleBitAndDistributivity() { >+ ASSERT(m_value->opcode() == BitOr || m_value->opcode() == BitXor); >+ Value* x1 = nullptr; >+ Value* x2 = nullptr; >+ Value* x3 = nullptr; >+ if (m_value->child(0)->opcode() == BitAnd && m_value->child(1)->opcode() == BitAnd) { >+ if (m_value->child(0)->child(0) == m_value->child(1)->child(0)) { >+ x1 = m_value->child(0)->child(0); >+ x2 = m_value->child(0)->child(1); >+ x3 = m_value->child(1)->child(1); >+ } else if (m_value->child(0)->child(1) == m_value->child(1)->child(0)) { >+ x1 = m_value->child(0)->child(1); >+ x2 = m_value->child(0)->child(0); >+ x3 = m_value->child(1)->child(1); >+ } else if (m_value->child(0)->child(0) == m_value->child(1)->child(1)) { >+ x1 = m_value->child(0)->child(0); >+ x2 = m_value->child(0)->child(1); >+ x3 = m_value->child(1)->child(0); >+ } else if (m_value->child(0)->child(1) == m_value->child(1)->child(1)) { >+ x1 = m_value->child(0)->child(1); >+ x2 = m_value->child(0)->child(0); >+ x3 = m_value->child(1)->child(0); >+ } >+ } else if (m_value->child(0)->opcode() == BitAnd) { >+ if (m_value->child(0)->child(0) == m_value->child(1)) { >+ x1 = x3 = m_value->child(1); >+ x2 = m_value->child(0)->child(1); >+ } else if (m_value->child(0)->child(1) == m_value->child(1)) { >+ x1 = x3 = m_value->child(1); >+ x2 = m_value->child(0)->child(0); >+ } >+ } else if (m_value->child(1)->opcode() == BitAnd) { >+ if (m_value->child(1)->child(0) == m_value->child(0)) { >+ x1 = x3 = m_value->child(0); >+ x2 = m_value->child(1)->child(1); >+ } else if (m_value->child(1)->child(1) == m_value->child(0)) { >+ x1 = x3 = m_value->child(0); >+ x2 = m_value->child(1)->child(0); >+ } >+ } >+ if (x1 != nullptr) { >+ ASSERT(x2 != nullptr && x3 != nullptr); >+ Value* bitOp = 
m_insertionSet.insert<Value>(m_index, m_value->opcode(), m_value->origin(), x2, x3); >+ replaceWithNew<Value>(BitAnd, m_value->origin(), bitOp, x1); >+ return true; >+ } >+ return false; >+ } >+ > struct CanonicalizedComparison { > Opcode opcode; > Value* operands[2]; >Index: Source/JavaScriptCore/b3/testb3.cpp >=================================================================== >--- Source/JavaScriptCore/b3/testb3.cpp (revision 240957) >+++ Source/JavaScriptCore/b3/testb3.cpp (working copy) >@@ -16337,6 +16337,22 @@ double negativeZero() > })); \ > } \ > } >+#define RUN_TERNARY(test, valuesA, valuesB, valuesC) \ >+ for (auto a : valuesA) { \ >+ for (auto b : valuesB) { \ >+ for (auto c : valuesC) { \ >+ CString testStr = toCString(#test, "(", a.name, ", ", b.name, ",", c.name, ")"); \ >+ if (!shouldRun(testStr.data())) \ >+ continue; \ >+ tasks.append(createSharedTask<void()>( \ >+ [=] () { \ >+ dataLog(toCString(testStr, "...\n")); \ >+ test(a.value, b.value, c.value); \ >+ dataLog(toCString(testStr, ": OK!\n")); \ >+ })); \ >+ } \ >+ } \ >+ } > > void run(const char* filter) > {
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Formatted Diff
|
Diff
Attachments on
bug 194252
:
361240
|
361312
|
362764