V4 JIT: tune generated instructions for inplace binops
author Erik Verbruggen <erik.verbruggen@digia.com>
Fri, 27 Jun 2014 12:37:50 +0000 (14:37 +0200)
committer Erik Verbruggen <erik.verbruggen@digia.com>
Tue, 12 Aug 2014 07:50:28 +0000 (09:50 +0200)
Generate better code for in-place binary operations where the right-hand
side is either a constant or a memory address. Now that the JIT can do
this, also tell the register allocator not to un-spill that right-hand
side.
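
To illustrate the idea behind the patch (a minimal sketch with hypothetical stand-in names, not code from this commit): the new Address overloads added to MacroAssemblerARMv7.h give the code generator an "op [address], register" form even though ARMv7 has no such instruction, by routing the memory operand through the data temp register. That in turn lets the register allocator leave the right-hand side spilled instead of forcing it back into a register.

// Minimal sketch, hypothetical stand-in types; mirrors the shape of the new
// mul32/or32/xor32(Address, RegisterID) helpers added by this patch.
#include <cstdint>
#include <iostream>

struct SketchAssembler {
    int32_t dataTempRegister = 0; // stand-in for the ARMv7 data temp register

    void load32(const int32_t *srcAddr, int32_t &destReg) { destReg = *srcAddr; }
    void mul32(int32_t srcReg, int32_t &destReg) { destReg *= srcReg; }

    // ARMv7 cannot combine a register with a memory operand directly, so the
    // Address overload lowers to: load into the temp register, then use the
    // register-register form.
    void mul32(const int32_t *srcAddr, int32_t &destReg) {
        load32(srcAddr, dataTempRegister);
        mul32(dataTempRegister, destReg);
    }
};

int main() {
    SketchAssembler as;
    int32_t spilledRhs = 6; // value the register allocator left in memory
    int32_t acc = 7;        // value held in a register
    as.mul32(&spilledRhs, acc); // in-place "acc *= [spilledRhs]" without un-spilling
    std::cout << acc << '\n';   // prints 42
    return 0;
}

On x86, the same in-place pattern maps directly onto the reg/mem instruction forms, so no temp register is needed there.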

Change-Id: I0ab852f6b92f90dfed99c05fbaf91aad2549ecf4
Reviewed-by: Simon Hausmann <simon.hausmann@digia.com>
src/3rdparty/masm/assembler/MacroAssemblerARMv7.h
src/qml/compiler/qv4ssa.cpp
src/qml/jit/qv4assembler.cpp
src/qml/jit/qv4assembler_p.h
src/qml/jit/qv4binop.cpp
src/qml/jit/qv4isel_masm.cpp
src/qml/jit/qv4regalloc.cpp

diff --git a/src/3rdparty/masm/assembler/MacroAssemblerARMv7.h b/src/3rdparty/masm/assembler/MacroAssemblerARMv7.h
index 2be073e..15e427b 100644
@@ -321,6 +321,12 @@ public:
         m_assembler.smull(dest, dataTempRegister, op1, op2);
     }
 
+    void mul32(Address src, RegisterID dest)
+    {
+        load32(src, dataTempRegister);
+        mul32(dataTempRegister, dest);
+    }
+
     void neg32(RegisterID srcDest)
     {
         m_assembler.neg(srcDest, srcDest);
@@ -330,6 +336,12 @@ public:
     {
         m_assembler.orr(dest, dest, src);
     }
+
+    void or32(Address src, RegisterID dest)
+    {
+        load32(src, dataTempRegister);
+        or32(dataTempRegister, dest);
+    }
     
     void or32(RegisterID src, AbsoluteAddress dest)
     {
@@ -466,6 +478,12 @@ public:
         store32(dataTempRegister, address.m_ptr);
     }
 
+    void xor32(Address src, RegisterID dest)
+    {
+        load32(src, dataTempRegister);
+        xor32(dataTempRegister, dest);
+    }
+
     void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
     {
         m_assembler.eor(dest, op1, op2);
diff --git a/src/qml/compiler/qv4ssa.cpp b/src/qml/compiler/qv4ssa.cpp
index 5e9401a..8c5f2e3 100644
@@ -3887,13 +3887,22 @@ void optimizeSSA(StatementWorklist &W, DefUses &defUses, DominatorTree &df)
                             continue;
                         }
                     }
-                    if (rightConst) { // mask right hand side of shift operations
+                    if (rightConst) {
                         switch (binop->op) {
                         case OpLShift:
                         case OpRShift:
-                        case OpURShift:
-                            rightConst->value = QV4::Primitive::toInt32(rightConst->value) & 0x1f;
-                            rightConst->type = SInt32Type;
+                            if (double v = QV4::Primitive::toInt32(rightConst->value) & 0x1f) {
+                                // mask right hand side of shift operations
+                                rightConst->value = v;
+                                rightConst->type = SInt32Type;
+                            } else {
+                                // shifting a value over 0 bits is a move:
+                                if (rightConst->value == 0) {
+                                    m->source = binop->left;
+                                    W += m;
+                                }
+                            }
+
                             break;
                         default:
                             break;
diff --git a/src/qml/jit/qv4assembler.cpp b/src/qml/jit/qv4assembler.cpp
index cb2279b..1e9669a 100644
@@ -343,24 +343,9 @@ Assembler::Jump Assembler::genTryDoubleConversion(IR::Expr *src, Assembler::FPRe
     return isNoDbl;
 }
 
-#if !defined(QT_NO_DEBUG) || defined(QT_FORCE_ASSERTS)
-namespace {
-inline bool isPregOrConst(IR::Expr *e)
-{
-    if (IR::Temp *t = e->asTemp())
-        return t->kind == IR::Temp::PhysicalRegister;
-    return e->asConst() != 0;
-}
-} // anonymous namespace
-#endif
-
 Assembler::Jump Assembler::branchDouble(bool invertCondition, IR::AluOp op,
                                                    IR::Expr *left, IR::Expr *right)
 {
-    Q_ASSERT(isPregOrConst(left));
-    Q_ASSERT(isPregOrConst(right));
-    Q_ASSERT(left->asConst() == 0 || right->asConst() == 0);
-
     Assembler::DoubleCondition cond;
     switch (op) {
     case IR::OpGt: cond = Assembler::DoubleGreaterThan; break;
@@ -377,7 +362,7 @@ Assembler::Jump Assembler::branchDouble(bool invertCondition, IR::AluOp op,
     if (invertCondition)
         cond = JSC::MacroAssembler::invert(cond);
 
-    return JSC::MacroAssembler::branchDouble(cond, toDoubleRegister(left), toDoubleRegister(right));
+    return JSC::MacroAssembler::branchDouble(cond, toDoubleRegister(left, FPGpr0), toDoubleRegister(right, FPGpr1));
 }
 
 
diff --git a/src/qml/jit/qv4assembler_p.h b/src/qml/jit/qv4assembler_p.h
index 5e1f162..4c5d2ae 100644
@@ -283,8 +283,8 @@ public:
         ConstantTable(Assembler *as): _as(as) {}
 
         int add(const QV4::Primitive &v);
-        ImplicitAddress loadValueAddress(IR::Const *c, RegisterID baseReg);
-        ImplicitAddress loadValueAddress(const QV4::Primitive &v, RegisterID baseReg);
+        Address loadValueAddress(IR::Const *c, RegisterID baseReg);
+        Address loadValueAddress(const QV4::Primitive &v, RegisterID baseReg);
         void finalize(JSC::LinkBuffer &linkBuffer, InstructionSelection *isel);
 
     private:
diff --git a/src/qml/jit/qv4binop.cpp b/src/qml/jit/qv4binop.cpp
index a19072f..4f231f0 100644
 using namespace QV4;
 using namespace JIT;
 
-namespace {
-inline bool isPregOrConst(IR::Expr *e)
-{
-    if (IR::Temp *t = e->asTemp())
-        return t->kind == IR::Temp::PhysicalRegister;
-    return e->asConst() != 0;
-}
-} // anonymous namespace
-
-
 #define OP(op) \
     { isel_stringIfy(op), op, 0, 0, 0 }
 #define OPCONTEXT(op) \
@@ -115,8 +105,7 @@ const Binop::OpInfo Binop::operations[IR::LastAluOp + 1] = {
 void Binop::generate(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
 {
     if (op != IR::OpMod
-            && lhs->type == IR::DoubleType && rhs->type == IR::DoubleType
-            && isPregOrConst(lhs) && isPregOrConst(rhs)) {
+            && lhs->type == IR::DoubleType && rhs->type == IR::DoubleType) {
         doubleBinop(lhs, rhs, target);
         return;
     }
@@ -158,9 +147,6 @@ void Binop::generate(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
 
 void Binop::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
 {
-    Q_ASSERT(lhs->asConst() == 0 || rhs->asConst() == 0);
-    Q_ASSERT(isPregOrConst(lhs));
-    Q_ASSERT(isPregOrConst(rhs));
     IR::Temp *targetTemp = target->asTemp();
     Assembler::FPRegisterID targetReg;
     if (targetTemp && targetTemp->kind == IR::Temp::PhysicalRegister)
@@ -170,58 +156,106 @@ void Binop::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
 
     switch (op) {
     case IR::OpAdd:
-        as->addDouble(as->toDoubleRegister(lhs), as->toDoubleRegister(rhs),
-                       targetReg);
+        if (lhs->asConst())
+            std::swap(lhs, rhs); // Y = constant + X -> Y = X + constant
+
+#if CPU(X86)
+        if (IR::Const *c = rhs->asConst()) { // Y = X + constant -> Y = X; Y += [constant-address]
+            as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg);
+            Assembler::Address addr = as->constantTable().loadValueAddress(c, Assembler::ScratchRegister);
+            as->addDouble(addr, targetReg);
+            break;
+        }
+        if (IR::Temp *t = rhs->asTemp()) { // Y = X + [temp-memory-address] -> Y = X; Y += [temp-memory-address]
+            if (t->kind != IR::Temp::PhysicalRegister) {
+                as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg);
+                as->addDouble(as->loadTempAddress(t), targetReg);
+                break;
+            }
+        }
+#endif
+        as->addDouble(as->toDoubleRegister(lhs, Assembler::FPGpr0), as->toDoubleRegister(rhs, Assembler::FPGpr1), targetReg);
         break;
+
     case IR::OpMul:
-        as->mulDouble(as->toDoubleRegister(lhs), as->toDoubleRegister(rhs),
-                       targetReg);
-        break;
-    case IR::OpSub:
-#if CPU(X86) || CPU(X86_64)
-        if (IR::Temp *rightTemp = rhs->asTemp()) {
-            if (rightTemp->kind == IR::Temp::PhysicalRegister && rightTemp->index == targetReg) {
-                as->moveDouble(targetReg, Assembler::FPGpr0);
+        if (lhs->asConst())
+            std::swap(lhs, rhs); // Y = constant * X -> Y = X * constant
+
+#if CPU(X86)
+        if (IR::Const *c = rhs->asConst()) { // Y = X * constant -> Y = X; Y *= [constant-address]
+            as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg);
+            Assembler::Address addr = as->constantTable().loadValueAddress(c, Assembler::ScratchRegister);
+            as->mulDouble(addr, targetReg);
+            break;
+        }
+        if (IR::Temp *t = rhs->asTemp()) { // Y = X * [temp-memory-address] -> Y = X; Y *= [temp-memory-address]
+            if (t->kind != IR::Temp::PhysicalRegister) {
                 as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg);
-                as->subDouble(Assembler::FPGpr0, targetReg);
+                as->mulDouble(as->loadTempAddress(t), targetReg);
                 break;
             }
-        } else if (rhs->asConst() && targetReg == Assembler::FPGpr0) {
-            Q_ASSERT(lhs->asTemp());
-            Q_ASSERT(lhs->asTemp()->kind == IR::Temp::PhysicalRegister);
+        }
+#endif
+        as->mulDouble(as->toDoubleRegister(lhs, Assembler::FPGpr0), as->toDoubleRegister(rhs, Assembler::FPGpr1), targetReg);
+        break;
+
+    case IR::OpSub:
+#if CPU(X86)
+        if (IR::Const *c = rhs->asConst()) { // Y = X - constant -> Y = X; Y -= [constant-address]
             as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg);
-            Assembler::FPRegisterID reg = (Assembler::FPRegisterID) lhs->asTemp()->index;
-            as->moveDouble(as->toDoubleRegister(rhs, reg), reg);
-            as->subDouble(reg, targetReg);
+            Assembler::Address addr = as->constantTable().loadValueAddress(c, Assembler::ScratchRegister);
+            as->subDouble(addr, targetReg);
             break;
         }
+        if (IR::Temp *t = rhs->asTemp()) { // Y = X - [temp-memory-address] -> Y = X; Y -= [temp-memory-address]
+            if (t->kind != IR::Temp::PhysicalRegister) {
+                as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg);
+                as->subDouble(as->loadTempAddress(t), targetReg);
+                break;
+            }
+        }
 #endif
+        if (rhs->asTemp() && rhs->asTemp()->kind == IR::Temp::PhysicalRegister
+                && targetTemp
+                && targetTemp->kind == IR::Temp::PhysicalRegister
+                && targetTemp->index == rhs->asTemp()->index) { // Y = X - Y -> Tmp = Y; Y = X - Tmp
+            as->moveDouble(as->toDoubleRegister(rhs, Assembler::FPGpr1), Assembler::FPGpr1);
+            as->subDouble(as->toDoubleRegister(lhs, Assembler::FPGpr0), Assembler::FPGpr1, targetReg);
+            break;
+        }
 
-        as->subDouble(as->toDoubleRegister(lhs), as->toDoubleRegister(rhs),
-                       targetReg);
+        as->subDouble(as->toDoubleRegister(lhs, Assembler::FPGpr0), as->toDoubleRegister(rhs, Assembler::FPGpr1), targetReg);
         break;
+
     case IR::OpDiv:
-#if CPU(X86) || CPU(X86_64)
-        if (IR::Temp *rightTemp = rhs->asTemp()) {
-            if (rightTemp->kind == IR::Temp::PhysicalRegister && rightTemp->index == targetReg) {
-                as->moveDouble(targetReg, Assembler::FPGpr0);
+#if CPU(X86)
+        if (IR::Const *c = rhs->asConst()) { // Y = X / constant -> Y = X; Y /= [constant-address]
+            as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg);
+            Assembler::Address addr = as->constantTable().loadValueAddress(c, Assembler::ScratchRegister);
+            as->divDouble(addr, targetReg);
+            break;
+        }
+        if (IR::Temp *t = rhs->asTemp()) { // Y = X / [temp-memory-address] -> Y = X; Y /= [temp-memory-address]
+            if (t->kind != IR::Temp::PhysicalRegister) {
                 as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg);
-                as->divDouble(Assembler::FPGpr0, targetReg);
+                as->divDouble(as->loadTempAddress(t), targetReg);
                 break;
             }
-        } else if (rhs->asConst() && targetReg == Assembler::FPGpr0) {
-            Q_ASSERT(lhs->asTemp());
-            Q_ASSERT(lhs->asTemp()->kind == IR::Temp::PhysicalRegister);
-            as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg);
-            Assembler::FPRegisterID reg = (Assembler::FPRegisterID) lhs->asTemp()->index;
-            as->moveDouble(as->toDoubleRegister(rhs, reg), reg);
-            as->divDouble(reg, targetReg);
-            break;
         }
 #endif
-        as->divDouble(as->toDoubleRegister(lhs), as->toDoubleRegister(rhs),
-                       targetReg);
+
+        if (rhs->asTemp() && rhs->asTemp()->kind == IR::Temp::PhysicalRegister
+                && targetTemp
+                && targetTemp->kind == IR::Temp::PhysicalRegister
+                && targetTemp->index == rhs->asTemp()->index) { // Y = X / Y -> Tmp = Y; Y = X / Tmp
+            as->moveDouble(as->toDoubleRegister(rhs, Assembler::FPGpr1), Assembler::FPGpr1);
+            as->divDouble(as->toDoubleRegister(lhs, Assembler::FPGpr0), Assembler::FPGpr1, targetReg);
+            break;
+        }
+
+        as->divDouble(as->toDoubleRegister(lhs, Assembler::FPGpr0), as->toDoubleRegister(rhs, Assembler::FPGpr1), targetReg);
         break;
+
     default: {
         Q_ASSERT(target->type == IR::BoolType);
         Assembler::Jump trueCase = as->branchDouble(false, op, lhs, rhs);
@@ -234,173 +268,198 @@ void Binop::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
     }
 
     if (!targetTemp || targetTemp->kind != IR::Temp::PhysicalRegister)
-        as->storeDouble(Assembler::FPGpr0, target);
+        as->storeDouble(targetReg, target);
 }
 
 
 bool Binop::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *target)
 {
     Q_ASSERT(leftSource->type == IR::SInt32Type);
+    Q_ASSERT(rightSource->type == IR::SInt32Type);
+
+    switch (op) {
+    case IR::OpBitAnd:
+    case IR::OpBitOr:
+    case IR::OpBitXor:
+    case IR::OpAdd:
+    case IR::OpMul:
+        if (leftSource->asConst()) // X = Const op Y -> X = Y op Const
+            std::swap(leftSource, rightSource);
+        else if (IR::Temp *t = leftSource->asTemp()) {
+            if (t->kind != IR::Temp::PhysicalRegister) // X = [address] op Y -> X = Y op [address]
+                std::swap(leftSource, rightSource);
+        }
+        break;
+
+    case IR::OpLShift:
+    case IR::OpRShift:
+    case IR::OpURShift:
+    case IR::OpSub:
+        // handled by this method, but we can't flip operands.
+        break;
+
+    default:
+        return false; // not handled by this method, stop here.
+    }
+
+    bool inplaceOpWithAddress = false;
+
     IR::Temp *targetTemp = target->asTemp();
     Assembler::RegisterID targetReg = Assembler::ReturnValueRegister;
     if (targetTemp && targetTemp->kind == IR::Temp::PhysicalRegister) {
-        // We try to load leftSource into the target's register, but we can't do that if
-        // the target register is the same as rightSource.
         IR::Temp *rhs = rightSource->asTemp();
-        if (!rhs || rhs->kind != IR::Temp::PhysicalRegister || rhs->index != targetTemp->index)
+        if (!rhs || rhs->kind != IR::Temp::PhysicalRegister || rhs->index != targetTemp->index) {
+            // We try to load leftSource into the target's register, but we can't do that if
+            // the target register is the same as rightSource.
             targetReg = (Assembler::RegisterID) targetTemp->index;
+        } else if (rhs && rhs->kind == IR::Temp::PhysicalRegister && targetTemp->index == rhs->index) {
+            // However, if the target register is the same as the rightSource register, we can flip
+            // the operands for certain operations.
+            switch (op) {
+            case IR::OpBitAnd:
+            case IR::OpBitOr:
+            case IR::OpBitXor:
+            case IR::OpAdd:
+            case IR::OpMul:
+                // X = Y op X -> X = X op Y (or rephrased: X op= Y (so an in-place operation))
+                std::swap(leftSource, rightSource);
+                targetReg = (Assembler::RegisterID) targetTemp->index;
+                break;
+
+            case IR::OpLShift:
+            case IR::OpRShift:
+            case IR::OpURShift:
+            case IR::OpSub:
+                break;
+
+            default:
+                Q_UNREACHABLE();
+                return false;
+            }
+        }
+
+        // determine if we have X op= [address]
+        if (IR::Temp *lhs = leftSource->asTemp()) {
+            if (lhs->kind == IR::Temp::PhysicalRegister && lhs->index == targetTemp->index) {
+                if (IR::Temp *rhs = rightSource->asTemp()) {
+                    if (rhs->kind != IR::Temp::PhysicalRegister) {
+                        switch (op) {
+                        case IR::OpBitAnd:
+                        case IR::OpBitOr:
+                        case IR::OpBitXor:
+                        case IR::OpAdd:
+                        case IR::OpMul:
+                            inplaceOpWithAddress = true;
+                            break;
+                        default:
+                            break;
+                        }
+                    }
+                }
+            }
+        }
     }
 
-    switch (op) {
-    case IR::OpBitAnd: {
-        Q_ASSERT(rightSource->type == IR::SInt32Type);
+    if (op == IR::OpSub) {
         if (rightSource->asTemp() && rightSource->asTemp()->kind == IR::Temp::PhysicalRegister
                 && targetTemp
                 && targetTemp->kind == IR::Temp::PhysicalRegister
                 && targetTemp->index == rightSource->asTemp()->index) {
-            as->and32(as->toInt32Register(leftSource, Assembler::ScratchRegister),
-                       (Assembler::RegisterID) targetTemp->index);
-            return true;
+            // X = Y - X -> Tmp = X; X = Y; X -= Tmp
+            targetReg = (Assembler::RegisterID) targetTemp->index;
+            as->move(targetReg, Assembler::ScratchRegister);
+            as->move(as->toInt32Register(leftSource, targetReg), targetReg);
+            as->sub32(Assembler::ScratchRegister, targetReg);
+        } else {
+            as->move(as->toInt32Register(leftSource, targetReg), targetReg);
+            as->sub32(as->toInt32Register(rightSource, Assembler::ScratchRegister), targetReg);
         }
-
-        as->and32(as->toInt32Register(leftSource, targetReg),
-                   as->toInt32Register(rightSource, Assembler::ScratchRegister),
-                   targetReg);
         as->storeInt32(targetReg, target);
-    } return true;
-    case IR::OpBitOr: {
-        Q_ASSERT(rightSource->type == IR::SInt32Type);
-        if (rightSource->asTemp() && rightSource->asTemp()->kind == IR::Temp::PhysicalRegister
-                && targetTemp
-                && targetTemp->kind == IR::Temp::PhysicalRegister
-                && targetTemp->index == rightSource->asTemp()->index) {
-            as->or32(as->toInt32Register(leftSource, Assembler::ScratchRegister),
-                       (Assembler::RegisterID) targetTemp->index);
+        return true;
+    }
+
+    Assembler::RegisterID l = as->toInt32Register(leftSource, targetReg);
+    if (IR::Const *c = rightSource->asConst()) { // All cases of Y = X op Const
+        Assembler::TrustedImm32 r(int(c->value));
+
+        switch (op) {
+        case IR::OpBitAnd: as->and32(r, l, targetReg); break;
+        case IR::OpBitOr:  as->or32 (r, l, targetReg); break;
+        case IR::OpBitXor: as->xor32(r, l, targetReg); break;
+        case IR::OpAdd:    as->add32(r, l, targetReg); break;
+        case IR::OpMul:    as->mul32(r, l, targetReg); break;
+
+        case IR::OpLShift:  r.m_value &= 0x1f; as->lshift32(l, r, targetReg); break;
+        case IR::OpRShift:  r.m_value &= 0x1f; as->rshift32(l, r, targetReg); break;
+        case IR::OpURShift: r.m_value &= 0x1f; as->urshift32(l, r, targetReg);
+            as->storeUInt32(targetReg, target); // IMPORTANT: do NOT do a break here! The stored type of an urshift is different from the other binary operations!
             return true;
+
+        case IR::OpSub: // already handled before
+        default: // not handled by this method:
+            Q_UNREACHABLE();
+            return false;
         }
+    } else if (inplaceOpWithAddress) { // All cases of X = X op [address-of-Y]
+        Assembler::Pointer rhsAddr = as->loadAddress(Assembler::ScratchRegister, rightSource);
+        switch (op) {
+        case IR::OpBitAnd: as->and32(rhsAddr, targetReg); break;
+        case IR::OpBitOr:  as->or32 (rhsAddr, targetReg); break;
+        case IR::OpBitXor: as->xor32(rhsAddr, targetReg); break;
+        case IR::OpAdd:    as->add32(rhsAddr, targetReg); break;
+        case IR::OpMul:    as->mul32(rhsAddr, targetReg); break;
+
 
-        as->or32(as->toInt32Register(leftSource, targetReg),
-                  as->toInt32Register(rightSource, Assembler::ScratchRegister),
-                  targetReg);
-        as->storeInt32(targetReg, target);
-    } return true;
-    case IR::OpBitXor: {
-        Q_ASSERT(rightSource->type == IR::SInt32Type);
-        if (rightSource->asTemp() && rightSource->asTemp()->kind == IR::Temp::PhysicalRegister
-                && targetTemp
-                && targetTemp->kind == IR::Temp::PhysicalRegister
-                && targetTemp->index == rightSource->asTemp()->index) {
-            as->xor32(as->toInt32Register(leftSource, Assembler::ScratchRegister),
-                       (Assembler::RegisterID) targetTemp->index);
-            return true;
+        default: // not handled by this method:
+            Q_UNREACHABLE();
+            return false;
         }
+    } else { // All cases of Z = X op Y
+        Assembler::RegisterID r = as->toInt32Register(rightSource, Assembler::ScratchRegister);
+        switch (op) {
+        case IR::OpBitAnd: as->and32(l, r, targetReg); break;
+        case IR::OpBitOr:  as->or32 (l, r, targetReg); break;
+        case IR::OpBitXor: as->xor32(l, r, targetReg); break;
+        case IR::OpAdd:    as->add32(l, r, targetReg); break;
+        case IR::OpMul:    as->mul32(l, r, targetReg); break;
 
-        as->xor32(as->toInt32Register(leftSource, targetReg),
-                   as->toInt32Register(rightSource, Assembler::ScratchRegister),
-                   targetReg);
-        as->storeInt32(targetReg, target);
-    } return true;
-    case IR::OpLShift: {
-        Q_ASSERT(rightSource->type == IR::SInt32Type);
-
-        if (IR::Const *c = rightSource->asConst()) {
-            if (int(c->value) == 0)
-                as->move(as->toInt32Register(leftSource, Assembler::ReturnValueRegister), targetReg);
-            else
-                as->lshift32(as->toInt32Register(leftSource, Assembler::ReturnValueRegister),
-                             Assembler::TrustedImm32(int(c->value) & 0x1f), targetReg);
-        } else {
-            as->move(as->toInt32Register(rightSource, Assembler::ScratchRegister),
-                      Assembler::ScratchRegister);
 #if CPU(ARM) || CPU(X86) || CPU(X86_64)
-            // The ARM assembler will generate this for us, and Intel will do it on the CPU.
+            // The ARM assembler will generate an and with 0x1f for us, and Intel will do it on the CPU.
+
+        case IR::OpLShift:  as->lshift32(l, r, targetReg); break;
+        case IR::OpRShift:  as->rshift32(l, r, targetReg); break;
+        case IR::OpURShift: as->urshift32(l, r, targetReg);
+            as->storeUInt32(targetReg, target); // IMPORTANT: do NOT do a break here! The stored type of an urshift is different from the other binary operations!
+            return true;
 #else
+        case IR::OpLShift:
+            as->move(r, Assembler::ScratchRegister);
             as->and32(Assembler::TrustedImm32(0x1f), Assembler::ScratchRegister);
-#endif
-            as->lshift32(as->toInt32Register(leftSource, targetReg), Assembler::ScratchRegister, targetReg);
-        }
-        as->storeInt32(targetReg, target);
-    } return true;
-    case IR::OpRShift: {
-        Q_ASSERT(rightSource->type == IR::SInt32Type);
-
-        if (IR::Const *c = rightSource->asConst()) {
-            if (int(c->value) == 0)
-                as->move(as->toInt32Register(leftSource, Assembler::ReturnValueRegister), targetReg);
-            else
-                as->rshift32(as->toInt32Register(leftSource, Assembler::ReturnValueRegister),
-                             Assembler::TrustedImm32(int(c->value) & 0x1f), targetReg);
-        } else {
-            as->move(as->toInt32Register(rightSource, Assembler::ScratchRegister),
-                      Assembler::ScratchRegister);
-#if CPU(ARM) || CPU(X86) || CPU(X86_64)
-            // The ARM assembler will generate this for us, and Intel will do it on the CPU.
-#else
+            as->lshift32(l, Assembler::ScratchRegister, targetReg);
+            break;
+
+        case IR::OpRShift:
+            as->move(r, Assembler::ScratchRegister);
             as->and32(Assembler::TrustedImm32(0x1f), Assembler::ScratchRegister);
-#endif
-            as->rshift32(as->toInt32Register(leftSource, targetReg), Assembler::ScratchRegister, targetReg);
-        }
-        as->storeInt32(targetReg, target);
-    } return true;
-    case IR::OpURShift:
-        Q_ASSERT(rightSource->type == IR::SInt32Type);
-
-        if (IR::Const *c = rightSource->asConst()) {
-            if (int(c->value) == 0)
-                as->move(as->toInt32Register(leftSource, Assembler::ReturnValueRegister), targetReg);
-            else
-                as->urshift32(as->toInt32Register(leftSource, Assembler::ReturnValueRegister),
-                              Assembler::TrustedImm32(int(c->value) & 0x1f), targetReg);
-        } else {
-            as->move(as->toInt32Register(rightSource, Assembler::ScratchRegister),
-                      Assembler::ScratchRegister);
-#if CPU(ARM) || CPU(X86) || CPU(X86_64)
-            // The ARM assembler will generate this for us, and Intel will do it on the CPU.
-#else
+            as->rshift32(l, Assembler::ScratchRegister, targetReg);
+            break;
+
+        case IR::OpURShift:
+            as->move(r, Assembler::ScratchRegister);
             as->and32(Assembler::TrustedImm32(0x1f), Assembler::ScratchRegister);
+            as->urshift32(l, Assembler::ScratchRegister, targetReg);
+            as->storeUInt32(targetReg, target); // IMPORTANT: do NOT do a break here! The stored type of an urshift is different from the other binary operations!
+            return true;
 #endif
-            as->urshift32(as->toInt32Register(leftSource, targetReg), Assembler::ScratchRegister, targetReg);
-        }
-        as->storeUInt32(targetReg, target);
-        return true;
-    case IR::OpAdd: {
-        Q_ASSERT(rightSource->type == IR::SInt32Type);
-
-        as->add32(as->toInt32Register(leftSource, targetReg),
-                   as->toInt32Register(rightSource, Assembler::ScratchRegister),
-                   targetReg);
-        as->storeInt32(targetReg, target);
-    } return true;
-    case IR::OpSub: {
-        Q_ASSERT(rightSource->type == IR::SInt32Type);
 
-        if (rightSource->asTemp() && rightSource->asTemp()->kind == IR::Temp::PhysicalRegister
-                && targetTemp
-                && targetTemp->kind == IR::Temp::PhysicalRegister
-                && targetTemp->index == rightSource->asTemp()->index) {
-            Assembler::RegisterID targetReg = (Assembler::RegisterID) targetTemp->index;
-            as->move(targetReg, Assembler::ScratchRegister);
-            as->move(as->toInt32Register(leftSource, targetReg), targetReg);
-            as->sub32(Assembler::ScratchRegister, targetReg);
-            as->storeInt32(targetReg, target);
-            return true;
+        case IR::OpSub: // already handled before
+        default: // not handled by this method:
+            Q_UNREACHABLE();
+            return false;
         }
-
-        as->move(as->toInt32Register(leftSource, targetReg), targetReg);
-        as->sub32(as->toInt32Register(rightSource, Assembler::ScratchRegister), targetReg);
-        as->storeInt32(targetReg, target);
-    } return true;
-    case IR::OpMul: {
-        Q_ASSERT(rightSource->type == IR::SInt32Type);
-
-        as->mul32(as->toInt32Register(leftSource, targetReg),
-                   as->toInt32Register(rightSource, Assembler::ScratchRegister),
-                   targetReg);
-        as->storeInt32(targetReg, target);
-    } return true;
-    default:
-        return false;
     }
+
+    as->storeInt32(targetReg, target);
+    return true;
 }
 
 static inline Assembler::FPRegisterID getFreeFPReg(IR::Expr *shouldNotOverlap, unsigned hint)
diff --git a/src/qml/jit/qv4isel_masm.cpp b/src/qml/jit/qv4isel_masm.cpp
index 8449ff6..b91b5ea 100644
@@ -1541,17 +1541,15 @@ int Assembler::ConstantTable::add(const Primitive &v)
     return idx;
 }
 
-Assembler::ImplicitAddress Assembler::ConstantTable::loadValueAddress(IR::Const *c,
-                                                                      RegisterID baseReg)
+Assembler::Address Assembler::ConstantTable::loadValueAddress(IR::Const *c, RegisterID baseReg)
 {
     return loadValueAddress(convertToValue(c), baseReg);
 }
 
-Assembler::ImplicitAddress Assembler::ConstantTable::loadValueAddress(const Primitive &v,
-                                                                      RegisterID baseReg)
+Assembler::Address Assembler::ConstantTable::loadValueAddress(const Primitive &v, RegisterID baseReg)
 {
     _toPatch.append(_as->moveWithPatch(TrustedImmPtr(0), baseReg));
-    ImplicitAddress addr(baseReg);
+    Address addr(baseReg);
     addr.offset = add(v) * sizeof(QV4::Primitive);
     Q_ASSERT(addr.offset >= 0);
     return addr;
diff --git a/src/qml/jit/qv4regalloc.cpp b/src/qml/jit/qv4regalloc.cpp
index 8ba3320..cccc07a 100644
@@ -649,16 +649,35 @@ protected: // IRDecoder
         } else {
             addUses(leftSource->asTemp(), Use::MustHaveRegister);
             addHint(target, leftSource->asTemp());
+            addHint(target, rightSource->asTemp());
 
-            addUses(rightSource->asTemp(), Use::MustHaveRegister);
+#if CPU(X86) || CPU(X86_64)
             switch (oper) {
+                // The rhs operand can be a memory address
             case OpAdd:
+            case OpSub:
             case OpMul:
-                addHint(target, rightSource->asTemp());
+            case OpDiv:
+#if CPU(X86_64)
+                if (leftSource->type == DoubleType || rightSource->type == DoubleType) {
+                    // On 64-bit, doubles are mangled, so they must first be loaded into a register and demangled:
+                    addUses(rightSource->asTemp(), Use::MustHaveRegister);
+                    break;
+                }
+#endif
+            case OpBitAnd:
+            case OpBitOr:
+            case OpBitXor:
+                addUses(rightSource->asTemp(), Use::CouldHaveRegister);
                 break;
+
             default:
+                addUses(rightSource->asTemp(), Use::MustHaveRegister);
                 break;
             }
+#else
+            addUses(rightSource->asTemp(), Use::MustHaveRegister);
+#endif
         }
     }