}
+void Assembler::andps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x54);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::orps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x56);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::xorps(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
}
-void Assembler::andps(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x54);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::pand(XMMRegister dst, XMMRegister src) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
// SSE instructions
void andps(XMMRegister dst, XMMRegister src);
void xorps(XMMRegister dst, XMMRegister src);
+ void orps(XMMRegister dst, XMMRegister src);
// SSE2 instructions
void cvttss2si(Register dst, const Operand& src);
__ pshufd(input, input, static_cast<uint8_t>(0xe1)); // Order: 11 10 00 01
__ movsd(double_scratch, Operand::StaticArray(
temp2, times_8, ExternalReference::math_exp_log_table()));
- __ por(input, double_scratch);
+ __ orps(input, double_scratch);
__ mulsd(result, input);
__ bind(&done);
}
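Note on the por → orps swap in the hunk above: both instructions are a plain 128-bit bitwise OR, so the result is bit-for-bit identical; orps simply has a shorter encoding (0F 56 versus 66 0F EB for por) and does not require SSE2. A minimal standalone sketch (SSE intrinsics, not V8 code) that checks the equivalence:

// Standalone sketch: the float-domain OR (orps) and the integer-domain OR
// (por) produce identical bits for the same 128-bit inputs.
#include <emmintrin.h>  // _mm_or_si128, casts; pulls in _mm_or_ps via xmmintrin.h
#include <cstdio>
#include <cstring>

int main() {
  const unsigned long long a[2] = {0x3ff0000000000000ULL, 0x0123456789abcdefULL};
  const unsigned long long b[2] = {0x8000000000000000ULL, 0xfedcba9876543210ULL};
  __m128i ia = _mm_loadu_si128(reinterpret_cast<const __m128i*>(a));
  __m128i ib = _mm_loadu_si128(reinterpret_cast<const __m128i*>(b));

  __m128i via_por  = _mm_or_si128(ia, ib);                                   // por
  __m128  via_orps = _mm_or_ps(_mm_castsi128_ps(ia), _mm_castsi128_ps(ib));  // orps

  unsigned char x[16], y[16];
  _mm_storeu_si128(reinterpret_cast<__m128i*>(x), via_por);
  _mm_storeu_si128(reinterpret_cast<__m128i*>(y), _mm_castps_si128(via_orps));
  std::printf("identical: %s\n", std::memcmp(x, y, 16) == 0 ? "yes" : "no");  // "yes"
  return 0;
}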
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (f0byte == 0x56) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("orps %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (f0byte == 0x57) {
data += 2;
int mod, regop, rm;
XMMRegister xmm_scratch = double_scratch0();
__ Set(temp, Immediate(lower));
__ movd(xmm_scratch, Operand(temp));
- __ por(res, xmm_scratch);
+ __ orps(res, xmm_scratch);
}
}
}
__ ucomisd(left_reg, left_reg); // NaN check.
__ j(parity_even, &return_left, Label::kNear); // left == NaN.
__ bind(&return_right);
- __ movsd(left_reg, right_reg);
+ __ movaps(left_reg, right_reg);
__ bind(&return_left);
}
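Note on the movsd → movaps substitutions in this and the later hunks: for a register-to-register copy, movsd only writes the low 64 bits and keeps the destination's upper half (so it also carries a dependency on the old destination value), while movaps copies the full register and is one byte shorter (0F 28 versus F2 0F 10). Since all of these sites want a full copy, movaps is the cheaper choice. A small standalone intrinsics sketch (not V8 code) of the merge behaviour:

// Standalone sketch: register-to-register movsd merges into the destination's
// upper lane; a full-register copy (movaps) replaces both lanes.
#include <emmintrin.h>
#include <cstdio>

int main() {
  __m128d dst = _mm_set_pd(111.0, 222.0);  // {high: 111.0, low: 222.0}
  __m128d src = _mm_set_pd(333.0, 444.0);  // {high: 333.0, low: 444.0}

  __m128d via_movsd  = _mm_move_sd(dst, src);  // models movsd dst,src: low from src, high kept
  __m128d via_movaps = src;                    // models movaps dst,src: both lanes from src

  double m[2], a[2];
  _mm_storeu_pd(m, via_movsd);
  _mm_storeu_pd(a, via_movaps);
  std::printf("movsd : low=%g high=%g\n", m[0], m[1]);  // low=444 high=111
  std::printf("movaps: low=%g high=%g\n", a[0], a[1]);  // low=444 high=333
  return 0;
}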
// CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
// compare and compensate.
- __ movsd(input_temp, input_reg); // Do not alter input_reg.
+ __ movaps(input_temp, input_reg); // Do not alter input_reg.
__ subsd(input_temp, xmm_scratch);
__ cvttsd2si(output_reg, Operand(input_temp));
// Catch minint due to overflow, and to prevent overflow when compensating.
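Background for the two comments above: cvttsd2si always truncates toward zero, and when the value does not fit in a signed 32-bit integer it returns the "integer indefinite" value 0x80000000, which is why the surrounding code catches minint right after the conversion. A quick standalone illustration of both behaviours (not V8 code):

// Standalone sketch: cvttsd2si truncates toward zero; out-of-range inputs
// yield the integer-indefinite value 0x80000000 (INT_MIN).
#include <emmintrin.h>
#include <cstdio>

int main() {
  std::printf("%d\n", _mm_cvttsd_si32(_mm_set_sd(2.7)));    // 2  (truncated)
  std::printf("%d\n", _mm_cvttsd_si32(_mm_set_sd(-2.7)));   // -2 (toward zero, not -3)
  std::printf("0x%08x\n",
              static_cast<unsigned>(_mm_cvttsd_si32(_mm_set_sd(3e9))));  // 0x80000000
  return 0;
}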
cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
__ movsd(xmm0, other);
__ movsd(other, reg);
- __ movsd(reg, Operand(xmm0));
+ __ movaps(reg, xmm0);
} else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), SSE2);
// Double-width memory-to-memory. Spill on demand to use a general
}
+void Assembler::orps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x56);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::xorps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x57);
+ emit_sse_operand(dst, src);
+}
+
+
// SSE 2 operations.
void Assembler::movd(XMMRegister dst, Register src) {
}
-void Assembler::xorps(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x57);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
void cvttss2si(Register dst, XMMRegister src);
void cvtlsi2ss(XMMRegister dst, Register src);
- void xorps(XMMRegister dst, XMMRegister src);
void andps(XMMRegister dst, XMMRegister src);
+ void orps(XMMRegister dst, XMMRegister src);
+ void xorps(XMMRegister dst, XMMRegister src);
void movmskps(Register dst, XMMRegister src);
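For reference, the new x64 orps emits at most four bytes: emit_optional_rex_32 produces a REX prefix only when one of the registers is xmm8–xmm15, so orps xmm1, xmm2 encodes as 0F 56 CA while orps xmm8, xmm9 becomes 45 0F 56 C1. A standalone sketch of the same encoding logic (function name and layout are illustrative, not V8's API):

// Standalone sketch (not V8 API): byte-level encoding of "orps dst, src"
// for register operands, mirroring optional REX + ModRM emission.
#include <cstdint>
#include <cstdio>
#include <vector>

// dst/src are XMM register numbers 0..15.
std::vector<uint8_t> EncodeOrps(int dst, int src) {
  std::vector<uint8_t> bytes;
  uint8_t rex = 0x40 | ((dst >> 3) << 2) | (src >> 3);   // REX.R from dst, REX.B from src
  if (rex != 0x40) bytes.push_back(rex);                 // optional REX: only when needed
  bytes.push_back(0x0F);
  bytes.push_back(0x56);                                 // orps opcode
  bytes.push_back(0xC0 | ((dst & 7) << 3) | (src & 7));  // ModRM: mod=11, reg=dst, rm=src
  return bytes;
}

int main() {
  const int cases[][2] = {{1, 2}, {8, 9}};
  for (const auto& c : cases) {
    std::printf("orps xmm%d, xmm%d:", c[0], c[1]);
    for (uint8_t b : EncodeOrps(c[0], c[1])) std::printf(" %02X", b);
    std::printf("\n");  // "0F 56 CA" and "45 0F 56 C1"
  }
  return 0;
}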
current += PrintOperands(idesc.mnem, idesc.op_order_, current);
} else if (opcode == 0x54) {
- // xorps xmm, xmm/m128
+ // andps xmm, xmm/m128
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("andps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x56) {
+ // orps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("orps %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
} else if (opcode == 0x57) {
// xorps xmm, xmm/m128
int mod, regop, rm;
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
// At this point, both left and right are either 0 or -0.
if (operation == HMathMinMax::kMathMin) {
- __ orpd(left_reg, right_reg);
+ __ orps(left_reg, right_reg);
} else {
// Since we operate on +0 and/or -0, addsd and andpd have the same effect.
__ addsd(left_reg, right_reg);
__ ucomisd(left_reg, left_reg); // NaN check.
__ j(parity_even, &return_left, Label::kNear);
__ bind(&return_right);
- __ movsd(left_reg, right_reg);
+ __ movaps(left_reg, right_reg);
__ bind(&return_left);
}
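Note on the orpd → orps change and the addsd comment above: at this point both inputs are known to be +0 or -0, so only the sign bit matters. Math.min must return -0 if either input is -0, which is exactly a bitwise OR of the two values (orps, orpd and por all compute the same OR; orps is just the shortest and needs no SSE2 check), while Math.max must return +0 unless both inputs are -0, which both a bitwise AND and an IEEE addition of the two zeros deliver (+0 + -0 = +0, -0 + -0 = -0). A tiny standalone check of those identities (not V8 code):

// Standalone sketch: for inputs restricted to +0.0 / -0.0, bitwise OR yields
// the correct minimum and plain addition yields the correct maximum.
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

static double BitOr(double a, double b) {
  uint64_t x, y, r;
  std::memcpy(&x, &a, sizeof x);
  std::memcpy(&y, &b, sizeof y);
  r = x | y;
  double d;
  std::memcpy(&d, &r, sizeof d);
  return d;
}

int main() {
  double pz = 0.0, nz = -0.0;
  std::printf("min(+0,-0) negative: %d\n", std::signbit(BitOr(pz, nz)));  // 1 -> -0
  std::printf("max(+0,-0) negative: %d\n", std::signbit(pz + nz));        // 0 -> +0
  std::printf("max(-0,-0) negative: %d\n", std::signbit(nz + nz));        // 1 -> -0
  return 0;
}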
Operand other_operand = cgen_->ToOperand(other);
__ movsd(xmm0, other_operand);
__ movsd(other_operand, reg);
- __ movsd(reg, xmm0);
+ __ movaps(reg, xmm0);
} else {
// No other combinations are possible.
__ cmpltsd(xmm0, xmm1);
__ andps(xmm0, xmm1);
+ __ orps(xmm0, xmm1);
__ andpd(xmm0, xmm1);
__ psllq(xmm0, 17);
__ psllq(xmm0, xmm1);
__ movaps(xmm0, xmm1);
__ andps(xmm0, xmm1);
+ __ orps(xmm0, xmm1);
+ __ xorps(xmm0, xmm1);
}
// SSE 2 instructions
{