}
+void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
+ ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
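+ // Encoding: 66 0F 3A 22 /r ib (pinsrd xmm, r/m32, imm8).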
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x22);
+ emit_sse_operand(dst, src);
+ EMIT(offset);
+}
+
+
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
emit_operand(ireg, adr);
void psrlq(XMMRegister dst, XMMRegister src);
void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
+ void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
// Parallel XMM operations.
void movntdqa(XMMRegister src, const Operand& dst);
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
AppendToBuffer("pextrd %s,%s,%d",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm),
+ NameOfCPURegister(rm),     // r/m field: GP destination.
+ NameOfXMMRegister(regop),  // reg field: XMM source.
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0x22) {
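+ // Third opcode byte 0x22 selects pinsrd xmm, r/m32, imm8.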
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("pinsrd %s,%s,%d",
+ NameOfXMMRegister(regop),
+ NameOfCPURegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
} else {
UnimplementedInstruction();
}
if (BitCast<uint64_t, double>(v) == 0) {
__ xorpd(res, res);
} else {
- int32_t v_int32 = static_cast<int32_t>(v);
- if (static_cast<double>(v_int32) == v) {
- __ push_imm32(v_int32);
- __ cvtsi2sd(res, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kPointerSize));
+ Register temp = ToRegister(instr->TempAt(0));
+ uint64_t int_val = BitCast<uint64_t, double>(v);
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatures::Scope scope(SSE4_1);
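+ // With SSE4_1 the double is built in place: movd writes bits 0..31
+ // and zeroes the rest; pinsrd ..., 1 writes bits 32..63.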
+ if (lower != 0) {
+ __ Set(temp, Immediate(lower));
+ __ movd(res, Operand(temp));
+ __ Set(temp, Immediate(upper));
+ __ pinsrd(res, Operand(temp), 1);
+ } else {
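+ // lower == 0: xorpd already clears the low dword, so only the
+ // upper half has to be inserted.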
+ __ xorpd(res, res);
+ __ Set(temp, Immediate(upper));
+ __ pinsrd(res, Operand(temp), 1);
+ }
} else {
- uint64_t int_val = BitCast<uint64_t, double>(v);
- int32_t lower = static_cast<int32_t>(int_val);
- int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
- __ push_imm32(upper);
- __ push_imm32(lower);
- __ movdbl(res, Operand(esp, 0));
- __ add(Operand(esp), Immediate(2 * kPointerSize));
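+ // Without SSE4_1: place the upper half, shift it into bits 32..63
+ // with psllq, then OR in the lower half via xmm0 as scratch.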
+ __ Set(temp, Immediate(upper));
+ __ movd(res, Operand(temp));
+ __ psllq(res, 32);
+ if (lower != 0) {
+ __ Set(temp, Immediate(lower));
+ __ movd(xmm0, Operand(temp));
+ __ por(res, xmm0);
+ }
}
}
}
if (r.IsInteger32()) {
return DefineAsRegister(new LConstantI);
} else if (r.IsDouble()) {
- return DefineAsRegister(new LConstantD);
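+ // Only a non-zero bit pattern needs a scratch GP register;
+ // +0.0 is materialized with xorpd alone.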
+ double value = instr->DoubleValue();
+ LOperand* temp = (BitCast<uint64_t, double>(value) != 0)
+ ? TempRegister()
+ : NULL;
+ return DefineAsRegister(new LConstantD(temp));
} else if (r.IsTagged()) {
return DefineAsRegister(new LConstantT);
} else {
};
-class LConstantD: public LTemplateInstruction<1, 0, 0> {
+class LConstantD: public LTemplateInstruction<1, 0, 1> {
public:
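+ // <1, 0, 1>: one result, no inputs, one temp slot (left NULL for +0.0).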
+ explicit LConstantD(LOperand* temp) {
+ temps_[0] = temp;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
}
}
+ {
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatures::Scope scope(SSE4_1);
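+ // Cover both SSE4.1 instructions: copy the high dword of xmm0 into
+ // eax, then back into the low dword of xmm1.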
+ __ pextrd(Operand(eax), xmm0, 1);
+ __ pinsrd(xmm1, Operand(eax), 0);
+ }
+ }
+
__ ret(0);
CodeDesc desc;