DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmVldr32: {
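+ // The loaded single is widened to float64 right away: values in
+ // registers are always float64 at this level.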
+ SwVfpRegister scratch = kScratchDoubleReg.low();
+ __ vldr(scratch, i.InputOffset());
+ __ vcvt_f64_f32(i.OutputDoubleRegister(), scratch);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmVstr32: {
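+ // Mirror of kArmVldr32: narrow the float64 value to float32 in a
+ // scratch register, then store the single.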
+ int index = 0;
+ SwVfpRegister scratch = kScratchDoubleReg.low();
+ MemOperand operand = i.InputOffset(&index);
+ __ vcvt_f32_f64(scratch, i.InputDoubleRegister(index));
+ __ vstr(scratch, operand);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
case kArmVldr64:
__ vldr(i.OutputDoubleRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
V(ArmVcvtF64U32) \
V(ArmVcvtS32F64) \
V(ArmVcvtU32F64) \
+ V(ArmVldr32) \
+ V(ArmVstr32) \
V(ArmVldr64) \
V(ArmVstr64) \
V(ArmLdrb) \
case kArmRsb:
return ImmediateFitsAddrMode1Instruction(value);
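+ // VFP vldr/vstr encode the offset as an 8-bit immediate scaled by 4,
+ // i.e. multiples of 4 in [-1020, 1020].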
+ case kArmVldr32:
+ case kArmVstr32:
case kArmVldr64:
case kArmVstr64:
return value >= -1020 && value <= 1020 && (value % 4) == 0;
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- InstructionOperand* result = rep == kRepFloat64
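+ // Float32 loads also define a double register: the code generator
+ // widens the loaded value to float64.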
+ InstructionOperand* result = (rep == kRepFloat32 || rep == kRepFloat64)
? g.DefineAsDoubleRegister(node)
: g.DefineAsRegister(node);
ArchOpcode opcode;
switch (rep) {
+ case kRepFloat32:
+ opcode = kArmVldr32;
+ break;
case kRepFloat64:
opcode = kArmVldr64;
break;
return;
}
DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
- InstructionOperand* val =
- rep == kRepFloat64 ? g.UseDoubleRegister(value) : g.UseRegister(value);
+ InstructionOperand* val = (rep == kRepFloat32 || rep == kRepFloat64)
+ ? g.UseDoubleRegister(value)
+ : g.UseRegister(value);
ArchOpcode opcode;
switch (rep) {
+ case kRepFloat32:
+ opcode = kArmVstr32;
+ break;
case kRepFloat64:
opcode = kArmVstr64;
break;
case kArm64Str:
__ Str(i.InputRegister(2), i.MemoryOperand());
break;
+ case kArm64LdrS: {
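+ // Load the single into a scratch S register, then widen it to float64.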
+ UseScratchRegisterScope scope(masm());
+ FPRegister scratch = scope.AcquireS();
+ __ Ldr(scratch, i.MemoryOperand());
+ __ Fcvt(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kArm64StrS: {
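+ // Narrow the float64 value to float32 with Fcvt before the store.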
+ UseScratchRegisterScope scope(masm());
+ FPRegister scratch = scope.AcquireS();
+ __ Fcvt(scratch, i.InputDoubleRegister(2));
+ __ Str(scratch, i.MemoryOperand());
+ break;
+ }
case kArm64LdrD:
__ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
break;
V(Arm64Float64ToUint32) \
V(Arm64Int32ToFloat64) \
V(Arm64Uint32ToFloat64) \
+ V(Arm64LdrS) \
+ V(Arm64StrS) \
V(Arm64LdrD) \
V(Arm64StrD) \
V(Arm64Ldrb) \
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- InstructionOperand* result = rep == kRepFloat64
+ InstructionOperand* result = (rep == kRepFloat32 || rep == kRepFloat64)
? g.DefineAsDoubleRegister(node)
: g.DefineAsRegister(node);
ArchOpcode opcode;
// TODO(titzer): signed/unsigned small loads
switch (rep) {
+ case kRepFloat32:
+ opcode = kArm64LdrS;
+ break;
case kRepFloat64:
opcode = kArm64LdrD;
break;
}
DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
InstructionOperand* val;
- if (rep == kRepFloat64) {
+ if (rep == kRepFloat32 || rep == kRepFloat64) {
val = g.UseDoubleRegister(value);
} else {
val = g.UseRegister(value);
}
ArchOpcode opcode;
switch (rep) {
+ case kRepFloat32:
+ opcode = kArm64StrS;
+ break;
case kRepFloat64:
opcode = kArm64StrD;
break;
__ movsd(operand, i.InputDoubleRegister(index));
}
break;
+ case kIA32Movss:
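+ // Loads widen float32 to float64; stores narrow it again, with xmm0
+ // serving as the scratch register.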
+ if (instr->HasOutput()) {
+ __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
+ __ cvtss2sd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ } else {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ cvtsd2ss(xmm0, i.InputDoubleRegister(index));
+ __ movss(operand, xmm0);
+ }
+ break;
case kIA32StoreWriteBarrier: {
Register object = i.InputRegister(0);
Register index = i.InputRegister(1);
V(IA32Movzxwl) \
V(IA32Movw) \
V(IA32Movl) \
+ V(IA32Movss) \
V(IA32Movsd) \
V(IA32StoreWriteBarrier)
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- InstructionOperand* output = rep == kRepFloat64
+ InstructionOperand* output = (rep == kRepFloat32 || rep == kRepFloat64)
? g.DefineAsDoubleRegister(node)
: g.DefineAsRegister(node);
ArchOpcode opcode;
// TODO(titzer): signed/unsigned small loads
switch (rep) {
+ case kRepFloat32:
+ opcode = kIA32Movss;
+ break;
case kRepFloat64:
opcode = kIA32Movsd;
break;
}
DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
InstructionOperand* val;
- if (rep == kRepFloat64) {
+ if (rep == kRepFloat32 || rep == kRepFloat64) {
val = g.UseDoubleRegister(value);
} else {
if (g.CanBeImmediate(value)) {
}
ArchOpcode opcode;
switch (rep) {
+ case kRepFloat32:
+ opcode = kIA32Movss;
+ break;
case kRepFloat64:
opcode = kIA32Movsd;
break;
void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
DCHECK_NOT_NULL(node);
- if (RepresentationOf(rep) == kRepFloat64) MarkAsDouble(node);
- if (RepresentationOf(rep) == kRepTagged) MarkAsReference(node);
+ switch (RepresentationOf(rep)) {
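+ // Float32 values are held in double registers, so both float
+ // representations mark the node as double.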
+ case kRepFloat32:
+ case kRepFloat64:
+ MarkAsDouble(node);
+ break;
+ case kRepTagged:
+ MarkAsReference(node);
+ break;
+ default:
+ break;
+ }
}
PRINT(kRepWord16);
PRINT(kRepWord32);
PRINT(kRepWord64);
+ PRINT(kRepFloat32);
PRINT(kRepFloat64);
PRINT(kRepTagged);
kRepWord16 = 1 << 2,
kRepWord32 = 1 << 3,
kRepWord64 = 1 << 4,
- kRepFloat64 = 1 << 5,
- kRepTagged = 1 << 6,
+ kRepFloat32 = 1 << 5,
+ kRepFloat64 = 1 << 6,
+ kRepTagged = 1 << 7,
// Types.
- kTypeBool = 1 << 7,
- kTypeInt32 = 1 << 8,
- kTypeUint32 = 1 << 9,
- kTypeInt64 = 1 << 10,
- kTypeUint64 = 1 << 11,
- kTypeNumber = 1 << 12,
- kTypeAny = 1 << 13
+ kTypeBool = 1 << 8,
+ kTypeInt32 = 1 << 9,
+ kTypeUint32 = 1 << 10,
+ kTypeInt64 = 1 << 11,
+ kTypeUint64 = 1 << 12,
+ kTypeNumber = 1 << 13,
+ kTypeAny = 1 << 14
};
OStream& operator<<(OStream& os, const MachineType& type);
// Globally useful machine types and constants.
const MachineTypeUnion kRepMask = kRepBit | kRepWord8 | kRepWord16 |
- kRepWord32 | kRepWord64 | kRepFloat64 |
- kRepTagged;
+ kRepWord32 | kRepWord64 | kRepFloat32 |
+ kRepFloat64 | kRepTagged;
const MachineTypeUnion kTypeMask = kTypeBool | kTypeInt32 | kTypeUint32 |
kTypeInt64 | kTypeUint64 | kTypeNumber |
kTypeAny;
const MachineType kMachNone = static_cast<MachineType>(0);
+const MachineType kMachFloat32 =
+ static_cast<MachineType>(kRepFloat32 | kTypeNumber);
const MachineType kMachFloat64 =
static_cast<MachineType>(kRepFloat64 | kTypeNumber);
const MachineType kMachInt8 = static_cast<MachineType>(kRepWord8 | kTypeInt32);
case kRepWord16:
return 2;
case kRepWord32:
+ case kRepFloat32:
return 4;
case kRepWord64:
case kRepFloat64:
public:
// Information for each node tracked during the fixpoint.
struct NodeInfo {
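+ // 15 bits: inserting kRepFloat32 pushed the topmost bit, kTypeAny,
+ // up to bit 14.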
- MachineTypeUnion use : 14; // Union of all usages for the node.
+ MachineTypeUnion use : 15; // Union of all usages for the node.
bool queued : 1; // Bookkeeping for the traversal.
bool visited : 1; // Bookkeeping for the traversal.
- MachineTypeUnion output : 14; // Output type of the node.
+ MachineTypeUnion output : 15; // Output type of the node.
};
RepresentationSelector(JSGraph* jsgraph, Zone* zone,
break;
}
- case kX64Movsd:
- if (instr->HasOutput()) {
- __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
- } else {
- int index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ movsd(operand, i.InputDoubleRegister(index));
- }
- break;
case kX64Movsxbl:
__ movsxbl(i.OutputRegister(), i.MemoryOperand());
break;
}
}
break;
+ case kX64Movss:
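+ // Same scheme as the other ports: widen float32 to float64 on load,
+ // narrow through the xmm0 scratch register on store.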
+ if (instr->HasOutput()) {
+ __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
+ __ cvtss2sd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ } else {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ cvtsd2ss(xmm0, i.InputDoubleRegister(index));
+ __ movss(operand, xmm0);
+ }
+ break;
+ case kX64Movsd:
+ if (instr->HasOutput()) {
+ __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+ } else {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ movsd(operand, i.InputDoubleRegister(index));
+ }
+ break;
case kX64StoreWriteBarrier: {
Register object = i.InputRegister(0);
Register index = i.InputRegister(1);
V(SSEFloat64ToUint32) \
V(SSEInt32ToFloat64) \
V(SSEUint32ToFloat64) \
- V(X64Movsd) \
V(X64Movsxbl) \
V(X64Movzxbl) \
V(X64Movb) \
V(X64Movl) \
V(X64Movsxlq) \
V(X64Movq) \
+ V(X64Movsd) \
+ V(X64Movss) \
V(X64StoreWriteBarrier)
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- InstructionOperand* output = rep == kRepFloat64
+ InstructionOperand* output = (rep == kRepFloat32 || rep == kRepFloat64)
? g.DefineAsDoubleRegister(node)
: g.DefineAsRegister(node);
ArchOpcode opcode;
// TODO(titzer): signed/unsigned small loads
switch (rep) {
+ case kRepFloat32:
+ opcode = kX64Movss;
+ break;
case kRepFloat64:
opcode = kX64Movsd;
break;
}
DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
InstructionOperand* val;
- if (rep == kRepFloat64) {
+ if (rep == kRepFloat32 || rep == kRepFloat64) {
val = g.UseDoubleRegister(value);
} else {
if (g.CanBeImmediate(value)) {
}
ArchOpcode opcode;
switch (rep) {
+ case kRepFloat32:
+ opcode = kX64Movss;
+ break;
case kRepFloat64:
opcode = kX64Movsd;
break;
RunLoadStore<int32_t>(kMachInt32);
RunLoadStore<uint32_t>(kMachUint32);
RunLoadStore<void*>(kMachAnyTagged);
+ RunLoadStore<float>(kMachFloat32);
RunLoadStore<double>(kMachFloat64);
}
MachineType type;
ArchOpcode ldr_opcode;
ArchOpcode str_opcode;
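+ // Pointer-to-member predicate that checks the register class
+ // (integer vs. double) of the value operand.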
+ bool (InstructionSelectorTest::Stream::*val_predicate)(
+ const InstructionOperand*) const;
const int32_t immediates[40];
};
{kMachInt8,
kArmLdrsb,
kArmStrb,
+ &InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
{kMachUint8,
kArmLdrb,
kArmStrb,
+ &InstructionSelectorTest::Stream::IsInteger,
{-4095, -3914, -3536, -3234, -3185, -3169, -1073, -990, -859, -720, -434,
-127, -124, -122, -105, -91, -86, -64, -55, -53, -30, -10, -3, 0, 20, 28,
39, 58, 64, 73, 75, 100, 108, 121, 686, 963, 1363, 2759, 3449, 4095}},
{kMachInt16,
kArmLdrsh,
kArmStrh,
+ &InstructionSelectorTest::Stream::IsInteger,
{-255, -251, -232, -220, -144, -138, -130, -126, -116, -115, -102, -101,
-98, -69, -59, -56, -39, -35, -23, -19, -7, 0, 22, 26, 37, 68, 83, 87, 98,
102, 108, 111, 117, 171, 195, 203, 204, 245, 246, 255}},
{kMachUint16,
kArmLdrh,
kArmStrh,
+ &InstructionSelectorTest::Stream::IsInteger,
{-255, -230, -201, -172, -125, -119, -118, -105, -98, -79, -54, -42, -41,
-32, -12, -11, -5, -4, 0, 5, 9, 25, 28, 51, 58, 60, 89, 104, 108, 109,
114, 116, 120, 138, 150, 161, 166, 172, 228, 255}},
{kMachInt32,
kArmLdr,
kArmStr,
+ &InstructionSelectorTest::Stream::IsInteger,
{-4095, -1898, -1685, -1562, -1408, -1313, -344, -128, -116, -100, -92,
-80, -72, -71, -56, -25, -21, -11, -9, 0, 3, 5, 27, 28, 42, 52, 63, 88,
93, 97, 125, 846, 1037, 2102, 2403, 2597, 2632, 2997, 3935, 4095}},
+ {kMachFloat32,
+ kArmVldr32,
+ kArmVstr32,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-1020, -928, -896, -772, -728, -680, -660, -488, -372, -112, -100, -92,
+ -84, -80, -72, -64, -60, -56, -52, -48, -36, -32, -20, -8, -4, 0, 8, 20,
+ 24, 40, 64, 112, 204, 388, 516, 852, 856, 976, 988, 1020}},
{kMachFloat64,
kArmVldr64,
kArmVstr64,
+ &InstructionSelectorTest::Stream::IsDouble,
{-1020, -948, -796, -696, -612, -364, -320, -308, -128, -112, -108, -104,
-96, -84, -80, -56, -48, -40, -20, 0, 24, 28, 36, 48, 64, 84, 96, 100,
108, 116, 120, 140, 156, 408, 432, 444, 772, 832, 940, 1020}}};
EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_Offset_RR, s[0]->addressing_mode());
EXPECT_EQ(2U, s[0]->InputCount());
- EXPECT_EQ(1U, s[0]->OutputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
}
ASSERT_EQ(2U, s[0]->InputCount());
ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
- EXPECT_EQ(1U, s[0]->OutputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
}
}
{kMachUint32, kArm64LdrW, kArm64StrW},
{kMachInt64, kArm64Ldr, kArm64Str},
{kMachUint64, kArm64Ldr, kArm64Str},
+ {kMachFloat32, kArm64LdrS, kArm64StrS},
{kMachFloat64, kArm64LdrD, kArm64StrD}};
{kMachUint16, kIA32Movzxwl, kIA32Movw},
{kMachInt32, kIA32Movl, kIA32Movl},
{kMachUint32, kIA32Movl, kIA32Movl},
+ {kMachFloat32, kIA32Movss, kIA32Movss},
{kMachFloat64, kIA32Movsd, kIA32Movsd}};
} // namespace
EXPECT_EQ(kArchNop, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->OutputCount());
ASSERT_TRUE(s[0]->Output()->IsUnallocated());
- EXPECT_EQ(param->id(),
- UnallocatedOperand::cast(s[0]->Output())->virtual_register());
+ EXPECT_EQ(param->id(), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kArchNop, s[1]->arch_opcode());
ASSERT_EQ(1U, s[1]->InputCount());
ASSERT_TRUE(s[1]->InputAt(0)->IsUnallocated());
- EXPECT_EQ(param->id(),
- UnallocatedOperand::cast(s[1]->InputAt(0))->virtual_register());
+ EXPECT_EQ(param->id(), s.ToVreg(s[1]->InputAt(0)));
ASSERT_EQ(1U, s[1]->OutputCount());
ASSERT_TRUE(s[1]->Output()->IsUnallocated());
EXPECT_TRUE(UnallocatedOperand::cast(s[1]->Output())->HasSameAsInputPolicy());
- EXPECT_EQ(finish->id(),
- UnallocatedOperand::cast(s[1]->Output())->virtual_register());
+ EXPECT_EQ(finish->id(), s.ToVreg(s[1]->Output()));
}
base::RandomNumberGenerator* rng() { return &rng_; }
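+ // Stream is now public: the per-architecture test tables take
+ // pointers to its member predicates from namespace scope.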
- protected:
class Stream;
enum StreamBuilderMode { kAllInstructions, kTargetInstructions };
return instructions_[index];
}
+ bool IsDouble(const InstructionOperand* operand) const {
+ return IsDouble(ToVreg(operand));
+ }
bool IsDouble(int virtual_register) const {
return doubles_.find(virtual_register) != doubles_.end();
}
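+ // "Integer" here means neither double nor reference, i.e. a plain
+ // general-purpose register value.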
+ bool IsInteger(const InstructionOperand* operand) const {
+ return IsInteger(ToVreg(operand));
+ }
+ bool IsInteger(int virtual_register) const {
+ return !IsDouble(virtual_register) && !IsReference(virtual_register);
+ }
+
+ bool IsReference(const InstructionOperand* operand) const {
+ return IsReference(ToVreg(operand));
+ }
bool IsReference(int virtual_register) const {
return references_.find(virtual_register) != references_.end();
}
}
int ToVreg(const InstructionOperand* operand) const {
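+ // For constants, the operand's index is used as the virtual register.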
+ if (operand->IsConstant()) return operand->index();
EXPECT_EQ(InstructionOperand::UNALLOCATED, operand->kind());
return UnallocatedOperand::cast(operand)->virtual_register();
}
{kMachUint32, kX64Movl, kX64Movl},
{kMachInt64, kX64Movq, kX64Movq},
{kMachUint64, kX64Movq, kX64Movq},
+ {kMachFloat32, kX64Movss, kX64Movss},
{kMachFloat64, kX64Movsd, kX64Movsd}};
} // namespace