}
+void Assembler::vmov(const SwVfpRegister dst, float imm) {
+ mov(ip, Operand(bit_cast<int32_t>(imm)));
+ vmov(dst, ip);
+}
+
+
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
memcpy(&i, &d, 8);
SwVfpRegister last,
Condition cond = al);
+ void vmov(const SwVfpRegister dst, float imm);
void vmov(const DwVfpRegister dst,
double imm,
const Register scratch = no_reg);
// static
ElementAccess AccessBuilder::ForFixedArrayElement() {
- return {kNoBoundsCheck, kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
- kMachAnyTagged};
+ return {kTaggedBase, FixedArray::kHeaderSize, Type::Any(), kMachAnyTagged};
}
int header_size = is_external ? 0 : FixedTypedArrayBase::kDataOffset;
switch (type) {
case kExternalInt8Array:
- return {kTypedArrayBoundsCheck, taggedness, header_size, Type::Signed32(),
- kMachInt8};
+ return {taggedness, header_size, Type::Signed32(), kMachInt8};
case kExternalUint8Array:
case kExternalUint8ClampedArray:
- return {kTypedArrayBoundsCheck, taggedness, header_size,
- Type::Unsigned32(), kMachUint8};
+ return {taggedness, header_size, Type::Unsigned32(), kMachUint8};
case kExternalInt16Array:
- return {kTypedArrayBoundsCheck, taggedness, header_size, Type::Signed32(),
- kMachInt16};
+ return {taggedness, header_size, Type::Signed32(), kMachInt16};
case kExternalUint16Array:
- return {kTypedArrayBoundsCheck, taggedness, header_size,
- Type::Unsigned32(), kMachUint16};
+ return {taggedness, header_size, Type::Unsigned32(), kMachUint16};
case kExternalInt32Array:
- return {kTypedArrayBoundsCheck, taggedness, header_size, Type::Signed32(),
- kMachInt32};
+ return {taggedness, header_size, Type::Signed32(), kMachInt32};
case kExternalUint32Array:
- return {kTypedArrayBoundsCheck, taggedness, header_size,
- Type::Unsigned32(), kMachUint32};
+ return {taggedness, header_size, Type::Unsigned32(), kMachUint32};
case kExternalFloat32Array:
- return {kTypedArrayBoundsCheck, taggedness, header_size, Type::Number(),
- kMachFloat32};
+ return {taggedness, header_size, Type::Number(), kMachFloat32};
case kExternalFloat64Array:
- return {kTypedArrayBoundsCheck, taggedness, header_size, Type::Number(),
- kMachFloat64};
+ return {taggedness, header_size, Type::Number(), kMachFloat64};
}
UNREACHABLE();
- return {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::None(), kMachNone};
+ return {kUntaggedBase, 0, Type::None(), kMachNone};
}
} // namespace compiler
return MemOperand(r0);
}
- MemOperand InputOffset() {
- int index = 0;
- return InputOffset(&index);
+ MemOperand InputOffset(int first_index = 0) {
+ return InputOffset(&first_index);
}
MemOperand ToMemOperand(InstructionOperand* op) const {
};
+namespace {
+
+class OutOfLineLoadFloat32 FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadFloat32(CodeGenerator* gen, SwVfpRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL {
+ __ vmov(result_, std::numeric_limits<float>::quiet_NaN());
+ }
+
+ private:
+ SwVfpRegister const result_;
+};
+
+
+class OutOfLineLoadFloat64 FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadFloat64(CodeGenerator* gen, DwVfpRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL {
+ __ vmov(result_, std::numeric_limits<double>::quiet_NaN(), kScratchReg);
+ }
+
+ private:
+ DwVfpRegister const result_;
+};
+
+
+class OutOfLineLoadInteger FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadInteger(CodeGenerator* gen, Register result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL { __ mov(result_, Operand::Zero()); }
+
+ private:
+ Register const result_;
+};
+
+} // namespace
+
+
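+// The checked memory access macros compare the offset against the length and
+// branch to out-of-line code (which produces NaN or zero) when the access is
+// out of bounds; checked stores are instead predicated on the lo condition so
+// an out-of-bounds store is simply skipped.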
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(width) \
+ do { \
+ auto result = i.OutputFloat##width##Register(); \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ auto ool = new (zone()) OutOfLineLoadFloat##width(this, result); \
+ __ b(hs, ool->entry()); \
+ __ vldr(result, i.InputOffset(2)); \
+ __ bind(ool->exit()); \
+ DCHECK_EQ(LeaveCC, i.OutputSBit()); \
+ } while (0)
+
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
+ do { \
+ auto result = i.OutputRegister(); \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
+ __ b(hs, ool->entry()); \
+ __ asm_instr(result, i.InputOffset(2)); \
+ __ bind(ool->exit()); \
+ DCHECK_EQ(LeaveCC, i.OutputSBit()); \
+ } while (0)
+
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(width) \
+ do { \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ auto value = i.InputFloat##width##Register(2); \
+ __ vstr(value, i.InputOffset(3), lo); \
+ DCHECK_EQ(LeaveCC, i.OutputSBit()); \
+ } while (0)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
+ do { \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ auto value = i.InputRegister(2); \
+ __ asm_instr(value, i.InputOffset(3), lo); \
+ DCHECK_EQ(LeaveCC, i.OutputSBit()); \
+ } while (0)
+
+
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ArmOperandConverter i(this, instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kCheckedLoadInt8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsb);
+ break;
+ case kCheckedLoadUint8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(ldrb);
+ break;
+ case kCheckedLoadInt16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsh);
+ break;
+ case kCheckedLoadUint16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(ldrh);
+ break;
+ case kCheckedLoadWord32:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(ldr);
+ break;
+ case kCheckedLoadFloat32:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(32);
+ break;
+ case kCheckedLoadFloat64:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(64);
+ break;
+ case kCheckedStoreWord8:
+ ASSEMBLE_CHECKED_STORE_INTEGER(strb);
+ break;
+ case kCheckedStoreWord16:
+ ASSEMBLE_CHECKED_STORE_INTEGER(strh);
+ break;
+ case kCheckedStoreWord32:
+ ASSEMBLE_CHECKED_STORE_INTEGER(str);
+ break;
+ case kCheckedStoreFloat32:
+ ASSEMBLE_CHECKED_STORE_FLOAT(32);
+ break;
+ case kCheckedStoreFloat64:
+ ASSEMBLE_CHECKED_STORE_FLOAT(64);
+ break;
}
}
}
if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
- SwVfpRegister dst = destination->IsDoubleRegister()
- ? g.ToFloat32Register(destination)
- : kScratchDoubleReg.low();
- // TODO(turbofan): Can we do better here?
- __ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
- __ vmov(dst, ip);
if (destination->IsDoubleStackSlot()) {
- __ vstr(dst, g.ToMemOperand(destination));
+ MemOperand dst = g.ToMemOperand(destination);
+ __ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
+ __ str(ip, dst);
+ } else {
+ SwVfpRegister dst = g.ToFloat32Register(destination);
+ __ vmov(dst, src.ToFloat32());
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
DwVfpRegister dst = destination->IsDoubleRegister()
? g.ToFloat64Register(destination)
: kScratchDoubleReg;
- __ vmov(dst, src.ToFloat64());
+ __ vmov(dst, src.ToFloat64(), kScratchReg);
if (destination->IsDoubleStackSlot()) {
__ vstr(dst, g.ToMemOperand(destination));
}
}
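+// A checked load takes the offset, the length to compare it against, and then
+// the buffer base together with the offset again, which the code generator
+// combines into the memory operand; a checked store adds the value after the
+// length.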
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+ MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ ArmOperandGenerator g(this);
+ Node* const buffer = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepWord8:
+ opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ break;
+ case kRepWord16:
+ opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ break;
+ case kRepWord32:
+ opcode = kCheckedLoadWord32;
+ break;
+ case kRepFloat32:
+ opcode = kCheckedLoadFloat32;
+ break;
+ case kRepFloat64:
+ opcode = kCheckedLoadFloat64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand* offset_operand = g.UseRegister(offset);
+ InstructionOperand* length_operand = g.CanBeImmediate(length, kArmCmp)
+ ? g.UseImmediate(length)
+ : g.UseRegister(length);
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
+ g.DefineAsRegister(node), offset_operand, length_operand,
+ g.UseRegister(buffer), offset_operand);
+}
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+ MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ ArmOperandGenerator g(this);
+ Node* const buffer = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ Node* const value = node->InputAt(3);
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepWord8:
+ opcode = kCheckedStoreWord8;
+ break;
+ case kRepWord16:
+ opcode = kCheckedStoreWord16;
+ break;
+ case kRepWord32:
+ opcode = kCheckedStoreWord32;
+ break;
+ case kRepFloat32:
+ opcode = kCheckedStoreFloat32;
+ break;
+ case kRepFloat64:
+ opcode = kCheckedStoreFloat64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand* offset_operand = g.UseRegister(offset);
+ InstructionOperand* length_operand = g.CanBeImmediate(length, kArmCmp)
+ ? g.UseImmediate(length)
+ : g.UseRegister(length);
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), nullptr,
+ offset_operand, length_operand, g.UseRegister(value),
+ g.UseRegister(buffer), offset_operand);
+}
+
+
namespace {
void EmitBic(InstructionSelector* selector, Node* node, Node* left,
Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
+ DoubleRegister InputFloat32Register(int index) {
+ return InputDoubleRegister(index).S();
+ }
+
+ DoubleRegister InputFloat64Register(int index) {
+ return InputDoubleRegister(index);
+ }
+
+ DoubleRegister OutputFloat32Register() { return OutputDoubleRegister().S(); }
+
+ DoubleRegister OutputFloat64Register() { return OutputDoubleRegister(); }
+
Register InputRegister32(int index) {
return ToRegister(instr_->InputAt(index)).W();
}
return MemOperand(no_reg);
}
- MemOperand MemoryOperand() {
- int index = 0;
- return MemoryOperand(&index);
+ MemOperand MemoryOperand(int first_index = 0) {
+ return MemoryOperand(&first_index);
}
Operand ToOperand(InstructionOperand* op) {
};
+namespace {
+
+class OutOfLineLoadFloat32 FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadFloat32(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL {
+ __ Fmov(result_, std::numeric_limits<float>::quiet_NaN());
+ }
+
+ private:
+ DoubleRegister const result_;
+};
+
+
+class OutOfLineLoadFloat64 FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadFloat64(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL {
+ __ Fmov(result_, std::numeric_limits<double>::quiet_NaN());
+ }
+
+ private:
+ DoubleRegister const result_;
+};
+
+
+class OutOfLineLoadInteger FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadInteger(CodeGenerator* gen, Register result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL { __ Mov(result_, 0); }
+
+ private:
+ Register const result_;
+};
+
+} // namespace
+
+
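+// As on ARM, checked loads branch to out-of-line code that produces the
+// default value when the offset is not below the length; checked stores
+// simply branch over the store with a local label.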
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(width) \
+ do { \
+ auto result = i.OutputFloat##width##Register(); \
+ auto offset = i.InputRegister32(0); \
+ auto length = i.InputOperand32(1); \
+ __ Cmp(offset, length); \
+ auto ool = new (zone()) OutOfLineLoadFloat##width(this, result); \
+ __ B(hs, ool->entry()); \
+ __ Ldr(result, i.MemoryOperand(2)); \
+ __ Bind(ool->exit()); \
+ } while (0)
+
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
+ do { \
+ auto result = i.OutputRegister32(); \
+ auto offset = i.InputRegister32(0); \
+ auto length = i.InputOperand32(1); \
+ __ Cmp(offset, length); \
+ auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
+ __ B(hs, ool->entry()); \
+ __ asm_instr(result, i.MemoryOperand(2)); \
+ __ Bind(ool->exit()); \
+ } while (0)
+
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(width) \
+ do { \
+ auto offset = i.InputRegister32(0); \
+ auto length = i.InputOperand32(1); \
+ __ Cmp(offset, length); \
+ Label done; \
+ __ B(hs, &done); \
+ __ Str(i.InputFloat##width##Register(2), i.MemoryOperand(3)); \
+ __ Bind(&done); \
+ } while (0)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
+ do { \
+ auto offset = i.InputRegister32(0); \
+ auto length = i.InputOperand32(1); \
+ __ Cmp(offset, length); \
+ Label done; \
+ __ B(hs, &done); \
+ __ asm_instr(i.InputRegister32(2), i.MemoryOperand(3)); \
+ __ Bind(&done); \
+ } while (0)
+
+
#define ASSEMBLE_SHIFT(asm_instr, width) \
do { \
if (instr->InputAt(1)->IsRegister()) { \
}
break;
}
+ case kCheckedLoadInt8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
+ break;
+ case kCheckedLoadUint8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrb);
+ break;
+ case kCheckedLoadInt16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsh);
+ break;
+ case kCheckedLoadUint16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrh);
+ break;
+ case kCheckedLoadWord32:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(Ldr);
+ break;
+ case kCheckedLoadFloat32:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(32);
+ break;
+ case kCheckedLoadFloat64:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(64);
+ break;
+ case kCheckedStoreWord8:
+ ASSEMBLE_CHECKED_STORE_INTEGER(Strb);
+ break;
+ case kCheckedStoreWord16:
+ ASSEMBLE_CHECKED_STORE_INTEGER(Strh);
+ break;
+ case kCheckedStoreWord32:
+ ASSEMBLE_CHECKED_STORE_INTEGER(Str);
+ break;
+ case kCheckedStoreFloat32:
+ ASSEMBLE_CHECKED_STORE_FLOAT(32);
+ break;
+ case kCheckedStoreFloat64:
+ ASSEMBLE_CHECKED_STORE_FLOAT(64);
+ break;
}
}
cc = vc;
break;
}
- __ bind(&check);
+ __ Bind(&check);
__ Cset(reg, cc);
__ Bind(&done);
}
}
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+ MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ Arm64OperandGenerator g(this);
+ Node* const buffer = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepWord8:
+ opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ break;
+ case kRepWord16:
+ opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ break;
+ case kRepWord32:
+ opcode = kCheckedLoadWord32;
+ break;
+ case kRepFloat32:
+ opcode = kCheckedLoadFloat32;
+ break;
+ case kRepFloat64:
+ opcode = kCheckedLoadFloat64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand* offset_operand = g.UseRegister(offset);
+ Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(node), offset_operand, g.UseRegister(length),
+ g.UseRegister(buffer), offset_operand);
+}
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+ MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ Arm64OperandGenerator g(this);
+ Node* const buffer = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ Node* const value = node->InputAt(3);
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepWord8:
+ opcode = kCheckedStoreWord8;
+ break;
+ case kRepWord16:
+ opcode = kCheckedStoreWord16;
+ break;
+ case kRepWord32:
+ opcode = kCheckedStoreWord32;
+ break;
+ case kRepFloat32:
+ opcode = kCheckedStoreFloat32;
+ break;
+ case kRepFloat64:
+ opcode = kCheckedStoreFloat64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand* offset_operand = g.UseRegister(offset);
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), nullptr, offset_operand,
+ g.UseRegister(length), g.UseRegister(value), g.UseRegister(buffer),
+ offset_operand);
+}
+
+
template <typename Matcher>
static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
ArchOpcode opcode, bool left_can_cover,
};
+// Generator for out-of-line code that is emitted after the main code is done.
+class OutOfLineCode : public ZoneObject {
+ public:
+ explicit OutOfLineCode(CodeGenerator* gen);
+ virtual ~OutOfLineCode();
+
+ virtual void Generate() = 0;
+
+ Label* entry() { return &entry_; }
+ Label* exit() { return &exit_; }
+ MacroAssembler* masm() const { return masm_; }
+ OutOfLineCode* next() const { return next_; }
+
+ private:
+ Label entry_;
+ Label exit_;
+ MacroAssembler* const masm_;
+ OutOfLineCode* const next_;
+};
+
+
// TODO(dcarney): generify this on bleeding_edge and replace this call
// when merged.
static inline void FinishCode(MacroAssembler* masm) {
deoptimization_states_(code->zone()),
deoptimization_literals_(code->zone()),
translations_(code->zone()),
- last_lazy_deopt_pc_(0) {
+ last_lazy_deopt_pc_(0),
+ ools_(nullptr) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
}
}
+ // Assemble all out-of-line code.
+ if (ools_) {
+ masm()->RecordComment("-- Out of line code --");
+ for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
+ masm()->bind(ool->entry());
+ ool->Generate();
+ masm()->jmp(ool->exit());
+ }
+ }
+
FinishCode(masm());
// Ensure there is space for lazy deopt.
#endif // !V8_TURBOFAN_BACKEND
+
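+// Out-of-line code fragments form an intrusive, singly-linked list headed by
+// CodeGenerator::ools_; constructing a fragment prepends it so that the code
+// generator can emit every fragment after the main code.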
+OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
+ : masm_(gen->masm()), next_(gen->ools_) {
+ gen->ools_ = this;
+}
+
+
+OutOfLineCode::~OutOfLineCode() {}
+
} // namespace compiler
} // namespace internal
} // namespace v8
#ifndef V8_COMPILER_CODE_GENERATOR_H_
#define V8_COMPILER_CODE_GENERATOR_H_
-#include <deque>
-
#include "src/compiler/gap-resolver.h"
#include "src/compiler/instruction.h"
#include "src/deoptimizer.h"
namespace internal {
namespace compiler {
+// Forward declarations.
class Linkage;
+class OutOfLineCode;
struct BranchInfo {
FlagsCondition condition;
int pc_offset_;
};
+ friend class OutOfLineCode;
+
Frame* const frame_;
Linkage* const linkage_;
InstructionSequence* const code_;
ZoneDeque<Handle<Object> > deoptimization_literals_;
TranslationBuffer translations_;
int last_lazy_deopt_pc_;
+ OutOfLineCode* ools_;
};
} // namespace compiler
return Operand(no_reg, 0);
}
- Operand MemoryOperand() {
- int first_input = 0;
+ Operand MemoryOperand(int first_input = 0) {
return MemoryOperand(&first_input);
}
};
-static bool HasImmediateInput(Instruction* instr, int index) {
+namespace {
+
+bool HasImmediateInput(Instruction* instr, int index) {
return instr->InputAt(index)->IsImmediate();
}
+class OutOfLineLoadInteger FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadInteger(CodeGenerator* gen, Register result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL { __ xor_(result_, result_); }
+
+ private:
+ Register const result_;
+};
+
+
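+// pcmpeqd of a register with itself sets every bit of the register; that bit
+// pattern is a NaN for both float32 and float64, so the out-of-bounds path
+// can produce its default value without loading a constant.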
+class OutOfLineLoadFloat FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL { __ pcmpeqd(result_, result_); }
+
+ private:
+ XMMRegister const result_;
+};
+
+} // namespace
+
+
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr) \
+ do { \
+ auto result = i.OutputDoubleRegister(); \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ OutOfLineCode* ool = new (zone()) OutOfLineLoadFloat(this, result); \
+ __ j(above_equal, ool->entry()); \
+ __ asm_instr(result, i.MemoryOperand(2)); \
+ __ bind(ool->exit()); \
+ } while (false)
+
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
+ do { \
+ auto result = i.OutputRegister(); \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ OutOfLineCode* ool = new (zone()) OutOfLineLoadInteger(this, result); \
+ __ j(above_equal, ool->entry()); \
+ __ asm_instr(result, i.MemoryOperand(2)); \
+ __ bind(ool->exit()); \
+ } while (false)
+
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
+ do { \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ Label done; \
+ __ j(above_equal, &done, Label::kNear); \
+ __ asm_instr(i.MemoryOperand(3), i.InputDoubleRegister(2)); \
+ __ bind(&done); \
+ } while (false)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
+ do { \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ Label done; \
+ __ j(above_equal, &done, Label::kNear); \
+ if (instr->InputAt(2)->IsRegister()) { \
+ __ asm_instr(i.MemoryOperand(3), i.InputRegister(2)); \
+ } else { \
+ __ asm_instr(i.MemoryOperand(3), i.InputImmediate(2)); \
+ } \
+ __ bind(&done); \
+ } while (false)
+
+
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
IA32OperandConverter i(this, instr);
__ RecordWrite(object, index, value, mode);
break;
}
+ case kCheckedLoadInt8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
+ break;
+ case kCheckedLoadUint8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_b);
+ break;
+ case kCheckedLoadInt16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_w);
+ break;
+ case kCheckedLoadUint16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_w);
+ break;
+ case kCheckedLoadWord32:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
+ break;
+ case kCheckedLoadFloat32:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
+ break;
+ case kCheckedLoadFloat64:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
+ break;
+ case kCheckedStoreWord8:
+ ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
+ break;
+ case kCheckedStoreWord16:
+ ASSEMBLE_CHECKED_STORE_INTEGER(mov_w);
+ break;
+ case kCheckedStoreWord32:
+ ASSEMBLE_CHECKED_STORE_INTEGER(mov);
+ break;
+ case kCheckedStoreFloat32:
+ ASSEMBLE_CHECKED_STORE_FLOAT(movss);
+ break;
+ case kCheckedStoreFloat64:
+ ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
+ break;
}
}
}
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+ MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ IA32OperandGenerator g(this);
+ Node* const buffer = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepWord8:
+ opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ break;
+ case kRepWord16:
+ opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ break;
+ case kRepWord32:
+ opcode = kCheckedLoadWord32;
+ break;
+ case kRepFloat32:
+ opcode = kCheckedLoadFloat32;
+ break;
+ case kRepFloat64:
+ opcode = kCheckedLoadFloat64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
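+  // If the offset is index + constant and the length is a constant known to
+  // exceed it, fold the constant into the displacement and compare the
+  // remaining index against the reduced length instead.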
+ if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
+ Int32Matcher mlength(length);
+ Int32BinopMatcher moffset(offset);
+ if (mlength.HasValue() && moffset.right().HasValue() &&
+ mlength.Value() > moffset.right().Value()) {
+ Int32Matcher mbuffer(buffer);
+ InstructionOperand* offset_operand = g.UseRegister(moffset.left().node());
+ InstructionOperand* length_operand =
+ g.TempImmediate(mlength.Value() - moffset.right().Value());
+ if (mbuffer.HasValue()) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), offset_operand, length_operand,
+ offset_operand,
+ g.TempImmediate(mbuffer.Value() + moffset.right().Value()));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MR1I),
+ g.DefineAsRegister(node), offset_operand, length_operand,
+ g.UseRegister(buffer), offset_operand,
+ g.UseImmediate(moffset.right().node()));
+ }
+ return;
+ }
+ }
+ InstructionOperand* offset_operand = g.UseRegister(offset);
+ InstructionOperand* length_operand =
+ g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
+ if (g.CanBeImmediate(buffer)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), offset_operand, length_operand,
+ offset_operand, g.UseImmediate(buffer));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MR1),
+ g.DefineAsRegister(node), offset_operand, length_operand,
+ g.UseRegister(buffer), offset_operand);
+ }
+}
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+ MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ IA32OperandGenerator g(this);
+ Node* const buffer = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ Node* const value = node->InputAt(3);
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepWord8:
+ opcode = kCheckedStoreWord8;
+ break;
+ case kRepWord16:
+ opcode = kCheckedStoreWord16;
+ break;
+ case kRepWord32:
+ opcode = kCheckedStoreWord32;
+ break;
+ case kRepFloat32:
+ opcode = kCheckedStoreFloat32;
+ break;
+ case kRepFloat64:
+ opcode = kCheckedStoreFloat64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand* value_operand =
+ g.CanBeImmediate(value)
+ ? g.UseImmediate(value)
+ : ((rep == kRepWord8 || rep == kRepBit) ? g.UseByteRegister(value)
+ : g.UseRegister(value));
+ if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
+ Int32Matcher mbuffer(buffer);
+ Int32Matcher mlength(length);
+ Int32BinopMatcher moffset(offset);
+ if (mlength.HasValue() && moffset.right().HasValue() &&
+ mlength.Value() > moffset.right().Value()) {
+ InstructionOperand* offset_operand = g.UseRegister(moffset.left().node());
+ InstructionOperand* length_operand =
+ g.TempImmediate(mlength.Value() - moffset.right().Value());
+ if (mbuffer.HasValue()) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
+ offset_operand, length_operand, value_operand, offset_operand,
+ g.TempImmediate(mbuffer.Value() + moffset.right().Value()));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MR1I), nullptr,
+ offset_operand, length_operand, value_operand,
+ g.UseRegister(buffer), offset_operand,
+ g.UseImmediate(moffset.right().node()));
+ }
+ return;
+ }
+ }
+ InstructionOperand* offset_operand = g.UseRegister(offset);
+ InstructionOperand* length_operand =
+ g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
+ if (g.CanBeImmediate(buffer)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
+ offset_operand, length_operand, value_operand, offset_operand,
+ g.UseImmediate(buffer));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MR1), nullptr,
+ offset_operand, length_operand, value_operand, g.UseRegister(buffer),
+ offset_operand);
+ }
+}
+
+
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
V(ArchRet) \
V(ArchStackPointer) \
V(ArchTruncateDoubleToI) \
+ V(CheckedLoadInt8) \
+ V(CheckedLoadUint8) \
+ V(CheckedLoadInt16) \
+ V(CheckedLoadUint16) \
+ V(CheckedLoadWord32) \
+ V(CheckedLoadFloat32) \
+ V(CheckedLoadFloat64) \
+ V(CheckedStoreWord8) \
+ V(CheckedStoreWord16) \
+ V(CheckedStoreWord32) \
+ V(CheckedStoreFloat32) \
+ V(CheckedStoreFloat64) \
TARGET_ARCH_OPCODE_LIST(V)
enum ArchOpcode {
}
+Instruction* InstructionSelector::Emit(
+ InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
+ InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
+ InstructionOperand* e, size_t temp_count, InstructionOperand** temps) {
+ size_t output_count = output == NULL ? 0 : 1;
+ InstructionOperand* inputs[] = {a, b, c, d, e};
+ size_t input_count = arraysize(inputs);
+ return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+ temps);
+}
+
+
+Instruction* InstructionSelector::Emit(
+ InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
+ InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
+ InstructionOperand* e, InstructionOperand* f, size_t temp_count,
+ InstructionOperand** temps) {
+ size_t output_count = output == NULL ? 0 : 1;
+ InstructionOperand* inputs[] = {a, b, c, d, e, f};
+ size_t input_count = arraysize(inputs);
+ return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+ temps);
+}
+
+
Instruction* InstructionSelector::Emit(
InstructionCode opcode, size_t output_count, InstructionOperand** outputs,
size_t input_count, InstructionOperand** inputs, size_t temp_count,
return OpParameter<LoadRepresentation>(node);
case IrOpcode::kStore:
return kMachNone;
+ case IrOpcode::kCheckedLoad:
+ return OpParameter<MachineType>(node);
+ case IrOpcode::kCheckedStore:
+ return kMachNone;
case IrOpcode::kWord32And:
case IrOpcode::kWord32Or:
case IrOpcode::kWord32Xor:
return MarkAsDouble(node), VisitFloat64RoundTiesAway(node);
case IrOpcode::kLoadStackPointer:
return VisitLoadStackPointer(node);
+ case IrOpcode::kCheckedLoad: {
+ MachineType rep = OpParameter<MachineType>(node);
+ MarkAsRepresentation(rep, node);
+ return VisitCheckedLoad(node);
+ }
+ case IrOpcode::kCheckedStore:
+ return VisitCheckedStore(node);
default:
V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
node->opcode(), node->op()->mnemonic(), node->id());
InstructionOperand* a, InstructionOperand* b,
InstructionOperand* c, InstructionOperand* d,
size_t temp_count = 0, InstructionOperand* *temps = NULL);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+ InstructionOperand* a, InstructionOperand* b,
+ InstructionOperand* c, InstructionOperand* d,
+ InstructionOperand* e, size_t temp_count = 0,
+ InstructionOperand* *temps = NULL);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+ InstructionOperand* a, InstructionOperand* b,
+ InstructionOperand* c, InstructionOperand* d,
+ InstructionOperand* e, InstructionOperand* f,
+ size_t temp_count = 0, InstructionOperand* *temps = NULL);
Instruction* Emit(InstructionCode opcode, size_t output_count,
InstructionOperand** outputs, size_t input_count,
InstructionOperand** inputs, size_t temp_count = 0,
return machine()->Is32() ? Int32Constant(static_cast<int32_t>(value))
: Int64Constant(static_cast<int64_t>(value));
}
+ template <typename T>
+ Node* PointerConstant(T* value) {
+ return IntPtrConstant(bit_cast<intptr_t>(value));
+ }
// Creates a Float32Constant node, usually canonicalized.
Node* Float32Constant(float value);
#include "src/compiler/js-builtin-reducer.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/types.h"
one_range_ = Type::Range(one, one, zone());
Handle<Object> thirtyone = factory->NewNumber(31.0);
zero_thirtyone_range_ = Type::Range(zero, thirtyone, zone());
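+  // shifted_int32_ranges_[k] holds the key range that can be shifted left by
+  // k bits (the element size, log2) without overflowing int32, so typed array
+  // byte offsets can be computed with a plain Word32Shl.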
+ for (size_t k = 0; k < arraysize(shifted_int32_ranges_); ++k) {
+ Handle<Object> min = factory->NewNumber(kMinInt / (1 << k));
+ Handle<Object> max = factory->NewNumber(kMaxInt / (1 << k));
+ shifted_int32_ranges_[k] = Type::Range(min, max, zone());
+ }
}
-JSTypedLowering::~JSTypedLowering() {}
-
-
Reduction JSTypedLowering::ReplaceEagerly(Node* old, Node* node) {
NodeProperties::ReplaceWithValue(old, node, node);
return Changed(node);
Type* base_type = NodeProperties::GetBounds(base).upper;
// TODO(mstarzinger): This lowering is not correct if:
  //    a) The typed array or its buffer is neutered.
- if (base_type->IsConstant() && key_type->Is(Type::Integral32()) &&
+ if (base_type->IsConstant() &&
base_type->AsConstant()->Value()->IsJSTypedArray()) {
- // JSLoadProperty(typed-array, int32)
- Handle<JSTypedArray> array =
+ Handle<JSTypedArray> const array =
Handle<JSTypedArray>::cast(base_type->AsConstant()->Value());
- if (IsExternalArrayElementsKind(array->map()->elements_kind())) {
- ExternalArrayType type = array->type();
- double byte_length = array->byte_length()->Number();
- if (byte_length <= kMaxInt) {
- Handle<ExternalArray> elements =
- Handle<ExternalArray>::cast(handle(array->elements()));
- Node* pointer = jsgraph()->IntPtrConstant(
- bit_cast<intptr_t>(elements->external_pointer()));
- Node* length = jsgraph()->Constant(array->length()->Number());
- Node* effect = NodeProperties::GetEffectInput(node);
+ BufferAccess const access(array->type());
+ size_t const k = ElementSizeLog2Of(access.machine_type());
+ double const byte_length = array->byte_length()->Number();
+ CHECK_LT(k, arraysize(shifted_int32_ranges_));
+ if (IsExternalArrayElementsKind(array->map()->elements_kind()) &&
+ access.external_array_type() != kExternalUint8ClampedArray &&
+ key_type->Is(shifted_int32_ranges_[k]) && byte_length <= kMaxInt) {
+ // JSLoadProperty(typed-array, int32)
+ Handle<ExternalArray> elements =
+ Handle<ExternalArray>::cast(handle(array->elements()));
+ Node* buffer = jsgraph()->PointerConstant(elements->external_pointer());
+ Node* length = jsgraph()->Constant(byte_length);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ // Check if we can avoid the bounds check.
+ if (key_type->Min() >= 0 && key_type->Max() < array->length()->Number()) {
Node* load = graph()->NewNode(
simplified()->LoadElement(
- AccessBuilder::ForTypedArrayElement(type, true)),
- pointer, key, length, effect);
+ AccessBuilder::ForTypedArrayElement(array->type(), true)),
+ buffer, key, effect, control);
return ReplaceEagerly(node, load);
}
+ // Compute byte offset.
+ Node* offset = Word32Shl(key, static_cast<int>(k));
+ Node* load = graph()->NewNode(simplified()->LoadBuffer(access), buffer,
+ offset, length, effect, control);
+ return ReplaceEagerly(node, load);
}
}
return NoChange();
Node* value = NodeProperties::GetValueInput(node, 2);
Type* key_type = NodeProperties::GetBounds(key).upper;
Type* base_type = NodeProperties::GetBounds(base).upper;
+ Type* value_type = NodeProperties::GetBounds(value).upper;
// TODO(mstarzinger): This lowering is not correct if:
// a) The typed array or its buffer is neutered.
- if (key_type->Is(Type::Integral32()) && base_type->IsConstant() &&
+ if (base_type->IsConstant() &&
base_type->AsConstant()->Value()->IsJSTypedArray()) {
- // JSStoreProperty(typed-array, int32, value)
- Handle<JSTypedArray> array =
+ Handle<JSTypedArray> const array =
Handle<JSTypedArray>::cast(base_type->AsConstant()->Value());
- if (IsExternalArrayElementsKind(array->map()->elements_kind())) {
- ExternalArrayType type = array->type();
- double byte_length = array->byte_length()->Number();
- if (byte_length <= kMaxInt) {
- Handle<ExternalArray> elements =
- Handle<ExternalArray>::cast(handle(array->elements()));
- Node* pointer = jsgraph()->IntPtrConstant(
- bit_cast<intptr_t>(elements->external_pointer()));
- Node* length = jsgraph()->Constant(array->length()->Number());
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- ElementAccess access = AccessBuilder::ForTypedArrayElement(type, true);
- Type* value_type = NodeProperties::GetBounds(value).upper;
- // If the value input does not have the required type, insert the
- // appropriate conversion.
-
- // Convert to a number first.
- if (!value_type->Is(Type::Number())) {
- Reduction number_reduction = ReduceJSToNumberInput(value);
- if (number_reduction.Changed()) {
- value = number_reduction.replacement();
- } else {
- Node* context = NodeProperties::GetContextInput(node);
- value = graph()->NewNode(javascript()->ToNumber(), value, context,
- effect, control);
- effect = value;
- }
+ BufferAccess const access(array->type());
+ size_t const k = ElementSizeLog2Of(access.machine_type());
+ double const byte_length = array->byte_length()->Number();
+ CHECK_LT(k, arraysize(shifted_int32_ranges_));
+ if (IsExternalArrayElementsKind(array->map()->elements_kind()) &&
+ access.external_array_type() != kExternalUint8ClampedArray &&
+ key_type->Is(shifted_int32_ranges_[k]) && byte_length <= kMaxInt) {
+      // JSStoreProperty(typed-array, int32, value)
+ Handle<ExternalArray> elements =
+ Handle<ExternalArray>::cast(handle(array->elements()));
+ Node* buffer = jsgraph()->PointerConstant(elements->external_pointer());
+ Node* length = jsgraph()->Constant(byte_length);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ // Convert to a number first.
+ if (!value_type->Is(Type::Number())) {
+ Reduction number_reduction = ReduceJSToNumberInput(value);
+ if (number_reduction.Changed()) {
+ value = number_reduction.replacement();
+ } else {
+ value = effect = graph()->NewNode(javascript()->ToNumber(), value,
+ context, effect, control);
}
- // For integer-typed arrays, convert to the integer type.
- if (access.type->Is(Type::Signed32()) &&
- !value_type->Is(Type::Signed32())) {
- value = graph()->NewNode(simplified()->NumberToInt32(), value);
- } else if (access.type->Is(Type::Unsigned32()) &&
- !value_type->Is(Type::Unsigned32())) {
- value = graph()->NewNode(simplified()->NumberToUint32(), value);
- }
-
- Node* store =
- graph()->NewNode(simplified()->StoreElement(access), pointer, key,
- length, value, effect, control);
- return ReplaceEagerly(node, store);
}
+ // For integer-typed arrays, convert to the integer type.
+ if (TypeOf(access.machine_type()) == kTypeInt32 &&
+ !value_type->Is(Type::Signed32())) {
+ value = graph()->NewNode(simplified()->NumberToInt32(), value);
+ } else if (TypeOf(access.machine_type()) == kTypeUint32 &&
+ !value_type->Is(Type::Unsigned32())) {
+ value = graph()->NewNode(simplified()->NumberToUint32(), value);
+ }
+ // Check if we can avoid the bounds check.
+ if (key_type->Min() >= 0 && key_type->Max() < array->length()->Number()) {
+ node->set_op(simplified()->StoreElement(
+ AccessBuilder::ForTypedArrayElement(array->type(), true)));
+ node->ReplaceInput(0, buffer);
+ DCHECK_EQ(key, node->InputAt(1));
+ node->ReplaceInput(2, value);
+ node->ReplaceInput(3, effect);
+ node->ReplaceInput(4, control);
+ node->TrimInputCount(5);
+ return Changed(node);
+ }
+ // Compute byte offset.
+ Node* offset = Word32Shl(key, static_cast<int>(k));
+ // Turn into a StoreBuffer operation.
+ node->set_op(simplified()->StoreBuffer(access));
+ node->ReplaceInput(0, buffer);
+ node->ReplaceInput(1, offset);
+ node->ReplaceInput(2, length);
+ node->ReplaceInput(3, value);
+ node->ReplaceInput(4, effect);
+ DCHECK_EQ(control, node->InputAt(5));
+ DCHECK_EQ(6, node->InputCount());
+ return Changed(node);
}
}
return NoChange();
return NoChange();
}
+
+Node* JSTypedLowering::Word32Shl(Node* const lhs, int32_t const rhs) {
+ if (rhs == 0) return lhs;
+ return graph()->NewNode(machine()->Word32Shl(), lhs,
+ jsgraph()->Int32Constant(rhs));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
class JSTypedLowering FINAL : public Reducer {
public:
explicit JSTypedLowering(JSGraph* jsgraph);
- virtual ~JSTypedLowering();
+ ~JSTypedLowering() {}
- virtual Reduction Reduce(Node* node) OVERRIDE;
+ Reduction Reduce(Node* node) OVERRIDE;
JSGraph* jsgraph() { return jsgraph_; }
Graph* graph() { return jsgraph_->graph(); }
Reduction ReduceI32Shift(Node* node, bool left_signed,
const Operator* shift_op);
+ Node* Word32Shl(Node* const lhs, int32_t const rhs);
+
JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
CommonOperatorBuilder* common() { return jsgraph_->common(); }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
Type* zero_range_;
Type* one_range_;
Type* zero_thirtyone_range_;
+ Type* shifted_int32_ranges_[4];
};
} // namespace compiler
switch (node->opcode()) {
case IrOpcode::kProjection:
return ReduceProjection(OpParameter<size_t>(node), node->InputAt(0));
- case IrOpcode::kWord32And: {
- Int32BinopMatcher m(node);
- if (m.right().Is(0)) return Replace(m.right().node()); // x & 0 => 0
- if (m.right().Is(-1)) return Replace(m.left().node()); // x & -1 => x
- if (m.IsFoldable()) { // K & K => K
- return ReplaceInt32(m.left().Value() & m.right().Value());
- }
- if (m.LeftEqualsRight()) return Replace(m.left().node()); // x & x => x
- if (m.left().IsWord32And() && m.right().HasValue()) {
- Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) { // (x & K) & K => x & K
- node->ReplaceInput(0, mleft.left().node());
- node->ReplaceInput(
- 1, Int32Constant(m.right().Value() & mleft.right().Value()));
- return Changed(node);
- }
- }
- if (m.left().IsInt32Add() && m.right().IsNegativePowerOf2()) {
- Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() &&
- (mleft.right().Value() & m.right().Value()) ==
- mleft.right().Value()) {
- // (x + K) & K => (x & K) + K
- return Replace(graph()->NewNode(
- machine()->Int32Add(),
- graph()->NewNode(machine()->Word32And(), mleft.left().node(),
- m.right().node()),
- mleft.right().node()));
- }
- }
- break;
- }
+ case IrOpcode::kWord32And:
+ return ReduceWord32And(node);
case IrOpcode::kWord32Or:
return ReduceWord32Or(node);
case IrOpcode::kWord32Xor: {
}
break;
}
- case IrOpcode::kWord32Shl: {
- Int32BinopMatcher m(node);
- if (m.right().Is(0)) return Replace(m.left().node()); // x << 0 => x
- if (m.IsFoldable()) { // K << K => K
- return ReplaceInt32(m.left().Value() << m.right().Value());
- }
- if (m.right().IsInRange(1, 31)) {
- // (x >>> K) << K => x & ~(2^K - 1)
- // (x >> K) << K => x & ~(2^K - 1)
- if (m.left().IsWord32Sar() || m.left().IsWord32Shr()) {
- Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().Is(m.right().Value())) {
- node->set_op(machine()->Word32And());
- node->ReplaceInput(0, mleft.left().node());
- node->ReplaceInput(
- 1, Uint32Constant(~((1U << m.right().Value()) - 1U)));
- return Changed(node);
- }
- }
- }
- return ReduceWord32Shifts(node);
- }
+ case IrOpcode::kWord32Shl:
+ return ReduceWord32Shl(node);
case IrOpcode::kWord32Shr: {
Uint32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x >>> 0 => x
if (m.right().IsPowerOf2()) { // x * 2^n => x << n
node->set_op(machine()->Word32Shl());
node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value())));
- return Changed(node);
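+        // Try to further reduce the node now that it is a Word32Shl.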
+ Reduction reduction = ReduceWord32Shl(node);
+ return reduction.Changed() ? reduction : Changed(node);
}
break;
}
DCHECK((node->opcode() == IrOpcode::kWord32Shl) ||
(node->opcode() == IrOpcode::kWord32Shr) ||
(node->opcode() == IrOpcode::kWord32Sar));
-
if (machine()->Word32ShiftIsSafe()) {
// Remove the explicit 'and' with 0x1f if the shift provided by the machine
// instruction matches that required by JavaScript.
}
-Reduction MachineOperatorReducer::ReduceWord32Or(Node* node) {
- DCHECK(node->opcode() == IrOpcode::kWord32Or);
+Reduction MachineOperatorReducer::ReduceWord32Shl(Node* node) {
+ DCHECK_EQ(IrOpcode::kWord32Shl, node->opcode());
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.left().node()); // x << 0 => x
+ if (m.IsFoldable()) { // K << K => K
+ return ReplaceInt32(m.left().Value() << m.right().Value());
+ }
+ if (m.right().IsInRange(1, 31)) {
+ // (x >>> K) << K => x & ~(2^K - 1)
+ // (x >> K) << K => x & ~(2^K - 1)
+ if (m.left().IsWord32Sar() || m.left().IsWord32Shr()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().Is(m.right().Value())) {
+ node->set_op(machine()->Word32And());
+ node->ReplaceInput(0, mleft.left().node());
+ node->ReplaceInput(1,
+ Uint32Constant(~((1U << m.right().Value()) - 1U)));
+ Reduction reduction = ReduceWord32And(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ }
+ }
+ return ReduceWord32Shifts(node);
+}
+
+Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
+ DCHECK_EQ(IrOpcode::kWord32And, node->opcode());
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.right().node()); // x & 0 => 0
+ if (m.right().Is(-1)) return Replace(m.left().node()); // x & -1 => x
+ if (m.IsFoldable()) { // K & K => K
+ return ReplaceInt32(m.left().Value() & m.right().Value());
+ }
+ if (m.LeftEqualsRight()) return Replace(m.left().node()); // x & x => x
+ if (m.left().IsWord32And() && m.right().HasValue()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) { // (x & K) & K => x & K
+ node->ReplaceInput(0, mleft.left().node());
+ node->ReplaceInput(
+ 1, Int32Constant(m.right().Value() & mleft.right().Value()));
+ Reduction reduction = ReduceWord32And(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ }
+ if (m.left().IsInt32Add() && m.right().IsNegativePowerOf2()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue() &&
+ (mleft.right().Value() & m.right().Value()) == mleft.right().Value()) {
+ // (x + K) & K => (x & K) + K
+ return Replace(graph()->NewNode(
+ machine()->Int32Add(),
+ graph()->NewNode(machine()->Word32And(), mleft.left().node(),
+ m.right().node()),
+ mleft.right().node()));
+ }
+ }
+ return NoChange();
+}
+
+
+Reduction MachineOperatorReducer::ReduceWord32Or(Node* node) {
+ DCHECK_EQ(IrOpcode::kWord32Or, node->opcode());
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x | 0 => x
if (m.right().Is(-1)) return Replace(m.right().node()); // x | -1 => -1
Reduction ReduceStore(Node* node);
Reduction ReduceProjection(size_t index, Node* node);
Reduction ReduceWord32Shifts(Node* node);
+ Reduction ReduceWord32Shl(Node* node);
+ Reduction ReduceWord32And(Node* node);
Reduction ReduceWord32Or(Node* node);
Graph* graph() const;
}
+CheckedLoadRepresentation CheckedLoadRepresentationOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kCheckedLoad, op->opcode());
+ return OpParameter<CheckedLoadRepresentation>(op);
+}
+
+
+CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kCheckedStore, op->opcode());
+ return OpParameter<CheckedStoreRepresentation>(op);
+}
+
+
#define PURE_OP_LIST(V) \
V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
PURE_OP_LIST(PURE)
#undef PURE
-#define LOAD(Type) \
- struct Load##Type##Operator FINAL : public Operator1<LoadRepresentation> { \
- Load##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kLoad, Operator::kNoThrow | Operator::kNoWrite, \
- "Load", 2, 1, 1, 1, 1, 0, k##Type) {} \
- }; \
- Load##Type##Operator k##Load##Type;
+#define LOAD(Type) \
+ struct Load##Type##Operator FINAL : public Operator1<LoadRepresentation> { \
+ Load##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kLoad, Operator::kNoThrow | Operator::kNoWrite, \
+ "Load", 2, 1, 1, 1, 1, 0, k##Type) {} \
+ }; \
+ struct CheckedLoad##Type##Operator FINAL \
+ : public Operator1<CheckedLoadRepresentation> { \
+ CheckedLoad##Type##Operator() \
+ : Operator1<CheckedLoadRepresentation>( \
+ IrOpcode::kCheckedLoad, Operator::kNoThrow | Operator::kNoWrite, \
+ "CheckedLoad", 3, 1, 1, 1, 1, 0, k##Type) {} \
+ }; \
+ Load##Type##Operator kLoad##Type; \
+ CheckedLoad##Type##Operator kCheckedLoad##Type;
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
-#define STORE(Type) \
- struct Store##Type##Operator : public Operator1<StoreRepresentation> { \
- explicit Store##Type##Operator(WriteBarrierKind write_barrier_kind) \
- : Operator1<StoreRepresentation>( \
- IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow, \
- "Store", 3, 1, 1, 0, 1, 0, \
- StoreRepresentation(k##Type, write_barrier_kind)) {} \
- }; \
- struct Store##Type##NoWriteBarrier##Operator FINAL \
- : public Store##Type##Operator { \
- Store##Type##NoWriteBarrier##Operator() \
- : Store##Type##Operator(kNoWriteBarrier) {} \
- }; \
- struct Store##Type##FullWriteBarrier##Operator FINAL \
- : public Store##Type##Operator { \
- Store##Type##FullWriteBarrier##Operator() \
- : Store##Type##Operator(kFullWriteBarrier) {} \
- }; \
- Store##Type##NoWriteBarrier##Operator k##Store##Type##NoWriteBarrier; \
- Store##Type##FullWriteBarrier##Operator k##Store##Type##FullWriteBarrier;
+#define STORE(Type) \
+ struct Store##Type##Operator : public Operator1<StoreRepresentation> { \
+ explicit Store##Type##Operator(WriteBarrierKind write_barrier_kind) \
+ : Operator1<StoreRepresentation>( \
+ IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow, \
+ "Store", 3, 1, 1, 0, 1, 0, \
+ StoreRepresentation(k##Type, write_barrier_kind)) {} \
+ }; \
+ struct Store##Type##NoWriteBarrier##Operator FINAL \
+ : public Store##Type##Operator { \
+ Store##Type##NoWriteBarrier##Operator() \
+ : Store##Type##Operator(kNoWriteBarrier) {} \
+ }; \
+ struct Store##Type##FullWriteBarrier##Operator FINAL \
+ : public Store##Type##Operator { \
+ Store##Type##FullWriteBarrier##Operator() \
+ : Store##Type##Operator(kFullWriteBarrier) {} \
+ }; \
+ struct CheckedStore##Type##Operator FINAL \
+ : public Operator1<CheckedStoreRepresentation> { \
+ CheckedStore##Type##Operator() \
+ : Operator1<CheckedStoreRepresentation>( \
+ IrOpcode::kCheckedStore, Operator::kNoRead | Operator::kNoThrow, \
+ "CheckedStore", 4, 1, 1, 0, 1, 0, k##Type) {} \
+ }; \
+ Store##Type##NoWriteBarrier##Operator kStore##Type##NoWriteBarrier; \
+ Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier; \
+ CheckedStore##Type##Operator kCheckedStore##Type;
MACHINE_TYPE_LIST(STORE)
#undef STORE
};
switch (rep) {
#define LOAD(Type) \
case k##Type: \
- return &cache_.k##Load##Type;
+ return &cache_.kLoad##Type;
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
-
default:
break;
}
IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow, "Store", 3, 1,
1, 0, 1, 0, rep);
}
+
+
+const Operator* MachineOperatorBuilder::CheckedLoad(
+ CheckedLoadRepresentation rep) {
+ switch (rep) {
+#define LOAD(Type) \
+ case k##Type: \
+ return &cache_.kCheckedLoad##Type;
+ MACHINE_TYPE_LIST(LOAD)
+#undef LOAD
+ default:
+ break;
+ }
+ // Uncached.
+ return new (zone_) Operator1<CheckedLoadRepresentation>(
+ IrOpcode::kCheckedLoad, Operator::kNoThrow | Operator::kNoWrite,
+ "CheckedLoad", 3, 1, 1, 1, 1, 0, rep);
+}
+
+
+const Operator* MachineOperatorBuilder::CheckedStore(
+ CheckedStoreRepresentation rep) {
+ switch (rep) {
+#define STORE(Type) \
+ case k##Type: \
+ return &cache_.kCheckedStore##Type;
+ MACHINE_TYPE_LIST(STORE)
+#undef STORE
+ default:
+ break;
+ }
+ // Uncached.
+ return new (zone_) Operator1<CheckedStoreRepresentation>(
+ IrOpcode::kCheckedStore, Operator::kNoRead | Operator::kNoThrow,
+ "CheckedStore", 4, 1, 1, 0, 1, 0, rep);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
StoreRepresentation const& StoreRepresentationOf(Operator const*);
+// A CheckedLoad needs a MachineType.
+typedef MachineType CheckedLoadRepresentation;
+
+CheckedLoadRepresentation CheckedLoadRepresentationOf(Operator const*);
+
+
+// A CheckedStore needs a MachineType.
+typedef MachineType CheckedStoreRepresentation;
+
+CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
+
+
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
// for generating code to run on architectures such as ia32, x64, arm, etc.
// Access to the machine stack.
const Operator* LoadStackPointer();
+ // checked-load heap, index, length
+ const Operator* CheckedLoad(CheckedLoadRepresentation);
+ // checked-store heap, index, length, value
+ const Operator* CheckedStore(CheckedStoreRepresentation);
+
// Target machine word-size assumed by this builder.
bool Is32() const { return word() == kRepWord32; }
bool Is64() const { return word() == kRepWord64; }
V(ChangeBoolToBit) \
V(ChangeBitToBool) \
V(LoadField) \
+ V(LoadBuffer) \
V(LoadElement) \
V(StoreField) \
+ V(StoreBuffer) \
V(StoreElement) \
V(ObjectIsSmi) \
V(ObjectIsNonNegativeSmi)
V(Float64Ceil) \
V(Float64RoundTruncate) \
V(Float64RoundTiesAway) \
- V(LoadStackPointer)
+ V(LoadStackPointer) \
+ V(CheckedLoad) \
+ V(CheckedStore)
#define VALUE_OP_LIST(V) \
COMMON_OP_LIST(V) \
#include "src/compiler/simplified-lowering.h"
-#include <limits>
-
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/compiler/common-operator.h"
if (lower()) lowering->DoStoreField(node);
break;
}
- case IrOpcode::kLoadElement: {
- ElementAccess access = ElementAccessOf(node->op());
- ProcessInput(node, 0, changer_->TypeForBasePointer(access));
- ProcessInput(node, 1, kMachInt32); // element index
+ case IrOpcode::kLoadBuffer: {
+ BufferAccess access = BufferAccessOf(node->op());
+ ProcessInput(node, 0, kMachPtr); // buffer
+ ProcessInput(node, 1, kMachInt32); // offset
ProcessInput(node, 2, kMachInt32); // length
ProcessRemainingInputs(node, 3);
// Tagged overrides everything if we have to do a typed array bounds
// check, because we may need to return undefined then.
MachineType output_type =
- (access.bounds_check == kTypedArrayBoundsCheck &&
- (use & kRepTagged))
- ? kMachAnyTagged
- : access.machine_type;
+ (use & kRepTagged) ? kMachAnyTagged : access.machine_type();
SetOutput(node, output_type);
- if (lower()) lowering->DoLoadElement(node, output_type);
+ if (lower()) lowering->DoLoadBuffer(node, output_type, changer_);
+ break;
+ }
+ case IrOpcode::kStoreBuffer: {
+ BufferAccess access = BufferAccessOf(node->op());
+ ProcessInput(node, 0, kMachPtr); // buffer
+ ProcessInput(node, 1, kMachInt32); // offset
+ ProcessInput(node, 2, kMachInt32); // length
+ ProcessInput(node, 3, access.machine_type()); // value
+ ProcessRemainingInputs(node, 4);
+ SetOutput(node, 0);
+ if (lower()) lowering->DoStoreBuffer(node);
+ break;
+ }
+ case IrOpcode::kLoadElement: {
+ ElementAccess access = ElementAccessOf(node->op());
+ ProcessInput(node, 0, changer_->TypeForBasePointer(access)); // base
+ ProcessInput(node, 1, kMachInt32); // index
+ ProcessRemainingInputs(node, 2);
+ SetOutput(node, access.machine_type);
+ if (lower()) lowering->DoLoadElement(node);
break;
}
case IrOpcode::kStoreElement: {
ElementAccess access = ElementAccessOf(node->op());
- ProcessInput(node, 0, changer_->TypeForBasePointer(access));
- ProcessInput(node, 1, kMachInt32); // element index
- ProcessInput(node, 2, kMachInt32); // length
- ProcessInput(node, 3, access.machine_type);
- ProcessRemainingInputs(node, 4);
+ ProcessInput(node, 0, changer_->TypeForBasePointer(access)); // base
+ ProcessInput(node, 1, kMachInt32); // index
+ ProcessInput(node, 2, access.machine_type); // value
+ ProcessRemainingInputs(node, 3);
SetOutput(node, 0);
if (lower()) lowering->DoStoreElement(node);
break;
}
-namespace {
-
-intptr_t AddressForOutOfBoundsLoad(MachineType type) {
- switch (RepresentationOf(type)) {
- case kRepFloat32: {
- static const float dummy = std::numeric_limits<float>::quiet_NaN();
- return bit_cast<intptr_t>(&dummy);
- }
- case kRepFloat64: {
- static const double dummy = std::numeric_limits<double>::quiet_NaN();
- return bit_cast<intptr_t>(&dummy);
- }
- case kRepBit:
- case kRepWord8:
- case kRepWord16:
- case kRepWord32: {
- static const int32_t dummy = 0;
- return bit_cast<intptr_t>(&dummy);
- }
- default:
- break;
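+// A buffer load whose use requires a tagged value is expanded here into an
+// explicit bounds check that yields undefined when the offset is out of
+// bounds; otherwise the node simply becomes a CheckedLoad machine operator
+// and the bounds check is left to the backend.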
+void SimplifiedLowering::DoLoadBuffer(Node* node, MachineType output_type,
+ RepresentationChanger* changer) {
+ DCHECK_EQ(IrOpcode::kLoadBuffer, node->opcode());
+ DCHECK_NE(kMachNone, RepresentationOf(output_type));
+ MachineType const type = BufferAccessOf(node->op()).machine_type();
+ if (output_type & kRepTagged) {
+ Node* const buffer = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ Node* const effect = node->InputAt(3);
+ Node* const control = node->InputAt(4);
+
+ Node* check = graph()->NewNode(machine()->Uint32LessThan(), offset, length);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = graph()->NewNode(machine()->Load(type), buffer, offset,
+ effect, if_true);
+ Node* vtrue = changer->GetTaggedRepresentationFor(etrue, type);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = jsgraph()->UndefinedConstant();
+ Node* efalse = effect;
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* ephi = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, merge);
+
+ // Replace effect uses of {node} with the {ephi}.
+ NodeProperties::ReplaceWithValue(node, node, ephi);
+
+ // Turn the {node} into a Phi.
+ node->set_op(common()->Phi(kMachAnyTagged, 2));
+ node->ReplaceInput(0, vtrue);
+ node->ReplaceInput(1, vfalse);
+ node->ReplaceInput(2, merge);
+ node->TrimInputCount(3);
+ } else {
+ node->set_op(machine()->CheckedLoad(type));
}
- UNREACHABLE();
- return 0;
}
-intptr_t AddressForOutOfBoundsStore() {
- static volatile double dummy = 0;
- return bit_cast<intptr_t>(&dummy);
+void SimplifiedLowering::DoStoreBuffer(Node* node) {
+ DCHECK_EQ(IrOpcode::kStoreBuffer, node->opcode());
+ MachineType const type = BufferAccessOf(node->op()).machine_type();
+ node->set_op(machine()->CheckedStore(type));
}
-} // namespace
-
-void SimplifiedLowering::DoLoadElement(Node* node, MachineType output_type) {
+void SimplifiedLowering::DoLoadElement(Node* node) {
const ElementAccess& access = ElementAccessOf(node->op());
- const Operator* op = machine()->Load(access.machine_type);
- Node* key = node->InputAt(1);
- Node* index = ComputeIndex(access, key);
- Node* effect = node->InputAt(3);
- if (access.bounds_check == kNoBoundsCheck) {
- DCHECK_EQ(access.machine_type, output_type);
- node->set_op(op);
- node->ReplaceInput(1, index);
- node->ReplaceInput(2, effect);
- node->ReplaceInput(3, graph()->start());
- } else {
- DCHECK_EQ(kTypedArrayBoundsCheck, access.bounds_check);
-
- Node* base = node->InputAt(0);
- Node* length = node->InputAt(2);
- Node* check = graph()->NewNode(machine()->Uint32LessThan(), key, length);
-
- IntPtrMatcher mbase(base);
- if (mbase.HasValue() && (output_type & kRepTagged) == 0) {
- Node* select = graph()->NewNode(
- common()->Select(kMachIntPtr, BranchHint::kTrue), check, index,
- jsgraph()->IntPtrConstant(AddressForOutOfBoundsLoad(output_type) -
- mbase.Value()));
-
- node->set_op(op);
- node->ReplaceInput(1, select);
- node->ReplaceInput(2, effect);
- node->ReplaceInput(3, graph()->start());
- } else {
- Diamond d(graph(), common(), check, BranchHint::kTrue);
-
- Node* load = graph()->NewNode(op, base, index, effect, d.if_true);
- Node* result = load;
- if (output_type & kRepTagged) {
- // TODO(turbofan): This is ugly as hell!
- SimplifiedOperatorBuilder simplified(graph()->zone());
- RepresentationChanger changer(jsgraph(), &simplified,
- graph()->zone()->isolate());
- result =
- changer.GetTaggedRepresentationFor(result, access.machine_type);
- }
-
- Node* undefined;
- if (output_type & kRepTagged) {
- DCHECK_EQ(0, access.machine_type & kRepTagged);
- undefined = jsgraph()->UndefinedConstant();
- } else if (output_type & kRepFloat32) {
- undefined =
- jsgraph()->Float32Constant(std::numeric_limits<float>::quiet_NaN());
- } else if (output_type & kRepFloat64) {
- undefined = jsgraph()->Float64Constant(
- std::numeric_limits<double>::quiet_NaN());
- } else {
- undefined = jsgraph()->Int32Constant(0);
- }
-
- // Replace effect uses of node with the effect phi.
- NodeProperties::ReplaceWithValue(node, node, d.EffectPhi(load, effect));
-
- d.OverwriteWithPhi(node, output_type, result, undefined);
- }
- }
+ node->set_op(machine()->Load(access.machine_type));
+ node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
}
void SimplifiedLowering::DoStoreElement(Node* node) {
const ElementAccess& access = ElementAccessOf(node->op());
- const Operator* op = machine()->Store(StoreRepresentation(
+ node->set_op(machine()->Store(StoreRepresentation(
access.machine_type,
ComputeWriteBarrierKind(access.base_is_tagged, access.machine_type,
- access.type)));
- Node* key = node->InputAt(1);
- Node* index = ComputeIndex(access, key);
- if (access.bounds_check == kNoBoundsCheck) {
- node->set_op(op);
- node->ReplaceInput(1, index);
- node->RemoveInput(2);
- } else {
- DCHECK_EQ(kTypedArrayBoundsCheck, access.bounds_check);
-
- Node* base = node->InputAt(0);
- Node* length = node->InputAt(2);
- Node* value = node->InputAt(3);
- Node* effect = node->InputAt(4);
- Node* control = node->InputAt(5);
- Node* check = graph()->NewNode(machine()->Uint32LessThan(), key, length);
-
- IntPtrMatcher mbase(base);
- if (mbase.HasValue()) {
- Node* select = graph()->NewNode(
- common()->Select(kMachIntPtr, BranchHint::kTrue), check, index,
- jsgraph()->IntPtrConstant(AddressForOutOfBoundsStore() -
- mbase.Value()));
-
- node->set_op(op);
- node->ReplaceInput(1, select);
- node->RemoveInput(2);
- } else {
- Diamond d(graph(), common(), check, BranchHint::kTrue);
- d.Chain(control);
- Node* store = graph()->NewNode(op, base, index, value, effect, d.if_true);
- d.OverwriteWithEffectPhi(node, store, effect);
- }
- }
+ access.type))));
+ node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
}
namespace internal {
namespace compiler {
+// Forward declarations.
+class RepresentationChanger;
+
+
class SimplifiedLowering FINAL {
public:
explicit SimplifiedLowering(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
void DoStoreField(Node* node);
// TODO(turbofan): The output_type can be removed once the result of the
// representation analysis is stored in the node bounds.
- void DoLoadElement(Node* node, MachineType output_type);
+ void DoLoadBuffer(Node* node, MachineType output_type,
+ RepresentationChanger* changer);
+ void DoStoreBuffer(Node* node);
+ void DoLoadElement(Node* node);
void DoStoreElement(Node* node);
void DoStringAdd(Node* node);
void DoStringEqual(Node* node);
if (m.HasValue()) return ReplaceNumber(FastUI2D(m.Value()));
break;
}
- case IrOpcode::kLoadElement: {
- ElementAccess access = ElementAccessOf(node->op());
- if (access.bounds_check == kTypedArrayBoundsCheck) {
- NumberMatcher mkey(node->InputAt(1));
- NumberMatcher mlength(node->InputAt(2));
- if (mkey.HasValue() && mlength.HasValue()) {
- // Skip the typed array bounds check if key and length are constant.
- if (mkey.Value() >= 0 && mkey.Value() < mlength.Value()) {
- access.bounds_check = kNoBoundsCheck;
- node->set_op(simplified()->LoadElement(access));
- return Changed(node);
- }
- }
- }
- break;
- }
- case IrOpcode::kStoreElement: {
- ElementAccess access = ElementAccessOf(node->op());
- if (access.bounds_check == kTypedArrayBoundsCheck) {
- NumberMatcher mkey(node->InputAt(1));
- NumberMatcher mlength(node->InputAt(2));
- if (mkey.HasValue() && mlength.HasValue()) {
- // Skip the typed array bounds check if key and length are constant.
- if (mkey.Value() >= 0 && mkey.Value() < mlength.Value()) {
- access.bounds_check = kNoBoundsCheck;
- node->set_op(simplified()->StoreElement(access));
- return Changed(node);
- }
- }
- }
- break;
- }
default:
break;
}
}
+MachineType BufferAccess::machine_type() const {
+ switch (external_array_type_) {
+ case kExternalUint8Array:
+ return kMachUint8;
+ case kExternalInt8Array:
+ return kMachInt8;
+ case kExternalUint16Array:
+ return kMachUint16;
+ case kExternalInt16Array:
+ return kMachInt16;
+ case kExternalUint32Array:
+ return kMachUint32;
+ case kExternalInt32Array:
+ return kMachInt32;
+ case kExternalFloat32Array:
+ return kMachFloat32;
+ case kExternalFloat64Array:
+ return kMachFloat64;
+ case kExternalUint8ClampedArray:
+ break;
+ }
+ UNREACHABLE();
+ return kMachNone;
+}
+
+
+bool operator==(BufferAccess lhs, BufferAccess rhs) {
+ return lhs.external_array_type() == rhs.external_array_type();
+}
+
+
+bool operator!=(BufferAccess lhs, BufferAccess rhs) { return !(lhs == rhs); }
+
+
+size_t hash_value(BufferAccess access) {
+ return base::hash<ExternalArrayType>()(access.external_array_type());
+}
+
+
+std::ostream& operator<<(std::ostream& os, BufferAccess access) {
+ switch (access.external_array_type()) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return os << #Type;
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+BufferAccess const BufferAccessOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kLoadBuffer ||
+ op->opcode() == IrOpcode::kStoreBuffer);
+ return OpParameter<BufferAccess>(op);
+}
+
+
bool operator==(FieldAccess const& lhs, FieldAccess const& rhs) {
return lhs.base_is_tagged == rhs.base_is_tagged && lhs.offset == rhs.offset &&
lhs.machine_type == rhs.machine_type;
}
-std::ostream& operator<<(std::ostream& os, BoundsCheckMode bounds_check_mode) {
- switch (bounds_check_mode) {
- case kNoBoundsCheck:
- return os << "no bounds check";
- case kTypedArrayBoundsCheck:
- return os << "ignore out of bounds";
- }
- UNREACHABLE();
- return os;
-}
-
-
bool operator==(ElementAccess const& lhs, ElementAccess const& rhs) {
return lhs.base_is_tagged == rhs.base_is_tagged &&
lhs.header_size == rhs.header_size &&
std::ostream& operator<<(std::ostream& os, ElementAccess const& access) {
os << access.base_is_tagged << ", " << access.header_size << ", ";
access.type->PrintTo(os);
- os << ", " << access.machine_type << ", " << access.bounds_check;
+ os << ", " << access.machine_type;
return os;
}
Name##Operator k##Name;
PURE_OP_LIST(PURE)
#undef PURE
+
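+  // LoadBuffer takes (buffer, offset, length) value inputs plus effect and
+  // control and produces a value; StoreBuffer additionally takes the value to
+  // store and produces no value. The operator arities below encode this.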
+#define BUFFER_ACCESS(Type, type, TYPE, ctype, size) \
+ struct LoadBuffer##Type##Operator FINAL : public Operator1<BufferAccess> { \
+ LoadBuffer##Type##Operator() \
+ : Operator1<BufferAccess>(IrOpcode::kLoadBuffer, \
+ Operator::kNoThrow | Operator::kNoWrite, \
+ "LoadBuffer", 3, 1, 1, 1, 1, 0, \
+ BufferAccess(kExternal##Type##Array)) {} \
+ }; \
+ struct StoreBuffer##Type##Operator FINAL : public Operator1<BufferAccess> { \
+ StoreBuffer##Type##Operator() \
+ : Operator1<BufferAccess>(IrOpcode::kStoreBuffer, \
+ Operator::kNoRead | Operator::kNoThrow, \
+ "StoreBuffer", 4, 1, 1, 0, 1, 0, \
+ BufferAccess(kExternal##Type##Array)) {} \
+ }; \
+ LoadBuffer##Type##Operator kLoadBuffer##Type; \
+ StoreBuffer##Type##Operator kStoreBuffer##Type;
+ TYPED_ARRAYS(BUFFER_ACCESS)
+#undef BUFFER_ACCESS
};
}
+const Operator* SimplifiedOperatorBuilder::LoadBuffer(BufferAccess access) {
+ switch (access.external_array_type()) {
+#define LOAD_BUFFER(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return &cache_.kLoadBuffer##Type;
+ TYPED_ARRAYS(LOAD_BUFFER)
+#undef LOAD_BUFFER
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+
+const Operator* SimplifiedOperatorBuilder::StoreBuffer(BufferAccess access) {
+ switch (access.external_array_type()) {
+#define STORE_BUFFER(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return &cache_.kStoreBuffer##Type;
+ TYPED_ARRAYS(STORE_BUFFER)
+#undef STORE_BUFFER
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+
#define ACCESS_OP_LIST(V) \
V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1, 1) \
V(StoreField, FieldAccess, Operator::kNoRead, 2, 1, 0) \
- V(LoadElement, ElementAccess, Operator::kNoWrite, 3, 0, 1) \
- V(StoreElement, ElementAccess, Operator::kNoRead, 4, 1, 0)
+ V(LoadElement, ElementAccess, Operator::kNoWrite, 2, 1, 1) \
+ V(StoreElement, ElementAccess, Operator::kNoRead, 3, 1, 0)
#define ACCESS(Name, Type, properties, value_input_count, control_input_count, \
std::ostream& operator<<(std::ostream&, BaseTaggedness);
+// An access descriptor for loads/stores of array buffers.
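+// The machine type used for the access is derived from the external array
+// type; see BufferAccess::machine_type().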
+class BufferAccess FINAL {
+ public:
+ explicit BufferAccess(ExternalArrayType external_array_type)
+ : external_array_type_(external_array_type) {}
+
+ ExternalArrayType external_array_type() const { return external_array_type_; }
+ MachineType machine_type() const;
+
+ private:
+ ExternalArrayType const external_array_type_;
+};
+
+bool operator==(BufferAccess, BufferAccess);
+bool operator!=(BufferAccess, BufferAccess);
+
+size_t hash_value(BufferAccess);
+
+std::ostream& operator<<(std::ostream&, BufferAccess);
+
+BufferAccess const BufferAccessOf(const Operator* op) WARN_UNUSED_RESULT;
+
+
// An access descriptor for loads/stores of fixed structures like field
// accesses of heap objects. Accesses from either tagged or untagged base
// pointers are supported; untagging is done automatically during lowering.
std::ostream& operator<<(std::ostream&, FieldAccess const&);
-
-// The bound checking mode for ElementAccess below.
-enum BoundsCheckMode { kNoBoundsCheck, kTypedArrayBoundsCheck };
-
-std::ostream& operator<<(std::ostream&, BoundsCheckMode);
+FieldAccess const& FieldAccessOf(const Operator* op) WARN_UNUSED_RESULT;
// An access descriptor for loads/stores of indexed structures like characters
// in strings or off-heap backing stores. Accesses from either tagged or
// untagged base pointers are supported; untagging is done automatically during
// lowering.
struct ElementAccess {
- BoundsCheckMode bounds_check; // specifies the bounds checking mode.
BaseTaggedness base_is_tagged; // specifies if the base pointer is tagged.
int header_size; // size of the header, without tag.
Type* type; // type of the element.
std::ostream& operator<<(std::ostream&, ElementAccess const&);
-
-// If the accessed object is not a heap object, add this to the header_size.
-static const int kNonHeapObjectHeaderSize = kHeapObjectTag;
-
-
-const FieldAccess& FieldAccessOf(const Operator* op) WARN_UNUSED_RESULT;
-const ElementAccess& ElementAccessOf(const Operator* op) WARN_UNUSED_RESULT;
+ElementAccess const& ElementAccessOf(const Operator* op) WARN_UNUSED_RESULT;
// Interface for building simplified operators, which represent the
const Operator* ObjectIsSmi();
const Operator* ObjectIsNonNegativeSmi();
- const Operator* LoadField(const FieldAccess&);
- const Operator* StoreField(const FieldAccess&);
+ const Operator* LoadField(FieldAccess const&);
+ const Operator* StoreField(FieldAccess const&);
+
+ // load-buffer buffer, offset, length
+ const Operator* LoadBuffer(BufferAccess);
+
+ // store-buffer buffer, offset, length, value
+ const Operator* StoreBuffer(BufferAccess);
// load-element [base + index], length
const Operator* LoadElement(ElementAccess const&);
integer = Type::Range(minusinfinity, infinity, zone);
weakint = Type::Union(integer, nan_or_minuszero, zone);
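+  // Integer ranges matching the value ranges of the small typed array element
+  // kinds; used for the native types below and for typing LoadBuffer results.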
+ signed8_ = Type::Range(f->NewNumber(kMinInt8), f->NewNumber(kMaxInt8), zone);
+ unsigned8_ = Type::Range(zero, f->NewNumber(kMaxUInt8), zone);
+ signed16_ =
+ Type::Range(f->NewNumber(kMinInt16), f->NewNumber(kMaxInt16), zone);
+ unsigned16_ = Type::Range(zero, f->NewNumber(kMaxUInt16), zone);
+
number_fun0_ = Type::Function(number, zone);
number_fun1_ = Type::Function(number, number, zone);
number_fun2_ = Type::Function(number, number, number, zone);
random_fun_ = Type::Function(Type::Union(
Type::UnsignedSmall(), Type::OtherNumber(), zone), zone);
- Type* int8 = Type::Intersect(
- Type::Range(f->NewNumber(-0x7F), f->NewNumber(0x7F-1), zone),
- Type::UntaggedInt8(), zone);
- Type* int16 = Type::Intersect(
- Type::Range(f->NewNumber(-0x7FFF), f->NewNumber(0x7FFF-1), zone),
- Type::UntaggedInt16(), zone);
- Type* uint8 = Type::Intersect(
- Type::Range(zero, f->NewNumber(0xFF-1), zone),
- Type::UntaggedInt8(), zone);
- Type* uint16 = Type::Intersect(
- Type::Range(zero, f->NewNumber(0xFFFF-1), zone),
- Type::UntaggedInt16(), zone);
-
-#define NATIVE_TYPE(sem, rep) \
- Type::Intersect(Type::sem(), Type::rep(), zone)
- Type* int32 = NATIVE_TYPE(Signed32, UntaggedInt32);
- Type* uint32 = NATIVE_TYPE(Unsigned32, UntaggedInt32);
- Type* float32 = NATIVE_TYPE(Number, UntaggedFloat32);
- Type* float64 = NATIVE_TYPE(Number, UntaggedFloat64);
+#define NATIVE_TYPE(sem, rep) Type::Intersect(sem, rep, zone)
+ Type* int8 = NATIVE_TYPE(signed8_, Type::UntaggedInt8());
+ Type* uint8 = NATIVE_TYPE(unsigned8_, Type::UntaggedInt8());
+ Type* int16 = NATIVE_TYPE(signed16_, Type::UntaggedInt16());
+ Type* uint16 = NATIVE_TYPE(unsigned16_, Type::UntaggedInt16());
+ Type* int32 = NATIVE_TYPE(Type::Signed32(), Type::UntaggedInt32());
+ Type* uint32 = NATIVE_TYPE(Type::Unsigned32(), Type::UntaggedInt32());
+ Type* float32 = NATIVE_TYPE(Type::Number(), Type::UntaggedFloat32());
+ Type* float64 = NATIVE_TYPE(Type::Number(), Type::UntaggedFloat64());
#undef NATIVE_TYPE
Type* buffer = Type::Buffer(zone);
}
+Bounds Typer::Visitor::TypeLoadBuffer(Node* node) {
+ switch (BufferAccessOf(node->op()).external_array_type()) {
+ case kExternalInt8Array:
+ return Bounds(typer_->signed8_);
+ case kExternalUint8Array:
+ return Bounds(typer_->unsigned8_);
+ case kExternalInt16Array:
+ return Bounds(typer_->signed16_);
+ case kExternalUint16Array:
+ return Bounds(typer_->unsigned16_);
+ case kExternalInt32Array:
+ return Bounds(Type::Signed32());
+ case kExternalUint32Array:
+ return Bounds(Type::Unsigned32());
+ case kExternalFloat32Array:
+ case kExternalFloat64Array:
+ return Bounds(Type::Number());
+ case kExternalUint8ClampedArray:
+ break;
+ }
+ UNREACHABLE();
+ return Bounds();
+}
+
+
Bounds Typer::Visitor::TypeLoadElement(Node* node) {
return Bounds(ElementAccessOf(node->op()).type);
}
}
+Bounds Typer::Visitor::TypeStoreBuffer(Node* node) {
+ UNREACHABLE();
+ return Bounds();
+}
+
+
Bounds Typer::Visitor::TypeStoreElement(Node* node) {
UNREACHABLE();
return Bounds();
}
+Bounds Typer::Visitor::TypeCheckedLoad(Node* node) {
+ return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeCheckedStore(Node* node) {
+ UNREACHABLE();
+ return Bounds();
+}
+
+
// Heap constants.
return Type::Constant(value, zone());
}
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
Type* falsish;
Type* integer;
Type* weakint;
+ Type* signed8_;
+ Type* unsigned8_;
+ Type* signed16_;
+ Type* unsigned16_;
Type* number_fun0_;
Type* number_fun1_;
Type* number_fun2_;
ZoneVector<Handle<Object> > weaken_max_limits_;
DISALLOW_COPY_AND_ASSIGN(Typer);
};
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_TYPER_H_
// CheckValueInputIs(node, 0, Type::Object());
// CheckUpperIs(node, Field(node).type));
break;
+ case IrOpcode::kLoadBuffer:
+ break;
case IrOpcode::kLoadElement:
// Object -> elementtype
// TODO(rossberg): activate once machine ops are typed.
// CheckValueInputIs(node, 1, Field(node).type));
CheckNotTyped(node);
break;
+ case IrOpcode::kStoreBuffer:
+ break;
case IrOpcode::kStoreElement:
// (Object, elementtype) -> _|_
// TODO(rossberg): activate once machine ops are typed.
case IrOpcode::kChangeFloat64ToInt32:
case IrOpcode::kChangeFloat64ToUint32:
case IrOpcode::kLoadStackPointer:
+ case IrOpcode::kCheckedLoad:
+ case IrOpcode::kCheckedStore:
// TODO(rossberg): Check.
break;
}
return Operand(no_reg, 0);
}
- Operand MemoryOperand() {
- int first_input = 0;
+ Operand MemoryOperand(int first_input = 0) {
return MemoryOperand(&first_input);
}
};
-static bool HasImmediateInput(Instruction* instr, int index) {
+namespace {
+
+bool HasImmediateInput(Instruction* instr, int index) {
return instr->InputAt(index)->IsImmediate();
}
+class OutOfLineLoadInteger FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadInteger(CodeGenerator* gen, Register result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL { __ xorl(result_, result_); }
+
+ private:
+ Register const result_;
+};
+
+
+class OutOfLineLoadFloat FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL { __ pcmpeqd(result_, result_); }
+
+ private:
+ XMMRegister const result_;
+};
+
+} // namespace
+
+
#define ASSEMBLE_UNOP(asm_instr) \
do { \
if (instr->Output()->IsRegister()) { \
} while (0)
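+// The checked load/store macros below compare the offset against the length
+// operand (unsigned); when the offset is out of bounds they skip the memory
+// access and, for loads, jump to out-of-line code that materializes the
+// default value (zero for integers, NaN for floats).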
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr) \
+ do { \
+ auto result = i.OutputDoubleRegister(); \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmpl(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmpl(offset, i.InputImmediate(1)); \
+ } \
+ OutOfLineCode* ool = new (zone()) OutOfLineLoadFloat(this, result); \
+ __ j(above_equal, ool->entry()); \
+ __ asm_instr(result, i.MemoryOperand(2)); \
+ __ bind(ool->exit()); \
+ } while (false)
+
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
+ do { \
+ auto result = i.OutputRegister(); \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmpl(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmpl(offset, i.InputImmediate(1)); \
+ } \
+ OutOfLineCode* ool = new (zone()) OutOfLineLoadInteger(this, result); \
+ __ j(above_equal, ool->entry()); \
+ __ asm_instr(result, i.MemoryOperand(2)); \
+ __ bind(ool->exit()); \
+ } while (false)
+
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
+ do { \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmpl(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmpl(offset, i.InputImmediate(1)); \
+ } \
+ Label done; \
+ __ j(above_equal, &done, Label::kNear); \
+ __ asm_instr(i.MemoryOperand(3), i.InputDoubleRegister(2)); \
+ __ bind(&done); \
+ } while (false)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
+ do { \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmpl(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmpl(offset, i.InputImmediate(1)); \
+ } \
+ Label done; \
+ __ j(above_equal, &done, Label::kNear); \
+ if (instr->InputAt(2)->IsRegister()) { \
+ __ asm_instr(i.MemoryOperand(3), i.InputRegister(2)); \
+ } else { \
+ __ asm_instr(i.MemoryOperand(3), i.InputImmediate(2)); \
+ } \
+ __ bind(&done); \
+ } while (false)
+
+
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
X64OperandConverter i(this, instr);
__ RecordWrite(object, index, value, mode);
break;
}
+ case kCheckedLoadInt8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
+ break;
+ case kCheckedLoadUint8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
+ break;
+ case kCheckedLoadInt16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
+ break;
+ case kCheckedLoadUint16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
+ break;
+ case kCheckedLoadWord32:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
+ break;
+ case kCheckedLoadFloat32:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
+ break;
+ case kCheckedLoadFloat64:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
+ break;
+ case kCheckedStoreWord8:
+ ASSEMBLE_CHECKED_STORE_INTEGER(movb);
+ break;
+ case kCheckedStoreWord16:
+ ASSEMBLE_CHECKED_STORE_INTEGER(movw);
+ break;
+ case kCheckedStoreWord32:
+ ASSEMBLE_CHECKED_STORE_INTEGER(movl);
+ break;
+ case kCheckedStoreFloat32:
+ ASSEMBLE_CHECKED_STORE_FLOAT(movss);
+ break;
+ case kCheckedStoreFloat64:
+ ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
+ break;
}
}
UNREACHABLE();
return;
}
- if (g.CanBeImmediate(base)) {
- // load [#base + %index]
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(index), g.UseImmediate(base));
- } else if (g.CanBeImmediate(index)) {
+ if (g.CanBeImmediate(index)) {
// load [%base + #index]
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+ } else if (g.CanBeImmediate(base)) {
+ // load [#base + %index]
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(index), g.UseImmediate(base));
} else {
// load [%base + %index*1]
Emit(opcode | AddressingModeField::encode(kMode_MR1),
}
InstructionOperand* value_operand =
g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
- if (g.CanBeImmediate(base)) {
- // store [#base + %index], %|#value
- Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
- g.UseRegister(index), g.UseImmediate(base), value_operand);
- } else if (g.CanBeImmediate(index)) {
+ if (g.CanBeImmediate(index)) {
// store [%base + #index], %|#value
Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
g.UseRegister(base), g.UseImmediate(index), value_operand);
+ } else if (g.CanBeImmediate(base)) {
+ // store [#base + %index], %|#value
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
+ g.UseRegister(index), g.UseImmediate(base), value_operand);
} else {
// store [%base + %index*1], %|#value
Emit(opcode | AddressingModeField::encode(kMode_MR1), nullptr,
}
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+ MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ X64OperandGenerator g(this);
+ Node* const buffer = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepWord8:
+ opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ break;
+ case kRepWord16:
+ opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ break;
+ case kRepWord32:
+ opcode = kCheckedLoadWord32;
+ break;
+ case kRepFloat32:
+ opcode = kCheckedLoadFloat32;
+ break;
+ case kRepFloat64:
+ opcode = kCheckedLoadFloat64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
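+  // If the {offset} is computed as {index} + {K} with a constant {K} that is
+  // smaller than the constant {length}, fold {K} into the addressing mode as
+  // a displacement and check {index} against {length} - {K} instead.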
+ if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
+ Int32Matcher mlength(length);
+ Int32BinopMatcher moffset(offset);
+ if (mlength.HasValue() && moffset.right().HasValue() &&
+ mlength.Value() > moffset.right().Value()) {
+ InstructionOperand* offset_operand = g.UseRegister(moffset.left().node());
+ InstructionOperand* length_operand =
+ g.TempImmediate(mlength.Value() - moffset.right().Value());
+ Emit(opcode | AddressingModeField::encode(kMode_MR1I),
+ g.DefineAsRegister(node), offset_operand, length_operand,
+ g.UseRegister(buffer), offset_operand,
+ g.UseImmediate(moffset.right().node()));
+ return;
+ }
+ }
+ InstructionOperand* offset_operand = g.UseRegister(offset);
+ InstructionOperand* length_operand =
+ g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
+ Emit(opcode | AddressingModeField::encode(kMode_MR1),
+ g.DefineAsRegister(node), offset_operand, length_operand,
+ g.UseRegister(buffer), offset_operand);
+}
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+ MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ X64OperandGenerator g(this);
+ Node* const buffer = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ Node* const value = node->InputAt(3);
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepWord8:
+ opcode = kCheckedStoreWord8;
+ break;
+ case kRepWord16:
+ opcode = kCheckedStoreWord16;
+ break;
+ case kRepWord32:
+ opcode = kCheckedStoreWord32;
+ break;
+ case kRepFloat32:
+ opcode = kCheckedStoreFloat32;
+ break;
+ case kRepFloat64:
+ opcode = kCheckedStoreFloat64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand* value_operand =
+ g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
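+  // Same {index} + {K} folding as for checked loads above.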
+ if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
+ Int32Matcher mlength(length);
+ Int32BinopMatcher moffset(offset);
+ if (mlength.HasValue() && moffset.right().HasValue() &&
+ mlength.Value() > moffset.right().Value()) {
+ InstructionOperand* offset_operand = g.UseRegister(moffset.left().node());
+ InstructionOperand* length_operand =
+ g.TempImmediate(mlength.Value() - moffset.right().Value());
+ Emit(opcode | AddressingModeField::encode(kMode_MR1I), nullptr,
+ offset_operand, length_operand, value_operand, g.UseRegister(buffer),
+ offset_operand, g.UseImmediate(moffset.right().node()));
+ return;
+ }
+ }
+ InstructionOperand* offset_operand = g.UseRegister(offset);
+ InstructionOperand* length_operand =
+ g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
+ Emit(opcode | AddressingModeField::encode(kMode_MR1), nullptr, offset_operand,
+ length_operand, value_operand, g.UseRegister(buffer), offset_operand);
+}
+
+
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
}
-void Assembler::mov_b(const Operand& dst, int8_t imm8) {
+void Assembler::mov_b(const Operand& dst, const Immediate& src) {
EnsureSpace ensure_space(this);
EMIT(0xC6);
emit_operand(eax, dst);
- EMIT(imm8);
+ EMIT(static_cast<int8_t>(src.x_));
}
}
-void Assembler::mov_w(const Operand& dst, int16_t imm16) {
+void Assembler::mov_w(const Operand& dst, const Immediate& src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0xC7);
emit_operand(eax, dst);
- EMIT(static_cast<int8_t>(imm16 & 0xff));
- EMIT(static_cast<int8_t>(imm16 >> 8));
+ EMIT(static_cast<int8_t>(src.x_ & 0xff));
+ EMIT(static_cast<int8_t>(src.x_ >> 8));
}
void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
void mov_b(Register dst, const Operand& src);
void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
- void mov_b(const Operand& dst, int8_t imm8);
+ void mov_b(const Operand& dst, int8_t src) { mov_b(dst, Immediate(src)); }
+ void mov_b(const Operand& dst, const Immediate& src);
void mov_b(const Operand& dst, Register src);
void mov_w(Register dst, const Operand& src);
+ void mov_w(const Operand& dst, int16_t src) { mov_w(dst, Immediate(src)); }
+ void mov_w(const Operand& dst, const Immediate& src);
void mov_w(const Operand& dst, Register src);
- void mov_w(const Operand& dst, int16_t imm16);
void mov(Register dst, int32_t imm32);
void mov(Register dst, const Immediate& x);
Node* StoreField(const FieldAccess& access, Node* object, Node* value) {
return NewNode(simplified()->StoreField(access), object, value);
}
- Node* LoadElement(const ElementAccess& access, Node* object, Node* index,
- Node* length) {
- return NewNode(simplified()->LoadElement(access), object, index, length);
+ Node* LoadElement(const ElementAccess& access, Node* object, Node* index) {
+ return NewNode(simplified()->LoadElement(access), object, index);
}
Node* StoreElement(const ElementAccess& access, Node* object, Node* index,
- Node* length, Node* value) {
- return NewNode(simplified()->StoreElement(access), object, index, length,
- value);
+ Node* value) {
+ return NewNode(simplified()->StoreElement(access), object, index, value);
}
protected:
TEST(RunLoadStoreFixedArrayIndex) {
SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
ElementAccess access = AccessBuilder::ForFixedArrayElement();
- Node* load = t.LoadElement(access, t.Parameter(0), t.Int32Constant(0),
- t.Int32Constant(2));
- t.StoreElement(access, t.Parameter(0), t.Int32Constant(1), t.Int32Constant(2),
- load);
+ Node* load = t.LoadElement(access, t.Parameter(0), t.Int32Constant(0));
+ t.StoreElement(access, t.Parameter(0), t.Int32Constant(1), load);
t.Return(load);
t.LowerAllNodes();
Node* backing_store = t.LoadField(
AccessBuilder::ForJSArrayBufferBackingStore(), t.Parameter(0));
Node* load =
- t.LoadElement(buffer_access, backing_store, t.Int32Constant(index),
- t.Int32Constant(array_length));
+ t.LoadElement(buffer_access, backing_store, t.Int32Constant(index));
t.StoreElement(buffer_access, backing_store, t.Int32Constant(index + 1),
- t.Int32Constant(array_length), load);
+ load);
t.Return(t.jsgraph.TrueConstant());
t.LowerAllNodes();
for (size_t i = 0; i < arraysize(smis); i++) { // for header sizes
for (size_t j = 0; (i + j) < arraysize(smis); j++) { // for element index
int offset = static_cast<int>(i * sizeof(Smi*));
- ElementAccess access = {kNoBoundsCheck, kUntaggedBase, offset,
- Type::Integral32(), kMachAnyTagged};
+ ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
+ kMachAnyTagged};
SimplifiedLoweringTester<Object*> t;
- Node* load = t.LoadElement(
- access, t.PointerConstant(smis), t.Int32Constant(static_cast<int>(j)),
- t.Int32Constant(static_cast<int>(arraysize(smis))));
+ Node* load = t.LoadElement(access, t.PointerConstant(smis),
+ t.Int32Constant(static_cast<int>(j)));
t.Return(load);
t.LowerAllNodes();
for (size_t i = 0; i < arraysize(smis); i++) { // for header sizes
for (size_t j = 0; (i + j) < arraysize(smis); j++) { // for element index
int offset = static_cast<int>(i * sizeof(Smi*));
- ElementAccess access = {kNoBoundsCheck, kUntaggedBase, offset,
- Type::Integral32(), kMachAnyTagged};
+ ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
+ kMachAnyTagged};
SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
Node* p0 = t.Parameter(0);
t.StoreElement(access, t.PointerConstant(smis),
- t.Int32Constant(static_cast<int>(j)),
- t.Int32Constant(static_cast<int>(arraysize(smis))), p0);
+ t.Int32Constant(static_cast<int>(j)), p0);
t.Return(p0);
t.LowerAllNodes();
SimplifiedLoweringTester<Object*> t;
Node* ptr = GetBaseNode(&t);
- Node* load = t.LoadElement(access, ptr, t.Int32Constant(from_index),
- t.Int32Constant(static_cast<int>(num_elements)));
- t.StoreElement(access, ptr, t.Int32Constant(to_index),
- t.Int32Constant(static_cast<int>(num_elements)), load);
+ Node* load = t.LoadElement(access, ptr, t.Int32Constant(from_index));
+ t.StoreElement(access, ptr, t.Int32Constant(to_index), load);
t.Return(t.jsgraph.TrueConstant());
t.LowerAllNodes();
t.GenerateCode();
private:
ElementAccess GetElementAccess() {
- ElementAccess access = {
- kNoBoundsCheck, tagged ? kTaggedBase : kUntaggedBase,
- tagged ? FixedArrayBase::kHeaderSize : 0, Type::Any(), rep};
+ ElementAccess access = {tagged ? kTaggedBase : kUntaggedBase,
+ tagged ? FixedArrayBase::kHeaderSize : 0,
+ Type::Any(), rep};
return access;
}
TestingGraph t(Type::Any(), Type::Signed32());
for (size_t i = 0; i < arraysize(kMachineReps); i++) {
- ElementAccess access = {kNoBoundsCheck, kTaggedBase,
- FixedArrayBase::kHeaderSize, Type::Any(),
- kMachineReps[i]};
+ ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Type::Any(), kMachineReps[i]};
- Node* load =
- t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0, t.p1,
- t.jsgraph.Int32Constant(1024), t.start, t.start);
+ Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
+ t.p1, t.start, t.start);
Node* use = t.Use(load, kMachineReps[i]);
t.Return(use);
t.Lower();
TestingGraph t(Type::Any(), Type::Signed32());
for (size_t i = 0; i < arraysize(kMachineReps); i++) {
- ElementAccess access = {kNoBoundsCheck, kTaggedBase,
- FixedArrayBase::kHeaderSize, Type::Any(),
- kMachineReps[i]};
+ ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Type::Any(), kMachineReps[i]};
Node* val = t.ExampleWithOutput(kMachineReps[i]);
Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
- t.p1, t.jsgraph.Int32Constant(1024), val,
- t.start, t.start);
+ t.p1, val, t.start, t.start);
t.Effect(store);
t.Lower();
CHECK_EQ(IrOpcode::kStore, store->opcode());
TEST(InsertChangeForLoadElementIndex) {
-// LoadElement(obj: Tagged, index: kTypeInt32 | kRepTagged, length) =>
+// LoadElement(obj: Tagged, index: kTypeInt32 | kRepTagged) =>
// Load(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k))
- TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
- ElementAccess access = {kNoBoundsCheck, kTaggedBase,
- FixedArrayBase::kHeaderSize, Type::Any(),
+ TestingGraph t(Type::Any(), Type::Signed32());
+ ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
kMachAnyTagged};
Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
- t.p1, t.p2, t.start, t.start);
+ t.p1, t.start, t.start);
t.Return(load);
t.Lower();
CHECK_EQ(IrOpcode::kLoad, load->opcode());
TEST(InsertChangeForStoreElementIndex) {
-// StoreElement(obj: Tagged, index: kTypeInt32 | kRepTagged, length, val) =>
+// StoreElement(obj: Tagged, index: kTypeInt32 | kRepTagged, val) =>
// Store(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k), val)
- TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
- ElementAccess access = {kNoBoundsCheck, kTaggedBase,
- FixedArrayBase::kHeaderSize, Type::Any(),
+ TestingGraph t(Type::Any(), Type::Signed32());
+ ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
kMachAnyTagged};
Node* store =
- t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0, t.p1, t.p2,
+ t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0, t.p1,
t.jsgraph.TrueConstant(), t.start, t.start);
t.Effect(store);
t.Lower();
TEST(InsertChangeForLoadElement) {
// TODO(titzer): test all load/store representation change insertions.
TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
- ElementAccess access = {kNoBoundsCheck, kTaggedBase,
- FixedArrayBase::kHeaderSize, Type::Any(),
+ ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
kMachFloat64};
Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
- t.p1, t.p1, t.start, t.start);
+ t.p1, t.start, t.start);
t.Return(load);
t.Lower();
CHECK_EQ(IrOpcode::kLoad, load->opcode());
TEST(InsertChangeForStoreElement) {
// TODO(titzer): test all load/store representation change insertions.
- TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
- ElementAccess access = {kNoBoundsCheck, kTaggedBase,
- FixedArrayBase::kHeaderSize, Type::Any(),
+ TestingGraph t(Type::Any(), Type::Signed32());
+ ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
kMachFloat64};
- Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
- t.jsgraph.Int32Constant(0), t.p2, t.p1,
- t.start, t.start);
+ Node* store =
+ t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
+ t.jsgraph.Int32Constant(0), t.p1, t.start, t.start);
t.Effect(store);
t.Lower();
namespace {
const ExternalArrayType kExternalArrayTypes[] = {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) kExternal##Type##Array,
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-};
+ kExternalUint8Array, kExternalInt8Array, kExternalUint16Array,
+ kExternalInt16Array, kExternalUint32Array, kExternalInt32Array,
+ kExternalFloat32Array, kExternalFloat64Array};
Type* const kJSTypes[] = {Type::Undefined(), Type::Null(), Type::Boolean(),
TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
Handle<JSTypedArray> array =
factory()->NewJSTypedArray(type, buffer, 0, kLength);
+ int const element_size = static_cast<int>(array->element_size());
+
+ Node* key = Parameter(
+ Type::Range(factory()->NewNumber(kMinInt / element_size),
+ factory()->NewNumber(kMaxInt / element_size), zone()));
+ Node* base = HeapConstant(array);
+ Node* context = UndefinedConstant();
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* node = graph()->NewNode(javascript()->LoadProperty(feedback), base,
+ key, context);
+ if (FLAG_turbo_deoptimization) {
+ node->AppendInput(zone(), UndefinedConstant());
+ }
+ node->AppendInput(zone(), effect);
+ node->AppendInput(zone(), control);
+ Reduction r = Reduce(node);
+
+ Matcher<Node*> offset_matcher =
+ element_size == 1
+ ? key
+ : IsWord32Shl(key, IsInt32Constant(WhichPowerOf2(element_size)));
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsLoadBuffer(BufferAccess(type),
+ IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
+ offset_matcher,
+ IsNumberConstant(array->byte_length()->Number()), effect,
+ control));
+ }
+}
+
+
+TEST_F(JSTypedLoweringTest, JSLoadPropertyFromExternalTypedArrayWithSafeKey) {
+ const size_t kLength = 17;
+ double backing_store[kLength];
+ Handle<JSArrayBuffer> buffer =
+ NewArrayBuffer(backing_store, sizeof(backing_store));
+ VectorSlotPair feedback(Handle<TypeFeedbackVector>::null(),
+ FeedbackVectorICSlot::Invalid());
+ TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
+ Handle<JSTypedArray> array =
+ factory()->NewJSTypedArray(type, buffer, 0, kLength);
+ ElementAccess access = AccessBuilder::ForTypedArrayElement(type, true);
- Node* key = Parameter(Type::Integral32());
+ int min = random_number_generator()->NextInt(static_cast<int>(kLength));
+ int max = random_number_generator()->NextInt(static_cast<int>(kLength));
+ if (min > max) std::swap(min, max);
+ Node* key = Parameter(Type::Range(factory()->NewNumber(min),
+ factory()->NewNumber(max), zone()));
Node* base = HeapConstant(array);
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsLoadElement(
- AccessBuilder::ForTypedArrayElement(type, true),
- IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
- key, IsNumberConstant(array->length()->Number()), effect));
+ EXPECT_THAT(
+ r.replacement(),
+ IsLoadElement(access,
+ IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
+ key, effect, control));
}
}
TRACED_FOREACH(StrictMode, strict_mode, kStrictModes) {
Handle<JSTypedArray> array =
factory()->NewJSTypedArray(type, buffer, 0, kLength);
+ int const element_size = static_cast<int>(array->element_size());
- Node* key = Parameter(Type::Integral32());
+ Node* key = Parameter(
+ Type::Range(factory()->NewNumber(kMinInt / element_size),
+ factory()->NewNumber(kMaxInt / element_size), zone()));
Node* base = HeapConstant(array);
Node* value =
Parameter(AccessBuilder::ForTypedArrayElement(type, true).type);
node->AppendInput(zone(), control);
Reduction r = Reduce(node);
+ Matcher<Node*> offset_matcher =
+ element_size == 1
+ ? key
+ : IsWord32Shl(key, IsInt32Constant(WhichPowerOf2(element_size)));
+
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsStoreElement(
- AccessBuilder::ForTypedArrayElement(type, true),
- IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
- key, IsNumberConstant(array->length()->Number()), value,
- effect, control));
+ EXPECT_THAT(
+ r.replacement(),
+ IsStoreBuffer(BufferAccess(type),
+ IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
+ offset_matcher,
+ IsNumberConstant(array->byte_length()->Number()), value,
+ effect, control));
}
}
}
TRACED_FOREACH(StrictMode, strict_mode, kStrictModes) {
Handle<JSTypedArray> array =
factory()->NewJSTypedArray(type, buffer, 0, kLength);
+ int const element_size = static_cast<int>(array->element_size());
- Node* key = Parameter(Type::Integral32());
+ Node* key = Parameter(
+ Type::Range(factory()->NewNumber(kMinInt / element_size),
+ factory()->NewNumber(kMaxInt / element_size), zone()));
Node* base = HeapConstant(array);
Node* value = Parameter(Type::Any());
Node* context = UndefinedConstant();
node->AppendInput(zone(), control);
Reduction r = Reduce(node);
+ Matcher<Node*> offset_matcher =
+ element_size == 1
+ ? key
+ : IsWord32Shl(key, IsInt32Constant(WhichPowerOf2(element_size)));
+
Matcher<Node*> value_matcher =
IsToNumber(value, context, effect, control);
Matcher<Node*> effect_matcher = value_matcher;
}
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsStoreElement(
- AccessBuilder::ForTypedArrayElement(type, true),
- IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
- key, IsNumberConstant(array->length()->Number()),
- value_matcher, effect_matcher, control));
+ EXPECT_THAT(
+ r.replacement(),
+ IsStoreBuffer(BufferAccess(type),
+ IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
+ offset_matcher,
+ IsNumberConstant(array->byte_length()->Number()),
+ value_matcher, effect_matcher, control));
+ }
+ }
+}
+
+
+TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithSafeKey) {
+ const size_t kLength = 17;
+ double backing_store[kLength];
+ Handle<JSArrayBuffer> buffer =
+ NewArrayBuffer(backing_store, sizeof(backing_store));
+ TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
+ TRACED_FOREACH(StrictMode, strict_mode, kStrictModes) {
+ Handle<JSTypedArray> array =
+ factory()->NewJSTypedArray(type, buffer, 0, kLength);
+ ElementAccess access = AccessBuilder::ForTypedArrayElement(type, true);
+
+ int min = random_number_generator()->NextInt(static_cast<int>(kLength));
+ int max = random_number_generator()->NextInt(static_cast<int>(kLength));
+ if (min > max) std::swap(min, max);
+ Node* key = Parameter(Type::Range(factory()->NewNumber(min),
+ factory()->NewNumber(max), zone()));
+ Node* base = HeapConstant(array);
+ Node* value = Parameter(access.type);
+ Node* context = UndefinedConstant();
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* node = graph()->NewNode(javascript()->StoreProperty(strict_mode),
+ base, key, value, context);
+ if (FLAG_turbo_deoptimization) {
+ node->AppendInput(zone(), UndefinedConstant());
+ }
+ node->AppendInput(zone(), effect);
+ node->AppendInput(zone(), control);
+ Reduction r = Reduce(node);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsStoreElement(
+ access, IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
+ key, value, effect, control));
}
}
}
graph()->NewNode(machine()->Word32And(), p0, Int32Constant(k)),
Int32Constant(l)));
ASSERT_TRUE(r1.Changed());
- EXPECT_THAT(r1.replacement(), IsWord32And(p0, IsInt32Constant(k & l)));
+ EXPECT_THAT(r1.replacement(),
+ (k & l) ? IsWord32And(p0, IsInt32Constant(k & l))
+ : IsInt32Constant(0));
// (K & x) & L => x & (K & L)
Reduction const r2 = Reduce(graph()->NewNode(
graph()->NewNode(machine()->Word32And(), Int32Constant(k), p0),
Int32Constant(l)));
ASSERT_TRUE(r2.Changed());
- EXPECT_THAT(r2.replacement(), IsWord32And(p0, IsInt32Constant(k & l)));
+ EXPECT_THAT(r2.replacement(),
+ (k & l) ? IsWord32And(p0, IsInt32Constant(k & l))
+ : IsInt32Constant(0));
}
}
}
}
+TEST_F(MachineOperatorReducerTest,
+ Word32ShlWithWord32SarAndInt32AddAndConstant) {
+ Node* const p0 = Parameter(0);
+ TRACED_FOREACH(int32_t, k, kInt32Values) {
+ TRACED_FORRANGE(int32_t, l, 1, 31) {
+ // (x + (K << L)) >> L << L => (x & (-1 << L)) + (K << L)
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Word32Shl(),
+ graph()->NewNode(machine()->Word32Sar(),
+ graph()->NewNode(machine()->Int32Add(), p0,
+ Int32Constant(k << l)),
+ Int32Constant(l)),
+ Int32Constant(l)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt32Add(IsWord32And(p0, IsInt32Constant(-1 << l)),
+ IsInt32Constant(k << l)));
+ }
+ }
+}
+
+
TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32Shr) {
Node* p0 = Parameter(0);
TRACED_FORRANGE(int32_t, x, 1, 31) {
};
+class IsLoadBufferMatcher FINAL : public NodeMatcher {
+ public:
+ IsLoadBufferMatcher(const Matcher<BufferAccess>& access_matcher,
+ const Matcher<Node*>& buffer_matcher,
+ const Matcher<Node*>& offset_matcher,
+ const Matcher<Node*>& length_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kLoadBuffer),
+ access_matcher_(access_matcher),
+ buffer_matcher_(buffer_matcher),
+ offset_matcher_(offset_matcher),
+ length_matcher_(length_matcher),
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose access (";
+ access_matcher_.DescribeTo(os);
+ *os << "), buffer (";
+ buffer_matcher_.DescribeTo(os);
+ *os << "), offset (";
+ offset_matcher_.DescribeTo(os);
+ *os << "), length (";
+ length_matcher_.DescribeTo(os);
+ *os << "), effect (";
+ effect_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node,
+ MatchResultListener* listener) const OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(BufferAccessOf(node->op()), "access",
+ access_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ "buffer", buffer_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+ "offset", offset_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
+ "length", length_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+ effect_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<BufferAccess> access_matcher_;
+ const Matcher<Node*> buffer_matcher_;
+ const Matcher<Node*> offset_matcher_;
+ const Matcher<Node*> length_matcher_;
+ const Matcher<Node*> effect_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsStoreBufferMatcher FINAL : public NodeMatcher {
+ public:
+ IsStoreBufferMatcher(const Matcher<BufferAccess>& access_matcher,
+ const Matcher<Node*>& buffer_matcher,
+ const Matcher<Node*>& offset_matcher,
+ const Matcher<Node*>& length_matcher,
+ const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kStoreBuffer),
+ access_matcher_(access_matcher),
+ buffer_matcher_(buffer_matcher),
+ offset_matcher_(offset_matcher),
+ length_matcher_(length_matcher),
+ value_matcher_(value_matcher),
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher) {}
+
+ virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose access (";
+ access_matcher_.DescribeTo(os);
+ *os << "), buffer (";
+ buffer_matcher_.DescribeTo(os);
+ *os << "), offset (";
+ offset_matcher_.DescribeTo(os);
+ *os << "), length (";
+ length_matcher_.DescribeTo(os);
+ *os << "), value (";
+ value_matcher_.DescribeTo(os);
+ *os << "), effect (";
+ effect_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(Node* node,
+ MatchResultListener* listener) const OVERRIDE {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(BufferAccessOf(node->op()), "access",
+ access_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ "buffer", buffer_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+ "offset", offset_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
+ "length", length_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 3),
+ "value", value_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+ effect_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<BufferAccess> access_matcher_;
+ const Matcher<Node*> buffer_matcher_;
+ const Matcher<Node*> offset_matcher_;
+ const Matcher<Node*> length_matcher_;
+ const Matcher<Node*> value_matcher_;
+ const Matcher<Node*> effect_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
class IsLoadElementMatcher FINAL : public NodeMatcher {
public:
IsLoadElementMatcher(const Matcher<ElementAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
- const Matcher<Node*>& length_matcher,
- const Matcher<Node*>& effect_matcher)
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
: NodeMatcher(IrOpcode::kLoadElement),
access_matcher_(access_matcher),
base_matcher_(base_matcher),
index_matcher_(index_matcher),
- length_matcher_(length_matcher),
- effect_matcher_(effect_matcher) {}
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher) {}
virtual void DescribeTo(std::ostream* os) const OVERRIDE {
NodeMatcher::DescribeTo(os);
base_matcher_.DescribeTo(os);
*os << "), index (";
index_matcher_.DescribeTo(os);
- *os << "), length (";
- length_matcher_.DescribeTo(os);
- *os << ") and effect (";
+ *os << "), effect (";
effect_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
*os << ")";
}
base_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
"index", index_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
- "length", length_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
- effect_matcher_, listener));
+ effect_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
}
private:
const Matcher<ElementAccess> access_matcher_;
const Matcher<Node*> base_matcher_;
const Matcher<Node*> index_matcher_;
- const Matcher<Node*> length_matcher_;
const Matcher<Node*> effect_matcher_;
+ const Matcher<Node*> control_matcher_;
};
IsStoreElementMatcher(const Matcher<ElementAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
- const Matcher<Node*>& length_matcher,
const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher)
access_matcher_(access_matcher),
base_matcher_(base_matcher),
index_matcher_(index_matcher),
- length_matcher_(length_matcher),
value_matcher_(value_matcher),
effect_matcher_(effect_matcher),
control_matcher_(control_matcher) {}
base_matcher_.DescribeTo(os);
*os << "), index (";
index_matcher_.DescribeTo(os);
- *os << "), length (";
- length_matcher_.DescribeTo(os);
*os << "), value (";
value_matcher_.DescribeTo(os);
*os << "), effect (";
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
"index", index_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
- "length", length_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 3),
"value", value_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
effect_matcher_, listener) &&
const Matcher<ElementAccess> access_matcher_;
const Matcher<Node*> base_matcher_;
const Matcher<Node*> index_matcher_;
- const Matcher<Node*> length_matcher_;
const Matcher<Node*> value_matcher_;
const Matcher<Node*> effect_matcher_;
const Matcher<Node*> control_matcher_;
}
+Matcher<Node*> IsLoadBuffer(const Matcher<BufferAccess>& access_matcher,
+ const Matcher<Node*>& buffer_matcher,
+ const Matcher<Node*>& offset_matcher,
+ const Matcher<Node*>& length_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsLoadBufferMatcher(access_matcher, buffer_matcher,
+ offset_matcher, length_matcher,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsStoreBuffer(const Matcher<BufferAccess>& access_matcher,
+ const Matcher<Node*>& buffer_matcher,
+ const Matcher<Node*>& offset_matcher,
+ const Matcher<Node*>& length_matcher,
+ const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsStoreBufferMatcher(
+ access_matcher, buffer_matcher, offset_matcher, length_matcher,
+ value_matcher, effect_matcher, control_matcher));
+}
+
+
Matcher<Node*> IsLoadElement(const Matcher<ElementAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
- const Matcher<Node*>& length_matcher,
- const Matcher<Node*>& effect_matcher) {
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
return MakeMatcher(new IsLoadElementMatcher(access_matcher, base_matcher,
- index_matcher, length_matcher,
- effect_matcher));
+ index_matcher, effect_matcher,
+ control_matcher));
}
Matcher<Node*> IsStoreElement(const Matcher<ElementAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
- const Matcher<Node*>& length_matcher,
const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher) {
return MakeMatcher(new IsStoreElementMatcher(
- access_matcher, base_matcher, index_matcher, length_matcher,
- value_matcher, effect_matcher, control_matcher));
+ access_matcher, base_matcher, index_matcher, value_matcher,
+ effect_matcher, control_matcher));
}
namespace compiler {
// Forward declarations.
+class BufferAccess;
class CallDescriptor;
struct ElementAccess;
struct FieldAccess;
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsLoadBuffer(const Matcher<BufferAccess>& access_matcher,
+ const Matcher<Node*>& buffer_matcher,
+ const Matcher<Node*>& offset_matcher,
+ const Matcher<Node*>& length_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsStoreBuffer(const Matcher<BufferAccess>& access_matcher,
+ const Matcher<Node*>& buffer_matcher,
+ const Matcher<Node*>& offset_matcher,
+ const Matcher<Node*>& length_matcher,
+ const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
Matcher<Node*> IsLoadElement(const Matcher<ElementAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
- const Matcher<Node*>& length_matcher,
-                             const Matcher<Node*>& effect_matcher);
+                             const Matcher<Node*>& effect_matcher,
+                             const Matcher<Node*>& control_matcher);
Matcher<Node*> IsStoreElement(const Matcher<ElementAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
- const Matcher<Node*>& length_matcher,
const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
}
}
-
-// -----------------------------------------------------------------------------
-// LoadElement
-
-
-TEST_F(SimplifiedOperatorReducerTest, LoadElementWithConstantKeyAndLength) {
- ElementAccess const access = {kTypedArrayBoundsCheck, kUntaggedBase, 0,
- Type::Any(), kMachAnyTagged};
- ElementAccess access_nocheck = access;
- access_nocheck.bounds_check = kNoBoundsCheck;
- Node* const base = Parameter(0);
- Node* const effect = graph()->start();
- {
- Node* const key = NumberConstant(-42.0);
- Node* const length = NumberConstant(100.0);
- Reduction r = Reduce(graph()->NewNode(simplified()->LoadElement(access),
- base, key, length, effect));
- ASSERT_FALSE(r.Changed());
- }
- {
- Node* const key = NumberConstant(-0.0);
- Node* const length = NumberConstant(1.0);
- Reduction r = Reduce(graph()->NewNode(simplified()->LoadElement(access),
- base, key, length, effect));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsLoadElement(access_nocheck, base, key, length, effect));
- }
- {
- Node* const key = NumberConstant(0);
- Node* const length = NumberConstant(1);
- Reduction r = Reduce(graph()->NewNode(simplified()->LoadElement(access),
- base, key, length, effect));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsLoadElement(access_nocheck, base, key, length, effect));
- }
- {
- Node* const key = NumberConstant(42.2);
- Node* const length = NumberConstant(128);
- Reduction r = Reduce(graph()->NewNode(simplified()->LoadElement(access),
- base, key, length, effect));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsLoadElement(access_nocheck, base, key, length, effect));
- }
- {
- Node* const key = NumberConstant(39.2);
- Node* const length = NumberConstant(32.0);
- Reduction r = Reduce(graph()->NewNode(simplified()->LoadElement(access),
- base, key, length, effect));
- ASSERT_FALSE(r.Changed());
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// StoreElement
-
-
-TEST_F(SimplifiedOperatorReducerTest, StoreElementWithConstantKeyAndLength) {
- ElementAccess const access = {kTypedArrayBoundsCheck, kUntaggedBase, 0,
- Type::Any(), kMachAnyTagged};
- ElementAccess access_nocheck = access;
- access_nocheck.bounds_check = kNoBoundsCheck;
- Node* const base = Parameter(0);
- Node* const value = Parameter(1);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- {
- Node* const key = NumberConstant(-72.1);
- Node* const length = NumberConstant(0.0);
- Reduction r =
- Reduce(graph()->NewNode(simplified()->StoreElement(access), base, key,
- length, value, effect, control));
- ASSERT_FALSE(r.Changed());
- }
- {
- Node* const key = NumberConstant(-0.0);
- Node* const length = NumberConstant(999);
- Reduction r =
- Reduce(graph()->NewNode(simplified()->StoreElement(access), base, key,
- length, value, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsStoreElement(access_nocheck, base, key, length, value, effect,
- control));
- }
- {
- Node* const key = NumberConstant(0);
- Node* const length = NumberConstant(1);
- Reduction r =
- Reduce(graph()->NewNode(simplified()->StoreElement(access), base, key,
- length, value, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsStoreElement(access_nocheck, base, key, length, value, effect,
- control));
- }
- {
- Node* const key = NumberConstant(42.2);
- Node* const length = NumberConstant(128);
- Reduction r =
- Reduce(graph()->NewNode(simplified()->StoreElement(access), base, key,
- length, value, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsStoreElement(access_nocheck, base, key, length, value, effect,
- control));
- }
- {
- Node* const key = NumberConstant(39.2);
- Node* const length = NumberConstant(32.0);
- Reduction r =
- Reduce(graph()->NewNode(simplified()->StoreElement(access), base, key,
- length, value, effect, control));
- ASSERT_FALSE(r.Changed());
- }
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
::testing::ValuesIn(kPureOperators));
+// -----------------------------------------------------------------------------
+// Buffer access operators.
+
+
+namespace {
+
+const ExternalArrayType kExternalArrayTypes[] = {
+ kExternalUint8Array, kExternalInt8Array, kExternalUint16Array,
+ kExternalInt16Array, kExternalUint32Array, kExternalInt32Array,
+ kExternalFloat32Array, kExternalFloat64Array};
+
+} // namespace
+
+
+class SimplifiedBufferAccessOperatorTest
+ : public TestWithZone,
+ public ::testing::WithParamInterface<ExternalArrayType> {};
+
+
+TEST_P(SimplifiedBufferAccessOperatorTest, InstancesAreGloballyShared) {
+ BufferAccess const access(GetParam());
+ SimplifiedOperatorBuilder simplified1(zone());
+ SimplifiedOperatorBuilder simplified2(zone());
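+ // The simplified operator builder hands out cached operator instances, so
+ // two independently constructed builders must return identical pointers.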
+ EXPECT_EQ(simplified1.LoadBuffer(access), simplified2.LoadBuffer(access));
+ EXPECT_EQ(simplified1.StoreBuffer(access), simplified2.StoreBuffer(access));
+}
+
+
+TEST_P(SimplifiedBufferAccessOperatorTest, LoadBuffer) {
+ SimplifiedOperatorBuilder simplified(zone());
+ BufferAccess const access(GetParam());
+ const Operator* op = simplified.LoadBuffer(access);
+
+ EXPECT_EQ(IrOpcode::kLoadBuffer, op->opcode());
+ EXPECT_EQ(Operator::kNoThrow | Operator::kNoWrite, op->properties());
+ EXPECT_EQ(access, BufferAccessOf(op));
+
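+ // Value inputs: buffer, offset and length.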
+ EXPECT_EQ(3, op->ValueInputCount());
+ EXPECT_EQ(1, op->EffectInputCount());
+ EXPECT_EQ(1, op->ControlInputCount());
+ EXPECT_EQ(5, OperatorProperties::GetTotalInputCount(op));
+
+ EXPECT_EQ(1, op->ValueOutputCount());
+ EXPECT_EQ(1, op->EffectOutputCount());
+ EXPECT_EQ(0, op->ControlOutputCount());
+}
+
+
+TEST_P(SimplifiedBufferAccessOperatorTest, StoreBuffer) {
+ SimplifiedOperatorBuilder simplified(zone());
+ BufferAccess const access(GetParam());
+ const Operator* op = simplified.StoreBuffer(access);
+
+ EXPECT_EQ(IrOpcode::kStoreBuffer, op->opcode());
+ EXPECT_EQ(Operator::kNoRead | Operator::kNoThrow, op->properties());
+ EXPECT_EQ(access, BufferAccessOf(op));
+
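+ // Value inputs: buffer, offset, length and the value to store.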
+ EXPECT_EQ(4, op->ValueInputCount());
+ EXPECT_EQ(1, op->EffectInputCount());
+ EXPECT_EQ(1, op->ControlInputCount());
+ EXPECT_EQ(6, OperatorProperties::GetTotalInputCount(op));
+
+ EXPECT_EQ(0, op->ValueOutputCount());
+ EXPECT_EQ(1, op->EffectOutputCount());
+ EXPECT_EQ(0, op->ControlOutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(SimplifiedOperatorTest,
+ SimplifiedBufferAccessOperatorTest,
+ ::testing::ValuesIn(kExternalArrayTypes));
+
+
// -----------------------------------------------------------------------------
// Element access operators.
+
namespace {
const ElementAccess kElementAccesses[] = {
- {kNoBoundsCheck, kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
- kMachAnyTagged},
- {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
- Type::Any(), kMachInt8},
- {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
- Type::Any(), kMachInt16},
- {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
- Type::Any(), kMachInt32},
- {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
- Type::Any(), kMachUint8},
- {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
- Type::Any(), kMachUint16},
- {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
- Type::Any(), kMachUint32},
- {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Signed32(), kMachInt8},
- {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Unsigned32(), kMachUint8},
- {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Signed32(), kMachInt16},
- {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Unsigned32(), kMachUint16},
- {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Signed32(), kMachInt32},
- {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Unsigned32(), kMachUint32},
- {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Number(), kRepFloat32},
- {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Number(), kRepFloat64},
- {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
- Type::Signed32(), kMachInt8},
- {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
- Type::Unsigned32(), kMachUint8},
- {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
- Type::Signed32(), kMachInt16},
- {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
- Type::Unsigned32(), kMachUint16},
- {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
- Type::Signed32(), kMachInt32},
- {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
- Type::Unsigned32(), kMachUint32},
- {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
- Type::Number(), kRepFloat32},
- {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
- Type::Number(), kRepFloat64}};
+ {kTaggedBase, FixedArray::kHeaderSize, Type::Any(), kMachAnyTagged},
+ {kUntaggedBase, 0, Type::Any(), kMachInt8},
+ {kUntaggedBase, 0, Type::Any(), kMachInt16},
+ {kUntaggedBase, 0, Type::Any(), kMachInt32},
+ {kUntaggedBase, 0, Type::Any(), kMachUint8},
+ {kUntaggedBase, 0, Type::Any(), kMachUint16},
+ {kUntaggedBase, 0, Type::Any(), kMachUint32},
+ {kUntaggedBase, 0, Type::Signed32(), kMachInt8},
+ {kUntaggedBase, 0, Type::Unsigned32(), kMachUint8},
+ {kUntaggedBase, 0, Type::Signed32(), kMachInt16},
+ {kUntaggedBase, 0, Type::Unsigned32(), kMachUint16},
+ {kUntaggedBase, 0, Type::Signed32(), kMachInt32},
+ {kUntaggedBase, 0, Type::Unsigned32(), kMachUint32},
+ {kUntaggedBase, 0, Type::Number(), kRepFloat32},
+ {kUntaggedBase, 0, Type::Number(), kRepFloat64},
+ {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
+ kMachInt8},
+ {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
+ kMachUint8},
+ {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
+ kMachInt16},
+ {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
+ kMachUint16},
+ {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
+ kMachInt32},
+ {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
+ kMachUint32},
+ {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
+ kRepFloat32},
+ {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
+ kRepFloat64}};
} // namespace
EXPECT_EQ(Operator::kNoThrow | Operator::kNoWrite, op->properties());
EXPECT_EQ(access, ElementAccessOf(op));
- EXPECT_EQ(3, op->ValueInputCount());
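+ // Only base and index remain as value inputs; the separate length input is
+ // gone now that LoadElement no longer performs a bounds check.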
+ EXPECT_EQ(2, op->ValueInputCount());
EXPECT_EQ(1, op->EffectInputCount());
- EXPECT_EQ(0, op->ControlInputCount());
+ EXPECT_EQ(1, op->ControlInputCount());
EXPECT_EQ(4, OperatorProperties::GetTotalInputCount(op));
EXPECT_EQ(1, op->ValueOutputCount());
EXPECT_EQ(Operator::kNoRead | Operator::kNoThrow, op->properties());
EXPECT_EQ(access, ElementAccessOf(op));
- EXPECT_EQ(4, op->ValueInputCount());
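+ // Value inputs are now base, index and the stored value; the length input
+ // has been dropped.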
+ EXPECT_EQ(3, op->ValueInputCount());
EXPECT_EQ(1, op->EffectInputCount());
EXPECT_EQ(1, op->ControlInputCount());
- EXPECT_EQ(6, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(5, OperatorProperties::GetTotalInputCount(op));
EXPECT_EQ(0, op->ValueOutputCount());
EXPECT_EQ(1, op->EffectOutputCount());
Factory* TestWithIsolate::factory() const { return isolate()->factory(); }
+base::RandomNumberGenerator* TestWithIsolate::random_number_generator() const {
+ return isolate()->random_number_generator();
+}
+
+
TestWithZone::~TestWithZone() {}
} // namespace internal
Isolate* isolate() const {
return reinterpret_cast<Isolate*>(::v8::TestWithIsolate::isolate());
}
+ base::RandomNumberGenerator* random_number_generator() const;
private:
DISALLOW_COPY_AND_ASSIGN(TestWithIsolate);