V(MathSqrt) \
V(ModByConstI) \
V(ModByPowerOf2I) \
+ V(NullarySIMDOperation) \
+ V(UnarySIMDOperation) \
+ V(BinarySIMDOperation) \
+ V(TernarySIMDOperation) \
+ V(QuarternarySIMDOperation) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
V(NumberTagI) \
V(NumberTagU) \
V(NumberUntagD) \
+ V(SIMD128ToTagged) \
+ V(TaggedToSIMD128) \
V(OsrEntry) \
V(Parameter) \
V(Power) \
};
+class LNullarySIMDOperation V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ explicit LNullarySIMDOperation(BuiltinFunctionId op)
+ : op_(op) {
+ }
+
+ BuiltinFunctionId op() const { return op_; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kNullarySIMDOperation;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+ static LNullarySIMDOperation* cast(LInstruction* instr) {
+ ASSERT(instr->IsNullarySIMDOperation());
+ return reinterpret_cast<LNullarySIMDOperation*>(instr);
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(NullarySIMDOperation)
+
+ private:
+ BuiltinFunctionId op_;
+};
+
+
+class LUnarySIMDOperation V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LUnarySIMDOperation(LOperand* value, BuiltinFunctionId op)
+ : op_(op) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ BuiltinFunctionId op() const { return op_; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kUnarySIMDOperation;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+ static LUnarySIMDOperation* cast(LInstruction* instr) {
+ ASSERT(instr->IsUnarySIMDOperation());
+ return reinterpret_cast<LUnarySIMDOperation*>(instr);
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(UnarySIMDOperation)
+
+ private:
+ BuiltinFunctionId op_;
+};
+
+
+class LBinarySIMDOperation V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LBinarySIMDOperation(LOperand* left, LOperand* right, BuiltinFunctionId op)
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ BuiltinFunctionId op() const { return op_; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kBinarySIMDOperation;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+ static LBinarySIMDOperation* cast(LInstruction* instr) {
+ ASSERT(instr->IsBinarySIMDOperation());
+ return reinterpret_cast<LBinarySIMDOperation*>(instr);
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(BinarySIMDOperation)
+
+ private:
+ BuiltinFunctionId op_;
+};
+
+
+class LTernarySIMDOperation V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LTernarySIMDOperation(LOperand* first, LOperand* second, LOperand* third,
+ BuiltinFunctionId op)
+ : op_(op) {
+ inputs_[0] = first;
+ inputs_[1] = second;
+ inputs_[2] = third;
+ }
+
+ LOperand* first() { return inputs_[0]; }
+ LOperand* second() { return inputs_[1]; }
+ LOperand* third() { return inputs_[2]; }
+ BuiltinFunctionId op() const { return op_; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kTernarySIMDOperation;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+ static LTernarySIMDOperation* cast(LInstruction* instr) {
+ ASSERT(instr->IsTernarySIMDOperation());
+ return reinterpret_cast<LTernarySIMDOperation*>(instr);
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(TernarySIMDOperation)
+
+ private:
+ BuiltinFunctionId op_;
+};
+
+
+class LQuarternarySIMDOperation V8_FINAL
+ : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LQuarternarySIMDOperation(LOperand* x, LOperand* y, LOperand* z,
+ LOperand* w, BuiltinFunctionId op)
+ : op_(op) {
+ inputs_[0] = x;
+ inputs_[1] = y;
+ inputs_[2] = z;
+ inputs_[3] = w;
+ }
+
+ LOperand* x() { return inputs_[0]; }
+ LOperand* y() { return inputs_[1]; }
+ LOperand* z() { return inputs_[2]; }
+ LOperand* w() { return inputs_[3]; }
+ BuiltinFunctionId op() const { return op_; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kQuarternarySIMDOperation;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+ static LQuarternarySIMDOperation* cast(LInstruction* instr) {
+ ASSERT(instr->IsQuarternarySIMDOperation());
+ return reinterpret_cast<LQuarternarySIMDOperation*>(instr);
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(QuarternarySIMDOperation)
+
+ private:
+ BuiltinFunctionId op_;
+};
+
+
class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
};
+inline static bool ExternalArrayOpRequiresPreScale(
+ Representation key_representation,
+ ElementsKind kind) {
+ int shift_size = ElementsKindToShiftSize(kind);
+ return key_representation.IsSmi()
+ ? shift_size > static_cast<int>(maximal_scale_factor) + kSmiTagSize
+ : shift_size > static_cast<int>(maximal_scale_factor);
+}
+
+
// Returns true when converting the key into an element index cannot be
// folded into the load/store addressing mode, so an extra temp register
// is needed to do the work: either the key must be pre-scaled (the
// element shift exceeds the maximal scale factor), or a smi key must be
// divided by two to index byte-sized external/fixed typed array elements.
inline static bool ExternalArrayOpRequiresTemp(
    Representation key_representation,
    ElementsKind elements_kind) {
  // Operations that require the key to be scaled by a factor or divided by two
  // to be converted into an index cannot fold the scale operation into a load
  // and need an extra temp register to do the work.
  return ExternalArrayOpRequiresPreScale(key_representation, elements_kind) ||
      (key_representation.IsSmi() &&
       (elements_kind == EXTERNAL_INT8_ELEMENTS ||
        elements_kind == EXTERNAL_UINT8_ELEMENTS ||
        elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
        elements_kind == UINT8_ELEMENTS ||
        elements_kind == INT8_ELEMENTS ||
        elements_kind == UINT8_CLAMPED_ELEMENTS));
}
};
+class LSIMD128ToTagged V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ explicit LSIMD128ToTagged(LOperand* value, LOperand* temp, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SIMD128ToTagged, "simd128-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
// Sometimes truncating conversion from a tagged value to an int32.
class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
};
+class LTaggedToSIMD128 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ explicit LTaggedToSIMD128(LOperand* value, LOperand* temp,
+ Representation representation)
+ : representation_(representation) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ Representation representation() const { return representation_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TaggedToSIMD128, "simd128-untag")
+ DECLARE_HYDROGEN_ACCESSOR(Change);
+ private:
+ Representation representation_;
+};
+
+
class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)