%endif
Name: crosswalk
-Version: 8.37.183.0
+Version: 8.37.186.0
Release: 0
Summary: Crosswalk is an app runtime based on Chromium
License: (BSD-3-Clause and LGPL-2.1+)
bool IsFloat32Array() const;
/**
+ * Returns true if this value is a Float32x4Array.
+ * This is an experimental feature.
+ */
+ bool IsFloat32x4Array() const;
+
+ /**
+ * Returns true if this value is a Float64x2Array.
+ * This is an experimental feature.
+ */
+ bool IsFloat64x2Array() const;
+
+ /**
+ * Returns true if this value is a Int32x4Array.
+ * This is an experimental feature.
+ */
+ bool IsInt32x4Array() const;
+
+ /**
* Returns true if this value is a Float64Array.
* This is an experimental feature.
*/
kExternalInt16Array,
kExternalUint16Array,
kExternalInt32Array,
+ kExternalInt32x4Array,
kExternalUint32Array,
kExternalFloat32Array,
+ kExternalFloat32x4Array,
+ kExternalFloat64x2Array,
kExternalFloat64Array,
kExternalUint8ClampedArray,
};
+/**
+ * An instance of Float32x4Array constructor (SIMD.js experimental type).
+ * This API is experimental and may change significantly.
+ */
+class V8_EXPORT Float32x4Array : public TypedArray {
+ public:
+ // Creates a view of |length| float32x4 elements over |array_buffer|,
+ // starting at |byte_offset|.
+ static Local<Float32x4Array> New(Handle<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Float32x4Array* Cast(Value* obj);
+
+ private:
+ Float32x4Array();
+ static void CheckCast(Value* obj);
+};
+
+
+/**
+ * An instance of Float64x2Array constructor (SIMD.js experimental type).
+ * This API is experimental and may change significantly.
+ */
+class V8_EXPORT Float64x2Array : public TypedArray {
+ public:
+ // Creates a view of |length| float64x2 elements over |array_buffer|,
+ // starting at |byte_offset|.
+ static Local<Float64x2Array> New(Handle<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Float64x2Array* Cast(Value* obj);
+
+ private:
+ Float64x2Array();
+ static void CheckCast(Value* obj);
+};
+
+
+/**
+ * An instance of Int32x4Array constructor (SIMD.js experimental type).
+ * This API is experimental and may change significantly.
+ */
+class V8_EXPORT Int32x4Array : public TypedArray {
+ public:
+ // Creates a view of |length| int32x4 elements over |array_buffer|,
+ // starting at |byte_offset|.
+ static Local<Int32x4Array> New(Handle<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Int32x4Array* Cast(Value* obj);
+
+ private:
+ Int32x4Array();
+ static void CheckCast(Value* obj);
+};
+
+
/**
* An instance of Float64Array constructor (ES6 draft 15.13.6).
* This API is experimental and may change significantly.
static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
static const int kContextHeaderSize = 2 * kApiPointerSize;
- static const int kContextEmbedderDataIndex = 76;
+ static const int kContextEmbedderDataIndex = 89;
static const int kFullStringRepresentationMask = 0x07;
static const int kStringEncodingMask = 0x4;
static const int kExternalTwoByteRepresentationTag = 0x02;
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
- static const int kEmptyStringRootIndex = 163;
+ static const int kEmptyStringRootIndex = 175;
// The external allocation limit should be below 256 MB on all architectures
// to avoid that resource-constrained embedders run low on memory.
static const int kNodeIsIndependentShift = 4;
static const int kNodeIsPartiallyDependentShift = 5;
- static const int kJSObjectType = 0xbb;
+ static const int kJSObjectType = 0xc1;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
}
+// Downcasts a generic Value to Float32x4Array.  The cast is verified via
+// CheckCast only when V8_ENABLE_CHECKS is defined (checked builds).
+Float32x4Array* Float32x4Array::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Float32x4Array*>(value);
+}
+
+
+// Downcasts a generic Value to Float64x2Array.  The cast is verified via
+// CheckCast only when V8_ENABLE_CHECKS is defined (checked builds).
+Float64x2Array* Float64x2Array::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Float64x2Array*>(value);
+}
+
+
+// Downcasts a generic Value to Int32x4Array.  The cast is verified via
+// CheckCast only when V8_ENABLE_CHECKS is defined (checked builds).
+Int32x4Array* Int32x4Array::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Int32x4Array*>(value);
+}
+
+
Float64Array* Float64Array::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
V(Uint32Array, JSTypedArray) \
V(Int32Array, JSTypedArray) \
V(Float32Array, JSTypedArray) \
+ V(Float32x4Array, JSTypedArray) \
+ V(Float64x2Array, JSTypedArray) \
+ V(Int32x4Array, JSTypedArray) \
V(Float64Array, JSTypedArray) \
V(DataView, JSDataView) \
V(String, String) \
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Float32Array> ToLocalFloat32Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Float32x4Array> ToLocalFloat32x4Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Float64x2Array> ToLocalFloat64x2Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Int32x4Array> ToLocalInt32x4Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Float64Array> ToLocalFloat64Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
bool CpuFeatures::SupportsCrankshaft() { return IsSupported(VFP3); }
+// SIMD128 code generation in Crankshaft is not implemented for this port,
+// so report no support unconditionally.
+bool CpuFeatures::SupportsSIMD128InCrankshaft() { return false; }
int Register::NumAllocatableRegisters() {
return r;
}
+ // The allocation index of a NEON quad register is simply its register
+ // code (identity mapping, q0..q15).
+ static int ToAllocationIndex(QwNeonRegister reg) {
+ ASSERT(reg.code() < kMaxNumRegisters);
+ return reg.code();
+ }
+
+ // Returns the printable name ("q0".."q15") for a quad-register
+ // allocation index.
+ static const char* AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < kMaxNumRegisters);
+ const char* const names[] = {
+ "q0",
+ "q1",
+ "q2",
+ "q3",
+ "q4",
+ "q5",
+ "q6",
+ "q7",
+ "q8",
+ "q9",
+ "q10",
+ "q11",
+ "q12",
+ "q13",
+ "q14",
+ "q15",
+ };
+ return names[index];
+ }
+
bool is_valid() const {
return (0 <= code_) && (code_ < kMaxNumRegisters);
}
typedef QwNeonRegister QuadRegister;
+typedef QwNeonRegister SIMD128Register;
// Support for the VFP registers s0 to s31 (d0 to d15).
}
-void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+void Deoptimizer::CopySIMD128Registers(FrameDescription* output_frame) {
for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
-// Copy VFP registers to
-// double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
+// Copy VFP registers into
+// simd128_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
- int double_regs_offset = FrameDescription::double_registers_offset();
+ int double_regs_offset = FrameDescription::simd128_registers_offset();
for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ CheckFor32DRegs(ip);
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
- int src_offset = FrameDescription::double_registers_offset();
+ int src_offset = FrameDescription::simd128_registers_offset();
for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
if (i == kDoubleRegZero.code()) continue;
if (i == kScratchDoubleReg.code()) continue;
}
+// Each simd128_registers_ slot stores two doubles; double register |n|
+// maps to half n % 2 of simd128 slot n / 2.
+double FrameDescription::GetDoubleRegister(unsigned n) const {
+ ASSERT(n < 2 * ARRAY_SIZE(simd128_registers_));
+ return simd128_registers_[n / 2].d[n % 2];
+}
+
+
+// Stores double register |n| into half n % 2 of simd128 slot n / 2
+// (mirror of GetDoubleRegister).
+void FrameDescription::SetDoubleRegister(unsigned n, double value) {
+ ASSERT(n < 2 * ARRAY_SIZE(simd128_registers_));
+ simd128_registers_[n / 2].d[n % 2] = value;
+}
+
+
#undef __
} } // namespace v8::internal
}
+// SIMD operations are not yet supported by this Lithium backend; hitting
+// this builder is a fatal error.
+LInstruction* LChunkBuilder::DoNullarySIMDOperation(
+ HNullarySIMDOperation* instr) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+// SIMD operations are not yet supported by this Lithium backend; hitting
+// this builder is a fatal error.
+LInstruction* LChunkBuilder::DoUnarySIMDOperation(
+ HUnarySIMDOperation* instr) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+// SIMD operations are not yet supported by this Lithium backend; hitting
+// this builder is a fatal error.
+LInstruction* LChunkBuilder::DoBinarySIMDOperation(
+ HBinarySIMDOperation* instr) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+// SIMD operations are not yet supported by this Lithium backend; hitting
+// this builder is a fatal error.
+LInstruction* LChunkBuilder::DoTernarySIMDOperation(
+ HTernarySIMDOperation* instr) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+// SIMD operations are not yet supported by this Lithium backend; hitting
+// this builder is a fatal error.
+LInstruction* LChunkBuilder::DoQuarternarySIMDOperation(
+ HQuarternarySIMDOperation* instr) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), r1);
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ bool load_128bits_without_neon = IsSIMD128ElementsKind(elements_kind);
+ LOperand* key = load_128bits_without_neon
+ ? UseRegisterOrConstant(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
LInstruction* result = NULL;
if (!instr->is_typed_elements()) {
ASSERT(instr->representation().IsSmiOrTagged());
obj = UseRegisterAtStart(instr->elements());
}
- result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
+ result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key, NULL, NULL));
} else {
ASSERT(
(instr->representation().IsInteger32() &&
!IsDoubleOrFloatElementsKind(elements_kind)) ||
(instr->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(elements_kind)));
+ IsDoubleOrFloatElementsKind(elements_kind)) ||
+ (instr->representation().IsTagged() &&
+ (IsSIMD128ElementsKind(elements_kind))));
LOperand* backing_store = UseRegister(instr->elements());
- result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
+ result = load_128bits_without_neon
+ ? DefineAsRegister(new(zone()) LLoadKeyed(
+ backing_store, key, TempRegister(), TempRegister()))
+ : DefineAsRegister(new(zone()) LLoadKeyed(
+ backing_store, key, NULL, NULL));
+ if (load_128bits_without_neon) {
+ info()->MarkAsDeferredCalling();
+ AssignPointerMap(result);
+ }
}
if ((instr->is_external() || instr->is_fixed_typed_array()) ?
}
}
- return new(zone()) LStoreKeyed(object, key, val);
+ return new(zone()) LStoreKeyed(object, key, val, NULL, NULL);
}
ASSERT(
(instr->value()->representation().IsInteger32() &&
!IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->value()->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(instr->elements_kind())));
+ IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+ (instr->value()->representation().IsTagged() &&
+ IsSIMD128ElementsKind(instr->elements_kind())));
ASSERT((instr->is_fixed_typed_array() &&
instr->elements()->representation().IsTagged()) ||
(instr->is_external() &&
instr->elements()->representation().IsExternal()));
LOperand* val = UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* backing_store = UseRegister(instr->elements());
- return new(zone()) LStoreKeyed(backing_store, key, val);
+ bool store_128bits_without_neon =
+ IsSIMD128ElementsKind(instr->elements_kind());
+ LOperand* key = store_128bits_without_neon
+ ? UseRegisterOrConstant(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ LStoreKeyed* result =
+ new(zone()) LStoreKeyed(backing_store, key, val,
+ store_128bits_without_neon ? TempRegister() : NULL,
+ store_128bits_without_neon ? TempRegister() : NULL);
+ return store_128bits_without_neon ? AssignEnvironment(result) : result;
}
};
-class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key,
+ LOperand* temp, LOperand* temp2) {
inputs_[0] = elements;
inputs_[1] = key;
+ temps_[0] = temp;
+ temps_[1] = temp2;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
};
-class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 2> {
public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
+ LOperand* temp, LOperand* temp2) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
}
bool is_external() const { return hydrogen()->is_external(); }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
}
+// Deferred-code path that allocates a heap SIMD128 object by calling the
+// runtime function selected by |id|, then writes the untagged address back
+// into the instruction's safepoint register slot.  The caller re-tags the
+// result after filling in the value (see DoLoadKeyedSIMD128ExternalArray).
+void LCodeGen::DoDeferredSIMD128ToTagged(LInstruction* instr,
+ Runtime::FunctionId id) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register reg = ToRegister(instr->result());
+ __ mov(reg, Operand::Zero());
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(id);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ // Untag the runtime result so the slot holds a raw address; the user of
+ // the deferred code re-adds kHeapObjectTag when done.
+ __ sub(r0, r0, Operand(kHeapObjectTag));
+ __ StoreToSafepointRegisterSlot(r0, reg);
+}
+
+
+// Loads a 128-bit element from an external typed array without using NEON:
+// a heap object of type T (Float32x4 / Float64x2 / Int32x4) is allocated
+// via a deferred runtime call, then the value is copied pointer-word by
+// pointer-word from the backing store into the object's inner
+// FixedTypedArray.
+template<class T>
+void LCodeGen::DoLoadKeyedSIMD128ExternalArray(LLoadKeyed* instr) {
+ class DeferredSIMD128ToTagged V8_FINAL : public LDeferredCode {
+ public:
+ DeferredSIMD128ToTagged(LCodeGen* codegen, LInstruction* instr,
+ Runtime::FunctionId id)
+ : LDeferredCode(codegen), instr_(instr), id_(id) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LInstruction* instr_;
+ Runtime::FunctionId id_;
+ };
+
+ // Allocate a SIMD128 object on the heap.
+ Register reg = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
+ Register temp2 = ToRegister(instr->temp2());
+ Register scratch = scratch0();
+
+ DeferredSIMD128ToTagged* deferred = new(zone()) DeferredSIMD128ToTagged(
+ this, instr, static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()));
+ __ jmp(deferred->entry());
+ __ bind(deferred->exit());
+
+ // Copy the SIMD128 value from the external array to the heap object.
+ STATIC_ASSERT(T::kValueSize % kPointerSize == 0);
+ Register external_pointer = ToRegister(instr->elements());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ // Constant keys this large cannot be encoded in the address operand.
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ // Smi keys carry a tag, so the shift is reduced by the tag size.
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int base_offset = instr->base_offset();
+ Operand operand = key_is_constant
+ ? Operand(constant_key << element_size_shift)
+ : Operand(key, LSL, shift_size);
+
+ __ add(scratch, external_pointer, operand);
+
+ // Load the inner FixedTypedArray.
+ __ ldr(temp2, MemOperand(reg, T::kValueOffset));
+
+ for (int offset = 0; offset < T::kValueSize; offset += kPointerSize) {
+ __ ldr(temp, MemOperand(scratch, base_offset + offset));
+ __ str(
+ temp,
+ MemOperand(
+ temp2,
+ FixedTypedArrayBase::kDataOffset - kHeapObjectTag + offset));
+ }
+
+ // Now that we have finished with the object's real address tag it
+ __ add(reg, reg, Operand(kHeapObjectTag));
+}
+
+
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vldr(result, scratch0(), base_offset);
}
+ } else if (IsFloat32x4ElementsKind(elements_kind)) {
+ DoLoadKeyedSIMD128ExternalArray<Float32x4>(instr);
+ } else if (IsFloat64x2ElementsKind(elements_kind)) {
+ DoLoadKeyedSIMD128ExternalArray<Float64x2>(instr);
+ } else if (IsInt32x4ElementsKind(elements_kind)) {
+ DoLoadKeyedSIMD128ExternalArray<Int32x4>(instr);
} else {
Register result = ToRegister(instr->result());
MemOperand mem_operand = PrepareKeyedOperand(
case FLOAT64_ELEMENTS:
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
+ case FLOAT32x4_ELEMENTS:
+ case FLOAT64x2_ELEMENTS:
+ case INT32x4_ELEMENTS:
+ case EXTERNAL_FLOAT32x4_ELEMENTS:
+ case EXTERNAL_FLOAT64x2_ELEMENTS:
+ case EXTERNAL_INT32x4_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
}
+// Stores a 128-bit value into an external typed array without using NEON:
+// deoptimizes unless the value is a (non-smi) heap object of type T, then
+// copies the value pointer-word by pointer-word from the object's inner
+// FixedTypedArray to the backing store.
+template<class T>
+void LCodeGen::DoStoreKeyedSIMD128ExternalArray(LStoreKeyed* instr) {
+ ASSERT(instr->value()->IsRegister());
+ Register temp = ToRegister(instr->temp());
+ Register temp2 = ToRegister(instr->temp2());
+ Register input_reg = ToRegister(instr->value());
+ // Smis cannot be SIMD128 heap objects: deopt.
+ __ SmiTst(input_reg);
+ DeoptimizeIf(eq, instr->environment());
+ // Deopt unless the value's instance type matches T.
+ __ CompareObjectType(input_reg, temp, no_reg, T::kInstanceType);
+ DeoptimizeIf(ne, instr->environment());
+
+ STATIC_ASSERT(T::kValueSize % kPointerSize == 0);
+ Register external_pointer = ToRegister(instr->elements());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ // Constant keys this large cannot be encoded in the address operand.
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ // Smi keys carry a tag, so the shift is reduced by the tag size.
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int base_offset = instr->base_offset();
+ Register address = scratch0();
+ if (key_is_constant) {
+ if (constant_key != 0) {
+ __ add(address, external_pointer,
+ Operand(constant_key << element_size_shift));
+ } else {
+ address = external_pointer;
+ }
+ } else {
+ __ add(address, external_pointer, Operand(key, LSL, shift_size));
+ }
+
+ // Load the inner FixedTypedArray.
+ __ ldr(temp2, MemOperand(input_reg, T::kValueOffset - kHeapObjectTag));
+
+ for (int offset = 0; offset < T::kValueSize; offset += kPointerSize) {
+ __ ldr(temp, MemOperand(temp2,
+ FixedTypedArrayBase::kDataOffset - kHeapObjectTag + offset));
+ __ str(temp, MemOperand(address, base_offset + offset));
+ }
+}
+
+
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
} else { // Storing doubles, not floats.
__ vstr(value, address, base_offset);
}
+ } else if (IsFloat32x4ElementsKind(elements_kind)) {
+ DoStoreKeyedSIMD128ExternalArray<Float32x4>(instr);
+ } else if (IsFloat64x2ElementsKind(elements_kind)) {
+ DoStoreKeyedSIMD128ExternalArray<Float64x2>(instr);
+ } else if (IsInt32x4ElementsKind(elements_kind)) {
+ DoStoreKeyedSIMD128ExternalArray<Int32x4>(instr);
} else {
Register value(ToRegister(instr->value()));
MemOperand mem_operand = PrepareKeyedOperand(
case FLOAT64_ELEMENTS:
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
+ case FLOAT32x4_ELEMENTS:
+ case FLOAT64x2_ELEMENTS:
+ case INT32x4_ELEMENTS:
+ case EXTERNAL_FLOAT32x4_ELEMENTS:
+ case EXTERNAL_FLOAT64x2_ELEMENTS:
+ case EXTERNAL_INT32x4_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
Register result,
Register object,
Register index);
+ void DoDeferredSIMD128ToTagged(LInstruction* instr, Runtime::FunctionId id);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ template<class T>
+ void DoLoadKeyedSIMD128ExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+ template<class T>
+ void DoStoreKeyedSIMD128ExternalArray(LStoreKeyed* instr);
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
}
+// Intended to allocate a simd128 object, jumping to the gc_required label
+// if the young space is full and a scavenge is needed.  Not implemented
+// for this port yet: reaching this function is a fatal error.
+void MacroAssembler::AllocateSIMDHeapObject(int size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register map,
+ Label* gc_required,
+ TaggingMode tagging_mode) {
+ UNREACHABLE(); // NOTIMPLEMENTED
+}
+
+
// Copies a fixed number of fields of heap objects from src to dst.
void MacroAssembler::CopyFields(Register dst,
Register src,
Register scratch2,
Register heap_number_map,
Label* gc_required);
+ void AllocateSIMDHeapObject(int size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register map,
+ Label* gc_required,
+ TaggingMode tagging_mode = TAG_RESULT);
// Copies a fixed number of fields of heap objects from src to dst.
void CopyFields(Register dst,
}
static inline bool SupportsCrankshaft();
+ static inline bool SupportsSIMD128InCrankshaft();
static inline unsigned cache_line_size() {
ASSERT(cache_line_size_ != 0);
Handle<Map>* external_map);
bool InstallExperimentalNatives();
void InstallBuiltinFunctionIds();
+ void InstallExperimentalSIMDBuiltinFunctionIds();
void InstallJSFunctionResultCaches();
void InitializeNormalizedMapCaches();
native_context()->set_##type##_array_fun(*fun); \
native_context()->set_##type##_array_external_map(*external_map); \
}
- TYPED_ARRAYS(INSTALL_TYPED_ARRAY)
+ BUILTIN_TYPED_ARRAY(INSTALL_TYPED_ARRAY)
#undef INSTALL_TYPED_ARRAY
Handle<JSFunction> data_view_fun =
iterator_result_map->instance_size());
native_context()->set_iterator_result_map(*iterator_result_map);
}
+
+ if (FLAG_simd_object) {
+ // --- S I M D ---
+ Handle<String> name = factory()->InternalizeUtf8String("SIMD");
+ Handle<JSFunction> cons = factory()->NewFunction(name);
+ JSFunction::SetInstancePrototype(cons,
+ Handle<Object>(native_context()->initial_object_prototype(),
+ isolate()));
+ cons->SetInstanceClassName(*name);
+ Handle<JSObject> simd_object = factory()->NewJSObject(cons, TENURED);
+ ASSERT(simd_object->IsJSObject());
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ global, name, simd_object, DONT_ENUM).Check();
+ native_context()->set_simd_object(*simd_object);
+ // --- f l o a t 3 2 x 4 ---
+ Handle<JSFunction> float32x4_fun =
+ InstallFunction(simd_object, "float32x4", FLOAT32x4_TYPE,
+ Float32x4::kSize,
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal);
+ native_context()->set_float32x4_function(*float32x4_fun);
+
+ // --- f l o a t 6 4 x 2 ---
+ Handle<JSFunction> float64x2_fun =
+ InstallFunction(simd_object, "float64x2", FLOAT64x2_TYPE,
+ Float64x2::kSize,
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal);
+ native_context()->set_float64x2_function(*float64x2_fun);
+
+ // --- i n t 3 2 x 4 ---
+ Handle<JSFunction> int32x4_fun =
+ InstallFunction(simd_object, "int32x4", INT32x4_TYPE,
+ Int32x4::kSize,
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal);
+ native_context()->set_int32x4_function(*int32x4_fun);
+
+ // --- F l o a t 3 2 x 4 A r r a y---
+ Handle<JSFunction> fun;
+ Handle<Map> external_map;
+ InstallTypedArray(
+ "Float32x4Array", FLOAT32x4_ELEMENTS, &fun, &external_map);
+ native_context()->set_float32x4_array_fun(*fun);
+ native_context()->set_float32x4_array_external_map(*external_map);
+
+ // --- F l o a t 6 4 x 2 A r r a y---
+ InstallTypedArray(
+ "Float64x2Array", FLOAT64x2_ELEMENTS, &fun, &external_map);
+ native_context()->set_float64x2_array_fun(*fun);
+ native_context()->set_float64x2_array_external_map(*external_map);
+
+ // --- I n t 3 2 x 4 A r r a y---
+ InstallTypedArray(
+ "Int32x4Array", INT32x4_ELEMENTS, &fun, &external_map);
+ native_context()->set_int32x4_array_fun(*fun);
+ native_context()->set_int32x4_array_external_map(*external_map);
+ }
}
INSTALL_EXPERIMENTAL_NATIVE(i, strings, "harmony-string.js")
INSTALL_EXPERIMENTAL_NATIVE(i, arrays, "harmony-array.js")
INSTALL_EXPERIMENTAL_NATIVE(i, maths, "harmony-math.js")
+ if (FLAG_simd_object &&
+ strcmp(ExperimentalNatives::GetScriptName(i).start(),
+ "native simd128.js") == 0) {
+ if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+ // Store the map for the float32x4, float64x2 and int32x4 function
+ // prototype after the float32x4 and int32x4 function has been set up.
+ InstallExperimentalSIMDBuiltinFunctionIds();
+ JSObject* float32x4_function_prototype = JSObject::cast(
+ native_context()->float32x4_function()->instance_prototype());
+ native_context()->set_float32x4_function_prototype_map(
+ float32x4_function_prototype->map());
+ JSObject* float64x2_function_prototype = JSObject::cast(
+ native_context()->float64x2_function()->instance_prototype());
+ native_context()->set_float64x2_function_prototype_map(
+ float64x2_function_prototype->map());
+ JSObject* int32x4_function_prototype = JSObject::cast(
+ native_context()->int32x4_function()->instance_prototype());
+ native_context()->set_int32x4_function_prototype_map(
+ int32x4_function_prototype->map());
+ }
}
InstallExperimentalNativeFunctions();
}
+// Resolves a dotted holder expression (e.g. "SIMD.float32x4.prototype")
+// starting at the global object, following one property per '.' segment.
+// A trailing ".prototype" segment returns the resolved function's
+// prototype object; otherwise the final property value is returned.
+static Handle<JSObject> ResolveBuiltinSIMDIdHolder(
+ Handle<Context> native_context,
+ const char* holder_expr) {
+ Isolate* isolate = native_context->GetIsolate();
+ Factory* factory = isolate->factory();
+ Handle<GlobalObject> global(native_context->global_object());
+ Handle<Object> holder = global;
+ char* name = const_cast<char*>(holder_expr);
+ char* period_pos = strchr(name, '.');
+ while (period_pos != NULL) {
+ // Segment between |name| and the '.' is the next property to follow.
+ Vector<const char> property(name,
+ static_cast<int>(period_pos - name));
+ Handle<String> property_string = factory->InternalizeUtf8String(property);
+ ASSERT(!property_string.is_null());
+ holder = Object::GetProperty(holder, property_string).ToHandleChecked();
+ // Exact remainder ".prototype" ends the walk at the function prototype.
+ if (strcmp(".prototype", period_pos) == 0) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(holder);
+ return Handle<JSObject>(JSObject::cast(function->prototype()));
+ } else {
+ name = period_pos + 1;
+ period_pos = strchr(name, '.');
+ }
+ }
+
+ return Handle<JSObject>::cast(Object::GetPropertyOrElement(
+ holder, factory->InternalizeUtf8String(name)).ToHandleChecked());
+}
+
+
static void InstallBuiltinFunctionId(Handle<JSObject> holder,
const char* function_name,
BuiltinFunctionId id) {
}
+// Tags every experimental SIMD builtin with its BuiltinFunctionId.  Each
+// function's holder object is resolved from the dotted path supplied by
+// the SIMD_* operation macros; the per-arity wrapper macros below just
+// discard the extra macro parameters.
+void Genesis::InstallExperimentalSIMDBuiltinFunctionIds() {
+ HandleScope scope(isolate());
+#define INSTALL_BUILTIN_ID(holder_expr, fun_name, name) \
+ { \
+ Handle<JSObject> holder = ResolveBuiltinSIMDIdHolder( \
+ native_context(), #holder_expr); \
+ BuiltinFunctionId id = k##name; \
+ InstallBuiltinFunctionId(holder, #fun_name, id); \
+ }
+ SIMD_ARRAY_OPERATIONS(INSTALL_BUILTIN_ID)
+#define INSTALL_SIMD_NULLARY_FUNCTION_ID(p1, p2, p3, p4) \
+ INSTALL_BUILTIN_ID(p1, p2, p3)
+ SIMD_NULLARY_OPERATIONS(INSTALL_SIMD_NULLARY_FUNCTION_ID)
+#undef INSTALL_SIMD_NULLARY_FUNCTION_ID
+#define INSTALL_SIMD_UNARY_FUNCTION_ID(p1, p2, p3, p4, p5) \
+ INSTALL_BUILTIN_ID(p1, p2, p3)
+ SIMD_UNARY_OPERATIONS(INSTALL_SIMD_UNARY_FUNCTION_ID)
+#undef INSTALL_SIMD_UNARY_FUNCTION_ID
+#define INSTALL_SIMD_BINARY_FUNCTION_ID(p1, p2, p3, p4, p5, p6) \
+ INSTALL_BUILTIN_ID(p1, p2, p3)
+ SIMD_BINARY_OPERATIONS(INSTALL_SIMD_BINARY_FUNCTION_ID)
+#undef INSTALL_SIMD_BINARY_FUNCTION_ID
+#define INSTALL_SIMD_TERNARY_FUNCTION_ID(p1, p2, p3, p4, p5, p6, p7) \
+ INSTALL_BUILTIN_ID(p1, p2, p3)
+ SIMD_TERNARY_OPERATIONS(INSTALL_SIMD_TERNARY_FUNCTION_ID)
+#undef INSTALL_SIMD_TERNARY_FUNCTION_ID
+#define INSTALL_SIMD_QUARTERNARY_FUNCTION_ID(p1, p2, p3, p4, p5, p6, p7, p8) \
+ INSTALL_BUILTIN_ID(p1, p2, p3)
+ SIMD_QUARTERNARY_OPERATIONS(INSTALL_SIMD_QUARTERNARY_FUNCTION_ID)
+#undef INSTALL_SIMD_QUARTERNARY_FUNCTION_ID
+#undef INSTALL_BUILTIN_ID
+}
+
+
// Do not forget to update macros.py with named constant
// of cache id.
#define JSFUNCTION_RESULT_CACHE_LIST(F) \
V(SECURITY_TOKEN_INDEX, Object, security_token) \
V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \
V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \
+ V(FLOAT32x4_FUNCTION_INDEX, JSFunction, float32x4_function) \
+ V(FLOAT32x4_FUNCTION_PROTOTYPE_MAP_INDEX, Map, \
+ float32x4_function_prototype_map) \
+ V(FLOAT64x2_FUNCTION_INDEX, JSFunction, float64x2_function) \
+ V(FLOAT64x2_FUNCTION_PROTOTYPE_MAP_INDEX, Map, \
+ float64x2_function_prototype_map) \
+ V(INT32x4_FUNCTION_INDEX, JSFunction, int32x4_function) \
+ V(INT32x4_FUNCTION_PROTOTYPE_MAP_INDEX, Map, \
+ int32x4_function_prototype_map) \
V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \
V(JS_ARRAY_MAPS_INDEX, Object, js_array_maps) \
V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
V(JSON_OBJECT_INDEX, JSObject, json_object) \
+ V(SIMD_OBJECT_INDEX, JSObject, simd_object) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \
V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun) \
V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun) \
V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun) \
+ V(FLOAT32x4_ARRAY_FUN_INDEX, JSFunction, float32x4_array_fun) \
+ V(FLOAT64x2_ARRAY_FUN_INDEX, JSFunction, float64x2_array_fun) \
+ V(INT32x4_ARRAY_FUN_INDEX, JSFunction, int32x4_array_fun) \
V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun) \
V(INT8_ARRAY_EXTERNAL_MAP_INDEX, Map, int8_array_external_map) \
V(UINT8_ARRAY_EXTERNAL_MAP_INDEX, Map, uint8_array_external_map) \
V(UINT32_ARRAY_EXTERNAL_MAP_INDEX, Map, uint32_array_external_map) \
V(FLOAT32_ARRAY_EXTERNAL_MAP_INDEX, Map, float32_array_external_map) \
V(FLOAT64_ARRAY_EXTERNAL_MAP_INDEX, Map, float64_array_external_map) \
+ V(FLOAT32x4_ARRAY_EXTERNAL_MAP_INDEX, Map, float32x4_array_external_map) \
+ V(FLOAT64x2_ARRAY_EXTERNAL_MAP_INDEX, Map, float64x2_array_external_map) \
+ V(INT32x4_ARRAY_EXTERNAL_MAP_INDEX, Map, int32x4_array_external_map) \
V(UINT8_CLAMPED_ARRAY_EXTERNAL_MAP_INDEX, Map, \
uint8_clamped_array_external_map) \
V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \
INITIAL_ARRAY_PROTOTYPE_INDEX,
BOOLEAN_FUNCTION_INDEX,
NUMBER_FUNCTION_INDEX,
+ FLOAT32x4_FUNCTION_INDEX,
+ FLOAT32x4_FUNCTION_PROTOTYPE_MAP_INDEX,
+ FLOAT64x2_FUNCTION_INDEX,
+ FLOAT64x2_FUNCTION_PROTOTYPE_MAP_INDEX,
+ INT32x4_FUNCTION_INDEX,
+ INT32x4_FUNCTION_PROTOTYPE_MAP_INDEX,
STRING_FUNCTION_INDEX,
STRING_FUNCTION_PROTOTYPE_MAP_INDEX,
SYMBOL_FUNCTION_INDEX,
JS_ARRAY_MAPS_INDEX,
DATE_FUNCTION_INDEX,
JSON_OBJECT_INDEX,
+ SIMD_OBJECT_INDEX,
REGEXP_FUNCTION_INDEX,
CREATE_DATE_FUN_INDEX,
TO_NUMBER_FUN_INDEX,
UINT32_ARRAY_FUN_INDEX,
INT32_ARRAY_FUN_INDEX,
FLOAT32_ARRAY_FUN_INDEX,
+ FLOAT32x4_ARRAY_FUN_INDEX,
+ FLOAT64x2_ARRAY_FUN_INDEX,
+ INT32x4_ARRAY_FUN_INDEX,
FLOAT64_ARRAY_FUN_INDEX,
UINT8_CLAMPED_ARRAY_FUN_INDEX,
INT8_ARRAY_EXTERNAL_MAP_INDEX,
INT32_ARRAY_EXTERNAL_MAP_INDEX,
UINT32_ARRAY_EXTERNAL_MAP_INDEX,
FLOAT32_ARRAY_EXTERNAL_MAP_INDEX,
+ FLOAT32x4_ARRAY_EXTERNAL_MAP_INDEX,
+ FLOAT64x2_ARRAY_EXTERNAL_MAP_INDEX,
+ INT32x4_ARRAY_EXTERNAL_MAP_INDEX,
FLOAT64_ARRAY_EXTERNAL_MAP_INDEX,
UINT8_CLAMPED_ARRAY_EXTERNAL_MAP_INDEX,
DATA_VIEW_FUN_INDEX,
static void Int32Array(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Uint32Array(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Float32Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Float32x4Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Float64x2Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Int32x4Array(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Float64Array(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Uint8ClampedArray(
const v8::FunctionCallbackInfo<v8::Value>& args);
}
-// Copy the double registers from the input into the output frame.
+// Copy the simd128/double registers from the input into the output frame.
- CopyDoubleRegisters(output_frame);
+ CopySIMD128Registers(output_frame);
// Fill registers containing handler and number of parameters.
SetPlatformCompiledStubRegisters(output_frame, descriptor);
Memory::Object_at(d.destination()) = *num;
}
+  // Materialize all float32x4 values before looking at arguments because when
+  // the output frames are used to materialize arguments objects later on they
+  // need to already contain valid float32x4 values.
+ for (int i = 0; i < deferred_float32x4s_.length(); i++) {
+ SIMD128MaterializationDescriptor<Address> d = deferred_float32x4s_[i];
+ float32x4_value_t x4 = d.value().f4;
+ Handle<Object> float32x4 = isolate_->factory()->NewFloat32x4(x4);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ "Materialized a new float32x4 %p "
+ "[float32x4(%e, %e, %e, %e)] in slot %p\n",
+ reinterpret_cast<void*>(*float32x4),
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ d.destination());
+ }
+ Memory::Object_at(d.destination()) = *float32x4;
+ }
+
+  // Materialize all float64x2 values before looking at arguments because when
+  // the output frames are used to materialize arguments objects later on they
+  // need to already contain valid float64x2 values.
+ for (int i = 0; i < deferred_float64x2s_.length(); i++) {
+ SIMD128MaterializationDescriptor<Address> d = deferred_float64x2s_[i];
+ float64x2_value_t x2 = d.value().d2;
+ Handle<Object> float64x2 = isolate_->factory()->NewFloat64x2(x2);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ "Materialized a new float64x2 %p "
+ "[float64x2(%e, %e)] in slot %p\n",
+ reinterpret_cast<void*>(*float64x2),
+ x2.storage[0], x2.storage[1],
+ d.destination());
+ }
+ Memory::Object_at(d.destination()) = *float64x2;
+ }
+
+  // Materialize all int32x4 values before looking at arguments because when
+  // the output frames are used to materialize arguments objects later on they
+  // need to already contain valid int32x4 values.
+ for (int i = 0; i < deferred_int32x4s_.length(); i++) {
+ SIMD128MaterializationDescriptor<Address> d = deferred_int32x4s_[i];
+ int32x4_value_t x4 = d.value().i4;
+ Handle<Object> int32x4 = isolate_->factory()->NewInt32x4(x4);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ "Materialized a new int32x4 %p "
+ "[int32x4(%u, %u, %u, %u)] in slot %p\n",
+ reinterpret_cast<void*>(*int32x4),
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ d.destination());
+ }
+ Memory::Object_at(d.destination()) = *int32x4;
+ }
+
+
// Materialize all heap numbers required for arguments/captured objects.
for (int i = 0; i < deferred_objects_double_values_.length(); i++) {
HeapNumberMaterializationDescriptor<int> d =
// Play it safe and clear all object double values before we continue.
deferred_objects_double_values_.Clear();
+ // Materialize all float32x4 values required for arguments/captured objects.
+ for (int i = 0; i < deferred_objects_float32x4_values_.length(); i++) {
+ SIMD128MaterializationDescriptor<int> d =
+ deferred_objects_float32x4_values_[i];
+ float32x4_value_t x4 = d.value().f4;
+ Handle<Object> float32x4 = isolate_->factory()->NewFloat32x4(x4);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ "Materialized a new float32x4 %p "
+ "[float32x4(%e, %e, %e, %e)] for object at %d\n",
+ reinterpret_cast<void*>(*float32x4),
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ d.destination());
+ }
+ ASSERT(values.at(d.destination())->IsTheHole());
+ values.Set(d.destination(), float32x4);
+ }
+
+ // Play it safe and clear all object float32x4 values before we continue.
+ deferred_objects_float32x4_values_.Clear();
+
+ // Materialize all float64x2 values required for arguments/captured objects.
+ for (int i = 0; i < deferred_objects_float64x2_values_.length(); i++) {
+ SIMD128MaterializationDescriptor<int> d =
+ deferred_objects_float64x2_values_[i];
+ float64x2_value_t x2 = d.value().d2;
+ Handle<Object> float64x2 = isolate_->factory()->NewFloat64x2(x2);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ "Materialized a new float64x2 %p "
+ "[float64x2(%e, %e)] for object at %d\n",
+ reinterpret_cast<void*>(*float64x2),
+ x2.storage[0], x2.storage[1],
+ d.destination());
+ }
+ ASSERT(values.at(d.destination())->IsTheHole());
+ values.Set(d.destination(), float64x2);
+ }
+
+ // Play it safe and clear all object float64x2 values before we continue.
+ deferred_objects_float64x2_values_.Clear();
+
+ // Materialize all int32x4 values required for arguments/captured objects.
+ for (int i = 0; i < deferred_objects_int32x4_values_.length(); i++) {
+ SIMD128MaterializationDescriptor<int> d =
+ deferred_objects_int32x4_values_[i];
+ int32x4_value_t x4 = d.value().i4;
+ Handle<Object> int32x4 = isolate_->factory()->NewInt32x4(x4);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ "Materialized a new int32x4 %p "
+ "[int32x4(%u, %u, %u, %u)] for object at %d\n",
+ reinterpret_cast<void*>(*int32x4),
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ d.destination());
+ }
+ ASSERT(values.at(d.destination())->IsTheHole());
+ values.Set(d.destination(), int32x4);
+ }
+
+ // Play it safe and clear all object int32x4 values before we continue.
+ deferred_objects_int32x4_values_.Clear();
+
// Materialize arguments/captured objects.
if (!deferred_objects_.is_empty()) {
List<Handle<Object> > materialized_objects(deferred_objects_.length());
case Translation::INT32_REGISTER:
case Translation::UINT32_REGISTER:
case Translation::DOUBLE_REGISTER:
+ case Translation::FLOAT32x4_REGISTER:
+ case Translation::FLOAT64x2_REGISTER:
+ case Translation::INT32x4_REGISTER:
case Translation::STACK_SLOT:
case Translation::INT32_STACK_SLOT:
case Translation::UINT32_STACK_SLOT:
case Translation::DOUBLE_STACK_SLOT:
+ case Translation::FLOAT32x4_STACK_SLOT:
+ case Translation::FLOAT64x2_STACK_SLOT:
+ case Translation::INT32x4_STACK_SLOT:
case Translation::LITERAL: {
// The value is not part of any materialized object, so we can ignore it.
iterator->Skip(Translation::NumberOfOperandsFor(opcode));
return;
}
+ case Translation::FLOAT32x4_REGISTER:
+ case Translation::FLOAT64x2_REGISTER:
+ case Translation::INT32x4_REGISTER: {
+ int input_reg = iterator->Next();
+ simd128_value_t value = input_->GetSIMD128Register(input_reg);
+ if (trace_scope_ != NULL) {
+ if (opcode == Translation::FLOAT32x4_REGISTER) {
+ float32x4_value_t x4 = value.f4;
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF(trace_scope_->file(),
+ "float32x4(%e, %e, %e, %e) ; %s\n",
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ SIMD128Register::AllocationIndexToString(input_reg));
+ } else if (opcode == Translation::FLOAT64x2_REGISTER) {
+ float64x2_value_t x2 = value.d2;
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF(trace_scope_->file(),
+ "float64x2(%e, %e) ; %s\n",
+ x2.storage[0], x2.storage[1],
+ SIMD128Register::AllocationIndexToString(input_reg));
+ } else {
+ ASSERT(opcode == Translation::INT32x4_REGISTER);
+ int32x4_value_t x4 = value.i4;
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF(trace_scope_->file(),
+ "int32x4(%u, %u, %u, %u) ; %s\n",
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ SIMD128Register::AllocationIndexToString(input_reg));
+ }
+ }
+ AddObjectSIMD128Value(value, opcode);
+ return;
+ }
+
case Translation::STACK_SLOT: {
int input_slot_index = iterator->Next();
unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
return;
}
+ case Translation::FLOAT32x4_STACK_SLOT:
+ case Translation::FLOAT64x2_STACK_SLOT:
+ case Translation::INT32x4_STACK_SLOT: {
+ int input_slot_index = iterator->Next();
+ unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
+ simd128_value_t value = input_->GetSIMD128FrameSlot(input_offset);
+ if (trace_scope_ != NULL) {
+ if (opcode == Translation::FLOAT32x4_STACK_SLOT) {
+ float32x4_value_t x4 = value.f4;
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF(trace_scope_->file(),
+ "float32x4(%e, %e, %e, %e) ; [sp + %d]\n",
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ input_offset);
+ } else if (opcode == Translation::FLOAT64x2_STACK_SLOT) {
+ float64x2_value_t x2 = value.d2;
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF(trace_scope_->file(),
+ "float64x2(%e, %e) ; [sp + %d]\n",
+ x2.storage[0], x2.storage[1],
+ input_offset);
+ } else {
+ ASSERT(opcode == Translation::INT32x4_STACK_SLOT);
+ int32x4_value_t x4 = value.i4;
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF(trace_scope_->file(),
+ "int32x4(%u, %u, %u, %u) ; [sp + %d]\n",
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ input_offset);
+ }
+ }
+ AddObjectSIMD128Value(value, opcode);
+ return;
+ }
+
case Translation::LITERAL: {
Object* literal = ComputeLiteral(iterator->Next());
if (trace_scope_ != NULL) {
return;
}
+ case Translation::FLOAT32x4_REGISTER:
+ case Translation::FLOAT64x2_REGISTER:
+ case Translation::INT32x4_REGISTER: {
+ int input_reg = iterator->Next();
+ simd128_value_t value = input_->GetSIMD128Register(input_reg);
+ if (trace_scope_ != NULL) {
+ if (opcode == Translation::FLOAT32x4_REGISTER) {
+ float32x4_value_t x4 = value.f4;
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ":"
+ " [top + %d] <- float32x4(%e, %e, %e, %e) ; %s\n",
+ output_[frame_index]->GetTop() + output_offset,
+ output_offset,
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ SIMD128Register::AllocationIndexToString(input_reg));
+ } else if (opcode == Translation::FLOAT64x2_REGISTER) {
+ float64x2_value_t x2 = value.d2;
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ":"
+ " [top + %d] <- float64x2(%e, %e) ; %s\n",
+ output_[frame_index]->GetTop() + output_offset,
+ output_offset,
+ x2.storage[0], x2.storage[1],
+ SIMD128Register::AllocationIndexToString(input_reg));
+ } else {
+ ASSERT(opcode == Translation::INT32x4_REGISTER);
+ int32x4_value_t x4 = value.i4;
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ":"
+ " [top + %d] <- int32x4(%u, %u, %u, %u) ; %s\n",
+ output_[frame_index]->GetTop() + output_offset,
+ output_offset,
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ SIMD128Register::AllocationIndexToString(input_reg));
+ }
+ }
+ // We save the untagged value on the side and store a GC-safe
+ // temporary placeholder in the frame.
+ AddSIMD128Value(output_[frame_index]->GetTop() + output_offset, value,
+ opcode);
+ output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
+ return;
+ }
+
case Translation::STACK_SLOT: {
int input_slot_index = iterator->Next();
unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
return;
}
+ case Translation::FLOAT32x4_STACK_SLOT:
+ case Translation::FLOAT64x2_STACK_SLOT:
+ case Translation::INT32x4_STACK_SLOT: {
+ int input_slot_index = iterator->Next();
+ unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
+ simd128_value_t value = input_->GetSIMD128FrameSlot(input_offset);
+ if (trace_scope_ != NULL) {
+ if (opcode == Translation::FLOAT32x4_STACK_SLOT) {
+ float32x4_value_t x4 = value.f4;
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": "
+ "[top + %d] <- float32x4(%e, %e, %e, %e) ; [sp + %d]\n",
+ output_[frame_index]->GetTop() + output_offset,
+ output_offset,
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ input_offset);
+ } else if (opcode == Translation::FLOAT64x2_STACK_SLOT) {
+ float64x2_value_t x2 = value.d2;
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": "
+ "[top + %d] <- float64x2(%e, %e) ; [sp + %d]\n",
+ output_[frame_index]->GetTop() + output_offset,
+ output_offset,
+ x2.storage[0], x2.storage[1],
+ input_offset);
+ } else {
+ ASSERT(opcode == Translation::INT32x4_STACK_SLOT);
+ int32x4_value_t x4 = value.i4;
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": "
+ "[top + %d] <- int32x4(%u, %u, %u, %u) ; [sp + %d]\n",
+ output_[frame_index]->GetTop() + output_offset,
+ output_offset,
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ input_offset);
+ }
+ }
+ // We save the untagged value on the side and store a GC-safe
+ // temporary placeholder in the frame.
+ AddSIMD128Value(output_[frame_index]->GetTop() + output_offset, value,
+ opcode);
+ output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
+ return;
+ }
+
case Translation::LITERAL: {
Object* literal = ComputeLiteral(iterator->Next());
if (trace_scope_ != NULL) {
}
+void Deoptimizer::AddObjectSIMD128Value(simd128_value_t value,
+ int translation_opcode) {
+ deferred_objects_tagged_values_.Add(isolate()->heap()->the_hole_value());
+ SIMD128MaterializationDescriptor<int> value_desc(
+ deferred_objects_tagged_values_.length() - 1, value);
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(translation_opcode);
+ if (opcode == Translation::FLOAT32x4_REGISTER ||
+ opcode == Translation::FLOAT32x4_STACK_SLOT) {
+ deferred_objects_float32x4_values_.Add(value_desc);
+ } else if (opcode == Translation::FLOAT64x2_REGISTER ||
+ opcode == Translation::FLOAT64x2_STACK_SLOT) {
+ deferred_objects_float64x2_values_.Add(value_desc);
+ } else {
+ ASSERT(opcode == Translation::INT32x4_REGISTER ||
+ opcode == Translation::INT32x4_STACK_SLOT);
+ deferred_objects_int32x4_values_.Add(value_desc);
+ }
+}
+
+
void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) {
HeapNumberMaterializationDescriptor<Address> value_desc(
reinterpret_cast<Address>(slot_address), value);
}
+void Deoptimizer::AddSIMD128Value(intptr_t slot_address,
+ simd128_value_t value,
+ int translation_opcode) {
+ SIMD128MaterializationDescriptor<Address> value_desc(
+ reinterpret_cast<Address>(slot_address), value);
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(translation_opcode);
+ if (opcode == Translation::FLOAT32x4_REGISTER ||
+ opcode == Translation::FLOAT32x4_STACK_SLOT) {
+ deferred_float32x4s_.Add(value_desc);
+ } else if (opcode == Translation::FLOAT64x2_REGISTER ||
+ opcode == Translation::FLOAT64x2_STACK_SLOT) {
+ deferred_float64x2s_.Add(value_desc);
+ } else {
+ ASSERT(opcode == Translation::INT32x4_REGISTER ||
+ opcode == Translation::INT32x4_STACK_SLOT);
+ deferred_int32x4s_.Add(value_desc);
+ }
+}
+
+
void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
BailoutType type,
int max_entry_id) {
}
+void Translation::StoreSIMD128Register(SIMD128Register reg, Opcode opcode) {
+ buffer_->Add(opcode, zone());
+ buffer_->Add(SIMD128Register::ToAllocationIndex(reg), zone());
+}
+
+
void Translation::StoreStackSlot(int index) {
buffer_->Add(STACK_SLOT, zone());
buffer_->Add(index, zone());
}
+void Translation::StoreSIMD128StackSlot(int index, Opcode opcode) {
+ buffer_->Add(opcode, zone());
+ buffer_->Add(index, zone());
+}
+
+
void Translation::StoreLiteral(int literal_id) {
buffer_->Add(LITERAL, zone());
buffer_->Add(literal_id, zone());
case INT32_REGISTER:
case UINT32_REGISTER:
case DOUBLE_REGISTER:
+ case FLOAT32x4_REGISTER:
+ case FLOAT64x2_REGISTER:
+ case INT32x4_REGISTER:
case STACK_SLOT:
case INT32_STACK_SLOT:
case UINT32_STACK_SLOT:
case DOUBLE_STACK_SLOT:
+ case FLOAT32x4_STACK_SLOT:
+ case FLOAT64x2_STACK_SLOT:
+ case INT32x4_STACK_SLOT:
case LITERAL:
case COMPILED_STUB_FRAME:
return 1;
case Translation::INT32_REGISTER:
case Translation::UINT32_REGISTER:
case Translation::DOUBLE_REGISTER:
+ case Translation::FLOAT32x4_REGISTER:
+ case Translation::FLOAT64x2_REGISTER:
+ case Translation::INT32x4_REGISTER:
// We are at safepoint which corresponds to call. All registers are
// saved by caller so there would be no live registers at this
// point. Thus these translation commands should not be used.
return SlotRef(slot_addr, SlotRef::DOUBLE);
}
+ case Translation::FLOAT32x4_STACK_SLOT: {
+ int slot_index = iterator->Next();
+ Address slot_addr = SlotAddress(frame, slot_index);
+ return SlotRef(slot_addr, SlotRef::FLOAT32x4);
+ }
+
+ case Translation::FLOAT64x2_STACK_SLOT: {
+ int slot_index = iterator->Next();
+ Address slot_addr = SlotAddress(frame, slot_index);
+ return SlotRef(slot_addr, SlotRef::FLOAT64x2);
+ }
+
+ case Translation::INT32x4_STACK_SLOT: {
+ int slot_index = iterator->Next();
+ Address slot_addr = SlotAddress(frame, slot_index);
+ return SlotRef(slot_addr, SlotRef::INT32x4);
+ }
+
case Translation::LITERAL: {
int literal_index = iterator->Next();
return SlotRef(data->GetIsolate(),
return isolate->factory()->NewNumber(value);
}
+ case FLOAT32x4:
+ return isolate->factory()->NewFloat32x4(read_simd128_value(addr_).f4);
+
+ case FLOAT64x2:
+ return isolate->factory()->NewFloat64x2(read_simd128_value(addr_).d2);
+
+ case INT32x4:
+ return isolate->factory()->NewInt32x4(read_simd128_value(addr_).i4);
+
case LITERAL:
return literal_;
#endif // V8_HOST_CAN_READ_UNALIGNED
}
+static inline simd128_value_t read_simd128_value(Address p) {
+ return *reinterpret_cast<simd128_value_t*>(p);
+}
class FrameDescription;
class TranslationIterator;
};
+template<typename T>
+class SIMD128MaterializationDescriptor BASE_EMBEDDED {
+ public:
+ SIMD128MaterializationDescriptor(T destination, simd128_value_t value)
+ : destination_(destination), value_(value) { }
+
+ T destination() const { return destination_; }
+ simd128_value_t value() const { return value_; }
+
+ private:
+ T destination_;
+ simd128_value_t value_;
+};
+
+
class ObjectMaterializationDescriptor BASE_EMBEDDED {
public:
ObjectMaterializationDescriptor(
void AddObjectDuplication(intptr_t slot, int object_index);
void AddObjectTaggedValue(intptr_t value);
void AddObjectDoubleValue(double value);
+ void AddObjectSIMD128Value(simd128_value_t value, int translation_opcode);
void AddDoubleValue(intptr_t slot_address, double value);
+ void AddSIMD128Value(intptr_t slot_address, simd128_value_t value,
+ int translation_opcode);
bool ArgumentsObjectIsAdapted(int object_index) {
ObjectMaterializationDescriptor desc = deferred_objects_.at(object_index);
void SetPlatformCompiledStubRegisters(FrameDescription* output_frame,
CodeStubInterfaceDescriptor* desc);
- // Fill the given output frame's double registers with the original values
- // from the input frame's double registers.
- void CopyDoubleRegisters(FrameDescription* output_frame);
+ // Fill the given output frame's simd128 registers with the original values
+ // from the input frame's simd128 registers.
+ void CopySIMD128Registers(FrameDescription* output_frame);
// Determines whether the input frame contains alignment padding by looking
// at the dynamic alignment state slot inside the frame.
List<Object*> deferred_objects_tagged_values_;
List<HeapNumberMaterializationDescriptor<int> >
deferred_objects_double_values_;
+ List<SIMD128MaterializationDescriptor<int> >
+ deferred_objects_float32x4_values_;
+ List<SIMD128MaterializationDescriptor<int> >
+ deferred_objects_float64x2_values_;
+ List<SIMD128MaterializationDescriptor<int> >
+ deferred_objects_int32x4_values_;
List<ObjectMaterializationDescriptor> deferred_objects_;
List<HeapNumberMaterializationDescriptor<Address> > deferred_heap_numbers_;
+ List<SIMD128MaterializationDescriptor<Address> > deferred_float32x4s_;
+ List<SIMD128MaterializationDescriptor<Address> > deferred_float64x2s_;
+ List<SIMD128MaterializationDescriptor<Address> > deferred_int32x4s_;
// Key for lookup of previously materialized objects
Address stack_fp_;
return read_double_value(reinterpret_cast<Address>(ptr));
}
+ simd128_value_t GetSIMD128FrameSlot(unsigned offset) {
+ intptr_t* ptr = GetFrameSlotPointer(offset);
+ return read_simd128_value(reinterpret_cast<Address>(ptr));
+ }
+
void SetFrameSlot(unsigned offset, intptr_t value) {
*GetFrameSlotPointer(offset) = value;
}
return registers_[n];
}
- double GetDoubleRegister(unsigned n) const {
- ASSERT(n < ARRAY_SIZE(double_registers_));
- return double_registers_[n];
+ double GetDoubleRegister(unsigned n) const;
+
+ simd128_value_t GetSIMD128Register(unsigned n) const {
+ ASSERT(n < ARRAY_SIZE(simd128_registers_));
+ return simd128_registers_[n];
}
void SetRegister(unsigned n, intptr_t value) {
registers_[n] = value;
}
- void SetDoubleRegister(unsigned n, double value) {
- ASSERT(n < ARRAY_SIZE(double_registers_));
- double_registers_[n] = value;
+ void SetDoubleRegister(unsigned n, double value);
+
+ void SetSIMD128Register(unsigned n, simd128_value_t value) {
+ ASSERT(n < ARRAY_SIZE(simd128_registers_));
+ simd128_registers_[n] = value;
}
intptr_t GetTop() const { return top_; }
return OFFSET_OF(FrameDescription, registers_);
}
- static int double_registers_offset() {
- return OFFSET_OF(FrameDescription, double_registers_);
+ static int simd128_registers_offset() {
+ return OFFSET_OF(FrameDescription, simd128_registers_);
}
static int frame_size_offset() {
uintptr_t frame_size_; // Number of bytes.
JSFunction* function_;
intptr_t registers_[Register::kNumRegisters];
- double double_registers_[DoubleRegister::kMaxNumRegisters];
+ simd128_value_t simd128_registers_[SIMD128Register::kMaxNumRegisters];
intptr_t top_;
intptr_t pc_;
intptr_t fp_;
V(INT32_REGISTER) \
V(UINT32_REGISTER) \
V(DOUBLE_REGISTER) \
+ V(FLOAT32x4_REGISTER) \
+ V(FLOAT64x2_REGISTER) \
+ V(INT32x4_REGISTER) \
V(STACK_SLOT) \
V(INT32_STACK_SLOT) \
V(UINT32_STACK_SLOT) \
V(DOUBLE_STACK_SLOT) \
+ V(FLOAT32x4_STACK_SLOT) \
+ V(FLOAT64x2_STACK_SLOT) \
+ V(INT32x4_STACK_SLOT) \
V(LITERAL)
void StoreInt32Register(Register reg);
void StoreUint32Register(Register reg);
void StoreDoubleRegister(DoubleRegister reg);
+ void StoreSIMD128Register(SIMD128Register reg, Opcode opcode);
void StoreStackSlot(int index);
void StoreInt32StackSlot(int index);
void StoreUint32StackSlot(int index);
void StoreDoubleStackSlot(int index);
+ void StoreSIMD128StackSlot(int index, Opcode opcode);
void StoreLiteral(int literal_id);
void StoreArgumentsObject(bool args_known, int args_index, int args_length);
INT32,
UINT32,
DOUBLE,
+ FLOAT32x4,
+ FLOAT64x2,
+ INT32x4,
LITERAL,
DEFERRED_OBJECT, // Object captured by the escape analysis.
// The number of nested objects can be obtained
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FLOAT64_ELEMENTS:
return 3;
+ case EXTERNAL_FLOAT32x4_ELEMENTS:
+ case EXTERNAL_FLOAT64x2_ELEMENTS:
+ case EXTERNAL_INT32x4_ELEMENTS:
+ case FLOAT32x4_ELEMENTS:
+ case FLOAT64x2_ELEMENTS:
+ case INT32x4_ELEMENTS:
+ return 4;
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
EXTERNAL_INT16_ELEMENTS,
EXTERNAL_UINT16_ELEMENTS,
EXTERNAL_INT32_ELEMENTS,
+ EXTERNAL_INT32x4_ELEMENTS,
EXTERNAL_UINT32_ELEMENTS,
EXTERNAL_FLOAT32_ELEMENTS,
+ EXTERNAL_FLOAT32x4_ELEMENTS,
EXTERNAL_FLOAT64_ELEMENTS,
+ EXTERNAL_FLOAT64x2_ELEMENTS,
EXTERNAL_UINT8_CLAMPED_ELEMENTS,
// Fixed typed arrays
INT16_ELEMENTS,
UINT32_ELEMENTS,
INT32_ELEMENTS,
+ INT32x4_ELEMENTS,
FLOAT32_ELEMENTS,
+ FLOAT32x4_ELEMENTS,
FLOAT64_ELEMENTS,
+ FLOAT64x2_ELEMENTS,
UINT8_CLAMPED_ELEMENTS,
// Derived constants from ElementsKind
}
+inline bool IsExternalFloat32x4ElementsKind(ElementsKind kind) {
+ return kind == EXTERNAL_FLOAT32x4_ELEMENTS;
+}
+
+
+inline bool IsExternalFloat64x2ElementsKind(ElementsKind kind) {
+ return kind == EXTERNAL_FLOAT64x2_ELEMENTS;
+}
+
+
+inline bool IsExternalInt32x4ElementsKind(ElementsKind kind) {
+ return kind == EXTERNAL_INT32x4_ELEMENTS;
+}
+
+
inline bool IsFixedFloatElementsKind(ElementsKind kind) {
return kind == FLOAT32_ELEMENTS || kind == FLOAT64_ELEMENTS;
}
+inline bool IsFixedFloat32x4ElementsKind(ElementsKind kind) {
+ return kind == FLOAT32x4_ELEMENTS;
+}
+
+
+inline bool IsFixedFloat64x2ElementsKind(ElementsKind kind) {
+ return kind == FLOAT64x2_ELEMENTS;
+}
+
+
+inline bool IsFixedInt32x4ElementsKind(ElementsKind kind) {
+ return kind == INT32x4_ELEMENTS;
+}
+
+
inline bool IsDoubleOrFloatElementsKind(ElementsKind kind) {
return IsFastDoubleElementsKind(kind) ||
IsExternalFloatOrDoubleElementsKind(kind) ||
}
+inline bool IsFloat32x4ElementsKind(ElementsKind kind) {
+ return IsExternalFloat32x4ElementsKind(kind) ||
+ IsFixedFloat32x4ElementsKind(kind);
+}
+
+
+inline bool IsFloat64x2ElementsKind(ElementsKind kind) {
+ return IsExternalFloat64x2ElementsKind(kind) ||
+ IsFixedFloat64x2ElementsKind(kind);
+}
+
+
+inline bool IsInt32x4ElementsKind(ElementsKind kind) {
+ return IsExternalInt32x4ElementsKind(kind) ||
+ IsFixedInt32x4ElementsKind(kind);
+}
+
+
+inline bool IsSIMD128ElementsKind(ElementsKind kind) {
+ return IsFloat32x4ElementsKind(kind) || IsFloat64x2ElementsKind(kind) ||
+ IsInt32x4ElementsKind(kind);
+}
+
+
inline bool IsFastSmiOrObjectElementsKind(ElementsKind kind) {
return kind == FAST_SMI_ELEMENTS ||
kind == FAST_HOLEY_SMI_ELEMENTS ||
// - ExternalInt16ElementsAccessor
// - ExternalUint16ElementsAccessor
// - ExternalInt32ElementsAccessor
+// - ExternalInt32x4ElementsAccessor
// - ExternalUint32ElementsAccessor
// - ExternalFloat32ElementsAccessor
+// - ExternalFloat32x4ElementsAccessor
// - ExternalFloat64ElementsAccessor
+// - ExternalFloat64x2ElementsAccessor
// - ExternalUint8ClampedElementsAccessor
// - FixedUint8ElementsAccessor
// - FixedInt8ElementsAccessor
// - FixedInt16ElementsAccessor
// - FixedUint32ElementsAccessor
// - FixedInt32ElementsAccessor
+// - FixedInt32x4ElementsAccessor
// - FixedFloat32ElementsAccessor
+// - FixedFloat32x4ElementsAccessor
// - FixedFloat64ElementsAccessor
+// - FixedFloat64x2ElementsAccessor
// - FixedUint8ClampedElementsAccessor
// - DictionaryElementsAccessor
// - SloppyArgumentsElementsAccessor
EXTERNAL_UINT16_ELEMENTS, ExternalUint16Array) \
V(ExternalInt32ElementsAccessor, EXTERNAL_INT32_ELEMENTS, \
ExternalInt32Array) \
+ V(ExternalInt32x4ElementsAccessor, EXTERNAL_INT32x4_ELEMENTS, \
+ ExternalInt32x4Array) \
V(ExternalUint32ElementsAccessor, \
EXTERNAL_UINT32_ELEMENTS, ExternalUint32Array) \
V(ExternalFloat32ElementsAccessor, \
EXTERNAL_FLOAT32_ELEMENTS, ExternalFloat32Array) \
+ V(ExternalFloat32x4ElementsAccessor, \
+ EXTERNAL_FLOAT32x4_ELEMENTS, ExternalFloat32x4Array) \
V(ExternalFloat64ElementsAccessor, \
EXTERNAL_FLOAT64_ELEMENTS, ExternalFloat64Array) \
+ V(ExternalFloat64x2ElementsAccessor, \
+ EXTERNAL_FLOAT64x2_ELEMENTS, ExternalFloat64x2Array) \
V(ExternalUint8ClampedElementsAccessor, \
EXTERNAL_UINT8_CLAMPED_ELEMENTS, \
ExternalUint8ClampedArray) \
V(FixedInt16ElementsAccessor, INT16_ELEMENTS, FixedInt16Array) \
V(FixedUint32ElementsAccessor, UINT32_ELEMENTS, FixedUint32Array) \
V(FixedInt32ElementsAccessor, INT32_ELEMENTS, FixedInt32Array) \
+ V(FixedInt32x4ElementsAccessor, INT32x4_ELEMENTS, FixedInt32x4Array) \
V(FixedFloat32ElementsAccessor, FLOAT32_ELEMENTS, FixedFloat32Array) \
+ V(FixedFloat32x4ElementsAccessor, FLOAT32x4_ELEMENTS, \
+ FixedFloat32x4Array) \
V(FixedFloat64ElementsAccessor, FLOAT64_ELEMENTS, FixedFloat64Array) \
+ V(FixedFloat64x2ElementsAccessor, FLOAT64x2_ELEMENTS, \
+ FixedFloat64x2Array) \
V(FixedUint8ClampedElementsAccessor, UINT8_CLAMPED_ELEMENTS, \
FixedUint8ClampedArray)
}
+Handle<Float32x4> Factory::NewFloat32x4(float32x4_value_t value,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateFloat32x4(value, pretenure), Float32x4);
+}
+
+
+Handle<Float64x2> Factory::NewFloat64x2(float64x2_value_t value,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateFloat64x2(value, pretenure), Float64x2);
+}
+
+
+Handle<Int32x4> Factory::NewInt32x4(int32x4_value_t value,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateInt32x4(value, pretenure), Int32x4);
+}
+
+
Handle<Object> Factory::NewTypeError(const char* message,
Vector< Handle<Object> > args) {
return NewError("MakeTypeError", message, args);
Handle<HeapNumber> NewHeapNumber(double value,
PretenureFlag pretenure = NOT_TENURED);
+ Handle<Float32x4> NewFloat32x4(float32x4_value_t value,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<Float64x2> NewFloat64x2(float64x2_value_t value,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<Int32x4> NewInt32x4(int32x4_value_t value,
+ PretenureFlag pretenure = NOT_TENURED);
// These objects are used by the api to create env-independent data
// structures in the heap.
#define FLAG FLAG_FULL
// Flags for language modes and experimental language features.
+DEFINE_bool(simd_object, false, "enable SIMD object and operations")
DEFINE_bool(use_strict, false, "enforce strict mode")
DEFINE_bool(es_staging, false, "enable upcoming ES6+ features")
// -----------------------------------------------------------------------------
// Constants
+struct float32x4_value_t { float storage[4]; };
+struct float64x2_value_t { double storage[2]; };
+struct int32x4_value_t { int32_t storage[4]; };
+union simd128_value_t {
+ double d[2];
+ float32x4_value_t f4;
+ float64x2_value_t d2;
+ int32x4_value_t i4;
+};
+
const int KB = 1024;
const int MB = KB * KB;
const int GB = KB * KB * KB;
const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
-const int kCharSize = sizeof(char); // NOLINT
-const int kShortSize = sizeof(short); // NOLINT
-const int kIntSize = sizeof(int); // NOLINT
-const int kInt32Size = sizeof(int32_t); // NOLINT
-const int kInt64Size = sizeof(int64_t); // NOLINT
-const int kDoubleSize = sizeof(double); // NOLINT
-const int kIntptrSize = sizeof(intptr_t); // NOLINT
-const int kPointerSize = sizeof(void*); // NOLINT
+const int kCharSize = sizeof(char); // NOLINT
+const int kShortSize = sizeof(short); // NOLINT
+const int kIntSize = sizeof(int); // NOLINT
+const int kInt32Size = sizeof(int32_t); // NOLINT
+const int kInt64Size = sizeof(int64_t); // NOLINT
+const int kDoubleSize = sizeof(double); // NOLINT
+const int kFloatSize = sizeof(float); // NOLINT
+const int kFloat32x4Size = sizeof(float32x4_value_t); // NOLINT
+const int kFloat64x2Size = sizeof(float64x2_value_t); // NOLINT
+const int kInt32x4Size = sizeof(int32x4_value_t); // NOLINT
+const int kSIMD128Size = sizeof(simd128_value_t); // NOLINT
+const int kIntptrSize = sizeof(intptr_t); // NOLINT
+const int kPointerSize = sizeof(void*); // NOLINT
const int kRegisterSize = kPointerSize;
const int kPCOnStackSize = kRegisterSize;
const int kFPOnStackSize = kRegisterSize;
}
+#define SIMD128_HEAP_ALLOCATE_FUNCTIONS(V) \
+ V(Float32x4, float32x4) \
+ V(Float64x2, float64x2) \
+ V(Int32x4, int32x4)
+
+
+#define DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION(TYPE, type) \
+AllocationResult Heap::Allocate##TYPE(type##_value_t value, \
+ PretenureFlag pretenure) { \
+ STATIC_ASSERT(TYPE::kSize <= Page::kMaxRegularHeapObjectSize); \
+ \
+ AllocationSpace space = \
+ SelectSpace(TYPE::kSize, OLD_DATA_SPACE, pretenure); \
+ \
+ HeapObject* result; \
+ { AllocationResult allocation = \
+ AllocateRaw(TYPE::kSize, space, OLD_DATA_SPACE); \
+ if (!allocation.To(&result)) return allocation; \
+ } \
+ \
+ result->set_map_no_write_barrier( \
+ isolate()->native_context()->type##_function()->initial_map()); \
+ JSObject::cast(result)->set_properties(empty_fixed_array()); \
+ JSObject::cast(result)->set_elements(empty_fixed_array()); \
+ \
+ HeapObject* storage; \
+ int storage_size = \
+ FixedTypedArrayBase::kDataOffset + k##TYPE##Size; \
+ space = SelectSpace(storage_size, OLD_DATA_SPACE, pretenure); \
+ { AllocationResult allocation = \
+ AllocateRaw(storage_size, space, OLD_DATA_SPACE); \
+ if (!allocation.To(&storage)) return allocation; \
+ } \
+ \
+ storage->set_map( \
+ *isolate()->factory()->fixed_##type##_array_map()); \
+ FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(storage); \
+ elements->set_length(static_cast<int>(1)); \
+ memset(elements->DataPtr(), 0, elements->DataSize()); \
+ Fixed##TYPE##Array::cast(storage)->set(0, value); \
+ TYPE::cast(result)->set_value(storage); \
+ return result; \
+}
+
+
+SIMD128_HEAP_ALLOCATE_FUNCTIONS(DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION)
+
+
AllocationResult Heap::AllocateCell(Object* value) {
int size = Cell::kSize;
STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
V(Map, external_int16_array_map, ExternalInt16ArrayMap) \
V(Map, external_uint16_array_map, ExternalUint16ArrayMap) \
V(Map, external_int32_array_map, ExternalInt32ArrayMap) \
+ V(Map, external_int32x4_array_map, ExternalInt32x4ArrayMap) \
V(Map, external_uint32_array_map, ExternalUint32ArrayMap) \
V(Map, external_float32_array_map, ExternalFloat32ArrayMap) \
+ V(Map, external_float32x4_array_map, ExternalFloat32x4ArrayMap) \
+ V(Map, external_float64x2_array_map, ExternalFloat64x2ArrayMap) \
V(Map, external_float64_array_map, ExternalFloat64ArrayMap) \
V(Map, external_uint8_clamped_array_map, ExternalUint8ClampedArrayMap) \
V(ExternalArray, empty_external_int8_array, \
V(ExternalArray, empty_external_uint16_array, \
EmptyExternalUint16Array) \
V(ExternalArray, empty_external_int32_array, EmptyExternalInt32Array) \
+ V(ExternalArray, empty_external_int32x4_array, EmptyExternalInt32x4Array) \
V(ExternalArray, empty_external_uint32_array, \
EmptyExternalUint32Array) \
V(ExternalArray, empty_external_float32_array, EmptyExternalFloat32Array) \
+ V(ExternalArray, empty_external_float32x4_array, EmptyExternalFloat32x4Array)\
+ V(ExternalArray, empty_external_float64x2_array, EmptyExternalFloat64x2Array)\
V(ExternalArray, empty_external_float64_array, EmptyExternalFloat64Array) \
V(ExternalArray, empty_external_uint8_clamped_array, \
EmptyExternalUint8ClampedArray) \
V(Map, fixed_int16_array_map, FixedInt16ArrayMap) \
V(Map, fixed_uint32_array_map, FixedUint32ArrayMap) \
V(Map, fixed_int32_array_map, FixedInt32ArrayMap) \
+ V(Map, fixed_int32x4_array_map, FixedInt32x4ArrayMap) \
V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \
+ V(Map, fixed_float32x4_array_map, FixedFloat32x4ArrayMap) \
+ V(Map, fixed_float64x2_array_map, FixedFloat64x2ArrayMap) \
V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \
V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \
V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array) \
V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array) \
V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array) \
+ V(FixedTypedArrayBase, empty_fixed_float32x4_array, \
+ EmptyFixedFloat32x4Array) \
+ V(FixedTypedArrayBase, empty_fixed_float64x2_array, \
+ EmptyFixedFloat64x2Array) \
+ V(FixedTypedArrayBase, empty_fixed_int32x4_array, EmptyFixedInt32x4Array) \
V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array) \
V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \
EmptyFixedUint8ClampedArray) \
V(null_string, "null") \
V(number_string, "number") \
V(Number_string, "Number") \
+ V(float32x4_string, "float32x4") \
+ V(float64x2_string, "float64x2") \
+ V(int32x4_string, "int32x4") \
V(nan_string, "NaN") \
V(RegExp_string, "RegExp") \
V(source_string, "source") \
V(throw_string, "throw") \
V(done_string, "done") \
V(value_string, "value") \
+ V(signMask, "signMask") \
+ V(x, "x") \
+ V(y, "y") \
+ V(z, "z") \
+ V(w, "w") \
+ V(flagX, "flagX") \
+ V(flagY, "flagY") \
+ V(flagZ, "flagZ") \
+ V(flagW, "flagW") \
+ V(simd, "SIMD") \
V(next_string, "next") \
V(byte_length_string, "byteLength") \
V(byte_offset_string, "byteOffset") \
MUST_USE_RESULT AllocationResult AllocateHeapNumber(
double value, PretenureFlag pretenure = NOT_TENURED);
+ // Allocate a Float32x4 from value.
+ MUST_USE_RESULT AllocationResult AllocateFloat32x4(
+ float32x4_value_t value,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocate a Float64x2 from value.
+ MUST_USE_RESULT AllocationResult AllocateFloat64x2(
+ float64x2_value_t value,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocate an Int32x4 from value.
+ MUST_USE_RESULT AllocationResult AllocateInt32x4(
+ int32x4_value_t value,
+ PretenureFlag pretenure = NOT_TENURED);
+
// Allocate a byte array of the specified length
MUST_USE_RESULT AllocationResult AllocateByteArray(
int length,
case HValue::kSeqStringGetChar:
case HValue::kStoreCodeEntry:
case HValue::kStoreFrameContext:
- case HValue::kStoreKeyed:
case HValue::kStoreNamedField:
case HValue::kStoreNamedGeneric:
case HValue::kStringCharCodeAt:
case HValue::kTypeofIsAndBranch:
case HValue::kUnknownOSRValue:
case HValue::kUseConst:
+ case HValue::kNullarySIMDOperation:
return false;
+ case HValue::kStoreKeyed:
+ return !CpuFeatures::SupportsSIMD128InCrankshaft() &&
+ IsSIMD128ElementsKind(HStoreKeyed::cast(this)->elements_kind());
+
case HValue::kAdd:
case HValue::kAllocateBlockContext:
case HValue::kApplyArguments:
case HValue::kTypeof:
case HValue::kUnaryMathOperation:
case HValue::kWrapReceiver:
+ case HValue::kUnarySIMDOperation:
+ case HValue::kBinarySIMDOperation:
+ case HValue::kTernarySIMDOperation:
+ case HValue::kQuarternarySIMDOperation:
return true;
}
UNREACHABLE();
type_literal_.IsKnownGlobal(isolate()->heap()->number_string());
*block = number_type ? FirstSuccessor() : SecondSuccessor();
return true;
+ } else if (value()->representation().IsFloat32x4()) {
+ bool float32x4_type =
+ type_literal_.IsKnownGlobal(isolate()->heap()->float32x4_string());
+ *block = float32x4_type ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ } else if (value()->representation().IsFloat64x2()) {
+ bool float64x2_type =
+ type_literal_.IsKnownGlobal(isolate()->heap()->float64x2_string());
+ *block = float64x2_type ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ } else if (value()->representation().IsInt32x4()) {
+ bool int32x4_type =
+ type_literal_.IsKnownGlobal(isolate()->heap()->int32x4_string());
+ *block = int32x4_type ? FirstSuccessor() : SecondSuccessor();
+ return true;
}
+
*block = NULL;
return false;
}
stream->Add("@%d", offset());
}
+
+HInstruction* HNullarySIMDOperation::New(
+ Zone* zone, HValue* context, BuiltinFunctionId op) {
+ return new(zone) HNullarySIMDOperation(context, op);
+}
+
+
+HInstruction* HUnarySIMDOperation::New(
+ Zone* zone, HValue* context, HValue* value, BuiltinFunctionId op,
+ Representation to) {
+ return new(zone) HUnarySIMDOperation(context, value, op, to);
+}
+
+
+HInstruction* HBinarySIMDOperation::New(
+ Zone* zone, HValue* context, HValue* left, HValue* right,
+ BuiltinFunctionId op) {
+ return new(zone) HBinarySIMDOperation(context, left, right, op);
+}
+
+
+HInstruction* HTernarySIMDOperation::New(
+ Zone* zone, HValue* context, HValue* mask, HValue* left, HValue* right,
+ BuiltinFunctionId op) {
+ return new(zone) HTernarySIMDOperation(context, mask, left, right, op);
+}
+
+
+HInstruction* HQuarternarySIMDOperation::New(
+ Zone* zone, HValue* context, HValue* x, HValue* y, HValue* z, HValue* w,
+ BuiltinFunctionId op) {
+ return new(zone) HQuarternarySIMDOperation(context, x, y, z, w, op);
+}
+
+
+const char* HNullarySIMDOperation::OpName() const {
+ switch (op()) {
+#define SIMD_NULLARY_OPERATION_CASE_ITEM(module, function, name, p4) \
+ case k##name: \
+ return #module "." #function;
+SIMD_NULLARY_OPERATIONS(SIMD_NULLARY_OPERATION_CASE_ITEM)
+#undef SIMD_NULLARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+void HNullarySIMDOperation::PrintDataTo(StringStream* stream) {
+ const char* name = OpName();
+ stream->Add("%s", name);
+}
+
+
+const char* HUnarySIMDOperation::OpName() const {
+ switch (op()) {
+#define SIMD_UNARY_OPERATION_CASE_ITEM(module, function, name, p4, p5) \
+ case k##name: \
+ return #module "." #function;
+SIMD_UNARY_OPERATIONS(SIMD_UNARY_OPERATION_CASE_ITEM)
+SIMD_UNARY_OPERATIONS_FOR_PROPERTY_ACCESS(SIMD_UNARY_OPERATION_CASE_ITEM)
+#undef SIMD_UNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+void HUnarySIMDOperation::PrintDataTo(StringStream* stream) {
+ const char* name = OpName();
+ stream->Add("%s ", name);
+ value()->PrintNameTo(stream);
+}
+
+
+const char* HBinarySIMDOperation::OpName() const {
+ switch (op()) {
+#define SIMD_BINARY_OPERATION_CASE_ITEM(module, function, name, p4, p5, p6) \
+ case k##name: \
+ return #module "." #function;
+SIMD_BINARY_OPERATIONS(SIMD_BINARY_OPERATION_CASE_ITEM)
+#undef SIMD_BINARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+void HBinarySIMDOperation::PrintDataTo(StringStream* stream) {
+ const char* name = OpName();
+ stream->Add("%s ", name);
+ left()->PrintNameTo(stream);
+ stream->Add(" ");
+ right()->PrintNameTo(stream);
+}
+
+
+const char* HTernarySIMDOperation::OpName() const {
+ switch (op()) {
+#define SIMD_TERNARY_OPERATION_CASE_ITEM(module, function, name, p4, p5, p6, \
+ p7) \
+ case k##name: \
+ return #module "." #function;
+SIMD_TERNARY_OPERATIONS(SIMD_TERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_TERNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+void HTernarySIMDOperation::PrintDataTo(StringStream* stream) {
+ const char* name = OpName();
+ stream->Add("%s ", name);
+ first()->PrintNameTo(stream);
+ stream->Add(" ");
+ second()->PrintNameTo(stream);
+ stream->Add(" ");
+ third()->PrintNameTo(stream);
+}
+
+
+const char* HQuarternarySIMDOperation::OpName() const {
+ switch (op()) {
+#define SIMD_QUARTERNARY_OPERATION_CASE_ITEM(module, function, name, p4, p5, \
+ p6, p7, p8) \
+ case k##name: \
+ return #module "." #function;
+SIMD_QUARTERNARY_OPERATIONS(SIMD_QUARTERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_QUARTERNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+void HQuarternarySIMDOperation::PrintDataTo(StringStream* stream) {
+ const char* name = OpName();
+ stream->Add("%s ", name);
+ x()->PrintNameTo(stream);
+ stream->Add(" ");
+ y()->PrintNameTo(stream);
+ stream->Add(" ");
+ z()->PrintNameTo(stream);
+ stream->Add(" ");
+ w()->PrintNameTo(stream);
+}
+
+
} } // namespace v8::internal
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
+ V(NullarySIMDOperation) \
+ V(UnarySIMDOperation) \
+ V(BinarySIMDOperation) \
+ V(TernarySIMDOperation) \
+ V(QuarternarySIMDOperation) \
V(UnknownOSRValue) \
V(UseConst) \
V(WrapReceiver)
HType t = type();
if (t.IsSmi()) return Representation::Smi();
if (t.IsHeapNumber()) return Representation::Double();
+ if (t.IsFloat32x4()) return Representation::Float32x4();
+ if (t.IsFloat64x2()) return Representation::Float64x2();
+ if (t.IsInt32x4()) return Representation::Int32x4();
if (t.IsHeapObject()) return r;
return Representation::None();
}
HType type() const { return type_; }
void set_type(HType new_type) {
- ASSERT(new_type.IsSubtypeOf(type_));
+ // TODO(ningxin): for SIMD ops, the initial type is None which
+ // hit the following ASSERT.
+ // ASSERT(new_type.IsSubtypeOf(type_));
type_ = new_type;
}
if (value->representation().IsSmi() || value->type().IsSmi()) {
set_type(HType::Smi());
} else {
- set_type(HType::TaggedNumber());
+ if (to.IsFloat32x4()) {
+ set_type(HType::Float32x4());
+ } else if (to.IsFloat64x2()) {
+ set_type(HType::Float64x2());
+ } else if (to.IsInt32x4()) {
+ set_type(HType::Int32x4());
+ } else {
+ set_type(HType::TaggedNumber());
+ }
if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
}
}
Representation::Integer32());
}
+ static HObjectAccess ForSIMD128Double0() {
+ return HObjectAccess(
+ kDouble, Float32x4::kValueOffset, Representation::Double());
+ }
+
+ static HObjectAccess ForSIMD128Double1() {
+ return HObjectAccess(kDouble,
+ Float32x4::kValueOffset + kDoubleSize,
+ Representation::Double());
+ }
+
static HObjectAccess ForElementsPointer() {
return HObjectAccess(kElementsPointer, JSObject::kElementsOffset);
}
Representation::UInteger16());
}
+ static HObjectAccess ForMapPrototype() {
+ return HObjectAccess(kInobject, Map::kPrototypeOffset);
+ }
+
static HObjectAccess ForPropertyCellValue() {
return HObjectAccess(kInobject, PropertyCell::kValueOffset);
}
elements_kind == FLOAT32_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
set_representation(Representation::Double());
+ } else if (IsFloat32x4ElementsKind(elements_kind)) {
+ set_representation(CpuFeatures::SupportsSIMD128InCrankshaft() ?
+ Representation::Float32x4() : Representation::Tagged());
+ } else if (IsFloat64x2ElementsKind(elements_kind)) {
+ set_representation(CpuFeatures::SupportsSIMD128InCrankshaft() ?
+ Representation::Float64x2() : Representation::Tagged());
+ } else if (IsInt32x4ElementsKind(elements_kind)) {
+ set_representation(CpuFeatures::SupportsSIMD128InCrankshaft() ?
+ Representation::Int32x4() : Representation::Tagged());
} else {
set_representation(Representation::Integer32());
}
}
ASSERT_EQ(index, 2);
+
if (IsDoubleOrFloatElementsKind(elements_kind())) {
return Representation::Double();
}
+ if (IsFloat32x4ElementsKind(elements_kind())) {
+ return CpuFeatures::SupportsSIMD128InCrankshaft() ?
+ Representation::Float32x4() : Representation::Tagged();
+ }
+ if (IsFloat64x2ElementsKind(elements_kind())) {
+ return CpuFeatures::SupportsSIMD128InCrankshaft() ?
+ Representation::Float64x2() : Representation::Tagged();
+ }
+ if (IsInt32x4ElementsKind(elements_kind())) {
+ return CpuFeatures::SupportsSIMD128InCrankshaft() ?
+ Representation::Int32x4() : Representation::Tagged();
+ }
if (SmiValuesAre32Bits() && store_mode_ == STORE_TO_INITIALIZED_ENTRY) {
return Representation::Integer32();
}
if (IsDoubleOrFloatElementsKind(elements_kind())) {
return Representation::Double();
}
+ if (IsFloat32x4ElementsKind(elements_kind())) {
+ return CpuFeatures::SupportsSIMD128InCrankshaft() ?
+ Representation::Float32x4() : Representation::Tagged();
+ }
+ if (IsFloat64x2ElementsKind(elements_kind())) {
+ return CpuFeatures::SupportsSIMD128InCrankshaft() ?
+ Representation::Float64x2() : Representation::Tagged();
+ }
+ if (IsInt32x4ElementsKind(elements_kind())) {
+ return CpuFeatures::SupportsSIMD128InCrankshaft() ?
+ Representation::Int32x4() : Representation::Tagged();
+ }
if (SmiValuesAre32Bits() && store_mode_ == STORE_TO_INITIALIZED_ENTRY) {
return Representation::Integer32();
}
};
+class HNullarySIMDOperation V8_FINAL : public HTemplateInstruction<1> {
+ public:
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ BuiltinFunctionId op);
+
+ HValue* context() { return OperandAt(0); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::Tagged();
+ }
+
+ BuiltinFunctionId op() const { return op_; }
+ const char* OpName() const;
+
+ DECLARE_CONCRETE_INSTRUCTION(NullarySIMDOperation)
+
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ HNullarySIMDOperation* b = HNullarySIMDOperation::cast(other);
+ return op_ == b->op();
+ }
+
+ private:
+ HNullarySIMDOperation(HValue* context, BuiltinFunctionId op)
+ : HTemplateInstruction<1>(HType::None()), op_(op) {
+ SetOperandAt(0, context);
+ switch (op) {
+#define SIMD_NULLARY_OPERATION_CASE_ITEM(p1, p2, name, representation) \
+ case k##name: \
+ set_representation(Representation::representation()); \
+ set_type(HType::FromRepresentation(representation_)); \
+ break;
+SIMD_NULLARY_OPERATIONS(SIMD_NULLARY_OPERATION_CASE_ITEM)
+#undef SIMD_NULLARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ }
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+
+ BuiltinFunctionId op_;
+};
+
+
+class HUnarySIMDOperation V8_FINAL : public HTemplateInstruction<2> {
+ public:
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* value,
+ BuiltinFunctionId op,
+ Representation to = Representation::Float32x4());
+
+ HValue* context() { return OperandAt(0); }
+ HValue* value() { return OperandAt(1); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ if (index == 0) {
+ return Representation::Tagged();
+ } else if (op_ == kSIMD128Change) {
+ return value()->representation();
+ } else {
+ switch (op_) {
+#define SIMD_UNARY_OPERATION_CASE_ITEM(p1, p2, name, p4, representation) \
+ case k##name: \
+ return Representation::representation();
+SIMD_UNARY_OPERATIONS(SIMD_UNARY_OPERATION_CASE_ITEM)
+SIMD_UNARY_OPERATIONS_FOR_PROPERTY_ACCESS(SIMD_UNARY_OPERATION_CASE_ITEM)
+#undef SIMD_UNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return Representation::None();
+ }
+ }
+ }
+
+ BuiltinFunctionId op() const { return op_; }
+ const char* OpName() const;
+
+ DECLARE_CONCRETE_INSTRUCTION(UnarySIMDOperation)
+
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ HUnarySIMDOperation* b = HUnarySIMDOperation::cast(other);
+ return op_ == b->op();
+ }
+
+ private:
+ HUnarySIMDOperation(HValue* context, HValue* value, BuiltinFunctionId op,
+ Representation to = Representation::Float32x4())
+ : HTemplateInstruction<2>(HType::None()), op_(op) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, value);
+ switch (op) {
+ case kSIMD128Change:
+ set_representation(to);
+ set_type(HType::FromRepresentation(to));
+ break;
+#define SIMD_UNARY_OPERATION_CASE_ITEM(p1, p2, name, representation, p5) \
+ case k##name: \
+ set_representation(Representation::representation()); \
+ set_type(HType::FromRepresentation(representation_)); \
+ if (Representation::p5().IsInteger32()) { \
+ SetFlag(kTruncatingToInt32); \
+ } \
+ break;
+SIMD_UNARY_OPERATIONS(SIMD_UNARY_OPERATION_CASE_ITEM)
+SIMD_UNARY_OPERATIONS_FOR_PROPERTY_ACCESS(SIMD_UNARY_OPERATION_CASE_ITEM)
+#undef SIMD_UNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ }
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+
+ BuiltinFunctionId op_;
+};
+
+
+class HBinarySIMDOperation V8_FINAL : public HTemplateInstruction<3> {
+ public:
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right,
+ BuiltinFunctionId op);
+
+ HValue* context() { return OperandAt(0); }
+ HValue* left() { return OperandAt(1); }
+ HValue* right() { return OperandAt(2); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ if (index == 0) {
+ return Representation::Tagged();
+ } else {
+ switch (op_) {
+#define SIMD_BINARY_OPERATION_CASE_ITEM(p1, p2, name, p4, left_representation, \
+ right_representation) \
+ case k##name: \
+ return index == 1 ? Representation::left_representation() \
+ : Representation::right_representation(); \
+ break;
+SIMD_BINARY_OPERATIONS(SIMD_BINARY_OPERATION_CASE_ITEM)
+#undef SIMD_BINARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return Representation::None();
+ }
+ }
+ }
+
+ BuiltinFunctionId op() const { return op_; }
+ const char* OpName() const;
+
+ DECLARE_CONCRETE_INSTRUCTION(BinarySIMDOperation)
+
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ HBinarySIMDOperation* b = HBinarySIMDOperation::cast(other);
+ return op_ == b->op();
+ }
+
+ private:
+ HBinarySIMDOperation(HValue* context, HValue* left, HValue* right,
+ BuiltinFunctionId op)
+ : HTemplateInstruction<3>(HType::None()), op_(op) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, left);
+ SetOperandAt(2, right);
+ switch (op) {
+#define SIMD_BINARY_OPERATION_CASE_ITEM(p1, p2, name, representation, p5, p6) \
+ case k##name: \
+ set_representation(Representation::representation()); \
+ set_type(HType::FromRepresentation(representation_)); \
+ if (Representation::p5().IsInteger32() || \
+ Representation::p6().IsInteger32()) { \
+ SetFlag(kTruncatingToInt32); \
+ } \
+ break;
+SIMD_BINARY_OPERATIONS(SIMD_BINARY_OPERATION_CASE_ITEM)
+#undef SIMD_BINARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ }
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+
+ BuiltinFunctionId op_;
+};
+
+
+class HTernarySIMDOperation V8_FINAL : public HTemplateInstruction<4> {
+ public:
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* first,
+ HValue* second,
+ HValue* third,
+ BuiltinFunctionId op);
+
+ HValue* context() { return OperandAt(0); }
+ HValue* first() { return OperandAt(1); }
+ HValue* second() { return OperandAt(2); }
+ HValue* third() { return OperandAt(3); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ if (index == 0) {
+ return Representation::Tagged();
+ } else {
+ switch (op_) {
+#define SIMD_TERNARY_OPERATION_CASE_ITEM(p1, p2, name, p4, \
+ first_representation, second_representation, third_representation) \
+ case k##name: \
+ switch (index) { \
+ case 1: return Representation::first_representation(); \
+ case 2: return Representation::second_representation(); \
+ case 3: return Representation::third_representation(); \
+ default: \
+ UNREACHABLE(); \
+ return Representation::None(); \
+ }
+SIMD_TERNARY_OPERATIONS(SIMD_TERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_TERNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return Representation::None();
+ }
+ }
+ }
+
+ BuiltinFunctionId op() const { return op_; }
+ const char* OpName() const;
+
+ DECLARE_CONCRETE_INSTRUCTION(TernarySIMDOperation)
+
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ HTernarySIMDOperation* b = HTernarySIMDOperation::cast(other);
+ return op_ == b->op();
+ }
+
+ private:
+ HTernarySIMDOperation(HValue* context, HValue* first, HValue* second,
+ HValue* third, BuiltinFunctionId op)
+ : HTemplateInstruction<4>(HType::None()), op_(op) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, first);
+ SetOperandAt(2, second);
+ SetOperandAt(3, third);
+ switch (op) {
+#define SIMD_TERNARY_OPERATION_CASE_ITEM(p1, p2, name, representation, p5, \
+ p6, p7) \
+ case k##name: \
+ set_representation(Representation::representation()); \
+ set_type(HType::FromRepresentation(representation_)); \
+ if (Representation::p5().IsInteger32() || \
+ Representation::p6().IsInteger32() || \
+ Representation::p7().IsInteger32()) { \
+ SetFlag(kTruncatingToInt32); \
+ } \
+ break;
+SIMD_TERNARY_OPERATIONS(SIMD_TERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_TERNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ }
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+
+ BuiltinFunctionId op_;
+};
+
+
+class HQuarternarySIMDOperation V8_FINAL : public HTemplateInstruction<5> {
+ public:
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* x,
+ HValue* y,
+ HValue* z,
+ HValue* w,
+ BuiltinFunctionId op);
+
+ HValue* context() { return OperandAt(0); }
+ HValue* x() { return OperandAt(1); }
+ HValue* y() { return OperandAt(2); }
+ HValue* z() { return OperandAt(3); }
+ HValue* w() { return OperandAt(4); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ if (index == 0) {
+ return Representation::Tagged();
+ } else {
+ switch (op_) {
+#define SIMD_QUARTERNARY_OPERATION_CASE_ITEM(p1, p2, name, p4, \
+ first_representation, second_representation, third_representation, \
+ fourth_representation) \
+ case k##name: \
+ switch (index) { \
+ case 1: return Representation::first_representation(); \
+ case 2: return Representation::second_representation(); \
+ case 3: return Representation::third_representation(); \
+ case 4: return Representation::fourth_representation(); \
+ default: \
+ UNREACHABLE(); \
+ return Representation::None(); \
+ }
+SIMD_QUARTERNARY_OPERATIONS(SIMD_QUARTERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_QUARTERNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return Representation::None();
+ }
+ }
+ }
+
+ BuiltinFunctionId op() const { return op_; }
+ const char* OpName() const;
+
+ DECLARE_CONCRETE_INSTRUCTION(QuarternarySIMDOperation)
+
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ HQuarternarySIMDOperation* b = HQuarternarySIMDOperation::cast(other);
+ return op_ == b->op();
+ }
+
+ private:
+ HQuarternarySIMDOperation(HValue* context, HValue* x, HValue* y, HValue* z,
+ HValue* w, BuiltinFunctionId op)
+ : HTemplateInstruction<5>(HType::None()), op_(op) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, x);
+ SetOperandAt(2, y);
+ SetOperandAt(3, z);
+ SetOperandAt(4, w);
+ switch (op) {
+#define SIMD_QUARTERNARY_OPERATION_CASE_ITEM(p1, p2, name, representation, p5, \
+ p6, p7, p8) \
+ case k##name: \
+ set_representation(Representation::representation()); \
+ set_type(HType::FromRepresentation(representation_)); \
+ if (Representation::p5().IsInteger32() || \
+ Representation::p6().IsInteger32() || \
+ Representation::p7().IsInteger32() || \
+ Representation::p8().IsInteger32()) { \
+ SetFlag(kTruncatingToInt32); \
+ } \
+ break;
+SIMD_QUARTERNARY_OPERATIONS(SIMD_QUARTERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_QUARTERNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ }
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+
+ BuiltinFunctionId op_;
+};
+
#undef DECLARE_INSTRUCTION
#undef DECLARE_CONCRETE_INSTRUCTION
}
if (new_value == NULL) {
- new_value = new(graph()->zone()) HChange(
- value, to, is_truncating_to_smi, is_truncating_to_int);
+ if (((to.IsFloat32x4() || to.IsFloat64x2() || to.IsInt32x4()) &&
+ !value->representation().IsTagged()) ||
+ ((value->representation().IsFloat32x4() ||
+ value->representation().IsFloat64x2() ||
+ value->representation().IsInt32x4()) &&
+ !to.IsTagged())) {
+ new_value = HUnarySIMDOperation::New(graph()->zone(),
+ graph()->entry_block()->last_environment()->context(),
+ value, kSIMD128Change, to);
+ } else {
+ new_value = new(graph()->zone()) HChange(
+ value, to, is_truncating_to_smi, is_truncating_to_int);
+ }
if (!use_value->operand_position(use_index).IsUnknown()) {
new_value->set_position(use_value->operand_position(use_index));
} else {
#include "src/hydrogen-types.h"
+#include "src/property-details.h"
#include "src/types-inl.h"
if (value->IsSmi()) return HType::Smi();
if (value->IsNull()) return HType::Null();
if (value->IsHeapNumber()) return HType::HeapNumber();
+ if (value->IsFloat32x4()) return HType::Float32x4();
+ if (value->IsFloat64x2()) return HType::Float64x2();
+ if (value->IsInt32x4()) return HType::Int32x4();
if (value->IsString()) return HType::String();
if (value->IsBoolean()) return HType::Boolean();
if (value->IsUndefined()) return HType::Undefined();
}
+// static
+HType HType::FromRepresentation(Representation representation) {
+ HType result = HType::Tagged();
+ if (representation.IsSmi()) {
+ result = HType::Smi();
+ } else if (representation.IsDouble()) {
+ result = HType::HeapNumber();
+ } else if (representation.IsFloat32x4()) {
+ result = HType::Float32x4();
+ } else if (representation.IsFloat64x2()) {
+ result = HType::Float64x2();
+ } else if (representation.IsInt32x4()) {
+ result = HType::Int32x4();
+ }
+ return result;
+}
+
+
const char* HType::ToString() const {
// Note: The c1visualizer syntax for locals allows only a sequence of the
// following characters: A-Za-z0-9_-|:
// Forward declarations.
template <typename T> class Handle;
class Object;
+class Representation;
#define HTYPE_LIST(V) \
V(Any, 0x0) /* 0000 0000 0000 0000 */ \
V(HeapPrimitive, 0x25) /* 0000 0000 0010 0101 */ \
V(Null, 0x27) /* 0000 0000 0010 0111 */ \
V(HeapNumber, 0x2d) /* 0000 0000 0010 1101 */ \
- V(String, 0x65) /* 0000 0000 0110 0101 */ \
- V(Boolean, 0xa5) /* 0000 0000 1010 0101 */ \
- V(Undefined, 0x125) /* 0000 0001 0010 0101 */ \
- V(JSObject, 0x221) /* 0000 0010 0010 0001 */ \
- V(JSArray, 0x621) /* 0000 0110 0010 0001 */ \
- V(None, 0x7ff) /* 0000 0111 1111 1111 */
+ V(Float32x4, 0x65) /* 0000 0000 0110 0101 */ \
+ V(Float64x2, 0xa5) /* 0000 0000 1010 0101 */ \
+ V(Int32x4, 0x125) /* 0000 0001 0010 0101 */ \
+ V(String, 0x225) /* 0000 0010 0010 0101 */ \
+ V(Boolean, 0x425) /* 0000 0100 0010 0101 */ \
+ V(Undefined, 0x825) /* 0000 1000 0010 0101 */ \
+ V(JSObject, 0x1021) /* 0001 0000 0010 0001 */ \
+ V(JSArray, 0x3021) /* 0011 0000 0010 0001 */ \
+ V(None, 0x3fff) /* 0011 1111 1111 1111 */
class HType V8_FINAL {
public:
template <class T>
static HType FromType(typename T::TypeHandle type) V8_WARN_UNUSED_RESULT;
- static HType FromValue(Handle<Object> value) V8_WARN_UNUSED_RESULT;
+ static HType FromValue(Handle<Object> value);
+ static HType FromRepresentation(Representation representation);
const char* ToString() const V8_WARN_UNUSED_RESULT;
CHECK_ALIVE(store = BuildNamedGeneric(
STORE, literal, name, value));
} else {
- PropertyAccessInfo info(this, STORE, ToType(map), name);
+ PropertyAccessInfo info(
+ this, STORE, ToType(map), name, map->instance_type());
if (info.CanAccessMonomorphic()) {
HValue* checked_literal = Add<HCheckMaps>(literal, map);
ASSERT(!info.lookup()->IsPropertyCallbacks());
bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessMonomorphic() {
+ if (IsSIMD128PropertyCallback() &&
+ CpuFeatures::SupportsSIMD128InCrankshaft()) {
+ return true;
+ }
if (!CanInlinePropertyAccess(type_)) return false;
if (IsJSObjectFieldAccessor()) return IsLoad();
if (!LookupDescriptor()) return false;
STATIC_ASSERT(kMaxLoadPolymorphism == kMaxStorePolymorphism);
if (types->length() > kMaxLoadPolymorphism) return false;
+ if (IsSIMD128PropertyCallback() &&
+ CpuFeatures::SupportsSIMD128InCrankshaft()) {
+ for (int i = 1; i < types->length(); ++i) {
+ if (types->at(i)->instance_type() != types->first()->instance_type()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
HObjectAccess access = HObjectAccess::ForMap(); // bogus default
if (GetJSObjectFieldAccess(&access)) {
for (int i = 1; i < types->length(); ++i) {
PropertyAccessInfo test_info(
- builder_, access_type_, ToType(types->at(i)), name_);
+ builder_, access_type_, ToType(types->at(i)), name_,
+ types->at(i)->instance_type());
HObjectAccess test_access = HObjectAccess::ForMap(); // bogus default
if (!test_info.GetJSObjectFieldAccess(&test_access)) return false;
if (!access.Equals(test_access)) return false;
for (int i = 1; i < types->length(); ++i) {
PropertyAccessInfo test_info(
- builder_, access_type_, ToType(types->at(i)), name_);
+ builder_, access_type_, ToType(types->at(i)), name_,
+ types->at(i)->instance_type());
if (!test_info.IsCompatible(this)) return false;
}
}
+static bool IsSIMDProperty(Handle<String> name, uint8_t* mask) {
+ SmartArrayPointer<char> cstring = name->ToCString();
+ int i = 0;
+ while (i <= 3) {
+ int shift = 0;
+ switch (cstring[i]) {
+ case 'W':
+ shift++;
+ case 'Z':
+ shift++;
+ case 'Y':
+ shift++;
+ case 'X':
+ break;
+ default:
+ return false;
+ }
+ *mask |= (shift << 2*i);
+ i++;
+ }
+
+ return true;
+}
+
+
HInstruction* HOptimizedGraphBuilder::BuildMonomorphicAccess(
PropertyAccessInfo* info,
HValue* object,
if (info->lookup()->IsField()) {
if (info->IsLoad()) {
+ if (info->map()->constructor()->IsJSFunction()) {
+ JSFunction* constructor = JSFunction::cast(info->map()->constructor());
+ String* class_name =
+ String::cast(constructor->shared()->instance_class_name());
+ uint8_t mask = 0;
+ if (class_name->Equals(isolate()->heap()->simd()) &&
+ IsSIMDProperty(info->name(), &mask) &&
+ CpuFeatures::SupportsSIMD128InCrankshaft()) {
+ return New<HConstant>(mask);
+ }
+ }
return BuildLoadNamedField(info, checked_holder);
} else {
return BuildStoreNamedField(info, checked_object, value);
bool handle_smi = false;
STATIC_ASSERT(kMaxLoadPolymorphism == kMaxStorePolymorphism);
for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
- PropertyAccessInfo info(this, access_type, ToType(types->at(i)), name);
+ PropertyAccessInfo info(
+ this, access_type, ToType(types->at(i)), name,
+ types->at(i)->instance_type());
if (info.type()->Is(Type::String())) {
if (handled_string) continue;
handled_string = true;
handled_string = false;
for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
- PropertyAccessInfo info(this, access_type, ToType(types->at(i)), name);
+ PropertyAccessInfo info(
+ this, access_type, ToType(types->at(i)), name,
+ types->at(i)->instance_type());
if (info.type()->Is(Type::String())) {
if (handled_string) continue;
handled_string = true;
}
+static bool AreInt32x4Types(SmallMapList* types) {
+ if (types == NULL || types->length() == 0) return false;
+ for (int i = 0; i < types->length(); i++) {
+ if (types->at(i)->instance_type() != INT32x4_TYPE) return false;
+ }
+ return true;
+}
+
+
+static bool AreFloat32x4Types(SmallMapList* types) {
+ if (types == NULL || types->length() == 0) return false;
+ for (int i = 0; i < types->length(); i++) {
+ if (types->at(i)->instance_type() != FLOAT32x4_TYPE) return false;
+ }
+ return true;
+}
+
+
+static bool AreFloat64x2Types(SmallMapList* types) {
+ if (types == NULL || types->length() == 0) return false;
+ for (int i = 0; i < types->length(); i++) {
+ if (types->at(i)->instance_type() != FLOAT64x2_TYPE) return false;
+ }
+ return true;
+}
+
+
+static BuiltinFunctionId NameToId(Isolate* isolate, Handle<String> name,
+ InstanceType type) {
+ BuiltinFunctionId id;
+ if (name->Equals(isolate->heap()->signMask())) {
+ if (type == FLOAT32x4_TYPE) {
+ id = kFloat32x4GetSignMask;
+ } else if (type == FLOAT64x2_TYPE) {
+ id = kFloat64x2GetSignMask;
+ } else {
+ ASSERT(type == INT32x4_TYPE);
+ id = kInt32x4GetSignMask;
+ }
+ } else if (name->Equals(isolate->heap()->x())) {
+ if (type == FLOAT32x4_TYPE) {
+ id = kFloat32x4GetX;
+ } else if (type == FLOAT64x2_TYPE) {
+ id = kFloat64x2GetX;
+ } else {
+ ASSERT(type == INT32x4_TYPE);
+ id = kInt32x4GetX;
+ }
+ } else if (name->Equals(isolate->heap()->y())) {
+ if (type == FLOAT32x4_TYPE) {
+ id = kFloat32x4GetY;
+ } else if (type == FLOAT64x2_TYPE) {
+ id = kFloat64x2GetY;
+ } else {
+ ASSERT(type == INT32x4_TYPE);
+ id = kInt32x4GetY;
+ }
+ } else if (name->Equals(isolate->heap()->z())) {
+ id = type == FLOAT32x4_TYPE ? kFloat32x4GetZ : kInt32x4GetZ;
+ } else if (name->Equals(isolate->heap()->w())) {
+ id = type == FLOAT32x4_TYPE ? kFloat32x4GetW : kInt32x4GetW;
+ } else if (name->Equals(isolate->heap()->flagX())) {
+ ASSERT(type == INT32x4_TYPE);
+ id = kInt32x4GetFlagX;
+ } else if (name->Equals(isolate->heap()->flagY())) {
+ ASSERT(type == INT32x4_TYPE);
+ id = kInt32x4GetFlagY;
+ } else if (name->Equals(isolate->heap()->flagZ())) {
+ ASSERT(type == INT32x4_TYPE);
+ id = kInt32x4GetFlagZ;
+ } else if (name->Equals(isolate->heap()->flagW())) {
+ ASSERT(type == INT32x4_TYPE);
+ id = kInt32x4GetFlagW;
+ } else {
+ UNREACHABLE();
+ id = kSIMD128Unreachable;
+ }
+
+ return id;
+}
+
+
void HOptimizedGraphBuilder::BuildStore(Expression* expr,
Property* prop,
BailoutId ast_id,
ASSERT(types != NULL);
if (types->length() > 0) {
- PropertyAccessInfo info(this, access, ToType(types->first()), name);
+ PropertyAccessInfo info(
+ this, access, ToType(types->first()), name,
+ types->first()->instance_type());
if (!info.CanAccessAsMonomorphic(types)) {
HandlePolymorphicNamedFieldAccess(
access, ast_id, return_id, object, value, types, name);
// Type::Number() is only supported by polymorphic load/call handling.
ASSERT(!info.type()->Is(Type::Number()));
BuildCheckHeapObject(object);
+
if (AreStringTypes(types)) {
checked_object =
Add<HCheckInstanceType>(object, HCheckInstanceType::IS_STRING);
+ } else if (info.IsSIMD128PropertyCallback() &&
+ AreFloat32x4Types(types) &&
+ CpuFeatures::SupportsSIMD128InCrankshaft()) {
+ Handle<JSFunction> function(
+ isolate()->native_context()->float32x4_function());
+ HInstruction* constant_function = Add<HConstant>(function);
+ HObjectAccess map_access = HObjectAccess::ForPrototypeOrInitialMap();
+ HInstruction* map = Add<HLoadNamedField>(
+ constant_function, static_cast<HValue*>(NULL), map_access);
+ HObjectAccess prototype_access = HObjectAccess::ForMapPrototype();
+ HInstruction* prototype = Add<HLoadNamedField>(
+ map, static_cast<HValue*>(NULL), prototype_access);
+ Handle<Map> initial_function_prototype_map(
+ isolate()->native_context()->float32x4_function_prototype_map());
+ Add<HCheckMaps>(prototype, initial_function_prototype_map);
+ BuiltinFunctionId id = NameToId(isolate(), name, FLOAT32x4_TYPE);
+ return NewUncasted<HUnarySIMDOperation>(object, id);
+ } else if (info.IsSIMD128PropertyCallback() &&
+ AreFloat64x2Types(types) &&
+ CpuFeatures::SupportsSIMD128InCrankshaft()) {
+ Handle<JSFunction> function(
+ isolate()->native_context()->float64x2_function());
+ HInstruction* constant_function = Add<HConstant>(function);
+ HObjectAccess map_access = HObjectAccess::ForPrototypeOrInitialMap();
+ HInstruction* map = Add<HLoadNamedField>(
+ constant_function, static_cast<HValue*>(NULL), map_access);
+ HObjectAccess prototype_access = HObjectAccess::ForMapPrototype();
+ HInstruction* prototype = Add<HLoadNamedField>(
+ map, static_cast<HValue*>(NULL), prototype_access);
+ Handle<Map> initial_function_prototype_map(
+ isolate()->native_context()->float64x2_function_prototype_map());
+ Add<HCheckMaps>(prototype, initial_function_prototype_map);
+ BuiltinFunctionId id = NameToId(isolate(), name, FLOAT64x2_TYPE);
+ return NewUncasted<HUnarySIMDOperation>(object, id);
+ } else if (info.IsSIMD128PropertyCallback() &&
+ AreInt32x4Types(types) &&
+ CpuFeatures::SupportsSIMD128InCrankshaft()) {
+ Handle<JSFunction> function(
+ isolate()->native_context()->int32x4_function());
+ HInstruction* constant_function = Add<HConstant>(function);
+ HObjectAccess map_access = HObjectAccess::ForPrototypeOrInitialMap();
+ HInstruction* map = Add<HLoadNamedField>(
+ constant_function, static_cast<HValue*>(NULL), map_access);
+ HObjectAccess prototype_access = HObjectAccess::ForMapPrototype();
+ HInstruction* prototype = Add<HLoadNamedField>(
+ map, static_cast<HValue*>(NULL), prototype_access);
+ Handle<Map> initial_function_prototype_map(
+ isolate()->native_context()->int32x4_function_prototype_map());
+ Add<HCheckMaps>(prototype, initial_function_prototype_map);
+ BuiltinFunctionId id = NameToId(isolate(), name, INT32x4_TYPE);
+ return NewUncasted<HUnarySIMDOperation>(object, id);
} else {
checked_object = Add<HCheckMaps>(object, types);
}
for (int i = 0;
i < types->length() && ordered_functions < kMaxCallPolymorphism;
++i) {
- PropertyAccessInfo info(this, LOAD, ToType(types->at(i)), name);
+ PropertyAccessInfo info(
+ this, LOAD, ToType(types->at(i)), name,
+ types->at(i)->instance_type());
if (info.CanAccessMonomorphic() &&
info.lookup()->IsConstant() &&
info.constant()->IsJSFunction()) {
for (int fn = 0; fn < ordered_functions; ++fn) {
int i = order[fn].index();
- PropertyAccessInfo info(this, LOAD, ToType(types->at(i)), name);
+ PropertyAccessInfo info(this, LOAD, ToType(types->at(i)), name,
+ types->at(i)->instance_type());
if (info.type()->Is(Type::String())) {
if (handled_string) continue;
handled_string = true;
return true;
}
break;
+#define SIMD_NULLARY_OPERATION_CASE_ITEM(p1, p2, name, p4) \
+ case k##name:
+SIMD_NULLARY_OPERATIONS(SIMD_NULLARY_OPERATION_CASE_ITEM)
+#undef SIMD_NULLARY_OPERATION_CASE_ITEM
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() &&
+ expr->arguments()->length() == 0) {
+ Drop(2); // Receiver and function.
+ HInstruction* op = NewUncasted<HNullarySIMDOperation>(id);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ }
+ break;
+#define SIMD_UNARY_OPERATION_CASE_ITEM(p1, p2, name, p4, p5) \
+ case k##name:
+SIMD_UNARY_OPERATIONS(SIMD_UNARY_OPERATION_CASE_ITEM)
+#undef SIMD_UNARY_OPERATION_CASE_ITEM
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() &&
+ expr->arguments()->length() == 1) {
+ HValue* argument = Pop();
+ Drop(2); // Receiver and function.
+ HInstruction* op = NewUncasted<HUnarySIMDOperation>(argument, id);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ }
+ break;
+#define SIMD_BINARY_OPERATION_CASE_ITEM(p1, p2, name, p4, p5, p6) \
+ case k##name:
+SIMD_BINARY_OPERATIONS(SIMD_BINARY_OPERATION_CASE_ITEM)
+#undef SIMD_BINARY_OPERATION_CASE_ITEM
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() &&
+ expr->arguments()->length() == 2) {
+ HValue* right = Pop();
+ HValue* left = Pop();
+ Drop(2); // Receiver and function.
+ HInstruction* op = NewUncasted<HBinarySIMDOperation>(left, right, id);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ }
+ break;
+#define SIMD_TERNARY_OPERATION_CASE_ITEM(p1, p2, name, p4, p5, p6, p7) \
+ case k##name:
+SIMD_TERNARY_OPERATIONS(SIMD_TERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_TERNARY_OPERATION_CASE_ITEM
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() &&
+ expr->arguments()->length() == 3) {
+ HValue* right = Pop();
+ HValue* left = Pop();
+ HValue* value = Pop();
+ Drop(2); // Receiver and function.
+ HInstruction* op =
+ NewUncasted<HTernarySIMDOperation>(value, left, right, id);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ }
+ break;
+#define SIMD_QUARTERNARY_OPERATION_CASE_ITEM(p1, p2, name, p4, p5, p6, p7, p8) \
+ case k##name:
+SIMD_QUARTERNARY_OPERATIONS(SIMD_QUARTERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_QUARTERNARY_OPERATION_CASE_ITEM
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() &&
+ expr->arguments()->length() == 4) {
+ HValue* w = Pop();
+ HValue* z = Pop();
+ HValue* y = Pop();
+ HValue* x = Pop();
+ Drop(2); // Receiver and function.
+ HInstruction* op =
+ NewUncasted<HQuarternarySIMDOperation>(x, y, z, w, id);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ }
+ break;
default:
// Not supported for inlining yet.
break;
ast_context()->ReturnValue(index);
return true;
}
+#define SIMD_NULLARY_OPERATION_CASE_ITEM(p1, p2, name, p4) \
+ case k##name:
+SIMD_NULLARY_OPERATIONS(SIMD_NULLARY_OPERATION_CASE_ITEM)
+#undef SIMD_NULLARY_OPERATION_CASE_ITEM
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() && argument_count == 1) {
+ Drop(2); // Receiver and function.
+ HInstruction* op = NewUncasted<HNullarySIMDOperation>(id);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ }
+ break;
+#define SIMD_UNARY_OPERATION_CASE_ITEM(p1, p2, name, p4, p5) \
+ case k##name:
+SIMD_UNARY_OPERATIONS(SIMD_UNARY_OPERATION_CASE_ITEM)
+#undef SIMD_UNARY_OPERATION_CASE_ITEM
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() && argument_count == 2) {
+ HValue* argument = Pop();
+ Drop(2); // Receiver and function.
+ HInstruction* op = NewUncasted<HUnarySIMDOperation>(argument, id);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ }
+ break;
+#define SIMD_BINARY_OPERATION_CASE_ITEM(p1, p2, name, p4, p5, p6) \
+ case k##name:
+SIMD_BINARY_OPERATIONS(SIMD_BINARY_OPERATION_CASE_ITEM)
+#undef SIMD_BINARY_OPERATION_CASE_ITEM
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() && argument_count == 3) {
+ HValue* right = Pop();
+ HValue* left = Pop();
+ Drop(2); // Receiver and function.
+ HInstruction* op = NewUncasted<HBinarySIMDOperation>(left, right, id);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ }
+ break;
+#define SIMD_TERNARY_OPERATION_CASE_ITEM(p1, p2, name, p4, p5, p6, p7) \
+ case k##name:
+SIMD_TERNARY_OPERATIONS(SIMD_TERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_TERNARY_OPERATION_CASE_ITEM
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() && argument_count == 4) {
+ HValue* right = Pop();
+ HValue* left = Pop();
+ HValue* value = Pop();
+ Drop(2); // Receiver and function.
+ HInstruction* op =
+ NewUncasted<HTernarySIMDOperation>(value, left, right, id);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ }
+ break;
+#define SIMD_QUARTERNARY_OPERATION_CASE_ITEM(p1, p2, name, p4, p5, p6, p7, p8) \
+ case k##name:
+SIMD_QUARTERNARY_OPERATIONS(SIMD_QUARTERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_QUARTERNARY_OPERATION_CASE_ITEM
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() && argument_count == 5) {
+ HValue* w = Pop();
+ HValue* z = Pop();
+ HValue* y = Pop();
+ HValue* x = Pop();
+ Drop(2); // Receiver and function.
+ HValue* context = environment()->context();
+ HInstruction* op =
+ HQuarternarySIMDOperation::New(zone(), context, x, y, z, w, id);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ }
+ break;
+ case kFloat32x4ArrayGetAt:
+ case kFloat64x2ArrayGetAt:
+ case kInt32x4ArrayGetAt:
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() && argument_count == 2) {
+ HValue* key = Pop();
+ HValue* typed32x4_array = Pop();
+ ASSERT(typed32x4_array == receiver);
+ Drop(1); // Drop function.
+ HInstruction* instr = BuildUncheckedMonomorphicElementAccess(
+ typed32x4_array, key, NULL,
+ receiver_map->instance_type() == JS_ARRAY_TYPE,
+ receiver_map->elements_kind(),
+ LOAD, // is_store.
+ NEVER_RETURN_HOLE, // load_mode.
+ STANDARD_STORE);
+ ast_context()->ReturnValue(instr);
+ return true;
+ }
+ break;
+ case kFloat32x4ArraySetAt:
+ case kFloat64x2ArraySetAt:
+ case kInt32x4ArraySetAt:
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() && argument_count == 3) {
+ HValue* value = Pop();
+ HValue* key = Pop();
+ HValue* typed32x4_array = Pop();
+ ASSERT(typed32x4_array == receiver);
+ Drop(1); // Drop function.
+ // TODO(haitao): add STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS.
+ KeyedAccessStoreMode store_mode = STANDARD_STORE;
+ BuildUncheckedMonomorphicElementAccess(
+ typed32x4_array, key, value,
+ receiver_map->instance_type() == JS_ARRAY_TYPE,
+ receiver_map->elements_kind(),
+ STORE, // is_store.
+ NEVER_RETURN_HOLE, // load_mode.
+ store_mode);
+ Push(value);
+ Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ ast_context()->ReturnValue(Pop());
+ return true;
+ }
+ break;
default:
// Not yet supported for inlining.
break;
if (prop->key()->IsPropertyName() && types->length() > 0) {
Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- PropertyAccessInfo info(this, LOAD, ToType(types->first()), name);
+ PropertyAccessInfo info(this, LOAD, ToType(types->first()), name,
+ types->first()->instance_type());
if (!info.CanAccessAsMonomorphic(types)) {
HandlePolymorphicCallNamed(expr, receiver, types, name);
return;
length);
HValue* filler = Add<HConstant>(static_cast<int32_t>(0));
+ if (IsFixedFloat32x4ElementsKind(fixed_elements_kind)) {
+ filler = AddUncasted<HNullarySIMDOperation>(kFloat32x4Zero);
+ } else if (IsFixedFloat64x2ElementsKind(fixed_elements_kind)) {
+ filler = AddUncasted<HNullarySIMDOperation>(kFloat64x2Zero);
+ } else if (IsFixedInt32x4ElementsKind(fixed_elements_kind)) {
+ filler = AddUncasted<HNullarySIMDOperation>(kInt32x4Zero);
+ }
{
LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
if (op->IsDoubleRegister()) {
trace_.Add(" \"%s\"",
DoubleRegister::AllocationIndexToString(assigned_reg));
+ } else if (op->IsFloat32x4Register()) {
+ trace_.Add(" \"%s\"",
+ SIMD128Register::AllocationIndexToString(assigned_reg));
+ } else if (op->IsFloat64x2Register()) {
+ trace_.Add(" \"%s\"",
+ SIMD128Register::AllocationIndexToString(assigned_reg));
+ } else if (op->IsInt32x4Register()) {
+ trace_.Add(" \"%s\"",
+ SIMD128Register::AllocationIndexToString(assigned_reg));
} else {
ASSERT(op->IsRegister());
trace_.Add(" \"%s\"", Register::AllocationIndexToString(assigned_reg));
LOperand* op = range->TopLevel()->GetSpillOperand();
if (op->IsDoubleStackSlot()) {
trace_.Add(" \"double_stack:%d\"", op->index());
+ } else if (op->IsFloat32x4StackSlot()) {
+ trace_.Add(" \"float32x4_stack:%d\"", op->index());
+ } else if (op->IsFloat64x2StackSlot()) {
+ trace_.Add(" \"float64x2_stack:%d\"", op->index());
+ } else if (op->IsInt32x4StackSlot()) {
+ trace_.Add(" \"int32x4_stack:%d\"", op->index());
} else {
ASSERT(op->IsStackSlot());
trace_.Add(" \"stack:%d\"", op->index());
PropertyAccessInfo(HOptimizedGraphBuilder* builder,
PropertyAccessType access_type,
Type* type,
- Handle<String> name)
+ Handle<String> name,
+ InstanceType instance_type)
: lookup_(builder->isolate()),
builder_(builder),
access_type_(access_type),
type_(type),
name_(name),
field_type_(HType::Tagged()),
- access_(HObjectAccess::ForMap()) { }
+ access_(HObjectAccess::ForMap()),
+ instance_type_(instance_type) { }
// Checkes whether this PropertyAccessInfo can be handled as a monomorphic
// load named. It additionally fills in the fields necessary to generate the
Context* context = current_info()->closure()->context();
context = context->native_context();
return handle(context->number_function()->initial_map());
+ } else if (instance_type_ == Float32x4::kInstanceType) {
+ Context* context = current_info()->closure()->context();
+ context = context->native_context();
+ return handle(context->float32x4_function()->initial_map());
+ } else if (instance_type_ == Float64x2::kInstanceType) {
+ Context* context = current_info()->closure()->context();
+ context = context->native_context();
+ return handle(context->float64x2_function()->initial_map());
+ } else if (instance_type_ == Int32x4::kInstanceType) {
+ Context* context = current_info()->closure()->context();
+ context = context->native_context();
+ return handle(context->int32x4_function()->initial_map());
} else if (type_->Is(Type::Boolean())) {
Context* context = current_info()->closure()->context();
context = context->native_context();
bool IsLoad() const { return access_type_ == LOAD; }
LookupResult* lookup() { return &lookup_; }
+ Handle<String> name() { return name_; }
Handle<JSObject> holder() { return holder_; }
Handle<JSFunction> accessor() { return accessor_; }
Handle<Object> constant() { return constant_; }
HType field_type() const { return field_type_; }
HObjectAccess access() { return access_; }
+ bool IsSIMD128PropertyCallback() {
+ return (((instance_type_ == Float32x4::kInstanceType ||
+ instance_type_ == Int32x4::kInstanceType) &&
+ (name_->Equals(isolate()->heap()->signMask()) ||
+ name_->Equals(isolate()->heap()->x()) ||
+ name_->Equals(isolate()->heap()->y()) ||
+ name_->Equals(isolate()->heap()->z()) ||
+ name_->Equals(isolate()->heap()->w()))) ||
+ (instance_type_ == Int32x4::kInstanceType &&
+ (name_->Equals(isolate()->heap()->flagX()) ||
+ name_->Equals(isolate()->heap()->flagY()) ||
+ name_->Equals(isolate()->heap()->flagZ()) ||
+ name_->Equals(isolate()->heap()->flagW()))) ||
+ (instance_type_ == Float64x2::kInstanceType &&
+ (name_->Equals(isolate()->heap()->signMask()) ||
+ name_->Equals(isolate()->heap()->x()) ||
+ name_->Equals(isolate()->heap()->y()))));
+ }
+
private:
Type* ToType(Handle<Map> map) { return builder_->ToType(map); }
Zone* zone() { return builder_->zone(); }
SmallMapList field_maps_;
HType field_type_;
HObjectAccess access_;
+ InstanceType instance_type_;
};
HInstruction* BuildMonomorphicAccess(PropertyAccessInfo* info,
namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsSIMD128InCrankshaft() { return true; }
static const byte kCallOpcode = 0xE8;
}
+Operand::Operand(const Operand& operand, int32_t offset) {
+ ASSERT(operand.len_ >= 1);
+ // Operand encodes REX ModR/M [SIB] [Disp].
+ byte modrm = operand.buf_[0];
+ ASSERT(modrm < 0xC0); // Disallow mode 3 (register target).
+ bool has_sib = ((modrm & 0x07) == 0x04);
+ byte mode = modrm & 0xC0;
+ int disp_offset = has_sib ? 2 : 1;
+ int base_reg = (has_sib ? operand.buf_[1] : modrm) & 0x07;
+ // Mode 0 with rbp/r13 as ModR/M or SIB base register always has a 32-bit
+ // displacement.
+ bool is_baseless = (mode == 0) && (base_reg == 0x05); // No base or RIP base.
+ int32_t disp_value = 0;
+ if (mode == 0x80 || is_baseless) {
+ // Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
+ disp_value = *BitCast<const int32_t*>(&operand.buf_[disp_offset]);
+ } else if (mode == 0x40) {
+ // Mode 1: Byte displacement.
+ disp_value = static_cast<signed char>(operand.buf_[disp_offset]);
+ }
+
+ // Write new operand with same registers, but with modified displacement.
+ ASSERT(offset >= 0 ? disp_value + offset >= disp_value
+ : disp_value + offset < disp_value); // No overflow.
+ disp_value += offset;
+ if (!is_int8(disp_value) || is_baseless) {
+ // Need 32 bits of displacement, mode 2 or mode 1 with register rbp/r13.
+ buf_[0] = (modrm & 0x3f) | (is_baseless ? 0x00 : 0x80);
+ len_ = disp_offset + 4;
+ Memory::int32_at(&buf_[disp_offset]) = disp_value;
+ } else if (disp_value != 0 || (base_reg == 0x05)) {
+ // Need 8 bits of displacement.
+ buf_[0] = (modrm & 0x3f) | 0x40; // Mode 1.
+ len_ = disp_offset + 1;
+ buf_[disp_offset] = static_cast<byte>(disp_value);
+ } else {
+ // Need no displacement.
+ buf_[0] = (modrm & 0x3f); // Mode 0.
+ len_ = disp_offset;
+ }
+ if (has_sib) {
+ buf_[1] = operand.buf_[1];
+ }
+}
+
+
bool Operand::is_reg(Register reg) const {
return ((buf_[0] & 0xF8) == 0xC0) // addressing mode is register only.
&& ((buf_[0] & 0x07) == reg.code()); // register codes match.
}
+void Assembler::xorpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x57);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::andps(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
}
+void Assembler::addpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x58);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::subpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x5C);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x59);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x5E);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
}
+void Assembler::andpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x54);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::orpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
}
+void Assembler::pcmpgtd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x66);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
}
+void Assembler::movups(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x10);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movups(const Operand& dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x11);
+ emit_sse_operand(src, dst);
+}
+
+
void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
ASSERT(is_uint8(imm8));
EnsureSpace ensure_space(this);
}
+void Assembler::shufpd(XMMRegister dst, XMMRegister src, byte imm8) {
+ ASSERT(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xC6);
+ emit_sse_operand(dst, src);
+ EMIT(imm8);
+}
+
+
void Assembler::movdqa(const Operand& dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
}
+void Assembler::pslld(XMMRegister reg, int8_t shift) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x72);
+ emit_sse_operand(esi, reg); // esi == 6
+ EMIT(shift);
+}
+
+
+void Assembler::pslld(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xF2);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::psrld(XMMRegister reg, int8_t shift) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x72);
+ emit_sse_operand(edx, reg); // edx == 2
+ EMIT(shift);
+}
+
+
+void Assembler::psrld(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xD2);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::psrad(XMMRegister reg, int8_t shift) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x72);
+ emit_sse_operand(esp, reg); // esp == 4
+ EMIT(shift);
+}
+
+
+void Assembler::psrad(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xE2);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::psrlq(XMMRegister reg, int8_t shift) {
EnsureSpace ensure_space(this);
EMIT(0x66);
}
+void Assembler::psrldq(XMMRegister dst, int8_t shift) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x73);
+ emit_sse_operand(ebx, dst); // ebx == 3
+ EMIT(shift);
+}
+
+
void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
EnsureSpace ensure_space(this);
EMIT(0x66);
}
+void Assembler::minps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x5D);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::maxps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x5F);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::minpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x5D);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::maxpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x5F);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::rcpps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x53);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::rsqrtps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x52);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::sqrtps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x51);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::sqrtpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x51);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvtdq2ps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x5B);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::paddd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xFE);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::psubd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xFA);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::pmulld(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x38);
+ EMIT(0x40);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::pmuludq(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xF4);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::punpackldq(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x62);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvtps2dq(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x5B);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cmpps(XMMRegister dst, XMMRegister src, int8_t cmp) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xC2);
+ emit_sse_operand(dst, src);
+ EMIT(cmp);
+}
+
+
+void Assembler::cmpeqps(XMMRegister dst, XMMRegister src) {
+ cmpps(dst, src, 0x0);
+}
+
+
+void Assembler::cmpltps(XMMRegister dst, XMMRegister src) {
+ cmpps(dst, src, 0x1);
+}
+
+
+void Assembler::cmpleps(XMMRegister dst, XMMRegister src) {
+ cmpps(dst, src, 0x2);
+}
+
+
+void Assembler::cmpneqps(XMMRegister dst, XMMRegister src) {
+ cmpps(dst, src, 0x4);
+}
+
+
+void Assembler::cmpnltps(XMMRegister dst, XMMRegister src) {
+ cmpps(dst, src, 0x5);
+}
+
+
+void Assembler::cmpnleps(XMMRegister dst, XMMRegister src) {
+ cmpps(dst, src, 0x6);
+}
+
+
+void Assembler::insertps(XMMRegister dst, XMMRegister src, byte imm8) {
+ ASSERT(CpuFeatures::IsSupported(SSE4_1));
+ ASSERT(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x21);
+ emit_sse_operand(dst, src);
+ EMIT(imm8);
+}
+
+
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
emit_operand(ireg, adr);
void Assembler::RecordComment(const char* msg, bool force) {
if (FLAG_code_comments || force) {
- EnsureSpace ensure_space(this);
+ EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
}
}
typedef XMMRegister DoubleRegister;
-
+typedef XMMRegister SIMD128Register;
const XMMRegister xmm0 = { 0 };
const XMMRegister xmm1 = { 1 };
times_2 = 1,
times_4 = 2,
times_8 = 3,
+ maximal_scale_factor = times_8,
times_int_size = times_4,
times_half_pointer_size = times_2,
times_pointer_size = times_4,
int32_t disp,
RelocInfo::Mode rmode = RelocInfo::NONE32);
+ // Offset from existing memory operand.
+ // Offset is added to existing displacement as 32-bit signed values and
+ // this must not overflow.
+ Operand(const Operand& base, int32_t offset);
+
static Operand StaticVariable(const ExternalReference& ext) {
return Operand(reinterpret_cast<int32_t>(ext.address()),
RelocInfo::EXTERNAL_REFERENCE);
// SSE instructions
void movaps(XMMRegister dst, XMMRegister src);
+ void movups(XMMRegister dst, const Operand& src);
+ void movups(const Operand& dst, XMMRegister src);
void shufps(XMMRegister dst, XMMRegister src, byte imm8);
+ void shufpd(XMMRegister dst, XMMRegister src, byte imm8);
void andps(XMMRegister dst, const Operand& src);
void andps(XMMRegister dst, XMMRegister src) { andps(dst, Operand(src)); }
void mulps(XMMRegister dst, XMMRegister src) { mulps(dst, Operand(src)); }
void divps(XMMRegister dst, const Operand& src);
void divps(XMMRegister dst, XMMRegister src) { divps(dst, Operand(src)); }
+ void minps(XMMRegister dst, XMMRegister src) { minps(dst, Operand(src)); }
+ void minps(XMMRegister dst, const Operand& src);
+ void maxps(XMMRegister dst, XMMRegister src) { maxps(dst, Operand(src)); }
+ void maxps(XMMRegister dst, const Operand& src);
+ void rcpps(XMMRegister dst, XMMRegister src) { rcpps(dst, Operand(src)); }
+ void rcpps(XMMRegister dst, const Operand& src);
+ void rsqrtps(XMMRegister dst, XMMRegister src) { rsqrtps(dst, Operand(src)); }
+ void rsqrtps(XMMRegister dst, const Operand& src);
+ void sqrtps(XMMRegister dst, XMMRegister src) { sqrtps(dst, Operand(src)); }
+ void sqrtps(XMMRegister dst, const Operand& src);
+ void sqrtpd(XMMRegister dst, XMMRegister src) { sqrtpd(dst, Operand(src)); }
+ void sqrtpd(XMMRegister dst, const Operand& src);
+
+ void addpd(XMMRegister dst, const Operand& src);
+ void addpd(XMMRegister dst, XMMRegister src) { addpd(dst, Operand(src)); }
+ void subpd(XMMRegister dst, const Operand& src);
+ void subpd(XMMRegister dst, XMMRegister src) { subpd(dst, Operand(src)); }
+ void mulpd(XMMRegister dst, const Operand& src);
+ void mulpd(XMMRegister dst, XMMRegister src) { mulpd(dst, Operand(src)); }
+ void divpd(XMMRegister dst, const Operand& src);
+ void divpd(XMMRegister dst, XMMRegister src) { divpd(dst, Operand(src)); }
+ void minpd(XMMRegister dst, XMMRegister src) { minpd(dst, Operand(src)); }
+ void minpd(XMMRegister dst, const Operand& src);
+ void maxpd(XMMRegister dst, XMMRegister src) { maxpd(dst, Operand(src)); }
+ void maxpd(XMMRegister dst, const Operand& src);
+
+ void cvtdq2ps(XMMRegister dst, const Operand& src);
+ void cmpps(XMMRegister dst, XMMRegister src, int8_t cmp);
+ void cmpeqps(XMMRegister dst, XMMRegister src);
+ void cmpltps(XMMRegister dst, XMMRegister src);
+ void cmpleps(XMMRegister dst, XMMRegister src);
+ void cmpneqps(XMMRegister dst, XMMRegister src);
+ void cmpnltps(XMMRegister dst, XMMRegister src);
+ void cmpnleps(XMMRegister dst, XMMRegister src);
+
+ // SSE 2, introduced by SIMD
+ void paddd(XMMRegister dst, XMMRegister src) { paddd(dst, Operand(src)); }
+ void paddd(XMMRegister dst, const Operand& src);
+ void psubd(XMMRegister dst, XMMRegister src) { psubd(dst, Operand(src)); }
+ void psubd(XMMRegister dst, const Operand& src);
+ void pmuludq(XMMRegister dst, XMMRegister src) { pmuludq(dst, Operand(src)); }
+ void pmuludq(XMMRegister dst, const Operand& src);
+ void punpackldq(XMMRegister dst, XMMRegister src) {
+ punpackldq(dst, Operand(src));
+ }
+ void punpackldq(XMMRegister dst, const Operand& src);
+ void cvtps2dq(XMMRegister dst, XMMRegister src) {
+ cvtps2dq(dst, Operand(src));
+ }
+ void cvtps2dq(XMMRegister dst, const Operand& src);
+ void cvtdq2ps(XMMRegister dst, XMMRegister src) {
+ cvtdq2ps(dst, Operand(src));
+ }
+ // SSE 4.1, introduced by SIMD
+ void insertps(XMMRegister dst, XMMRegister src, byte imm8);
+ void pmulld(XMMRegister dst, XMMRegister src) { pmulld(dst, Operand(src)); }
+ void pmulld(XMMRegister dst, const Operand& src);
// SSE2 instructions
void cvttss2si(Register dst, const Operand& src);
void mulsd(XMMRegister dst, const Operand& src);
void divsd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
+ void xorpd(XMMRegister dst, const Operand& src);
void sqrtsd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, const Operand& src);
void andpd(XMMRegister dst, XMMRegister src);
+ void andpd(XMMRegister dst, const Operand& src);
void orpd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); }
void cmpltsd(XMMRegister dst, XMMRegister src);
void pcmpeqd(XMMRegister dst, XMMRegister src);
+ void pcmpgtd(XMMRegister dst, XMMRegister src);
void movdqa(XMMRegister dst, const Operand& src);
void movdqa(const Operand& dst, XMMRegister src);
void psllq(XMMRegister reg, int8_t shift);
void psllq(XMMRegister dst, XMMRegister src);
+ void pslld(XMMRegister reg, int8_t shift);
+ void pslld(XMMRegister dst, XMMRegister src);
+ void psrld(XMMRegister reg, int8_t shift);
+ void psrld(XMMRegister dst, XMMRegister src);
+ void psrad(XMMRegister reg, int8_t shift);
+ void psrad(XMMRegister dst, XMMRegister src);
void psrlq(XMMRegister reg, int8_t shift);
void psrlq(XMMRegister dst, XMMRegister src);
+ void psrldq(XMMRegister dst, int8_t shift);
void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
void pextrd(Register dst, XMMRegister src, int8_t offset) {
pextrd(Operand(dst), src, offset);
}
input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+ simd128_value_t zero = {{0.0, 0.0}};
for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; i++) {
- input_->SetDoubleRegister(i, 0.0);
+ input_->SetSIMD128Register(i, zero);
}
// Fill the frame content from the actual data on the frame.
}
-void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+void Deoptimizer::CopySIMD128Registers(FrameDescription* output_frame) {
for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
- double double_value = input_->GetDoubleRegister(i);
- output_frame->SetDoubleRegister(i, double_value);
+ simd128_value_t xmm_value = input_->GetSIMD128Register(i);
+ output_frame->SetSIMD128Register(i, xmm_value);
}
}
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
- const int kDoubleRegsSize = kDoubleSize *
- XMMRegister::kMaxNumAllocatableRegisters;
- __ sub(esp, Immediate(kDoubleRegsSize));
+ const int kXMMRegsSize = kSIMD128Size *
+ XMMRegister::kMaxNumAllocatableRegisters;
+ __ sub(esp, Immediate(kXMMRegsSize));
for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ movsd(Operand(esp, offset), xmm_reg);
+ int offset = i * kSIMD128Size;
+ __ movups(Operand(esp, offset), xmm_reg);
}
__ pushad();
const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
- kDoubleRegsSize;
+ kXMMRegsSize;
// Get the bailout id from the stack.
__ mov(ebx, Operand(esp, kSavedRegistersAreaSize));
__ pop(Operand(ebx, offset));
}
- int double_regs_offset = FrameDescription::double_registers_offset();
+ int xmm_regs_offset = FrameDescription::simd128_registers_offset();
// Fill in the double input registers.
for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize;
- __ movsd(xmm0, Operand(esp, src_offset));
- __ movsd(Operand(ebx, dst_offset), xmm0);
+ int dst_offset = i * kSIMD128Size + xmm_regs_offset;
+ int src_offset = i * kSIMD128Size;
+ __ movups(xmm0, Operand(esp, src_offset));
+ __ movups(Operand(ebx, dst_offset), xmm0);
}
// Clear FPU all exceptions.
__ fnclex();
// Remove the bailout id, return address and the double registers.
- __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
+ __ add(esp, Immediate(kXMMRegsSize + 2 * kPointerSize));
// Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame.
// In case of a failed STUB, we have to restore the XMM registers.
for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
- __ movsd(xmm_reg, Operand(ebx, src_offset));
+ int src_offset = i * kSIMD128Size + xmm_regs_offset;
+ __ movups(xmm_reg, Operand(ebx, src_offset));
}
// Push state, pc, and continuation from the last output frame.
}
+double FrameDescription::GetDoubleRegister(unsigned n) const {
+ ASSERT(n < ARRAY_SIZE(simd128_registers_));
+ return simd128_registers_[n].d[0];
+}
+
+
+void FrameDescription::SetDoubleRegister(unsigned n, double value) {
+ ASSERT(n < ARRAY_SIZE(simd128_registers_));
+ simd128_registers_[n].d[0] = value;
+}
+
+
#undef __
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
- } else if (f0byte >= 0x53 && f0byte <= 0x5F) {
+ } else if (f0byte == 0x10) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("movups %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (f0byte == 0x11) {
+ AppendToBuffer("movups ");
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (f0byte >= 0x51 && f0byte <= 0x5F) {
const char* const pseudo_op[] = {
+ "sqrtps",
+ "rsqrtps",
"rcpps",
"andps",
"andnps",
"subps",
"minps",
"divps",
- "maxps",
+ "maxps"
};
data += 2;
int mod, regop, rm;
get_modrm(*data, &mod, ®op, &rm);
AppendToBuffer("%s %s,",
- pseudo_op[f0byte - 0x53],
+ pseudo_op[f0byte - 0x51],
NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
} else if (f0byte == 0x50) {
NameOfCPURegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (f0byte == 0xC2) {
+ // Intel manual 2A, Table 3-11.
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ const char* const pseudo_op[] = {
+ "cmpeqps",
+ "cmpltps",
+ "cmpleps",
+ "cmpunordps",
+ "cmpneqps",
+ "cmpnltps",
+ "cmpnleps",
+ "cmpordps"
+ };
+ AppendToBuffer("%s %s,%s",
+ pseudo_op[data[1] & 0x07], // mask imm8: table has 8 entries, avoid OOB read
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data += 2;
} else if (f0byte== 0xC6) {
// shufps xmm, xmm/m128, imm8
data += 2;
NameOfXMMRegister(regop),
static_cast<int>(imm8));
data += 2;
+ } else if (f0byte== 0x5B) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ // Destination of cvtdq2ps xmm, xmm/m128 is the modrm reg field (regop),
+ // not rm; rm/mem is printed by PrintRightXMMOperand below.
+ AppendToBuffer("cvtdq2ps %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
} else if ((f0byte & 0xF0) == 0x80) {
data += JumpConditional(data, branch_hint);
} else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (*data == 0x40) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ // Format string must match the single vararg; the second operand is
+ // appended by PrintRightXMMOperand (was "pmulld %s,%s" with one arg: UB).
+ AppendToBuffer("pmulld %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
} else if (*data == 0x2A) {
// movntdqa
data++;
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0x21) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("insertps %s,%s,%d",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
} else if (*data == 0x17) {
data++;
int mod, regop, rm;
NameOfCPURegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (*data == 0x51) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("sqrtpd %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (*data == 0x54) {
data++;
int mod, regop, rm;
data++;
int mod, regop, rm;
get_modrm(*data, &mod, ®op, &rm);
- AppendToBuffer("xorpd %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
+ AppendToBuffer("xorpd %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0x58) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("addpd %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0x59) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("mulpd %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0x5B) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("cvtps2dq %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0x5C) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("subpd %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0x5D) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("minpd %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0x5E) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("divpd %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0x5F) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("maxpd %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0x62) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("punpackldq %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0xF4) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("pmuludq %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0xFA) {
data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("psubd %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0xFE) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("paddd %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
} else if (*data == 0x6E) {
data++;
int mod, regop, rm;
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0x66) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("pcmpgtd %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (*data == 0x76) {
data++;
int mod, regop, rm;
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0xF2) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("pslld %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0x72) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ // 66 0F 72: /6 = pslld (esi), /2 = psrld (edx), /4 = psrad (esp).
+ // Assert must admit /4 or the psrad arm below is unreachable in debug.
+ ASSERT(regop == esi || regop == edx || regop == esp);
+ AppendToBuffer("%s %s,%d",
+ (regop == esi) ? "pslld"
+ : ((regop == edx) ? "psrld" : "psrad"),
+ NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
+ } else if (*data == 0xC6) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("shufpd %s,%s,%d",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
+ } else if (*data == 0xD2) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("psrld %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (*data == 0xD3) {
data++;
int mod, regop, rm;
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (*data == 0xE2) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("psrad %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (*data == 0x7F) {
AppendToBuffer("movdqa ");
data++;
}
+XMMRegister LCodeGen::ToSIMD128Register(int index) const {
+ return XMMRegister::FromAllocationIndex(index);
+}
+
+
Register LCodeGen::ToRegister(LOperand* op) const {
ASSERT(op->IsRegister());
return ToRegister(op->index());
}
+XMMRegister LCodeGen::ToFloat32x4Register(LOperand* op) const {
+ ASSERT(op->IsFloat32x4Register());
+ return ToSIMD128Register(op->index());
+}
+
+
+XMMRegister LCodeGen::ToFloat64x2Register(LOperand* op) const {
+ ASSERT(op->IsFloat64x2Register());
+ return ToSIMD128Register(op->index());
+}
+
+
+XMMRegister LCodeGen::ToInt32x4Register(LOperand* op) const {
+ ASSERT(op->IsInt32x4Register());
+ return ToSIMD128Register(op->index());
+}
+
+
+XMMRegister LCodeGen::ToSIMD128Register(LOperand* op) const {
+ ASSERT(op->IsFloat32x4Register() || op->IsFloat64x2Register() ||
+ op->IsInt32x4Register());
+ return ToSIMD128Register(op->index());
+}
+
+
int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
return ToRepresentation(op, Representation::Integer32());
}
Operand LCodeGen::ToOperand(LOperand* op) const {
if (op->IsRegister()) return Operand(ToRegister(op));
if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
- ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+ if (op->IsFloat32x4Register()) return Operand(ToFloat32x4Register(op));
+ if (op->IsFloat64x2Register()) return Operand(ToFloat64x2Register(op));
+ if (op->IsInt32x4Register()) return Operand(ToInt32x4Register(op));
+ ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot() ||
+ op->IsFloat32x4StackSlot() || op->IsFloat64x2StackSlot() ||
+ op->IsInt32x4StackSlot());
if (NeedsEagerFrame()) {
return Operand(ebp, StackSlotOffset(op->index()));
} else {
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
+ } else if (op->IsFloat32x4StackSlot()) {
+ translation->StoreSIMD128StackSlot(op->index(),
+ Translation::FLOAT32x4_STACK_SLOT);
+ } else if (op->IsFloat64x2StackSlot()) {
+ translation->StoreSIMD128StackSlot(op->index(),
+ Translation::FLOAT64x2_STACK_SLOT);
+ } else if (op->IsInt32x4StackSlot()) {
+ translation->StoreSIMD128StackSlot(op->index(),
+ Translation::INT32x4_STACK_SLOT);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
} else if (op->IsDoubleRegister()) {
XMMRegister reg = ToDoubleRegister(op);
translation->StoreDoubleRegister(reg);
+ } else if (op->IsFloat32x4Register()) {
+ XMMRegister reg = ToFloat32x4Register(op);
+ translation->StoreSIMD128Register(reg, Translation::FLOAT32x4_REGISTER);
+ } else if (op->IsFloat64x2Register()) {
+ XMMRegister reg = ToFloat64x2Register(op);
+ translation->StoreSIMD128Register(reg, Translation::FLOAT64x2_REGISTER);
+ } else if (op->IsInt32x4Register()) {
+ XMMRegister reg = ToInt32x4Register(op);
+ translation->StoreSIMD128Register(reg, Translation::INT32x4_REGISTER);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(reg, xmm_scratch);
EmitBranch(instr, not_equal);
+ } else if (r.IsSIMD128()) {
+ ASSERT(!info()->IsStub());
+ EmitBranch(instr, no_condition);
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->value());
}
+void LCodeGen::DoDeferredSIMD128ToTagged(LInstruction* instr,
+ Runtime::FunctionId id) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register reg = ToRegister(instr->result());
+ __ Move(reg, Immediate(0));
+
+ PushSafepointRegistersScope scope(this);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(id);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(reg, eax);
+}
+
+
+void LCodeGen::HandleExternalArrayOpRequiresTemp(
+ LOperand* key,
+ Representation key_representation,
+ ElementsKind elements_kind) {
+ if (ExternalArrayOpRequiresPreScale(key_representation, elements_kind)) {
+ int pre_shift_size = ElementsKindToShiftSize(elements_kind) -
+ static_cast<int>(maximal_scale_factor);
+ if (key_representation.IsSmi()) {
+ pre_shift_size -= kSmiTagSize;
+ }
+ ASSERT(pre_shift_size > 0);
+ __ shl(ToRegister(key), pre_shift_size);
+ } else {
+ __ SmiUntag(ToRegister(key));
+ }
+}
+
+
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
if (!key->IsConstantOperand() &&
- ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
- elements_kind)) {
- __ SmiUntag(ToRegister(key));
+ ExternalArrayOpRequiresTemp(
+ instr->hydrogen()->key()->representation(), elements_kind)) {
+ HandleExternalArrayOpRequiresTemp(
+ key, instr->hydrogen()->key()->representation(), elements_kind);
}
+
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
} else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
__ movsd(ToDoubleRegister(instr->result()), operand);
+ } else if (IsSIMD128ElementsKind(elements_kind)) {
+ __ movups(ToSIMD128Register(instr->result()), operand);
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
break;
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32x4_ELEMENTS:
+ case EXTERNAL_FLOAT64x2_ELEMENTS:
+ case EXTERNAL_INT32x4_ELEMENTS:
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
+ case FLOAT32x4_ELEMENTS:
+ case FLOAT64x2_ELEMENTS:
+ case INT32x4_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
((constant_value) << shift_size)
+ base_offset);
} else {
- // Take the tag bit into account while computing the shift size.
- if (key_representation.IsSmi() && (shift_size >= 1)) {
+ if (ExternalArrayOpRequiresPreScale(key_representation, elements_kind)) {
+ // Make sure the key is pre-scaled against maximal_scale_factor.
+ shift_size = static_cast<int>(maximal_scale_factor);
+ } else if (key_representation.IsSmi() && (shift_size >= 1)) {
+ // Take the tag bit into account while computing the shift size.
shift_size -= kSmiTagSize;
}
ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
if (!key->IsConstantOperand() &&
ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
elements_kind)) {
- __ SmiUntag(ToRegister(key));
+ HandleExternalArrayOpRequiresTemp(
+ key, instr->hydrogen()->key()->representation(), elements_kind);
}
+
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
} else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
__ movsd(operand, ToDoubleRegister(instr->value()));
+ } else if (IsSIMD128ElementsKind(elements_kind)) {
+ __ movups(operand, ToSIMD128Register(instr->value()));
} else {
Register value = ToRegister(instr->value());
switch (elements_kind) {
break;
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32x4_ELEMENTS:
+ case EXTERNAL_FLOAT64x2_ELEMENTS:
+ case EXTERNAL_INT32x4_ELEMENTS:
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
+ case FLOAT32x4_ELEMENTS:
+ case FLOAT64x2_ELEMENTS:
+ case INT32x4_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
}
+template<class T>
+void LCodeGen::HandleSIMD128ToTagged(LSIMD128ToTagged* instr) {
+ class DeferredSIMD128ToTagged V8_FINAL : public LDeferredCode {
+ public:
+ DeferredSIMD128ToTagged(LCodeGen* codegen,
+ LInstruction* instr,
+ Runtime::FunctionId id)
+ : LDeferredCode(codegen), instr_(instr), id_(id) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LInstruction* instr_;
+ Runtime::FunctionId id_;
+ };
+
+ XMMRegister input_reg = ToSIMD128Register(instr->value());
+ Register reg = ToRegister(instr->result());
+ Register tmp = ToRegister(instr->temp());
+ Register tmp2 = ToRegister(instr->temp2());
+
+ DeferredSIMD128ToTagged* deferred = new(zone()) DeferredSIMD128ToTagged(
+ this, instr, static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()));
+
+ if (FLAG_inline_new) {
+ if (T::kInstanceType == FLOAT32x4_TYPE) {
+ __ AllocateFloat32x4(reg, tmp, tmp2, deferred->entry());
+ } else if (T::kInstanceType == INT32x4_TYPE) {
+ __ AllocateInt32x4(reg, tmp, tmp2, deferred->entry());
+ } else if (T::kInstanceType == FLOAT64x2_TYPE) {
+ __ AllocateFloat64x2(reg, tmp, tmp2, deferred->entry());
+ }
+ } else {
+ __ jmp(deferred->entry());
+ }
+ __ bind(deferred->exit());
+
+ // Load the inner FixedTypedArray object.
+ __ mov(tmp, FieldOperand(reg, T::kValueOffset));
+
+ __ movups(FieldOperand(tmp, FixedTypedArrayBase::kDataOffset), input_reg);
+}
+
+
+void LCodeGen::DoSIMD128ToTagged(LSIMD128ToTagged* instr) {
+ if (instr->value()->IsFloat32x4Register()) {
+ HandleSIMD128ToTagged<Float32x4>(instr);
+ } else if (instr->value()->IsFloat64x2Register()) {
+ HandleSIMD128ToTagged<Float64x2>(instr);
+ } else {
+ ASSERT(instr->value()->IsInt32x4Register());
+ HandleSIMD128ToTagged<Int32x4>(instr);
+ }
+}
+
+
+template<class T>
+void LCodeGen::HandleTaggedToSIMD128(LTaggedToSIMD128* instr) {
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsSIMD128Register());
+
+ Register input_reg = ToRegister(input);
+ Register temp_reg = ToRegister(instr->temp());
+ XMMRegister result_reg = ToSIMD128Register(result);
+
+ __ test(input_reg, Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr->environment());
+ __ CmpObjectType(input_reg, T::kInstanceType, temp_reg);
+ DeoptimizeIf(not_equal, instr->environment());
+
+ // Load the inner FixedTypedArray object.
+ __ mov(temp_reg, FieldOperand(input_reg, T::kValueOffset));
+
+ __ movups(
+ result_reg, FieldOperand(temp_reg, FixedTypedArrayBase::kDataOffset));
+}
+
+
+void LCodeGen::DoTaggedToSIMD128(LTaggedToSIMD128* instr) {
+ if (instr->representation().IsFloat32x4()) {
+ HandleTaggedToSIMD128<Float32x4>(instr);
+ } else if (instr->representation().IsFloat64x2()) {
+ HandleTaggedToSIMD128<Float64x2>(instr);
+ } else {
+ ASSERT(instr->representation().IsInt32x4());
+ HandleTaggedToSIMD128<Int32x4>(instr);
+ }
+}
+
+
+void LCodeGen::DoNullarySIMDOperation(LNullarySIMDOperation* instr) {
+ switch (instr->op()) {
+ case kFloat32x4Zero: {
+ XMMRegister result_reg = ToFloat32x4Register(instr->result());
+ __ xorps(result_reg, result_reg);
+ return;
+ }
+ case kFloat64x2Zero: {
+ XMMRegister result_reg = ToFloat64x2Register(instr->result());
+ __ xorpd(result_reg, result_reg);
+ return;
+ }
+ case kInt32x4Zero: {
+ XMMRegister result_reg = ToInt32x4Register(instr->result());
+ __ xorps(result_reg, result_reg);
+ return;
+ }
+ default:
+ UNREACHABLE();
+ return;
+ }
+}
+
+
+void LCodeGen::DoUnarySIMDOperation(LUnarySIMDOperation* instr) {
+ uint8_t select = 0;
+ switch (instr->op()) {
+ case kSIMD128Change: {
+ Comment(";;; deoptimize: can not perform representation change"
+ "for float32x4 or int32x4");
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+ case kFloat32x4Abs:
+ case kFloat32x4Neg:
+ case kFloat32x4Reciprocal:
+ case kFloat32x4ReciprocalSqrt:
+ case kFloat32x4Sqrt: {
+ ASSERT(instr->value()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
+ XMMRegister input_reg = ToFloat32x4Register(instr->value());
+ switch (instr->op()) {
+ case kFloat32x4Abs:
+ __ absps(input_reg);
+ break;
+ case kFloat32x4Neg:
+ __ negateps(input_reg);
+ break;
+ case kFloat32x4Reciprocal:
+ __ rcpps(input_reg, input_reg);
+ break;
+ case kFloat32x4ReciprocalSqrt:
+ __ rsqrtps(input_reg, input_reg);
+ break;
+ case kFloat32x4Sqrt:
+ __ sqrtps(input_reg, input_reg);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return;
+ }
+ case kFloat64x2Abs:
+ case kFloat64x2Neg:
+ case kFloat64x2Sqrt: {
+ ASSERT(instr->value()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->value()->representation().IsFloat64x2());
+ XMMRegister input_reg = ToFloat64x2Register(instr->value());
+ switch (instr->op()) {
+ case kFloat64x2Abs:
+ __ abspd(input_reg);
+ break;
+ case kFloat64x2Neg:
+ __ negatepd(input_reg);
+ break;
+ case kFloat64x2Sqrt:
+ __ sqrtpd(input_reg, input_reg);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return;
+ }
+ case kInt32x4Not:
+ case kInt32x4Neg: {
+ ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
+ XMMRegister input_reg = ToInt32x4Register(instr->value());
+ switch (instr->op()) {
+ case kInt32x4Not:
+ __ notps(input_reg);
+ break;
+ case kInt32x4Neg:
+ __ pnegd(input_reg);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return;
+ }
+ case kFloat32x4BitsToInt32x4:
+ case kFloat32x4ToInt32x4: {
+ ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
+ XMMRegister input_reg = ToFloat32x4Register(instr->value());
+ XMMRegister result_reg = ToInt32x4Register(instr->result());
+ if (instr->op() == kFloat32x4BitsToInt32x4) {
+ if (!result_reg.is(input_reg)) {
+ __ movaps(result_reg, input_reg);
+ }
+ } else {
+ ASSERT(instr->op() == kFloat32x4ToInt32x4);
+ __ cvtps2dq(result_reg, input_reg);
+ }
+ return;
+ }
+ case kInt32x4BitsToFloat32x4:
+ case kInt32x4ToFloat32x4: {
+ ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
+ XMMRegister input_reg = ToInt32x4Register(instr->value());
+ XMMRegister result_reg = ToFloat32x4Register(instr->result());
+ if (instr->op() == kInt32x4BitsToFloat32x4) {
+ if (!result_reg.is(input_reg)) {
+ __ movaps(result_reg, input_reg);
+ }
+ } else {
+ ASSERT(instr->op() == kInt32x4ToFloat32x4);
+ __ cvtdq2ps(result_reg, input_reg);
+ }
+ return;
+ }
+ case kFloat32x4Splat: {
+ ASSERT(instr->hydrogen()->value()->representation().IsDouble());
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ XMMRegister result_reg = ToFloat32x4Register(instr->result());
+ XMMRegister xmm_scratch = xmm0;
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ cvtsd2ss(xmm_scratch, input_reg);
+ __ shufps(xmm_scratch, xmm_scratch, 0x0);
+ __ movaps(result_reg, xmm_scratch);
+ return;
+ }
+ case kInt32x4Splat: {
+ ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+ Register input_reg = ToRegister(instr->value());
+ XMMRegister result_reg = ToInt32x4Register(instr->result());
+ __ movd(result_reg, input_reg);
+ __ shufps(result_reg, result_reg, 0x0);
+ return;
+ }
+ case kInt32x4GetSignMask: {
+ ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
+ XMMRegister input_reg = ToInt32x4Register(instr->value());
+ Register result = ToRegister(instr->result());
+ __ movmskps(result, input_reg);
+ return;
+ }
+ case kFloat32x4GetSignMask: {
+ ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
+ XMMRegister input_reg = ToFloat32x4Register(instr->value());
+ Register result = ToRegister(instr->result());
+ __ movmskps(result, input_reg);
+ return;
+ }
+ case kFloat32x4GetW:
+ select++;
+ case kFloat32x4GetZ:
+ select++;
+ case kFloat32x4GetY:
+ select++;
+ case kFloat32x4GetX: {
+ ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
+ XMMRegister input_reg = ToFloat32x4Register(instr->value());
+ XMMRegister result = ToDoubleRegister(instr->result());
+ XMMRegister xmm_scratch = result.is(input_reg) ? xmm0 : result;
+
+ if (select == 0x0) {
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ cvtss2sd(xmm_scratch, input_reg);
+ if (!xmm_scratch.is(result)) {
+ __ movaps(result, xmm_scratch);
+ }
+ } else {
+ __ pshufd(xmm_scratch, input_reg, select);
+ if (!xmm_scratch.is(result)) {
+ __ xorps(result, result);
+ }
+ __ cvtss2sd(result, xmm_scratch);
+ }
+ return;
+ }
+ case kFloat64x2GetSignMask: {
+ ASSERT(instr->hydrogen()->value()->representation().IsFloat64x2());
+ XMMRegister input_reg = ToFloat64x2Register(instr->value());
+ Register result = ToRegister(instr->result());
+ __ movmskpd(result, input_reg);
+ return;
+ }
+ case kFloat64x2GetX: {
+ ASSERT(instr->hydrogen()->value()->representation().IsFloat64x2());
+ XMMRegister input_reg = ToFloat64x2Register(instr->value());
+ XMMRegister result = ToDoubleRegister(instr->result());
+
+ if (!input_reg.is(result)) {
+ __ movaps(result, input_reg);
+ }
+ return;
+ }
+ case kFloat64x2GetY: {
+ ASSERT(instr->hydrogen()->value()->representation().IsFloat64x2());
+ XMMRegister input_reg = ToFloat64x2Register(instr->value());
+ XMMRegister result = ToDoubleRegister(instr->result());
+
+ if (!input_reg.is(result)) {
+ __ movaps(result, input_reg);
+ }
+ __ shufpd(result, input_reg, 0x1);
+ return;
+ }
+ case kInt32x4GetX:
+ case kInt32x4GetY:
+ case kInt32x4GetZ:
+ case kInt32x4GetW:
+ case kInt32x4GetFlagX:
+ case kInt32x4GetFlagY:
+ case kInt32x4GetFlagZ:
+ case kInt32x4GetFlagW: {
+ ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
+ bool flag = false;
+ switch (instr->op()) {
+ case kInt32x4GetFlagX:
+ flag = true;
+ case kInt32x4GetX:
+ break;
+ case kInt32x4GetFlagY:
+ flag = true;
+ case kInt32x4GetY:
+ select = 0x1;
+ break;
+ case kInt32x4GetFlagZ:
+ flag = true;
+ case kInt32x4GetZ:
+ select = 0x2;
+ break;
+ case kInt32x4GetFlagW:
+ flag = true;
+ case kInt32x4GetW:
+ select = 0x3;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ XMMRegister input_reg = ToInt32x4Register(instr->value());
+ Register result = ToRegister(instr->result());
+ if (select == 0x0) {
+ __ movd(result, input_reg);
+ } else {
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope scope(masm(), SSE4_1);
+ __ extractps(result, input_reg, select);
+ } else {
+ XMMRegister xmm_scratch = xmm0;
+ __ pshufd(xmm_scratch, input_reg, select);
+ __ movd(result, xmm_scratch);
+ }
+ }
+
+ if (flag) {
+ Label false_value, done;
+ __ test(result, result);
+ __ j(zero, &false_value, Label::kNear);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ jmp(&done, Label::kNear);
+ __ bind(&false_value);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
+ }
+ return;
+ }
+ default:
+ UNREACHABLE();
+ return;
+ }
+}
+
+
+// Emits ia32/SSE code for a two-operand SIMD operation.  Most cases use
+// destructive two-operand SSE instructions, so the register allocator must
+// have placed the left operand in the result register (asserted per case).
+// Several case labels below deliberately fall through to accumulate a lane
+// index in imm8.
+void LCodeGen::DoBinarySIMDOperation(LBinarySIMDOperation* instr) {
+ uint8_t imm8 = 0; // Lane index (x=0..w=3) for the "with"-style operations.
+ switch (instr->op()) {
+ // Lane-wise float32x4 arithmetic: one destructive SSE instruction each.
+ case kFloat32x4Add:
+ case kFloat32x4Sub:
+ case kFloat32x4Mul:
+ case kFloat32x4Div:
+ case kFloat32x4Min:
+ case kFloat32x4Max: {
+ ASSERT(instr->left()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
+ ASSERT(instr->hydrogen()->right()->representation().IsFloat32x4());
+ XMMRegister left_reg = ToFloat32x4Register(instr->left());
+ XMMRegister right_reg = ToFloat32x4Register(instr->right());
+ switch (instr->op()) {
+ case kFloat32x4Add:
+ __ addps(left_reg, right_reg);
+ break;
+ case kFloat32x4Sub:
+ __ subps(left_reg, right_reg);
+ break;
+ case kFloat32x4Mul:
+ __ mulps(left_reg, right_reg);
+ break;
+ case kFloat32x4Div:
+ __ divps(left_reg, right_reg);
+ break;
+ case kFloat32x4Min:
+ __ minps(left_reg, right_reg);
+ break;
+ case kFloat32x4Max:
+ __ maxps(left_reg, right_reg);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return;
+ }
+ // Multiply each lane by a scalar: narrow the double to float, splat it
+ // across xmm0, then do one packed multiply.
+ case kFloat32x4Scale: {
+ ASSERT(instr->left()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
+ ASSERT(instr->hydrogen()->right()->representation().IsDouble());
+ XMMRegister left_reg = ToFloat32x4Register(instr->left());
+ XMMRegister right_reg = ToDoubleRegister(instr->right());
+ XMMRegister scratch_reg = xmm0;
+ __ xorps(scratch_reg, scratch_reg);
+ __ cvtsd2ss(scratch_reg, right_reg);
+ __ shufps(scratch_reg, scratch_reg, 0x0);
+ __ mulps(left_reg, scratch_reg);
+ return;
+ }
+ // Lane-wise float64x2 arithmetic: one destructive SSE2 instruction each.
+ case kFloat64x2Add:
+ case kFloat64x2Sub:
+ case kFloat64x2Mul:
+ case kFloat64x2Div:
+ case kFloat64x2Min:
+ case kFloat64x2Max: {
+ ASSERT(instr->left()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->left()->representation().IsFloat64x2());
+ ASSERT(instr->hydrogen()->right()->representation().IsFloat64x2());
+ XMMRegister left_reg = ToFloat64x2Register(instr->left());
+ XMMRegister right_reg = ToFloat64x2Register(instr->right());
+ switch (instr->op()) {
+ case kFloat64x2Add:
+ __ addpd(left_reg, right_reg);
+ break;
+ case kFloat64x2Sub:
+ __ subpd(left_reg, right_reg);
+ break;
+ case kFloat64x2Mul:
+ __ mulpd(left_reg, right_reg);
+ break;
+ case kFloat64x2Div:
+ __ divpd(left_reg, right_reg);
+ break;
+ case kFloat64x2Min:
+ __ minpd(left_reg, right_reg);
+ break;
+ case kFloat64x2Max:
+ __ maxpd(left_reg, right_reg);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return;
+ }
+ // Multiply both lanes by a scalar: splat the double, then packed multiply.
+ case kFloat64x2Scale: {
+ ASSERT(instr->left()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->left()->representation().IsFloat64x2());
+ ASSERT(instr->hydrogen()->right()->representation().IsDouble());
+ XMMRegister left_reg = ToFloat64x2Register(instr->left());
+ XMMRegister right_reg = ToDoubleRegister(instr->right());
+ // NOTE(review): unlike kFloat32x4Scale this splats in place, clobbering
+ // right_reg (an input) — assumes the allocator permits the right operand
+ // to be destroyed here; confirm against the LBinarySIMDOperation policy.
+ __ shufpd(right_reg, right_reg, 0x0);
+ __ mulpd(left_reg, right_reg);
+ return;
+ }
+ // shufps with an immediate selector; deoptimize if the selector is not a
+ // compile-time integer constant (no instruction form for a dynamic mask).
+ case kFloat32x4Shuffle: {
+ ASSERT(instr->left()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
+ if (instr->hydrogen()->right()->IsConstant() &&
+ HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
+ uint8_t select = static_cast<uint8_t>(value & 0xFF);
+ XMMRegister left_reg = ToFloat32x4Register(instr->left());
+ __ shufps(left_reg, left_reg, select);
+ return;
+ } else {
+ Comment(";;; deoptimize: non-constant selector for shuffle");
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+ }
+ // Integer shuffle via pshufd; same constant-selector restriction.
+ case kInt32x4Shuffle: {
+ ASSERT(instr->left()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
+ if (instr->hydrogen()->right()->IsConstant() &&
+ HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
+ uint8_t select = static_cast<uint8_t>(value & 0xFF);
+ XMMRegister left_reg = ToInt32x4Register(instr->left());
+ __ pshufd(left_reg, left_reg, select);
+ return;
+ } else {
+ Comment(";;; deoptimize: non-constant selector for shuffle");
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+ }
+ // Packed 32-bit shifts: use the immediate form for a constant count,
+ // otherwise move the count into an xmm scratch and use the register form.
+ case kInt32x4ShiftLeft:
+ case kInt32x4ShiftRight:
+ case kInt32x4ShiftRightArithmetic: {
+ ASSERT(instr->left()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
+ if (instr->hydrogen()->right()->IsConstant() &&
+ HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
+ uint8_t shift = static_cast<uint8_t>(value & 0xFF);
+ XMMRegister left_reg = ToInt32x4Register(instr->left());
+ switch (instr->op()) {
+ case kInt32x4ShiftLeft:
+ __ pslld(left_reg, shift);
+ break;
+ case kInt32x4ShiftRight:
+ __ psrld(left_reg, shift);
+ break;
+ case kInt32x4ShiftRightArithmetic:
+ __ psrad(left_reg, shift);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return;
+ } else {
+ XMMRegister left_reg = ToInt32x4Register(instr->left());
+ Register shift = ToRegister(instr->right());
+ XMMRegister xmm_scratch = double_scratch0();
+ // movd zero-extends the 32-bit count into the xmm scratch.
+ __ movd(xmm_scratch, shift);
+ switch (instr->op()) {
+ case kInt32x4ShiftLeft:
+ __ pslld(left_reg, xmm_scratch);
+ break;
+ case kInt32x4ShiftRight:
+ __ psrld(left_reg, xmm_scratch);
+ break;
+ case kInt32x4ShiftRightArithmetic:
+ __ psrad(left_reg, xmm_scratch);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return;
+ }
+ }
+ // Packed float comparisons.  The result is an int32x4 lane mask.  When the
+ // result register aliases the right operand, emit the commuted/negated
+ // cmpps predicate instead of inserting an extra move.
+ case kFloat32x4LessThan:
+ case kFloat32x4LessThanOrEqual:
+ case kFloat32x4Equal:
+ case kFloat32x4NotEqual:
+ case kFloat32x4GreaterThanOrEqual:
+ case kFloat32x4GreaterThan: {
+ ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
+ ASSERT(instr->hydrogen()->right()->representation().IsFloat32x4());
+ XMMRegister left_reg = ToFloat32x4Register(instr->left());
+ XMMRegister right_reg = ToFloat32x4Register(instr->right());
+ XMMRegister result_reg = ToInt32x4Register(instr->result());
+ switch (instr->op()) {
+ case kFloat32x4LessThan:
+ if (result_reg.is(left_reg)) {
+ __ cmpltps(result_reg, right_reg);
+ } else if (result_reg.is(right_reg)) {
+ __ cmpnltps(result_reg, left_reg);
+ } else {
+ __ movaps(result_reg, left_reg);
+ __ cmpltps(result_reg, right_reg);
+ }
+ break;
+ case kFloat32x4LessThanOrEqual:
+ if (result_reg.is(left_reg)) {
+ __ cmpleps(result_reg, right_reg);
+ } else if (result_reg.is(right_reg)) {
+ __ cmpnleps(result_reg, left_reg);
+ } else {
+ __ movaps(result_reg, left_reg);
+ __ cmpleps(result_reg, right_reg);
+ }
+ break;
+ case kFloat32x4Equal:
+ // Equality is symmetric, so the aliased-right case needs no negation.
+ if (result_reg.is(left_reg)) {
+ __ cmpeqps(result_reg, right_reg);
+ } else if (result_reg.is(right_reg)) {
+ __ cmpeqps(result_reg, left_reg);
+ } else {
+ __ movaps(result_reg, left_reg);
+ __ cmpeqps(result_reg, right_reg);
+ }
+ break;
+ case kFloat32x4NotEqual:
+ if (result_reg.is(left_reg)) {
+ __ cmpneqps(result_reg, right_reg);
+ } else if (result_reg.is(right_reg)) {
+ __ cmpneqps(result_reg, left_reg);
+ } else {
+ __ movaps(result_reg, left_reg);
+ __ cmpneqps(result_reg, right_reg);
+ }
+ break;
+ case kFloat32x4GreaterThanOrEqual:
+ // a >= b is emitted as NOT(a < b) / b < a depending on aliasing.
+ if (result_reg.is(left_reg)) {
+ __ cmpnltps(result_reg, right_reg);
+ } else if (result_reg.is(right_reg)) {
+ __ cmpltps(result_reg, left_reg);
+ } else {
+ __ movaps(result_reg, left_reg);
+ __ cmpnltps(result_reg, right_reg);
+ }
+ break;
+ case kFloat32x4GreaterThan:
+ // a > b is emitted as NOT(a <= b) / b <= a depending on aliasing.
+ if (result_reg.is(left_reg)) {
+ __ cmpnleps(result_reg, right_reg);
+ } else if (result_reg.is(right_reg)) {
+ __ cmpleps(result_reg, left_reg);
+ } else {
+ __ movaps(result_reg, left_reg);
+ __ cmpnleps(result_reg, right_reg);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return;
+ }
+ // Packed int32x4 logic, arithmetic, and comparisons.
+ case kInt32x4And:
+ case kInt32x4Or:
+ case kInt32x4Xor:
+ case kInt32x4Add:
+ case kInt32x4Sub:
+ case kInt32x4Mul:
+ case kInt32x4GreaterThan:
+ case kInt32x4Equal:
+ case kInt32x4LessThan: {
+ ASSERT(instr->left()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
+ ASSERT(instr->hydrogen()->right()->representation().IsInt32x4());
+ XMMRegister left_reg = ToInt32x4Register(instr->left());
+ XMMRegister right_reg = ToInt32x4Register(instr->right());
+ switch (instr->op()) {
+ case kInt32x4And:
+ __ andps(left_reg, right_reg);
+ break;
+ case kInt32x4Or:
+ __ orps(left_reg, right_reg);
+ break;
+ case kInt32x4Xor:
+ __ xorps(left_reg, right_reg);
+ break;
+ case kInt32x4Add:
+ __ paddd(left_reg, right_reg);
+ break;
+ case kInt32x4Sub:
+ __ psubd(left_reg, right_reg);
+ break;
+ case kInt32x4Mul:
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope scope(masm(), SSE4_1);
+ __ pmulld(left_reg, right_reg);
+ } else {
+ // Emulate 4x32-bit multiply with two pmuludq (even/odd lanes).
+ // The algorithm is from http://stackoverflow.com/questions/10500766/sse-multiplication-of-4-32-bit-integers
+ // NOTE(review): psrldq modifies right_reg (an input) in place —
+ // assumes the right operand is clobberable here; confirm.
+ XMMRegister xmm_scratch = xmm0;
+ __ movaps(xmm_scratch, left_reg);
+ __ pmuludq(left_reg, right_reg);
+ __ psrldq(xmm_scratch, 4);
+ __ psrldq(right_reg, 4);
+ __ pmuludq(xmm_scratch, right_reg);
+ __ pshufd(left_reg, left_reg, 8);
+ __ pshufd(xmm_scratch, xmm_scratch, 8);
+ // Interleave the even/odd products back into lane order.
+ // NOTE(review): macro-assembler spelling "punpackldq" (PUNPCKLDQ)
+ // is defined elsewhere in this patch.
+ __ punpackldq(left_reg, xmm_scratch);
+ }
+ break;
+ case kInt32x4GreaterThan:
+ __ pcmpgtd(left_reg, right_reg);
+ break;
+ case kInt32x4Equal:
+ __ pcmpeqd(left_reg, right_reg);
+ break;
+ case kInt32x4LessThan: {
+ // a < b has no direct instruction: compute b > a into scratch.
+ XMMRegister xmm_scratch = xmm0;
+ __ movaps(xmm_scratch, right_reg);
+ __ pcmpgtd(xmm_scratch, left_reg);
+ __ movaps(left_reg, xmm_scratch);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return;
+ }
+ // Replace one float lane.  Deliberate fall-throughs: imm8 counts up so it
+ // ends as the lane index (x=0, y=1, z=2, w=3).
+ case kFloat32x4WithW:
+ imm8++;
+ // Fall through.
+ case kFloat32x4WithZ:
+ imm8++;
+ // Fall through.
+ case kFloat32x4WithY:
+ imm8++;
+ // Fall through.
+ case kFloat32x4WithX: {
+ ASSERT(instr->left()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
+ ASSERT(instr->hydrogen()->right()->representation().IsDouble());
+ XMMRegister left_reg = ToFloat32x4Register(instr->left());
+ XMMRegister right_reg = ToDoubleRegister(instr->right());
+ XMMRegister xmm_scratch = xmm0;
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ cvtsd2ss(xmm_scratch, right_reg);
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ // insertps encodes the destination lane in imm8 bits [5:4].
+ imm8 = imm8 << 4;
+ CpuFeatureScope scope(masm(), SSE4_1);
+ __ insertps(left_reg, xmm_scratch, imm8);
+ } else {
+ // No insertps: spill to the stack, overwrite the lane, reload.
+ __ sub(esp, Immediate(kFloat32x4Size));
+ __ movups(Operand(esp, 0), left_reg);
+ __ movss(Operand(esp, imm8 * kFloatSize), xmm_scratch);
+ __ movups(left_reg, Operand(esp, 0));
+ __ add(esp, Immediate(kFloat32x4Size));
+ }
+ return;
+ }
+ // Replace the low double lane via a stack round-trip.
+ case kFloat64x2WithX: {
+ ASSERT(instr->left()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->left()->representation().IsFloat64x2());
+ ASSERT(instr->hydrogen()->right()->representation().IsDouble());
+ XMMRegister left_reg = ToFloat64x2Register(instr->left());
+ XMMRegister right_reg = ToDoubleRegister(instr->right());
+ __ sub(esp, Immediate(kFloat64x2Size));
+ __ movups(Operand(esp, 0), left_reg);
+ __ movsd(Operand(esp, 0 * kDoubleSize), right_reg);
+ __ movups(left_reg, Operand(esp, 0));
+ __ add(esp, Immediate(kFloat64x2Size));
+ return;
+ }
+ // Replace the high double lane via a stack round-trip.
+ case kFloat64x2WithY: {
+ ASSERT(instr->left()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->left()->representation().IsFloat64x2());
+ ASSERT(instr->hydrogen()->right()->representation().IsDouble());
+ XMMRegister left_reg = ToFloat64x2Register(instr->left());
+ XMMRegister right_reg = ToDoubleRegister(instr->right());
+ __ sub(esp, Immediate(kFloat64x2Size));
+ __ movups(Operand(esp, 0), left_reg);
+ __ movsd(Operand(esp, 1 * kDoubleSize), right_reg);
+ __ movups(left_reg, Operand(esp, 0));
+ __ add(esp, Immediate(kFloat64x2Size));
+ return;
+ }
+ // Build a float64x2 from two scalar doubles via the stack.
+ case kFloat64x2Constructor: {
+ ASSERT(instr->hydrogen()->left()->representation().IsDouble());
+ ASSERT(instr->hydrogen()->right()->representation().IsDouble());
+ XMMRegister left_reg = ToDoubleRegister(instr->left());
+ XMMRegister right_reg = ToDoubleRegister(instr->right());
+ XMMRegister result_reg = ToFloat64x2Register(instr->result());
+ __ sub(esp, Immediate(kFloat64x2Size));
+ __ movsd(Operand(esp, 0 * kDoubleSize), left_reg);
+ __ movsd(Operand(esp, 1 * kDoubleSize), right_reg);
+ __ movups(result_reg, Operand(esp, 0));
+ __ add(esp, Immediate(kFloat64x2Size));
+ return;
+ }
+ // Replace one int lane; fall-throughs accumulate the lane index as above.
+ case kInt32x4WithW:
+ imm8++;
+ // Fall through.
+ case kInt32x4WithZ:
+ imm8++;
+ // Fall through.
+ case kInt32x4WithY:
+ imm8++;
+ // Fall through.
+ case kInt32x4WithX: {
+ ASSERT(instr->left()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
+ ASSERT(instr->hydrogen()->right()->representation().IsInteger32());
+ XMMRegister left_reg = ToInt32x4Register(instr->left());
+ Register right_reg = ToRegister(instr->right());
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope scope(masm(), SSE4_1);
+ __ pinsrd(left_reg, right_reg, imm8);
+ } else {
+ // No pinsrd: spill to the stack, overwrite the lane, reload.
+ __ sub(esp, Immediate(kInt32x4Size));
+ __ movdqu(Operand(esp, 0), left_reg);
+ __ mov(Operand(esp, imm8 * kFloatSize), right_reg);
+ __ movdqu(left_reg, Operand(esp, 0));
+ __ add(esp, Immediate(kInt32x4Size));
+ }
+ return;
+ }
+ // Replace one boolean-flag lane (all-ones for true, zero for false).
+ // Only a statically Boolean-typed right operand is handled; anything
+ // else deoptimizes.  Fall-throughs accumulate the lane index as above.
+ case kInt32x4WithFlagW:
+ imm8++;
+ // Fall through.
+ case kInt32x4WithFlagZ:
+ imm8++;
+ // Fall through.
+ case kInt32x4WithFlagY:
+ imm8++;
+ // Fall through.
+ case kInt32x4WithFlagX: {
+ ASSERT(instr->left()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
+ ASSERT(instr->hydrogen()->right()->representation().IsTagged());
+ HType type = instr->hydrogen()->right()->type();
+ XMMRegister left_reg = ToInt32x4Register(instr->left());
+ Register right_reg = ToRegister(instr->right());
+ Label load_false_value, done;
+ if (type.IsBoolean()) {
+ __ sub(esp, Immediate(kInt32x4Size));
+ __ movups(Operand(esp, 0), left_reg);
+ __ CompareRoot(right_reg, Heap::kTrueValueRootIndex);
+ __ j(not_equal, &load_false_value, Label::kNear);
+ } else {
+ Comment(";;; deoptimize: other types for int32x4.withFlagX/Y/Z/W.");
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+ // load true value.
+ __ mov(Operand(esp, imm8 * kFloatSize), Immediate(0xFFFFFFFF));
+ __ jmp(&done, Label::kNear);
+ __ bind(&load_false_value);
+ __ mov(Operand(esp, imm8 * kFloatSize), Immediate(0x0));
+ __ bind(&done);
+ __ movups(left_reg, Operand(esp, 0));
+ __ add(esp, Immediate(kInt32x4Size));
+ return;
+ }
+ default:
+ UNREACHABLE();
+ return;
+ }
+}
+
+
+// Emits ia32/SSE code for a three-operand SIMD operation.  xmm0 is used as
+// a scratch register throughout, so inputs are never allocated to xmm0 here.
+void LCodeGen::DoTernarySIMDOperation(LTernarySIMDOperation* instr) {
+ switch (instr->op()) {
+ // Bitwise blend: result = (mask & trueValue) | (~mask & falseValue).
+ case kInt32x4Select: {
+ ASSERT(instr->hydrogen()->first()->representation().IsInt32x4());
+ ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
+ ASSERT(instr->hydrogen()->third()->representation().IsFloat32x4());
+
+ XMMRegister mask_reg = ToInt32x4Register(instr->first());
+ XMMRegister left_reg = ToFloat32x4Register(instr->second());
+ XMMRegister right_reg = ToFloat32x4Register(instr->third());
+ XMMRegister result_reg = ToFloat32x4Register(instr->result());
+ XMMRegister temp_reg = xmm0;
+
+ // Copy mask.
+ __ movaps(temp_reg, mask_reg);
+ // Invert it.
+ __ notps(temp_reg);
+ // temp_reg = temp_reg & falseValue.
+ __ andps(temp_reg, right_reg);
+
+ // The remaining AND/OR sequence depends on which input (if any) the
+ // result register aliases; avoid clobbering mask/left before use.
+ if (!result_reg.is(mask_reg)) {
+ if (result_reg.is(left_reg)) {
+ // result_reg = result_reg & trueValue.
+ __ andps(result_reg, mask_reg);
+ // out = result_reg | temp_reg.
+ __ orps(result_reg, temp_reg);
+ } else {
+ __ movaps(result_reg, mask_reg);
+ // result_reg = result_reg & trueValue.
+ __ andps(result_reg, left_reg);
+ // out = result_reg | temp_reg.
+ __ orps(result_reg, temp_reg);
+ }
+ } else {
+ // result_reg = result_reg & trueValue.
+ __ andps(result_reg, left_reg);
+ // out = result_reg | temp_reg.
+ __ orps(result_reg, temp_reg);
+ }
+ return;
+ }
+ // Two-vector shuffle (shufps) with an immediate selector; deoptimize if
+ // the selector is not a compile-time integer constant.
+ case kFloat32x4ShuffleMix: {
+ ASSERT(instr->first()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->first()->representation().IsFloat32x4());
+ ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
+ ASSERT(instr->hydrogen()->third()->representation().IsInteger32());
+ if (instr->hydrogen()->third()->IsConstant() &&
+ HConstant::cast(instr->hydrogen()->third())->HasInteger32Value()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(instr->third()));
+ uint8_t select = static_cast<uint8_t>(value & 0xFF);
+ XMMRegister first_reg = ToFloat32x4Register(instr->first());
+ XMMRegister second_reg = ToFloat32x4Register(instr->second());
+ __ shufps(first_reg, second_reg, select);
+ return;
+ } else {
+ Comment(";;; deoptimize: non-constant selector for shuffle");
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+ }
+ // clamp(value, lower, upper): min with the upper bound, then max with
+ // the lower bound, in place.
+ case kFloat32x4Clamp: {
+ ASSERT(instr->first()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->first()->representation().IsFloat32x4());
+ ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
+ ASSERT(instr->hydrogen()->third()->representation().IsFloat32x4());
+
+ XMMRegister value_reg = ToFloat32x4Register(instr->first());
+ XMMRegister lower_reg = ToFloat32x4Register(instr->second());
+ XMMRegister upper_reg = ToFloat32x4Register(instr->third());
+ __ minps(value_reg, upper_reg);
+ __ maxps(value_reg, lower_reg);
+ return;
+ }
+ // Double-lane variant of the clamp above.
+ case kFloat64x2Clamp: {
+ ASSERT(instr->first()->Equals(instr->result()));
+ ASSERT(instr->hydrogen()->first()->representation().IsFloat64x2());
+ ASSERT(instr->hydrogen()->second()->representation().IsFloat64x2());
+ ASSERT(instr->hydrogen()->third()->representation().IsFloat64x2());
+
+ XMMRegister value_reg = ToFloat64x2Register(instr->first());
+ XMMRegister lower_reg = ToFloat64x2Register(instr->second());
+ XMMRegister upper_reg = ToFloat64x2Register(instr->third());
+ __ minpd(value_reg, upper_reg);
+ __ maxpd(value_reg, lower_reg);
+ return;
+ }
+ default:
+ UNREACHABLE();
+ return;
+ }
+}
+
+
+// Emits ia32/SSE code for a four-operand SIMD operation (the SIMD type
+// constructors).  All three cases assemble the vector lane-by-lane in a
+// 16-byte stack scratch area and load it with an unaligned movups.
+void LCodeGen::DoQuarternarySIMDOperation(LQuarternarySIMDOperation* instr) {
+ switch (instr->op()) {
+ // float32x4(x, y, z, w): narrow each double to float via xmm0, store the
+ // four lanes to the stack, then load the packed result.
+ case kFloat32x4Constructor: {
+ ASSERT(instr->hydrogen()->x()->representation().IsDouble());
+ ASSERT(instr->hydrogen()->y()->representation().IsDouble());
+ ASSERT(instr->hydrogen()->z()->representation().IsDouble());
+ ASSERT(instr->hydrogen()->w()->representation().IsDouble());
+ XMMRegister x_reg = ToDoubleRegister(instr->x());
+ XMMRegister y_reg = ToDoubleRegister(instr->y());
+ XMMRegister z_reg = ToDoubleRegister(instr->z());
+ XMMRegister w_reg = ToDoubleRegister(instr->w());
+ XMMRegister result_reg = ToFloat32x4Register(instr->result());
+ __ sub(esp, Immediate(kFloat32x4Size));
+ __ xorps(xmm0, xmm0);
+ __ cvtsd2ss(xmm0, x_reg);
+ __ movss(Operand(esp, 0 * kFloatSize), xmm0);
+ __ xorps(xmm0, xmm0);
+ __ cvtsd2ss(xmm0, y_reg);
+ __ movss(Operand(esp, 1 * kFloatSize), xmm0);
+ __ xorps(xmm0, xmm0);
+ __ cvtsd2ss(xmm0, z_reg);
+ __ movss(Operand(esp, 2 * kFloatSize), xmm0);
+ __ xorps(xmm0, xmm0);
+ __ cvtsd2ss(xmm0, w_reg);
+ __ movss(Operand(esp, 3 * kFloatSize), xmm0);
+ __ movups(result_reg, Operand(esp, 0 * kFloatSize));
+ __ add(esp, Immediate(kFloat32x4Size));
+ return;
+ }
+ // int32x4(x, y, z, w): store the four GP registers and load packed.
+ case kInt32x4Constructor: {
+ ASSERT(instr->hydrogen()->x()->representation().IsInteger32());
+ ASSERT(instr->hydrogen()->y()->representation().IsInteger32());
+ ASSERT(instr->hydrogen()->z()->representation().IsInteger32());
+ ASSERT(instr->hydrogen()->w()->representation().IsInteger32());
+ Register x_reg = ToRegister(instr->x());
+ Register y_reg = ToRegister(instr->y());
+ Register z_reg = ToRegister(instr->z());
+ Register w_reg = ToRegister(instr->w());
+ XMMRegister result_reg = ToInt32x4Register(instr->result());
+ __ sub(esp, Immediate(kInt32x4Size));
+ __ mov(Operand(esp, 0 * kInt32Size), x_reg);
+ __ mov(Operand(esp, 1 * kInt32Size), y_reg);
+ __ mov(Operand(esp, 2 * kInt32Size), z_reg);
+ __ mov(Operand(esp, 3 * kInt32Size), w_reg);
+ __ movups(result_reg, Operand(esp, 0 * kInt32Size));
+ __ add(esp, Immediate(kInt32x4Size));
+ return;
+ }
+ // int32x4.bool(x, y, z, w): each lane becomes -1 (all ones) if its tagged
+ // argument is the true value, else 0.  Non-Boolean-typed arguments
+ // deoptimize before any code is emitted.
+ case kInt32x4Bool: {
+ ASSERT(instr->hydrogen()->x()->representation().IsTagged());
+ ASSERT(instr->hydrogen()->y()->representation().IsTagged());
+ ASSERT(instr->hydrogen()->z()->representation().IsTagged());
+ ASSERT(instr->hydrogen()->w()->representation().IsTagged());
+ HType x_type = instr->hydrogen()->x()->type();
+ HType y_type = instr->hydrogen()->y()->type();
+ HType z_type = instr->hydrogen()->z()->type();
+ HType w_type = instr->hydrogen()->w()->type();
+ if (!x_type.IsBoolean() || !y_type.IsBoolean() ||
+ !z_type.IsBoolean() || !w_type.IsBoolean()) {
+ Comment(";;; deoptimize: other types for int32x4.bool.");
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+ XMMRegister result_reg = ToInt32x4Register(instr->result());
+ Register x_reg = ToRegister(instr->x());
+ Register y_reg = ToRegister(instr->y());
+ Register z_reg = ToRegister(instr->z());
+ Register w_reg = ToRegister(instr->w());
+ Label load_false_x, done_x, load_false_y, done_y,
+ load_false_z, done_z, load_false_w, done_w;
+ __ sub(esp, Immediate(kInt32x4Size));
+
+ __ CompareRoot(x_reg, Heap::kTrueValueRootIndex);
+ __ j(not_equal, &load_false_x, Label::kNear);
+ __ mov(Operand(esp, 0 * kInt32Size), Immediate(-1));
+ __ jmp(&done_x, Label::kNear);
+ __ bind(&load_false_x);
+ __ mov(Operand(esp, 0 * kInt32Size), Immediate(0x0));
+ __ bind(&done_x);
+
+ __ CompareRoot(y_reg, Heap::kTrueValueRootIndex);
+ __ j(not_equal, &load_false_y, Label::kNear);
+ __ mov(Operand(esp, 1 * kInt32Size), Immediate(-1));
+ __ jmp(&done_y, Label::kNear);
+ __ bind(&load_false_y);
+ __ mov(Operand(esp, 1 * kInt32Size), Immediate(0x0));
+ __ bind(&done_y);
+
+ __ CompareRoot(z_reg, Heap::kTrueValueRootIndex);
+ __ j(not_equal, &load_false_z, Label::kNear);
+ __ mov(Operand(esp, 2 * kInt32Size), Immediate(-1));
+ __ jmp(&done_z, Label::kNear);
+ __ bind(&load_false_z);
+ __ mov(Operand(esp, 2 * kInt32Size), Immediate(0x0));
+ __ bind(&done_z);
+
+ __ CompareRoot(w_reg, Heap::kTrueValueRootIndex);
+ __ j(not_equal, &load_false_w, Label::kNear);
+ __ mov(Operand(esp, 3 * kInt32Size), Immediate(-1));
+ __ jmp(&done_w, Label::kNear);
+ __ bind(&load_false_w);
+ __ mov(Operand(esp, 3 * kInt32Size), Immediate(0x0));
+ __ bind(&done_w);
+
+ __ movups(result_reg, Operand(esp, 0));
+ __ add(esp, Immediate(kInt32x4Size));
+ return;
+ }
+ default:
+ UNREACHABLE();
+ return;
+ }
+}
+
+
#undef __
} } // namespace v8::internal
Operand ToOperand(LOperand* op) const;
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
+ XMMRegister ToFloat32x4Register(LOperand* op) const;
+ XMMRegister ToFloat64x2Register(LOperand* op) const;
+ XMMRegister ToInt32x4Register(LOperand* op) const;
+ XMMRegister ToSIMD128Register(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
bool IsSmi(LConstantOperand* op) const;
IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
+ void DoDeferredFloat32x4ToTagged(LInstruction* instr);
+ void DoDeferredInt32x4ToTagged(LInstruction* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
Register object,
Register index);
+ void DoDeferredSIMD128ToTagged(LInstruction* instr, Runtime::FunctionId id);
+
+ template<class T>
+ void HandleTaggedToSIMD128(LTaggedToSIMD128* instr);
+ template<class T>
+ void HandleSIMD128ToTagged(LSIMD128ToTagged* instr);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
+ XMMRegister ToFloat32x4Register(int index) const;
+ XMMRegister ToFloat64x2Register(int index) const;
+ XMMRegister ToInt32x4Register(int index) const;
+ XMMRegister ToSIMD128Register(int index) const;
int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
int32_t ToInteger32(LConstantOperand* op) const;
ExternalReference ToExternalReference(LConstantOperand* op) const;
void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ void HandleExternalArrayOpRequiresTemp(LOperand* key,
+ Representation key_representation,
+ ElementsKind elements_kind);
+ template<class T>
+ void DoLoadKeyedSIMD128ExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+ template<class T>
+ void DoStoreKeyedSIMD128ExternalArray(LStoreKeyed* instr);
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
__ movsd(xmm0, src);
__ movsd(dst, xmm0);
}
+ } else if (source->IsSIMD128Register()) {
+ XMMRegister src = cgen_->ToSIMD128Register(source);
+ if (destination->IsSIMD128Register()) {
+ __ movaps(cgen_->ToSIMD128Register(destination), src);
+ } else {
+ ASSERT(destination->IsSIMD128StackSlot());
+ __ movups(cgen_->ToOperand(destination), src);
+ }
+ } else if (source->IsSIMD128StackSlot()) {
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsSIMD128Register()) {
+ __ movups(cgen_->ToSIMD128Register(destination), src);
+ } else {
+ ASSERT(destination->IsSIMD128StackSlot());
+ __ movups(xmm0, src);
+ __ movups(cgen_->ToOperand(destination), xmm0);
+ }
} else {
UNREACHABLE();
}
__ mov(dst1, tmp);
__ movsd(src0, xmm0);
+ } else if ((source->IsSIMD128StackSlot() &&
+ destination->IsSIMD128StackSlot())) {
+ // Swap two XMM stack slots.
+ Operand src = cgen_->ToOperand(source);
+ Operand dst = cgen_->ToOperand(destination);
+ Register tmp = EnsureTempRegister();
+ __ movups(xmm0, src);
+ for (int offset = 0; offset < kSIMD128Size; offset += kPointerSize) {
+ __ mov(tmp, Operand(dst, offset));
+ __ mov(Operand(src, offset), tmp);
+ }
+ __ movups(dst, xmm0);
+
+ } else if (source->IsSIMD128Register() && destination->IsSIMD128Register()) {
+ // Swap two XMM registers.
+ XMMRegister source_reg = cgen_->ToSIMD128Register(source);
+ XMMRegister destination_reg = cgen_->ToSIMD128Register(destination);
+ __ movaps(xmm0, source_reg);
+ __ movaps(source_reg, destination_reg);
+ __ movaps(destination_reg, xmm0);
+
+ } else if (source->IsSIMD128Register() || destination->IsSIMD128Register()) {
+ // Swap a xmm register and a xmm stack slot.
+ ASSERT((source->IsSIMD128Register() &&
+ destination->IsSIMD128StackSlot()) ||
+ (source->IsSIMD128StackSlot() &&
+ destination->IsSIMD128Register()));
+ XMMRegister reg = cgen_->ToSIMD128Register(source->IsSIMD128Register()
+ ? source
+ : destination);
+ LOperand* other = source->IsSIMD128Register() ? destination : source;
+ ASSERT(other->IsSIMD128StackSlot());
+ Operand other_operand = cgen_->ToOperand(other);
+ __ movups(xmm0, other_operand);
+ __ movups(other_operand, reg);
+ __ movaps(reg, xmm0);
+
} else {
// No other combinations are possible.
UNREACHABLE();
int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
- // Skip a slot if for a double-width slot.
- if (kind == DOUBLE_REGISTERS) {
- spill_slot_count_++;
- spill_slot_count_ |= 1;
- num_double_slots_++;
+ // Allocate the next spill slot index for the given register kind, padding
+ // so that wider values occupy enough consecutive pointer-size slots.
+ switch (kind) {
+ case GENERAL_REGISTERS: return spill_slot_count_++;
+ case DOUBLE_REGISTERS: {
+ // Skip a slot if for a double-width slot.
+ spill_slot_count_++;
+ spill_slot_count_ |= 1;
+ num_double_slots_++;
+ return spill_slot_count_++;
+ }
+ case FLOAT32x4_REGISTERS:
+ case FLOAT64x2_REGISTERS:
+ case INT32x4_REGISTERS: {
+ // Skip three slots if for a quad-width slot.
+ // A 128-bit value spans four pointer-size slots on ia32.
+ spill_slot_count_ += 3;
+ num_double_slots_ += 2; // for dynamic frame alignment
+ return spill_slot_count_++;
+ }
+ default:
+ UNREACHABLE();
+ return -1;
}
- return spill_slot_count_++;
}
LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
int index = GetNextSpillIndex(kind);
- if (kind == DOUBLE_REGISTERS) {
- return LDoubleStackSlot::Create(index, zone());
- } else {
- ASSERT(kind == GENERAL_REGISTERS);
- return LStackSlot::Create(index, zone());
+ // Wrap the freshly reserved index in the stack-slot operand matching the
+ // register kind, covering the three new 128-bit SIMD kinds.
+ switch (kind) {
+ case GENERAL_REGISTERS: return LStackSlot::Create(index, zone());
+ case DOUBLE_REGISTERS: return LDoubleStackSlot::Create(index, zone());
+ case FLOAT32x4_REGISTERS: return LFloat32x4StackSlot::Create(index, zone());
+ case FLOAT64x2_REGISTERS: return LFloat64x2StackSlot::Create(index, zone());
+ case INT32x4_REGISTERS: return LInt32x4StackSlot::Create(index, zone());
+ default:
+ UNREACHABLE();
+ return NULL;
}
}
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+
LOperand* temp = !easy_case && expected.NeedsMap() ? TempRegister() : NULL;
LInstruction* branch = new(zone()) LBranch(UseRegister(value), temp);
if (!easy_case &&
DefineAsRegister(new(zone()) LNumberUntagD(value, temp));
if (!val->representation().IsSmi()) result = AssignEnvironment(result);
return result;
+ } else if (to.IsSIMD128()) {
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+ LTaggedToSIMD128* res = new(zone()) LTaggedToSIMD128(value, temp, to);
+ return AssignEnvironment(DefineAsRegister(res));
} else if (to.IsSmi()) {
LOperand* value = UseRegister(val);
if (val->type().IsSmi()) {
return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
}
}
+ } else if (from.IsSIMD128()) {
+ ASSERT(to.IsTagged());
+ info()->MarkAsDeferredCalling();
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+ LOperand* temp2 = TempRegister();
+
+ // Make sure that temp and result_temp are different registers.
+ LUnallocated* result_temp = TempRegister();
+ LSIMD128ToTagged* result = new(zone()) LSIMD128ToTagged(value, temp, temp2);
+ return AssignPointerMap(Define(result, result_temp));
}
UNREACHABLE();
return NULL;
(instr->representation().IsInteger32() &&
!(IsDoubleOrFloatElementsKind(instr->elements_kind()))) ||
(instr->representation().IsDouble() &&
- (IsDoubleOrFloatElementsKind(instr->elements_kind()))));
+ (IsDoubleOrFloatElementsKind(instr->elements_kind()))) ||
+ (CpuFeatures::SupportsSIMD128InCrankshaft()
+ ? instr->representation().IsFloat32x4()
+ : instr->representation().IsTagged() &&
+ (IsFloat32x4ElementsKind(instr->elements_kind()))) ||
+ (CpuFeatures::SupportsSIMD128InCrankshaft()
+ ? instr->representation().IsFloat64x2()
+ : instr->representation().IsTagged() &&
+ (IsFloat64x2ElementsKind(instr->elements_kind()))) ||
+ (CpuFeatures::SupportsSIMD128InCrankshaft()
+ ? instr->representation().IsInt32x4()
+ : instr->representation().IsTagged() &&
+ (IsInt32x4ElementsKind(instr->elements_kind()))));
LOperand* backing_store = UseRegister(instr->elements());
result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
}
(instr->value()->representation().IsInteger32() &&
!IsDoubleOrFloatElementsKind(elements_kind)) ||
(instr->value()->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(elements_kind)));
+ IsDoubleOrFloatElementsKind(elements_kind)) ||
+ (CpuFeatures::SupportsSIMD128InCrankshaft()
+ ? instr->value()->representation().IsFloat32x4()
+ : instr->value()->representation().IsTagged() &&
+ IsFloat32x4ElementsKind(elements_kind)) ||
+ (CpuFeatures::SupportsSIMD128InCrankshaft()
+ ? instr->value()->representation().IsFloat64x2()
+ : instr->value()->representation().IsTagged() &&
+ IsFloat64x2ElementsKind(elements_kind)) ||
+ (CpuFeatures::SupportsSIMD128InCrankshaft()
+ ? instr->value()->representation().IsInt32x4()
+ : instr->value()->representation().IsTagged() &&
+ IsInt32x4ElementsKind(elements_kind)));
ASSERT((instr->is_fixed_typed_array() &&
instr->elements()->representation().IsTagged()) ||
(instr->is_external() &&
}
+const char* LNullarySIMDOperation::Mnemonic() const {
+ switch (op()) {
+#define SIMD_NULLARY_OPERATION_CASE_ITEM(module, function, name, p4) \
+ case k##name: \
+ return #module "-" #function;
+SIMD_NULLARY_OPERATIONS(SIMD_NULLARY_OPERATION_CASE_ITEM)
+#undef SIMD_NULLARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoNullarySIMDOperation(
+ HNullarySIMDOperation* instr) {
+ LNullarySIMDOperation* result =
+ new(zone()) LNullarySIMDOperation(instr->op());
+ switch (instr->op()) {
+#define SIMD_NULLARY_OPERATION_CASE_ITEM(module, function, name, p4) \
+ case k##name:
+SIMD_NULLARY_OPERATIONS(SIMD_NULLARY_OPERATION_CASE_ITEM)
+#undef SIMD_NULLARY_OPERATION_CASE_ITEM
+ return DefineAsRegister(result);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LUnarySIMDOperation::Mnemonic() const {
+ switch (op()) {
+ case kSIMD128Change: return "SIMD128-change";
+#define SIMD_UNARY_OPERATION_CASE_ITEM(module, function, name, p4, p5) \
+ case k##name: \
+ return #module "-" #function;
+SIMD_UNARY_OPERATIONS(SIMD_UNARY_OPERATION_CASE_ITEM)
+SIMD_UNARY_OPERATIONS_FOR_PROPERTY_ACCESS(SIMD_UNARY_OPERATION_CASE_ITEM)
+#undef SIMD_UNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoUnarySIMDOperation(HUnarySIMDOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LUnarySIMDOperation* result =
+ new(zone()) LUnarySIMDOperation(input, instr->op());
+ switch (instr->op()) {
+ case kSIMD128Change:
+ return AssignEnvironment(DefineAsRegister(result));
+ case kFloat32x4Abs:
+ case kFloat32x4Neg:
+ case kFloat32x4Reciprocal:
+ case kFloat32x4ReciprocalSqrt:
+ case kFloat32x4Sqrt:
+ case kFloat64x2Abs:
+ case kFloat64x2Neg:
+ case kFloat64x2Sqrt:
+ case kInt32x4Neg:
+ case kInt32x4Not:
+ return DefineSameAsFirst(result);
+ case kFloat32x4BitsToInt32x4:
+ case kFloat32x4ToInt32x4:
+ case kInt32x4BitsToFloat32x4:
+ case kInt32x4ToFloat32x4:
+ case kFloat32x4Splat:
+ case kInt32x4Splat:
+ case kFloat32x4GetSignMask:
+ case kFloat32x4GetX:
+ case kFloat32x4GetY:
+ case kFloat32x4GetZ:
+ case kFloat32x4GetW:
+ case kFloat64x2GetSignMask:
+ case kFloat64x2GetX:
+ case kFloat64x2GetY:
+ case kInt32x4GetSignMask:
+ case kInt32x4GetX:
+ case kInt32x4GetY:
+ case kInt32x4GetZ:
+ case kInt32x4GetW:
+ case kInt32x4GetFlagX:
+ case kInt32x4GetFlagY:
+ case kInt32x4GetFlagZ:
+ case kInt32x4GetFlagW:
+ return DefineAsRegister(result);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LBinarySIMDOperation::Mnemonic() const {
+ switch (op()) {
+#define SIMD_BINARY_OPERATION_CASE_ITEM(module, function, name, p4, p5, p6) \
+ case k##name: \
+ return #module "-" #function;
+SIMD_BINARY_OPERATIONS(SIMD_BINARY_OPERATION_CASE_ITEM)
+#undef SIMD_BINARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoBinarySIMDOperation(
+ HBinarySIMDOperation* instr) {
+ switch (instr->op()) {
+ case kFloat32x4Add:
+ case kFloat32x4Div:
+ case kFloat32x4Max:
+ case kFloat32x4Min:
+ case kFloat32x4Mul:
+ case kFloat32x4Sub:
+ case kFloat32x4Scale:
+ case kFloat32x4WithX:
+ case kFloat32x4WithY:
+ case kFloat32x4WithZ:
+ case kFloat32x4WithW:
+ case kFloat64x2Add:
+ case kFloat64x2Div:
+ case kFloat64x2Max:
+ case kFloat64x2Min:
+ case kFloat64x2Mul:
+ case kFloat64x2Sub:
+ case kFloat64x2Scale:
+ case kFloat64x2WithX:
+ case kFloat64x2WithY:
+ case kInt32x4Add:
+ case kInt32x4And:
+ case kInt32x4Mul:
+ case kInt32x4Or:
+ case kInt32x4Sub:
+ case kInt32x4Xor:
+ case kInt32x4WithX:
+ case kInt32x4WithY:
+ case kInt32x4WithZ:
+ case kInt32x4WithW:
+ case kInt32x4WithFlagX:
+ case kInt32x4WithFlagY:
+ case kInt32x4WithFlagZ:
+ case kInt32x4WithFlagW:
+ case kInt32x4GreaterThan:
+ case kInt32x4Equal:
+ case kInt32x4LessThan: {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LBinarySIMDOperation* result =
+ new(zone()) LBinarySIMDOperation(left, right, instr->op());
+ if (instr->op() == kInt32x4WithFlagX ||
+ instr->op() == kInt32x4WithFlagY ||
+ instr->op() == kInt32x4WithFlagZ ||
+ instr->op() == kInt32x4WithFlagW) {
+ return AssignEnvironment(DefineSameAsFirst(result));
+ } else {
+ return DefineSameAsFirst(result);
+ }
+ }
+ case kFloat64x2Constructor: {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LBinarySIMDOperation* result =
+ new(zone()) LBinarySIMDOperation(left, right, instr->op());
+ return DefineAsRegister(result);
+ }
+ case kFloat32x4Shuffle:
+ case kInt32x4Shuffle:
+ case kInt32x4ShiftLeft:
+ case kInt32x4ShiftRight:
+ case kInt32x4ShiftRightArithmetic: {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstant(instr->right());
+ LBinarySIMDOperation* result =
+ new(zone()) LBinarySIMDOperation(left, right, instr->op());
+ return AssignEnvironment(DefineSameAsFirst(result));
+ }
+ case kFloat32x4LessThan:
+ case kFloat32x4LessThanOrEqual:
+ case kFloat32x4Equal:
+ case kFloat32x4NotEqual:
+ case kFloat32x4GreaterThanOrEqual:
+ case kFloat32x4GreaterThan: {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LBinarySIMDOperation* result =
+ new(zone()) LBinarySIMDOperation(left, right, instr->op());
+ return DefineAsRegister(result);
+ }
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LTernarySIMDOperation::Mnemonic() const {
+ switch (op()) {
+#define SIMD_TERNARY_OPERATION_CASE_ITEM(module, function, name, p4, p5, p6, \
+ p7) \
+ case k##name: \
+ return #module "-" #function;
+SIMD_TERNARY_OPERATIONS(SIMD_TERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_TERNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoTernarySIMDOperation(
+ HTernarySIMDOperation* instr) {
+ LOperand* first = UseRegisterAtStart(instr->first());
+ LOperand* second = UseRegisterAtStart(instr->second());
+ LOperand* third = instr->op() == kFloat32x4ShuffleMix
+ ? UseOrConstant(instr->third())
+ : UseRegisterAtStart(instr->third());
+ LTernarySIMDOperation* result =
+ new(zone()) LTernarySIMDOperation(first, second, third, instr->op());
+ switch (instr->op()) {
+ case kInt32x4Select: {
+ return DefineAsRegister(result);
+ }
+ case kFloat32x4ShuffleMix: {
+ return AssignEnvironment(DefineSameAsFirst(result));
+ }
+ case kFloat32x4Clamp:
+ case kFloat64x2Clamp: {
+ return DefineSameAsFirst(result);
+ }
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LQuarternarySIMDOperation::Mnemonic() const {
+ switch (op()) {
+#define SIMD_QUARTERNARY_OPERATION_CASE_ITEM(module, function, name, p4, p5, \
+ p6, p7, p8) \
+ case k##name: \
+ return #module "-" #function;
+SIMD_QUARTERNARY_OPERATIONS(SIMD_QUARTERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_QUARTERNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoQuarternarySIMDOperation(
+ HQuarternarySIMDOperation* instr) {
+ LOperand* x = UseRegisterAtStart(instr->x());
+ LOperand* y = UseRegisterAtStart(instr->y());
+ LOperand* z = UseRegisterAtStart(instr->z());
+ LOperand* w = UseRegisterAtStart(instr->w());
+ LQuarternarySIMDOperation* result =
+ new(zone()) LQuarternarySIMDOperation(x, y, z, w, instr->op());
+ if (instr->op() == kInt32x4Bool) {
+ return AssignEnvironment(DefineAsRegister(result));
+ } else {
+ return DefineAsRegister(result);
+ }
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
V(MathSqrt) \
V(ModByConstI) \
V(ModByPowerOf2I) \
+ V(NullarySIMDOperation) \
+ V(UnarySIMDOperation) \
+ V(BinarySIMDOperation) \
+ V(TernarySIMDOperation) \
+ V(QuarternarySIMDOperation) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
V(NumberTagI) \
V(NumberTagU) \
V(NumberUntagD) \
+ V(SIMD128ToTagged) \
+ V(TaggedToSIMD128) \
V(OsrEntry) \
V(Parameter) \
V(Power) \
};
+class LNullarySIMDOperation V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ explicit LNullarySIMDOperation(BuiltinFunctionId op)
+ : op_(op) {
+ }
+
+ BuiltinFunctionId op() const { return op_; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kNullarySIMDOperation;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+ static LNullarySIMDOperation* cast(LInstruction* instr) {
+ ASSERT(instr->IsNullarySIMDOperation());
+ return reinterpret_cast<LNullarySIMDOperation*>(instr);
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(NullarySIMDOperation)
+
+ private:
+ BuiltinFunctionId op_;
+};
+
+
+class LUnarySIMDOperation V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LUnarySIMDOperation(LOperand* value, BuiltinFunctionId op)
+ : op_(op) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ BuiltinFunctionId op() const { return op_; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kUnarySIMDOperation;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+ static LUnarySIMDOperation* cast(LInstruction* instr) {
+ ASSERT(instr->IsUnarySIMDOperation());
+ return reinterpret_cast<LUnarySIMDOperation*>(instr);
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(UnarySIMDOperation)
+
+ private:
+ BuiltinFunctionId op_;
+};
+
+
+class LBinarySIMDOperation V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LBinarySIMDOperation(LOperand* left, LOperand* right, BuiltinFunctionId op)
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ BuiltinFunctionId op() const { return op_; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kBinarySIMDOperation;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+ static LBinarySIMDOperation* cast(LInstruction* instr) {
+ ASSERT(instr->IsBinarySIMDOperation());
+ return reinterpret_cast<LBinarySIMDOperation*>(instr);
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(BinarySIMDOperation)
+
+ private:
+ BuiltinFunctionId op_;
+};
+
+
+class LTernarySIMDOperation V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LTernarySIMDOperation(LOperand* first, LOperand* second, LOperand* third,
+ BuiltinFunctionId op)
+ : op_(op) {
+ inputs_[0] = first;
+ inputs_[1] = second;
+ inputs_[2] = third;
+ }
+
+ LOperand* first() { return inputs_[0]; }
+ LOperand* second() { return inputs_[1]; }
+ LOperand* third() { return inputs_[2]; }
+ BuiltinFunctionId op() const { return op_; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kTernarySIMDOperation;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+ static LTernarySIMDOperation* cast(LInstruction* instr) {
+ ASSERT(instr->IsTernarySIMDOperation());
+ return reinterpret_cast<LTernarySIMDOperation*>(instr);
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(TernarySIMDOperation)
+
+ private:
+ BuiltinFunctionId op_;
+};
+
+
+class LQuarternarySIMDOperation V8_FINAL
+ : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LQuarternarySIMDOperation(LOperand* x, LOperand* y, LOperand* z,
+ LOperand* w, BuiltinFunctionId op)
+ : op_(op) {
+ inputs_[0] = x;
+ inputs_[1] = y;
+ inputs_[2] = z;
+ inputs_[3] = w;
+ }
+
+ LOperand* x() { return inputs_[0]; }
+ LOperand* y() { return inputs_[1]; }
+ LOperand* z() { return inputs_[2]; }
+ LOperand* w() { return inputs_[3]; }
+ BuiltinFunctionId op() const { return op_; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kQuarternarySIMDOperation;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+ static LQuarternarySIMDOperation* cast(LInstruction* instr) {
+ ASSERT(instr->IsQuarternarySIMDOperation());
+ return reinterpret_cast<LQuarternarySIMDOperation*>(instr);
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(QuarternarySIMDOperation)
+
+ private:
+ BuiltinFunctionId op_;
+};
+
+
class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
};
+inline static bool ExternalArrayOpRequiresPreScale(
+ Representation key_representation,
+ ElementsKind kind) {
+ int shift_size = ElementsKindToShiftSize(kind);
+ return key_representation.IsSmi()
+ ? shift_size > static_cast<int>(maximal_scale_factor) + kSmiTagSize
+ : shift_size > static_cast<int>(maximal_scale_factor);
+}
+
+
inline static bool ExternalArrayOpRequiresTemp(
Representation key_representation,
ElementsKind elements_kind) {
- // Operations that require the key to be divided by two to be converted into
- // an index cannot fold the scale operation into a load and need an extra
- // temp register to do the work.
- return key_representation.IsSmi() &&
- (elements_kind == EXTERNAL_INT8_ELEMENTS ||
- elements_kind == EXTERNAL_UINT8_ELEMENTS ||
- elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
- elements_kind == UINT8_ELEMENTS ||
- elements_kind == INT8_ELEMENTS ||
- elements_kind == UINT8_CLAMPED_ELEMENTS);
+ // Operations that require the key to be scaled by a factor or divided by two
+ // to be converted into an index cannot fold the scale operation into a load
+ // and need an extra temp register to do the work.
+ return ExternalArrayOpRequiresPreScale(key_representation, elements_kind) ||
+ (key_representation.IsSmi() &&
+ (elements_kind == EXTERNAL_INT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
+ elements_kind == UINT8_ELEMENTS ||
+ elements_kind == INT8_ELEMENTS ||
+ elements_kind == UINT8_CLAMPED_ELEMENTS));
}
};
+class LSIMD128ToTagged V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ explicit LSIMD128ToTagged(LOperand* value, LOperand* temp, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SIMD128ToTagged, "simd128-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
// Sometimes truncating conversion from a tagged value to an int32.
class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
};
+class LTaggedToSIMD128 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ explicit LTaggedToSIMD128(LOperand* value, LOperand* temp,
+ Representation representation)
+ : representation_(representation) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ Representation representation() const { return representation_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TaggedToSIMD128, "simd128-untag")
+ DECLARE_HYDROGEN_ACCESSOR(Change);
+ private:
+ Representation representation_;
+};
+
+
class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
// Optionally save all XMM registers.
if (save_doubles) {
- int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
+ int space = XMMRegister::kMaxNumRegisters * kSIMD128Size +
argc * kPointerSize;
sub(esp, Immediate(space));
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
+ movups(Operand(ebp, offset - ((i + 1) * kSIMD128Size)), reg);
}
} else {
sub(esp, Immediate(argc * kPointerSize));
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
+ movups(reg, Operand(ebp, offset - ((i + 1) * kSIMD128Size)));
}
}
}
+#define SIMD128_HEAP_ALLOCATE_FUNCTIONS(V) \
+ V(Float32x4, float32x4) \
+ V(Float64x2, float64x2) \
+ V(Int32x4, int32x4)
+
+#define DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION(TYPE, type) \
+void MacroAssembler::Allocate##TYPE(Register result, \
+ Register scratch1, \
+ Register scratch2, \
+ Label* gc_required) { \
+ /* Allocate SIMD128 object */ \
+ Allocate(TYPE::kSize, result, scratch1, no_reg, gc_required, TAG_OBJECT);\
+ \
+ mov(FieldOperand(result, JSObject::kMapOffset), \
+ Immediate(reinterpret_cast<intptr_t>( \
+ isolate()->native_context()->type##_function()->initial_map())));\
+ mov(FieldOperand(result, JSObject::kPropertiesOffset), \
+ Immediate(isolate()->factory()->empty_fixed_array())); \
+ mov(FieldOperand(result, JSObject::kElementsOffset), \
+ Immediate(isolate()->factory()->empty_fixed_array())); \
+ /* Allocate FixedTypedArray object */ \
+ Allocate(FixedTypedArrayBase::kDataOffset + k##TYPE##Size, \
+ scratch1, scratch2, no_reg, gc_required, TAG_OBJECT); \
+ \
+ mov(FieldOperand(scratch1, FixedTypedArrayBase::kMapOffset), \
+ Immediate(isolate()->factory()->fixed_##type##_array_map())); \
+ mov(scratch2, Immediate(1)); \
+ SmiTag(scratch2); \
+ mov(FieldOperand(scratch1, FixedTypedArrayBase::kLengthOffset), \
+ scratch2); \
+ /* Assign TifxedTypedArray object to SIMD128 object */ \
+ mov(FieldOperand(result, TYPE::kValueOffset), scratch1); \
+}
+
+SIMD128_HEAP_ALLOCATE_FUNCTIONS(DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION)
+
+
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
Register scratch1,
}
+void MacroAssembler::absps(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } float_absolute_constant =
+ { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF };
+ andps(dst,
+ Operand(reinterpret_cast<int32_t>(&float_absolute_constant),
+ RelocInfo::NONE32));
+}
+
+
+void MacroAssembler::abspd(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } double_absolute_constant =
+ { 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF };
+ andps(dst,
+ Operand(reinterpret_cast<int32_t>(&double_absolute_constant),
+ RelocInfo::NONE32));
+}
+
+
+void MacroAssembler::notps(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } float_not_constant =
+ { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+ xorps(dst,
+ Operand(reinterpret_cast<int32_t>(&float_not_constant),
+ RelocInfo::NONE32));
+}
+
+
+void MacroAssembler::negateps(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } float_negate_constant =
+ { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
+ xorps(dst,
+ Operand(reinterpret_cast<int32_t>(&float_negate_constant),
+ RelocInfo::NONE32));
+}
+
+
+void MacroAssembler::negatepd(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } double_negate_constant =
+ { 0x00000000, 0x80000000, 0x00000000, 0x80000000 };
+ xorpd(dst,
+ Operand(reinterpret_cast<int32_t>(&double_negate_constant),
+ RelocInfo::NONE32));
+}
+
+
+void MacroAssembler::pnegd(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } int32_one_constant = { 0x1, 0x1, 0x1, 0x1 };
+ notps(dst);
+ paddd(dst,
+ Operand(reinterpret_cast<int32_t>(&int32_one_constant),
+ RelocInfo::NONE32));
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
Register scratch2,
Label* gc_required);
+ // Allocate a float32x4, float64x2 and int32x4 object in new space with
+ // undefined value.
+ // Returns tagged pointer in result register, or jumps to gc_required if new
+ // space is full.
+ void AllocateFloat32x4(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ void AllocateInt32x4(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ void AllocateFloat64x2(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
// Allocate a sequential string. All the header fields of the string object
// are initialized.
void AllocateTwoByteString(Register result,
inline bool AllowThisStubCall(CodeStub* stub);
// ---------------------------------------------------------------------------
+ // SIMD macros.
+ void absps(XMMRegister dst);
+ void abspd(XMMRegister dst);
+ void negateps(XMMRegister dst);
+ void negatepd(XMMRegister dst);
+ void notps(XMMRegister dst);
+ void pnegd(XMMRegister dst);
+
+ // ---------------------------------------------------------------------------
// String utilities.
// Generate code to do a lookup in the number string cache. If the number in
void LAllocator::SetLiveRangeAssignedRegister(LiveRange* range, int reg) {
- if (range->Kind() == DOUBLE_REGISTERS) {
+ if (range->Kind() == DOUBLE_REGISTERS ||
+ IsSIMD128RegisterKind(range->Kind())) {
assigned_double_registers_->Add(reg);
} else {
ASSERT(range->Kind() == GENERAL_REGISTERS);
case DOUBLE_REGISTERS:
op = LDoubleRegister::Create(assigned_register(), zone);
break;
+ case FLOAT32x4_REGISTERS:
+ op = LFloat32x4Register::Create(assigned_register(), zone);
+ break;
+ case FLOAT64x2_REGISTERS:
+ op = LFloat64x2Register::Create(assigned_register(), zone);
+ break;
+ case INT32x4_REGISTERS:
+ op = LInt32x4Register::Create(assigned_register(), zone);
+ break;
default:
UNREACHABLE();
}
if (use_pos->HasOperand()) {
ASSERT(op->IsRegister() || op->IsDoubleRegister() ||
- !use_pos->RequiresRegister());
+ op->IsSIMD128Register() || !use_pos->RequiresRegister());
use_pos->operand()->ConvertTo(op->kind(), op->index());
}
use_pos = use_pos->next();
active_live_ranges_(8, zone()),
inactive_live_ranges_(8, zone()),
reusable_slots_(8, zone()),
+ reusable_simd128_slots_(8, zone()),
next_virtual_register_(num_values),
first_artificial_register_(num_values),
mode_(UNALLOCATED_REGISTERS),
double_artificial_registers_.Add(
cur_input->virtual_register() - first_artificial_register_,
zone());
+ } else if (RequiredRegisterKind(input_copy->virtual_register()) ==
+ FLOAT32x4_REGISTERS) {
+ float32x4_artificial_registers_.Add(
+ cur_input->virtual_register() - first_artificial_register_,
+ zone());
+ } else if (RequiredRegisterKind(input_copy->virtual_register()) ==
+ FLOAT64x2_REGISTERS) {
+ float64x2_artificial_registers_.Add(
+ cur_input->virtual_register() - first_artificial_register_,
+ zone());
+ } else if (RequiredRegisterKind(input_copy->virtual_register()) ==
+ INT32x4_REGISTERS) {
+ int32x4_artificial_registers_.Add(
+ cur_input->virtual_register() - first_artificial_register_,
+ zone());
}
AddConstraintsGapMove(gap_index, input_copy, cur_input);
if (branch->HasPointerMap()) {
if (HasTaggedValue(range->id())) {
branch->pointer_map()->RecordPointer(cur_op, chunk()->zone());
- } else if (!cur_op->IsDoubleStackSlot() &&
- !cur_op->IsDoubleRegister()) {
+ } else if (!cur_op->IsDoubleStackSlot() &&
+ !cur_op->IsDoubleRegister() &&
+ !cur_op->IsSIMD128StackSlot() &&
+ !cur_op->IsSIMD128Register()) {
branch->pointer_map()->RemovePointer(cur_op);
}
}
if (live_ranges_[i] != NULL) {
if (live_ranges_[i]->Kind() == mode_) {
AddToUnhandledUnsorted(live_ranges_[i]);
+ } else if (mode_ == DOUBLE_REGISTERS &&
+ IsSIMD128RegisterKind(live_ranges_[i]->Kind())) {
+ AddToUnhandledUnsorted(live_ranges_[i]);
}
}
}
ASSERT(UnhandledIsSorted());
ASSERT(reusable_slots_.is_empty());
+ ASSERT(reusable_simd128_slots_.is_empty());
ASSERT(active_live_ranges_.is_empty());
ASSERT(inactive_live_ranges_.is_empty());
}
reusable_slots_.Rewind(0);
+ reusable_simd128_slots_.Rewind(0);
active_live_ranges_.Rewind(0);
inactive_live_ranges_.Rewind(0);
}
HValue* value = graph_->LookupValue(virtual_register);
if (value != NULL && value->representation().IsDouble()) {
return DOUBLE_REGISTERS;
+ } else if (value != NULL && (value->representation().IsFloat32x4())) {
+ return FLOAT32x4_REGISTERS;
+ } else if (value != NULL && (value->representation().IsFloat64x2())) {
+ return FLOAT64x2_REGISTERS;
+ } else if (value != NULL && (value->representation().IsInt32x4())) {
+ return INT32x4_REGISTERS;
}
} else if (double_artificial_registers_.Contains(
virtual_register - first_artificial_register_)) {
return DOUBLE_REGISTERS;
+ } else if (float32x4_artificial_registers_.Contains(
+ virtual_register - first_artificial_register_)) {
+ return FLOAT32x4_REGISTERS;
+ } else if (float64x2_artificial_registers_.Contains(
+ virtual_register - first_artificial_register_)) {
+ return FLOAT64x2_REGISTERS;
+ } else if (int32x4_artificial_registers_.Contains(
+ virtual_register - first_artificial_register_)) {
+ return INT32x4_REGISTERS;
}
return GENERAL_REGISTERS;
int index = range->TopLevel()->GetSpillOperand()->index();
if (index >= 0) {
- reusable_slots_.Add(range, zone());
+ if (IsSIMD128RegisterKind(range->Kind())) {
+ reusable_simd128_slots_.Add(range, zone());
+ } else {
+ reusable_slots_.Add(range, zone());
+ }
}
}
LOperand* LAllocator::TryReuseSpillSlot(LiveRange* range) {
- if (reusable_slots_.is_empty()) return NULL;
- if (reusable_slots_.first()->End().Value() >
+ ZoneList<LiveRange*>* reusable_slots = IsSIMD128RegisterKind(range->Kind())
+ ? &reusable_simd128_slots_
+ : &reusable_slots_;
+ if (reusable_slots->is_empty()) return NULL;
+ if (reusable_slots->first()->End().Value() >
range->TopLevel()->Start().Value()) {
return NULL;
}
- LOperand* result = reusable_slots_.first()->TopLevel()->GetSpillOperand();
- reusable_slots_.Remove(0);
+ LOperand* result = reusable_slots->first()->TopLevel()->GetSpillOperand();
+ reusable_slots->Remove(0);
return result;
}
}
LOperand* hint = current->FirstHint();
- if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister())) {
+ if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister() ||
+ hint->IsSIMD128Register())) {
int register_index = hint->index();
TraceAlloc(
"Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
if (!first->HasAllocatedSpillOperand()) {
LOperand* op = TryReuseSpillSlot(range);
- if (op == NULL) op = chunk_->GetNextSpillSlot(range->Kind());
+ if (op == NULL) {
+ op = chunk_->GetNextSpillSlot(range->Kind());
+ } else if (range->Kind() == FLOAT32x4_REGISTERS &&
+ op->kind() != LOperand::FLOAT32x4_STACK_SLOT) {
+ // Convert to Float32x4StackSlot.
+ op = LFloat32x4StackSlot::Create(op->index(), zone());
+ } else if (range->Kind() == FLOAT64x2_REGISTERS &&
+ op->kind() != LOperand::FLOAT64x2_STACK_SLOT) {
+ // Convert to Float64x2StackSlot.
+ op = LFloat64x2StackSlot::Create(op->index(), zone());
+ } else if (range->Kind() == INT32x4_REGISTERS &&
+ op->kind() != LOperand::INT32x4_STACK_SLOT) {
+ // Convert to Int32x4StackSlot.
+ op = LInt32x4StackSlot::Create(op->index(), zone());
+ }
first->SetSpillOperand(op);
}
range->MakeSpilled(chunk()->zone());
enum RegisterKind {
UNALLOCATED_REGISTERS,
GENERAL_REGISTERS,
- DOUBLE_REGISTERS
+ DOUBLE_REGISTERS,
+ FLOAT32x4_REGISTERS,
+ FLOAT64x2_REGISTERS,
+ INT32x4_REGISTERS
};
+inline bool IsSIMD128RegisterKind(RegisterKind kind) {
+ return kind == FLOAT32x4_REGISTERS || kind == FLOAT64x2_REGISTERS ||
+ kind == INT32x4_REGISTERS;
+}
+
+
// A register-allocator view of a Lithium instruction. It contains the id of
// the output operand and a list of input operand uses.
ZoneList<LiveRange*> active_live_ranges_;
ZoneList<LiveRange*> inactive_live_ranges_;
ZoneList<LiveRange*> reusable_slots_;
+ // Slots reusable for float32x4, float64x2 and int32x4 register spilling.
+ ZoneList<LiveRange*> reusable_simd128_slots_;
// Next virtual register number to be assigned to temporaries.
int next_virtual_register_;
int first_artificial_register_;
GrowableBitVector double_artificial_registers_;
+ GrowableBitVector float32x4_artificial_registers_;
+ GrowableBitVector float64x2_artificial_registers_;
+ GrowableBitVector int32x4_artificial_registers_;
RegisterKind mode_;
int num_registers_;
case DOUBLE_STACK_SLOT:
stream->Add("[double_stack:%d]", index());
break;
+ case FLOAT32x4_STACK_SLOT:
+ stream->Add("[float32x4_stack:%d]", index());
+ break;
+ case FLOAT64x2_STACK_SLOT:
+ stream->Add("[float64x2_stack:%d]", index());
+ break;
+ case INT32x4_STACK_SLOT:
+ stream->Add("[int32x4_stack:%d]", index());
+ break;
case REGISTER:
stream->Add("[%s|R]", Register::AllocationIndexToString(index()));
break;
case DOUBLE_REGISTER:
stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index()));
break;
+ case FLOAT32x4_REGISTER:
+ stream->Add("[%s|R]",
+ SIMD128Register::AllocationIndexToString(index()));
+ break;
+ case FLOAT64x2_REGISTER:
+ stream->Add("[%s|R]",
+ SIMD128Register::AllocationIndexToString(index()));
+ break;
+ case INT32x4_REGISTER:
+ stream->Add("[%s|R]",
+ SIMD128Register::AllocationIndexToString(index()));
+ break;
}
}
void LPointerMap::RecordPointer(LOperand* op, Zone* zone) {
// Do not record arguments as pointers.
if (op->IsStackSlot() && op->index() < 0) return;
- ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+ ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot() &&
+ !op->IsFloat32x4Register() && !op->IsFloat32x4StackSlot() &&
+ !op->IsFloat64x2Register() && !op->IsFloat64x2StackSlot() &&
+ !op->IsInt32x4Register() && !op->IsInt32x4StackSlot());
pointer_operands_.Add(op, zone);
}
void LPointerMap::RemovePointer(LOperand* op) {
// Do not record arguments as pointers.
if (op->IsStackSlot() && op->index() < 0) return;
- ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+ ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot() &&
+ !op->IsFloat32x4Register() && !op->IsFloat32x4StackSlot() &&
+ !op->IsFloat64x2Register() && !op->IsFloat64x2StackSlot() &&
+ !op->IsInt32x4Register() && !op->IsInt32x4StackSlot());
for (int i = 0; i < pointer_operands_.length(); ++i) {
if (pointer_operands_[i]->Equals(op)) {
pointer_operands_.Remove(i);
void LPointerMap::RecordUntagged(LOperand* op, Zone* zone) {
// Do not record arguments as pointers.
if (op->IsStackSlot() && op->index() < 0) return;
- ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+ ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot() &&
+ !op->IsFloat32x4Register() && !op->IsFloat32x4StackSlot() &&
+ !op->IsFloat64x2Register() && !op->IsFloat64x2StackSlot() &&
+ !op->IsInt32x4Register() && !op->IsInt32x4StackSlot());
untagged_operands_.Add(op, zone);
}
namespace v8 {
namespace internal {
-#define LITHIUM_OPERAND_LIST(V) \
- V(ConstantOperand, CONSTANT_OPERAND, 128) \
- V(StackSlot, STACK_SLOT, 128) \
- V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128) \
- V(Register, REGISTER, 16) \
- V(DoubleRegister, DOUBLE_REGISTER, 16)
+#define LITHIUM_OPERAND_LIST(V) \
+ V(ConstantOperand, CONSTANT_OPERAND, 128) \
+ V(StackSlot, STACK_SLOT, 128) \
+ V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128) \
+ V(Float32x4StackSlot, FLOAT32x4_STACK_SLOT, 128) \
+ V(Float64x2StackSlot, FLOAT64x2_STACK_SLOT, 128) \
+ V(Int32x4StackSlot, INT32x4_STACK_SLOT, 128) \
+ V(Register, REGISTER, 16) \
+ V(DoubleRegister, DOUBLE_REGISTER, 16) \
+ V(Float32x4Register, FLOAT32x4_REGISTER, 16) \
+ V(Float64x2Register, FLOAT64x2_REGISTER, 16) \
+ V(Int32x4Register, INT32x4_REGISTER, 16)
class LOperand : public ZoneObject {
CONSTANT_OPERAND,
STACK_SLOT,
DOUBLE_STACK_SLOT,
+ FLOAT32x4_STACK_SLOT,
+ FLOAT64x2_STACK_SLOT,
+ INT32x4_STACK_SLOT,
REGISTER,
- DOUBLE_REGISTER
+ DOUBLE_REGISTER,
+ FLOAT32x4_REGISTER,
+ FLOAT64x2_REGISTER,
+ INT32x4_REGISTER
};
LOperand() : value_(KindField::encode(INVALID)) { }
LITHIUM_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0)
LITHIUM_OPERAND_PREDICATE(Ignored, INVALID, 0)
#undef LITHIUM_OPERAND_PREDICATE
- bool Equals(LOperand* other) const { return value_ == other->value_; }
+ bool IsSIMD128Register() const {
+ return kind() == FLOAT32x4_REGISTER || kind() == FLOAT64x2_REGISTER ||
+ kind() == INT32x4_REGISTER;
+ }
+ bool IsSIMD128StackSlot() const {
+ return kind() == FLOAT32x4_STACK_SLOT || kind() == FLOAT64x2_STACK_SLOT ||
+ kind() == INT32x4_STACK_SLOT;
+ }
+ bool Equals(LOperand* other) const {
+ return value_ == other->value_ || (index() == other->index() &&
+ ((IsSIMD128Register() && other->IsSIMD128Register()) ||
+ (IsSIMD128StackSlot() && other->IsSIMD128StackSlot())));
+ }
void PrintTo(StringStream* stream);
void ConvertTo(Kind kind, int index) {
static void TearDownCaches();
protected:
- static const int kKindFieldWidth = 3;
+ static const int kKindFieldWidth = 4;
class KindField : public BitField<Kind, 0, kKindFieldWidth> { };
LOperand(Kind kind, int index) { ConvertTo(kind, index); }
// because it accommodates a larger pay-load.
//
// For FIXED_SLOT policy:
- // +------------------------------------------+
- // | slot_index | vreg | 0 | 001 |
- // +------------------------------------------+
+ // +-------------------------------------------+
+ // | slot_index | vreg | 0 | 0001 |
+ // +-------------------------------------------+
//
// For all other (extended) policies:
- // +------------------------------------------+
- // | reg_index | L | PPP | vreg | 1 | 001 | L ... Lifetime
- // +------------------------------------------+ P ... Policy
+ // +-------------------------------------------+
+ // | reg_index | L | PPP | vreg | 1 | 0001 | L ... Lifetime
+ // +-------------------------------------------+ P ... Policy
//
// The slot index is a signed value which requires us to decode it manually
// instead of using the BitField utility class.
// The superclass has a KindField.
- STATIC_ASSERT(kKindFieldWidth == 3);
+ STATIC_ASSERT(kKindFieldWidth == 4);
// BitFields for all unallocated operands.
- class BasicPolicyField : public BitField<BasicPolicy, 3, 1> {};
- class VirtualRegisterField : public BitField<unsigned, 4, 18> {};
+ class BasicPolicyField : public BitField<BasicPolicy, 4, 1> {};
+ class VirtualRegisterField : public BitField<unsigned, 5, 18> {};
// BitFields specific to BasicPolicy::FIXED_SLOT.
- class FixedSlotIndexField : public BitField<int, 22, 10> {};
+ class FixedSlotIndexField : public BitField<int, 23, 9> {};
// BitFields specific to BasicPolicy::EXTENDED_POLICY.
- class ExtendedPolicyField : public BitField<ExtendedPolicy, 22, 3> {};
- class LifetimeField : public BitField<Lifetime, 25, 1> {};
- class FixedRegisterField : public BitField<int, 26, 6> {};
+ class ExtendedPolicyField : public BitField<ExtendedPolicy, 23, 3> {};
+ class LifetimeField : public BitField<Lifetime, 26, 1> {};
+ class FixedRegisterField : public BitField<int, 27, 5> {};
static const int kMaxVirtualRegisters = VirtualRegisterField::kMax + 1;
static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize;
macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap');
macro IS_WEAKSET(arg) = (%_ClassOf(arg) === 'WeakSet');
macro IS_DATE(arg) = (%_ClassOf(arg) === 'Date');
+macro IsFloat32x4(arg) = (%_ClassOf(arg) === 'float32x4');
+macro IsFloat64x2(arg) = (%_ClassOf(arg) === 'float64x2');
+macro IsInt32x4(arg) = (%_ClassOf(arg) === 'int32x4');
macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number');
macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String');
macro IS_SYMBOL_WRAPPER(arg) = (%_ClassOf(arg) === 'Symbol');
case JS_ARRAY_TYPE:
case JS_DATE_TYPE:
case JS_OBJECT_TYPE:
+ case FLOAT32x4_TYPE:
+ case FLOAT64x2_TYPE:
+ case INT32x4_TYPE:
case JS_REGEXP_TYPE:
VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
break;
case HEAP_NUMBER_TYPE:
HeapNumber::cast(this)->HeapNumberVerify();
break;
+ case FLOAT32x4_TYPE:
+ Float32x4::cast(this)->Float32x4Verify();
+ break;
+ case FLOAT64x2_TYPE:
+ Float64x2::cast(this)->Float64x2Verify();
+ break;
+ case INT32x4_TYPE:
+ Int32x4::cast(this)->Int32x4Verify();
+ break;
case FIXED_ARRAY_TYPE:
FixedArray::cast(this)->FixedArrayVerify();
break;
}
+void Float32x4::Float32x4Verify() {
+ CHECK(IsFloat32x4());
+}
+
+
+void Float64x2::Float64x2Verify() {
+ CHECK(IsFloat64x2());
+}
+
+
+void Int32x4::Int32x4Verify() {
+ CHECK(IsInt32x4());
+}
+
+
void ByteArray::ByteArrayVerify() {
CHECK(IsByteArray());
}
TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
TYPE_CHECKER(JSDataView, JS_DATA_VIEW_TYPE)
+TYPE_CHECKER(Float32x4, FLOAT32x4_TYPE)
+TYPE_CHECKER(Float64x2, FLOAT64x2_TYPE)
+TYPE_CHECKER(Int32x4, INT32x4_TYPE)
bool Object::IsJSArrayBufferView() {
return IsJSDataView() || IsJSTypedArray();
write_double_field(p, offset, value)
#endif // V8_TARGET_ARCH_MIPS
+#define READ_FLOAT32x4_FIELD(p, offset) \
+ (*reinterpret_cast<float32x4_value_t*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_FLOAT32x4_FIELD(p, offset, value) \
+ (*reinterpret_cast<float32x4_value_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_FLOAT64x2_FIELD(p, offset) \
+ (*reinterpret_cast<float64x2_value_t*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_FLOAT64x2_FIELD(p, offset, value) \
+ (*reinterpret_cast<float64x2_value_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_INT32x4_FIELD(p, offset) \
+ (*reinterpret_cast<int32x4_value_t*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_INT32x4_FIELD(p, offset, value) \
+ (*reinterpret_cast<int32x4_value_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_FLOAT_FIELD(p, offset) \
+ (*reinterpret_cast<float*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_FLOAT_FIELD(p, offset, value) \
+ (*reinterpret_cast<float*>(FIELD_ADDR(p, offset)) = value)
#define READ_INT_FIELD(p, offset) \
(*reinterpret_cast<int*>(FIELD_ADDR(p, offset)))
}
+ACCESSORS(Float32x4, value, Object, kValueOffset)
+ACCESSORS(Float64x2, value, Object, kValueOffset)
+ACCESSORS(Int32x4, value, Object, kValueOffset)
+
+
+const char* Float32x4::Name() {
+ return "float32x4";
+}
+
+
+int Float32x4::kRuntimeAllocatorId() {
+ return Runtime::kAllocateFloat32x4;
+}
+
+
+float Float32x4::getAt(int index) {
+ ASSERT(index >= 0 && index < kLanes);
+ return get().storage[index];
+}
+
+
+float32x4_value_t Float32x4::get() {
+ return FixedFloat32x4Array::cast(value())->get_scalar(0);
+}
+
+
+void Float32x4::set(float32x4_value_t f32x4) {
+ FixedFloat32x4Array::cast(value())->set(0, f32x4);
+}
+
+
+const char* Float64x2::Name() {
+ return "float64x2";
+}
+
+
+int Float64x2::kRuntimeAllocatorId() {
+ return Runtime::kAllocateFloat64x2;
+}
+
+
+double Float64x2::getAt(int index) {
+ ASSERT(index >= 0 && index < kLanes);
+ return get().storage[index];
+}
+
+float64x2_value_t Float64x2::get() {
+ return FixedFloat64x2Array::cast(value())->get_scalar(0);
+}
+
+
+void Float64x2::set(float64x2_value_t f64x2) {
+ FixedFloat64x2Array::cast(value())->set(0, f64x2);
+}
+
+
+const char* Int32x4::Name() {
+ return "int32x4";
+}
+
+
+int Int32x4::kRuntimeAllocatorId() {
+ return Runtime::kAllocateInt32x4;
+}
+
+
+int32_t Int32x4::getAt(int index) {
+ ASSERT(index >= 0 && index < kLanes);
+ return get().storage[index];
+}
+
+
+int32x4_value_t Int32x4::get() {
+ return FixedInt32x4Array::cast(value())->get_scalar(0);
+}
+
+
+void Int32x4::set(int32x4_value_t i32x4) {
+ FixedInt32x4Array::cast(value())->set(0, i32x4);
+}
+
+
ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
return JSTypedArray::kSize;
case JS_DATA_VIEW_TYPE:
return JSDataView::kSize;
+ case FLOAT32x4_TYPE:
+ return Float32x4::kSize;
+ case FLOAT64x2_TYPE:
+ return Float64x2::kSize;
+ case INT32x4_TYPE:
+ return Int32x4::kSize;
case JS_SET_TYPE:
return JSSet::kSize;
case JS_MAP_TYPE:
CAST_ACCESSOR(Smi)
CAST_ACCESSOR(HeapObject)
CAST_ACCESSOR(HeapNumber)
+CAST_ACCESSOR(Float32x4)
+CAST_ACCESSOR(Float64x2)
+CAST_ACCESSOR(Int32x4)
CAST_ACCESSOR(Oddball)
CAST_ACCESSOR(Cell)
CAST_ACCESSOR(PropertyCell)
CAST_ACCESSOR(ExternalInt16Array)
CAST_ACCESSOR(ExternalUint16Array)
CAST_ACCESSOR(ExternalInt32Array)
+CAST_ACCESSOR(ExternalInt32x4Array)
CAST_ACCESSOR(ExternalUint32Array)
CAST_ACCESSOR(ExternalFloat32Array)
+CAST_ACCESSOR(ExternalFloat32x4Array)
CAST_ACCESSOR(ExternalFloat64Array)
+CAST_ACCESSOR(ExternalFloat64x2Array)
CAST_ACCESSOR(ExternalUint8ClampedArray)
CAST_ACCESSOR(Struct)
CAST_ACCESSOR(AccessorInfo)
}
+float32x4_value_t ExternalFloat32x4Array::get_scalar(int index) {
+ ASSERT((index >= 0) && (index < this->length()));
+ float* ptr = static_cast<float*>(external_pointer());
+ float32x4_value_t value;
+ value.storage[0] = ptr[index * 4 + 0];
+ value.storage[1] = ptr[index * 4 + 1];
+ value.storage[2] = ptr[index * 4 + 2];
+ value.storage[3] = ptr[index * 4 + 3];
+ return value;
+}
+
+
+Handle<Object> ExternalFloat32x4Array::get(Handle<ExternalFloat32x4Array> array,
+ int index) {
+ float32x4_value_t value = array->get_scalar(index);
+ return array->GetIsolate()->factory()->NewFloat32x4(value);
+}
+
+
+void ExternalFloat32x4Array::set(int index, const float32x4_value_t& value) {
+ ASSERT((index >= 0) && (index < this->length()));
+ float* ptr = static_cast<float*>(external_pointer());
+ ptr[index * 4 + 0] = value.storage[0];
+ ptr[index * 4 + 1] = value.storage[1];
+ ptr[index * 4 + 2] = value.storage[2];
+ ptr[index * 4 + 3] = value.storage[3];
+}
+
+
+float64x2_value_t ExternalFloat64x2Array::get_scalar(int index) {
+ ASSERT((index >= 0) && (index < this->length()));
+ double* ptr = static_cast<double*>(external_pointer());
+ float64x2_value_t value;
+ value.storage[0] = ptr[index * 2 + 0];
+ value.storage[1] = ptr[index * 2 + 1];
+ return value;
+}
+
+
+Handle<Object> ExternalFloat64x2Array::get(Handle<ExternalFloat64x2Array> array,
+ int index) {
+ float64x2_value_t value = array->get_scalar(index);
+ return array->GetIsolate()->factory()->NewFloat64x2(value);
+}
+
+
+void ExternalFloat64x2Array::set(int index, const float64x2_value_t& value) {
+ ASSERT((index >= 0) && (index < this->length()));
+ double* ptr = static_cast<double*>(external_pointer());
+ ptr[index * 2 + 0] = value.storage[0];
+ ptr[index * 2 + 1] = value.storage[1];
+}
+
+
+int32x4_value_t ExternalInt32x4Array::get_scalar(int index) {
+ ASSERT((index >= 0) && (index < this->length()));
+ int32_t* ptr = static_cast<int32_t*>(external_pointer());
+ int32x4_value_t value;
+ value.storage[0] = ptr[index * 4 + 0];
+ value.storage[1] = ptr[index * 4 + 1];
+ value.storage[2] = ptr[index * 4 + 2];
+ value.storage[3] = ptr[index * 4 + 3];
+ return value;
+}
+
+
+Handle<Object> ExternalInt32x4Array::get(Handle<ExternalInt32x4Array> array,
+ int index) {
+ int32x4_value_t value = array->get_scalar(index);
+ return array->GetIsolate()->factory()->NewInt32x4(value);
+}
+
+
+void ExternalInt32x4Array::set(int index, const int32x4_value_t& value) {
+ ASSERT((index >= 0) && (index < this->length()));
+ int32_t* ptr = static_cast<int32_t*>(external_pointer());
+ ptr[index * 4 + 0] = value.storage[0];
+ ptr[index * 4 + 1] = value.storage[1];
+ ptr[index * 4 + 2] = value.storage[2];
+ ptr[index * 4 + 3] = value.storage[3];
+}
+
+
double ExternalFloat64Array::get_scalar(int index) {
ASSERT((index >= 0) && (index < this->length()));
double* ptr = static_cast<double*>(external_pointer());
return Traits::ToHandle(array->GetIsolate(), cast_value);
}
+template<> inline
+Handle<Object> FixedTypedArray<Float32x4ArrayTraits>::SetValue(
+ Handle<FixedTypedArray<Float32x4ArrayTraits> > array,
+ uint32_t index, Handle<Object> value) {
+ float32x4_value_t cast_value;
+ cast_value.storage[0] = static_cast<float>(OS::nan_value());
+ cast_value.storage[1] = static_cast<float>(OS::nan_value());
+ cast_value.storage[2] = static_cast<float>(OS::nan_value());
+ cast_value.storage[3] = static_cast<float>(OS::nan_value());
+ if (index < static_cast<uint32_t>(array->length())) {
+ if (value->IsFloat32x4()) {
+ cast_value = Handle<Float32x4>::cast(value)->get();
+ } else {
+ // Clamp undefined to NaN (default). All other types have been
+ // converted to a number type further up in the call chain.
+ ASSERT(value->IsUndefined());
+ }
+ array->set(index, cast_value);
+ }
+ return Float32x4ArrayTraits::ToHandle(array->GetIsolate(), cast_value);
+}
+
+
+template<> inline
+Handle<Object> FixedTypedArray<Float64x2ArrayTraits>::SetValue(
+ Handle<FixedTypedArray<Float64x2ArrayTraits> > array,
+ uint32_t index, Handle<Object> value) {
+ float64x2_value_t cast_value;
+ cast_value.storage[0] = OS::nan_value();
+ cast_value.storage[1] = OS::nan_value();
+ if (index < static_cast<uint32_t>(array->length())) {
+ if (value->IsFloat64x2()) {
+ cast_value = Handle<Float64x2>::cast(value)->get();
+ } else {
+ // Clamp undefined to NaN (default). All other types have been
+ // converted to a number type further up in the call chain.
+ ASSERT(value->IsUndefined());
+ }
+ array->set(index, cast_value);
+ }
+ return Float64x2ArrayTraits::ToHandle(array->GetIsolate(), cast_value);
+}
+
+
+template<> inline
+Handle<Object> FixedTypedArray<Int32x4ArrayTraits>::SetValue(
+ Handle<FixedTypedArray<Int32x4ArrayTraits> > array,
+ uint32_t index, Handle<Object> value) {
+ int32x4_value_t cast_value;
+ cast_value.storage[0] = 0;
+ cast_value.storage[1] = 0;
+ cast_value.storage[2] = 0;
+ cast_value.storage[3] = 0;
+ if (index < static_cast<uint32_t>(array->length())) {
+ if (value->IsInt32x4()) {
+ cast_value = Handle<Int32x4>::cast(value)->get();
+ } else {
+ // Clamp undefined to zero (default). All other types have been
+ // converted to a number type further up in the call chain.
+ ASSERT(value->IsUndefined());
+ }
+ array->set(index, cast_value);
+ }
+ return Int32x4ArrayTraits::ToHandle(array->GetIsolate(), cast_value);
+}
+
Handle<Object> Uint8ArrayTraits::ToHandle(Isolate* isolate, uint8_t scalar) {
return handle(Smi::FromInt(scalar), isolate);
}
+Handle<Object> Int32x4ArrayTraits::ToHandle(
+ Isolate* isolate, int32x4_value_t scalar) {
+ return isolate->factory()->NewInt32x4(scalar);
+}
+
+
+Handle<Object> Float32x4ArrayTraits::ToHandle(
+ Isolate* isolate, float32x4_value_t scalar) {
+ return isolate->factory()->NewFloat32x4(scalar);
+}
+
+
+Handle<Object> Float64x2ArrayTraits::ToHandle(
+ Isolate* isolate, float64x2_value_t scalar) {
+ return isolate->factory()->NewFloat64x2(scalar);
+}
+
+
Handle<Object> Float64ArrayTraits::ToHandle(Isolate* isolate, double scalar) {
return isolate->factory()->NewNumber(scalar);
}
case HEAP_NUMBER_TYPE:
HeapNumber::cast(this)->HeapNumberPrint(out);
break;
+ case FLOAT32x4_TYPE:
+ Float32x4::cast(this)->Float32x4Print(out);
+ break;
+ case FLOAT64x2_TYPE:
+ Float64x2::cast(this)->Float64x2Print(out);
+ break;
+ case INT32x4_TYPE:
+ Int32x4::cast(this)->Int32x4Print(out);
+ break;
case FIXED_DOUBLE_ARRAY_TYPE:
FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(out);
break;
}
+template<class T>
+static void DoPrintFloat32x4Elements(FILE* out, Object* object) {
+ T* p = T::cast(object);
+ for (int i = 0; i < p->length(); i++) {
+ float32x4_value_t value = p->get_scalar(i);
+ PrintF(out, " %d: (%f, %f, %f, %f)\n",
+ i, value.storage[0], value.storage[1],
+ value.storage[2], value.storage[3]);
+ }
+}
+
+
+template<class T>
+static void DoPrintFloat64x2Elements(FILE* out, Object* object) {
+ T* p = T::cast(object);
+ for (int i = 0; i < p->length(); i++) {
+ float64x2_value_t value = p->get_scalar(i);
+ PrintF(out, " %d: (%f, %f)\n", i, value.storage[0], value.storage[1]);
+ }
+}
+
+
+template<class T>
+static void DoPrintInt32x4Elements(FILE* out, Object* object) {
+ T* p = T::cast(object);
+ for (int i = 0; i < p->length(); i++) {
+ int32x4_value_t value = p->get_scalar(i);
+ PrintF(out, " %d: (%d, %d, %d, %d)\n",
+ i, value.storage[0], value.storage[1],
+ value.storage[2], value.storage[3]);
+ }
+}
+
+
void JSObject::PrintElements(FILE* out) {
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
break; \
}
+#define PRINT_FLOAT32x4_ELEMENTS(Kind, Type) \
+ case Kind: { \
+ DoPrintFloat32x4Elements<Type>(out, elements()); \
+ break; \
+ }
+
+#define PRINT_FLOAT64x2_ELEMENTS(Kind, Type) \
+ case Kind: { \
+ DoPrintFloat64x2Elements<Type>(out, elements()); \
+ break; \
+ }
+
+#define PRINT_INT32x4_ELEMENTS(Kind, Type) \
+ case Kind: { \
+ DoPrintInt32x4Elements<Type>(out, elements()); \
+ break; \
+ }
+
PRINT_ELEMENTS(EXTERNAL_UINT8_CLAMPED_ELEMENTS, ExternalUint8ClampedArray)
PRINT_ELEMENTS(EXTERNAL_INT8_ELEMENTS, ExternalInt8Array)
PRINT_ELEMENTS(EXTERNAL_UINT8_ELEMENTS,
ExternalUint32Array)
PRINT_DOUBLE_ELEMENTS(EXTERNAL_FLOAT32_ELEMENTS, ExternalFloat32Array)
PRINT_DOUBLE_ELEMENTS(EXTERNAL_FLOAT64_ELEMENTS, ExternalFloat64Array)
+ PRINT_FLOAT32x4_ELEMENTS(EXTERNAL_FLOAT32x4_ELEMENTS,
+ ExternalFloat32x4Array)
+ PRINT_FLOAT64x2_ELEMENTS(EXTERNAL_FLOAT64x2_ELEMENTS,
+ ExternalFloat64x2Array)
+ PRINT_INT32x4_ELEMENTS(EXTERNAL_INT32x4_ELEMENTS, ExternalInt32x4Array)
PRINT_ELEMENTS(UINT8_ELEMENTS, FixedUint8Array)
PRINT_ELEMENTS(INT32_ELEMENTS, FixedInt32Array)
PRINT_DOUBLE_ELEMENTS(FLOAT32_ELEMENTS, FixedFloat32Array)
PRINT_DOUBLE_ELEMENTS(FLOAT64_ELEMENTS, FixedFloat64Array)
+ PRINT_FLOAT32x4_ELEMENTS(FLOAT32x4_ELEMENTS, FixedFloat32x4Array)
+ PRINT_FLOAT64x2_ELEMENTS(FLOAT64x2_ELEMENTS, FixedFloat64x2Array)
+ PRINT_INT32x4_ELEMENTS(INT32x4_ELEMENTS, FixedInt32x4Array)
#undef PRINT_DOUBLE_ELEMENTS
#undef PRINT_ELEMENTS
case JS_MESSAGE_OBJECT_TYPE:
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
+ case FLOAT32x4_TYPE:
+ case FLOAT64x2_TYPE:
+ case INT32x4_TYPE:
return GetVisitorIdForSize(kVisitJSObject,
kVisitJSObjectGeneric,
instance_size);
case FIXED_UINT32_ARRAY_TYPE:
case FIXED_INT32_ARRAY_TYPE:
case FIXED_FLOAT32_ARRAY_TYPE:
+ case FIXED_INT32x4_ARRAY_TYPE:
+ case FIXED_FLOAT32x4_ARRAY_TYPE:
+ case FIXED_FLOAT64x2_ARRAY_TYPE:
case FIXED_UINT8_CLAMPED_ARRAY_TYPE:
return kVisitFixedTypedArray;
HeapNumber::cast(this)->HeapNumberPrint(accumulator);
accumulator->Put('>');
break;
+ case FLOAT32x4_TYPE:
+ accumulator->Add("<Float32x4: ");
+ Float32x4::cast(this)->Float32x4Print(accumulator);
+ accumulator->Put('>');
+ break;
+ case FLOAT64x2_TYPE:
+ accumulator->Add("<Float64x2: ");
+ Float64x2::cast(this)->Float64x2Print(accumulator);
+ accumulator->Put('>');
+ break;
+ case INT32x4_TYPE:
+ accumulator->Add("<Int32x4: ");
+ Int32x4::cast(this)->Int32x4Print(accumulator);
+ accumulator->Put('>');
+ break;
case JS_PROXY_TYPE:
accumulator->Add("<JSProxy>");
break;
break;
case FIXED_DOUBLE_ARRAY_TYPE:
break;
+ case FLOAT32x4_TYPE:
+ case FLOAT64x2_TYPE:
+ case INT32x4_TYPE:
+ break;
case JS_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
}
+void Float32x4::Float32x4Print(FILE* out) {
+ PrintF(out, "%.16g %.16g %.16g %.16g", x(), y(), z(), w());
+}
+
+
+void Float32x4::Float32x4Print(StringStream* accumulator) {
+ // The Windows version of vsnprintf can allocate when printing a %g string
+ // into a buffer that may not be big enough. We don't want random memory
+ // allocation when producing post-crash stack traces, so we print into a
+ // buffer that is plenty big enough for any floating point number, then
+ // print that using vsnprintf (which may truncate but never allocate if
+ // there is no more space in the buffer).
+ EmbeddedVector<char, 100> buffer;
+ SNPrintF(buffer, "%.16g %.16g %.16g %.16g", x(), y(), z(), w());
+ accumulator->Add("%s", buffer.start());
+}
+
+
+void Int32x4::Int32x4Print(FILE* out) {
+ PrintF(out, "%d %d %d %d", x(), y(), z(), w());
+}
+
+
+void Int32x4::Int32x4Print(StringStream* accumulator) {
+ // The Windows version of vsnprintf can allocate when printing a %g string
+ // into a buffer that may not be big enough. We don't want random memory
+ // allocation when producing post-crash stack traces, so we print into a
+ // buffer that is plenty big enough for any floating point number, then
+ // print that using vsnprintf (which may truncate but never allocate if
+ // there is no more space in the buffer).
+ EmbeddedVector<char, 100> buffer;
+ SNPrintF(buffer, "%d %d %d %d", x(), y(), z(), w());
+ accumulator->Add("%s", buffer.start());
+}
+
+
+void Float64x2::Float64x2Print(FILE* out) {
+ PrintF(out, "%.16g %.16g", x(), y());
+}
+
+
+void Float64x2::Float64x2Print(StringStream* accumulator) {
+ // The Windows version of vsnprintf can allocate when printing a %g string
+ // into a buffer that may not be big enough. We don't want random memory
+ // allocation when producing post-crash stack traces, so we print into a
+ // buffer that is plenty big enough for any floating point number, then
+ // print that using vsnprintf (which may truncate but never allocate if
+ // there is no more space in the buffer).
+ EmbeddedVector<char, 100> buffer;
+ SNPrintF(buffer, "%.16g %.16g", x(), y());
+ accumulator->Add("%s", buffer.start());
+}
+
+
String* JSReceiver::class_name() {
if (IsJSFunction() && IsJSFunctionProxy()) {
return GetHeap()->function_class_string();
case kTagged: return "t";
case kSmi: return "s";
case kDouble: return "d";
+ case kFloat32x4: return "float32x4";
+ case kFloat64x2: return "float64x2";
+ case kInt32x4: return "int32x4";
case kInteger32: return "i";
case kHeapObject: return "h";
case kExternal: return "x";
break;
}
+ case Translation::FLOAT32x4_REGISTER: {
+ int reg_code = iterator.Next();
+ PrintF(out, "{input=%s}",
+ SIMD128Register::AllocationIndexToString(reg_code));
+ break;
+ }
+
+ case Translation::FLOAT64x2_REGISTER: {
+ int reg_code = iterator.Next();
+ PrintF(out, "{input=%s}",
+ SIMD128Register::AllocationIndexToString(reg_code));
+ break;
+ }
+
+ case Translation::INT32x4_REGISTER: {
+ int reg_code = iterator.Next();
+ PrintF(out, "{input=%s}",
+ SIMD128Register::AllocationIndexToString(reg_code));
+ break;
+ }
+
case Translation::STACK_SLOT: {
int input_slot_index = iterator.Next();
PrintF(out, "{input=%d}", input_slot_index);
break;
}
+ case Translation::FLOAT32x4_STACK_SLOT: {
+ int input_slot_index = iterator.Next();
+ PrintF(out, "{input=%d}", input_slot_index);
+ break;
+ }
+
+ case Translation::FLOAT64x2_STACK_SLOT: {
+ int input_slot_index = iterator.Next();
+ PrintF(out, "{input=%d}", input_slot_index);
+ break;
+ }
+
+ case Translation::INT32x4_STACK_SLOT: {
+ int input_slot_index = iterator.Next();
+ PrintF(out, "{input=%d}", input_slot_index);
+ break;
+ }
+
case Translation::LITERAL: {
unsigned literal_index = iterator.Next();
PrintF(out, "{literal_id=%u}", literal_index);
if (object->HasExternalArrayElements() ||
object->HasFixedTypedArrayElements()) {
- if (!value->IsNumber() && !value->IsUndefined()) {
+ if (!value->IsNumber() && !value->IsFloat32x4() && !value->IsFloat64x2() &&
+ !value->IsInt32x4() && !value->IsUndefined()) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value,
Execution::ToNumber(isolate, value), Object);
}
+Handle<Object> ExternalFloat32x4Array::SetValue(
+ Handle<ExternalFloat32x4Array> array,
+ uint32_t index,
+ Handle<Object> value) {
+ float32x4_value_t cast_value;
+ cast_value.storage[0] = static_cast<float>(OS::nan_value());
+ cast_value.storage[1] = static_cast<float>(OS::nan_value());
+ cast_value.storage[2] = static_cast<float>(OS::nan_value());
+ cast_value.storage[3] = static_cast<float>(OS::nan_value());
+ if (index < static_cast<uint32_t>(array->length())) {
+ if (value->IsFloat32x4()) {
+ cast_value = Handle<Float32x4>::cast(value)->get();
+ } else {
+ // Clamp undefined to NaN (default). All other types have been
+ // converted to a number type further up in the call chain.
+ ASSERT(value->IsUndefined());
+ }
+ array->set(index, cast_value);
+ }
+ return array->GetIsolate()->factory()->NewFloat32x4(cast_value);
+}
+
+
+Handle<Object> ExternalInt32x4Array::SetValue(
+ Handle<ExternalInt32x4Array> array, uint32_t index, Handle<Object> value) {
+ int32x4_value_t cast_value;
+ cast_value.storage[0] = 0;
+ cast_value.storage[1] = 0;
+ cast_value.storage[2] = 0;
+ cast_value.storage[3] = 0;
+ if (index < static_cast<uint32_t>(array->length())) {
+ if (value->IsInt32x4()) {
+ cast_value = Handle<Int32x4>::cast(value)->get();
+ } else {
+ // Clamp undefined to zero (default). All other types have been
+ // converted to a number type further up in the call chain.
+ ASSERT(value->IsUndefined());
+ }
+ array->set(index, cast_value);
+ }
+ return array->GetIsolate()->factory()->NewInt32x4(cast_value);
+}
+
+
+Handle<Object> ExternalFloat64x2Array::SetValue(
+ Handle<ExternalFloat64x2Array> array,
+ uint32_t index,
+ Handle<Object> value) {
+ float64x2_value_t cast_value;
+ cast_value.storage[0] = OS::nan_value();
+ cast_value.storage[1] = OS::nan_value();
+ if (index < static_cast<uint32_t>(array->length())) {
+ if (value->IsFloat64x2()) {
+ cast_value = Handle<Float64x2>::cast(value)->get();
+ } else {
+ // Clamp undefined to NaN (default). All other types have been
+ // converted to a number type further up in the call chain.
+ ASSERT(value->IsUndefined());
+ }
+ array->set(index, cast_value);
+ }
+ return array->GetIsolate()->factory()->NewFloat64x2(cast_value);
+}
+
+
PropertyCell* GlobalObject::GetPropertyCell(LookupResult* result) {
ASSERT(!HasFastProperties());
Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
// - JSArrayBufferView
// - JSTypedArray
// - JSDataView
+// - Float32x4
+// - Float64x2
+// - Int32x4
// - JSSet
// - JSMap
// - JSSetIterator
// - ExternalInt32Array
// - ExternalUint32Array
// - ExternalFloat32Array
+// - ExternalFloat32x4Array
+// - ExternalFloat64x2Array
+// - ExternalInt32x4Array
// - Name
// - String
// - SeqString
V(EXTERNAL_INT32_ARRAY_TYPE) \
V(EXTERNAL_UINT32_ARRAY_TYPE) \
V(EXTERNAL_FLOAT32_ARRAY_TYPE) \
+ V(EXTERNAL_FLOAT32x4_ARRAY_TYPE) \
+ V(EXTERNAL_FLOAT64x2_ARRAY_TYPE) \
+ V(EXTERNAL_INT32x4_ARRAY_TYPE) \
V(EXTERNAL_FLOAT64_ARRAY_TYPE) \
V(EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE) \
\
V(FIXED_INT16_ARRAY_TYPE) \
V(FIXED_UINT16_ARRAY_TYPE) \
V(FIXED_INT32_ARRAY_TYPE) \
+ V(FIXED_INT32x4_ARRAY_TYPE) \
V(FIXED_UINT32_ARRAY_TYPE) \
V(FIXED_FLOAT32_ARRAY_TYPE) \
+ V(FIXED_FLOAT32x4_ARRAY_TYPE) \
V(FIXED_FLOAT64_ARRAY_TYPE) \
+ V(FIXED_FLOAT64x2_ARRAY_TYPE) \
V(FIXED_UINT8_CLAMPED_ARRAY_TYPE) \
\
V(FILLER_TYPE) \
V(JS_ARRAY_BUFFER_TYPE) \
V(JS_TYPED_ARRAY_TYPE) \
V(JS_DATA_VIEW_TYPE) \
+ V(FLOAT32x4_TYPE) \
+ V(FLOAT64x2_TYPE) \
+ V(INT32x4_TYPE) \
V(JS_PROXY_TYPE) \
V(JS_SET_TYPE) \
V(JS_MAP_TYPE) \
EXTERNAL_INT32_ARRAY_TYPE,
EXTERNAL_UINT32_ARRAY_TYPE,
EXTERNAL_FLOAT32_ARRAY_TYPE,
+ EXTERNAL_FLOAT32x4_ARRAY_TYPE,
+ EXTERNAL_FLOAT64x2_ARRAY_TYPE,
+ EXTERNAL_INT32x4_ARRAY_TYPE,
EXTERNAL_FLOAT64_ARRAY_TYPE,
EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE, // LAST_EXTERNAL_ARRAY_TYPE
FIXED_INT16_ARRAY_TYPE,
FIXED_UINT16_ARRAY_TYPE,
FIXED_INT32_ARRAY_TYPE,
+ FIXED_INT32x4_ARRAY_TYPE,
FIXED_UINT32_ARRAY_TYPE,
FIXED_FLOAT32_ARRAY_TYPE,
+ FIXED_FLOAT32x4_ARRAY_TYPE,
+ FIXED_FLOAT64x2_ARRAY_TYPE,
FIXED_FLOAT64_ARRAY_TYPE,
FIXED_UINT8_CLAMPED_ARRAY_TYPE, // LAST_FIXED_TYPED_ARRAY_TYPE
JS_ARRAY_BUFFER_TYPE,
JS_TYPED_ARRAY_TYPE,
JS_DATA_VIEW_TYPE,
+ FLOAT32x4_TYPE,
+ FLOAT64x2_TYPE,
+ INT32x4_TYPE,
JS_SET_TYPE,
JS_MAP_TYPE,
JS_SET_ITERATOR_TYPE,
V(ExternalInt32Array) \
V(ExternalUint32Array) \
V(ExternalFloat32Array) \
+ V(ExternalFloat32x4Array) \
+ V(ExternalFloat64x2Array) \
+ V(ExternalInt32x4Array) \
V(ExternalFloat64Array) \
V(ExternalUint8ClampedArray) \
V(FixedTypedArrayBase) \
V(FixedUint32Array) \
V(FixedInt32Array) \
V(FixedFloat32Array) \
+ V(FixedFloat32x4Array) \
+ V(FixedFloat64x2Array) \
+ V(FixedInt32x4Array) \
V(FixedFloat64Array) \
V(FixedUint8ClampedArray) \
V(ByteArray) \
V(JSArrayBufferView) \
V(JSTypedArray) \
V(JSDataView) \
+ V(Float32x4) \
+ V(Float64x2) \
+ V(Int32x4) \
V(JSProxy) \
V(JSFunctionProxy) \
V(JSSet) \
inline bool HasExternalInt32Elements();
inline bool HasExternalUint32Elements();
inline bool HasExternalFloat32Elements();
+ inline bool HasExternalFloat32x4Elements();
+ inline bool HasExternalFloat64x2Elements();
+ inline bool HasExternalInt32x4Elements();
inline bool HasExternalFloat64Elements();
inline bool HasFixedTypedArrayElements();
inline bool HasFixedUint32Elements();
inline bool HasFixedFloat32Elements();
inline bool HasFixedFloat64Elements();
+ inline bool HasFixedFloat32x4Elements();
+ inline bool HasFixedFloat64x2Elements();
+ inline bool HasFixedInt32x4Elements();
bool HasFastArgumentsElements();
bool HasDictionaryArgumentsElements();
// V has parameters (Type, type, TYPE, C type, element_size)
-#define TYPED_ARRAYS(V) \
+#define BUILTIN_TYPED_ARRAY(V) \
V(Uint8, uint8, UINT8, uint8_t, 1) \
V(Int8, int8, INT8, int8_t, 1) \
V(Uint16, uint16, UINT16, uint16_t, 2) \
V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t, 1)
+#define SIMD128_TYPED_ARRAY(V) \
+ V(Float32x4, float32x4, FLOAT32x4, v8::internal::float32x4_value_t, 16) \
+ V(Float64x2, float64x2, FLOAT64x2, v8::internal::float64x2_value_t, 16) \
+ V(Int32x4, int32x4, INT32x4, v8::internal::int32x4_value_t, 16)
+
+
+#define TYPED_ARRAYS(V) \
+ BUILTIN_TYPED_ARRAY(V) \
+ SIMD128_TYPED_ARRAY(V)
+
// An ExternalArray represents a fixed-size array of primitive values
// which live outside the JavaScript heap. Its subclasses are used to
};
+class ExternalFloat32x4Array: public ExternalArray {
+ public:
+ // Setter and getter.
+ inline float32x4_value_t get_scalar(int index);
+ static inline Handle<Object> get(Handle<ExternalFloat32x4Array> array,
+ int index);
+ inline void set(int index, const float32x4_value_t& value);
+
+ // This accessor applies the correct conversion from Smi, HeapNumber
+ // and undefined.
+ static Handle<Object> SetValue(Handle<ExternalFloat32x4Array> array,
+ uint32_t index,
+ Handle<Object> value);
+
+ // Casting.
+ static inline ExternalFloat32x4Array* cast(Object* obj);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalFloat32x4Array)
+ DECLARE_VERIFIER(ExternalFloat32x4Array)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalFloat32x4Array);
+};
+
+
+class ExternalFloat64x2Array: public ExternalArray {
+ public:
+ // Setter and getter.
+ inline float64x2_value_t get_scalar(int index);
+ static inline Handle<Object> get(Handle<ExternalFloat64x2Array> array,
+ int index);
+ inline void set(int index, const float64x2_value_t& value);
+
+ // This accessor applies the correct conversion from Smi, HeapNumber
+ // and undefined.
+ static Handle<Object> SetValue(Handle<ExternalFloat64x2Array> array,
+ uint32_t index,
+ Handle<Object> value);
+
+ // Casting.
+ static inline ExternalFloat64x2Array* cast(Object* obj);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalFloat64x2Array)
+ DECLARE_VERIFIER(ExternalFloat64x2Array)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalFloat64x2Array);
+};
+
+
+class ExternalInt32x4Array: public ExternalArray {
+ public:
+ // Setter and getter.
+ inline int32x4_value_t get_scalar(int index);
+ static inline Handle<Object> get(Handle<ExternalInt32x4Array> array,
+ int index);
+ inline void set(int index, const int32x4_value_t& value);
+
+ // This accessor applies the correct conversion from Smi, HeapNumber
+ // and undefined.
+ static Handle<Object> SetValue(Handle<ExternalInt32x4Array> array,
+ uint32_t index,
+ Handle<Object> value);
+
+ // Casting.
+ static inline ExternalInt32x4Array* cast(Object* obj);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalInt32x4Array)
+ DECLARE_VERIFIER(ExternalInt32x4Array)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalInt32x4Array);
+};
+
+
class ExternalFloat64Array: public ExternalArray {
public:
// Setter and getter.
V(Math, min, MathMin) \
V(Math, imul, MathImul)
+#define SIMD_NULLARY_OPERATIONS(V) \
+ V(SIMD.float32x4, zero, Float32x4Zero, Float32x4) \
+ V(SIMD.float64x2, zero, Float64x2Zero, Float64x2) \
+ V(SIMD.int32x4, zero, Int32x4Zero, Int32x4)
+
+#define SIMD_UNARY_OPERATIONS(V) \
+ V(SIMD.float32x4, abs, Float32x4Abs, Float32x4, Float32x4) \
+ V(SIMD.float32x4, bitsToInt32x4, Float32x4BitsToInt32x4, Int32x4, Float32x4) \
+ V(SIMD.float32x4, neg, Float32x4Neg, Float32x4, Float32x4) \
+ V(SIMD.float32x4, reciprocal, Float32x4Reciprocal, Float32x4, Float32x4) \
+ V(SIMD.float32x4, reciprocalSqrt, Float32x4ReciprocalSqrt, \
+ Float32x4, Float32x4) \
+ V(SIMD.float32x4, splat, Float32x4Splat, Float32x4, Double) \
+ V(SIMD.float32x4, sqrt, Float32x4Sqrt, Float32x4, Float32x4) \
+ V(SIMD.float32x4, toInt32x4, Float32x4ToInt32x4, Int32x4, Float32x4) \
+ V(SIMD.float64x2, abs, Float64x2Abs, Float64x2, Float64x2) \
+ V(SIMD.float64x2, neg, Float64x2Neg, Float64x2, Float64x2) \
+ V(SIMD.float64x2, sqrt, Float64x2Sqrt, Float64x2, Float64x2) \
+ V(SIMD.int32x4, bitsToFloat32x4, Int32x4BitsToFloat32x4, Float32x4, Int32x4) \
+ V(SIMD.int32x4, neg, Int32x4Neg, Int32x4, Int32x4) \
+ V(SIMD.int32x4, not, Int32x4Not, Int32x4, Int32x4) \
+ V(SIMD.int32x4, splat, Int32x4Splat, Int32x4, Integer32) \
+ V(SIMD.int32x4, toFloat32x4, Int32x4ToFloat32x4, Float32x4, Int32x4)
+
+// Do not need to install them in InstallExperimentalSIMDBuiltinFunctionIds.
+#define SIMD_UNARY_OPERATIONS_FOR_PROPERTY_ACCESS(V) \
+ V(SIMD.float32x4.prototype, signMask, Float32x4GetSignMask, Integer32, \
+ Float32x4) \
+ V(SIMD.float32x4.prototype, x, Float32x4GetX, Double, Float32x4) \
+ V(SIMD.float32x4.prototype, y, Float32x4GetY, Double, Float32x4) \
+ V(SIMD.float32x4.prototype, z, Float32x4GetZ, Double, Float32x4) \
+ V(SIMD.float32x4.prototype, w, Float32x4GetW, Double, Float32x4) \
+ V(SIMD.float64x2.prototype, signMask, Float64x2GetSignMask, Integer32, \
+ Float64x2) \
+ V(SIMD.float64x2.prototype, x, Float64x2GetX, Double, Float64x2) \
+ V(SIMD.float64x2.prototype, y, Float64x2GetY, Double, Float64x2) \
+ V(SIMD.int32x4.prototype, signMask, Int32x4GetSignMask, Integer32, Int32x4) \
+ V(SIMD.int32x4.prototype, x, Int32x4GetX, Integer32, Int32x4) \
+ V(SIMD.int32x4.prototype, y, Int32x4GetY, Integer32, Int32x4) \
+ V(SIMD.int32x4.prototype, z, Int32x4GetZ, Integer32, Int32x4) \
+ V(SIMD.int32x4.prototype, w, Int32x4GetW, Integer32, Int32x4) \
+ V(SIMD.int32x4.prototype, flagX, Int32x4GetFlagX, Tagged, Int32x4) \
+ V(SIMD.int32x4.prototype, flagY, Int32x4GetFlagY, Tagged, Int32x4) \
+ V(SIMD.int32x4.prototype, flagZ, Int32x4GetFlagZ, Tagged, Int32x4) \
+ V(SIMD.int32x4.prototype, flagW, Int32x4GetFlagW, Tagged, Int32x4)
+
+#define SIMD_BINARY_OPERATIONS(V) \
+ V(SIMD.float32x4, add, Float32x4Add, Float32x4, Float32x4, Float32x4) \
+ V(SIMD.float32x4, div, Float32x4Div, Float32x4, Float32x4, Float32x4) \
+ V(SIMD.float32x4, max, Float32x4Max, Float32x4, Float32x4, Float32x4) \
+ V(SIMD.float32x4, min, Float32x4Min, Float32x4, Float32x4, Float32x4) \
+ V(SIMD.float32x4, mul, Float32x4Mul, Float32x4, Float32x4, Float32x4) \
+ V(SIMD.float32x4, sub, Float32x4Sub, Float32x4, Float32x4, Float32x4) \
+ V(SIMD.float32x4, equal, Float32x4Equal, Int32x4, Float32x4, Float32x4) \
+ V(SIMD.float32x4, notEqual, Float32x4NotEqual, Int32x4, Float32x4, \
+ Float32x4) \
+ V(SIMD.float32x4, greaterThan, Float32x4GreaterThan, Int32x4, Float32x4, \
+ Float32x4) \
+ V(SIMD.float32x4, greaterThanOrEqual, Float32x4GreaterThanOrEqual, Int32x4, \
+ Float32x4, Float32x4) \
+ V(SIMD.float32x4, lessThan, Float32x4LessThan, Int32x4, Float32x4, \
+ Float32x4) \
+ V(SIMD.float32x4, lessThanOrEqual, Float32x4LessThanOrEqual, Int32x4, \
+ Float32x4, Float32x4) \
+ V(SIMD.float32x4, shuffle, Float32x4Shuffle, Float32x4, Float32x4, \
+ Integer32) \
+ V(SIMD.float32x4, scale, Float32x4Scale, Float32x4, Float32x4, Double) \
+ V(SIMD.float32x4, withX, Float32x4WithX, Float32x4, Float32x4, Double) \
+ V(SIMD.float32x4, withY, Float32x4WithY, Float32x4, Float32x4, Double) \
+ V(SIMD.float32x4, withZ, Float32x4WithZ, Float32x4, Float32x4, Double) \
+ V(SIMD.float32x4, withW, Float32x4WithW, Float32x4, Float32x4, Double) \
+ V(SIMD.float64x2, add, Float64x2Add, Float64x2, Float64x2, Float64x2) \
+ V(SIMD.float64x2, div, Float64x2Div, Float64x2, Float64x2, Float64x2) \
+ V(SIMD.float64x2, max, Float64x2Max, Float64x2, Float64x2, Float64x2) \
+ V(SIMD.float64x2, min, Float64x2Min, Float64x2, Float64x2, Float64x2) \
+ V(SIMD.float64x2, mul, Float64x2Mul, Float64x2, Float64x2, Float64x2) \
+ V(SIMD.float64x2, sub, Float64x2Sub, Float64x2, Float64x2, Float64x2) \
+ V(SIMD.float64x2, scale, Float64x2Scale, Float64x2, Float64x2, Double) \
+ V(SIMD.float64x2, withX, Float64x2WithX, Float64x2, Float64x2, Double) \
+ V(SIMD.float64x2, withY, Float64x2WithY, Float64x2, Float64x2, Double) \
+ V(SIMD, float64x2, Float64x2Constructor, Float64x2, Double, Double) \
+ V(SIMD.int32x4, add, Int32x4Add, Int32x4, Int32x4, Int32x4) \
+ V(SIMD.int32x4, and, Int32x4And, Int32x4, Int32x4, Int32x4) \
+ V(SIMD.int32x4, mul, Int32x4Mul, Int32x4, Int32x4, Int32x4) \
+ V(SIMD.int32x4, or, Int32x4Or, Int32x4, Int32x4, Int32x4) \
+ V(SIMD.int32x4, sub, Int32x4Sub, Int32x4, Int32x4, Int32x4) \
+ V(SIMD.int32x4, xor, Int32x4Xor, Int32x4, Int32x4, Int32x4) \
+ V(SIMD.int32x4, shuffle, Int32x4Shuffle, Int32x4, Int32x4, Integer32) \
+ V(SIMD.int32x4, withX, Int32x4WithX, Int32x4, Int32x4, Integer32) \
+ V(SIMD.int32x4, withY, Int32x4WithY, Int32x4, Int32x4, Integer32) \
+ V(SIMD.int32x4, withZ, Int32x4WithZ, Int32x4, Int32x4, Integer32) \
+ V(SIMD.int32x4, withW, Int32x4WithW, Int32x4, Int32x4, Integer32) \
+ V(SIMD.int32x4, withFlagX, Int32x4WithFlagX, Int32x4, Int32x4, Tagged) \
+ V(SIMD.int32x4, withFlagY, Int32x4WithFlagY, Int32x4, Int32x4, Tagged) \
+ V(SIMD.int32x4, withFlagZ, Int32x4WithFlagZ, Int32x4, Int32x4, Tagged) \
+ V(SIMD.int32x4, withFlagW, Int32x4WithFlagW, Int32x4, Int32x4, Tagged) \
+ V(SIMD.int32x4, greaterThan, Int32x4GreaterThan, Int32x4, Int32x4, Int32x4) \
+ V(SIMD.int32x4, equal, Int32x4Equal, Int32x4, Int32x4, Int32x4) \
+ V(SIMD.int32x4, lessThan, Int32x4LessThan, Int32x4, Int32x4, Int32x4) \
+ V(SIMD.int32x4, shiftLeft, Int32x4ShiftLeft, Int32x4, Int32x4, Integer32) \
+ V(SIMD.int32x4, shiftRight, Int32x4ShiftRight, Int32x4, Int32x4, Integer32) \
+ V(SIMD.int32x4, shiftRightArithmetic, Int32x4ShiftRightArithmetic, Int32x4, \
+ Int32x4, Integer32)
+
+#define SIMD_TERNARY_OPERATIONS(V) \
+ V(SIMD.float32x4, clamp, Float32x4Clamp, Float32x4, Float32x4, Float32x4, \
+ Float32x4) \
+ V(SIMD.float32x4, shuffleMix, Float32x4ShuffleMix, Float32x4, Float32x4, \
+ Float32x4, Integer32) \
+ V(SIMD.float64x2, clamp, Float64x2Clamp, Float64x2, Float64x2, Float64x2, \
+ Float64x2) \
+ V(SIMD.int32x4, select, Int32x4Select, Float32x4, Int32x4, Float32x4, \
+ Float32x4)
+
+#define SIMD_QUARTERNARY_OPERATIONS(V) \
+ V(SIMD, float32x4, Float32x4Constructor, Float32x4, Double, Double, Double, \
+ Double) \
+ V(SIMD, int32x4, Int32x4Constructor, Int32x4, Integer32, Integer32, \
+ Integer32, Integer32) \
+ V(SIMD.int32x4, bool, Int32x4Bool, Int32x4, Tagged, Tagged, Tagged, Tagged)
+
+#define SIMD_ARRAY_OPERATIONS(V) \
+ V(Float32x4Array.prototype, getAt, Float32x4ArrayGetAt) \
+ V(Float32x4Array.prototype, setAt, Float32x4ArraySetAt) \
+ V(Float64x2Array.prototype, getAt, Float64x2ArrayGetAt) \
+ V(Float64x2Array.prototype, setAt, Float64x2ArraySetAt) \
+ V(Int32x4Array.prototype, getAt, Int32x4ArrayGetAt) \
+ V(Int32x4Array.prototype, setAt, Int32x4ArraySetAt)
+
+// Do not need to install them in InstallExperimentalSIMDBuiltinFunctionIds.
+#define SIMD_FAKE_ID_LISTS(V) \
+ V(SIMD, unreachable, SIMD128Unreachable) \
+ V(SIMD, change, SIMD128Change)
+
enum BuiltinFunctionId {
kArrayCode,
#define DECLARE_FUNCTION_ID(ignored1, ignore2, name) \
k##name,
FUNCTIONS_WITH_ID_LIST(DECLARE_FUNCTION_ID)
-#undef DECLARE_FUNCTION_ID
// Fake id for a special case of Math.pow. Note, it continues the
// list of math functions.
kMathPowHalf,
// Installed only on --harmony-maths.
- kMathClz32
+ kMathClz32,
+ SIMD_FAKE_ID_LISTS(DECLARE_FUNCTION_ID)
+ SIMD_ARRAY_OPERATIONS(DECLARE_FUNCTION_ID)
+#undef DECLARE_FUNCTION_ID
+#define DECLARE_SIMD_NULLARY_FUNCTION_ID(i1, i2, name, i3) \
+ k##name,
+ SIMD_NULLARY_OPERATIONS(DECLARE_SIMD_NULLARY_FUNCTION_ID)
+#undef DECLARE_SIMD_NULLARY_FUNCTION_ID
+#define DECLARE_SIMD_UNARY_FUNCTION_ID(i1, i2, name, i3, i4) \
+ k##name,
+ SIMD_UNARY_OPERATIONS(DECLARE_SIMD_UNARY_FUNCTION_ID)
+ SIMD_UNARY_OPERATIONS_FOR_PROPERTY_ACCESS(DECLARE_SIMD_UNARY_FUNCTION_ID)
+#undef DECLARE_SIMD_UNARY_FUNCTION_ID
+#define DECLARE_SIMD_BINARY_FUNCTION_ID(i1, i2, name, i3, i4, i5) \
+ k##name,
+ SIMD_BINARY_OPERATIONS(DECLARE_SIMD_BINARY_FUNCTION_ID)
+#undef DECLARE_SIMD_BINARY_FUNCTION_ID
+#define DECLARE_SIMD_TERNARY_FUNCTION_ID(i1, i2, name, i3, i4, i5, i6) \
+ k##name,
+ SIMD_TERNARY_OPERATIONS(DECLARE_SIMD_TERNARY_FUNCTION_ID)
+#undef DECLARE_SIMD_TERNARY_FUNCTION_ID
+#define DECLARE_SIMD_QUARTERNARY_FUNCTION_ID(i1, i2, name, i3, i4, i5, i6, i7) \
+ k##name,
+ SIMD_QUARTERNARY_OPERATIONS(DECLARE_SIMD_QUARTERNARY_FUNCTION_ID)
+#undef DECLARE_SIMD_QUARTERNARY_FUNCTION_ID
+ kNumberOfBuiltinFunction
};
};
+class Float32x4: public JSObject {
+ public:
+ typedef float32x4_value_t value_t;
+ static const int kValueSize = kFloat32x4Size;
+ static const InstanceType kInstanceType = FLOAT32x4_TYPE;
+ static inline const char* Name();
+ static inline int kRuntimeAllocatorId();
+
+ // [value]: the FixedFloat32x4Array with length 1.
+ DECL_ACCESSORS(value, Object)
+
+ // Casting.
+ static inline Float32x4* cast(Object* obj);
+
+ // Dispatched behavior.
+ void Float32x4Print(FILE* out);
+ void Float32x4Print(StringStream* accumulator);
+ DECLARE_VERIFIER(Float32x4)
+
+ // Helpers.
+ static const int kLanes = 4;
+ inline float getAt(int index);
+ inline float x() { return getAt(0); }
+ inline float y() { return getAt(1); }
+ inline float z() { return getAt(2); }
+ inline float w() { return getAt(3); }
+ inline float32x4_value_t get();
+ inline void set(float32x4_value_t f32x4);
+
+ // Layout description.
+ static const int kValueOffset = JSObject::kHeaderSize;
+ static const int kSize = kValueOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Float32x4);
+};
+
+
+class Float64x2: public JSObject {
+ public:
+ typedef float64x2_value_t value_t;
+ static const int kValueSize = kFloat64x2Size;
+ static const InstanceType kInstanceType = FLOAT64x2_TYPE;
+ static inline const char* Name();
+ static inline int kRuntimeAllocatorId();
+
+ // [value]: the FixedFloat64x2Array with length 1.
+ DECL_ACCESSORS(value, Object)
+
+ // Casting.
+ static inline Float64x2* cast(Object* obj);
+
+ // Dispatched behavior.
+ void Float64x2Print(FILE* out);
+ void Float64x2Print(StringStream* accumulator);
+ DECLARE_VERIFIER(Float64x2)
+
+ // Helpers.
+ static const int kLanes = 2;
+ inline double getAt(int index);
+ inline double x() { return getAt(0); }
+ inline double y() { return getAt(1); }
+ inline float64x2_value_t get();
+ inline void set(float64x2_value_t f64x2);
+
+ // Layout description.
+ static const int kValueOffset = JSObject::kHeaderSize;
+ static const int kSize = kValueOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Float64x2);
+};
+
+
+class Int32x4: public JSObject {
+ public:
+ typedef int32x4_value_t value_t;
+ static const int kValueSize = kInt32x4Size;
+ static const InstanceType kInstanceType = INT32x4_TYPE;
+ static inline const char* Name();
+ static inline int kRuntimeAllocatorId();
+
+ // [value]: the FixedInt32x4Array with length 1.
+ DECL_ACCESSORS(value, Object)
+
+ // Casting.
+ static inline Int32x4* cast(Object* obj);
+
+ // Dispatched behavior.
+ void Int32x4Print(FILE* out);
+ void Int32x4Print(StringStream* accumulator);
+ DECLARE_VERIFIER(Int32x4)
+
+ // Helpers.
+ static const int kLanes = 4;
+ inline int32_t getAt(int32_t index);
+ inline int32_t x() { return getAt(0); }
+ inline int32_t y() { return getAt(1); }
+ inline int32_t z() { return getAt(2); }
+ inline int32_t w() { return getAt(3); }
+ inline int32x4_value_t get();
+ inline void set(int32x4_value_t i32x4);
+
+ // Layout description.
+ static const int kValueOffset = JSObject::kHeaderSize;
+ static const int kSize = kValueOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Int32x4);
+};
+
+
// Foreign describes objects pointing from JavaScript to C structures.
// Since they cannot contain references to JS HeapObjects they can be
// placed in old_data_space.
kSmi,
kInteger32,
kDouble,
+ kFloat32x4,
+ kFloat64x2,
+ kInt32x4,
kHeapObject,
kTagged,
kExternal,
static Representation Smi() { return Representation(kSmi); }
static Representation Integer32() { return Representation(kInteger32); }
static Representation Double() { return Representation(kDouble); }
+ static Representation Float32x4() { return Representation(kFloat32x4); }
+ static Representation Float64x2() { return Representation(kFloat64x2); }
+ static Representation Int32x4() { return Representation(kInt32x4); }
static Representation HeapObject() { return Representation(kHeapObject); }
static Representation External() { return Representation(kExternal); }
if (IsHeapObject()) return other.IsNone();
if (kind_ == kUInteger8 && other.kind_ == kInteger8) return false;
if (kind_ == kUInteger16 && other.kind_ == kInteger16) return false;
+ if (IsSIMD128() && other.IsSIMD128()) return false;
return kind_ > other.kind_;
}
bool IsInteger32() const { return kind_ == kInteger32; }
bool IsSmiOrInteger32() const { return IsSmi() || IsInteger32(); }
bool IsDouble() const { return kind_ == kDouble; }
+ bool IsFloat32x4() const { return kind_ == kFloat32x4; }
+ bool IsFloat64x2() const { return kind_ == kFloat64x2; }
+ bool IsInt32x4() const { return kind_ == kInt32x4; }
+ bool IsSIMD128() const {
+ return IsFloat32x4() || IsFloat64x2() || IsInt32x4();
+ }
bool IsHeapObject() const { return kind_ == kHeapObject; }
bool IsExternal() const { return kind_ == kExternal; }
bool IsSpecialization() const {
JSObject::ValidateElements(js_object);
if (js_object->HasExternalArrayElements() ||
js_object->HasFixedTypedArrayElements()) {
- if (!value->IsNumber() && !value->IsUndefined()) {
+ if (!value->IsNumber() && !value->IsFloat32x4() &&
+ !value->IsFloat64x2() && !value->IsInt32x4() &&
+ !value->IsUndefined()) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value, Execution::ToNumber(isolate, value), Object);
}
Handle<Name> name = Handle<Name>::cast(key);
if (name->AsArrayIndex(&index)) {
if (js_object->HasExternalArrayElements()) {
- if (!value->IsNumber() && !value->IsUndefined()) {
+ if (!value->IsNumber() && !value->IsFloat32x4() &&
+ !value->IsFloat64x2() && !value->IsInt32x4() &&
+ !value->IsUndefined()) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value, Execution::ToNumber(isolate, value), Object);
}
}
+RUNTIME_FUNCTION(Runtime_AllocateFloat32x4) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 0);
+
+ float32x4_value_t zero = {{0, 0, 0, 0}};
+ return *isolate->factory()->NewFloat32x4(zero);
+}
+
+
+RUNTIME_FUNCTION(Runtime_AllocateFloat64x2) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 0);
+
+ float64x2_value_t zero = {{0, 0}};
+ return *isolate->factory()->NewFloat64x2(zero);
+}
+
+
+RUNTIME_FUNCTION(Runtime_AllocateInt32x4) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 0);
+
+ int32x4_value_t zero = {{0, 0, 0, 0}};
+ return *isolate->factory()->NewInt32x4(zero);
+}
+
+
RUNTIME_FUNCTION(Runtime_NumberAdd) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
}
+static void IterateExternalFloat32x4ArrayElements(Isolate* isolate,
+ Handle<JSObject> receiver,
+ ArrayConcatVisitor* visitor) {
+ Handle<ExternalFloat32x4Array> array(
+ ExternalFloat32x4Array::cast(receiver->elements()));
+ uint32_t len = static_cast<uint32_t>(array->length());
+
+ ASSERT(visitor != NULL);
+ for (uint32_t j = 0; j < len; j++) {
+ HandleScope loop_scope(isolate);
+ Handle<Object> e = isolate->factory()->NewFloat32x4(array->get_scalar(j));
+ visitor->visit(j, e);
+ }
+}
+
+
+static void IterateExternalFloat64x2ArrayElements(Isolate* isolate,
+ Handle<JSObject> receiver,
+ ArrayConcatVisitor* visitor) {
+ Handle<ExternalFloat64x2Array> array(
+ ExternalFloat64x2Array::cast(receiver->elements()));
+ uint32_t len = static_cast<uint32_t>(array->length());
+
+ ASSERT(visitor != NULL);
+ for (uint32_t j = 0; j < len; j++) {
+ HandleScope loop_scope(isolate);
+ Handle<Object> e = isolate->factory()->NewFloat64x2(array->get_scalar(j));
+ visitor->visit(j, e);
+ }
+}
+
+
+static void IterateExternalInt32x4ArrayElements(Isolate* isolate,
+ Handle<JSObject> receiver,
+ ArrayConcatVisitor* visitor) {
+ Handle<ExternalInt32x4Array> array(
+ ExternalInt32x4Array::cast(receiver->elements()));
+ uint32_t len = static_cast<uint32_t>(array->length());
+
+ ASSERT(visitor != NULL);
+ for (uint32_t j = 0; j < len; j++) {
+ HandleScope loop_scope(isolate);
+ Handle<Object> e = isolate->factory()->NewInt32x4(array->get_scalar(j));
+ visitor->visit(j, e);
+ }
+}
+
+
// Used for sorting indices in a List<uint32_t>.
static int compareUInt32(const uint32_t* ap, const uint32_t* bp) {
uint32_t a = *ap;
isolate, receiver, false, false, visitor);
break;
}
+ case EXTERNAL_FLOAT32x4_ELEMENTS: {
+ IterateExternalFloat32x4ArrayElements(isolate, receiver, visitor);
+ break;
+ }
+ case EXTERNAL_FLOAT64x2_ELEMENTS: {
+ IterateExternalFloat64x2ArrayElements(isolate, receiver, visitor);
+ break;
+ }
+ case EXTERNAL_INT32x4_ELEMENTS: {
+ IterateExternalInt32x4ArrayElements(isolate, receiver, visitor);
+ break;
+ }
case EXTERNAL_FLOAT64_ELEMENTS: {
IterateExternalArrayElements<ExternalFloat64Array, double>(
isolate, receiver, false, false, visitor);
}
+#define RETURN_Float32x4_RESULT(value) \
+ return *isolate->factory()->NewFloat32x4(value);
+
+
+#define RETURN_Float64x2_RESULT(value) \
+ return *isolate->factory()->NewFloat64x2(value);
+
+
+#define RETURN_Int32x4_RESULT(value) \
+ return *isolate->factory()->NewInt32x4(value);
+
+
+RUNTIME_FUNCTION(Runtime_CreateFloat32x4) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 4);
+ RUNTIME_ASSERT(args[0]->IsNumber());
+ RUNTIME_ASSERT(args[1]->IsNumber());
+ RUNTIME_ASSERT(args[2]->IsNumber());
+ RUNTIME_ASSERT(args[3]->IsNumber());
+
+ float32x4_value_t value;
+ value.storage[0] = static_cast<float>(args.number_at(0));
+ value.storage[1] = static_cast<float>(args.number_at(1));
+ value.storage[2] = static_cast<float>(args.number_at(2));
+ value.storage[3] = static_cast<float>(args.number_at(3));
+
+ RETURN_Float32x4_RESULT(value);
+}
+
+
+RUNTIME_FUNCTION(Runtime_CreateFloat64x2) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ RUNTIME_ASSERT(args[0]->IsNumber());
+ RUNTIME_ASSERT(args[1]->IsNumber());
+
+ float64x2_value_t value;
+ value.storage[0] = args.number_at(0);
+ value.storage[1] = args.number_at(1);
+
+ RETURN_Float64x2_RESULT(value);
+}
+
+
+RUNTIME_FUNCTION(Runtime_CreateInt32x4) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 4);
+ RUNTIME_ASSERT(args[0]->IsNumber());
+ RUNTIME_ASSERT(args[1]->IsNumber());
+ RUNTIME_ASSERT(args[2]->IsNumber());
+ RUNTIME_ASSERT(args[3]->IsNumber());
+
+ int32x4_value_t value;
+ value.storage[0] = NumberToInt32(args[0]);
+ value.storage[1] = NumberToInt32(args[1]);
+ value.storage[2] = NumberToInt32(args[2]);
+ value.storage[3] = NumberToInt32(args[3]);
+
+ RETURN_Int32x4_RESULT(value);
+}
+
+
+// Used to convert between uint32_t and float32 without breaking strict
+// aliasing rules.
+union float32_uint32 {
+ float f;
+ uint32_t u;
+ float32_uint32(float v) {
+ f = v;
+ }
+ float32_uint32(uint32_t v) {
+ u = v;
+ }
+};
+
+
+union float64_uint64 {
+ double f;
+ uint64_t u;
+ float64_uint64(double v) {
+ f = v;
+ }
+ float64_uint64(uint64_t v) {
+ u = v;
+ }
+};
+
+
+RUNTIME_FUNCTION(Runtime_Float32x4GetSignMask) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(Float32x4, self, 0);
+ float32_uint32 x(self->x());
+ float32_uint32 y(self->y());
+ float32_uint32 z(self->z());
+ float32_uint32 w(self->w());
+ uint32_t mx = (x.u & 0x80000000) >> 31;
+ uint32_t my = (y.u & 0x80000000) >> 31;
+ uint32_t mz = (z.u & 0x80000000) >> 31;
+ uint32_t mw = (w.u & 0x80000000) >> 31;
+ uint32_t value = mx | (my << 1) | (mz << 2) | (mw << 3);
+ return *isolate->factory()->NewNumberFromUint(value);
+}
+
+
+RUNTIME_FUNCTION(Runtime_Float64x2GetSignMask) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(Float64x2, self, 0);
+ float64_uint64 x(self->x());
+ float64_uint64 y(self->y());
+ uint64_t mx = x.u >> 63;
+ uint64_t my = y.u >> 63;
+ uint32_t value = static_cast<uint32_t>(mx | (my << 1));
+ return *isolate->factory()->NewNumberFromUint(value);
+}
+
+
+RUNTIME_FUNCTION(Runtime_Int32x4GetSignMask) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(Int32x4, self, 0);
+ uint32_t mx = (self->x() & 0x80000000) >> 31;
+ uint32_t my = (self->y() & 0x80000000) >> 31;
+ uint32_t mz = (self->z() & 0x80000000) >> 31;
+ uint32_t mw = (self->w() & 0x80000000) >> 31;
+ uint32_t value = mx | (my << 1) | (mz << 2) | (mw << 3);
+ return *isolate->factory()->NewNumberFromUint(value);
+}
+
+
+#define LANE_VALUE(VALUE, LANE) \
+ VALUE->LANE()
+
+
+#define LANE_FLAG(VALUE, LANE) \
+ VALUE->LANE() != 0
+
+
+#define SIMD128_LANE_ACCESS_FUNCTIONS(V) \
+ V(Float32x4, GetX, NewNumber, x, LANE_VALUE) \
+ V(Float32x4, GetY, NewNumber, y, LANE_VALUE) \
+ V(Float32x4, GetZ, NewNumber, z, LANE_VALUE) \
+ V(Float32x4, GetW, NewNumber, w, LANE_VALUE) \
+ V(Float64x2, GetX, NewNumber, x, LANE_VALUE) \
+ V(Float64x2, GetY, NewNumber, y, LANE_VALUE) \
+ V(Int32x4, GetX, NewNumberFromInt, x, LANE_VALUE) \
+ V(Int32x4, GetY, NewNumberFromInt, y, LANE_VALUE) \
+ V(Int32x4, GetZ, NewNumberFromInt, z, LANE_VALUE) \
+ V(Int32x4, GetW, NewNumberFromInt, w, LANE_VALUE) \
+ V(Int32x4, GetFlagX, ToBoolean, x, LANE_FLAG) \
+ V(Int32x4, GetFlagY, ToBoolean, y, LANE_FLAG) \
+ V(Int32x4, GetFlagZ, ToBoolean, z, LANE_FLAG) \
+ V(Int32x4, GetFlagW, ToBoolean, w, LANE_FLAG)
+
+
+#define DECLARE_SIMD_LANE_ACCESS_FUNCTION( \
+ TYPE, NAME, HEAP_FUNCTION, LANE, ACCESS_FUNCTION) \
+RUNTIME_FUNCTION(Runtime_##TYPE##NAME) { \
+ HandleScope scope(isolate); \
+ ASSERT(args.length() == 1); \
+ \
+ CONVERT_ARG_CHECKED(TYPE, a, 0); \
+ \
+ return *isolate->factory()->HEAP_FUNCTION( \
+ ACCESS_FUNCTION(a, LANE)); \
+}
+
+
+SIMD128_LANE_ACCESS_FUNCTIONS(DECLARE_SIMD_LANE_ACCESS_FUNCTION)
+
+
+template<typename T>
+static inline T Neg(T a) {
+ return -a;
+}
+
+
+template<typename T>
+static inline T Not(T a) {
+ return ~a;
+}
+
+
+template<typename T>
+static inline T Reciprocal(T a) {
+ UNIMPLEMENTED();
+}
+
+
+template<>
+inline float Reciprocal<float>(float a) {
+ return 1.0f / a;
+}
+
+
+template<typename T>
+static inline T ReciprocalSqrt(T a) {
+ UNIMPLEMENTED();
+}
+
+
+template<>
+inline float ReciprocalSqrt<float>(float a) {
+ return sqrtf(1.0f / a);
+}
+
+
+template<typename T>
+static inline T Sqrt(T a) {
+ UNIMPLEMENTED();
+}
+
+
+template<>
+inline float Sqrt<float>(float a) {
+ return sqrtf(a);
+}
+
+
+template<>
+inline double Sqrt<double>(double a) {
+ return sqrt(a);
+}
+
+
+#define SIMD128_UNARY_FUNCTIONS(V) \
+ V(Float32x4, Abs) \
+ V(Float32x4, Neg) \
+ V(Float32x4, Reciprocal) \
+ V(Float32x4, ReciprocalSqrt) \
+ V(Float32x4, Sqrt) \
+ V(Float64x2, Abs) \
+ V(Float64x2, Neg) \
+ V(Float64x2, Sqrt) \
+ V(Int32x4, Neg) \
+ V(Int32x4, Not)
+
+
+#define DECLARE_SIMD_UNARY_FUNCTION(TYPE, FUNCTION) \
+RUNTIME_FUNCTION(Runtime_##TYPE##FUNCTION) { \
+ HandleScope scope(isolate); \
+ ASSERT(args.length() == 1); \
+ \
+ CONVERT_ARG_CHECKED(TYPE, a, 0); \
+ \
+ TYPE::value_t result; \
+ for (int i = 0; i < TYPE::kLanes; i++) { \
+ result.storage[i] = FUNCTION(a->getAt(i)); \
+ } \
+ \
+ RETURN_##TYPE##_RESULT(result); \
+}
+
+
+SIMD128_UNARY_FUNCTIONS(DECLARE_SIMD_UNARY_FUNCTION)
+
+
+template<typename T1, typename T2>
+inline void BitsTo(T1 s, T2* t) {
+ memcpy(t, &s, sizeof(T2));
+}
+
+
+template<typename T1, typename T2>
+inline void To(T1 s, T2* t) {
+}
+
+
+template<>
+inline void To<int32_t, float>(int32_t s, float* t) {
+ *t = static_cast<float>(s);
+}
+
+
+template<>
+inline void To<float, int32_t>(float s, int32_t* t) {
+ *t = DoubleToInt32(static_cast<double>(s));
+}
+
+
+#define SIMD128_CONVERSION_FUNCTIONS(V) \
+ V(Float32x4, BitsTo, Int32x4) \
+ V(Float32x4, To, Int32x4) \
+ V(Int32x4, BitsTo, Float32x4) \
+ V(Int32x4, To, Float32x4)
+
+
+#define DECLARE_SIMD_CONVERSION_FUNCTION( \
+ SOURCE_TYPE, FUNCTION, TARGET_TYPE) \
+RUNTIME_FUNCTION( \
+ Runtime_##SOURCE_TYPE##FUNCTION##TARGET_TYPE) { \
+ HandleScope scope(isolate); \
+ ASSERT(args.length() == 1); \
+ \
+ CONVERT_ARG_CHECKED(SOURCE_TYPE, a, 0); \
+ \
+ TARGET_TYPE::value_t result; \
+ for (int i = 0; i < SOURCE_TYPE::kLanes; i++) { \
+ FUNCTION(a->getAt(i), &result.storage[i]); \
+ } \
+ \
+ RETURN_##TARGET_TYPE##_RESULT(result); \
+}
+
+
+SIMD128_CONVERSION_FUNCTIONS(DECLARE_SIMD_CONVERSION_FUNCTION)
+
+
+template<typename T>
+static inline T Add(T a, T b) {
+ return a + b;
+}
+
+
+template<typename T>
+static inline T Div(T a, T b) {
+ return a / b;
+}
+
+
+template<typename T>
+static inline T Mul(T a, T b) {
+ return a * b;
+}
+
+
+template<typename T>
+static inline T Sub(T a, T b) {
+ return a - b;
+}
+
+
+template<typename T>
+static inline int32_t Equal(T a, T b) {
+ return a == b ? -1 : 0;
+}
+
+
+template<typename T>
+static inline int32_t NotEqual(T a, T b) {
+ return a != b ? -1 : 0;
+}
+
+
+template<typename T>
+static inline int32_t GreaterThanOrEqual(T a, T b) {
+ return a >= b ? -1 : 0;
+}
+
+
+template<typename T>
+static inline int32_t GreaterThan(T a, T b) {
+ return a > b ? -1 : 0;
+}
+
+
+template<typename T>
+static inline int32_t LessThan(T a, T b) {
+ return a < b ? -1 : 0;
+}
+
+
+template<typename T>
+static inline int32_t LessThanOrEqual(T a, T b) {
+ return a <= b ? -1 : 0;
+}
+
+
+template<typename T>
+static inline T And(T a, T b) {
+ return a & b;
+}
+
+
+template<typename T>
+static inline T Or(T a, T b) {
+ return a | b;
+}
+
+
+template<typename T>
+static inline T Xor(T a, T b) {
+ return a ^ b;
+}
+
+
+#define SIMD128_BINARY_FUNCTIONS(V) \
+ V(Float32x4, Add, Float32x4) \
+ V(Float32x4, Div, Float32x4) \
+ V(Float32x4, Max, Float32x4) \
+ V(Float32x4, Min, Float32x4) \
+ V(Float32x4, Mul, Float32x4) \
+ V(Float32x4, Sub, Float32x4) \
+ V(Float32x4, Equal, Int32x4) \
+ V(Float32x4, NotEqual, Int32x4) \
+ V(Float32x4, GreaterThanOrEqual, Int32x4) \
+ V(Float32x4, GreaterThan, Int32x4) \
+ V(Float32x4, LessThan, Int32x4) \
+ V(Float32x4, LessThanOrEqual, Int32x4) \
+ V(Float64x2, Add, Float64x2) \
+ V(Float64x2, Div, Float64x2) \
+ V(Float64x2, Max, Float64x2) \
+ V(Float64x2, Min, Float64x2) \
+ V(Float64x2, Mul, Float64x2) \
+ V(Float64x2, Sub, Float64x2) \
+ V(Int32x4, Add, Int32x4) \
+ V(Int32x4, And, Int32x4) \
+ V(Int32x4, Mul, Int32x4) \
+ V(Int32x4, Or, Int32x4) \
+ V(Int32x4, Sub, Int32x4) \
+ V(Int32x4, Xor, Int32x4) \
+ V(Int32x4, Equal, Int32x4) \
+ V(Int32x4, GreaterThan, Int32x4) \
+ V(Int32x4, LessThan, Int32x4)
+
+
+#define DECLARE_SIMD_BINARY_FUNCTION( \
+ TYPE, FUNCTION, RETURN_TYPE) \
+RUNTIME_FUNCTION(Runtime_##TYPE##FUNCTION) { \
+ HandleScope scope(isolate); \
+ ASSERT(args.length() == 2); \
+ \
+ CONVERT_ARG_CHECKED(TYPE, a, 0); \
+ CONVERT_ARG_CHECKED(TYPE, b, 1); \
+ \
+ RETURN_TYPE::value_t result; \
+ for (int i = 0; i < TYPE::kLanes; i++) { \
+ result.storage[i] = FUNCTION(a->getAt(i), b->getAt(i)); \
+ } \
+ \
+ RETURN_##RETURN_TYPE##_RESULT(result); \
+}
+
+
+SIMD128_BINARY_FUNCTIONS(DECLARE_SIMD_BINARY_FUNCTION)
+
+
+#define SIMD128_SHUFFLE_FUNCTIONS(V) \
+ V(Float32x4) \
+ V(Int32x4)
+
+
+#define DECLARE_SIMD_SHUFFLE_FUNCTION(TYPE) \
+RUNTIME_FUNCTION(Runtime_##TYPE##Shuffle) { \
+ HandleScope scope(isolate); \
+ ASSERT(args.length() == 2); \
+ \
+ CONVERT_ARG_CHECKED(TYPE, a, 0); \
+ RUNTIME_ASSERT(args[1]->IsNumber()); \
+ uint32_t m = NumberToUint32(args[1]); \
+ \
+ TYPE::value_t result; \
+ for (int i = 0; i < TYPE::kLanes; i++) { \
+ result.storage[i] = a->getAt((m >> (i * 2)) & 0x3); \
+ } \
+ \
+ RETURN_##TYPE##_RESULT(result); \
+}
+
+
+SIMD128_SHUFFLE_FUNCTIONS(DECLARE_SIMD_SHUFFLE_FUNCTION)
+
+
+RUNTIME_FUNCTION(Runtime_Float32x4Scale) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_CHECKED(Float32x4, self, 0);
+ RUNTIME_ASSERT(args[1]->IsNumber());
+
+ float _s = static_cast<float>(args.number_at(1));
+ float32x4_value_t result;
+ result.storage[0] = self->x() * _s;
+ result.storage[1] = self->y() * _s;
+ result.storage[2] = self->z() * _s;
+ result.storage[3] = self->w() * _s;
+
+ RETURN_Float32x4_RESULT(result);
+}
+
+
+RUNTIME_FUNCTION(Runtime_Float64x2Scale) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_CHECKED(Float64x2, self, 0);
+ RUNTIME_ASSERT(args[1]->IsNumber());
+
+ double _s = args.number_at(1);
+ float64x2_value_t result;
+ result.storage[0] = self->x() * _s;
+ result.storage[1] = self->y() * _s;
+
+ RETURN_Float64x2_RESULT(result);
+}
+
+
+#define ARG_TO_FLOAT32(x) \
+ CONVERT_DOUBLE_ARG_CHECKED(t, 1); \
+ float x = static_cast<float>(t);
+
+
+#define ARG_TO_FLOAT64(x) \
+ CONVERT_DOUBLE_ARG_CHECKED(x, 1); \
+
+
+#define ARG_TO_INT32(x) \
+ RUNTIME_ASSERT(args[1]->IsNumber()); \
+ int32_t x = NumberToInt32(args[1]);
+
+
+#define ARG_TO_BOOLEAN(x) \
+ CONVERT_BOOLEAN_ARG_CHECKED(flag, 1); \
+ int32_t x = flag ? -1 : 0;
+
+#define SIMD128_SET_LANE_FUNCTIONS(V) \
+ V(Float32x4, WithX, ARG_TO_FLOAT32, 0) \
+ V(Float32x4, WithY, ARG_TO_FLOAT32, 1) \
+ V(Float32x4, WithZ, ARG_TO_FLOAT32, 2) \
+ V(Float32x4, WithW, ARG_TO_FLOAT32, 3) \
+ V(Float64x2, WithX, ARG_TO_FLOAT64, 0) \
+ V(Float64x2, WithY, ARG_TO_FLOAT64, 1) \
+ V(Int32x4, WithX, ARG_TO_INT32, 0) \
+ V(Int32x4, WithY, ARG_TO_INT32, 1) \
+ V(Int32x4, WithZ, ARG_TO_INT32, 2) \
+ V(Int32x4, WithW, ARG_TO_INT32, 3) \
+ V(Int32x4, WithFlagX, ARG_TO_BOOLEAN, 0) \
+ V(Int32x4, WithFlagY, ARG_TO_BOOLEAN, 1) \
+ V(Int32x4, WithFlagZ, ARG_TO_BOOLEAN, 2) \
+ V(Int32x4, WithFlagW, ARG_TO_BOOLEAN, 3)
+
+
+#define DECLARE_SIMD_SET_LANE_FUNCTION( \
+ TYPE, NAME, ARG_FUNCTION, LANE) \
+RUNTIME_FUNCTION(Runtime_##TYPE##NAME) { \
+ HandleScope scope(isolate); \
+ ASSERT(args.length() == 2); \
+ \
+ CONVERT_ARG_CHECKED(TYPE, a, 0); \
+ ARG_FUNCTION(value); \
+ \
+ TYPE::value_t result; \
+ for (int i = 0; i < TYPE::kLanes; i++) { \
+ if (i != LANE) \
+ result.storage[i] = a->getAt(i); \
+ else \
+ result.storage[i] = value; \
+ } \
+ \
+ RETURN_##TYPE##_RESULT(result); \
+}
+
+
+SIMD128_SET_LANE_FUNCTIONS(DECLARE_SIMD_SET_LANE_FUNCTION)
+
+
+RUNTIME_FUNCTION(Runtime_Float32x4Clamp) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+
+ CONVERT_ARG_CHECKED(Float32x4, self, 0);
+ CONVERT_ARG_CHECKED(Float32x4, lo, 1);
+ CONVERT_ARG_CHECKED(Float32x4, hi, 2);
+
+ float32x4_value_t result;
+ float _x = self->x() > lo->x() ? self->x() : lo->x();
+ float _y = self->y() > lo->y() ? self->y() : lo->y();
+ float _z = self->z() > lo->z() ? self->z() : lo->z();
+ float _w = self->w() > lo->w() ? self->w() : lo->w();
+ result.storage[0] = _x > hi->x() ? hi->x() : _x;
+ result.storage[1] = _y > hi->y() ? hi->y() : _y;
+ result.storage[2] = _z > hi->z() ? hi->z() : _z;
+ result.storage[3] = _w > hi->w() ? hi->w() : _w;
+
+ RETURN_Float32x4_RESULT(result);
+}
+
+
+RUNTIME_FUNCTION(Runtime_Float64x2Clamp) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+
+ CONVERT_ARG_CHECKED(Float64x2, self, 0);
+ CONVERT_ARG_CHECKED(Float64x2, lo, 1);
+ CONVERT_ARG_CHECKED(Float64x2, hi, 2);
+
+ float64x2_value_t result;
+ double _x = self->x() > lo->x() ? self->x() : lo->x();
+ double _y = self->y() > lo->y() ? self->y() : lo->y();
+ result.storage[0] = _x > hi->x() ? hi->x() : _x;
+ result.storage[1] = _y > hi->y() ? hi->y() : _y;
+
+ RETURN_Float64x2_RESULT(result);
+}
+
+
+RUNTIME_FUNCTION(Runtime_Float32x4ShuffleMix) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+
+ CONVERT_ARG_CHECKED(Float32x4, first, 0);
+ CONVERT_ARG_CHECKED(Float32x4, second, 1);
+ RUNTIME_ASSERT(args[2]->IsNumber());
+
+ uint32_t m = NumberToUint32(args[2]);
+ float32x4_value_t result;
+ float data1[4] = { first->x(), first->y(), first->z(), first->w() };
+ float data2[4] = { second->x(), second->y(), second->z(), second->w() };
+ result.storage[0] = data1[m & 0x3];
+ result.storage[1] = data1[(m >> 2) & 0x3];
+ result.storage[2] = data2[(m >> 4) & 0x3];
+ result.storage[3] = data2[(m >> 6) & 0x3];
+
+ RETURN_Float32x4_RESULT(result);
+}
+
+
+RUNTIME_FUNCTION(Runtime_Int32x4Select) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+
+ CONVERT_ARG_CHECKED(Int32x4, self, 0);
+ CONVERT_ARG_CHECKED(Float32x4, tv, 1);
+ CONVERT_ARG_CHECKED(Float32x4, fv, 2);
+
+ uint32_t _maskX = self->x();
+ uint32_t _maskY = self->y();
+ uint32_t _maskZ = self->z();
+ uint32_t _maskW = self->w();
+ // Extract floats and interpret them as masks.
+ float32_uint32 tvx(tv->x());
+ float32_uint32 tvy(tv->y());
+ float32_uint32 tvz(tv->z());
+ float32_uint32 tvw(tv->w());
+ float32_uint32 fvx(fv->x());
+ float32_uint32 fvy(fv->y());
+ float32_uint32 fvz(fv->z());
+ float32_uint32 fvw(fv->w());
+ // Perform select.
+ float32_uint32 tempX((_maskX & tvx.u) | (~_maskX & fvx.u));
+ float32_uint32 tempY((_maskY & tvy.u) | (~_maskY & fvy.u));
+ float32_uint32 tempZ((_maskZ & tvz.u) | (~_maskZ & fvz.u));
+ float32_uint32 tempW((_maskW & tvw.u) | (~_maskW & fvw.u));
+
+ float32x4_value_t result;
+ result.storage[0] = tempX.f;
+ result.storage[1] = tempY.f;
+ result.storage[2] = tempZ.f;
+ result.storage[3] = tempW.f;
+
+ RETURN_Float32x4_RESULT(result);
+}
+
+
// ----------------------------------------------------------------------------
// Implementation of Runtime
F(RoundNumber, 1, 1) \
F(MathFround, 1, 1) \
\
+ /* Float32x4 and Int32x4 */ \
+ F(AllocateFloat32x4, 0, 1) \
+ F(AllocateFloat64x2, 0, 1) \
+ F(AllocateInt32x4, 0, 1) \
+ \
+ /* SIMD */ \
+ F(Float32x4Abs, 1, 1) \
+ F(Float32x4BitsToInt32x4, 1, 1) \
+ F(Float32x4Neg, 1, 1) \
+ F(Float32x4Reciprocal, 1, 1) \
+ F(Float32x4ReciprocalSqrt, 1, 1) \
+ F(Float32x4Sqrt, 1, 1) \
+ F(Float32x4ToInt32x4, 1, 1) \
+ F(Float32x4Add, 2, 1) \
+ F(Float32x4Div, 2, 1) \
+ F(Float32x4Max, 2, 1) \
+ F(Float32x4Min, 2, 1) \
+ F(Float32x4Mul, 2, 1) \
+ F(Float32x4Sub, 2, 1) \
+ F(Float32x4Equal, 2, 1) \
+ F(Float32x4NotEqual, 2, 1) \
+ F(Float32x4GreaterThanOrEqual, 2, 1) \
+ F(Float32x4GreaterThan, 2, 1) \
+ F(Float32x4LessThan, 2, 1) \
+ F(Float32x4LessThanOrEqual, 2, 1) \
+ F(Float32x4Shuffle, 2, 1) \
+ F(Float32x4Scale, 2, 1) \
+ F(Float32x4WithX, 2, 1) \
+ F(Float32x4WithY, 2, 1) \
+ F(Float32x4WithZ, 2, 1) \
+ F(Float32x4WithW, 2, 1) \
+ F(Float32x4Clamp, 3, 1) \
+ F(Float32x4ShuffleMix, 3, 1) \
+ F(Float64x2Abs, 1, 1) \
+ F(Float64x2Neg, 1, 1) \
+ F(Float64x2Sqrt, 1, 1) \
+ F(Float64x2Add, 2, 1) \
+ F(Float64x2Div, 2, 1) \
+ F(Float64x2Max, 2, 1) \
+ F(Float64x2Min, 2, 1) \
+ F(Float64x2Mul, 2, 1) \
+ F(Float64x2Sub, 2, 1) \
+ F(Float64x2Scale, 2, 1) \
+ F(Float64x2WithX, 2, 1) \
+ F(Float64x2WithY, 2, 1) \
+ F(Float64x2Clamp, 3, 1) \
+ F(Int32x4BitsToFloat32x4, 1, 1) \
+ F(Int32x4Neg, 1, 1) \
+ F(Int32x4Not, 1, 1) \
+ F(Int32x4ToFloat32x4, 1, 1) \
+ F(Int32x4And, 2, 1) \
+ F(Int32x4Or, 2, 1) \
+ F(Int32x4Xor, 2, 1) \
+ F(Int32x4Add, 2, 1) \
+ F(Int32x4Sub, 2, 1) \
+ F(Int32x4Mul, 2, 1) \
+ F(Int32x4Shuffle, 2, 1) \
+ F(Int32x4WithX, 2, 1) \
+ F(Int32x4WithY, 2, 1) \
+ F(Int32x4WithZ, 2, 1) \
+ F(Int32x4WithW, 2, 1) \
+ F(Int32x4WithFlagX, 2, 1) \
+ F(Int32x4WithFlagY, 2, 1) \
+ F(Int32x4WithFlagZ, 2, 1) \
+ F(Int32x4WithFlagW, 2, 1) \
+ F(Int32x4GreaterThan, 2, 1) \
+ F(Int32x4Equal, 2, 1) \
+ F(Int32x4LessThan, 2, 1) \
+ F(Int32x4Select, 3, 1) \
+ \
/* Regular expressions */ \
F(RegExpCompile, 3, 1) \
F(RegExpExecMultiple, 4, 1) \
F(DateSetValue, 3, 1) \
F(DateCacheVersion, 0, 1) \
\
+ /* Float32x4, Float64x2 and Int32x4 */ \
+ F(CreateFloat32x4, 4, 1) \
+ F(Float32x4GetX, 1, 1) \
+ F(Float32x4GetY, 1, 1) \
+ F(Float32x4GetZ, 1, 1) \
+ F(Float32x4GetW, 1, 1) \
+ F(Float32x4GetSignMask, 1, 1) \
+ F(CreateFloat64x2, 2, 1) \
+ F(Float64x2GetX, 1, 1) \
+ F(Float64x2GetY, 1, 1) \
+ F(Float64x2GetSignMask, 1, 1) \
+ F(CreateInt32x4, 4, 1) \
+ F(Int32x4GetX, 1, 1) \
+ F(Int32x4GetY, 1, 1) \
+ F(Int32x4GetZ, 1, 1) \
+ F(Int32x4GetW, 1, 1) \
+ F(Int32x4GetFlagX, 1, 1) \
+ F(Int32x4GetFlagY, 1, 1) \
+ F(Int32x4GetFlagZ, 1, 1) \
+ F(Int32x4GetFlagW, 1, 1) \
+ F(Int32x4GetSignMask, 1, 1) \
+ \
/* Globals */ \
F(CompileString, 2, 1) \
\
F(HasExternalInt32Elements, 1, 1) \
F(HasExternalUint32Elements, 1, 1) \
F(HasExternalFloat32Elements, 1, 1) \
+ F(HasExternalFloat32x4Elements, 1, 1) \
+ F(HasExternalInt32x4Elements, 1, 1) \
F(HasExternalFloat64Elements, 1, 1) \
+ F(HasExternalFloat64x2Elements, 1, 1) \
F(HasFixedUint8ClampedElements, 1, 1) \
F(HasFixedInt8Elements, 1, 1) \
F(HasFixedUint8Elements, 1, 1) \
ARRAY_ID_FLOAT32 = 7,
ARRAY_ID_FLOAT64 = 8,
ARRAY_ID_UINT8_CLAMPED = 9,
-
+ ARRAY_ID_FLOAT32x4 = 10,
+ ARRAY_ID_FLOAT64x2 = 11,
+ ARRAY_ID_INT32x4 = 12,
ARRAY_ID_FIRST = ARRAY_ID_UINT8,
- ARRAY_ID_LAST = ARRAY_ID_UINT8_CLAMPED
+ ARRAY_ID_LAST = ARRAY_ID_INT32x4
};
static void ArrayIdToTypeAndSize(int array_id,
if (IS_NUMBER(y)) return %NumberEquals(%ToNumber(x), y);
if (IS_BOOLEAN(y)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
+ if (IsFloat32x4(y) || IsFloat64x2(y) || IsInt32x4(y)) {
+ return %StringEquals(x, %ToString(y));
+ }
y = %ToPrimitive(y, NO_HINT);
}
} else if (IS_SYMBOL(x)) {
if (IS_SYMBOL(y)) return %_ObjectEquals(x, y) ? 0 : 1;
return 1; // not equal
+ } else if (IsFloat32x4(x)) {
+ while (true) {
+ if (IsFloat32x4(y) || IsInt32x4(y)) {
+ return (x.x == y.x && x.y == y.y && x.z == y.z && x.w == y.w) ? 0 : 1;
+ }
+ if (IS_STRING(y)) return %StringEquals(%ToString(x), y);
+ if (IS_NUMBER(y)) return 1; // not equal
+ if (IS_SYMBOL(y)) return 1; // not equal
+ if (IS_BOOLEAN(y)) return y ? 0 : 1;
+ if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
+ y = %ToPrimitive(y, NO_HINT);
+ }
+ } else if (IsFloat64x2(x)) {
+ while (true) {
+ if (IsFloat64x2(y)) {
+ return (x.x == y.x && x.y == y.y) ? 0 : 1;
+ }
+ if (IS_STRING(y)) return %StringEquals(%ToString(x), y);
+ if (IS_NUMBER(y)) return 1; // not equal
+ if (IS_SYMBOL(y)) return 1; // not equal
+ if (IS_BOOLEAN(y)) return y ? 0 : 1;
+ if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
+ y = %ToPrimitive(y, NO_HINT);
+ }
+ } else if (IsInt32x4(x)) {
+ while (true) {
+ if (IsFloat32x4(y) || IsInt32x4(y)) {
+ return (x.x == y.x && x.y == y.y && x.z == y.z && x.w == y.w) ? 0 : 1;
+ }
+ if (IS_STRING(y)) return %StringEquals(%ToString(x), y);
+ if (IS_NUMBER(y)) return 1; // not equal
+ if (IS_SYMBOL(y)) return 1; // not equal
+ if (IS_BOOLEAN(y)) return y ? 0 : 1;
+ if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
+ y = %ToPrimitive(y, NO_HINT);
+ }
} else if (IS_BOOLEAN(x)) {
if (IS_BOOLEAN(y)) return %_ObjectEquals(x, y) ? 0 : 1;
if (IS_NULL_OR_UNDEFINED(y)) return 1;
if (IS_NUMBER(y)) return %NumberEquals(%ToNumber(x), y);
if (IS_STRING(y)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
if (IS_SYMBOL(y)) return 1; // not equal
+ if (IsFloat32x4(y) || IsFloat64x2(y) || IsInt32x4(y)) return x ? 0 : 1;
// y is object.
x = %ToNumber(x);
y = %ToPrimitive(y, NO_HINT);
return %NumberEquals(this, x);
}
+ if (IsFloat32x4(this)) {
+ if (!IsFloat32x4(x)) return 1; // not equal
+ return (this.x == x.x && this.y == x.y &&
+ this.z == x.z && this.w == x.w) ? 0 : 1;
+ }
+
+ if (IsFloat64x2(this)) {
+ if (!IsFloat64x2(x)) return 1; // not equal
+ return (this.x == x.x && this.y == x.y) ? 0 : 1;
+ }
+
+ if (IsInt32x4(this)) {
+ if (!IsInt32x4(x)) return 1; // not equal
+ return (this.x == x.x && this.y == x.y &&
+ this.z == x.z && this.w == x.w) ? 0 : 1;
+ }
+
// If anything else gets here, we just do simple identity check.
// Objects (including functions), null, undefined and booleans were
// checked in the CompareStub, so there should be nothing left.
right = %ToPrimitive(x, NUMBER_HINT);
if (IS_STRING(left) && IS_STRING(right)) {
return %_StringCompare(left, right);
+ } else if ((IsFloat32x4(left) || IsInt32x4(left)) &&
+ (IsFloat32x4(right) || IsInt32x4(right))) {
+ if ((left.x == right.x) && (left.y == right.y) &&
+ (left.z == right.z) && (left.w == right.w)) {
+ return 0; // equal
+ }
+ if ((left.x < right.x) && (left.y < right.y) &&
+ (left.z < right.z) && (left.w < right.w)) {
+ return -1; // less
+ }
+ if ((left.x > right.x) && (left.y > right.y) &&
+ (left.z > right.z) && (left.w > right.w)) {
+ return 1; // great
+ }
+ } else if (IsFloat64x2(left) && IsFloat64x2(right)) {
+ if ((left.x == right.x) && (left.y == right.y)) {
+ return 0; // equal
+ }
+ if ((left.x < right.x) && (left.y < right.y)) {
+ return -1; // less
+ }
+ if ((left.x > right.x) && (left.y > right.y)) {
+ return 1; // great
+ }
} else {
var left_number = %ToNumber(left);
var right_number = %ToNumber(right);
if (IS_BOOLEAN(x)) return x ? 1 : 0;
if (IS_UNDEFINED(x)) return NAN;
if (IS_SYMBOL(x)) return NAN;
+ if (IsFloat32x4(x)) return NAN;
+ if (IsFloat64x2(x)) return NAN;
+ if (IsInt32x4(x)) return NAN;
return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x));
}
if (IS_BOOLEAN(x)) return x ? 1 : 0;
if (IS_UNDEFINED(x)) return NAN;
if (IS_SYMBOL(x)) return NAN;
+ if (IsFloat32x4(x)) return NAN;
+ if (IsFloat64x2(x)) return NAN;
+ if (IsInt32x4(x)) return NAN;
return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x));
}
if (IS_STRING(x)) return new $String(x);
if (IS_NUMBER(x)) return new $Number(x);
if (IS_BOOLEAN(x)) return new $Boolean(x);
+ if (IsFloat32x4(x)) return new $Float32x4(x.x, x.y, x.z, x.w);
+ if (IsFloat64x2(x)) return new $Float64x2(x.x, x.y);
+ if (IsInt32x4(x)) return new $Int32x4(x.x, x.y, x.z, x.w);
if (IS_SYMBOL(x)) return %NewSymbolWrapper(x);
if (IS_NULL_OR_UNDEFINED(x) && !IS_UNDETECTABLE(x)) {
throw %MakeTypeError('undefined_or_null_to_object', []);
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"use strict";
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Array = global.Array;
+
+var $SIMD = global.SIMD;
+var $Float32x4 = $SIMD.float32x4;
+var $Float64x2 = $SIMD.float64x2;
+var $Int32x4 = $SIMD.int32x4;
+
+macro SIMD128_DATA_TYPES(FUNCTION)
+FUNCTION(Float32x4, float32x4)
+FUNCTION(Float64x2, float64x2)
+FUNCTION(Int32x4, int32x4)
+endmacro
+
+macro DECLARE_DATA_TYPE_COMMON_FUNCTION(NAME, TYPE)
+function ThrowNAMETypeError() {
+ throw MakeTypeError("this is not a TYPE object.");
+}
+
+function CheckNAME(arg) {
+ if (!(arg instanceof $NAME))
+ ThrowNAMETypeError();
+}
+endmacro
+
+SIMD128_DATA_TYPES(DECLARE_DATA_TYPE_COMMON_FUNCTION)
+
+function StringfyFloat32x4_() {
+ CheckFloat32x4(this);
+ return "float32x4(" + this.x + "," + this.y + "," + this.z + "," + this.w + ")";
+}
+
+function StringfyFloat64x2_() {
+ CheckFloat64x2(this);
+ return "float64x2(" + this.x + "," + this.y + ")";
+}
+
+function StringfyInt32x4_() {
+ CheckInt32x4(this);
+ return "int32x4(" + this.x + "," + this.y + "," + this.z + "," + this.w + ")";
+}
+
+macro SIMD128_DATA_TYPE_FUNCTIONS(FUNCTION)
+FUNCTION(Float32x4, GetX)
+FUNCTION(Float32x4, GetY)
+FUNCTION(Float32x4, GetZ)
+FUNCTION(Float32x4, GetW)
+FUNCTION(Float32x4, GetSignMask)
+FUNCTION(Float64x2, GetX)
+FUNCTION(Float64x2, GetY)
+FUNCTION(Float64x2, GetSignMask)
+FUNCTION(Int32x4, GetX)
+FUNCTION(Int32x4, GetY)
+FUNCTION(Int32x4, GetZ)
+FUNCTION(Int32x4, GetW)
+FUNCTION(Int32x4, GetFlagX)
+FUNCTION(Int32x4, GetFlagY)
+FUNCTION(Int32x4, GetFlagZ)
+FUNCTION(Int32x4, GetFlagW)
+FUNCTION(Int32x4, GetSignMask)
+endmacro
+
+macro DECLARE_DATA_TYPE_FUNCTION(TYPE, FUNCTION)
+function TYPEFUNCTION_() {
+ CheckTYPE(this);
+ return %TYPEFUNCTION(this);
+}
+endmacro
+
+SIMD128_DATA_TYPE_FUNCTIONS(DECLARE_DATA_TYPE_FUNCTION)
+
+function Float32x4Constructor(x, y, z, w) {
+ x = TO_NUMBER_INLINE(x);
+ y = TO_NUMBER_INLINE(y);
+ z = TO_NUMBER_INLINE(z);
+ w = TO_NUMBER_INLINE(w);
+
+ return %CreateFloat32x4(x, y, z, w);
+}
+
+function Float64x2Constructor(x, y) {
+ x = TO_NUMBER_INLINE(x);
+ y = TO_NUMBER_INLINE(y);
+
+ return %CreateFloat64x2(x, y);
+}
+
+function Int32x4Constructor(x, y, z, w) {
+ x = TO_INT32(x);
+ y = TO_INT32(y);
+ z = TO_INT32(z);
+ w = TO_INT32(w);
+
+ return %CreateInt32x4(x, y, z, w);
+}
+
+function SetUpFloat32x4() {
+ %CheckIsBootstrapping();
+
+ %SetCode($Float32x4, Float32x4Constructor);
+
+ %FunctionSetPrototype($Float32x4, new $Object());
+ %SetProperty($Float32x4.prototype, "constructor", $Float32x4, DONT_ENUM);
+
+ InstallGetter($Float32x4.prototype, "x", Float32x4GetX_);
+ InstallGetter($Float32x4.prototype, "y", Float32x4GetY_);
+ InstallGetter($Float32x4.prototype, "z", Float32x4GetZ_);
+ InstallGetter($Float32x4.prototype, "w", Float32x4GetW_);
+ InstallGetter($Float32x4.prototype, "signMask", Float32x4GetSignMask_);
+ InstallFunctions($Float32x4.prototype, DONT_ENUM, $Array(
+ "toString", StringfyFloat32x4_
+ ));
+}
+
+function SetUpFloat64x2() {
+ %CheckIsBootstrapping();
+
+ %SetCode($Float64x2, Float64x2Constructor);
+
+ %FunctionSetPrototype($Float64x2, new $Object());
+ %SetProperty($Float64x2.prototype, "constructor", $Float64x2, DONT_ENUM);
+
+ InstallGetter($Float64x2.prototype, "x", Float64x2GetX_);
+ InstallGetter($Float64x2.prototype, "y", Float64x2GetY_);
+ InstallGetter($Float64x2.prototype, "signMask", Float64x2GetSignMask_);
+ InstallFunctions($Float64x2.prototype, DONT_ENUM, $Array(
+ "toString", StringfyFloat64x2_
+ ));
+}
+
+function SetUpInt32x4() {
+ %CheckIsBootstrapping();
+
+ %SetCode($Int32x4, Int32x4Constructor);
+
+ %FunctionSetPrototype($Int32x4, new $Object());
+ %SetProperty($Int32x4.prototype, "constructor", $Int32x4, DONT_ENUM);
+
+ InstallGetter($Int32x4.prototype, "x", Int32x4GetX_);
+ InstallGetter($Int32x4.prototype, "y", Int32x4GetY_);
+ InstallGetter($Int32x4.prototype, "z", Int32x4GetZ_);
+ InstallGetter($Int32x4.prototype, "w", Int32x4GetW_);
+ InstallGetter($Int32x4.prototype, "flagX", Int32x4GetFlagX_);
+ InstallGetter($Int32x4.prototype, "flagY", Int32x4GetFlagY_);
+ InstallGetter($Int32x4.prototype, "flagZ", Int32x4GetFlagZ_);
+ InstallGetter($Int32x4.prototype, "flagW", Int32x4GetFlagW_);
+ InstallGetter($Int32x4.prototype, "signMask", Int32x4GetSignMask_);
+ InstallFunctions($Int32x4.prototype, DONT_ENUM, $Array(
+ "toString", StringfyInt32x4_
+ ));
+}
+
+SetUpFloat32x4();
+SetUpFloat64x2();
+SetUpInt32x4();
+
+//------------------------------------------------------------------------------
+macro SIMD128_UNARY_FUNCTIONS(FUNCTION)
+FUNCTION(Float32x4, Abs)
+FUNCTION(Float32x4, BitsToInt32x4)
+FUNCTION(Float32x4, Neg)
+FUNCTION(Float32x4, Reciprocal)
+FUNCTION(Float32x4, ReciprocalSqrt)
+FUNCTION(Float32x4, Sqrt)
+FUNCTION(Float32x4, ToInt32x4)
+FUNCTION(Float64x2, Abs)
+FUNCTION(Float64x2, Neg)
+FUNCTION(Float64x2, Sqrt)
+FUNCTION(Int32x4, BitsToFloat32x4)
+FUNCTION(Int32x4, Neg)
+FUNCTION(Int32x4, Not)
+FUNCTION(Int32x4, ToFloat32x4)
+endmacro
+
+macro SIMD128_BINARY_FUNCTIONS(FUNCTION)
+FUNCTION(Float32x4, Add)
+FUNCTION(Float32x4, Div)
+FUNCTION(Float32x4, Max)
+FUNCTION(Float32x4, Min)
+FUNCTION(Float32x4, Mul)
+FUNCTION(Float32x4, Sub)
+FUNCTION(Float32x4, Equal)
+FUNCTION(Float32x4, NotEqual)
+FUNCTION(Float32x4, GreaterThanOrEqual)
+FUNCTION(Float32x4, GreaterThan)
+FUNCTION(Float32x4, LessThan)
+FUNCTION(Float32x4, LessThanOrEqual)
+FUNCTION(Float64x2, Add)
+FUNCTION(Float64x2, Div)
+FUNCTION(Float64x2, Max)
+FUNCTION(Float64x2, Min)
+FUNCTION(Float64x2, Mul)
+FUNCTION(Float64x2, Sub)
+FUNCTION(Int32x4, Add)
+FUNCTION(Int32x4, And)
+FUNCTION(Int32x4, Mul)
+FUNCTION(Int32x4, Or)
+FUNCTION(Int32x4, Sub)
+FUNCTION(Int32x4, Xor)
+FUNCTION(Int32x4, Equal)
+FUNCTION(Int32x4, GreaterThan)
+FUNCTION(Int32x4, LessThan)
+endmacro
+
+macro SIMD128_BINARY_SHUFFLE_FUNCTIONS(FUNCTION)
+FUNCTION(Float32x4)
+FUNCTION(Int32x4)
+endmacro
+
+macro FLOAT32x4_BINARY_FUNCTIONS_WITH_FLOAT32_PARAMETER(FUNCTION)
+FUNCTION(Scale)
+FUNCTION(WithX)
+FUNCTION(WithY)
+FUNCTION(WithZ)
+FUNCTION(WithW)
+endmacro
+
+macro FLOAT64x2_BINARY_FUNCTIONS_WITH_FLOAT64_PARAMETER(FUNCTION)
+FUNCTION(Scale)
+FUNCTION(WithX)
+FUNCTION(WithY)
+endmacro
+
+macro INT32x4_BINARY_FUNCTIONS_WITH_INT32_PARAMETER(FUNCTION)
+FUNCTION(WithX)
+FUNCTION(WithY)
+FUNCTION(WithZ)
+FUNCTION(WithW)
+endmacro
+
+macro INT32x4_BINARY_FUNCTIONS_WITH_BOOLEAN_PARAMETER(FUNCTION)
+FUNCTION(WithFlagX)
+FUNCTION(WithFlagY)
+FUNCTION(WithFlagZ)
+FUNCTION(WithFlagW)
+endmacro
+
+macro DECLARE_SIMD_UNARY_FUNCTION(TYPE, FUNCTION)
+function TYPEFUNCTION_(x4) {
+ CheckTYPE(x4);
+ return %TYPEFUNCTION(x4);
+}
+endmacro
+
+macro DECLARE_SIMD_BINARY_FUNCTION(TYPE, FUNCTION)
+function TYPEFUNCTION_(a4, b4) {
+ CheckTYPE(a4);
+ CheckTYPE(b4);
+ return %TYPEFUNCTION(a4, b4);
+}
+endmacro
+
+macro DECLARE_SIMD_BINARY_SHUFFLE_FUNCTION(TYPE)
+function TYPEShuffle_(x4, mask) {
+ CheckTYPE(x4);
+ var value = TO_INT32(mask);
+ if ((value < 0) || (value > 0xFF)) {
+ throw MakeRangeError("invalid_simd_shuffle_mask");
+ }
+ return %TYPEShuffle(x4, mask);
+}
+endmacro
+
+macro DECLARE_FLOAT32x4_BINARY_FUNCTION_WITH_FLOAT32_PARAMETER(FUNCTION)
+function Float32x4FUNCTION_(x4, f) {
+ CheckFloat32x4(x4);
+ f = TO_NUMBER_INLINE(f);
+ return %Float32x4FUNCTION(x4, f);
+}
+endmacro
+
+macro DECLARE_FLOAT64x2_BINARY_FUNCTION_WITH_FLOAT64_PARAMETER(FUNCTION)
+function Float64x2FUNCTION_(x2, f) {
+ CheckFloat64x2(x2);
+ f = TO_NUMBER_INLINE(f);
+ return %Float64x2FUNCTION(x2, f);
+}
+endmacro
+
+macro DECLARE_INT32x4_BINARY_FUNCTION_WITH_INT32_PARAMETER(FUNCTION)
+function Int32x4FUNCTION_(x4, i) {
+ CheckInt32x4(x4);
+ i = TO_INT32(i);
+ return %Int32x4FUNCTION(x4, i);
+}
+endmacro
+
+macro DECLARE_INT32x4_BINARY_FUNCTION_WITH_BOOLEAN_PARAMETER(FUNCTION)
+function Int32x4FUNCTION_(x4, b) {
+ CheckInt32x4(x4);
+ b = ToBoolean(b);
+ return %Int32x4FUNCTION(x4, b);
+}
+endmacro
+
+SIMD128_UNARY_FUNCTIONS(DECLARE_SIMD_UNARY_FUNCTION)
+SIMD128_BINARY_FUNCTIONS(DECLARE_SIMD_BINARY_FUNCTION)
+SIMD128_BINARY_SHUFFLE_FUNCTIONS(DECLARE_SIMD_BINARY_SHUFFLE_FUNCTION)
+FLOAT32x4_BINARY_FUNCTIONS_WITH_FLOAT32_PARAMETER(DECLARE_FLOAT32x4_BINARY_FUNCTION_WITH_FLOAT32_PARAMETER)
+FLOAT64x2_BINARY_FUNCTIONS_WITH_FLOAT64_PARAMETER(DECLARE_FLOAT64x2_BINARY_FUNCTION_WITH_FLOAT64_PARAMETER)
+INT32x4_BINARY_FUNCTIONS_WITH_INT32_PARAMETER(DECLARE_INT32x4_BINARY_FUNCTION_WITH_INT32_PARAMETER)
+INT32x4_BINARY_FUNCTIONS_WITH_BOOLEAN_PARAMETER(DECLARE_INT32x4_BINARY_FUNCTION_WITH_BOOLEAN_PARAMETER)
+
+function Float32x4Splat_(f) {
+ f = TO_NUMBER_INLINE(f);
+ return %CreateFloat32x4(f, f, f, f);
+}
+
+function Float32x4Zero_() {
+ return %CreateFloat32x4(0.0, 0.0, 0.0, 0.0);
+}
+
+function Float32x4And_(a4, b4) {
+ a4 = Float32x4BitsToInt32x4_(a4);
+ b4 = Float32x4BitsToInt32x4_(b4);
+ return Int32x4BitsToFloat32x4_(Int32x4And_(a4, b4));
+}
+
+function Float32x4Or_(a4, b4) {
+ a4 = Float32x4BitsToInt32x4_(a4);
+ b4 = Float32x4BitsToInt32x4_(b4);
+ return Int32x4BitsToFloat32x4_(Int32x4Or_(a4, b4));
+}
+
+function Float32x4Xor_(a4, b4) {
+ a4 = Float32x4BitsToInt32x4_(a4);
+ b4 = Float32x4BitsToInt32x4_(b4);
+ return Int32x4BitsToFloat32x4_(Int32x4Xor_(a4, b4));
+}
+
+function Float32x4Not_(x4) {
+ x4 = Float32x4BitsToInt32x4_(x4);
+ return Int32x4BitsToFloat32x4_(Int32x4Not_(x4));
+}
+
+function Float32x4Clamp_(x4, lowerLimit, upperLimit) {
+ CheckFloat32x4(x4);
+ CheckFloat32x4(lowerLimit);
+ CheckFloat32x4(upperLimit);
+ return %Float32x4Clamp(x4, lowerLimit, upperLimit);
+}
+
+function Float32x4ShuffleMix_(a4, b4, mask) {
+ CheckFloat32x4(a4);
+ CheckFloat32x4(b4);
+ var value = TO_INT32(mask);
+ if ((value < 0) || (value > 0xFF)) {
+ throw MakeRangeError("invalid_simd_shuffleMix_mask");
+ }
+ return %Float32x4ShuffleMix(a4, b4, mask);
+}
+
+function Float64x2Splat_(f) {
+ f = TO_NUMBER_INLINE(f);
+ return %CreateFloat64x2(f, f);
+}
+
+function Float64x2Zero_() {
+ return %CreateFloat64x2(0.0, 0.0);
+}
+
+function Float64x2Clamp_(x2, lowerLimit, upperLimit) {
+ CheckFloat64x2(x2);
+ CheckFloat64x2(lowerLimit);
+ CheckFloat64x2(upperLimit);
+ return %Float64x2Clamp(x2, lowerLimit, upperLimit);
+}
+
+function Int32x4Zero_() {
+ return %CreateInt32x4(0, 0, 0, 0);
+}
+
+function Int32x4Bool_(x, y, z, w) {
+ x = x ? -1 : 0;
+ y = y ? -1 : 0;
+ z = z ? -1 : 0;
+ w = w ? -1 : 0;
+ return %CreateInt32x4(x, y, z, w);
+}
+
+function Int32x4Splat_(s) {
+ s = TO_INT32(s);
+ return %CreateInt32x4(s, s, s, s);
+}
+
+function Int32x4Select_(x4, trueValue, falseValue) {
+ CheckInt32x4(x4);
+ CheckFloat32x4(trueValue);
+ CheckFloat32x4(falseValue);
+ return %Int32x4Select(x4, trueValue, falseValue);
+}
+
+function Int32x4ShiftLeft_(t, s) {
+ CheckInt32x4(t);
+ s = TO_NUMBER_INLINE(s);
+ var x = t.x << s;
+ var y = t.y << s;
+ var z = t.z << s;
+ var w = t.w << s;
+ return %CreateInt32x4(x, y, z, w);
+}
+
+function Int32x4ShiftRight_(t, s) {
+ CheckInt32x4(t);
+ s = TO_NUMBER_INLINE(s);
+ var x = t.x >>> s;
+ var y = t.y >>> s;
+ var z = t.z >>> s;
+ var w = t.w >>> s;
+ return %CreateInt32x4(x, y, z, w);
+}
+
+function Int32x4ShiftRightArithmetic_(t, s) {
+ CheckInt32x4(t);
+ s = TO_NUMBER_INLINE(s);
+ var x = t.x >> s;
+ var y = t.y >> s;
+ var z = t.z >> s;
+ var w = t.w >> s;
+ return %CreateInt32x4(x, y, z, w);
+}
+
+function SetUpSIMD() {
+ %CheckIsBootstrapping();
+
+ %OptimizeObjectForAddingMultipleProperties($SIMD, 258);
+ %SetProperty($SIMD, "XXXX", 0x00, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XXXY", 0x40, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XXXZ", 0x80, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XXXW", 0xC0, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XXYX", 0x10, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XXYY", 0x50, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XXYZ", 0x90, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XXYW", 0xD0, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XXZX", 0x20, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XXZY", 0x60, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XXZZ", 0xA0, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XXZW", 0xE0, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XXWX", 0x30, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XXWY", 0x70, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XXWZ", 0xB0, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XXWW", 0xF0, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XYXX", 0x04, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XYXY", 0x44, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XYXZ", 0x84, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XYXW", 0xC4, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XYYX", 0x14, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XYYY", 0x54, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XYYZ", 0x94, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XYYW", 0xD4, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XYZX", 0x24, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XYZY", 0x64, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XYZZ", 0xA4, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XYZW", 0xE4, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XYWX", 0x34, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XYWY", 0x74, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XYWZ", 0xB4, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XYWW", 0xF4, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XZXX", 0x08, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XZXY", 0x48, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XZXZ", 0x88, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XZXW", 0xC8, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XZYX", 0x18, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XZYY", 0x58, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XZYZ", 0x98, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XZYW", 0xD8, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XZZX", 0x28, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XZZY", 0x68, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XZZZ", 0xA8, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XZZW", 0xE8, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XZWX", 0x38, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XZWY", 0x78, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XZWZ", 0xB8, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XZWW", 0xF8, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XWXX", 0x0C, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XWXY", 0x4C, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XWXZ", 0x8C, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XWXW", 0xCC, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XWYX", 0x1C, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XWYY", 0x5C, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XWYZ", 0x9C, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XWYW", 0xDC, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XWZX", 0x2C, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XWZY", 0x6C, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XWZZ", 0xAC, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XWZW", 0xEC, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XWWX", 0x3C, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XWWY", 0x7C, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XWWZ", 0xBC, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "XWWW", 0xFC, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YXXX", 0x01, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YXXY", 0x41, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YXXZ", 0x81, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YXXW", 0xC1, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YXYX", 0x11, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YXYY", 0x51, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YXYZ", 0x91, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YXYW", 0xD1, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YXZX", 0x21, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YXZY", 0x61, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YXZZ", 0xA1, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YXZW", 0xE1, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YXWX", 0x31, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YXWY", 0x71, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YXWZ", 0xB1, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YXWW", 0xF1, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YYXX", 0x05, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YYXY", 0x45, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YYXZ", 0x85, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YYXW", 0xC5, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YYYX", 0x15, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YYYY", 0x55, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YYYZ", 0x95, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YYYW", 0xD5, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YYZX", 0x25, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YYZY", 0x65, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YYZZ", 0xA5, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YYZW", 0xE5, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YYWX", 0x35, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YYWY", 0x75, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YYWZ", 0xB5, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YYWW", 0xF5, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YZXX", 0x09, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YZXY", 0x49, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YZXZ", 0x89, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YZXW", 0xC9, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YZYX", 0x19, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YZYY", 0x59, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YZYZ", 0x99, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YZYW", 0xD9, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YZZX", 0x29, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YZZY", 0x69, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YZZZ", 0xA9, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YZZW", 0xE9, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YZWX", 0x39, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YZWY", 0x79, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YZWZ", 0xB9, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YZWW", 0xF9, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YWXX", 0x0D, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YWXY", 0x4D, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YWXZ", 0x8D, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YWXW", 0xCD, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YWYX", 0x1D, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YWYY", 0x5D, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YWYZ", 0x9D, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YWYW", 0xDD, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YWZX", 0x2D, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YWZY", 0x6D, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YWZZ", 0xAD, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YWZW", 0xED, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YWWX", 0x3D, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YWWY", 0x7D, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YWWZ", 0xBD, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "YWWW", 0xFD, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZXXX", 0x02, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZXXY", 0x42, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZXXZ", 0x82, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZXXW", 0xC2, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZXYX", 0x12, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZXYY", 0x52, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZXYZ", 0x92, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZXYW", 0xD2, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZXZX", 0x22, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZXZY", 0x62, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZXZZ", 0xA2, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZXZW", 0xE2, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZXWX", 0x32, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZXWY", 0x72, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZXWZ", 0xB2, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZXWW", 0xF2, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZYXX", 0x06, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZYXY", 0x46, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZYXZ", 0x86, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZYXW", 0xC6, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZYYX", 0x16, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZYYY", 0x56, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZYYZ", 0x96, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZYYW", 0xD6, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZYZX", 0x26, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZYZY", 0x66, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZYZZ", 0xA6, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZYZW", 0xE6, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZYWX", 0x36, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZYWY", 0x76, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZYWZ", 0xB6, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZYWW", 0xF6, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZZXX", 0x0A, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZZXY", 0x4A, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZZXZ", 0x8A, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZZXW", 0xCA, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZZYX", 0x1A, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZZYY", 0x5A, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZZYZ", 0x9A, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZZYW", 0xDA, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZZZX", 0x2A, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZZZY", 0x6A, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZZZZ", 0xAA, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZZZW", 0xEA, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZZWX", 0x3A, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZZWY", 0x7A, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZZWZ", 0xBA, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZZWW", 0xFA, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZWXX", 0x0E, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZWXY", 0x4E, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZWXZ", 0x8E, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZWXW", 0xCE, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZWYX", 0x1E, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZWYY", 0x5E, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZWYZ", 0x9E, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZWYW", 0xDE, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZWZX", 0x2E, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZWZY", 0x6E, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZWZZ", 0xAE, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZWZW", 0xEE, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZWWX", 0x3E, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZWWY", 0x7E, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZWWZ", 0xBE, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "ZWWW", 0xFE, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WXXX", 0x03, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WXXY", 0x43, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WXXZ", 0x83, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WXXW", 0xC3, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WXYX", 0x13, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WXYY", 0x53, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WXYZ", 0x93, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WXYW", 0xD3, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WXZX", 0x23, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WXZY", 0x63, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WXZZ", 0xA3, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WXZW", 0xE3, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WXWX", 0x33, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WXWY", 0x73, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WXWZ", 0xB3, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WXWW", 0xF3, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WYXX", 0x07, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WYXY", 0x47, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WYXZ", 0x87, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WYXW", 0xC7, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WYYX", 0x17, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WYYY", 0x57, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WYYZ", 0x97, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WYYW", 0xD7, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WYZX", 0x27, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WYZY", 0x67, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WYZZ", 0xA7, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WYZW", 0xE7, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WYWX", 0x37, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WYWY", 0x77, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WYWZ", 0xB7, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WYWW", 0xF7, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WZXX", 0x0B, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WZXY", 0x4B, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WZXZ", 0x8B, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WZXW", 0xCB, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WZYX", 0x1B, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WZYY", 0x5B, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WZYZ", 0x9B, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WZYW", 0xDB, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WZZX", 0x2B, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WZZY", 0x6B, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WZZZ", 0xAB, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WZZW", 0xEB, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WZWX", 0x3B, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WZWY", 0x7B, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WZWZ", 0xBB, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WZWW", 0xFB, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WWXX", 0x0F, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WWXY", 0x4F, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WWXZ", 0x8F, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WWXW", 0xCF, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WWYX", 0x1F, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WWYY", 0x5F, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WWYZ", 0x9F, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WWYW", 0xDF, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WWZX", 0x2F, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WWZY", 0x6F, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WWZZ", 0xAF, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WWZW", 0xEF, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WWWX", 0x3F, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WWWY", 0x7F, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WWWZ", 0xBF, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($SIMD, "WWWW", 0xFF, DONT_ENUM | DONT_DELETE | READ_ONLY);
+
+ %ToFastProperties($SIMD);
+
+ // Set up non-enumerable properties of the SIMD float32x4 object.
+ InstallFunctions($SIMD.float32x4, DONT_ENUM, $Array(
+ // Float32x4 operations
+ "splat", Float32x4Splat_,
+ "zero", Float32x4Zero_,
+ // Unary
+ "abs", Float32x4Abs_,
+ "bitsToInt32x4", Float32x4BitsToInt32x4_,
+ "neg", Float32x4Neg_,
+ "reciprocal", Float32x4Reciprocal_,
+ "reciprocalSqrt", Float32x4ReciprocalSqrt_,
+ "sqrt", Float32x4Sqrt_,
+ "toInt32x4", Float32x4ToInt32x4_,
+ // Binary
+ "add", Float32x4Add_,
+ "div", Float32x4Div_,
+ "max", Float32x4Max_,
+ "min", Float32x4Min_,
+ "mul", Float32x4Mul_,
+ "sub", Float32x4Sub_,
+ "lessThan", Float32x4LessThan_,
+ "lessThanOrEqual", Float32x4LessThanOrEqual_,
+ "equal", Float32x4Equal_,
+ "notEqual", Float32x4NotEqual_,
+ "greaterThanOrEqual", Float32x4GreaterThanOrEqual_,
+ "greaterThan", Float32x4GreaterThan_,
+ "and", Float32x4And_,
+ "or", Float32x4Or_,
+ "xor", Float32x4Xor_,
+ "not", Float32x4Not_,
+ "scale", Float32x4Scale_,
+ "withX", Float32x4WithX_,
+ "withY", Float32x4WithY_,
+ "withZ", Float32x4WithZ_,
+ "withW", Float32x4WithW_,
+ "shuffle", Float32x4Shuffle_,
+ // Ternary
+ "clamp", Float32x4Clamp_,
+ "shuffleMix", Float32x4ShuffleMix_
+ ));
+
+ // Set up non-enumerable properties of the SIMD float64x2 object.
+ InstallFunctions($SIMD.float64x2, DONT_ENUM, $Array(
+ // Float64x2 operations
+ "splat", Float64x2Splat_,
+ "zero", Float64x2Zero_,
+ // Unary
+ "abs", Float64x2Abs_,
+ "neg", Float64x2Neg_,
+ "sqrt", Float64x2Sqrt_,
+ // Binary
+ "add", Float64x2Add_,
+ "div", Float64x2Div_,
+ "max", Float64x2Max_,
+ "min", Float64x2Min_,
+ "mul", Float64x2Mul_,
+ "sub", Float64x2Sub_,
+ "scale", Float64x2Scale_,
+ "withX", Float64x2WithX_,
+ "withY", Float64x2WithY_,
+ // Ternary
+ "clamp", Float64x2Clamp_
+ ));
+
+ // Set up non-enumerable properties of the SIMD int32x4 object.
+ InstallFunctions($SIMD.int32x4, DONT_ENUM, $Array(
+ // Int32x4 operations
+ "zero", Int32x4Zero_,
+ "splat", Int32x4Splat_,
+ "bool", Int32x4Bool_,
+ // Unary
+ "bitsToFloat32x4", Int32x4BitsToFloat32x4_,
+ "neg", Int32x4Neg_,
+ "not", Int32x4Not_,
+ "toFloat32x4", Int32x4ToFloat32x4_,
+ // Binary
+ "add", Int32x4Add_,
+ "and", Int32x4And_,
+ "mul", Int32x4Mul_,
+ "or", Int32x4Or_,
+ "sub", Int32x4Sub_,
+ "xor", Int32x4Xor_,
+ "shuffle", Int32x4Shuffle_,
+ "withX", Int32x4WithX_,
+ "withY", Int32x4WithY_,
+ "withZ", Int32x4WithZ_,
+ "withW", Int32x4WithW_,
+ "withFlagX", Int32x4WithFlagX_,
+ "withFlagY", Int32x4WithFlagY_,
+ "withFlagZ", Int32x4WithFlagZ_,
+ "withFlagW", Int32x4WithFlagW_,
+ "greaterThan", Int32x4GreaterThan_,
+ "equal", Int32x4Equal_,
+ "lessThan", Int32x4LessThan_,
+ "shiftLeft", Int32x4ShiftLeft_,
+ "shiftRight", Int32x4ShiftRight_,
+ "shiftRightArithmetic", Int32x4ShiftRightArithmetic_,
+ // Ternary
+ "select", Int32x4Select_
+ ));
+}
+
+SetUpSIMD();
+
+//------------------------------------------------------------------------------
+macro SIMD128_TYPED_ARRAYS(FUNCTION)
+// arrayIds below should be synchronized with Runtime_TypedArrayInitialize.
+FUNCTION(10, Float32x4Array, 16)
+FUNCTION(11, Float64x2Array, 16)
+FUNCTION(12, Int32x4Array, 16)
+endmacro
+
+macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE)
+ function NAMEConstructByArrayBuffer(obj, buffer, byteOffset, length) {
+ if (!IS_UNDEFINED(byteOffset)) {
+ byteOffset =
+ ToPositiveInteger(byteOffset, "invalid_typed_array_length");
+ }
+ if (!IS_UNDEFINED(length)) {
+ length = ToPositiveInteger(length, "invalid_typed_array_length");
+ }
+
+ var bufferByteLength = %_ArrayBufferGetByteLength(buffer);
+ var offset;
+ if (IS_UNDEFINED(byteOffset)) {
+ offset = 0;
+ } else {
+ offset = byteOffset;
+
+ if (offset % ELEMENT_SIZE !== 0) {
+ throw MakeRangeError("invalid_typed_array_alignment",
+ ["start offset", "NAME", ELEMENT_SIZE]);
+ }
+ if (offset > bufferByteLength) {
+ throw MakeRangeError("invalid_typed_array_offset");
+ }
+ }
+
+ var newByteLength;
+ var newLength;
+ if (IS_UNDEFINED(length)) {
+ if (bufferByteLength % ELEMENT_SIZE !== 0) {
+ throw MakeRangeError("invalid_typed_array_alignment",
+ ["byte length", "NAME", ELEMENT_SIZE]);
+ }
+ newByteLength = bufferByteLength - offset;
+ newLength = newByteLength / ELEMENT_SIZE;
+ } else {
+ var newLength = length;
+ newByteLength = newLength * ELEMENT_SIZE;
+ }
+ if ((offset + newByteLength > bufferByteLength)
+ || (newLength > %_MaxSmi())) {
+ throw MakeRangeError("invalid_typed_array_length");
+ }
+ %_TypedArrayInitialize(obj, ARRAY_ID, buffer, offset, newByteLength);
+ }
+
+ function NAMEConstructByLength(obj, length) {
+ var l = IS_UNDEFINED(length) ?
+ 0 : ToPositiveInteger(length, "invalid_typed_array_length");
+ if (l > %_MaxSmi()) {
+ throw MakeRangeError("invalid_typed_array_length");
+ }
+ var byteLength = l * ELEMENT_SIZE;
+ if (byteLength > %_TypedArrayMaxSizeInHeap()) {
+ var buffer = new $ArrayBuffer(byteLength);
+ %_TypedArrayInitialize(obj, ARRAY_ID, buffer, 0, byteLength);
+ } else {
+ %_TypedArrayInitialize(obj, ARRAY_ID, null, 0, byteLength);
+ }
+ }
+
+ function NAMEConstructByArrayLike(obj, arrayLike) {
+ var length = arrayLike.length;
+ var l = ToPositiveInteger(length, "invalid_typed_array_length");
+
+ if (l > %_MaxSmi()) {
+ throw MakeRangeError("invalid_typed_array_length");
+ }
+ if(!%TypedArrayInitializeFromArrayLike(obj, ARRAY_ID, arrayLike, l)) {
+ for (var i = 0; i < l; i++) {
+ // It is crucial that we let any execptions from arrayLike[i]
+ // propagate outside the function.
+ obj[i] = arrayLike[i];
+ }
+ }
+ }
+
+ function NAMEConstructor(arg1, arg2, arg3) {
+ if (%_IsConstructCall()) {
+ if (IS_ARRAYBUFFER(arg1)) {
+ NAMEConstructByArrayBuffer(this, arg1, arg2, arg3);
+ } else if (IS_NUMBER(arg1) || IS_STRING(arg1) ||
+ IS_BOOLEAN(arg1) || IS_UNDEFINED(arg1)) {
+ NAMEConstructByLength(this, arg1);
+ } else {
+ NAMEConstructByArrayLike(this, arg1);
+ }
+ } else {
+ throw MakeTypeError("constructor_not_function", ["NAME"])
+ }
+ }
+
+ function NAME_GetBuffer() {
+ if (!(%_ClassOf(this) === 'NAME')) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ["NAME.buffer", this]);
+ }
+ return %TypedArrayGetBuffer(this);
+ }
+
+ function NAME_GetByteLength() {
+ if (!(%_ClassOf(this) === 'NAME')) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ["NAME.byteLength", this]);
+ }
+ return %_ArrayBufferViewGetByteLength(this);
+ }
+
+ function NAME_GetByteOffset() {
+ if (!(%_ClassOf(this) === 'NAME')) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ["NAME.byteOffset", this]);
+ }
+ return %_ArrayBufferViewGetByteOffset(this);
+ }
+
+ function NAME_GetLength() {
+ if (!(%_ClassOf(this) === 'NAME')) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ["NAME.length", this]);
+ }
+ return %_TypedArrayGetLength(this);
+ }
+
+ var $NAME = global.NAME;
+
+ function NAMESubArray(begin, end) {
+ if (!(%_ClassOf(this) === 'NAME')) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ["NAME.subarray", this]);
+ }
+ var beginInt = TO_INTEGER(begin);
+ if (!IS_UNDEFINED(end)) {
+ end = TO_INTEGER(end);
+ }
+
+ var srcLength = %_TypedArrayGetLength(this);
+ if (beginInt < 0) {
+ beginInt = $MathMax(0, srcLength + beginInt);
+ } else {
+ beginInt = $MathMin(srcLength, beginInt);
+ }
+
+ var endInt = IS_UNDEFINED(end) ? srcLength : end;
+ if (endInt < 0) {
+ endInt = $MathMax(0, srcLength + endInt);
+ } else {
+ endInt = $MathMin(endInt, srcLength);
+ }
+ if (endInt < beginInt) {
+ endInt = beginInt;
+ }
+ var newLength = endInt - beginInt;
+ var beginByteOffset =
+ %_ArrayBufferViewGetByteOffset(this) + beginInt * ELEMENT_SIZE;
+ return new $NAME(%TypedArrayGetBuffer(this),
+ beginByteOffset, newLength);
+ }
+endmacro
+
+SIMD128_TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTOR)
+
+function SetupSIMD128TypedArrays() {
+macro SETUP_TYPED_ARRAY(ARRAY_ID, NAME, ELEMENT_SIZE)
+ %CheckIsBootstrapping();
+ %SetCode(global.NAME, NAMEConstructor);
+ %FunctionSetPrototype(global.NAME, new $Object());
+
+ %SetProperty(global.NAME, "BYTES_PER_ELEMENT", ELEMENT_SIZE,
+ READ_ONLY | DONT_ENUM | DONT_DELETE);
+ %SetProperty(global.NAME.prototype,
+ "constructor", global.NAME, DONT_ENUM);
+ %SetProperty(global.NAME.prototype,
+ "BYTES_PER_ELEMENT", ELEMENT_SIZE,
+ READ_ONLY | DONT_ENUM | DONT_DELETE);
+ InstallGetter(global.NAME.prototype, "buffer", NAME_GetBuffer);
+ InstallGetter(global.NAME.prototype, "byteOffset", NAME_GetByteOffset);
+ InstallGetter(global.NAME.prototype, "byteLength", NAME_GetByteLength);
+ InstallGetter(global.NAME.prototype, "length", NAME_GetLength);
+
+ InstallFunctions(global.NAME.prototype, DONT_ENUM, $Array(
+ "subarray", NAMESubArray,
+ "set", TypedArraySet
+ ));
+endmacro
+
+SIMD128_TYPED_ARRAYS(SETUP_TYPED_ARRAY)
+}
+
+SetupSIMD128TypedArrays();
+
+macro DECLARE_TYPED_ARRAY_FUNCTION(NAME)
+function NAMEArrayGet(i) {
+ return this[i];
+}
+
+function NAMEArraySet(i, v) {
+ CheckNAME(v);
+ this[i] = v;
+}
+
+function SetUpNAMEArray() {
+ InstallFunctions(global.NAMEArray.prototype, DONT_ENUM, $Array(
+ "getAt", NAMEArrayGet,
+ "setAt", NAMEArraySet
+ ));
+}
+endmacro
+
+DECLARE_TYPED_ARRAY_FUNCTION(Float32x4)
+DECLARE_TYPED_ARRAY_FUNCTION(Float64x2)
+DECLARE_TYPED_ARRAY_FUNCTION(Int32x4)
+
+SetUpFloat32x4Array();
+SetUpFloat64x2Array();
+SetUpInt32x4Array();
case JS_MAP_ITERATOR_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
+ case FLOAT32x4_TYPE:
+ case FLOAT64x2_TYPE:
+ case INT32x4_TYPE:
if (map->is_undetectable()) return kUndetectable;
return kOtherObject;
case JS_ARRAY_TYPE:
namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsSIMD128InCrankshaft() { return true; }
// -----------------------------------------------------------------------------
if (rm_reg.high_bit()) emit(0x41);
}
+void Assembler::emit_optional_rex_32(XMMRegister reg) {
+  byte rex_bits = (reg.code() & 0x8) >> 3;  // high bit -> REX.B (r/m field)
+  if (rex_bits != 0) emit(0x40 | rex_bits);
+}
void Assembler::emit_optional_rex_32(const Operand& op) {
if (op.rex_ != 0) emit(0x40 | op.rex_);
}
+void Assembler::addpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x58);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::addpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x58);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::subpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5C);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::subpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5C);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x59);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x59);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5E);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5E);
+ emit_sse_operand(dst, src);
+}
+
+
// SSE 2 operations.
void Assembler::movd(XMMRegister dst, Register src) {
}
+void Assembler::insertps(XMMRegister dst, XMMRegister src, byte imm8) {
+ ASSERT(CpuFeatures::IsSupported(SSE4_1));
+ ASSERT(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x21);
+ emit_sse_operand(dst, src);
+ emit(imm8);
+}
+
+
+void Assembler::pinsrd(XMMRegister dst, Register src, byte imm8) {
+ ASSERT(CpuFeatures::IsSupported(SSE4_1));
+ ASSERT(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x22);
+ emit_sse_operand(dst, src);
+ emit(imm8);
+}
+
+
void Assembler::movsd(const Operand& dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2); // double
}
+void Assembler::movups(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x10);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movups(const Operand& dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0x11);
+ emit_sse_operand(src, dst);
+}
+
+
void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
ASSERT(is_uint8(imm8));
EnsureSpace ensure_space(this);
- emit_optional_rex_32(src, dst);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xC6);
+ emit_sse_operand(dst, src);
+ emit(imm8);
+}
+
+
+void Assembler::shufpd(XMMRegister dst, XMMRegister src, byte imm8) {
+ ASSERT(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xC6);
emit_sse_operand(dst, src);
}
+void Assembler::andpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x54);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::orpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
}
+void Assembler::xorpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x57);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
}
+void Assembler::cmpps(XMMRegister dst, XMMRegister src, int8_t cmp) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xC2);
+ emit_sse_operand(dst, src);
+ emit(cmp);
+}
+
+
+void Assembler::cmpeqps(XMMRegister dst, XMMRegister src) {
+ cmpps(dst, src, 0x0);
+}
+
+
+void Assembler::cmpltps(XMMRegister dst, XMMRegister src) {
+ cmpps(dst, src, 0x1);
+}
+
+
+void Assembler::cmpleps(XMMRegister dst, XMMRegister src) {
+ cmpps(dst, src, 0x2);
+}
+
+
+void Assembler::cmpneqps(XMMRegister dst, XMMRegister src) {
+ cmpps(dst, src, 0x4);
+}
+
+
+void Assembler::cmpnltps(XMMRegister dst, XMMRegister src) {
+ cmpps(dst, src, 0x5);
+}
+
+
+void Assembler::cmpnleps(XMMRegister dst, XMMRegister src) {
+ cmpps(dst, src, 0x6);
+}
+
+
+void Assembler::pslld(XMMRegister reg, int8_t shift) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(reg);
+ emit(0x0F);
+ emit(0x72);
+ emit_sse_operand(rsi, reg); // rsi == 6
+ emit(shift);
+}
+
+
+void Assembler::pslld(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xF2);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::psrld(XMMRegister reg, int8_t shift) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(reg);
+ emit(0x0F);
+ emit(0x72);
+ emit_sse_operand(rdx, reg); // rdx == 2
+ emit(shift);
+}
+
+
+void Assembler::psrld(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xD2);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::psrad(XMMRegister reg, int8_t shift) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(reg);
+ emit(0x0F);
+ emit(0x72);
+ emit_sse_operand(rsp, reg); // rsp == 4
+ emit(shift);
+}
+
+
+void Assembler::psrad(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xE2);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x76);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::pcmpgtd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x66);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::roundsd(XMMRegister dst, XMMRegister src,
Assembler::RoundingMode mode) {
ASSERT(IsEnabled(SSE4_1));
}
+// minps xmm, xmm (0F 5D /r): per-lane single-precision minimum.
+void Assembler::minps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5D);
+ emit_sse_operand(dst, src);
+}
+
+
+// minps xmm, m128: memory-operand form of the above.
+void Assembler::minps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5D);
+ emit_sse_operand(dst, src);
+}
+
+
+// maxps xmm, xmm (0F 5F /r): per-lane single-precision maximum.
+void Assembler::maxps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5F);
+ emit_sse_operand(dst, src);
+}
+
+
+// maxps xmm, m128: memory-operand form of the above.
+void Assembler::maxps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5F);
+ emit_sse_operand(dst, src);
+}
+
+
+// minpd xmm, xmm (66 0F 5D /r): per-lane double-precision minimum.
+void Assembler::minpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5D);
+ emit_sse_operand(dst, src);
+}
+
+
+// minpd xmm, m128: memory-operand form of the above.
+void Assembler::minpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5D);
+ emit_sse_operand(dst, src);
+}
+
+
+// maxpd xmm, xmm (66 0F 5F /r): per-lane double-precision maximum.
+void Assembler::maxpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5F);
+ emit_sse_operand(dst, src);
+}
+
+
+// maxpd xmm, m128: memory-operand form of the above.
+void Assembler::maxpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5F);
+ emit_sse_operand(dst, src);
+}
+
+
+// rcpps xmm, xmm (0F 53 /r): approximate per-lane single-precision reciprocal.
+void Assembler::rcpps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x53);
+ emit_sse_operand(dst, src);
+}
+
+
+// rcpps xmm, m128: memory-operand form of the above.
+void Assembler::rcpps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x53);
+ emit_sse_operand(dst, src);
+}
+
+
+// rsqrtps xmm, xmm (0F 52 /r): approximate per-lane reciprocal square root.
+void Assembler::rsqrtps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x52);
+ emit_sse_operand(dst, src);
+}
+
+
+// rsqrtps xmm, m128: memory-operand form of the above.
+void Assembler::rsqrtps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x52);
+ emit_sse_operand(dst, src);
+}
+
+
+// sqrtps xmm, xmm (0F 51 /r): per-lane single-precision square root.
+void Assembler::sqrtps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x51);
+ emit_sse_operand(dst, src);
+}
+
+
+// sqrtps xmm, m128: memory-operand form of the above.
+void Assembler::sqrtps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x51);
+ emit_sse_operand(dst, src);
+}
+
+
+// sqrtpd xmm, xmm (66 0F 51 /r): per-lane double-precision square root.
+void Assembler::sqrtpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x51);
+ emit_sse_operand(dst, src);
+}
+
+
+// sqrtpd xmm, m128: memory-operand form of the above.
+void Assembler::sqrtpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x51);
+ emit_sse_operand(dst, src);
+}
+
+
+// cvtdq2ps xmm, xmm (0F 5B /r): convert packed signed dwords to floats.
+void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5B);
+ emit_sse_operand(dst, src);
+}
+
+
+// cvtdq2ps xmm, m128: memory-operand form of the above.
+void Assembler::cvtdq2ps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5B);
+ emit_sse_operand(dst, src);
+}
+
+
+// paddd xmm, xmm (66 0F FE /r): packed dword addition (wrap-around).
+void Assembler::paddd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xFE);
+ emit_sse_operand(dst, src);
+}
+
+
+// paddd xmm, m128: memory-operand form of the above.
+void Assembler::paddd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xFE);
+ emit_sse_operand(dst, src);
+}
+
+
+// psubd xmm, xmm (66 0F FA /r): packed dword subtraction (wrap-around).
+void Assembler::psubd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xFA);
+ emit_sse_operand(dst, src);
+}
+
+
+// psubd xmm, m128: memory-operand form of the above.
+void Assembler::psubd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xFA);
+ emit_sse_operand(dst, src);
+}
+
+
+// pmulld xmm, xmm (66 0F 38 40 /r, SSE4.1): signed multiply of packed
+// dwords, keeping the low 32 bits of each product.
+void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
+ ASSERT(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x38);
+ emit(0x40);
+ emit_sse_operand(dst, src);
+}
+
+
+// pmulld xmm, m128 (66 0F 38 40 /r, SSE4.1): signed multiply of packed
+// dwords, keeping the low 32 bits of each product.
+// NOTE: this previously emitted 66 0F F4, which encodes PMULUDQ (unsigned
+// 32x32->64 multiply of the even dwords) — a different instruction with a
+// different result; it also skipped the SSE4_1 feature check that the
+// register-register overload performs.
+void Assembler::pmulld(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x38);
+ emit(0x40);
+ emit_sse_operand(dst, src);
+}
+
+
+// pmuludq xmm, xmm (66 0F F4 /r): unsigned 32x32->64 multiply of the
+// even-indexed dwords (lanes 0 and 2).
+void Assembler::pmuludq(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xF4);
+ emit_sse_operand(dst, src);
+}
+
+
+// pmuludq xmm, m128: memory-operand form of the above.
+void Assembler::pmuludq(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xF4);
+ emit_sse_operand(dst, src);
+}
+
+
+// Emits PUNPCKLDQ (66 0F 62 /r): interleave the low dwords of dst and src.
+// (The method name carries an extra 'a' vs. the mnemonic; kept as-is since
+// callers and the header declaration use this spelling.)
+void Assembler::punpackldq(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x62);
+ emit_sse_operand(dst, src);
+}
+
+
+// Memory-operand form of the above.
+void Assembler::punpackldq(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x62);
+ emit_sse_operand(dst, src);
+}
+
+
+// psrldq xmm, imm8 (66 0F 73 /3 ib): shift the whole register right by
+// `shift` BYTES. emit_sse_operand(dst) supplies the mod=11, reg=/3 ModR/M.
+void Assembler::psrldq(XMMRegister dst, uint8_t shift) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst);
+ emit(0x0F);
+ emit(0x73);
+ emit_sse_operand(dst);
+ emit(shift);
+}
+
+
+// cvtps2dq xmm, xmm (66 0F 5B /r): convert packed floats to signed dwords
+// using the current rounding mode.
+void Assembler::cvtps2dq(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5B);
+ emit_sse_operand(dst, src);
+}
+
+
+// cvtps2dq xmm, m128: memory-operand form of the above.
+void Assembler::cvtps2dq(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5B);
+ emit_sse_operand(dst, src);
+}
+
+
+// pshufd xmm, xmm, imm8 (66 0F 70 /r ib): permute src dwords into dst;
+// each 2-bit field of `shuffle` selects the source lane for one dst lane.
+void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x70);
+ emit_sse_operand(dst, src);
+ emit(shuffle);
+}
+
+
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
emit_operand(ireg, adr);
}
+// Emits a ModR/M byte with mod=11 and reg=/3 (0xD8 == 11 011 000b), with
+// `dst` in the r/m field. Only valid for /3-group encodings; currently
+// used by psrldq's shift-immediate form.
+void Assembler::emit_sse_operand(XMMRegister dst) {
+ emit(0xD8 | dst.low_bits());
+}
+
+
void Assembler::db(uint8_t data) {
EnsureSpace ensure_space(this);
emit(data);
typedef XMMRegister DoubleRegister;
+typedef XMMRegister SIMD128Register;
enum Condition {
times_2 = 1,
times_4 = 2,
times_8 = 3,
+ maximal_scale_factor = times_8,
times_int_size = times_4,
times_pointer_size = (kPointerSize == 8) ? times_8 : times_4
};
// SSE instructions
void movaps(XMMRegister dst, XMMRegister src);
+ void movups(XMMRegister dst, const Operand& src);
+ void movups(const Operand& dst, XMMRegister src);
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
void shufps(XMMRegister dst, XMMRegister src, byte imm8);
+ void shufpd(XMMRegister dst, XMMRegister src, byte imm8);
void cvttss2si(Register dst, const Operand& src);
void cvttss2si(Register dst, XMMRegister src);
void divps(XMMRegister dst, XMMRegister src);
void divps(XMMRegister dst, const Operand& src);
+ void addpd(XMMRegister dst, XMMRegister src);
+ void addpd(XMMRegister dst, const Operand& src);
+ void subpd(XMMRegister dst, XMMRegister src);
+ void subpd(XMMRegister dst, const Operand& src);
+ void mulpd(XMMRegister dst, XMMRegister src);
+ void mulpd(XMMRegister dst, const Operand& src);
+ void divpd(XMMRegister dst, XMMRegister src);
+ void divpd(XMMRegister dst, const Operand& src);
+
void movmskps(Register dst, XMMRegister src);
// SSE2 instructions
void divsd(XMMRegister dst, XMMRegister src);
void andpd(XMMRegister dst, XMMRegister src);
+ void andpd(XMMRegister dst, const Operand& src);
void orpd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
+ void xorpd(XMMRegister dst, const Operand& src);
void sqrtsd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, const Operand& src);
// SSE 4.1 instruction
void extractps(Register dst, XMMRegister src, byte imm8);
+ void insertps(XMMRegister dst, XMMRegister src, byte imm8);
+ void pinsrd(XMMRegister dst, Register src, byte imm8);
+
+ void minps(XMMRegister dst, XMMRegister src);
+ void minps(XMMRegister dst, const Operand& src);
+ void maxps(XMMRegister dst, XMMRegister src);
+ void maxps(XMMRegister dst, const Operand& src);
+ void minpd(XMMRegister dst, XMMRegister src);
+ void minpd(XMMRegister dst, const Operand& src);
+ void maxpd(XMMRegister dst, XMMRegister src);
+ void maxpd(XMMRegister dst, const Operand& src);
+ void rcpps(XMMRegister dst, XMMRegister src);
+ void rcpps(XMMRegister dst, const Operand& src);
+ void rsqrtps(XMMRegister dst, XMMRegister src);
+ void rsqrtps(XMMRegister dst, const Operand& src);
+ void sqrtps(XMMRegister dst, XMMRegister src);
+ void sqrtps(XMMRegister dst, const Operand& src);
+ void sqrtpd(XMMRegister dst, XMMRegister src);
+ void sqrtpd(XMMRegister dst, const Operand& src);
+ void paddd(XMMRegister dst, XMMRegister src);
+ void paddd(XMMRegister dst, const Operand& src);
+ void psubd(XMMRegister dst, XMMRegister src);
+ void psubd(XMMRegister dst, const Operand& src);
+ void pmulld(XMMRegister dst, XMMRegister src);
+ void pmulld(XMMRegister dst, const Operand& src);
+ void pmuludq(XMMRegister dst, XMMRegister src);
+ void pmuludq(XMMRegister dst, const Operand& src);
+ void punpackldq(XMMRegister dst, XMMRegister src);
+ void punpackldq(XMMRegister dst, const Operand& src);
+ void psrldq(XMMRegister dst, uint8_t shift);
+ void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
+ void cvtps2dq(XMMRegister dst, XMMRegister src);
+ void cvtps2dq(XMMRegister dst, const Operand& src);
+ void cvtdq2ps(XMMRegister dst, XMMRegister src);
+ void cvtdq2ps(XMMRegister dst, const Operand& src);
enum RoundingMode {
kRoundToNearest = 0x0,
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void cmpps(XMMRegister dst, XMMRegister src, int8_t cmp);
+ void cmpeqps(XMMRegister dst, XMMRegister src);
+ void cmpltps(XMMRegister dst, XMMRegister src);
+ void cmpleps(XMMRegister dst, XMMRegister src);
+ void cmpneqps(XMMRegister dst, XMMRegister src);
+ void cmpnltps(XMMRegister dst, XMMRegister src);
+ void cmpnleps(XMMRegister dst, XMMRegister src);
+
+ void pslld(XMMRegister reg, int8_t shift);
+ void pslld(XMMRegister dst, XMMRegister src);
+ void psrld(XMMRegister reg, int8_t shift);
+ void psrld(XMMRegister dst, XMMRegister src);
+ void psrad(XMMRegister reg, int8_t shift);
+ void psrad(XMMRegister dst, XMMRegister src);
+
+ void pcmpgtd(XMMRegister dst, XMMRegister src);
+ void pcmpeqd(XMMRegister dst, XMMRegister src);
+ void pcmpltd(XMMRegister dst, XMMRegister src);
+
// Debugging
void Print();
// the high bit set.
inline void emit_optional_rex_32(Register rm_reg);
+ // As for emit_optional_rex_32(Register), except that the register is
+ // an XMM register.
+ inline void emit_optional_rex_32(XMMRegister rm_reg);
+
// Optionally do as emit_rex_32(const Operand&) if the operand register
// numbers have a high bit set.
inline void emit_optional_rex_32(const Operand& op);
void emit_sse_operand(XMMRegister reg, const Operand& adr);
void emit_sse_operand(XMMRegister dst, Register src);
void emit_sse_operand(Register dst, XMMRegister src);
+ void emit_sse_operand(XMMRegister dst);
// Emit machine code for one of the operations ADD, ADC, SUB, SBC,
// AND, OR, XOR, or CMP. The encodings of these operations are all
}
input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+ simd128_value_t zero = {{0.0, 0.0}};
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
- input_->SetDoubleRegister(i, 0.0);
+ input_->SetSIMD128Register(i, zero);
}
// Fill the frame content from the actual data on the frame.
}
-void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+void Deoptimizer::CopySIMD128Registers(FrameDescription* output_frame) {
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
- double double_value = input_->GetDoubleRegister(i);
- output_frame->SetDoubleRegister(i, double_value);
+ simd128_value_t xmm_value = input_->GetSIMD128Register(i);
+ output_frame->SetSIMD128Register(i, xmm_value);
}
}
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
- const int kDoubleRegsSize = kDoubleSize *
+ const int kXMMRegsSize = kSIMD128Size *
XMMRegister::NumAllocatableRegisters();
- __ subp(rsp, Immediate(kDoubleRegsSize));
+ __ subp(rsp, Immediate(kXMMRegsSize));
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ movsd(Operand(rsp, offset), xmm_reg);
+ int offset = i * kSIMD128Size;
+ __ movups(Operand(rsp, offset), xmm_reg);
}
// We push all registers onto the stack, even though we do not need
}
const int kSavedRegistersAreaSize = kNumberOfRegisters * kRegisterSize +
- kDoubleRegsSize;
+ kXMMRegsSize;
// We use this to keep the value of the fifth argument temporarily.
// Unfortunately we can't store it directly in r8 (used for passing
__ PopQuad(Operand(rbx, offset));
}
- // Fill in the double input registers.
- int double_regs_offset = FrameDescription::double_registers_offset();
+ // Fill in the xmm input registers.
+ STATIC_ASSERT(kSIMD128Size == 2 * kDoubleSize);
+ int xmm_regs_offset = FrameDescription::simd128_registers_offset();
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
+ int dst_offset = i * kSIMD128Size + xmm_regs_offset;
__ popq(Operand(rbx, dst_offset));
+ __ popq(Operand(rbx, dst_offset + kDoubleSize));
}
// Remove the bailout id and return address from the stack.
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
- __ movsd(xmm_reg, Operand(rbx, src_offset));
+ int src_offset = i * kSIMD128Size + xmm_regs_offset;
+ __ movups(xmm_reg, Operand(rbx, src_offset));
}
// Push state, pc, and continuation from the last output frame.
}
+// Doubles share storage with the SIMD128 register slots: a double value
+// occupies lane 0 (the low 64 bits) of its 128-bit slot.
+double FrameDescription::GetDoubleRegister(unsigned n) const {
+ ASSERT(n < ARRAY_SIZE(simd128_registers_));
+ return simd128_registers_[n].d[0];
+}
+
+
+// Writes only lane 0 of the 128-bit slot; the upper 64 bits are preserved.
+void FrameDescription::SetDoubleRegister(unsigned n, double value) {
+ ASSERT(n < ARRAY_SIZE(simd128_registers_));
+ simd128_registers_[n].d[0] = value;
+}
+
+
#undef __
OPERAND_QUADWORD_SIZE = 3
};
+ enum {
+ rax = 0,
+ rcx = 1,
+ rdx = 2,
+ rbx = 3,
+ rsp = 4,
+ rbp = 5,
+ rsi = 6,
+ rdi = 7
+ };
+
const NameConverter& converter_;
v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
unsigned int tmp_buffer_pos_;
current += PrintRightOperand(current);
AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
current += 1;
+ } else if (third_byte == 0x21) {
+ get_modrm(*current, &mod, &regop, &rm);
+ // insertps xmm, xmm, imm8. The immediate is a full byte
+ // (count_s/count_d/zmask fields), so print all 8 bits; masking
+ // with 3 would drop the source-select and zero-mask bits.
+ AppendToBuffer("insertps %s,%s,%d",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm),
+ (*(current + 1)) & 0xff);
+ current += 2;
+ } else if (third_byte == 0x22) {
+ get_modrm(*current, &mod, &regop, &rm);
+ // pinsrd xmm, reg32, imm8. Only the low two immediate bits are
+ // meaningful (dword index 0-3).
+ AppendToBuffer("pinsrd %s,%s,%d",
+ NameOfXMMRegister(regop),
+ NameOfCPURegister(rm),
+ (*(current + 1)) & 3);
+ current += 2;
} else if (third_byte == 0x0b) {
get_modrm(*current, &mod, ®op, &rm);
// roundsd xmm, xmm/m64, imm8
} else {
UnimplementedInstruction();
}
+ } else if (opcode == 0x38) {
+ byte third_byte = *current;
+ current = data + 3;
+ if (third_byte == 0x40) {
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("pmulld %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ } else {
+ UnimplementedInstruction();
+ }
} else {
get_modrm(*current, &mod, ®op, &rm);
if (opcode == 0x1f) {
AppendToBuffer("movdqa %s,",
NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x70) {
+ AppendToBuffer("pshufd %s,",
+ NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",0x%x", (*current) & 0xff);
+ current += 1;
+ } else if (opcode == 0x5B) {
+ AppendToBuffer("cvtps2dq %s,",
+ NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ } else if (opcode == 0xFE) {
+ AppendToBuffer("paddd %s,",
+ NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ } else if (opcode == 0xFA) {
+ AppendToBuffer("psubd %s,",
+ NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
} else if (opcode == 0x7E) {
AppendToBuffer("mov%c ",
rex_w() ? 'q' : 'd');
ASSERT(regop == 6);
AppendToBuffer("psllq,%s,%d", NameOfXMMRegister(rm), *current & 0x7f);
current += 1;
+ } else if (opcode == 0x62) {
+ AppendToBuffer("punpackldq %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x72) {
+ AppendToBuffer(regop == rsi ? "pslld "
+ : regop == rdx ? "psrld" : "psrad");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",0x%x", (*current) & 0xff);
+ current += 1;
+ } else if (opcode == 0xC6) {
+ AppendToBuffer("shufpd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",0x%x", (*current) & 0xff);
+ current += 1;
+ } else if (opcode == 0xF4) {
+ AppendToBuffer("pmuludq %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
} else {
const char* mnemonic = "?";
- if (opcode == 0x54) {
+ if (opcode == 0x51) {
+ mnemonic = "sqrtpd";
+ } else if (opcode == 0x54) {
mnemonic = "andpd";
} else if (opcode == 0x56) {
mnemonic = "orpd";
} else if (opcode == 0x57) {
mnemonic = "xorpd";
+ } else if (opcode == 0x58) {
+ mnemonic = "addpd";
+ } else if (opcode == 0x59) {
+ mnemonic = "mulpd";
+ } else if (opcode == 0x5C) {
+ mnemonic = "subpd";
+ } else if (opcode == 0x5D) {
+ mnemonic = "minpd";
+ } else if (opcode == 0x5E) {
+ mnemonic = "divpd";
+ } else if (opcode == 0x5F) {
+ mnemonic = "maxpd";
} else if (opcode == 0x2E) {
mnemonic = "ucomisd";
} else if (opcode == 0x2F) {
mnemonic = "comisd";
+ } else if (opcode == 0x66) {
+ mnemonic = "pcmpgtd";
+ } else if (opcode == 0x76) {
+ mnemonic = "pcmpeqd";
+ } else if (opcode == 0xD2) {
+ mnemonic = "psrld";
+ } else if (opcode == 0xE2) {
+ mnemonic = "psrad";
+ } else if (opcode == 0xF2) {
+ mnemonic = "pslld";
} else {
UnimplementedInstruction();
}
current += PrintRightXMMOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (opcode == 0x10) {
+ // movups xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("movups %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x11) {
+ // movups xmm/m128, xmm
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("movups ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(", %s", NameOfXMMRegister(regop));
+
} else if (opcode == 0xA2) {
// CPUID
AppendToBuffer("%s", mnemonic);
AppendToBuffer(", %d", (*current) & 3);
current += 1;
+ } else if (opcode == 0xC6) {
+ // shufps xmm, xmm/m128, imm8
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("shufps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(", %d", (*current) & 3);
+ current += 1;
+
+ } else if (opcode == 0x54) {
+ // andps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("andps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x56) {
+ // orps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("orps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x58) {
+ // addps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("addps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x59) {
+ // mulps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("mulps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x5C) {
+ // subps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("subps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x5E) {
+ // divps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("divps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x5D) {
+ // minps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("minps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x5F) {
+ // maxps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("maxps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x5B) {
+ // cvtdq2ps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("cvtdq2ps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+
+ } else if (opcode == 0x53) {
+ // rcpps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("rcpps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x52) {
+ // rsqrtps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("rsqrtps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x51) {
+ // sqrtps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("sqrtps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
} else if (opcode == 0x50) {
// movmskps reg, xmm
int mod, regop, rm;
AppendToBuffer("movmskps %s,", NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0xC2) {
+ // cmpps xmm, xmm, imm8 — the predicate byte selects the pseudo-op
+ // mnemonic. Intel manual 2A, Table 3-11 defines predicates 0-7;
+ // guard the table index so a malformed immediate cannot read past
+ // the end of pseudo_op.
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ const char* const pseudo_op[] = {
+ "cmpeqps",
+ "cmpltps",
+ "cmpleps",
+ "cmpunordps",
+ "cmpneqps",
+ "cmpnltps",
+ "cmpnleps",
+ "cmpordps"
+ };
+ byte predicate = current[1];
+ if (predicate > 7) {
+ UnimplementedInstruction();
+ } else {
+ AppendToBuffer("%s %s,%s",
+ pseudo_op[predicate],
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ }
+ current += 2;
+
} else if ((opcode & 0xF0) == 0x80) {
// Jcc: Conditional jump (branch).
current = data + JumpConditional(data);
}
+// SIMD128 values are allocated to XMM registers, so an allocation index
+// maps directly to its XMM register.
+XMMRegister LCodeGen::ToSIMD128Register(int index) const {
+ return XMMRegister::FromAllocationIndex(index);
+}
+
+
Register LCodeGen::ToRegister(LOperand* op) const {
ASSERT(op->IsRegister());
return ToRegister(op->index());
}
+// Returns the XMM register holding a Float32x4 operand.
+XMMRegister LCodeGen::ToFloat32x4Register(LOperand* op) const {
+ ASSERT(op->IsFloat32x4Register());
+ return ToSIMD128Register(op->index());
+}
+
+
+// Returns the XMM register holding a Float64x2 operand.
+XMMRegister LCodeGen::ToFloat64x2Register(LOperand* op) const {
+ ASSERT(op->IsFloat64x2Register());
+ return ToSIMD128Register(op->index());
+}
+
+
+// Returns the XMM register holding an Int32x4 operand.
+XMMRegister LCodeGen::ToInt32x4Register(LOperand* op) const {
+ ASSERT(op->IsInt32x4Register());
+ return ToSIMD128Register(op->index());
+}
+
+
+// Generic accessor: accepts any of the three SIMD128 representations.
+XMMRegister LCodeGen::ToSIMD128Register(LOperand* op) const {
+ ASSERT(op->IsFloat32x4Register() || op->IsFloat64x2Register() ||
+ op->IsInt32x4Register());
+ return ToSIMD128Register(op->index());
+}
+
+
bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}
Operand LCodeGen::ToOperand(LOperand* op) const {
// Does not handle registers. In X64 assembler, plain registers are not
// representable as an Operand.
- ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+ ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot() ||
+ op->IsFloat32x4StackSlot() || op->IsFloat64x2StackSlot() ||
+ op->IsInt32x4StackSlot());
if (NeedsEagerFrame()) {
return Operand(rbp, StackSlotOffset(op->index()));
} else {
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
+ } else if (op->IsFloat32x4StackSlot()) {
+ translation->StoreSIMD128StackSlot(op->index(),
+ Translation::FLOAT32x4_STACK_SLOT);
+ } else if (op->IsFloat64x2StackSlot()) {
+ translation->StoreSIMD128StackSlot(op->index(),
+ Translation::FLOAT64x2_STACK_SLOT);
+ } else if (op->IsInt32x4StackSlot()) {
+ translation->StoreSIMD128StackSlot(op->index(),
+ Translation::INT32x4_STACK_SLOT);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
} else if (op->IsDoubleRegister()) {
XMMRegister reg = ToDoubleRegister(op);
translation->StoreDoubleRegister(reg);
+ } else if (op->IsFloat32x4Register()) {
+ XMMRegister reg = ToFloat32x4Register(op);
+ translation->StoreSIMD128Register(reg, Translation::FLOAT32x4_REGISTER);
+ } else if (op->IsFloat64x2Register()) {
+ XMMRegister reg = ToFloat64x2Register(op);
+ translation->StoreSIMD128Register(reg, Translation::FLOAT64x2_REGISTER);
+ } else if (op->IsInt32x4Register()) {
+ XMMRegister reg = ToInt32x4Register(op);
+ translation->StoreSIMD128Register(reg, Translation::INT32x4_REGISTER);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(reg, xmm_scratch);
EmitBranch(instr, not_equal);
+ } else if (r.IsSIMD128()) {
+ ASSERT(!info()->IsStub());
+ EmitBranch(instr, no_condition);
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->value());
}
+// For external-array accesses whose element size exceeds the maximal
+// addressing-mode scale factor (x8), pre-shifts the key register by the
+// leftover amount so BuildFastArrayOperand can use the maximal scale.
+// Returns true if the key was shifted (clobbering the key register),
+// false if no pre-scaling was needed.
+bool LCodeGen::HandleExternalArrayOpRequiresPreScale(
+ LOperand* key,
+ Representation key_representation,
+ ElementsKind elements_kind) {
+ Register key_reg = ToRegister(key);
+ if (ExternalArrayOpRequiresPreScale(key_representation, elements_kind)) {
+ int pre_shift_size = ElementsKindToShiftSize(elements_kind) -
+ static_cast<int>(maximal_scale_factor);
+ ASSERT(pre_shift_size > 0);
+ __ shll(key_reg, Immediate(pre_shift_size));
+ return true;
+ }
+ return false;
+}
+
+
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
Representation key_representation =
instr->hydrogen()->key()->representation();
if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
- __ SmiToInteger64(key_reg, key_reg);
+ if (!HandleExternalArrayOpRequiresPreScale(
+ key, key_representation, elements_kind))
+ __ SmiToInteger64(key_reg, key_reg);
} else if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
}
+ } else if (kPointerSize == kInt64Size && !key->IsConstantOperand()) {
+ Representation key_representation =
+ instr->hydrogen()->key()->representation();
+ if (ExternalArrayOpRequiresTemp(key_representation, elements_kind))
+ HandleExternalArrayOpRequiresPreScale(
+ key, key_representation, elements_kind);
}
+
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
} else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
__ movsd(ToDoubleRegister(instr->result()), operand);
+ } else if (IsSIMD128ElementsKind(elements_kind)) {
+ __ movups(ToSIMD128Register(instr->result()), operand);
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
break;
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32x4_ELEMENTS:
+ case EXTERNAL_FLOAT64x2_ELEMENTS:
+ case EXTERNAL_INT32x4_ELEMENTS:
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
+ case FLOAT32x4_ELEMENTS:
+ case FLOAT64x2_ELEMENTS:
+ case INT32x4_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
if (constant_value & 0xF0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
+
return Operand(elements_pointer_reg,
(constant_value << shift_size) + offset);
} else {
ASSERT(SmiValuesAre31Bits());
shift_size -= kSmiTagSize;
}
+ if (ExternalArrayOpRequiresPreScale(key_representation, elements_kind)) {
+ // Make sure the key is pre-scaled against maximal_scale_factor.
+ shift_size = static_cast<int>(maximal_scale_factor);
+ }
ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
return Operand(elements_pointer_reg,
ToRegister(key),
}
+// Emits code for zero-argument SIMD128 operations.  All three currently
+// supported ops produce an all-zero 128-bit lane vector, materialized by
+// xor-ing the result register with itself.
+void LCodeGen::DoNullarySIMDOperation(LNullarySIMDOperation* instr) {
+  switch (instr->op()) {
+    case kFloat32x4Zero: {
+      XMMRegister result_reg = ToFloat32x4Register(instr->result());
+      __ xorps(result_reg, result_reg);
+      return;
+    }
+    case kFloat64x2Zero: {
+      XMMRegister result_reg = ToFloat64x2Register(instr->result());
+      __ xorpd(result_reg, result_reg);
+      return;
+    }
+    case kInt32x4Zero: {
+      XMMRegister result_reg = ToInt32x4Register(instr->result());
+      __ xorps(result_reg, result_reg);
+      return;
+    }
+    default:
+      UNREACHABLE();
+      return;
+  }
+}
+
+
+// Emits code for one-argument SIMD128 operations: unary arithmetic
+// (abs/neg/reciprocal/sqrt/not), float32x4<->int32x4 conversions and
+// bitcasts, lane splats, sign-mask extraction, and single-lane reads.
+// |select| is a lane index accumulated by the deliberate switch
+// fall-through cascades below (W=3, Z=2, Y=1, X=0).
+void LCodeGen::DoUnarySIMDOperation(LUnarySIMDOperation* instr) {
+  uint8_t select = 0;
+  switch (instr->op()) {
+    case kSIMD128Change: {
+      // Representation changes on SIMD values are unsupported; always
+      // deoptimize.
+      Comment(";;; deoptimize: can not perform representation change"
+              "for float32x4 or int32x4");
+      DeoptimizeIf(no_condition, instr->environment());
+      return;
+    }
+    // In-place float32x4 unary ops (input register doubles as result).
+    case kFloat32x4Abs:
+    case kFloat32x4Neg:
+    case kFloat32x4Reciprocal:
+    case kFloat32x4ReciprocalSqrt:
+    case kFloat32x4Sqrt: {
+      ASSERT(instr->value()->Equals(instr->result()));
+      ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
+      XMMRegister input_reg = ToFloat32x4Register(instr->value());
+      switch (instr->op()) {
+        case kFloat32x4Abs:
+          __ absps(input_reg);
+          break;
+        case kFloat32x4Neg:
+          __ negateps(input_reg);
+          break;
+        case kFloat32x4Reciprocal:
+          __ rcpps(input_reg, input_reg);
+          break;
+        case kFloat32x4ReciprocalSqrt:
+          __ rsqrtps(input_reg, input_reg);
+          break;
+        case kFloat32x4Sqrt:
+          __ sqrtps(input_reg, input_reg);
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+      return;
+    }
+    // In-place float64x2 unary ops.
+    case kFloat64x2Abs:
+    case kFloat64x2Neg:
+    case kFloat64x2Sqrt: {
+      ASSERT(instr->value()->Equals(instr->result()));
+      ASSERT(instr->hydrogen()->value()->representation().IsFloat64x2());
+      XMMRegister input_reg = ToFloat64x2Register(instr->value());
+      switch (instr->op()) {
+        case kFloat64x2Abs:
+          __ abspd(input_reg);
+          break;
+        case kFloat64x2Neg:
+          __ negatepd(input_reg);
+          break;
+        case kFloat64x2Sqrt:
+          __ sqrtpd(input_reg, input_reg);
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+      return;
+    }
+    // In-place int32x4 bitwise-not and negate.
+    case kInt32x4Not:
+    case kInt32x4Neg: {
+      ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
+      XMMRegister input_reg = ToInt32x4Register(instr->value());
+      switch (instr->op()) {
+        case kInt32x4Not:
+          __ notps(input_reg);
+          break;
+        case kInt32x4Neg:
+          __ pnegd(input_reg);
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+      return;
+    }
+    // float32x4 -> int32x4: bitcast is a plain register move; numeric
+    // conversion uses cvtps2dq.
+    case kFloat32x4BitsToInt32x4:
+    case kFloat32x4ToInt32x4: {
+      ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
+      XMMRegister input_reg = ToFloat32x4Register(instr->value());
+      XMMRegister result_reg = ToInt32x4Register(instr->result());
+      if (instr->op() == kFloat32x4BitsToInt32x4) {
+        if (!result_reg.is(input_reg)) {
+          __ movaps(result_reg, input_reg);
+        }
+      } else {
+        ASSERT(instr->op() == kFloat32x4ToInt32x4);
+        __ cvtps2dq(result_reg, input_reg);
+      }
+      return;
+    }
+    // int32x4 -> float32x4: mirror of the case above, via cvtdq2ps.
+    case kInt32x4BitsToFloat32x4:
+    case kInt32x4ToFloat32x4: {
+      ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
+      XMMRegister input_reg = ToInt32x4Register(instr->value());
+      XMMRegister result_reg = ToFloat32x4Register(instr->result());
+      if (instr->op() == kInt32x4BitsToFloat32x4) {
+        if (!result_reg.is(input_reg)) {
+          __ movaps(result_reg, input_reg);
+        }
+      } else {
+        ASSERT(instr->op() == kInt32x4ToFloat32x4);
+        __ cvtdq2ps(result_reg, input_reg);
+      }
+      return;
+    }
+    case kFloat32x4Splat: {
+      ASSERT(instr->hydrogen()->value()->representation().IsDouble());
+      XMMRegister input_reg = ToDoubleRegister(instr->value());
+      XMMRegister result_reg = ToFloat32x4Register(instr->result());
+      XMMRegister xmm_scratch = xmm0;
+      // Zero the scratch before cvtsd2ss (presumably to break the
+      // partial-register dependency on its old upper bits), then
+      // broadcast lane 0 to all four lanes with shufps(0).
+      __ xorps(xmm_scratch, xmm_scratch);
+      __ cvtsd2ss(xmm_scratch, input_reg);
+      __ shufps(xmm_scratch, xmm_scratch, 0x0);
+      __ movaps(result_reg, xmm_scratch);
+      return;
+    }
+    case kInt32x4Splat: {
+      ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+      Register input_reg = ToRegister(instr->value());
+      XMMRegister result_reg = ToInt32x4Register(instr->result());
+      // Move the scalar into lane 0, then broadcast it to all lanes.
+      __ movd(result_reg, input_reg);
+      __ shufps(result_reg, result_reg, 0x0);
+      return;
+    }
+    case kInt32x4GetSignMask: {
+      ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
+      XMMRegister input_reg = ToInt32x4Register(instr->value());
+      Register result = ToRegister(instr->result());
+      __ movmskps(result, input_reg);
+      return;
+    }
+    case kFloat32x4GetSignMask: {
+      ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
+      XMMRegister input_reg = ToFloat32x4Register(instr->value());
+      Register result = ToRegister(instr->result());
+      __ movmskps(result, input_reg);
+      return;
+    }
+    // Lane reads: the fall-throughs count |select| up to the lane index
+    // (GetW -> 3, GetZ -> 2, GetY -> 1, GetX -> 0).
+    case kFloat32x4GetW:
+      select++;
+      // Fall through.
+    case kFloat32x4GetZ:
+      select++;
+      // Fall through.
+    case kFloat32x4GetY:
+      select++;
+      // Fall through.
+    case kFloat32x4GetX: {
+      ASSERT(instr->hydrogen()->value()->representation().IsFloat32x4());
+      XMMRegister input_reg = ToFloat32x4Register(instr->value());
+      XMMRegister result = ToDoubleRegister(instr->result());
+      XMMRegister xmm_scratch = result.is(input_reg) ? xmm0 : result;
+
+      if (select == 0x0) {
+        // Lane 0: widen directly to double.
+        __ xorps(xmm_scratch, xmm_scratch);
+        __ cvtss2sd(xmm_scratch, input_reg);
+        if (!xmm_scratch.is(result)) {
+          __ movaps(result, xmm_scratch);
+        }
+      } else {
+        // Other lanes: shuffle the wanted lane into position 0 first.
+        __ pshufd(xmm_scratch, input_reg, select);
+        if (!xmm_scratch.is(result)) {
+          __ xorps(result, result);
+        }
+        __ cvtss2sd(result, xmm_scratch);
+      }
+      return;
+    }
+    case kFloat64x2GetSignMask: {
+      ASSERT(instr->hydrogen()->value()->representation().IsFloat64x2());
+      XMMRegister input_reg = ToFloat64x2Register(instr->value());
+      Register result = ToRegister(instr->result());
+      __ movmskpd(result, input_reg);
+      return;
+    }
+    case kFloat64x2GetX: {
+      ASSERT(instr->hydrogen()->value()->representation().IsFloat64x2());
+      XMMRegister input_reg = ToFloat64x2Register(instr->value());
+      XMMRegister result = ToDoubleRegister(instr->result());
+
+      // Lane 0 is already the scalar double position; copy if needed.
+      if (!input_reg.is(result)) {
+        __ movaps(result, input_reg);
+      }
+      return;
+    }
+    case kFloat64x2GetY: {
+      ASSERT(instr->hydrogen()->value()->representation().IsFloat64x2());
+      XMMRegister input_reg = ToFloat64x2Register(instr->value());
+      XMMRegister result = ToDoubleRegister(instr->result());
+
+      if (!input_reg.is(result)) {
+        __ movaps(result, input_reg);
+      }
+      // shufpd(0x1) moves the high lane into scalar position 0.
+      __ shufpd(result, input_reg, 0x1);
+      return;
+    }
+    case kInt32x4GetX:
+    case kInt32x4GetY:
+    case kInt32x4GetZ:
+    case kInt32x4GetW:
+    case kInt32x4GetFlagX:
+    case kInt32x4GetFlagY:
+    case kInt32x4GetFlagZ:
+    case kInt32x4GetFlagW: {
+      ASSERT(instr->hydrogen()->value()->representation().IsInt32x4());
+      // |flag| distinguishes GetFlag* (result is a boolean heap value)
+      // from Get* (result is the raw int32 lane).  Each GetFlag case
+      // sets the flag then falls through to the matching lane case.
+      bool flag = false;
+      switch (instr->op()) {
+        case kInt32x4GetFlagX:
+          flag = true;
+          // Fall through.
+        case kInt32x4GetX:
+          break;
+        case kInt32x4GetFlagY:
+          flag = true;
+          // Fall through.
+        case kInt32x4GetY:
+          select = 0x1;
+          break;
+        case kInt32x4GetFlagZ:
+          flag = true;
+          // Fall through.
+        case kInt32x4GetZ:
+          select = 0x2;
+          break;
+        case kInt32x4GetFlagW:
+          flag = true;
+          // Fall through.
+        case kInt32x4GetW:
+          select = 0x3;
+          break;
+        default:
+          UNREACHABLE();
+      }
+
+      XMMRegister input_reg = ToInt32x4Register(instr->value());
+      Register result = ToRegister(instr->result());
+      if (select == 0x0) {
+        // Lane 0 extracts directly with movd.
+        __ movd(result, input_reg);
+      } else {
+        if (CpuFeatures::IsSupported(SSE4_1)) {
+          CpuFeatureScope scope(masm(), SSE4_1);
+          __ extractps(result, input_reg, select);
+        } else {
+          // Pre-SSE4.1 fallback: shuffle the lane to position 0, then movd.
+          XMMRegister xmm_scratch = xmm0;
+          __ pshufd(xmm_scratch, input_reg, select);
+          __ movd(result, xmm_scratch);
+        }
+      }
+
+      if (flag) {
+        // Convert the raw lane (zero / non-zero) into true/false heap
+        // values for GetFlag*.
+        Label false_value, done;
+        __ testl(result, result);
+        __ j(zero, &false_value, Label::kNear);
+        __ LoadRoot(result, Heap::kTrueValueRootIndex);
+        __ jmp(&done, Label::kNear);
+        __ bind(&false_value);
+        __ LoadRoot(result, Heap::kFalseValueRootIndex);
+        __ bind(&done);
+      }
+      return;
+    }
+    default:
+      UNREACHABLE();
+      return;
+  }
+}
+
+
+// Emits code for two-argument SIMD128 operations: packed arithmetic,
+// comparisons, shuffles, shifts, scalar scaling, lane insertion ("with"
+// ops), and the two-lane float64x2 constructor.
+void LCodeGen::DoBinarySIMDOperation(LBinarySIMDOperation* instr) {
+  // Lane index for the With*/WithFlag* ops, accumulated via the
+  // deliberate switch fall-throughs below (W=3, Z=2, Y=1, X=0).
+  uint8_t imm8 = 0;  // for with operation
+  switch (instr->op()) {
+    // Packed float32x4 arithmetic; left operand doubles as result.
+    case kFloat32x4Add:
+    case kFloat32x4Sub:
+    case kFloat32x4Mul:
+    case kFloat32x4Div:
+    case kFloat32x4Min:
+    case kFloat32x4Max: {
+      ASSERT(instr->left()->Equals(instr->result()));
+      ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
+      ASSERT(instr->hydrogen()->right()->representation().IsFloat32x4());
+      XMMRegister left_reg = ToFloat32x4Register(instr->left());
+      XMMRegister right_reg = ToFloat32x4Register(instr->right());
+      switch (instr->op()) {
+        case kFloat32x4Add:
+          __ addps(left_reg, right_reg);
+          break;
+        case kFloat32x4Sub:
+          __ subps(left_reg, right_reg);
+          break;
+        case kFloat32x4Mul:
+          __ mulps(left_reg, right_reg);
+          break;
+        case kFloat32x4Div:
+          __ divps(left_reg, right_reg);
+          break;
+        case kFloat32x4Min:
+          __ minps(left_reg, right_reg);
+          break;
+        case kFloat32x4Max:
+          __ maxps(left_reg, right_reg);
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+      return;
+    }
+    case kFloat32x4Scale: {
+      // Multiply every float32x4 lane by a scalar double: narrow the
+      // scalar to float, broadcast it, then packed-multiply.
+      ASSERT(instr->left()->Equals(instr->result()));
+      ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
+      ASSERT(instr->hydrogen()->right()->representation().IsDouble());
+      XMMRegister left_reg = ToFloat32x4Register(instr->left());
+      XMMRegister right_reg = ToDoubleRegister(instr->right());
+      XMMRegister scratch_reg = xmm0;
+      __ xorps(scratch_reg, scratch_reg);
+      __ cvtsd2ss(scratch_reg, right_reg);
+      __ shufps(scratch_reg, scratch_reg, 0x0);
+      __ mulps(left_reg, scratch_reg);
+      return;
+    }
+    // Packed float64x2 arithmetic; left operand doubles as result.
+    case kFloat64x2Add:
+    case kFloat64x2Sub:
+    case kFloat64x2Mul:
+    case kFloat64x2Div:
+    case kFloat64x2Min:
+    case kFloat64x2Max: {
+      ASSERT(instr->left()->Equals(instr->result()));
+      ASSERT(instr->hydrogen()->left()->representation().IsFloat64x2());
+      ASSERT(instr->hydrogen()->right()->representation().IsFloat64x2());
+      XMMRegister left_reg = ToFloat64x2Register(instr->left());
+      XMMRegister right_reg = ToFloat64x2Register(instr->right());
+      switch (instr->op()) {
+        case kFloat64x2Add:
+          __ addpd(left_reg, right_reg);
+          break;
+        case kFloat64x2Sub:
+          __ subpd(left_reg, right_reg);
+          break;
+        case kFloat64x2Mul:
+          __ mulpd(left_reg, right_reg);
+          break;
+        case kFloat64x2Div:
+          __ divpd(left_reg, right_reg);
+          break;
+        case kFloat64x2Min:
+          __ minpd(left_reg, right_reg);
+          break;
+        case kFloat64x2Max:
+          __ maxpd(left_reg, right_reg);
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+      return;
+    }
+    case kFloat64x2Scale: {
+      ASSERT(instr->left()->Equals(instr->result()));
+      ASSERT(instr->hydrogen()->left()->representation().IsFloat64x2());
+      ASSERT(instr->hydrogen()->right()->representation().IsDouble());
+      XMMRegister left_reg = ToFloat64x2Register(instr->left());
+      XMMRegister right_reg = ToDoubleRegister(instr->right());
+      // NOTE(review): this broadcasts in place and clobbers right_reg
+      // (the scalar input register) — presumably the register allocator
+      // guarantees it is dead afterwards; confirm.
+      __ shufpd(right_reg, right_reg, 0x0);
+      __ mulpd(left_reg, right_reg);
+      return;
+    }
+    case kFloat32x4Shuffle: {
+      // shufps needs a compile-time immediate, so a non-constant
+      // selector deoptimizes.
+      ASSERT(instr->left()->Equals(instr->result()));
+      ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
+      if (instr->hydrogen()->right()->IsConstant() &&
+          HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
+        int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
+        uint8_t select = static_cast<uint8_t>(value & 0xFF);
+        XMMRegister left_reg = ToFloat32x4Register(instr->left());
+        __ shufps(left_reg, left_reg, select);
+        return;
+      } else {
+        Comment(";;; deoptimize: non-constant selector for shuffle");
+        DeoptimizeIf(no_condition, instr->environment());
+        return;
+      }
+    }
+    case kInt32x4Shuffle: {
+      // Same constant-selector requirement as kFloat32x4Shuffle.
+      ASSERT(instr->left()->Equals(instr->result()));
+      ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
+      if (instr->hydrogen()->right()->IsConstant() &&
+          HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
+        int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
+        uint8_t select = static_cast<uint8_t>(value & 0xFF);
+        XMMRegister left_reg = ToInt32x4Register(instr->left());
+        __ pshufd(left_reg, left_reg, select);
+        return;
+      } else {
+        Comment(";;; deoptimize: non-constant selector for shuffle");
+        DeoptimizeIf(no_condition, instr->environment());
+        return;
+      }
+    }
+    case kInt32x4ShiftLeft:
+    case kInt32x4ShiftRight:
+    case kInt32x4ShiftRightArithmetic: {
+      ASSERT(instr->left()->Equals(instr->result()));
+      ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
+      if (instr->hydrogen()->right()->IsConstant() &&
+          HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
+        // Constant shift count: use the immediate forms.
+        int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
+        uint8_t shift = static_cast<uint8_t>(value & 0xFF);
+        XMMRegister left_reg = ToInt32x4Register(instr->left());
+        switch (instr->op()) {
+          case kInt32x4ShiftLeft:
+            __ pslld(left_reg, shift);
+            break;
+          case kInt32x4ShiftRight:
+            __ psrld(left_reg, shift);
+            break;
+          case kInt32x4ShiftRightArithmetic:
+            __ psrad(left_reg, shift);
+            break;
+          default:
+            UNREACHABLE();
+        }
+        return;
+      } else {
+        // Variable shift count: move it into an XMM register and use
+        // the register-count forms.
+        XMMRegister left_reg = ToInt32x4Register(instr->left());
+        Register shift = ToRegister(instr->right());
+        XMMRegister xmm_scratch = double_scratch0();
+        __ movd(xmm_scratch, shift);
+        switch (instr->op()) {
+          case kInt32x4ShiftLeft:
+            __ pslld(left_reg, xmm_scratch);
+            break;
+          case kInt32x4ShiftRight:
+            __ psrld(left_reg, xmm_scratch);
+            break;
+          case kInt32x4ShiftRightArithmetic:
+            __ psrad(left_reg, xmm_scratch);
+            break;
+          default:
+            UNREACHABLE();
+        }
+        return;
+      }
+    }
+    // Packed float32x4 comparisons producing an int32x4 lane mask.
+    // SSE only has lt/le/eq/neq predicates with a fixed operand order,
+    // so each case picks the predicate (or its complement) that works
+    // for whichever operand aliases the result register.
+    case kFloat32x4LessThan:
+    case kFloat32x4LessThanOrEqual:
+    case kFloat32x4Equal:
+    case kFloat32x4NotEqual:
+    case kFloat32x4GreaterThanOrEqual:
+    case kFloat32x4GreaterThan: {
+      ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
+      ASSERT(instr->hydrogen()->right()->representation().IsFloat32x4());
+      XMMRegister left_reg = ToFloat32x4Register(instr->left());
+      XMMRegister right_reg = ToFloat32x4Register(instr->right());
+      XMMRegister result_reg = ToInt32x4Register(instr->result());
+      switch (instr->op()) {
+        case kFloat32x4LessThan:
+          if (result_reg.is(left_reg)) {
+            __ cmpltps(result_reg, right_reg);
+          } else if (result_reg.is(right_reg)) {
+            __ cmpnltps(result_reg, left_reg);
+          } else {
+            __ movaps(result_reg, left_reg);
+            __ cmpltps(result_reg, right_reg);
+          }
+          break;
+        case kFloat32x4LessThanOrEqual:
+          if (result_reg.is(left_reg)) {
+            __ cmpleps(result_reg, right_reg);
+          } else if (result_reg.is(right_reg)) {
+            __ cmpnleps(result_reg, left_reg);
+          } else {
+            __ movaps(result_reg, left_reg);
+            __ cmpleps(result_reg, right_reg);
+          }
+          break;
+        case kFloat32x4Equal:
+          // Equality is symmetric, so operand order does not matter.
+          if (result_reg.is(left_reg)) {
+            __ cmpeqps(result_reg, right_reg);
+          } else if (result_reg.is(right_reg)) {
+            __ cmpeqps(result_reg, left_reg);
+          } else {
+            __ movaps(result_reg, left_reg);
+            __ cmpeqps(result_reg, right_reg);
+          }
+          break;
+        case kFloat32x4NotEqual:
+          if (result_reg.is(left_reg)) {
+            __ cmpneqps(result_reg, right_reg);
+          } else if (result_reg.is(right_reg)) {
+            __ cmpneqps(result_reg, left_reg);
+          } else {
+            __ movaps(result_reg, left_reg);
+            __ cmpneqps(result_reg, right_reg);
+          }
+          break;
+        case kFloat32x4GreaterThanOrEqual:
+          if (result_reg.is(left_reg)) {
+            __ cmpnltps(result_reg, right_reg);
+          } else if (result_reg.is(right_reg)) {
+            __ cmpltps(result_reg, left_reg);
+          } else {
+            __ movaps(result_reg, left_reg);
+            __ cmpnltps(result_reg, right_reg);
+          }
+          break;
+        case kFloat32x4GreaterThan:
+          if (result_reg.is(left_reg)) {
+            __ cmpnleps(result_reg, right_reg);
+          } else if (result_reg.is(right_reg)) {
+            __ cmpleps(result_reg, left_reg);
+          } else {
+            __ movaps(result_reg, left_reg);
+            __ cmpnleps(result_reg, right_reg);
+          }
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+      return;
+    }
+    // Packed int32x4 bitwise/arithmetic/comparison ops; left operand
+    // doubles as result.
+    case kInt32x4And:
+    case kInt32x4Or:
+    case kInt32x4Xor:
+    case kInt32x4Add:
+    case kInt32x4Sub:
+    case kInt32x4Mul:
+    case kInt32x4GreaterThan:
+    case kInt32x4Equal:
+    case kInt32x4LessThan: {
+      ASSERT(instr->left()->Equals(instr->result()));
+      ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
+      ASSERT(instr->hydrogen()->right()->representation().IsInt32x4());
+      XMMRegister left_reg = ToInt32x4Register(instr->left());
+      XMMRegister right_reg = ToInt32x4Register(instr->right());
+      switch (instr->op()) {
+        case kInt32x4And:
+          __ andps(left_reg, right_reg);
+          break;
+        case kInt32x4Or:
+          __ orps(left_reg, right_reg);
+          break;
+        case kInt32x4Xor:
+          __ xorps(left_reg, right_reg);
+          break;
+        case kInt32x4Add:
+          __ paddd(left_reg, right_reg);
+          break;
+        case kInt32x4Sub:
+          __ psubd(left_reg, right_reg);
+          break;
+        case kInt32x4Mul:
+          if (CpuFeatures::IsSupported(SSE4_1)) {
+            CpuFeatureScope scope(masm(), SSE4_1);
+            __ pmulld(left_reg, right_reg);
+          } else {
+            // The algorithm is from http://stackoverflow.com/questions/10500766/sse-multiplication-of-4-32-bit-integers
+            // Multiplies even lanes and odd lanes separately with
+            // pmuludq, then interleaves the low 32 bits of each product.
+            // NOTE(review): this path clobbers right_reg (psrldq) —
+            // presumably the allocator treats it as dead here; confirm.
+            XMMRegister xmm_scratch = xmm0;
+            __ movaps(xmm_scratch, left_reg);
+            __ pmuludq(left_reg, right_reg);
+            __ psrldq(xmm_scratch, 4);
+            __ psrldq(right_reg, 4);
+            __ pmuludq(xmm_scratch, right_reg);
+            __ pshufd(left_reg, left_reg, 8);
+            __ pshufd(xmm_scratch, xmm_scratch, 8);
+            __ punpackldq(left_reg, xmm_scratch);
+          }
+          break;
+        case kInt32x4GreaterThan:
+          __ pcmpgtd(left_reg, right_reg);
+          break;
+        case kInt32x4Equal:
+          __ pcmpeqd(left_reg, right_reg);
+          break;
+        case kInt32x4LessThan: {
+          // No pcmpltd exists: compute right > left into a scratch and
+          // copy it back.
+          XMMRegister xmm_scratch = xmm0;
+          __ movaps(xmm_scratch, right_reg);
+          __ pcmpgtd(xmm_scratch, left_reg);
+          __ movaps(left_reg, xmm_scratch);
+          break;
+        }
+        default:
+          UNREACHABLE();
+          break;
+      }
+      return;
+    }
+    // Replace one float32x4 lane with a scalar; fall-throughs compute
+    // the lane index in |imm8|.
+    case kFloat32x4WithW:
+      imm8++;
+      // Fall through.
+    case kFloat32x4WithZ:
+      imm8++;
+      // Fall through.
+    case kFloat32x4WithY:
+      imm8++;
+      // Fall through.
+    case kFloat32x4WithX: {
+      ASSERT(instr->left()->Equals(instr->result()));
+      ASSERT(instr->hydrogen()->left()->representation().IsFloat32x4());
+      ASSERT(instr->hydrogen()->right()->representation().IsDouble());
+      XMMRegister left_reg = ToFloat32x4Register(instr->left());
+      XMMRegister right_reg = ToDoubleRegister(instr->right());
+      XMMRegister xmm_scratch = xmm0;
+      __ xorps(xmm_scratch, xmm_scratch);
+      __ cvtsd2ss(xmm_scratch, right_reg);
+      if (CpuFeatures::IsSupported(SSE4_1)) {
+        // insertps takes the destination lane in bits 5:4 of the
+        // immediate (see Intel SDM), hence the shift.
+        imm8 = imm8 << 4;
+        CpuFeatureScope scope(masm(), SSE4_1);
+        __ insertps(left_reg, xmm_scratch, imm8);
+      } else {
+        // Pre-SSE4.1 fallback: spill to the stack, overwrite the lane
+        // in memory, reload.
+        __ subq(rsp, Immediate(kFloat32x4Size));
+        __ movups(Operand(rsp, 0), left_reg);
+        __ movss(Operand(rsp, imm8 * kFloatSize), xmm_scratch);
+        __ movups(left_reg, Operand(rsp, 0));
+        __ addq(rsp, Immediate(kFloat32x4Size));
+      }
+      return;
+    }
+    case kFloat64x2WithX: {
+      // Replace lane 0 via a stack round-trip.
+      ASSERT(instr->left()->Equals(instr->result()));
+      ASSERT(instr->hydrogen()->left()->representation().IsFloat64x2());
+      ASSERT(instr->hydrogen()->right()->representation().IsDouble());
+      XMMRegister left_reg = ToFloat64x2Register(instr->left());
+      XMMRegister right_reg = ToDoubleRegister(instr->right());
+      __ subq(rsp, Immediate(kFloat64x2Size));
+      __ movups(Operand(rsp, 0), left_reg);
+      __ movsd(Operand(rsp, 0 * kDoubleSize), right_reg);
+      __ movups(left_reg, Operand(rsp, 0));
+      __ addq(rsp, Immediate(kFloat64x2Size));
+      return;
+    }
+    case kFloat64x2WithY: {
+      // Replace lane 1 via a stack round-trip.
+      ASSERT(instr->left()->Equals(instr->result()));
+      ASSERT(instr->hydrogen()->left()->representation().IsFloat64x2());
+      ASSERT(instr->hydrogen()->right()->representation().IsDouble());
+      XMMRegister left_reg = ToFloat64x2Register(instr->left());
+      XMMRegister right_reg = ToDoubleRegister(instr->right());
+      __ subq(rsp, Immediate(kFloat64x2Size));
+      __ movups(Operand(rsp, 0), left_reg);
+      __ movsd(Operand(rsp, 1 * kDoubleSize), right_reg);
+      __ movups(left_reg, Operand(rsp, 0));
+      __ addq(rsp, Immediate(kFloat64x2Size));
+      return;
+    }
+    case kFloat64x2Constructor: {
+      // Build a float64x2 from two scalar doubles via the stack.
+      ASSERT(instr->hydrogen()->left()->representation().IsDouble());
+      ASSERT(instr->hydrogen()->right()->representation().IsDouble());
+      XMMRegister left_reg = ToDoubleRegister(instr->left());
+      XMMRegister right_reg = ToDoubleRegister(instr->right());
+      XMMRegister result_reg = ToFloat64x2Register(instr->result());
+      __ subq(rsp, Immediate(kFloat64x2Size));
+      __ movsd(Operand(rsp, 0 * kDoubleSize), left_reg);
+      __ movsd(Operand(rsp, 1 * kDoubleSize), right_reg);
+      __ movups(result_reg, Operand(rsp, 0));
+      __ addq(rsp, Immediate(kFloat64x2Size));
+      return;
+    }
+    // Replace one int32x4 lane with a scalar int32.
+    case kInt32x4WithW:
+      imm8++;
+      // Fall through.
+    case kInt32x4WithZ:
+      imm8++;
+      // Fall through.
+    case kInt32x4WithY:
+      imm8++;
+      // Fall through.
+    case kInt32x4WithX: {
+      ASSERT(instr->left()->Equals(instr->result()));
+      ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
+      ASSERT(instr->hydrogen()->right()->representation().IsInteger32());
+      XMMRegister left_reg = ToInt32x4Register(instr->left());
+      Register right_reg = ToRegister(instr->right());
+      if (CpuFeatures::IsSupported(SSE4_1)) {
+        CpuFeatureScope scope(masm(), SSE4_1);
+        __ pinsrd(left_reg, right_reg, imm8);
+      } else {
+        // Pre-SSE4.1 fallback: stack round-trip, as for float lanes.
+        __ subq(rsp, Immediate(kInt32x4Size));
+        __ movdqu(Operand(rsp, 0), left_reg);
+        __ movl(Operand(rsp, imm8 * kFloatSize), right_reg);
+        __ movdqu(left_reg, Operand(rsp, 0));
+        __ addq(rsp, Immediate(kInt32x4Size));
+      }
+      return;
+    }
+    // Replace one int32x4 lane with a boolean flag (all-ones for true,
+    // zero for false).  Non-boolean inputs deoptimize.
+    case kInt32x4WithFlagW:
+      imm8++;
+      // Fall through.
+    case kInt32x4WithFlagZ:
+      imm8++;
+      // Fall through.
+    case kInt32x4WithFlagY:
+      imm8++;
+      // Fall through.
+    case kInt32x4WithFlagX: {
+      ASSERT(instr->left()->Equals(instr->result()));
+      ASSERT(instr->hydrogen()->left()->representation().IsInt32x4());
+      ASSERT(instr->hydrogen()->right()->representation().IsTagged());
+      HType type = instr->hydrogen()->right()->type();
+      XMMRegister left_reg = ToInt32x4Register(instr->left());
+      Register right_reg = ToRegister(instr->right());
+      Label load_false_value, done;
+      if (type.IsBoolean()) {
+        __ subq(rsp, Immediate(kInt32x4Size));
+        __ movups(Operand(rsp, 0), left_reg);
+        __ CompareRoot(right_reg, Heap::kTrueValueRootIndex);
+        __ j(not_equal, &load_false_value, Label::kNear);
+      } else {
+        Comment(";;; deoptimize: other types for int32x4.withFlagX/Y/Z/W.");
+        DeoptimizeIf(no_condition, instr->environment());
+        return;
+      }
+      // load true value.
+      __ movl(Operand(rsp, imm8 * kFloatSize), Immediate(0xFFFFFFFF));
+      __ jmp(&done, Label::kNear);
+      __ bind(&load_false_value);
+      __ movl(Operand(rsp, imm8 * kFloatSize), Immediate(0x0));
+      __ bind(&done);
+      __ movups(left_reg, Operand(rsp, 0));
+      __ addq(rsp, Immediate(kInt32x4Size));
+      return;
+    }
+    default:
+      UNREACHABLE();
+      return;
+  }
+}
+
+
+// Emits code for three-argument SIMD128 operations: lane-wise select,
+// two-input shuffle mix, and clamp.
+void LCodeGen::DoTernarySIMDOperation(LTernarySIMDOperation* instr) {
+  switch (instr->op()) {
+    case kInt32x4Select: {
+      // result = (mask & trueValue) | (~mask & falseValue), computed
+      // with bitwise ops; xmm0 holds the inverted-mask half.
+      ASSERT(instr->hydrogen()->first()->representation().IsInt32x4());
+      ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
+      ASSERT(instr->hydrogen()->third()->representation().IsFloat32x4());
+
+      XMMRegister mask_reg = ToInt32x4Register(instr->first());
+      XMMRegister left_reg = ToFloat32x4Register(instr->second());
+      XMMRegister right_reg = ToFloat32x4Register(instr->third());
+      XMMRegister result_reg = ToFloat32x4Register(instr->result());
+      XMMRegister temp_reg = xmm0;
+
+      // Copy mask.
+      __ movaps(temp_reg, mask_reg);
+      // Invert it.
+      __ notps(temp_reg);
+      // temp_reg = temp_reg & falseValue.
+      __ andps(temp_reg, right_reg);
+
+      // The and/or sequence depends on which input aliases the result.
+      if (!result_reg.is(mask_reg)) {
+        if (result_reg.is(left_reg)) {
+          // result_reg = result_reg & trueValue.
+          __ andps(result_reg, mask_reg);
+          // out = result_reg | temp_reg.
+          __ orps(result_reg, temp_reg);
+        } else {
+          __ movaps(result_reg, mask_reg);
+          // result_reg = result_reg & trueValue.
+          __ andps(result_reg, left_reg);
+          // out = result_reg | temp_reg.
+          __ orps(result_reg, temp_reg);
+        }
+      } else {
+        // result_reg = result_reg & trueValue.
+        __ andps(result_reg, left_reg);
+        // out = result_reg | temp_reg.
+        __ orps(result_reg, temp_reg);
+      }
+      return;
+    }
+    case kFloat32x4ShuffleMix: {
+      // shufps of two different inputs; needs a constant selector
+      // (immediate operand), otherwise deoptimize.
+      ASSERT(instr->first()->Equals(instr->result()));
+      ASSERT(instr->hydrogen()->first()->representation().IsFloat32x4());
+      ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
+      ASSERT(instr->hydrogen()->third()->representation().IsInteger32());
+      if (instr->hydrogen()->third()->IsConstant() &&
+          HConstant::cast(instr->hydrogen()->third())->HasInteger32Value()) {
+        int32_t value = ToInteger32(LConstantOperand::cast(instr->third()));
+        uint8_t select = static_cast<uint8_t>(value & 0xFF);
+        XMMRegister first_reg = ToFloat32x4Register(instr->first());
+        XMMRegister second_reg = ToFloat32x4Register(instr->second());
+        __ shufps(first_reg, second_reg, select);
+        return;
+      } else {
+        Comment(";;; deoptimize: non-constant selector for shuffle");
+        DeoptimizeIf(no_condition, instr->environment());
+        return;
+      }
+    }
+    case kFloat32x4Clamp: {
+      // Clamp value into [lower, upper]: min with upper, then max with
+      // lower, in place.
+      ASSERT(instr->first()->Equals(instr->result()));
+      ASSERT(instr->hydrogen()->first()->representation().IsFloat32x4());
+      ASSERT(instr->hydrogen()->second()->representation().IsFloat32x4());
+      ASSERT(instr->hydrogen()->third()->representation().IsFloat32x4());
+
+      XMMRegister value_reg = ToFloat32x4Register(instr->first());
+      XMMRegister lower_reg = ToFloat32x4Register(instr->second());
+      XMMRegister upper_reg = ToFloat32x4Register(instr->third());
+      __ minps(value_reg, upper_reg);
+      __ maxps(value_reg, lower_reg);
+      return;
+    }
+    case kFloat64x2Clamp: {
+      // Same clamp pattern for float64x2 lanes.
+      ASSERT(instr->first()->Equals(instr->result()));
+      ASSERT(instr->hydrogen()->first()->representation().IsFloat64x2());
+      ASSERT(instr->hydrogen()->second()->representation().IsFloat64x2());
+      ASSERT(instr->hydrogen()->third()->representation().IsFloat64x2());
+
+      XMMRegister value_reg = ToFloat64x2Register(instr->first());
+      XMMRegister lower_reg = ToFloat64x2Register(instr->second());
+      XMMRegister upper_reg = ToFloat64x2Register(instr->third());
+      __ minpd(value_reg, upper_reg);
+      __ maxpd(value_reg, lower_reg);
+      return;
+    }
+    default:
+      UNREACHABLE();
+      return;
+  }
+}
+
+
+// Emits code for four-argument SIMD128 operations: the float32x4 and
+// int32x4 constructors (assembled lane-by-lane on the stack) and
+// int32x4.bool (four tagged booleans -> all-ones/zero lane mask).
+void LCodeGen::DoQuarternarySIMDOperation(LQuarternarySIMDOperation* instr) {
+  switch (instr->op()) {
+    case kFloat32x4Constructor: {
+      ASSERT(instr->hydrogen()->x()->representation().IsDouble());
+      ASSERT(instr->hydrogen()->y()->representation().IsDouble());
+      ASSERT(instr->hydrogen()->z()->representation().IsDouble());
+      ASSERT(instr->hydrogen()->w()->representation().IsDouble());
+      XMMRegister x_reg = ToDoubleRegister(instr->x());
+      XMMRegister y_reg = ToDoubleRegister(instr->y());
+      XMMRegister z_reg = ToDoubleRegister(instr->z());
+      XMMRegister w_reg = ToDoubleRegister(instr->w());
+      XMMRegister result_reg = ToFloat32x4Register(instr->result());
+      // Narrow each double to float and store to consecutive stack
+      // slots, then load all four lanes at once.  xmm0 is zeroed before
+      // each cvtsd2ss (presumably to break the partial-register
+      // dependency on its stale upper bits).
+      __ subq(rsp, Immediate(kFloat32x4Size));
+      __ xorps(xmm0, xmm0);
+      __ cvtsd2ss(xmm0, x_reg);
+      __ movss(Operand(rsp, 0 * kFloatSize), xmm0);
+      __ xorps(xmm0, xmm0);
+      __ cvtsd2ss(xmm0, y_reg);
+      __ movss(Operand(rsp, 1 * kFloatSize), xmm0);
+      __ xorps(xmm0, xmm0);
+      __ cvtsd2ss(xmm0, z_reg);
+      __ movss(Operand(rsp, 2 * kFloatSize), xmm0);
+      __ xorps(xmm0, xmm0);
+      __ cvtsd2ss(xmm0, w_reg);
+      __ movss(Operand(rsp, 3 * kFloatSize), xmm0);
+      __ movups(result_reg, Operand(rsp, 0 * kFloatSize));
+      __ addq(rsp, Immediate(kFloat32x4Size));
+      return;
+    }
+    case kInt32x4Constructor: {
+      ASSERT(instr->hydrogen()->x()->representation().IsInteger32());
+      ASSERT(instr->hydrogen()->y()->representation().IsInteger32());
+      ASSERT(instr->hydrogen()->z()->representation().IsInteger32());
+      ASSERT(instr->hydrogen()->w()->representation().IsInteger32());
+      Register x_reg = ToRegister(instr->x());
+      Register y_reg = ToRegister(instr->y());
+      Register z_reg = ToRegister(instr->z());
+      Register w_reg = ToRegister(instr->w());
+      XMMRegister result_reg = ToInt32x4Register(instr->result());
+      // Store the four int32 lanes to the stack and load as one vector.
+      __ subq(rsp, Immediate(kInt32x4Size));
+      __ movl(Operand(rsp, 0 * kInt32Size), x_reg);
+      __ movl(Operand(rsp, 1 * kInt32Size), y_reg);
+      __ movl(Operand(rsp, 2 * kInt32Size), z_reg);
+      __ movl(Operand(rsp, 3 * kInt32Size), w_reg);
+      __ movups(result_reg, Operand(rsp, 0 * kInt32Size));
+      __ addq(rsp, Immediate(kInt32x4Size));
+      return;
+    }
+    case kInt32x4Bool: {
+      ASSERT(instr->hydrogen()->x()->representation().IsTagged());
+      ASSERT(instr->hydrogen()->y()->representation().IsTagged());
+      ASSERT(instr->hydrogen()->z()->representation().IsTagged());
+      ASSERT(instr->hydrogen()->w()->representation().IsTagged());
+      HType x_type = instr->hydrogen()->x()->type();
+      HType y_type = instr->hydrogen()->y()->type();
+      HType z_type = instr->hydrogen()->z()->type();
+      HType w_type = instr->hydrogen()->w()->type();
+      // Only statically-known booleans are handled; anything else
+      // deoptimizes.
+      if (!x_type.IsBoolean() || !y_type.IsBoolean() ||
+          !z_type.IsBoolean() || !w_type.IsBoolean()) {
+        Comment(";;; deoptimize: other types for int32x4.bool.");
+        DeoptimizeIf(no_condition, instr->environment());
+        return;
+      }
+      XMMRegister result_reg = ToInt32x4Register(instr->result());
+      Register x_reg = ToRegister(instr->x());
+      Register y_reg = ToRegister(instr->y());
+      Register z_reg = ToRegister(instr->z());
+      Register w_reg = ToRegister(instr->w());
+      Label load_false_x, done_x, load_false_y, done_y,
+          load_false_z, done_z, load_false_w, done_w;
+      __ subq(rsp, Immediate(kInt32x4Size));
+
+      // Each lane: -1 (all bits set) if the input is the true value,
+      // 0 otherwise, written to its stack slot.
+      __ CompareRoot(x_reg, Heap::kTrueValueRootIndex);
+      __ j(not_equal, &load_false_x, Label::kNear);
+      __ movl(Operand(rsp, 0 * kInt32Size), Immediate(-1));
+      __ jmp(&done_x, Label::kNear);
+      __ bind(&load_false_x);
+      __ movl(Operand(rsp, 0 * kInt32Size), Immediate(0x0));
+      __ bind(&done_x);
+
+      __ CompareRoot(y_reg, Heap::kTrueValueRootIndex);
+      __ j(not_equal, &load_false_y, Label::kNear);
+      __ movl(Operand(rsp, 1 * kInt32Size), Immediate(-1));
+      __ jmp(&done_y, Label::kNear);
+      __ bind(&load_false_y);
+      __ movl(Operand(rsp, 1 * kInt32Size), Immediate(0x0));
+      __ bind(&done_y);
+
+      __ CompareRoot(z_reg, Heap::kTrueValueRootIndex);
+      __ j(not_equal, &load_false_z, Label::kNear);
+      __ movl(Operand(rsp, 2 * kInt32Size), Immediate(-1));
+      __ jmp(&done_z, Label::kNear);
+      __ bind(&load_false_z);
+      __ movl(Operand(rsp, 2 * kInt32Size), Immediate(0x0));
+      __ bind(&done_z);
+
+      __ CompareRoot(w_reg, Heap::kTrueValueRootIndex);
+      __ j(not_equal, &load_false_w, Label::kNear);
+      __ movl(Operand(rsp, 3 * kInt32Size), Immediate(-1));
+      __ jmp(&done_w, Label::kNear);
+      __ bind(&load_false_w);
+      __ movl(Operand(rsp, 3 * kInt32Size), Immediate(0x0));
+      __ bind(&done_w);
+
+      // Load the assembled mask vector.
+      __ movups(result_reg, Operand(rsp, 0));
+      __ addq(rsp, Immediate(kInt32x4Size));
+      return;
+    }
+    default:
+      UNREACHABLE();
+      return;
+  }
+}
+
+
void LCodeGen::DoPower(LPower* instr) {
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
Representation key_representation =
instr->hydrogen()->key()->representation();
if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
- __ SmiToInteger64(key_reg, key_reg);
+ if (!HandleExternalArrayOpRequiresPreScale(
+ key, key_representation, elements_kind))
+ __ SmiToInteger64(key_reg, key_reg);
} else if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
}
+ } else if (kPointerSize == kInt64Size && !key->IsConstantOperand()) {
+ Representation key_representation =
+ instr->hydrogen()->key()->representation();
+ if (ExternalArrayOpRequiresTemp(key_representation, elements_kind))
+ HandleExternalArrayOpRequiresPreScale(
+ key, key_representation, elements_kind);
}
+
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
__ cvtsd2ss(value, value);
__ movss(operand, value);
} else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
- elements_kind == FLOAT64_ELEMENTS) {
+ elements_kind == FLOAT64_ELEMENTS) {
__ movsd(operand, ToDoubleRegister(instr->value()));
+ } else if (IsSIMD128ElementsKind(elements_kind)) {
+ __ movups(operand, ToSIMD128Register(instr->value()));
} else {
Register value(ToRegister(instr->value()));
switch (elements_kind) {
__ movl(operand, value);
break;
case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT32x4_ELEMENTS:
+ case EXTERNAL_FLOAT64x2_ELEMENTS:
+ case EXTERNAL_INT32x4_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
+ case FLOAT32x4_ELEMENTS:
+ case FLOAT64x2_ELEMENTS:
+ case INT32x4_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
}
+// Deferred (slow) path for boxing a SIMD128 value: calls the runtime
+// allocator identified by |id| with all registers saved at a safepoint.
+// The fast path in HandleSIMD128ToTagged jumps here when inline
+// allocation fails or FLAG_inline_new is off.
+void LCodeGen::DoDeferredSIMD128ToTagged(LSIMD128ToTagged* instr,
+                                         Runtime::FunctionId id) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register reg = ToRegister(instr->result());
+  __ Move(reg, Smi::FromInt(0));
+
+  {
+    PushSafepointRegistersScope scope(this);
+    // Reload the context from the frame and call the runtime allocator;
+    // the freshly allocated object comes back in rax.
+    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+    __ CallRuntimeSaveDoubles(id);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+    // Stash the result before the safepoint scope restores registers.
+    __ movp(kScratchRegister, rax);
+  }
+  __ movp(reg, kScratchRegister);
+}
+
+
+// Boxes an xmm-held SIMD128 value into a heap object of type T (one of
+// Float32x4, Int32x4, Float64x2).  Tries inline allocation first and
+// falls back to a deferred runtime call, then stores the 16-byte payload
+// into the object's inner FixedTypedArray backing store.
+template<class T>
+void LCodeGen::HandleSIMD128ToTagged(LSIMD128ToTagged* instr) {
+  class DeferredSIMD128ToTagged V8_FINAL : public LDeferredCode {
+   public:
+    DeferredSIMD128ToTagged(LCodeGen* codegen,
+                            LSIMD128ToTagged* instr,
+                            Runtime::FunctionId id)
+        : LDeferredCode(codegen), instr_(instr), id_(id) { }
+    virtual void Generate() V8_OVERRIDE {
+      codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
+    }
+    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+   private:
+    LSIMD128ToTagged* instr_;
+    Runtime::FunctionId id_;
+  };
+
+  XMMRegister input_reg = ToSIMD128Register(instr->value());
+  Register reg = ToRegister(instr->result());
+  Register tmp = ToRegister(instr->temp());
+  Register tmp2 = ToRegister(instr->temp2());
+  Register tmp3 = ToRegister(instr->temp3());
+
+  // The runtime fallback allocator is selected by the boxed type T.
+  DeferredSIMD128ToTagged* deferred =
+      new(zone()) DeferredSIMD128ToTagged(this, instr,
+          static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()));
+  if (FLAG_inline_new) {
+    if (T::kInstanceType == FLOAT32x4_TYPE) {
+      __ AllocateFloat32x4(reg, tmp, tmp2, tmp3, deferred->entry());
+    } else if (T::kInstanceType == INT32x4_TYPE) {
+      __ AllocateInt32x4(reg, tmp, tmp2, tmp3, deferred->entry());
+    } else if (T::kInstanceType == FLOAT64x2_TYPE) {
+      __ AllocateFloat64x2(reg, tmp, tmp2, tmp3, deferred->entry());
+    }
+  } else {
+    __ jmp(deferred->entry());
+  }
+  __ bind(deferred->exit());
+
+  // Load the inner FixedTypedArray object.
+  __ movp(tmp, FieldOperand(reg, T::kValueOffset));
+
+  // Unaligned 128-bit store of the SIMD payload into the backing store.
+  __ movups(FieldOperand(tmp, FixedTypedArrayBase::kDataOffset), input_reg);
+}
+
+
+// Dispatches SIMD128 boxing on the concrete register kind of the input.
+void LCodeGen::DoSIMD128ToTagged(LSIMD128ToTagged* instr) {
+  if (instr->value()->IsFloat32x4Register()) {
+    HandleSIMD128ToTagged<Float32x4>(instr);
+  } else if (instr->value()->IsFloat64x2Register()) {
+    HandleSIMD128ToTagged<Float64x2>(instr);
+  } else {
+    ASSERT(instr->value()->IsInt32x4Register());
+    HandleSIMD128ToTagged<Int32x4>(instr);
+  }
+}
+
+
void LCodeGen::DoSmiTag(LSmiTag* instr) {
HChange* hchange = instr->hydrogen();
Register input = ToRegister(instr->value());
}
+// Unboxes a tagged SIMD128 heap object of type T into an xmm register.
+// Deoptimizes if the input is a smi or not an instance of T.
+template<class T>
+void LCodeGen::HandleTaggedToSIMD128(LTaggedToSIMD128* instr) {
+  LOperand* input = instr->value();
+  ASSERT(input->IsRegister());
+  LOperand* result = instr->result();
+  ASSERT(result->IsSIMD128Register());
+  LOperand* temp = instr->temp();
+  ASSERT(temp->IsRegister());
+
+  Register input_reg = ToRegister(input);
+  XMMRegister result_reg = ToSIMD128Register(result);
+  Register temp_reg = ToRegister(temp);
+
+  // Smi check: a smi cannot be a boxed SIMD value.
+  __ testp(input_reg, Immediate(kSmiTagMask));
+  DeoptimizeIf(zero, instr->environment());
+  // Instance-type check against the expected SIMD wrapper type.
+  __ CmpObjectType(input_reg, T::kInstanceType, kScratchRegister);
+  DeoptimizeIf(not_equal, instr->environment());
+
+  // Load the inner FixedTypedArray object.
+  __ movp(temp_reg, FieldOperand(input_reg, T::kValueOffset));
+
+  // Unaligned 128-bit load of the payload from the backing store.
+  __ movups(
+      result_reg, FieldOperand(temp_reg, FixedTypedArrayBase::kDataOffset));
+}
+
+
+// Dispatches SIMD128 unboxing on the target representation.
+void LCodeGen::DoTaggedToSIMD128(LTaggedToSIMD128* instr) {
+  if (instr->representation().IsFloat32x4()) {
+    HandleTaggedToSIMD128<Float32x4>(instr);
+  } else if (instr->representation().IsFloat64x2()) {
+    HandleTaggedToSIMD128<Float64x2>(instr);
+  } else {
+    ASSERT(instr->representation().IsInt32x4());
+    HandleTaggedToSIMD128<Int32x4>(instr);
+  }
+}
+
+
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
LOperand* input = instr->value();
ASSERT(input->IsDoubleRegister());
// Support for converting LOperands to assembler types.
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
+ XMMRegister ToFloat32x4Register(LOperand* op) const;
+ XMMRegister ToFloat64x2Register(LOperand* op) const;
+ XMMRegister ToInt32x4Register(LOperand* op) const;
+ XMMRegister ToSIMD128Register(LOperand* op) const;
bool IsInteger32Constant(LConstantOperand* op) const;
bool IsDehoistedKeyConstant(LConstantOperand* op) const;
bool IsSmiConstant(LConstantOperand* op) const;
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
Register object,
Register index);
+ void DoDeferredSIMD128ToTagged(LSIMD128ToTagged* instr,
+ Runtime::FunctionId id);
+
+ template<class T>
+ void HandleTaggedToSIMD128(LTaggedToSIMD128* instr);
+ template<class T>
+ void HandleSIMD128ToTagged(LSIMD128ToTagged* instr);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
+ XMMRegister ToSIMD128Register(int index) const;
Operand BuildFastArrayOperand(
LOperand* elements_pointer,
LOperand* key,
void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ bool HandleExternalArrayOpRequiresPreScale(LOperand* key,
+ Representation key_representation,
+ ElementsKind elements_kind);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
void DoStoreKeyedExternalArray(LStoreKeyed* instr);
__ movsd(xmm0, src);
__ movsd(cgen_->ToOperand(destination), xmm0);
}
+ } else if (source->IsSIMD128Register()) {
+ XMMRegister src = cgen_->ToSIMD128Register(source);
+ if (destination->IsSIMD128Register()) {
+ __ movaps(cgen_->ToSIMD128Register(destination), src);
+ } else {
+ ASSERT(destination->IsSIMD128StackSlot());
+ __ movups(cgen_->ToOperand(destination), src);
+ }
+ } else if (source->IsSIMD128StackSlot()) {
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsSIMD128Register()) {
+ __ movups(cgen_->ToSIMD128Register(destination), src);
+ } else {
+ ASSERT(destination->IsSIMD128StackSlot());
+ __ movups(xmm0, src);
+ __ movups(cgen_->ToOperand(destination), xmm0);
+ }
} else {
UNREACHABLE();
}
__ movsd(dst, xmm0);
__ movp(src, kScratchRegister);
+ } else if ((source->IsSIMD128StackSlot() &&
+ destination->IsSIMD128StackSlot())) {
+ // Swap two XMM stack slots.
+ STATIC_ASSERT(kSIMD128Size == 2 * kDoubleSize);
+ Operand src = cgen_->ToOperand(source);
+ Operand dst = cgen_->ToOperand(destination);
+ __ movups(xmm0, src);
+ __ movq(kScratchRegister, dst);
+ __ movq(src, kScratchRegister);
+ __ movq(kScratchRegister, Operand(dst, kDoubleSize));
+ __ movq(Operand(src, kDoubleSize), kScratchRegister);
+ __ movups(dst, xmm0);
+
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
// Swap two double registers.
XMMRegister source_reg = cgen_->ToDoubleRegister(source);
__ movaps(source_reg, destination_reg);
__ movaps(destination_reg, xmm0);
+ } else if (source->IsSIMD128Register() && destination->IsSIMD128Register()) {
+ // Swap two XMM registers.
+ XMMRegister source_reg = cgen_->ToSIMD128Register(source);
+ XMMRegister destination_reg = cgen_->ToSIMD128Register(destination);
+ __ movaps(xmm0, source_reg);
+ __ movaps(source_reg, destination_reg);
+ __ movaps(destination_reg, xmm0);
+
} else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
// Swap a double register and a double stack slot.
ASSERT((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
__ movsd(other_operand, reg);
__ movaps(reg, xmm0);
+ } else if (source->IsSIMD128Register() || destination->IsSIMD128Register()) {
+ // Swap a xmm register and a xmm stack slot.
+ ASSERT((source->IsSIMD128Register() &&
+ destination->IsSIMD128StackSlot()) ||
+ (source->IsSIMD128StackSlot() &&
+ destination->IsSIMD128Register()));
+ XMMRegister reg = cgen_->ToSIMD128Register(source->IsSIMD128Register()
+ ? source
+ : destination);
+ LOperand* other = source->IsSIMD128Register() ? destination : source;
+ ASSERT(other->IsSIMD128StackSlot());
+ Operand other_operand = cgen_->ToOperand(other);
+ __ movups(xmm0, other_operand);
+ __ movups(other_operand, reg);
+ __ movaps(reg, xmm0);
} else {
// No other combinations are possible.
UNREACHABLE();
int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
+  switch (kind) {
+    case GENERAL_REGISTERS: return spill_slot_count_++;
+    case DOUBLE_REGISTERS: return spill_slot_count_++;
+    case FLOAT32x4_REGISTERS:
+    case FLOAT64x2_REGISTERS:
+    case INT32x4_REGISTERS: {
+      // SIMD128 values are 16 bytes wide: consume two consecutive 8-byte
+      // spill slots and hand out the index of the second one.
+      spill_slot_count_++;
+      return spill_slot_count_++;
+    }
+    default:
+      UNREACHABLE();
+      return -1;
+  }
+
+  // NOTE(review): every arm of the switch above returns, so the original
+  // statement below is now unreachable dead code (kept to avoid touching a
+  // context line of this patch).
  return spill_slot_count_++;
}
// Alternatively, at some point, start using half-size
// stack slots for int32 values.
int index = GetNextSpillIndex(kind);
- if (kind == DOUBLE_REGISTERS) {
- return LDoubleStackSlot::Create(index, zone());
- } else {
- ASSERT(kind == GENERAL_REGISTERS);
- return LStackSlot::Create(index, zone());
+ switch (kind) {
+ case GENERAL_REGISTERS: return LStackSlot::Create(index, zone());
+ case DOUBLE_REGISTERS: return LDoubleStackSlot::Create(index, zone());
+ case FLOAT32x4_REGISTERS: return LFloat32x4StackSlot::Create(index, zone());
+ case FLOAT64x2_REGISTERS: return LFloat64x2StackSlot::Create(index, zone());
+ case INT32x4_REGISTERS: return LInt32x4StackSlot::Create(index, zone());
+ default:
+ UNREACHABLE();
+ return NULL;
}
}
}
+// Maps the builtin-function id of a zero-input SIMD op to its
+// "module-function" mnemonic string via the X-macro op table.
+const char* LNullarySIMDOperation::Mnemonic() const {
+  switch (op()) {
+#define SIMD_NULLARY_OPERATION_CASE_ITEM(module, function, name, p4) \
+    case k##name: \
+      return #module "-" #function;
+SIMD_NULLARY_OPERATIONS(SIMD_NULLARY_OPERATION_CASE_ITEM)
+#undef SIMD_NULLARY_OPERATION_CASE_ITEM
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+// Builds the lithium instruction for a zero-input SIMD op; every op in the
+// table simply defines its result in a fresh register.
+LInstruction* LChunkBuilder::DoNullarySIMDOperation(
+    HNullarySIMDOperation* instr) {
+  LNullarySIMDOperation* result =
+      new(zone()) LNullarySIMDOperation(instr->op());
+  switch (instr->op()) {
+#define SIMD_NULLARY_OPERATION_CASE_ITEM(module, function, name, p4) \
+    case k##name:
+SIMD_NULLARY_OPERATIONS(SIMD_NULLARY_OPERATION_CASE_ITEM)
+#undef SIMD_NULLARY_OPERATION_CASE_ITEM
+      return DefineAsRegister(result);
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+// Mnemonic for one-input SIMD ops; kSIMD128Change (representation change)
+// is not in the op tables and is handled explicitly.
+const char* LUnarySIMDOperation::Mnemonic() const {
+  switch (op()) {
+    case kSIMD128Change: return "SIMD128-change";
+#define SIMD_UNARY_OPERATION_CASE_ITEM(module, function, name, p4, p5) \
+    case k##name: \
+      return #module "-" #function;
+SIMD_UNARY_OPERATIONS(SIMD_UNARY_OPERATION_CASE_ITEM)
+SIMD_UNARY_OPERATIONS_FOR_PROPERTY_ACCESS(SIMD_UNARY_OPERATION_CASE_ITEM)
+#undef SIMD_UNARY_OPERATION_CASE_ITEM
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+// Builds the lithium instruction for a one-input SIMD op and picks register
+// constraints per op: in-place ops reuse the input register, extractors and
+// converters get a fresh result register, and kSIMD128Change can deopt so it
+// gets an environment.
+LInstruction* LChunkBuilder::DoUnarySIMDOperation(HUnarySIMDOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LUnarySIMDOperation* result =
+      new(zone()) LUnarySIMDOperation(input, instr->op());
+  switch (instr->op()) {
+    case kSIMD128Change:
+      // Representation change may deoptimize on unexpected input.
+      return AssignEnvironment(DefineAsRegister(result));
+    case kFloat32x4Abs:
+    case kFloat32x4Neg:
+    case kFloat32x4Reciprocal:
+    case kFloat32x4ReciprocalSqrt:
+    case kFloat32x4Sqrt:
+    case kFloat64x2Abs:
+    case kFloat64x2Neg:
+    case kFloat64x2Sqrt:
+    case kInt32x4Neg:
+    case kInt32x4Not:
+      // Computed in place on the input register.
+      return DefineSameAsFirst(result);
+    case kFloat32x4BitsToInt32x4:
+    case kFloat32x4ToInt32x4:
+    case kInt32x4BitsToFloat32x4:
+    case kInt32x4ToFloat32x4:
+    case kFloat32x4Splat:
+    case kInt32x4Splat:
+    case kFloat32x4GetSignMask:
+    case kFloat32x4GetX:
+    case kFloat32x4GetY:
+    case kFloat32x4GetZ:
+    case kFloat32x4GetW:
+    case kFloat64x2GetSignMask:
+    case kFloat64x2GetX:
+    case kFloat64x2GetY:
+    case kInt32x4GetSignMask:
+    case kInt32x4GetX:
+    case kInt32x4GetY:
+    case kInt32x4GetZ:
+    case kInt32x4GetW:
+    case kInt32x4GetFlagX:
+    case kInt32x4GetFlagY:
+    case kInt32x4GetFlagZ:
+    case kInt32x4GetFlagW:
+      // Conversions, splats and lane/flag extractors write a new register.
+      return DefineAsRegister(result);
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+// Maps a two-input SIMD op id to its "module-function" mnemonic string.
+const char* LBinarySIMDOperation::Mnemonic() const {
+  switch (op()) {
+#define SIMD_BINARY_OPERATION_CASE_ITEM(module, function, name, p4, p5, p6) \
+    case k##name: \
+      return #module "-" #function;
+SIMD_BINARY_OPERATIONS(SIMD_BINARY_OPERATION_CASE_ITEM)
+#undef SIMD_BINARY_OPERATION_CASE_ITEM
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+// Builds the lithium instruction for a two-input SIMD op.  Operand
+// constraints are grouped: most arithmetic/lane-insert ops compute in place
+// (result same as first input); shuffles and shifts take a register-or-
+// constant right operand and can deopt on out-of-range operands; float
+// comparisons and the Float64x2 constructor produce a fresh register.
+LInstruction* LChunkBuilder::DoBinarySIMDOperation(
+    HBinarySIMDOperation* instr) {
+  switch (instr->op()) {
+    case kFloat32x4Add:
+    case kFloat32x4Div:
+    case kFloat32x4Max:
+    case kFloat32x4Min:
+    case kFloat32x4Mul:
+    case kFloat32x4Sub:
+    case kFloat32x4Scale:
+    case kFloat32x4WithX:
+    case kFloat32x4WithY:
+    case kFloat32x4WithZ:
+    case kFloat32x4WithW:
+    case kFloat64x2Add:
+    case kFloat64x2Div:
+    case kFloat64x2Max:
+    case kFloat64x2Min:
+    case kFloat64x2Mul:
+    case kFloat64x2Sub:
+    case kFloat64x2Scale:
+    case kFloat64x2WithX:
+    case kFloat64x2WithY:
+    case kInt32x4Add:
+    case kInt32x4And:
+    case kInt32x4Mul:
+    case kInt32x4Or:
+    case kInt32x4Sub:
+    case kInt32x4Xor:
+    case kInt32x4WithX:
+    case kInt32x4WithY:
+    case kInt32x4WithZ:
+    case kInt32x4WithW:
+    case kInt32x4WithFlagX:
+    case kInt32x4WithFlagY:
+    case kInt32x4WithFlagZ:
+    case kInt32x4WithFlagW:
+    case kInt32x4GreaterThan:
+    case kInt32x4Equal:
+    case kInt32x4LessThan: {
+      LOperand* left = UseRegisterAtStart(instr->left());
+      LOperand* right = UseRegisterAtStart(instr->right());
+      LBinarySIMDOperation* result =
+          new(zone()) LBinarySIMDOperation(left, right, instr->op());
+      if (instr->op() == kInt32x4WithFlagX ||
+          instr->op() == kInt32x4WithFlagY ||
+          instr->op() == kInt32x4WithFlagZ ||
+          instr->op() == kInt32x4WithFlagW) {
+        // Flag insertion can deopt if the flag value is not a boolean.
+        return AssignEnvironment(DefineSameAsFirst(result));
+      } else {
+        return DefineSameAsFirst(result);
+      }
+    }
+    case kFloat64x2Constructor: {
+      LOperand* left = UseRegisterAtStart(instr->left());
+      LOperand* right = UseRegisterAtStart(instr->right());
+      LBinarySIMDOperation* result =
+          new(zone()) LBinarySIMDOperation(left, right, instr->op());
+      return DefineAsRegister(result);
+    }
+    case kFloat32x4Shuffle:
+    case kInt32x4Shuffle:
+    case kInt32x4ShiftLeft:
+    case kInt32x4ShiftRight:
+    case kInt32x4ShiftRightArithmetic: {
+      // Shuffle masks / shift counts may be immediates.
+      LOperand* left = UseRegisterAtStart(instr->left());
+      LOperand* right = UseOrConstant(instr->right());
+      LBinarySIMDOperation* result =
+          new(zone()) LBinarySIMDOperation(left, right, instr->op());
+      return AssignEnvironment(DefineSameAsFirst(result));
+    }
+    case kFloat32x4LessThan:
+    case kFloat32x4LessThanOrEqual:
+    case kFloat32x4Equal:
+    case kFloat32x4NotEqual:
+    case kFloat32x4GreaterThanOrEqual:
+    case kFloat32x4GreaterThan: {
+      LOperand* left = UseRegisterAtStart(instr->left());
+      LOperand* right = UseRegisterAtStart(instr->right());
+      LBinarySIMDOperation* result =
+          new(zone()) LBinarySIMDOperation(left, right, instr->op());
+      return DefineAsRegister(result);
+    }
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+// Maps a three-input SIMD op id to its "module-function" mnemonic string.
+const char* LTernarySIMDOperation::Mnemonic() const {
+  switch (op()) {
+#define SIMD_TERNARY_OPERATION_CASE_ITEM(module, function, name, p4, p5, p6, \
+                                         p7) \
+    case k##name: \
+      return #module "-" #function;
+SIMD_TERNARY_OPERATIONS(SIMD_TERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_TERNARY_OPERATION_CASE_ITEM
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+// Builds the lithium instruction for a three-input SIMD op.  ShuffleMix's
+// third operand (the mask) may be an immediate and the op can deopt; clamps
+// compute in place; select writes a fresh register.
+LInstruction* LChunkBuilder::DoTernarySIMDOperation(
+    HTernarySIMDOperation* instr) {
+  LOperand* first = UseRegisterAtStart(instr->first());
+  LOperand* second = UseRegisterAtStart(instr->second());
+  LOperand* third = instr->op() == kFloat32x4ShuffleMix
+                    ? UseOrConstant(instr->third())
+                    : UseRegisterAtStart(instr->third());
+  LTernarySIMDOperation* result =
+      new(zone()) LTernarySIMDOperation(first, second, third, instr->op());
+  switch (instr->op()) {
+    case kFloat32x4Clamp:
+    case kFloat64x2Clamp: {
+      return DefineSameAsFirst(result);
+    }
+    case kFloat32x4ShuffleMix: {
+      return AssignEnvironment(DefineSameAsFirst(result));
+    }
+    case kInt32x4Select: {
+      return DefineAsRegister(result);
+    }
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+// Maps a four-input SIMD op id to its "module-function" mnemonic string.
+const char* LQuarternarySIMDOperation::Mnemonic() const {
+  switch (op()) {
+#define SIMD_QUARTERNARY_OPERATION_CASE_ITEM(module, function, name, p4, p5, \
+                                             p6, p7, p8) \
+    case k##name: \
+      return #module "-" #function;
+SIMD_QUARTERNARY_OPERATIONS(SIMD_QUARTERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_QUARTERNARY_OPERATION_CASE_ITEM
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+// Builds the lithium instruction for a four-input SIMD op (the x/y/z/w
+// constructors).  kInt32x4Bool can deopt on non-boolean lane inputs, so it
+// additionally gets an environment.
+LInstruction* LChunkBuilder::DoQuarternarySIMDOperation(
+    HQuarternarySIMDOperation* instr) {
+  LOperand* x = UseRegisterAtStart(instr->x());
+  LOperand* y = UseRegisterAtStart(instr->y());
+  LOperand* z = UseRegisterAtStart(instr->z());
+  LOperand* w = UseRegisterAtStart(instr->w());
+  LQuarternarySIMDOperation* result =
+      new(zone()) LQuarternarySIMDOperation(x, y, z, w, instr->op());
+  if (instr->op() == kInt32x4Bool) {
+    return AssignEnvironment(DefineAsRegister(result));
+  } else {
+    return DefineAsRegister(result);
+  }
+}
+
+
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LOperand* constructor = UseFixed(instr->constructor(), rdi);
LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value));
if (!val->representation().IsSmi()) result = AssignEnvironment(result);
return result;
+ } else if (to.IsSIMD128()) {
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+ LTaggedToSIMD128* res = new(zone()) LTaggedToSIMD128(value, temp, to);
+ return AssignEnvironment(DefineAsRegister(res));
} else if (to.IsSmi()) {
LOperand* value = UseRegister(val);
if (val->type().IsSmi()) {
return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
}
}
+ } else if (from.IsSIMD128()) {
+ ASSERT(to.IsTagged());
+ info()->MarkAsDeferredCalling();
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* temp3 = TempRegister();
+
+ // Make sure that temp and result_temp are different registers.
+ LUnallocated* result_temp = TempRegister();
+ LSIMD128ToTagged* result =
+ new(zone()) LSIMD128ToTagged(value, temp, temp2, temp3);
+ return AssignPointerMap(Define(result, result_temp));
}
UNREACHABLE();
return NULL;
LInstruction* result = NULL;
if (kPointerSize == kInt64Size) {
- key = UseRegisterOrConstantAtStart(instr->key());
+ bool clobbers_key = ExternalArrayOpRequiresPreScale(
+ instr->key()->representation(), elements_kind);
+ key = clobbers_key
+ ? UseTempRegisterOrConstant(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
} else {
bool clobbers_key = ExternalArrayOpRequiresTemp(
instr->key()->representation(), elements_kind);
FindDehoistedKeyDefinitions(instr->key());
}
+
if (!instr->is_typed_elements()) {
LOperand* obj = UseRegisterAtStart(instr->elements());
result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
(instr->representation().IsInteger32() &&
!(IsDoubleOrFloatElementsKind(elements_kind))) ||
(instr->representation().IsDouble() &&
- (IsDoubleOrFloatElementsKind(elements_kind))));
+ (IsDoubleOrFloatElementsKind(elements_kind))) ||
+ (instr->representation().IsFloat32x4() &&
+ IsFloat32x4ElementsKind(elements_kind)) ||
+ (instr->representation().IsFloat64x2() &&
+ IsFloat64x2ElementsKind(elements_kind)) ||
+ (instr->representation().IsInt32x4() &&
+ IsInt32x4ElementsKind(elements_kind)));
LOperand* backing_store = UseRegister(instr->elements());
result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
}
(instr->value()->representation().IsInteger32() &&
!IsDoubleOrFloatElementsKind(elements_kind)) ||
(instr->value()->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(elements_kind)));
+ IsDoubleOrFloatElementsKind(elements_kind)) ||
+ (instr->value()->representation().IsFloat32x4() &&
+ IsFloat32x4ElementsKind(elements_kind)) ||
+ (instr->value()->representation().IsFloat64x2() &&
+ IsFloat64x2ElementsKind(elements_kind)) ||
+ (instr->value()->representation().IsInt32x4() &&
+ IsInt32x4ElementsKind(elements_kind)));
ASSERT((instr->is_fixed_typed_array() &&
instr->elements()->representation().IsTagged()) ||
(instr->is_external() &&
: UseRegister(instr->value());
LOperand* key = NULL;
if (kPointerSize == kInt64Size) {
- key = UseRegisterOrConstantAtStart(instr->key());
+ bool clobbers_key = ExternalArrayOpRequiresPreScale(
+ instr->key()->representation(), elements_kind);
+ key = clobbers_key
+ ? UseTempRegisterOrConstant(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
} else {
bool clobbers_key = ExternalArrayOpRequiresTemp(
instr->key()->representation(), elements_kind);
V(MathSqrt) \
V(ModByConstI) \
V(ModByPowerOf2I) \
+ V(NullarySIMDOperation) \
+ V(UnarySIMDOperation) \
+ V(BinarySIMDOperation) \
+ V(TernarySIMDOperation) \
+ V(QuarternarySIMDOperation) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
+ V(SIMD128ToTagged) \
V(NumberTagI) \
V(NumberTagU) \
V(NumberUntagD) \
+ V(TaggedToSIMD128) \
V(OsrEntry) \
V(Parameter) \
V(Power) \
};
+// Lithium instruction for a SIMD builtin with no inputs and one register
+// result; the specific builtin is identified by |op_|.
+class LNullarySIMDOperation V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  explicit LNullarySIMDOperation(BuiltinFunctionId op)
+      : op_(op) {
+  }
+
+  BuiltinFunctionId op() const { return op_; }
+
+  virtual Opcode opcode() const V8_OVERRIDE {
+    return LInstruction::kNullarySIMDOperation;
+  }
+  virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+  virtual const char* Mnemonic() const V8_OVERRIDE;
+  static LNullarySIMDOperation* cast(LInstruction* instr) {
+    ASSERT(instr->IsNullarySIMDOperation());
+    return reinterpret_cast<LNullarySIMDOperation*>(instr);
+  }
+
+  DECLARE_HYDROGEN_ACCESSOR(NullarySIMDOperation)
+
+ private:
+  BuiltinFunctionId op_;
+};
+
+
+// Lithium instruction for a SIMD builtin with one input and one register
+// result; the specific builtin is identified by |op_|.
+class LUnarySIMDOperation V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LUnarySIMDOperation(LOperand* value, BuiltinFunctionId op)
+      : op_(op) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  BuiltinFunctionId op() const { return op_; }
+
+  virtual Opcode opcode() const V8_OVERRIDE {
+    return LInstruction::kUnarySIMDOperation;
+  }
+  virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+  virtual const char* Mnemonic() const V8_OVERRIDE;
+  static LUnarySIMDOperation* cast(LInstruction* instr) {
+    ASSERT(instr->IsUnarySIMDOperation());
+    return reinterpret_cast<LUnarySIMDOperation*>(instr);
+  }
+
+  DECLARE_HYDROGEN_ACCESSOR(UnarySIMDOperation)
+
+ private:
+  BuiltinFunctionId op_;
+};
+
+
+// Lithium instruction for a SIMD builtin with two inputs and one register
+// result; the specific builtin is identified by |op_|.
+class LBinarySIMDOperation V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LBinarySIMDOperation(LOperand* left, LOperand* right, BuiltinFunctionId op)
+      : op_(op) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  BuiltinFunctionId op() const { return op_; }
+
+  virtual Opcode opcode() const V8_OVERRIDE {
+    return LInstruction::kBinarySIMDOperation;
+  }
+  virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+  virtual const char* Mnemonic() const V8_OVERRIDE;
+  static LBinarySIMDOperation* cast(LInstruction* instr) {
+    ASSERT(instr->IsBinarySIMDOperation());
+    return reinterpret_cast<LBinarySIMDOperation*>(instr);
+  }
+
+  DECLARE_HYDROGEN_ACCESSOR(BinarySIMDOperation)
+
+ private:
+  BuiltinFunctionId op_;
+};
+
+
+// Lithium instruction for a SIMD builtin with three inputs and one register
+// result; the specific builtin is identified by |op_|.
+class LTernarySIMDOperation V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LTernarySIMDOperation(LOperand* first, LOperand* second, LOperand* third,
+                        BuiltinFunctionId op)
+      : op_(op) {
+    inputs_[0] = first;
+    inputs_[1] = second;
+    inputs_[2] = third;
+  }
+
+  LOperand* first() { return inputs_[0]; }
+  LOperand* second() { return inputs_[1]; }
+  LOperand* third() { return inputs_[2]; }
+  BuiltinFunctionId op() const { return op_; }
+
+  virtual Opcode opcode() const V8_OVERRIDE {
+    return LInstruction::kTernarySIMDOperation;
+  }
+  virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+  virtual const char* Mnemonic() const V8_OVERRIDE;
+  static LTernarySIMDOperation* cast(LInstruction* instr) {
+    ASSERT(instr->IsTernarySIMDOperation());
+    return reinterpret_cast<LTernarySIMDOperation*>(instr);
+  }
+
+  DECLARE_HYDROGEN_ACCESSOR(TernarySIMDOperation)
+
+ private:
+  BuiltinFunctionId op_;
+};
+
+
+// Lithium instruction for a SIMD builtin with four inputs (x, y, z, w lanes)
+// and one register result; the specific builtin is identified by |op_|.
+class LQuarternarySIMDOperation V8_FINAL
+    : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LQuarternarySIMDOperation(LOperand* x, LOperand* y, LOperand* z,
+                            LOperand* w, BuiltinFunctionId op)
+      : op_(op) {
+    inputs_[0] = x;
+    inputs_[1] = y;
+    inputs_[2] = z;
+    inputs_[3] = w;
+  }
+
+  LOperand* x() { return inputs_[0]; }
+  LOperand* y() { return inputs_[1]; }
+  LOperand* z() { return inputs_[2]; }
+  LOperand* w() { return inputs_[3]; }
+  BuiltinFunctionId op() const { return op_; }
+
+  virtual Opcode opcode() const V8_OVERRIDE {
+    return LInstruction::kQuarternarySIMDOperation;
+  }
+  virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+  virtual const char* Mnemonic() const V8_OVERRIDE;
+  static LQuarternarySIMDOperation* cast(LInstruction* instr) {
+    ASSERT(instr->IsQuarternarySIMDOperation());
+    return reinterpret_cast<LQuarternarySIMDOperation*>(instr);
+  }
+
+  DECLARE_HYDROGEN_ACCESSOR(QuarternarySIMDOperation)
+
+ private:
+  BuiltinFunctionId op_;
+};
+
+
class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
};
+// Returns true when the element size's shift exceeds what an x64 addressing
+// mode scale factor can encode, so the key must be pre-scaled into a temp
+// register.  With 31-bit smis a smi key carries an extra tag shift that
+// effectively raises the budget by kSmiTagSize.
+inline static bool ExternalArrayOpRequiresPreScale(
+    Representation key_representation,
+    ElementsKind kind) {
+  int shift_size = ElementsKindToShiftSize(kind);
+  return SmiValuesAre31Bits() && key_representation.IsSmi()
+      ? shift_size > static_cast<int>(maximal_scale_factor) + kSmiTagSize
+      : shift_size > static_cast<int>(maximal_scale_factor);
+}
+
+
inline static bool ExternalArrayOpRequiresTemp(
    Representation key_representation,
    ElementsKind elements_kind) {
  // Operations that require the key to be divided by two to be converted into
  // an index cannot fold the scale operation into a load and need an extra
  // temp register to do the work.
+  // A temp is also needed when the element is too wide for the addressing
+  // mode's scale factor (e.g. 16-byte SIMD elements) and must be pre-scaled.
-  return SmiValuesAre31Bits() && key_representation.IsSmi() &&
+  return ExternalArrayOpRequiresPreScale(key_representation, elements_kind) ||
+      (SmiValuesAre31Bits() && key_representation.IsSmi() &&
      (elements_kind == EXTERNAL_INT8_ELEMENTS ||
       elements_kind == EXTERNAL_UINT8_ELEMENTS ||
       elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
       elements_kind == UINT8_ELEMENTS ||
       elements_kind == INT8_ELEMENTS ||
-      elements_kind == UINT8_CLAMPED_ELEMENTS);
+      elements_kind == UINT8_CLAMPED_ELEMENTS));
}
};
+// Boxes a SIMD128 xmm value into a heap object.  Needs three temp registers
+// for inline allocation; the deferred path calls into the runtime.
+// NOTE(review): `explicit` is redundant on a four-argument constructor.
+class LSIMD128ToTagged V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+  explicit LSIMD128ToTagged(LOperand* value, LOperand* temp,
+                            LOperand* temp2, LOperand* temp3) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+    temps_[1] = temp2;
+    temps_[2] = temp3;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+  LOperand* temp3() { return temps_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SIMD128ToTagged, "simd128-tag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
// Sometimes truncating conversion from a tagged value to an int32.
class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
};
+// Unboxes a tagged SIMD128 heap object into an xmm register; deoptimizes on
+// smis or wrong instance type.  |representation_| selects the target SIMD
+// kind (Float32x4 / Float64x2 / Int32x4).
+class LTaggedToSIMD128 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+  explicit LTaggedToSIMD128(LOperand* value, LOperand* temp,
+                            Representation representation)
+      : representation_(representation) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+  Representation representation() const { return representation_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToSIMD128, "simd128-untag")
+  DECLARE_HYDROGEN_ACCESSOR(Change);
+ private:
+  Representation representation_;
+};
+
+
class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
}
// R12 to r15 are callee save on all platforms.
if (fp_mode == kSaveFPRegs) {
- subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ subp(rsp, Immediate(kSIMD128Size * XMMRegister::kMaxNumRegisters));
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movsd(Operand(rsp, i * kDoubleSize), reg);
+ movups(Operand(rsp, i * kSIMD128Size), reg);
}
}
}
if (fp_mode == kSaveFPRegs) {
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movsd(reg, Operand(rsp, i * kDoubleSize));
+ movups(reg, Operand(rsp, i * kSIMD128Size));
}
- addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ addp(rsp, Immediate(kSIMD128Size * XMMRegister::kMaxNumRegisters));
}
for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
Register reg = saved_regs[i];
}
+// dst = |dst| for four packed floats: clears each lane's sign bit by
+// AND-ing with 0x7FFFFFFF.  Clobbers kScratchRegister.
+void MacroAssembler::absps(XMMRegister dst) {
+  static const struct V8_ALIGNED(16) {
+    uint32_t a;
+    uint32_t b;
+    uint32_t c;
+    uint32_t d;
+  } float_absolute_constant =
+      { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF };
+  Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_absolute_constant));
+  andps(dst, Operand(kScratchRegister, 0));
+}
+
+
+// dst = |dst| for two packed doubles: clears each lane's sign bit by
+// AND-ing with 0x7FFFFFFFFFFFFFFF.  Clobbers kScratchRegister.
+void MacroAssembler::abspd(XMMRegister dst) {
+  static const struct V8_ALIGNED(16) {
+    uint64_t a;
+    uint64_t b;
+  } double_absolute_constant =
+      { V8_UINT64_C(0x7FFFFFFFFFFFFFFF), V8_UINT64_C(0x7FFFFFFFFFFFFFFF) };
+  Set(kScratchRegister, reinterpret_cast<intptr_t>(&double_absolute_constant));
+  andpd(dst, Operand(kScratchRegister, 0));
+}
+
+
+// dst = -dst for four packed floats: flips each lane's sign bit by
+// XOR-ing with 0x80000000.  Clobbers kScratchRegister.
+void MacroAssembler::negateps(XMMRegister dst) {
+  static const struct V8_ALIGNED(16) {
+    uint32_t a;
+    uint32_t b;
+    uint32_t c;
+    uint32_t d;
+  } float_negate_constant =
+      { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
+  Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_negate_constant));
+  xorps(dst, Operand(kScratchRegister, 0));
+}
+
+
+// dst = -dst for two packed doubles: flips each lane's sign bit by
+// XOR-ing with 0x8000000000000000.  Clobbers kScratchRegister.
+void MacroAssembler::negatepd(XMMRegister dst) {
+  // Renamed from the copy-pasted "double_absolute_constant": this is the
+  // sign-bit *negate* mask, matching float_negate_constant in negateps.
+  static const struct V8_ALIGNED(16) {
+    uint64_t a;
+    uint64_t b;
+  } double_negate_constant =
+      { V8_UINT64_C(0x8000000000000000), V8_UINT64_C(0x8000000000000000) };
+  Set(kScratchRegister, reinterpret_cast<intptr_t>(&double_negate_constant));
+  xorpd(dst, Operand(kScratchRegister, 0));
+}
+
+
+// dst = ~dst bitwise over all 128 bits: XOR with an all-ones mask.
+// Clobbers kScratchRegister.
+void MacroAssembler::notps(XMMRegister dst) {
+  static const struct V8_ALIGNED(16) {
+    uint32_t a;
+    uint32_t b;
+    uint32_t c;
+    uint32_t d;
+  } float_not_constant =
+      { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+  Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_not_constant));
+  xorps(dst, Operand(kScratchRegister, 0));
+}
+
+
+// dst = -dst for four packed int32 lanes, via two's complement:
+// ~dst + 1 in each lane.  Clobbers kScratchRegister (through notps).
+void MacroAssembler::pnegd(XMMRegister dst) {
+  static const struct V8_ALIGNED(16) {
+    uint32_t a;
+    uint32_t b;
+    uint32_t c;
+    uint32_t d;
+  } int32_one_constant = { 0x1, 0x1, 0x1, 0x1 };
+  notps(dst);
+  Set(kScratchRegister, reinterpret_cast<intptr_t>(&int32_one_constant));
+  paddd(dst, Operand(kScratchRegister, 0));
+}
+
+
+
void MacroAssembler::JumpIfNotString(Register object,
Register object_map,
Label* not_string,
#endif
// Optionally save all XMM registers.
if (save_doubles) {
- int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
+ int space = XMMRegister::kMaxNumAllocatableRegisters * kSIMD128Size +
arg_stack_space * kRegisterSize;
subp(rsp, Immediate(space));
int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
- movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
+ movups(Operand(rbp, offset - ((i + 1) * kSIMD128Size)), reg);
}
} else if (arg_stack_space > 0) {
subp(rsp, Immediate(arg_stack_space * kRegisterSize));
int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
- movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
+ movups(reg, Operand(rbp, offset - ((i + 1) * kSIMD128Size)));
}
}
// Get the return address from the stack and restore the frame pointer.
}
+#define SIMD128_HEAP_ALLOCATE_FUNCTIONS(V) \
+ V(Float32x4, float32x4) \
+ V(Float64x2, float64x2) \
+ V(Int32x4, int32x4)
+
+#define DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION(TYPE, type) \
+void MacroAssembler::Allocate##TYPE(Register result, \
+ Register scratch1, \
+ Register scratch2, \
+ Register scratch3, \
+ Label* gc_required) { \
+ /* Allocate SIMD128 object. */ \
+ Allocate(TYPE::kSize, result, scratch1, no_reg, gc_required, TAG_OBJECT);\
+ Handle<Map> simd128_map( \
+ isolate()->native_context()->type##_function()->initial_map()); \
+ MoveHeapObject(kScratchRegister, simd128_map); \
+ movp(FieldOperand(result, JSObject::kMapOffset), \
+ kScratchRegister); \
+ MoveHeapObject(kScratchRegister, \
+ isolate()->factory()->empty_fixed_array()); \
+ movp(FieldOperand(result, JSObject::kPropertiesOffset), \
+ kScratchRegister); \
+ movp(FieldOperand(result, JSObject::kElementsOffset), \
+ kScratchRegister); \
+ /* Allocate FixedTypedArray object. */ \
+ Allocate(FixedTypedArrayBase::kDataOffset + k##TYPE##Size, \
+ scratch1, scratch2, no_reg, gc_required, TAG_OBJECT); \
+ MoveHeapObject(kScratchRegister, \
+ isolate()->factory()->fixed_##type##_array_map()); \
+ movp(FieldOperand(scratch1, FixedTypedArrayBase::kMapOffset), \
+ kScratchRegister); \
+ movp(scratch3, Immediate(1)); \
+ Integer32ToSmi(scratch2, scratch3); \
+ movp(FieldOperand(scratch1, FixedTypedArrayBase::kLengthOffset), \
+ scratch2); \
+ /* Assign FixedTypedArray object to SIMD128 object. */ \
+ movp(FieldOperand(result, TYPE::kValueOffset), scratch1); \
+}
+
+SIMD128_HEAP_ALLOCATE_FUNCTIONS(DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION)
+
+
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
Register scratch1,
// ---------------------------------------------------------------------------
+ // SIMD macros.
+ void absps(XMMRegister dst);
+ void abspd(XMMRegister dst);
+ void negateps(XMMRegister dst);
+ void negatepd(XMMRegister dst);
+ void notps(XMMRegister dst);
+ void pnegd(XMMRegister dst);
+
+
+ // ---------------------------------------------------------------------------
// String macros.
// Generate code to do a lookup in the number string cache. If the number in
Register scratch,
Label* gc_required);
+ // Allocate a float32x4, float64x2 or int32x4 object in new space with
+ // undefined value.
+ // Returns tagged pointer in result register, or jumps to gc_required if new
+ // space is full.
+ void AllocateFloat32x4(Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+
+ void AllocateInt32x4(Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+
+ void AllocateFloat64x2(Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+
// Allocate a sequential string. All the header fields of the string object
// are initialized.
void AllocateTwoByteString(Register result,
__ psrlq(xmm0, xmm1);
__ por(xmm0, xmm1);
}
+ {
+ __ cvttss2si(edx, Operand(ebx, ecx, times_4, 10000));
+ __ cvtsi2sd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ movsd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ movsd(Operand(ebx, ecx, times_4, 10000), xmm1);
+ // 128 bit move instructions.
+ __ movdqa(xmm0, Operand(ebx, ecx, times_4, 10000));
+ __ movdqa(Operand(ebx, ecx, times_4, 10000), xmm0);
+ __ movdqu(xmm0, Operand(ebx, ecx, times_4, 10000));
+ __ movdqu(Operand(ebx, ecx, times_4, 10000), xmm0);
+
+ __ addsd(xmm1, xmm0);
+ __ mulsd(xmm1, xmm0);
+ __ subsd(xmm1, xmm0);
+ __ divsd(xmm1, xmm0);
+ __ ucomisd(xmm0, xmm1);
+ __ cmpltsd(xmm0, xmm1);
+
+ __ andpd(xmm0, xmm1);
+ __ psllq(xmm0, 17);
+ __ psllq(xmm0, xmm1);
+ __ psrlq(xmm0, 17);
+ __ psrlq(xmm0, xmm1);
+ __ por(xmm0, xmm1);
+
+ // New instructions introduced by SIMD.
+ __ cvtdq2ps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ cvtdq2ps(xmm1, xmm0);
+ __ cvtps2dq(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ cvtps2dq(xmm1, xmm0);
+ __ paddd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ paddd(xmm1, xmm0);
+ __ psubd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ psubd(xmm1, xmm0);
+ __ pmuludq(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ pmuludq(xmm1, xmm0);
+ __ punpackldq(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ punpackldq(xmm1, xmm0);
+ {
+ __ shufps(xmm1, xmm1, 0x0);
+ __ movups(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ movups(Operand(ebx, ecx, times_4, 10000), xmm1);
+
+ __ andps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ andps(xmm1, xmm0);
+ __ xorps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ xorps(xmm1, xmm0);
+ __ orps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ orps(xmm1, xmm0);
+
+ __ addps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ addps(xmm1, xmm0);
+ __ subps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ subps(xmm1, xmm0);
+ __ mulps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ mulps(xmm1, xmm0);
+ __ divps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ divps(xmm1, xmm0);
+ __ minps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ minps(xmm1, xmm0);
+ __ maxps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ maxps(xmm1, xmm0);
+ __ rcpps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ rcpps(xmm1, xmm0);
+ __ rsqrtps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ rsqrtps(xmm1, xmm0);
+ __ sqrtps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ sqrtps(xmm1, xmm0);
+
+ __ cmpeqps(xmm1, xmm0);
+ __ cmpltps(xmm1, xmm0);
+ __ cmpleps(xmm1, xmm0);
+ __ cmpneqps(xmm1, xmm0);
+ __ cmpnltps(xmm1, xmm0);
+ __ cmpnleps(xmm1, xmm0);
+ }
+ }
// cmov.
{
__ pextrd(eax, xmm0, 1);
__ pinsrd(xmm1, eax, 0);
__ extractps(eax, xmm1, 0);
+ __ insertps(xmm1, xmm0, 0);
+ __ pmulld(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ pmulld(xmm1, xmm0);
}
}
shell += ".exe"
output = commands.Execute(
context.command_prefix +
- [shell, "--allow-natives-syntax", "-e",
+ [shell, "--allow-natives-syntax", "--simd-object", "-e",
"try { var natives = %ListNatives();"
" for (var n in natives) { print(natives[n]); }"
"} catch(e) {}"] +
tests = []
for line in output.stdout.strip().split():
(name, argc) = line.split(",")
- flags = ["--allow-natives-syntax",
+ flags = ["--allow-natives-syntax", "--simd-object",
"-e", "var NAME = '%s', ARGC = %s;" % (name, argc)]
test = testcase.TestCase(self, name, flags)
tests.append(test)
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
+// Flags: --allow-natives-syntax --harmony --simd-object
+%AllocateFloat32x4();
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
+// Flags: --allow-natives-syntax --harmony --simd-object
+%AllocateFloat64x2();
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
+// Flags: --allow-natives-syntax --harmony --simd-object
+%AllocateInt32x4();
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
+// Flags: --allow-natives-syntax --harmony --simd-object
+var _self = SIMD.float32x4(0.0, 0.0, 0.0, 0.0);
+var _lo = SIMD.float32x4(0.0, 0.0, 0.0, 0.0);
+var _hi = SIMD.float32x4(0.0, 0.0, 0.0, 0.0);
+%Float32x4Clamp(_self, _lo, _hi);
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
+// Flags: --allow-natives-syntax --harmony --simd-object
+var _self = SIMD.float32x4(0.0, 0.0, 0.0, 0.0);
+%Float32x4GetSignMask(_self);
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
+// Flags: --allow-natives-syntax --harmony --simd-object
+var _self = SIMD.float64x2(0.0, 0.0);
+var _lo = SIMD.float64x2(0.0, 0.0);
+var _hi = SIMD.float64x2(0.0, 0.0);
+%Float64x2Clamp(_self, _lo, _hi);
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
+// Flags: --allow-natives-syntax --harmony --simd-object
+var _self = SIMD.float64x2(0.0, 0.0);
+%Float64x2GetSignMask(_self);
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
+// Flags: --allow-natives-syntax --harmony --simd-object
+var _self = SIMD.int32x4(0, 0, 0, 0);
+%Int32x4GetSignMask(_self);
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
+// Flags: --allow-natives-syntax --harmony --simd-object
+var _self = SIMD.int32x4(0, 0, 0, 0);
+var _tv = SIMD.float32x4(0.0, 0.0, 0.0, 0.0);
+var _fv = SIMD.float32x4(0.0, 0.0, 0.0, 0.0);
+%Int32x4Select(_self, _tv, _fv);
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax --use-escape-analysis
+
+function testArgumentsObjectwithFloat32x4Field() {
+ "use strict";
+ var forceDeopt = { deopt:false };
+ function inner(a,b,c,d,e,f,g,h,i,j,k) {
+ var args = arguments;
+ forceDeopt.deopt;
+ assertSame(11, args.length);
+ assertSame(a, args[0]);
+ assertSame(b, args[1]);
+ assertSame(c, args[2]);
+ assertSame(d, args[3]);
+ assertSame(e, args[4]);
+ assertSame(f, args[5]);
+ assertSame(g, args[6]);
+ assertSame(h, args[7]);
+ assertSame(i, args[8]);
+ assertSame(j, args[9]);
+ assertEquals(1, args[10].x);
+ assertEquals(2, args[10].y);
+ assertEquals(3, args[10].z);
+ assertEquals(4, args[10].w);
+ }
+
+ var a = 0.5;
+ var b = 1.7;
+ var c = 123;
+ function outer() {
+ inner(
+ a - 0.3, // double in double register
+ b + 2.3, // integer in double register
+ c + 321, // integer in general register
+ c - 456, // integer in stack slot
+ a + 0.1, a + 0.2, a + 0.3, a + 0.4, a + 0.5,
+ a + 0.6, // double in stack slot
+ SIMD.float32x4(1, 2, 3, 4)
+ );
+ }
+
+ outer();
+ outer();
+ %OptimizeFunctionOnNextCall(outer);
+ outer();
+ delete forceDeopt.deopt;
+ outer();
+}
+
+testArgumentsObjectwithFloat32x4Field();
+
+function testArgumentsObjectwithInt32x4Field() {
+ "use strict";
+ var forceDeopt = { deopt:false };
+ function inner(a,b,c,d,e,f,g,h,i,j,k) {
+ var args = arguments;
+ forceDeopt.deopt;
+ assertSame(11, args.length);
+ assertSame(a, args[0]);
+ assertSame(b, args[1]);
+ assertSame(c, args[2]);
+ assertSame(d, args[3]);
+ assertSame(e, args[4]);
+ assertSame(f, args[5]);
+ assertSame(g, args[6]);
+ assertSame(h, args[7]);
+ assertSame(i, args[8]);
+ assertSame(j, args[9]);
+ assertEquals(1, args[10].x);
+ assertEquals(2, args[10].y);
+ assertEquals(3, args[10].z);
+ assertEquals(4, args[10].w);
+ }
+
+ var a = 0.5;
+ var b = 1.7;
+ var c = 123;
+ function outer() {
+ inner(
+ a - 0.3, // double in double register
+ b + 2.3, // integer in double register
+ c + 321, // integer in general register
+ c - 456, // integer in stack slot
+ a + 0.1, a + 0.2, a + 0.3, a + 0.4, a + 0.5,
+ a + 0.6, // double in stack slot
+ SIMD.int32x4(1, 2, 3, 4)
+ );
+ }
+
+ outer();
+ outer();
+ %OptimizeFunctionOnNextCall(outer);
+ outer();
+ delete forceDeopt.deopt;
+ outer();
+}
+
+testArgumentsObjectwithInt32x4Field();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax
+
+function testArithmeticOperators() {
+ var a = SIMD.float32x4.zero();
+ var b = SIMD.float32x4.zero();
+ var c;
+
+ c = a + b;
+ assertEquals('float32x4(0,0,0,0)float32x4(0,0,0,0)', c);
+ c = a++;
+ assertEquals(NaN, c);
+ c = a - b;
+ assertEquals(NaN, c);
+ c = a--;
+ assertEquals(NaN, c);
+ c = a * b;
+ assertEquals(NaN, c);
+ c = a / b;
+ assertEquals(NaN, c);
+ c = a % b;
+ assertEquals(NaN, c);
+}
+
+testArithmeticOperators();
+testArithmeticOperators();
+%OptimizeFunctionOnNextCall(testArithmeticOperators);
+testArithmeticOperators();
+
+
+function testBitwiseOperators() {
+ var a = SIMD.float32x4.zero();
+ var b = SIMD.float32x4.zero();
+ var c;
+ c = a | b;
+ assertEquals(0, c);
+ c = a & b;
+ assertEquals(0, c);
+ c = a ^ b;
+ assertEquals(0, c);
+ c = ~a;
+ assertEquals(-1, c);
+ c = a << 0;
+ assertEquals(0, c);
+ c = a >> 0;
+ assertEquals(0, c);
+ c = a >>> 0;
+ assertEquals(0, c);
+}
+
+testBitwiseOperators();
+testBitwiseOperators();
+%OptimizeFunctionOnNextCall(testBitwiseOperators);
+testBitwiseOperators();
+
+
+function testAssignmentOperators() {
+ var a = SIMD.float32x4.zero();
+ var b = SIMD.float32x4.zero();
+ var c = a;
+ c += b;
+ assertEquals('float32x4(0,0,0,0)float32x4(0,0,0,0)', c);
+ c -= b;
+ assertEquals(NaN, c);
+ c *= b;
+ assertEquals(NaN, c);
+ c /= b;
+ assertEquals(NaN, c);
+ c %= b;
+ assertEquals(NaN, c);
+
+ c &= b;
+ assertEquals(0, c);
+ c |= b;
+ assertEquals(0, c);
+ c ^= b;
+ assertEquals(0, c);
+ c <<= b;
+ assertEquals(0, c);
+ c >>= b;
+ assertEquals(0, c);
+ c >>>= b;
+ assertEquals(0, c);
+}
+
+testAssignmentOperators();
+testAssignmentOperators();
+%OptimizeFunctionOnNextCall(testAssignmentOperators);
+testAssignmentOperators();
+
+
+function testStringOperators() {
+ var a = SIMD.float32x4.zero();
+ var b = "0";
+ var c = a;
+ c += b;
+ assertEquals("float32x4(0,0,0,0)0", c);
+ c = b + a;
+ assertEquals("0float32x4(0,0,0,0)", c);
+}
+
+testStringOperators();
+testStringOperators();
+%OptimizeFunctionOnNextCall(testStringOperators);
+testStringOperators();
+
+
+function testComparisionOperators() {
+ var a = SIMD.float32x4.zero();
+ var b = SIMD.float32x4.zero();
+ assertEquals(false, a == b);
+ assertEquals(true, a != b);
+ assertEquals(false, a === b);
+ assertEquals(true, a !== b);
+ assertEquals(false, a > b);
+ assertEquals(true, a >= b);
+ assertEquals(false, a < b);
+ assertEquals(true, a <= b);
+}
+
+testComparisionOperators();
+testComparisionOperators();
+// TODO(ningxin): optimized code will get opposite result.
+//%OptimizeFunctionOnNextCall(testComparisionOperators);
+testComparisionOperators();
+
+
+function testLogicalOperators() {
+ var a = SIMD.float32x4.zero();
+ var b = SIMD.float32x4.splat(1);
+ assertEquals(1, (a && b).x);
+ assertEquals(1, (a && b).y);
+ assertEquals(1, (a && b).z);
+ assertEquals(1, (a && b).w);
+ assertEquals(0, (a || b).x);
+ assertEquals(0, (a || b).y);
+ assertEquals(0, (a || b).z);
+ assertEquals(0, (a || b).w);
+ assertEquals(false, !a);
+}
+
+testLogicalOperators();
+testLogicalOperators();
+%OptimizeFunctionOnNextCall(testLogicalOperators);
+testLogicalOperators();
+
+
+function testConditionalOperators() {
+ var a = SIMD.int32x4.zero();
+ var c = a ? 1 : 0;
+ assertEquals(1, c);
+}
+
+testConditionalOperators();
+testConditionalOperators();
+%OptimizeFunctionOnNextCall(testConditionalOperators);
+testConditionalOperators();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax
+
+function testArithmeticOperators() {
+ var a = SIMD.float64x2.zero();
+ var b = SIMD.float64x2.zero();
+ var c;
+
+ c = a + b;
+ assertEquals('float64x2(0,0)float64x2(0,0)', c);
+ c = a++;
+ assertEquals(NaN, c);
+ c = a - b;
+ assertEquals(NaN, c);
+ c = a--;
+ assertEquals(NaN, c);
+ c = a * b;
+ assertEquals(NaN, c);
+ c = a / b;
+ assertEquals(NaN, c);
+ c = a % b;
+ assertEquals(NaN, c);
+}
+
+testArithmeticOperators();
+testArithmeticOperators();
+%OptimizeFunctionOnNextCall(testArithmeticOperators);
+testArithmeticOperators();
+
+
+function testBitwiseOperators() {
+ var a = SIMD.float64x2.zero();
+ var b = SIMD.float64x2.zero();
+ var c;
+ c = a | b;
+ assertEquals(0, c);
+ c = a & b;
+ assertEquals(0, c);
+ c = a ^ b;
+ assertEquals(0, c);
+ c = ~a;
+ assertEquals(-1, c);
+ c = a << 0;
+ assertEquals(0, c);
+ c = a >> 0;
+ assertEquals(0, c);
+ c = a >>> 0;
+ assertEquals(0, c);
+}
+
+testBitwiseOperators();
+testBitwiseOperators();
+%OptimizeFunctionOnNextCall(testBitwiseOperators);
+testBitwiseOperators();
+
+
+function testAssignmentOperators() {
+ var a = SIMD.float64x2.zero();
+ var b = SIMD.float64x2.zero();
+ var c = a;
+ c += b;
+ assertEquals('float64x2(0,0)float64x2(0,0)', c);
+ c -= b;
+ assertEquals(NaN, c);
+ c *= b;
+ assertEquals(NaN, c);
+ c /= b;
+ assertEquals(NaN, c);
+ c %= b;
+ assertEquals(NaN, c);
+
+ c &= b;
+ assertEquals(0, c);
+ c |= b;
+ assertEquals(0, c);
+ c ^= b;
+ assertEquals(0, c);
+ c <<= b;
+ assertEquals(0, c);
+ c >>= b;
+ assertEquals(0, c);
+ c >>>= b;
+ assertEquals(0, c);
+}
+
+testAssignmentOperators();
+testAssignmentOperators();
+%OptimizeFunctionOnNextCall(testAssignmentOperators);
+testAssignmentOperators();
+
+
+function testStringOperators() {
+ var a = SIMD.float64x2.zero();
+ var b = "0";
+ var c = a;
+ c += b;
+ assertEquals("float64x2(0,0)0", c);
+ c = b + a;
+ assertEquals("0float64x2(0,0)", c);
+}
+
+testStringOperators();
+testStringOperators();
+%OptimizeFunctionOnNextCall(testStringOperators);
+testStringOperators();
+
+
+function testComparisionOperators() {
+ var a = SIMD.float64x2.zero();
+ var b = SIMD.float64x2.zero();
+ assertEquals(false, a == b);
+ assertEquals(true, a != b);
+ assertEquals(false, a === b);
+ assertEquals(true, a !== b);
+ assertEquals(false, a > b);
+ assertEquals(true, a >= b);
+ assertEquals(false, a < b);
+ assertEquals(true, a <= b);
+}
+
+testComparisionOperators();
+testComparisionOperators();
+// TODO(ningxin): optimized code will get opposite result.
+//%OptimizeFunctionOnNextCall(testComparisionOperators);
+testComparisionOperators();
+
+
+function testLogicalOperators() {
+ var a = SIMD.float64x2.zero();
+ var b = SIMD.float64x2.splat(1);
+ assertEquals(1, (a && b).x);
+ assertEquals(1, (a && b).y);
+ assertEquals(0, (a || b).x);
+ assertEquals(0, (a || b).y);
+ assertEquals(false, !a);
+}
+
+testLogicalOperators();
+testLogicalOperators();
+%OptimizeFunctionOnNextCall(testLogicalOperators);
+testLogicalOperators();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax --use-escape-analysis
+
+function testCapturedObjectwithFloat32x4Field() {
+ var deopt = { deopt:false };
+ function constructor() {
+ this.x = 1.1;
+ this.y = SIMD.float32x4(1,2,3,4);
+ }
+ function field(x) {
+ var o = new constructor();
+ o.x = x;
+ deopt.deopt;
+ assertEquals(x, o.x);
+ assertEquals(o.y.x, 1);
+ assertEquals(o.y.y, 2);
+ assertEquals(o.y.z, 3);
+ assertEquals(o.y.w, 4);
+ }
+ field(1); field(2);
+ // TODO(ningxin): fails in x64 test.
+ //%OptimizeFunctionOnNextCall(field);
+ field(3); field(4);
+ delete deopt.deopt;
+ field(5); field(6);
+}
+
+testCapturedObjectwithFloat32x4Field();
+
+function testCapturedObjectwithInt32x4Field() {
+ var deopt = { deopt:false };
+ function constructor() {
+ this.x = 1.1;
+ this.y = SIMD.int32x4(1,2,3,4);
+ }
+ function field(x) {
+ var o = new constructor();
+ o.x = x;
+ deopt.deopt;
+ assertEquals(x, o.x);
+ assertEquals(o.y.x, 1);
+ assertEquals(o.y.y, 2);
+ assertEquals(o.y.z, 3);
+ assertEquals(o.y.w, 4);
+ }
+ field(1); field(2);
+ // TODO(ningxin): fix the failures.
+ //%OptimizeFunctionOnNextCall(field);
+ field(3); field(4);
+ delete deopt.deopt;
+ field(5); field(6);
+}
+
+testCapturedObjectwithInt32x4Field();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax
+
+function testObject() {
+ var a = SIMD.float32x4.zero();
+ var b = Object(a);
+ assertEquals(0, b.x);
+ assertEquals(0, b.y);
+ assertEquals(0, b.z);
+ assertEquals(0, b.w);
+ assertEquals(typeof(b), "object");
+ assertEquals(typeof(b.valueOf()), "object");
+ assertEquals(Object.prototype.toString.call(b), "[object Object]");
+}
+
+testObject();
+testObject();
+%OptimizeFunctionOnNextCall(testObject);
+testObject();
+
+
+function testNumber() {
+ var a = SIMD.float32x4.zero();
+ var b = Number(a);
+ assertEquals(NaN, b);
+}
+
+testNumber();
+testNumber();
+%OptimizeFunctionOnNextCall(testNumber);
+testNumber();
+
+
+function testString() {
+ var a = SIMD.float32x4.zero();
+ var b = String(a);
+ assertEquals("float32x4(0,0,0,0)", b);
+}
+
+testString();
+testString();
+%OptimizeFunctionOnNextCall(testString);
+testString();
+
+
+function testBoolean() {
+ var a = SIMD.float32x4.zero();
+ var b = Boolean(a);
+ assertEquals(true, b);
+}
+
+testBoolean();
+testBoolean();
+%OptimizeFunctionOnNextCall(testBoolean);
+testBoolean();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax
+
+function testdeopt(a, b) {
+ var a4 = SIMD.float32x4(1.0, -2.0, 3.0, -4.0);
+ var b4 = SIMD.float32x4.abs(a4);
+
+ if (a > 0) {
+ a = 0;
+ } else {
+ a += b; //deopt
+ }
+
+ assertEquals(1.0, b4.x);
+ assertEquals(2.0, b4.y);
+ assertEquals(3.0, b4.z);
+ assertEquals(4.0, b4.w);
+}
+
+testdeopt(1, 1);
+testdeopt(1, 1);
+%OptimizeFunctionOnNextCall(testdeopt);
+testdeopt(0, 1);
+
+function testdeopt2() {
+ var a4 = SIMD.float32x4(1.0, -1.0, 1.0, -1.0);
+ var b4 = SIMD.float32x4.abs(a4);
+
+ assertEquals(1.0, b4.x);
+ assertEquals(1.0, b4.y);
+ assertEquals(1.0, b4.z);
+ assertEquals(1.0, b4.w);
+
+ var new_a4 = new SIMD.float32x4(1.0, -1.0, 1.0, -1.0);
+ var new_b4 = SIMD.float32x4.abs(new_a4);
+
+ assertEquals(1.0, new_b4.x);
+ assertEquals(1.0, new_b4.y);
+ assertEquals(1.0, new_b4.z);
+ assertEquals(1.0, new_b4.w);
+
+ // Verifying deoptimization
+ assertEquals(1.0, b4.x);
+ assertEquals(1.0, b4.y);
+ assertEquals(1.0, b4.z);
+ assertEquals(1.0, b4.w);
+}
+
+testdeopt2();
+testdeopt2();
+%OptimizeFunctionOnNextCall(testdeopt2);
+testdeopt2();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax
+
+function testConstructor() {
+ var f4 = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ assertEquals(1.0, f4.x);
+ assertEquals(2.0, f4.y);
+ assertEquals(3.0, f4.z);
+ assertEquals(4.0, f4.w);
+
+ f4 = SIMD.float32x4(1.1, 2.2, 3.3, 4.4);
+ assertEquals(1.100000023841858, f4.x);
+ assertEquals(2.200000047683716, f4.y);
+ assertEquals(3.299999952316284, f4.z);
+ assertEquals(4.400000095367432, f4.w);
+}
+
+testConstructor();
+testConstructor();
+%OptimizeFunctionOnNextCall(testConstructor);
+testConstructor();
+
+function testZeroConstructor() {
+ var z4 = SIMD.float32x4.zero();
+ assertEquals(0.0, z4.x);
+ assertEquals(0.0, z4.y);
+ assertEquals(0.0, z4.z);
+ assertEquals(0.0, z4.w);
+}
+
+testZeroConstructor();
+testZeroConstructor();
+%OptimizeFunctionOnNextCall(testZeroConstructor);
+testZeroConstructor();
+
+function testSplatConstructor() {
+ var z4 = SIMD.float32x4.splat(5.0);
+ assertEquals(5.0, z4.x);
+ assertEquals(5.0, z4.y);
+ assertEquals(5.0, z4.z);
+ assertEquals(5.0, z4.w);
+}
+
+testSplatConstructor();
+testSplatConstructor();
+%OptimizeFunctionOnNextCall(testSplatConstructor);
+testSplatConstructor();
+
+function testTypeof() {
+ var z4 = SIMD.float32x4.zero();
+ assertEquals(typeof(z4), "object");
+
+ var new_z4 = new SIMD.float32x4(0, 0, 0, 0);
+ assertEquals(typeof(new_z4), "object");
+ assertEquals(typeof(new_z4.valueOf()), "object");
+ assertEquals(Object.prototype.toString.call(new_z4), "[object Object]");
+}
+
+testTypeof();
+
+function testSignMaskGetter() {
+ var a = SIMD.float32x4(-1.0, -2.0, -3.0, -4.0);
+ assertEquals(0xf, a.signMask);
+ var b = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ assertEquals(0x0, b.signMask);
+ var c = SIMD.float32x4(1.0, -2.0, -3.0, 4.0);
+ assertEquals(0x6, c.signMask);
+}
+
+testSignMaskGetter();
+testSignMaskGetter();
+%OptimizeFunctionOnNextCall(testSignMaskGetter);
+testSignMaskGetter();
+
+function testSIMDAbs() {
+ var a4 = SIMD.float32x4(1.0, -1.0, 1.0, -1.0);
+ var b4 = SIMD.float32x4.abs(a4);
+
+ assertEquals(1.0, b4.x);
+ assertEquals(1.0, b4.y);
+ assertEquals(1.0, b4.z);
+ assertEquals(1.0, b4.w);
+}
+
+testSIMDAbs();
+testSIMDAbs();
+%OptimizeFunctionOnNextCall(testSIMDAbs);
+testSIMDAbs();
+
+function testSIMDNeg() {
+ var a4 = SIMD.float32x4(1.0, -1.0, 1.0, -1.0);
+ var b4 = SIMD.float32x4.neg(a4);
+
+ assertEquals(-1.0, b4.x);
+ assertEquals(1.0, b4.y);
+ assertEquals(-1.0, b4.z);
+ assertEquals(1.0, b4.w);
+}
+
+testSIMDNeg();
+testSIMDNeg();
+%OptimizeFunctionOnNextCall(testSIMDNeg);
+testSIMDNeg();
+
+function testSIMDAdd() {
+ var a4 = SIMD.float32x4(1.0, 1.0, 1.0, 1.0);
+ var b4 = SIMD.float32x4(2.0, 2.0, 2.0, 2.0);
+ var c4 = SIMD.float32x4.add(a4, b4);
+
+ assertEquals(3.0, c4.x);
+ assertEquals(3.0, c4.y);
+ assertEquals(3.0, c4.z);
+ assertEquals(3.0, c4.w);
+}
+
+testSIMDAdd();
+testSIMDAdd();
+%OptimizeFunctionOnNextCall(testSIMDAdd);
+testSIMDAdd();
+
+function testSIMDSub() {
+ var a4 = SIMD.float32x4(1.0, 1.0, 1.0, 1.0);
+ var b4 = SIMD.float32x4(2.0, 2.0, 2.0, 2.0);
+ var c4 = SIMD.float32x4.sub(a4, b4);
+
+ assertEquals(-1.0, c4.x);
+ assertEquals(-1.0, c4.y);
+ assertEquals(-1.0, c4.z);
+ assertEquals(-1.0, c4.w);
+}
+
+testSIMDSub();
+testSIMDSub();
+%OptimizeFunctionOnNextCall(testSIMDSub);
+testSIMDSub();
+
+function testSIMDMul() {
+ var a4 = SIMD.float32x4(1.0, 1.0, 1.0, 1.0);
+ var b4 = SIMD.float32x4(2.0, 2.0, 2.0, 2.0);
+ var c4 = SIMD.float32x4.mul(a4, b4);
+
+ assertEquals(2.0, c4.x);
+ assertEquals(2.0, c4.y);
+ assertEquals(2.0, c4.z);
+ assertEquals(2.0, c4.w);
+}
+
+testSIMDMul();
+testSIMDMul();
+%OptimizeFunctionOnNextCall(testSIMDMul);
+testSIMDMul();
+
+function testSIMDDiv() {
+ var a4 = SIMD.float32x4(1.0, 1.0, 1.0, 1.0);
+ var b4 = SIMD.float32x4(2.0, 2.0, 2.0, 2.0);
+ var c4 = SIMD.float32x4.div(a4, b4);
+
+ assertEquals(0.5, c4.x);
+ assertEquals(0.5, c4.y);
+ assertEquals(0.5, c4.z);
+ assertEquals(0.5, c4.w);
+}
+
+testSIMDDiv();
+testSIMDDiv();
+%OptimizeFunctionOnNextCall(testSIMDDiv);
+testSIMDDiv();
+
+function testSIMDClamp() {
+ var m = SIMD.float32x4(1.0, -2.0, 3.0, -4.0);
+ var lo = SIMD.float32x4(0.0, 0.0, 0.0, 0.0);
+ var hi = SIMD.float32x4(2.0, 2.0, 2.0, 2.0);
+ m = SIMD.float32x4.clamp(m, lo, hi);
+ assertEquals(1.0, m.x);
+ assertEquals(0.0, m.y);
+ assertEquals(2.0, m.z);
+ assertEquals(0.0, m.w);
+}
+
+testSIMDClamp();
+testSIMDClamp();
+%OptimizeFunctionOnNextCall(testSIMDClamp);
+testSIMDClamp();
+
+function testSIMDMin() {
+ var m = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ var n = SIMD.float32x4(1.0, 0.0, 2.5, 5.0);
+ m = SIMD.float32x4.min(m, n);
+ assertEquals(1.0, m.x);
+ assertEquals(0.0, m.y);
+ assertEquals(2.5, m.z);
+ assertEquals(4.0, m.w);
+}
+
+testSIMDMin();
+testSIMDMin();
+%OptimizeFunctionOnNextCall(testSIMDMin);
+testSIMDMin();
+
+function testSIMDMax() {
+ var m = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ var n = SIMD.float32x4(1.0, 0.0, 2.5, 5.0);
+ m = SIMD.float32x4.max(m, n);
+ assertEquals(1.0, m.x);
+ assertEquals(2.0, m.y);
+ assertEquals(3.0, m.z);
+ assertEquals(5.0, m.w);
+}
+
+testSIMDMax();
+testSIMDMax();
+%OptimizeFunctionOnNextCall(testSIMDMax);
+testSIMDMax();
+
+function testSIMDReciprocal() {
+ var m = SIMD.float32x4(1.0, 4.0, 9.0, 16.0);
+ m = SIMD.float32x4.reciprocal(m);
+ assertTrue(Math.abs(1.0 - m.x) <= 0.001);
+ assertTrue(Math.abs(0.25 - m.y) <= 0.001);
+ assertTrue(Math.abs(0.1111111 - m.z) <= 0.001);
+ assertTrue(Math.abs(0.0625 - m.w) <= 0.001);
+}
+
+testSIMDReciprocal();
+testSIMDReciprocal();
+%OptimizeFunctionOnNextCall(testSIMDReciprocal);
+testSIMDReciprocal();
+
+function testSIMDReciprocalSqrt() {
+ var m = SIMD.float32x4(1.0, 0.25, 0.111111, 0.0625);
+ m = SIMD.float32x4.reciprocalSqrt(m);
+ assertTrue(Math.abs(1.0 - m.x) <= 0.001);
+ assertTrue(Math.abs(2.0 - m.y) <= 0.001);
+ assertTrue(Math.abs(3.0 - m.z) <= 0.001);
+ assertTrue(Math.abs(4.0 - m.w) <= 0.001);
+}
+
+testSIMDReciprocalSqrt();
+testSIMDReciprocalSqrt();
+%OptimizeFunctionOnNextCall(testSIMDReciprocalSqrt);
+testSIMDReciprocalSqrt();
+
+function testSIMDScale() {
+ var m = SIMD.float32x4(1.0, -2.0, 3.0, -4.0);
+ m = SIMD.float32x4.scale(m, 20.0);
+ assertEquals(20.0, m.x);
+ assertEquals(-40.0, m.y);
+ assertEquals(60.0, m.z);
+ assertEquals(-80.0, m.w);
+}
+
+testSIMDScale();
+testSIMDScale();
+%OptimizeFunctionOnNextCall(testSIMDScale);
+testSIMDScale();
+
+function testSIMDSqrt() {
+ var m = SIMD.float32x4(1.0, 4.0, 9.0, 16.0);
+ m = SIMD.float32x4.sqrt(m);
+ assertEquals(1.0, m.x);
+ assertEquals(2.0, m.y);
+ assertEquals(3.0, m.z);
+ assertEquals(4.0, m.w);
+}
+
+testSIMDSqrt();
+testSIMDSqrt();
+%OptimizeFunctionOnNextCall(testSIMDSqrt);
+testSIMDSqrt();
+
+function testSIMDShuffle() {
+ var m = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ var xxxx = SIMD.float32x4.shuffle(m, SIMD.XXXX);
+ assertEquals(1.0, xxxx.x);
+ assertEquals(1.0, xxxx.y);
+ assertEquals(1.0, xxxx.z);
+ assertEquals(1.0, xxxx.w);
+ var yyyy = SIMD.float32x4.shuffle(m, SIMD.YYYY);
+ assertEquals(2.0, yyyy.x);
+ assertEquals(2.0, yyyy.y);
+ assertEquals(2.0, yyyy.z);
+ assertEquals(2.0, yyyy.w);
+ var zzzz = SIMD.float32x4.shuffle(m, SIMD.ZZZZ);
+ assertEquals(3.0, zzzz.x);
+ assertEquals(3.0, zzzz.y);
+ assertEquals(3.0, zzzz.z);
+ assertEquals(3.0, zzzz.w);
+ var wwww = SIMD.float32x4.shuffle(m, SIMD.WWWW);
+ assertEquals(4.0, wwww.x);
+ assertEquals(4.0, wwww.y);
+ assertEquals(4.0, wwww.z);
+ assertEquals(4.0, wwww.w);
+ var wzyx = SIMD.float32x4.shuffle(m, SIMD.WZYX);
+ assertEquals(4.0, wzyx.x);
+ assertEquals(3.0, wzyx.y);
+ assertEquals(2.0, wzyx.z);
+ assertEquals(1.0, wzyx.w);
+ var wwzz = SIMD.float32x4.shuffle(m, SIMD.WWZZ);
+ assertEquals(4.0, wwzz.x);
+ assertEquals(4.0, wwzz.y);
+ assertEquals(3.0, wwzz.z);
+ assertEquals(3.0, wwzz.w);
+ var xxyy = SIMD.float32x4.shuffle(m, SIMD.XXYY);
+ assertEquals(1.0, xxyy.x);
+ assertEquals(1.0, xxyy.y);
+ assertEquals(2.0, xxyy.z);
+ assertEquals(2.0, xxyy.w);
+ var yyww = SIMD.float32x4.shuffle(m, SIMD.YYWW);
+ assertEquals(2.0, yyww.x);
+ assertEquals(2.0, yyww.y);
+ assertEquals(4.0, yyww.z);
+ assertEquals(4.0, yyww.w);
+}
+
+testSIMDShuffle();
+testSIMDShuffle();
+%OptimizeFunctionOnNextCall(testSIMDShuffle);
+testSIMDShuffle();
+
+function testSIMDShuffleMix() {
+ var a = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ var b = SIMD.float32x4(5.0, 6.0, 7.0, 8.0);
+ var xxxx = SIMD.float32x4.shuffleMix(a, b, SIMD.XXXX);
+ assertEquals(1.0, xxxx.x);
+ assertEquals(1.0, xxxx.y);
+ assertEquals(5.0, xxxx.z);
+ assertEquals(5.0, xxxx.w);
+ var yyyy = SIMD.float32x4.shuffleMix(a, b, SIMD.YYYY);
+ assertEquals(2.0, yyyy.x);
+ assertEquals(2.0, yyyy.y);
+ assertEquals(6.0, yyyy.z);
+ assertEquals(6.0, yyyy.w);
+ var zzzz = SIMD.float32x4.shuffleMix(a, b, SIMD.ZZZZ);
+ assertEquals(3.0, zzzz.x);
+ assertEquals(3.0, zzzz.y);
+ assertEquals(7.0, zzzz.z);
+ assertEquals(7.0, zzzz.w);
+ var wwww = SIMD.float32x4.shuffleMix(a, b, SIMD.WWWW);
+ assertEquals(4.0, wwww.x);
+ assertEquals(4.0, wwww.y);
+ assertEquals(8.0, wwww.z);
+ assertEquals(8.0, wwww.w);
+ var wzyx = SIMD.float32x4.shuffleMix(a, b, SIMD.WZYX);
+ assertEquals(4.0, wzyx.x);
+ assertEquals(3.0, wzyx.y);
+ assertEquals(6.0, wzyx.z);
+ assertEquals(5.0, wzyx.w);
+ var wwzz = SIMD.float32x4.shuffleMix(a, b, SIMD.WWZZ);
+ assertEquals(4.0, wwzz.x);
+ assertEquals(4.0, wwzz.y);
+ assertEquals(7.0, wwzz.z);
+ assertEquals(7.0, wwzz.w);
+ var xxyy = SIMD.float32x4.shuffleMix(a, b, SIMD.XXYY);
+ assertEquals(1.0, xxyy.x);
+ assertEquals(1.0, xxyy.y);
+ assertEquals(6.0, xxyy.z);
+ assertEquals(6.0, xxyy.w);
+ var yyww = SIMD.float32x4.shuffleMix(a, b, SIMD.YYWW);
+ assertEquals(2.0, yyww.x);
+ assertEquals(2.0, yyww.y);
+ assertEquals(8.0, yyww.z);
+ assertEquals(8.0, yyww.w);
+}
+
+testSIMDShuffleMix();
+testSIMDShuffleMix();
+%OptimizeFunctionOnNextCall(testSIMDShuffleMix);
+testSIMDShuffleMix();
+
+function testSIMDSetters() {
+ var f = SIMD.float32x4.zero();
+ assertEquals(0.0, f.x);
+ assertEquals(0.0, f.y);
+ assertEquals(0.0, f.z);
+ assertEquals(0.0, f.w);
+ f = SIMD.float32x4.withX(f, 4.0);
+ assertEquals(4.0, f.x);
+ f = SIMD.float32x4.withY(f, 3.0);
+ assertEquals(3.0, f.y);
+ f = SIMD.float32x4.withZ(f, 2.0);
+ assertEquals(2.0, f.z);
+ f = SIMD.float32x4.withW(f, 1.0);
+ assertEquals(1.0, f.w);
+ f = SIMD.float32x4.zero();
+}
+
+testSIMDSetters();
+testSIMDSetters();
+%OptimizeFunctionOnNextCall(testSIMDSetters);
+testSIMDSetters();
+
+function testSIMDConversion() {
+ var m = SIMD.int32x4(0x3F800000, 0x40000000, 0x40400000, 0x40800000);
+ var n = SIMD.int32x4.bitsToFloat32x4(m);
+ assertEquals(1.0, n.x);
+ assertEquals(2.0, n.y);
+ assertEquals(3.0, n.z);
+ assertEquals(4.0, n.w);
+ n = SIMD.float32x4(5.0, 6.0, 7.0, 8.0);
+ m = SIMD.float32x4.bitsToInt32x4(n);
+ assertEquals(0x40A00000, m.x);
+ assertEquals(0x40C00000, m.y);
+ assertEquals(0x40E00000, m.z);
+ assertEquals(0x41000000, m.w);
+ // Flip sign using bit-wise operators.
+ n = SIMD.float32x4(9.0, 10.0, 11.0, 12.0);
+ m = SIMD.int32x4(0x80000000, 0x80000000, 0x80000000, 0x80000000);
+ var nMask = SIMD.float32x4.bitsToInt32x4(n);
+ nMask = SIMD.int32x4.xor(nMask, m); // flip sign.
+ n = SIMD.int32x4.bitsToFloat32x4(nMask);
+ assertEquals(-9.0, n.x);
+ assertEquals(-10.0, n.y);
+ assertEquals(-11.0, n.z);
+ assertEquals(-12.0, n.w);
+ nMask = SIMD.float32x4.bitsToInt32x4(n);
+ nMask = SIMD.int32x4.xor(nMask, m); // flip sign.
+ n = SIMD.int32x4.bitsToFloat32x4(nMask);
+ assertEquals(9.0, n.x);
+ assertEquals(10.0, n.y);
+ assertEquals(11.0, n.z);
+ assertEquals(12.0, n.w);
+}
+
+testSIMDConversion();
+testSIMDConversion();
+%OptimizeFunctionOnNextCall(testSIMDConversion);
+testSIMDConversion();
+
+function testSIMDConversion2() {
+ var m = SIMD.int32x4(1, 2, 3, 4);
+ var n = SIMD.int32x4.toFloat32x4(m);
+ assertEquals(1.0, n.x);
+ assertEquals(2.0, n.y);
+ assertEquals(3.0, n.z);
+ assertEquals(4.0, n.w);
+ n = SIMD.float32x4(5.0, 6.0, 7.0, 8.0);
+ m = SIMD.float32x4.toInt32x4(n);
+ assertEquals(5, m.x);
+ assertEquals(6, m.y);
+ assertEquals(7, m.z);
+ assertEquals(8, m.w);
+}
+
+testSIMDConversion2();
+testSIMDConversion2();
+%OptimizeFunctionOnNextCall(testSIMDConversion2);
+testSIMDConversion2();
+
+
+function testSIMDComparisons() {
+ var m = SIMD.float32x4(1.0, 2.0, 0.1, 0.001);
+ var n = SIMD.float32x4(2.0, 2.0, 0.001, 0.1);
+ var cmp;
+ cmp = SIMD.float32x4.lessThan(m, n);
+ assertEquals(-1, cmp.x);
+ assertEquals(0x0, cmp.y);
+ assertEquals(0x0, cmp.z);
+ assertEquals(-1, cmp.w);
+
+ cmp = SIMD.float32x4.lessThanOrEqual(m, n);
+ assertEquals(-1, cmp.x);
+ assertEquals(-1, cmp.y);
+ assertEquals(0x0, cmp.z);
+ assertEquals(-1, cmp.w);
+
+ cmp = SIMD.float32x4.equal(m, n);
+ assertEquals(0x0, cmp.x);
+ assertEquals(-1, cmp.y);
+ assertEquals(0x0, cmp.z);
+ assertEquals(0x0, cmp.w);
+
+ cmp = SIMD.float32x4.notEqual(m, n);
+ assertEquals(-1, cmp.x);
+ assertEquals(0x0, cmp.y);
+ assertEquals(-1, cmp.z);
+ assertEquals(-1, cmp.w);
+
+ cmp = SIMD.float32x4.greaterThanOrEqual(m, n);
+ assertEquals(0x0, cmp.x);
+ assertEquals(-1, cmp.y);
+ assertEquals(-1, cmp.z);
+ assertEquals(0x0, cmp.w);
+
+ cmp = SIMD.float32x4.greaterThan(m, n);
+ assertEquals(0x0, cmp.x);
+ assertEquals(0x0, cmp.y);
+ assertEquals(-1, cmp.z);
+ assertEquals(0x0, cmp.w);
+}
+
+testSIMDComparisons();
+testSIMDComparisons();
+%OptimizeFunctionOnNextCall(testSIMDComparisons);
+testSIMDComparisons();
+
+function testSIMDAnd() {
+ var m = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ var n = SIMD.float32x4(~1.0, 2.0, 3.0, 4.0);
+ o = SIMD.float32x4.and(m,n); // and
+ assertEquals(0, o.x);
+ assertEquals(2, o.y);
+ assertEquals(3, o.z);
+ assertEquals(4, o.w);
+}
+
+testSIMDAnd();
+testSIMDAnd();
+%OptimizeFunctionOnNextCall(testSIMDAnd);
+testSIMDAnd();
+
+function testSIMDOr() {
+ var m = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ var n = SIMD.float32x4(~1.0, 2.0, 3.0, 4.0);
+ var o = SIMD.float32x4.or(m,n); // or
+ assertEquals(-Infinity, o.x);
+ assertEquals(2.0, o.y);
+ assertEquals(3.0, o.z);
+ assertEquals(4.0, o.w);
+}
+
+testSIMDOr();
+testSIMDOr();
+%OptimizeFunctionOnNextCall(testSIMDOr);
+testSIMDOr();
+
+function testSIMDXor() {
+ var m = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ var n = SIMD.float32x4(~1.0, 2.0, 3.0, 4.0);
+ var o = SIMD.float32x4.xor(m,n); // xor
+ assertEquals(-Infinity, o.x);
+ assertEquals(0x0, o.y);
+ assertEquals(0x0, o.z);
+ assertEquals(0x0, o.w);
+}
+
+testSIMDXor();
+testSIMDXor();
+%OptimizeFunctionOnNextCall(testSIMDXor);
+testSIMDXor();
+
+function testSIMDNot() {
+ var m = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ m = SIMD.float32x4.not(m);
+ m = SIMD.float32x4.not(m);
+ assertEquals(1.0, m.x);
+ assertEquals(2.0, m.y);
+ assertEquals(3.0, m.z);
+ assertEquals(4.0, m.w);
+}
+
+testSIMDNot();
+testSIMDNot();
+%OptimizeFunctionOnNextCall(testSIMDNot);
+testSIMDNot();
+
+
+function testFloat32x4ArrayBasic() {
+ var a = new Float32x4Array(1);
+ assertEquals(1, a.length);
+ assertEquals(16, a.byteLength);
+ assertEquals(16, a.BYTES_PER_ELEMENT);
+ assertEquals(16, Float32x4Array.BYTES_PER_ELEMENT);
+ assertEquals(0, a.byteOffset);
+ assertTrue(undefined != a.buffer);
+ var b = new Float32x4Array(4);
+ assertEquals(4, b.length);
+ assertEquals(64, b.byteLength);
+ assertEquals(16, b.BYTES_PER_ELEMENT);
+ assertEquals(16, Float32x4Array.BYTES_PER_ELEMENT);
+ assertEquals(0, b.byteOffset);
+ assertTrue(undefined != b.buffer);
+}
+
+testFloat32x4ArrayBasic();
+
+function testFloat32x4ArrayGetAndSet() {
+ var a = new Float32x4Array(4);
+ a[0] = SIMD.float32x4(1, 2, 3, 4);
+ a[1] = SIMD.float32x4(5, 6, 7, 8);
+ a[2] = SIMD.float32x4(9, 10, 11, 12);
+ a[3] = SIMD.float32x4(13, 14, 15, 16);
+ assertEquals(a[0].x, 1);
+ assertEquals(a[0].y, 2);
+ assertEquals(a[0].z, 3);
+ assertEquals(a[0].w, 4);
+
+ assertEquals(a[1].x, 5);
+ assertEquals(a[1].y, 6);
+ assertEquals(a[1].z, 7);
+ assertEquals(a[1].w, 8);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+ assertEquals(a[2].z, 11);
+ assertEquals(a[2].w, 12);
+
+ assertEquals(a[3].x, 13);
+ assertEquals(a[3].y, 14);
+ assertEquals(a[3].z, 15);
+ assertEquals(a[3].w, 16);
+
+ var b = new Float32x4Array(4);
+ b.setAt(0,SIMD.float32x4(1, 2, 3, 4));
+ b.setAt(1,SIMD.float32x4(5, 6, 7, 8));
+ b.setAt(2,SIMD.float32x4(9, 10, 11, 12));
+ b.setAt(3,SIMD.float32x4(13, 14, 15, 16));
+
+ assertEquals(b.getAt(0).x, 1);
+ assertEquals(b.getAt(0).y, 2);
+ assertEquals(b.getAt(0).z, 3);
+ assertEquals(b.getAt(0).w, 4);
+
+ assertEquals(b.getAt(1).x, 5);
+ assertEquals(b.getAt(1).y, 6);
+ assertEquals(b.getAt(1).z, 7);
+ assertEquals(b.getAt(1).w, 8);
+
+ assertEquals(b.getAt(2).x, 9);
+ assertEquals(b.getAt(2).y, 10);
+ assertEquals(b.getAt(2).z, 11);
+ assertEquals(b.getAt(2).w, 12);
+
+ assertEquals(b.getAt(3).x, 13);
+ assertEquals(b.getAt(3).y, 14);
+ assertEquals(b.getAt(3).z, 15);
+ assertEquals(b.getAt(3).w, 16);
+}
+
+testFloat32x4ArrayGetAndSet();
+testFloat32x4ArrayGetAndSet();
+%OptimizeFunctionOnNextCall(testFloat32x4ArrayGetAndSet);
+testFloat32x4ArrayGetAndSet();
+
+function testFloat32x4ArraySwap() {
+ var a = new Float32x4Array(4);
+ a[0] = SIMD.float32x4(1, 2, 3, 4);
+ a[1] = SIMD.float32x4(5, 6, 7, 8);
+ a[2] = SIMD.float32x4(9, 10, 11, 12);
+ a[3] = SIMD.float32x4(13, 14, 15, 16);
+
+ // Swap element 0 and element 3
+ var t = a[0];
+ a[0] = a[3];
+ a[3] = t;
+
+ assertEquals(a[3].x, 1);
+ assertEquals(a[3].y, 2);
+ assertEquals(a[3].z, 3);
+ assertEquals(a[3].w, 4);
+
+ assertEquals(a[1].x, 5);
+ assertEquals(a[1].y, 6);
+ assertEquals(a[1].z, 7);
+ assertEquals(a[1].w, 8);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+ assertEquals(a[2].z, 11);
+ assertEquals(a[2].w, 12);
+
+ assertEquals(a[0].x, 13);
+ assertEquals(a[0].y, 14);
+ assertEquals(a[0].z, 15);
+ assertEquals(a[0].w, 16);
+}
+
+testFloat32x4ArraySwap();
+
+function testFloat32x4ArrayCopy() {
+ var a = new Float32x4Array(4);
+ a[0] = SIMD.float32x4(1, 2, 3, 4);
+ a[1] = SIMD.float32x4(5, 6, 7, 8);
+ a[2] = SIMD.float32x4(9, 10, 11, 12);
+ a[3] = SIMD.float32x4(13, 14, 15, 16);
+ var b = new Float32x4Array(a);
+ assertEquals(a[0].x, b[0].x);
+ assertEquals(a[0].y, b[0].y);
+ assertEquals(a[0].z, b[0].z);
+ assertEquals(a[0].w, b[0].w);
+
+ assertEquals(a[1].x, b[1].x);
+ assertEquals(a[1].y, b[1].y);
+ assertEquals(a[1].z, b[1].z);
+ assertEquals(a[1].w, b[1].w);
+
+ assertEquals(a[2].x, b[2].x);
+ assertEquals(a[2].y, b[2].y);
+ assertEquals(a[2].z, b[2].z);
+ assertEquals(a[2].w, b[2].w);
+
+ assertEquals(a[3].x, b[3].x);
+ assertEquals(a[3].y, b[3].y);
+ assertEquals(a[3].z, b[3].z);
+ assertEquals(a[3].w, b[3].w);
+
+ a[2] = SIMD.float32x4(17, 18, 19, 20);
+
+ assertEquals(a[2].x, 17);
+ assertEquals(a[2].y, 18);
+ assertEquals(a[2].z, 19);
+ assertEquals(a[2].w, 20);
+
+ assertTrue(a[2].x != b[2].x);
+ assertTrue(a[2].y != b[2].y);
+ assertTrue(a[2].z != b[2].z);
+ assertTrue(a[2].w != b[2].w);
+}
+
+testFloat32x4ArrayCopy();
+
+function testFloat32x4ArrayViewBasic() {
+ var a = new Float32Array(8);
+ // view with no offset.
+ var b = new Float32x4Array(a.buffer, 0);
+ // view with offset.
+ var c = new Float32x4Array(a.buffer, 16);
+ // view with no offset but shorter than original list.
+ var d = new Float32x4Array(a.buffer, 0, 1);
+ assertEquals(a.length, 8);
+ assertEquals(b.length, 2);
+ assertEquals(c.length, 1);
+ assertEquals(d.length, 1);
+ assertEquals(a.byteLength, 32);
+ assertEquals(b.byteLength, 32);
+ assertEquals(c.byteLength, 16);
+ assertEquals(d.byteLength, 16);
+ assertEquals(a.byteOffset, 0);
+ assertEquals(b.byteOffset, 0);
+ assertEquals(c.byteOffset, 16);
+ assertEquals(d.byteOffset, 0);
+}
+
+testFloat32x4ArrayViewBasic();
+
+function testFloat32x4ArrayViewValues() {
+ var a = new Float32Array(8);
+ var b = new Float32x4Array(a.buffer, 0);
+ var c = new Float32x4Array(a.buffer, 16);
+ var d = new Float32x4Array(a.buffer, 0, 1);
+ var start = 100;
+ for (var i = 0; i < b.length; i++) {
+ assertEquals(0.0, b[i].x);
+ assertEquals(0.0, b[i].y);
+ assertEquals(0.0, b[i].z);
+ assertEquals(0.0, b[i].w);
+ }
+ for (var i = 0; i < c.length; i++) {
+ assertEquals(0.0, c[i].x);
+ assertEquals(0.0, c[i].y);
+ assertEquals(0.0, c[i].z);
+ assertEquals(0.0, c[i].w);
+ }
+ for (var i = 0; i < d.length; i++) {
+ assertEquals(0.0, d[i].x);
+ assertEquals(0.0, d[i].y);
+ assertEquals(0.0, d[i].z);
+ assertEquals(0.0, d[i].w);
+ }
+ for (var i = 0; i < a.length; i++) {
+ a[i] = i+start;
+ }
+ for (var i = 0; i < b.length; i++) {
+ assertTrue(0.0 != b[i].x);
+ assertTrue(0.0 != b[i].y);
+ assertTrue(0.0 != b[i].z);
+ assertTrue(0.0 != b[i].w);
+ }
+ for (var i = 0; i < c.length; i++) {
+ assertTrue(0.0 != c[i].x);
+ assertTrue(0.0 != c[i].y);
+ assertTrue(0.0 != c[i].z);
+ assertTrue(0.0 != c[i].w);
+ }
+ for (var i = 0; i < d.length; i++) {
+ assertTrue(0.0 != d[i].x);
+ assertTrue(0.0 != d[i].y);
+ assertTrue(0.0 != d[i].z);
+ assertTrue(0.0 != d[i].w);
+ }
+ assertEquals(start+0, b[0].x);
+ assertEquals(start+1, b[0].y);
+ assertEquals(start+2, b[0].z);
+ assertEquals(start+3, b[0].w);
+ assertEquals(start+4, b[1].x);
+ assertEquals(start+5, b[1].y);
+ assertEquals(start+6, b[1].z);
+ assertEquals(start+7, b[1].w);
+
+ assertEquals(start+4, c[0].x);
+ assertEquals(start+5, c[0].y);
+ assertEquals(start+6, c[0].z);
+ assertEquals(start+7, c[0].w);
+
+ assertEquals(start+0, d[0].x);
+ assertEquals(start+1, d[0].y);
+ assertEquals(start+2, d[0].z);
+ assertEquals(start+3, d[0].w);
+}
+
+testFloat32x4ArrayViewValues();
+
+function testViewOnFloat32x4Array() {
+ var a = new Float32x4Array(4);
+ a[0] = SIMD.float32x4(1, 2, 3, 4);
+ a[1] = SIMD.float32x4(5, 6, 7, 8);
+ a[2] = SIMD.float32x4(9, 10, 11, 12);
+ a[3] = SIMD.float32x4(13, 14, 15, 16);
+ assertEquals(a[0].x, 1);
+ assertEquals(a[0].y, 2);
+ assertEquals(a[0].z, 3);
+ assertEquals(a[0].w, 4);
+
+ assertEquals(a[1].x, 5);
+ assertEquals(a[1].y, 6);
+ assertEquals(a[1].z, 7);
+ assertEquals(a[1].w, 8);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+ assertEquals(a[2].z, 11);
+ assertEquals(a[2].w, 12);
+
+ assertEquals(a[3].x, 13);
+ assertEquals(a[3].y, 14);
+ assertEquals(a[3].z, 15);
+ assertEquals(a[3].w, 16);
+
+ // Create view on a.
+ var b = new Float32Array(a.buffer);
+ assertEquals(b.length, 16);
+ assertEquals(b.byteLength, 64);
+ b[2] = 99.0;
+ b[6] = 1.0;
+
+ // Observe changes in "a"
+ assertEquals(a[0].x, 1);
+ assertEquals(a[0].y, 2);
+ assertEquals(a[0].z, 99);
+ assertEquals(a[0].w, 4);
+
+ assertEquals(a[1].x, 5);
+ assertEquals(a[1].y, 6);
+ assertEquals(a[1].z, 1);
+ assertEquals(a[1].w, 8);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+ assertEquals(a[2].z, 11);
+ assertEquals(a[2].w, 12);
+
+ assertEquals(a[3].x, 13);
+ assertEquals(a[3].y, 14);
+ assertEquals(a[3].z, 15);
+ assertEquals(a[3].w, 16);
+}
+
+testViewOnFloat32x4Array();
+
+function testArrayOfFloat32x4() {
+ var a = [];
+ var a4 = new Float32x4Array(2);
+ for (var i = 0; i < a4.length; i++) {
+ a[i] = SIMD.float32x4(i, i + 1, i + 2, i + 3);
+ a4[i] = SIMD.float32x4(i, i + 1, i + 2, i + 3);
+ }
+
+ for (var i = 0; i < a4.length; i++) {
+ assertEquals(a[i].x, a4[i].x);
+ assertEquals(a[i].y, a4[i].y);
+ assertEquals(a[i].z, a4[i].z);
+ assertEquals(a[i].w, a4[i].w);
+ }
+}
+
+testArrayOfFloat32x4();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax
+
+function testConstructor() {
+ var f4 = SIMD.float64x2(1.0, 2.0);
+ assertEquals(1.0, f4.x);
+ assertEquals(2.0, f4.y);
+
+ f4 = SIMD.float64x2(1.1, 2.2);
+ assertEquals(1.1, f4.x);
+ assertEquals(2.2, f4.y);
+}
+
+testConstructor();
+testConstructor();
+%OptimizeFunctionOnNextCall(testConstructor);
+testConstructor();
+
+function testZeroConstructor() {
+ var z4 = SIMD.float64x2.zero();
+ assertEquals(0.0, z4.x);
+ assertEquals(0.0, z4.y);
+}
+
+testZeroConstructor();
+testZeroConstructor();
+%OptimizeFunctionOnNextCall(testZeroConstructor);
+testZeroConstructor();
+
+function testSplatConstructor() {
+ var z4 = SIMD.float64x2.splat(5.0);
+ assertEquals(5.0, z4.x);
+ assertEquals(5.0, z4.y);
+}
+
+testSplatConstructor();
+testSplatConstructor();
+%OptimizeFunctionOnNextCall(testSplatConstructor);
+testSplatConstructor();
+
+function testTypeof() {
+ var z4 = SIMD.float64x2.zero();
+ assertEquals(typeof(z4), "object");
+
+ var new_z4 = new SIMD.float64x2(0, 0);
+ assertEquals(typeof(new_z4), "object");
+ assertEquals(typeof(new_z4.valueOf()), "object");
+ assertEquals(Object.prototype.toString.call(new_z4), "[object Object]");
+}
+
+testTypeof();
+
+function testSignMaskGetter() {
+ var a = SIMD.float64x2(-1.0, -2.0);
+ assertEquals(0x3, a.signMask);
+ var b = SIMD.float64x2(1.0, 2.0);
+ assertEquals(0x0, b.signMask);
+ var c = SIMD.float64x2(1.0, -2.0);
+ assertEquals(0x2, c.signMask);
+}
+
+testSignMaskGetter();
+testSignMaskGetter();
+%OptimizeFunctionOnNextCall(testSignMaskGetter);
+testSignMaskGetter();
+
+function testSIMDAbs() {
+ var a4 = SIMD.float64x2(1.0, -1.0);
+ var b4 = SIMD.float64x2.abs(a4);
+
+ assertEquals(1.0, b4.x);
+ assertEquals(1.0, b4.y);
+}
+
+testSIMDAbs();
+testSIMDAbs();
+%OptimizeFunctionOnNextCall(testSIMDAbs);
+testSIMDAbs();
+
+function testSIMDNeg() {
+ var a4 = SIMD.float64x2(1.0, -1.0);
+ var b4 = SIMD.float64x2.neg(a4);
+
+ assertEquals(-1.0, b4.x);
+ assertEquals(1.0, b4.y);
+}
+
+testSIMDNeg();
+testSIMDNeg();
+%OptimizeFunctionOnNextCall(testSIMDNeg);
+testSIMDNeg();
+
+function testSIMDAdd() {
+ var a4 = SIMD.float64x2(1.0, 1.0);
+ var b4 = SIMD.float64x2(2.0, 2.0);
+ var c4 = SIMD.float64x2.add(a4, b4);
+
+ assertEquals(3.0, c4.x);
+ assertEquals(3.0, c4.y);
+}
+
+testSIMDAdd();
+testSIMDAdd();
+%OptimizeFunctionOnNextCall(testSIMDAdd);
+testSIMDAdd();
+
+function testSIMDSub() {
+ var a4 = SIMD.float64x2(1.0, 1.0);
+ var b4 = SIMD.float64x2(2.0, 2.0);
+ var c4 = SIMD.float64x2.sub(a4, b4);
+
+ assertEquals(-1.0, c4.x);
+ assertEquals(-1.0, c4.y);
+}
+
+testSIMDSub();
+testSIMDSub();
+%OptimizeFunctionOnNextCall(testSIMDSub);
+testSIMDSub();
+
+function testSIMDMul() {
+ var a4 = SIMD.float64x2(1.0, 1.0);
+ var b4 = SIMD.float64x2(2.0, 2.0);
+ var c4 = SIMD.float64x2.mul(a4, b4);
+
+ assertEquals(2.0, c4.x);
+ assertEquals(2.0, c4.y);
+}
+
+testSIMDMul();
+testSIMDMul();
+%OptimizeFunctionOnNextCall(testSIMDMul);
+testSIMDMul();
+
+function testSIMDDiv() {
+ var a4 = SIMD.float64x2(1.0, 1.0);
+ var b4 = SIMD.float64x2(2.0, 2.0);
+ var c4 = SIMD.float64x2.div(a4, b4);
+
+ assertEquals(0.5, c4.x);
+ assertEquals(0.5, c4.y);
+}
+
+testSIMDDiv();
+testSIMDDiv();
+%OptimizeFunctionOnNextCall(testSIMDDiv);
+testSIMDDiv();
+
+function testSIMDClamp() {
+ var m = SIMD.float64x2(1.0, -2.0);
+ var lo = SIMD.float64x2(0.0, 0.0);
+ var hi = SIMD.float64x2(2.0, 2.0);
+ m = SIMD.float64x2.clamp(m, lo, hi);
+ assertEquals(1.0, m.x);
+ assertEquals(0.0, m.y);
+}
+
+testSIMDClamp();
+testSIMDClamp();
+%OptimizeFunctionOnNextCall(testSIMDClamp);
+testSIMDClamp();
+
+function testSIMDMin() {
+ var m = SIMD.float64x2(1.0, 2.0);
+ var n = SIMD.float64x2(1.0, 0.0);
+ m = SIMD.float64x2.min(m, n);
+ assertEquals(1.0, m.x);
+ assertEquals(0.0, m.y);
+}
+
+testSIMDMin();
+testSIMDMin();
+%OptimizeFunctionOnNextCall(testSIMDMin);
+testSIMDMin();
+
+function testSIMDMax() {
+ var m = SIMD.float64x2(1.0, 2.0);
+ var n = SIMD.float64x2(1.0, 0.0);
+ m = SIMD.float64x2.max(m, n);
+ assertEquals(1.0, m.x);
+ assertEquals(2.0, m.y);
+}
+
+testSIMDMax();
+testSIMDMax();
+%OptimizeFunctionOnNextCall(testSIMDMax);
+testSIMDMax();
+
+function testSIMDScale() {
+ var m = SIMD.float64x2(1.0, -2.0);
+ m = SIMD.float64x2.scale(m, 20.0);
+ assertEquals(20.0, m.x);
+ assertEquals(-40.0, m.y);
+}
+
+testSIMDScale();
+testSIMDScale();
+%OptimizeFunctionOnNextCall(testSIMDScale);
+testSIMDScale();
+
+function testSIMDSqrt() {
+ var m = SIMD.float64x2(1.0, 4.0);
+ m = SIMD.float64x2.sqrt(m);
+ assertEquals(1.0, m.x);
+ assertEquals(2.0, m.y);
+}
+
+testSIMDSqrt();
+testSIMDSqrt();
+%OptimizeFunctionOnNextCall(testSIMDSqrt);
+testSIMDSqrt();
+
+function testSIMDSetters() {
+ var f = SIMD.float64x2.zero();
+ assertEquals(0.0, f.x);
+ assertEquals(0.0, f.y);
+ f = SIMD.float64x2.withX(f, 4.0);
+ assertEquals(4.0, f.x);
+ f = SIMD.float64x2.withY(f, 3.0);
+ assertEquals(3.0, f.y);
+}
+
+testSIMDSetters();
+testSIMDSetters();
+%OptimizeFunctionOnNextCall(testSIMDSetters);
+testSIMDSetters();
+
+function testFloat64x2ArrayBasic() {
+ var a = new Float64x2Array(1);
+ assertEquals(1, a.length);
+ assertEquals(16, a.byteLength);
+ assertEquals(16, a.BYTES_PER_ELEMENT);
+ assertEquals(16, Float64x2Array.BYTES_PER_ELEMENT);
+ assertEquals(0, a.byteOffset);
+ assertTrue(undefined != a.buffer);
+ var b = new Float64x2Array(4);
+ assertEquals(4, b.length);
+ assertEquals(64, b.byteLength);
+ assertEquals(16, b.BYTES_PER_ELEMENT);
+ assertEquals(16, Float64x2Array.BYTES_PER_ELEMENT);
+ assertEquals(0, b.byteOffset);
+ assertTrue(undefined != b.buffer);
+}
+
+testFloat64x2ArrayBasic();
+
+function testFloat64x2ArrayGetAndSet() {
+ var a = new Float64x2Array(4);
+ a[0] = SIMD.float64x2(1, 2);
+ a[1] = SIMD.float64x2(5, 6);
+ a[2] = SIMD.float64x2(9, 10);
+ a[3] = SIMD.float64x2(13, 14);
+ assertEquals(a[0].x, 1);
+ assertEquals(a[0].y, 2);
+
+ assertEquals(a[1].x, 5);
+ assertEquals(a[1].y, 6);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+
+ assertEquals(a[3].x, 13);
+ assertEquals(a[3].y, 14);
+
+ var b = new Float64x2Array(4);
+ b.setAt(0,SIMD.float64x2(1, 2));
+ b.setAt(1,SIMD.float64x2(5, 6));
+ b.setAt(2,SIMD.float64x2(9, 10));
+ b.setAt(3,SIMD.float64x2(13, 14));
+
+ assertEquals(b.getAt(0).x, 1);
+ assertEquals(b.getAt(0).y, 2);
+
+ assertEquals(b.getAt(1).x, 5);
+ assertEquals(b.getAt(1).y, 6);
+
+ assertEquals(b.getAt(2).x, 9);
+ assertEquals(b.getAt(2).y, 10);
+
+ assertEquals(b.getAt(3).x, 13);
+ assertEquals(b.getAt(3).y, 14);
+}
+
+testFloat64x2ArrayGetAndSet();
+
+function testFloat64x2ArraySwap() {
+ var a = new Float64x2Array(4);
+ a[0] = SIMD.float64x2(1, 2);
+ a[1] = SIMD.float64x2(5, 6);
+ a[2] = SIMD.float64x2(9, 10);
+ a[3] = SIMD.float64x2(13, 14);
+
+ // Swap element 0 and element 3
+ var t = a[0];
+ a[0] = a[3];
+ a[3] = t;
+
+ assertEquals(a[3].x, 1);
+ assertEquals(a[3].y, 2);
+
+ assertEquals(a[1].x, 5);
+ assertEquals(a[1].y, 6);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+
+ assertEquals(a[0].x, 13);
+ assertEquals(a[0].y, 14);
+}
+
+testFloat64x2ArraySwap();
+
+function testFloat64x2ArrayCopy() {
+ var a = new Float64x2Array(4);
+ a[0] = SIMD.float64x2(1, 2);
+ a[1] = SIMD.float64x2(5, 6);
+ a[2] = SIMD.float64x2(9, 10);
+ a[3] = SIMD.float64x2(13, 14);
+ var b = new Float64x2Array(a);
+ assertEquals(a[0].x, b[0].x);
+ assertEquals(a[0].y, b[0].y);
+
+ assertEquals(a[1].x, b[1].x);
+ assertEquals(a[1].y, b[1].y);
+
+ assertEquals(a[2].x, b[2].x);
+ assertEquals(a[2].y, b[2].y);
+
+ assertEquals(a[3].x, b[3].x);
+ assertEquals(a[3].y, b[3].y);
+
+ a[2] = SIMD.float64x2(17, 18);
+
+ assertEquals(a[2].x, 17);
+ assertEquals(a[2].y, 18);
+
+ assertTrue(a[2].x != b[2].x);
+ assertTrue(a[2].y != b[2].y);
+}
+
+testFloat64x2ArrayCopy();
+
+function testFloat64x2ArrayViewBasic() {
+ var a = new Float64Array(8);
+ // view with no offset.
+ var b = new Float64x2Array(a.buffer, 0);
+ // view with offset.
+ var c = new Float64x2Array(a.buffer, 16);
+ // view with no offset but shorter than original list.
+ var d = new Float64x2Array(a.buffer, 0, 1);
+ assertEquals(a.length, 8);
+ assertEquals(b.length, 4);
+ assertEquals(c.length, 3);
+ assertEquals(d.length, 1);
+ assertEquals(a.byteLength, 64);
+ assertEquals(b.byteLength, 64);
+ assertEquals(c.byteLength, 48);
+ assertEquals(d.byteLength, 16)
+ assertEquals(a.byteOffset, 0);
+ assertEquals(b.byteOffset, 0);
+ assertEquals(c.byteOffset, 16);
+ assertEquals(d.byteOffset, 0);
+}
+
+testFloat64x2ArrayViewBasic();
+
+function testFloat64x2ArrayViewValues() {
+ var a = new Float64Array(8);
+ var b = new Float64x2Array(a.buffer, 0);
+ var c = new Float64x2Array(a.buffer, 16);
+ var d = new Float64x2Array(a.buffer, 0, 1);
+ var start = 100;
+ for (var i = 0; i < b.length; i++) {
+ assertEquals(0.0, b[i].x);
+ assertEquals(0.0, b[i].y);
+ }
+ for (var i = 0; i < c.length; i++) {
+ assertEquals(0.0, c[i].x);
+ assertEquals(0.0, c[i].y);
+ }
+ for (var i = 0; i < d.length; i++) {
+ assertEquals(0.0, d[i].x);
+ assertEquals(0.0, d[i].y);
+ }
+ for (var i = 0; i < a.length; i++) {
+ a[i] = i+start;
+ }
+ for (var i = 0; i < b.length; i++) {
+ assertTrue(0.0 != b[i].x);
+ assertTrue(0.0 != b[i].y);
+ }
+ for (var i = 0; i < c.length; i++) {
+ assertTrue(0.0 != c[i].x);
+ assertTrue(0.0 != c[i].y);
+ }
+ for (var i = 0; i < d.length; i++) {
+ assertTrue(0.0 != d[i].x);
+ assertTrue(0.0 != d[i].y);
+ }
+ assertEquals(start+0, b[0].x);
+ assertEquals(start+1, b[0].y);
+ assertEquals(start+2, b[1].x);
+ assertEquals(start+3, b[1].y);
+ assertEquals(start+4, b[2].x);
+ assertEquals(start+5, b[2].y);
+ assertEquals(start+6, b[3].x);
+ assertEquals(start+7, b[3].y);
+
+ assertEquals(start+2, c[0].x);
+ assertEquals(start+3, c[0].y);
+ assertEquals(start+4, c[1].x);
+ assertEquals(start+5, c[1].y);
+ assertEquals(start+6, c[2].x);
+ assertEquals(start+7, c[2].y);
+
+ assertEquals(start+0, d[0].x);
+ assertEquals(start+1, d[0].y);
+}
+
+testFloat64x2ArrayViewValues();
+
+function testViewOnFloat64x2Array() {
+ var a = new Float64x2Array(4);
+ a[0] = SIMD.float64x2(1, 2);
+ a[1] = SIMD.float64x2(5, 6);
+ a[2] = SIMD.float64x2(9, 10);
+ a[3] = SIMD.float64x2(13, 14);
+ assertEquals(a[0].x, 1);
+ assertEquals(a[0].y, 2);
+
+ assertEquals(a[1].x, 5);
+ assertEquals(a[1].y, 6);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+
+ assertEquals(a[3].x, 13);
+ assertEquals(a[3].y, 14);
+
+ // Create view on a.
+ var b = new Float64Array(a.buffer);
+ assertEquals(b.length, 8);
+ assertEquals(b.byteLength, 64);
+ b[2] = 99.0;
+ b[6] = 1.0;
+
+ // Observe changes in "a"
+ assertEquals(a[0].x, 1);
+ assertEquals(a[0].y, 2);
+
+ assertEquals(a[1].x, 99.0);
+ assertEquals(a[1].y, 6);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+
+ assertEquals(a[3].x, 1.0);
+ assertEquals(a[3].y, 14);
+}
+
+testViewOnFloat64x2Array();
+
+function testArrayOfFloat64x2() {
+ var a = [];
+ var a4 = new Float64x2Array(2);
+ for (var i = 0; i < a4.length; i++) {
+ a[i] = SIMD.float64x2(i, i + 1);
+ a4[i] = SIMD.float64x2(i, i + 1);
+ }
+
+ for (var i = 0; i < a4.length; i++) {
+ assertEquals(a[i].x, a4[i].x);
+ assertEquals(a[i].y, a4[i].y);
+ }
+}
+
+testArrayOfFloat64x2();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax
+
+function testConstructor() {
+ var u4 = SIMD.int32x4(1, 2, 3, 4);
+ assertEquals(1, u4.x);
+ assertEquals(2, u4.y);
+ assertEquals(3, u4.z);
+ assertEquals(4, u4.w);
+}
+
+testConstructor();
+
+function testZeroConstructor() {
+ var u4 = SIMD.int32x4.zero();
+ assertEquals(0, u4.x);
+ assertEquals(0, u4.y);
+ assertEquals(0, u4.z);
+ assertEquals(0, u4.w);
+}
+
+testZeroConstructor();
+testZeroConstructor();
+%OptimizeFunctionOnNextCall(testZeroConstructor);
+testZeroConstructor();
+
+function testBoolConstructor() {
+ var u4 = SIMD.int32x4.bool(true, false, true, false);
+ assertEquals(-1, u4.x);
+ assertEquals(0, u4.y);
+ assertEquals(-1, u4.z);
+ assertEquals(0, u4.w);
+}
+
+testBoolConstructor();
+testBoolConstructor();
+%OptimizeFunctionOnNextCall(testBoolConstructor);
+testBoolConstructor();
+
+function testSplatConstructor() {
+ var u4 = SIMD.int32x4.splat(4);
+ assertEquals(4, u4.x);
+ assertEquals(4, u4.y);
+ assertEquals(4, u4.z);
+ assertEquals(4, u4.w);
+}
+
+testSplatConstructor();
+testSplatConstructor();
+%OptimizeFunctionOnNextCall(testSplatConstructor);
+testSplatConstructor();
+
+function testTypeof() {
+ var u4 = SIMD.int32x4(1, 2, 3, 4);
+ assertEquals(typeof(u4), "object");
+
+ var new_u4 = new SIMD.int32x4(1, 2, 3, 4);
+ assertEquals(typeof(new_u4), "object");
+ assertEquals(typeof(new_u4.valueOf()), "object");
+ assertEquals(Object.prototype.toString.call(new_u4), "[object Object]");
+}
+
+testTypeof();
+
+function testSignMaskGetter() {
+ var a = SIMD.int32x4(0x80000000 - 0xFFFFFFFF - 1, 0x7000000, -1, 0x0);
+ assertEquals(0x5, a.signMask);
+ var b = SIMD.int32x4(0x0, 0x0, 0x0, 0x0);
+ assertEquals(0x0, b.signMask);
+ var c = SIMD.int32x4(-1, -1, -1, -1);
+ assertEquals(0xf, c.signMask);
+}
+
+testSignMaskGetter();
+testSignMaskGetter();
+%OptimizeFunctionOnNextCall(testSignMaskGetter);
+testSignMaskGetter();
+
+
+function testSIMDAnd() {
+ var m = SIMD.int32x4(0xAAAAAAAA - 0xFFFFFFFF - 1, 0xAAAAAAAA - 0xFFFFFFFF - 1,
+ 0xAAAAAAAA - 0xFFFFFFFF - 1, 0xAAAAAAAA - 0xFFFFFFFF - 1);
+ var n = SIMD.int32x4(0x55555555, 0x55555555, 0x55555555, 0x55555555);
+ assertEquals(0xAAAAAAAA - 0xFFFFFFFF - 1, m.x);
+ assertEquals(0xAAAAAAAA - 0xFFFFFFFF - 1, m.y);
+ assertEquals(0xAAAAAAAA - 0xFFFFFFFF - 1, m.z);
+ assertEquals(0xAAAAAAAA - 0xFFFFFFFF - 1, m.w);
+ assertEquals(0x55555555, n.x);
+ assertEquals(0x55555555, n.y);
+ assertEquals(0x55555555, n.z);
+ assertEquals(0x55555555, n.w);
+ assertEquals(true, n.flagX);
+ assertEquals(true, n.flagY);
+ assertEquals(true, n.flagZ);
+ assertEquals(true, n.flagW);
+ o = SIMD.int32x4.and(m,n); // and
+ assertEquals(0x0, o.x);
+ assertEquals(0x0, o.y);
+ assertEquals(0x0, o.z);
+ assertEquals(0x0, o.w);
+ assertEquals(false, o.flagX);
+ assertEquals(false, o.flagY);
+ assertEquals(false, o.flagZ);
+ assertEquals(false, o.flagW);
+}
+
+testSIMDAnd();
+testSIMDAnd();
+%OptimizeFunctionOnNextCall(testSIMDAnd);
+testSIMDAnd();
+
+function testSIMDOr() {
+ var m = SIMD.int32x4(0xAAAAAAAA - 0xFFFFFFFF - 1, 0xAAAAAAAA - 0xFFFFFFFF - 1,
+ 0xAAAAAAAA - 0xFFFFFFFF - 1, 0xAAAAAAAA - 0xFFFFFFFF - 1);
+ var n = SIMD.int32x4(0x55555555, 0x55555555, 0x55555555, 0x55555555);
+ var o = SIMD.int32x4.or(m,n); // or
+ assertEquals(-1, o.x);
+ assertEquals(-1, o.y);
+ assertEquals(-1, o.z);
+ assertEquals(-1, o.w);
+ assertEquals(true, o.flagX);
+ assertEquals(true, o.flagY);
+ assertEquals(true, o.flagZ);
+ assertEquals(true, o.flagW);
+}
+
+testSIMDOr();
+testSIMDOr();
+%OptimizeFunctionOnNextCall(testSIMDOr);
+testSIMDOr();
+
+function testSIMDInt32x4Or() {
+ var m = SIMD.int32x4(0xAAAAAAAA - 0xFFFFFFFF - 1, 0xAAAAAAAA - 0xFFFFFFFF - 1,
+ 0xAAAAAAAA - 0xFFFFFFFF - 1, 0xAAAAAAAA - 0xFFFFFFFF - 1);
+ var n = SIMD.int32x4(0xAAAAAAAA - 0xFFFFFFFF - 1, 0xAAAAAAAA - 0xFFFFFFFF - 1,
+ 0xAAAAAAAA - 0xFFFFFFFF - 1, 0xAAAAAAAA - 0xFFFFFFFF - 1);
+ var o = SIMD.int32x4.xor(m,n); // xor
+ assertEquals(0x0, o.x);
+ assertEquals(0x0, o.y);
+ assertEquals(0x0, o.z);
+ assertEquals(0x0, o.w);
+ assertEquals(false, o.flagX);
+ assertEquals(false, o.flagY);
+ assertEquals(false, o.flagZ);
+ assertEquals(false, o.flagW);
+}
+
+testSIMDInt32x4Or();
+testSIMDInt32x4Or();
+%OptimizeFunctionOnNextCall(testSIMDInt32x4Or);
+testSIMDInt32x4Or();
+
+function testSIMDNot() {
+ var m = SIMD.int32x4(0xAAAAAAAA - 0xFFFFFFFF - 1, 0xAAAAAAAA - 0xFFFFFFFF - 1,
+ 0xAAAAAAAA - 0xFFFFFFFF - 1, 0xAAAAAAAA - 0xFFFFFFFF - 1);
+ var n = SIMD.int32x4(0x55555555, 0x55555555, 0x55555555, 0x55555555);
+ m = SIMD.int32x4.not(m);
+ n = SIMD.int32x4.not(n);
+ assertEquals(0xAAAAAAAA - 0xFFFFFFFF - 1, n.x);
+ assertEquals(0xAAAAAAAA - 0xFFFFFFFF - 1, n.y);
+ assertEquals(0xAAAAAAAA - 0xFFFFFFFF - 1, n.z);
+ assertEquals(0xAAAAAAAA - 0xFFFFFFFF - 1, n.w);
+ assertEquals(0x55555555, m.x);
+ assertEquals(0x55555555, m.y);
+ assertEquals(0x55555555, m.z);
+ assertEquals(0x55555555, m.w);
+}
+
+testSIMDNot();
+testSIMDNot();
+%OptimizeFunctionOnNextCall(testSIMDNot);
+testSIMDNot();
+
+function testSIMDNegu32() {
+ var m = SIMD.int32x4(-1, 1, -1, 1);
+ m = SIMD.int32x4.neg(m);
+ assertEquals(1, m.x);
+ assertEquals(-1, m.y);
+ assertEquals(1, m.z);
+ assertEquals(-1, m.w);
+}
+
+testSIMDNegu32();
+testSIMDNegu32();
+%OptimizeFunctionOnNextCall(testSIMDNegu32);
+testSIMDNegu32();
+
+function testSIMDSelect() {
+ var m = SIMD.int32x4.bool(true, true, false, false);
+ var t = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ var f = SIMD.float32x4(5.0, 6.0, 7.0, 8.0);
+ var s = SIMD.int32x4.select(m, t, f);
+ assertEquals(1.0, s.x);
+ assertEquals(2.0, s.y);
+ assertEquals(7.0, s.z);
+ assertEquals(8.0, s.w);
+}
+
+testSIMDSelect();
+testSIMDSelect();
+%OptimizeFunctionOnNextCall(testSIMDSelect);
+testSIMDSelect();
+
+
+function testSIMDWithXu32() {
+ var a = SIMD.int32x4(1, 2, 3, 4);
+ var c = SIMD.int32x4.withX(a, 20);
+ assertEquals(20, c.x);
+ assertEquals(2, c.y);
+ assertEquals(3, c.z);
+ assertEquals(4, c.w);
+}
+
+testSIMDWithXu32();
+testSIMDWithXu32();
+%OptimizeFunctionOnNextCall(testSIMDWithXu32);
+testSIMDWithXu32();
+
+function testSIMDWithYu32() {
+ var a = SIMD.int32x4(1, 2, 3, 4);
+ var c = SIMD.int32x4.withY(a, 20);
+ assertEquals(1, c.x);
+ assertEquals(20, c.y);
+ assertEquals(3, c.z);
+ assertEquals(4, c.w);
+}
+
+testSIMDWithYu32();
+testSIMDWithYu32();
+%OptimizeFunctionOnNextCall(testSIMDWithYu32);
+testSIMDWithYu32();
+
+function testSIMDWithZu32() {
+ var a = SIMD.int32x4(1, 2, 3, 4);
+ var c = SIMD.int32x4.withZ(a, 20);
+ assertEquals(1, c.x);
+ assertEquals(2, c.y);
+ assertEquals(20, c.z);
+ assertEquals(4, c.w);
+}
+
+testSIMDWithZu32();
+testSIMDWithZu32();
+%OptimizeFunctionOnNextCall(testSIMDWithZu32);
+testSIMDWithZu32();
+
+function testSIMDWithWu32() {
+ var a = SIMD.int32x4(1, 2, 3, 4);
+ var c = SIMD.int32x4.withW(a, 20);
+ assertEquals(1, c.x);
+ assertEquals(2, c.y);
+ assertEquals(3, c.z);
+ assertEquals(20, c.w);
+}
+
+testSIMDWithWu32();
+testSIMDWithWu32();
+%OptimizeFunctionOnNextCall(testSIMDWithWu32);
+testSIMDWithWu32();
+
+function testSIMDWithFlagX() {
+ var a = SIMD.int32x4.bool(true, false, true, false);
+
+ // boolean
+ var c = SIMD.int32x4.withFlagX(a, true);
+ assertEquals(true, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ c = SIMD.int32x4.withFlagX(a, false);
+ assertEquals(false, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(0x0, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+
+ // smi
+ c = SIMD.int32x4.withFlagX(a, 2);
+ assertEquals(true, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(-1, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+ c = SIMD.int32x4.withFlagX(a, 0);
+ assertEquals(false, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(0x0, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+
+ // string
+ c = SIMD.int32x4.withFlagX(a, 'true');
+ assertEquals(true, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(-1, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+ c = SIMD.int32x4.withFlagX(a, '');
+ assertEquals(false, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(0x0, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+
+ // heap number
+ c = SIMD.int32x4.withFlagX(a, 3.14);
+ assertEquals(true, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(-1, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+ c = SIMD.int32x4.withFlagX(a, 0.0);
+ assertEquals(false, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(0x0, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+
+ // JS Array
+ var array = [1];
+ c = SIMD.int32x4.withFlagX(a, array);
+ assertEquals(true, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(-1, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+
+ c = SIMD.int32x4.withFlagX(a, undefined);
+ assertEquals(false, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(0x0, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+}
+
+testSIMDWithFlagX();
+testSIMDWithFlagX();
+%OptimizeFunctionOnNextCall(testSIMDWithFlagX);
+testSIMDWithFlagX();
+
+function testSIMDWithFlagY() {
+ var a = SIMD.int32x4.bool(true, false, true, false);
+ var c = SIMD.int32x4.withFlagY(a, true);
+ assertEquals(true, c.flagX);
+ assertEquals(true, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ c = SIMD.int32x4.withFlagY(a, false);
+ assertEquals(true, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(-1, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+}
+
+testSIMDWithFlagY();
+testSIMDWithFlagY();
+%OptimizeFunctionOnNextCall(testSIMDWithFlagY);
+testSIMDWithFlagY();
+
+function testSIMDWithFlagZ() {
+ var a = SIMD.int32x4.bool(true, false, true, false);
+ var c = SIMD.int32x4.withFlagZ(a, true);
+ assertEquals(true, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ c = SIMD.int32x4.withFlagZ(a, false);
+ assertEquals(true, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(false, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(-1, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(0x0, c.z);
+ assertEquals(0x0, c.w);
+}
+
+testSIMDWithFlagZ();
+testSIMDWithFlagZ();
+%OptimizeFunctionOnNextCall(testSIMDWithFlagZ);
+testSIMDWithFlagZ();
+
+function testSIMDWithFlagW() {
+ var a = SIMD.int32x4.bool(true, false, true, false);
+ var c = SIMD.int32x4.withFlagW(a, true);
+ assertEquals(true, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(true, c.flagW);
+ c = SIMD.int32x4.withFlagW(a, false);
+ assertEquals(true, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(-1, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+}
+
+testSIMDWithFlagW();
+testSIMDWithFlagW();
+%OptimizeFunctionOnNextCall(testSIMDWithFlagW);
+testSIMDWithFlagW();
+
+function testSIMDAddu32() {
+ var a = SIMD.int32x4(-1, -1, 0x7fffffff, 0x0);
+ var b = SIMD.int32x4(0x1, -1, 0x1, -1);
+ var c = SIMD.int32x4.add(a, b);
+ assertEquals(0x0, c.x);
+ assertEquals(-2, c.y);
+ assertEquals(0x80000000 - 0xFFFFFFFF - 1, c.z);
+ assertEquals(-1, c.w);
+}
+
+testSIMDAddu32();
+testSIMDAddu32();
+%OptimizeFunctionOnNextCall(testSIMDAddu32);
+testSIMDAddu32();
+
+function testSIMDSubu32() {
+ var a = SIMD.int32x4(-1, -1, 0x80000000 - 0xFFFFFFFF - 1, 0x0);
+ var b = SIMD.int32x4(0x1, -1, 0x1, -1);
+ var c = SIMD.int32x4.sub(a, b);
+ assertEquals(-2, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(0x7FFFFFFF, c.z);
+ assertEquals(0x1, c.w);
+}
+
+testSIMDSubu32();
+testSIMDSubu32();
+%OptimizeFunctionOnNextCall(testSIMDSubu32);
+testSIMDSubu32();
+
+function testSIMDMulu32() {
+ var a = SIMD.int32x4(-1, -1, 0x80000000 - 0xFFFFFFFF - 1, 0x0);
+ var b = SIMD.int32x4(0x1, -1, 0x80000000 - 0xFFFFFFFF - 1, -1);
+ var c = SIMD.int32x4.mul(a, b);
+ assertEquals(-1, c.x);
+ assertEquals(0x1, c.y);
+ assertEquals(0x0, c.z);
+ assertEquals(0x0, c.w);
+}
+
+testSIMDMulu32();
+testSIMDMulu32();
+%OptimizeFunctionOnNextCall(testSIMDMulu32);
+testSIMDMulu32();
+
+function testSIMDShuffleu32() {
+ var m = SIMD.int32x4(1, 2, 3, 4);
+ var xxxx = SIMD.int32x4.shuffle(m, SIMD.XXXX);
+ assertEquals(1, xxxx.x);
+ assertEquals(1, xxxx.y);
+ assertEquals(1, xxxx.z);
+ assertEquals(1, xxxx.w);
+ var yyyy = SIMD.int32x4.shuffle(m, SIMD.YYYY);
+ assertEquals(2, yyyy.x);
+ assertEquals(2, yyyy.y);
+ assertEquals(2, yyyy.z);
+ assertEquals(2, yyyy.w);
+ var zzzz = SIMD.int32x4.shuffle(m, SIMD.ZZZZ);
+ assertEquals(3, zzzz.x);
+ assertEquals(3, zzzz.y);
+ assertEquals(3, zzzz.z);
+ assertEquals(3, zzzz.w);
+ var wwww = SIMD.int32x4.shuffle(m, SIMD.WWWW);
+ assertEquals(4, wwww.x);
+ assertEquals(4, wwww.y);
+ assertEquals(4, wwww.z);
+ assertEquals(4, wwww.w);
+ var wzyx = SIMD.int32x4.shuffle(m, SIMD.WZYX);
+ assertEquals(4, wzyx.x);
+ assertEquals(3, wzyx.y);
+ assertEquals(2, wzyx.z);
+ assertEquals(1, wzyx.w);
+ var wwzz = SIMD.int32x4.shuffle(m, SIMD.WWZZ);
+ assertEquals(4, wwzz.x);
+ assertEquals(4, wwzz.y);
+ assertEquals(3, wwzz.z);
+ assertEquals(3, wwzz.w);
+ var xxyy = SIMD.int32x4.shuffle(m, SIMD.XXYY);
+ assertEquals(1, xxyy.x);
+ assertEquals(1, xxyy.y);
+ assertEquals(2, xxyy.z);
+ assertEquals(2, xxyy.w);
+ var yyww = SIMD.int32x4.shuffle(m, SIMD.YYWW);
+ assertEquals(2, yyww.x);
+ assertEquals(2, yyww.y);
+ assertEquals(4, yyww.z);
+ assertEquals(4, yyww.w);
+}
+
+testSIMDShuffleu32();
+testSIMDShuffleu32();
+%OptimizeFunctionOnNextCall(testSIMDShuffleu32);
+testSIMDShuffleu32();
+
+function testSIMDComparisons() {
+ var m = SIMD.int32x4(1, 2, 100, 1);
+ var n = SIMD.int32x4(2, 2, 1, 100);
+ var cmp;
+ cmp = SIMD.int32x4.lessThan(m, n);
+ assertEquals(-1, cmp.x);
+ assertEquals(0x0, cmp.y);
+ assertEquals(0x0, cmp.z);
+ assertEquals(-1, cmp.w);
+
+ cmp = SIMD.int32x4.equal(m, n);
+ assertEquals(0x0, cmp.x);
+ assertEquals(-1, cmp.y);
+ assertEquals(0x0, cmp.z);
+ assertEquals(0x0, cmp.w);
+
+ cmp = SIMD.int32x4.greaterThan(m, n);
+ assertEquals(0x0, cmp.x);
+ assertEquals(0x0, cmp.y);
+ assertEquals(-1, cmp.z);
+ assertEquals(0x0, cmp.w);
+}
+
+testSIMDComparisons();
+testSIMDComparisons();
+%OptimizeFunctionOnNextCall(testSIMDComparisons);
+testSIMDComparisons();
+
+function testSIMDShift() {
+ var m = SIMD.int32x4(1, 2, 100, 0);
+
+ var a = SIMD.int32x4.shiftLeft(m, 2);
+ assertEquals(4, a.x);
+ assertEquals(8, a.y);
+ assertEquals(400, a.z);
+ assertEquals(0, a.w);
+
+ var b = SIMD.int32x4.shiftRight(a, 2);
+ assertEquals(1, b.x);
+ assertEquals(2, b.y);
+ assertEquals(100, b.z);
+ assertEquals(0, b.w);
+
+ var n = SIMD.int32x4(-8, 2, 1, 100);
+
+ var c = SIMD.int32x4.shiftRightArithmetic(n, 2);
+ assertEquals(-2, c.x);
+ assertEquals(0, c.y);
+ assertEquals(0, c.z);
+ assertEquals(25, c.w);
+}
+
+testSIMDShift();
+testSIMDShift();
+%OptimizeFunctionOnNextCall(testSIMDShift);
+testSIMDShift();
+
+function testInt32x4ArrayBasic() {
+ var a = new Int32x4Array(1);
+ assertEquals(1, a.length);
+ assertEquals(16, a.byteLength);
+ assertEquals(16, a.BYTES_PER_ELEMENT);
+ assertEquals(16, Int32x4Array.BYTES_PER_ELEMENT);
+ assertEquals(0, a.byteOffset);
+ assertTrue(undefined != a.buffer);
+ var b = new Int32x4Array(4);
+ assertEquals(4, b.length);
+ assertEquals(64, b.byteLength);
+ assertEquals(16, b.BYTES_PER_ELEMENT);
+ assertEquals(16, Int32x4Array.BYTES_PER_ELEMENT);
+ assertEquals(0, b.byteOffset);
+ assertTrue(undefined != b.buffer);
+}
+
+testInt32x4ArrayBasic();
+
+function testInt32x4ArrayGetAndSet() {
+ var a = new Int32x4Array(4);
+ a[0] = SIMD.int32x4(1, 2, 3, 4);
+ a[1] = SIMD.int32x4(5, 6, 7, 8);
+ a[2] = SIMD.int32x4(9, 10, 11, 12);
+ a[3] = SIMD.int32x4(13, 14, 15, 16);
+ assertEquals(a[0].x, 1);
+ assertEquals(a[0].y, 2);
+ assertEquals(a[0].z, 3);
+ assertEquals(a[0].w, 4);
+
+ assertEquals(a[1].x, 5);
+ assertEquals(a[1].y, 6);
+ assertEquals(a[1].z, 7);
+ assertEquals(a[1].w, 8);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+ assertEquals(a[2].z, 11);
+ assertEquals(a[2].w, 12);
+
+ assertEquals(a[3].x, 13);
+ assertEquals(a[3].y, 14);
+ assertEquals(a[3].z, 15);
+ assertEquals(a[3].w, 16);
+
+ var b = new Int32x4Array(4);
+ b.setAt(0,SIMD.int32x4(1, 2, 3, 4));
+ b.setAt(1,SIMD.int32x4(5, 6, 7, 8));
+ b.setAt(2,SIMD.int32x4(9, 10, 11, 12));
+ b.setAt(3,SIMD.int32x4(13, 14, 15, 16));
+
+ assertEquals(b.getAt(0).x, 1);
+ assertEquals(b.getAt(0).y, 2);
+ assertEquals(b.getAt(0).z, 3);
+ assertEquals(b.getAt(0).w, 4);
+
+ assertEquals(b.getAt(1).x, 5);
+ assertEquals(b.getAt(1).y, 6);
+ assertEquals(b.getAt(1).z, 7);
+ assertEquals(b.getAt(1).w, 8);
+
+ assertEquals(b.getAt(2).x, 9);
+ assertEquals(b.getAt(2).y, 10);
+ assertEquals(b.getAt(2).z, 11);
+ assertEquals(b.getAt(2).w, 12);
+
+ assertEquals(b.getAt(3).x, 13);
+ assertEquals(b.getAt(3).y, 14);
+ assertEquals(b.getAt(3).z, 15);
+ assertEquals(b.getAt(3).w, 16);
+}
+
+testInt32x4ArrayGetAndSet();
+
+function testInt32x4ArraySwap() {
+ var a = new Int32x4Array(4);
+ a[0] = SIMD.int32x4(1, 2, 3, 4);
+ a[1] = SIMD.int32x4(5, 6, 7, 8);
+ a[2] = SIMD.int32x4(9, 10, 11, 12);
+ a[3] = SIMD.int32x4(13, 14, 15, 16);
+
+ // Swap element 0 and element 3
+ var t = a[0];
+ a[0] = a[3];
+ a[3] = t;
+
+ assertEquals(a[3].x, 1);
+ assertEquals(a[3].y, 2);
+ assertEquals(a[3].z, 3);
+ assertEquals(a[3].w, 4);
+
+ assertEquals(a[1].x, 5);
+ assertEquals(a[1].y, 6);
+ assertEquals(a[1].z, 7);
+ assertEquals(a[1].w, 8);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+ assertEquals(a[2].z, 11);
+ assertEquals(a[2].w, 12);
+
+ assertEquals(a[0].x, 13);
+ assertEquals(a[0].y, 14);
+ assertEquals(a[0].z, 15);
+ assertEquals(a[0].w, 16);
+}
+
+testInt32x4ArraySwap();
+testInt32x4ArraySwap();
+%OptimizeFunctionOnNextCall(testInt32x4ArraySwap);
+testInt32x4ArraySwap();
+
+function testInt32x4ArrayCopy() {
+ var a = new Int32x4Array(4);
+ a[0] = SIMD.int32x4(1, 2, 3, 4);
+ a[1] = SIMD.int32x4(5, 6, 7, 8);
+ a[2] = SIMD.int32x4(9, 10, 11, 12);
+ a[3] = SIMD.int32x4(13, 14, 15, 16);
+ var b = new Int32x4Array(a);
+ assertEquals(a[0].x, b[0].x);
+ assertEquals(a[0].y, b[0].y);
+ assertEquals(a[0].z, b[0].z);
+ assertEquals(a[0].w, b[0].w);
+
+ assertEquals(a[1].x, b[1].x);
+ assertEquals(a[1].y, b[1].y);
+ assertEquals(a[1].z, b[1].z);
+ assertEquals(a[1].w, b[1].w);
+
+ assertEquals(a[2].x, b[2].x);
+ assertEquals(a[2].y, b[2].y);
+ assertEquals(a[2].z, b[2].z);
+ assertEquals(a[2].w, b[2].w);
+
+ assertEquals(a[3].x, b[3].x);
+ assertEquals(a[3].y, b[3].y);
+ assertEquals(a[3].z, b[3].z);
+ assertEquals(a[3].w, b[3].w);
+
+ a[2] = SIMD.int32x4(17, 18, 19, 20);
+
+ assertEquals(a[2].x, 17);
+ assertEquals(a[2].y, 18);
+ assertEquals(a[2].z, 19);
+ assertEquals(a[2].w, 20);
+
+ assertTrue(a[2].x != b[2].x);
+ assertTrue(a[2].y != b[2].y);
+ assertTrue(a[2].z != b[2].z);
+ assertTrue(a[2].w != b[2].w);
+}
+
+testInt32x4ArrayCopy();
+
+function testInt32x4ArrayViewBasic() {
+ var a = new Uint32Array(8);
+ // view with no offset.
+ var b = new Int32x4Array(a.buffer, 0);
+ // view with offset.
+ var c = new Int32x4Array(a.buffer, 16);
+ // view with no offset but shorter than original list.
+ var d = new Int32x4Array(a.buffer, 0, 1);
+ assertEquals(a.length, 8);
+ assertEquals(b.length, 2);
+ assertEquals(c.length, 1);
+ assertEquals(d.length, 1);
+ assertEquals(a.byteLength, 32);
+ assertEquals(b.byteLength, 32);
+ assertEquals(c.byteLength, 16);
+ assertEquals(d.byteLength, 16)
+ assertEquals(a.byteOffset, 0);
+ assertEquals(b.byteOffset, 0);
+ assertEquals(c.byteOffset, 16);
+ assertEquals(d.byteOffset, 0);
+}
+
+testInt32x4ArrayViewBasic();
+
+function testInt32x4ArrayViewValues() {
+ var a = new Uint32Array(8);
+ var b = new Int32x4Array(a.buffer, 0);
+ var c = new Int32x4Array(a.buffer, 16);
+ var d = new Int32x4Array(a.buffer, 0, 1);
+ var start = 100;
+ for (var i = 0; i < b.length; i++) {
+ assertEquals(0, b[i].x);
+ assertEquals(0, b[i].y);
+ assertEquals(0, b[i].z);
+ assertEquals(0, b[i].w);
+ }
+ for (var i = 0; i < c.length; i++) {
+ assertEquals(0, c[i].x);
+ assertEquals(0, c[i].y);
+ assertEquals(0, c[i].z);
+ assertEquals(0, c[i].w);
+ }
+ for (var i = 0; i < d.length; i++) {
+ assertEquals(0, d[i].x);
+ assertEquals(0, d[i].y);
+ assertEquals(0, d[i].z);
+ assertEquals(0, d[i].w);
+ }
+ for (var i = 0; i < a.length; i++) {
+ a[i] = i+start;
+ }
+ for (var i = 0; i < b.length; i++) {
+ assertTrue(0 != b[i].x);
+ assertTrue(0 != b[i].y);
+ assertTrue(0 != b[i].z);
+ assertTrue(0 != b[i].w);
+ }
+ for (var i = 0; i < c.length; i++) {
+ assertTrue(0 != c[i].x);
+ assertTrue(0 != c[i].y);
+ assertTrue(0 != c[i].z);
+ assertTrue(0 != c[i].w);
+ }
+ for (var i = 0; i < d.length; i++) {
+ assertTrue(0 != d[i].x);
+ assertTrue(0 != d[i].y);
+ assertTrue(0 != d[i].z);
+ assertTrue(0 != d[i].w);
+ }
+ assertEquals(start+0, b[0].x);
+ assertEquals(start+1, b[0].y);
+ assertEquals(start+2, b[0].z);
+ assertEquals(start+3, b[0].w);
+ assertEquals(start+4, b[1].x);
+ assertEquals(start+5, b[1].y);
+ assertEquals(start+6, b[1].z);
+ assertEquals(start+7, b[1].w);
+
+ assertEquals(start+4, c[0].x);
+ assertEquals(start+5, c[0].y);
+ assertEquals(start+6, c[0].z);
+ assertEquals(start+7, c[0].w);
+
+ assertEquals(start+0, d[0].x);
+ assertEquals(start+1, d[0].y);
+ assertEquals(start+2, d[0].z);
+ assertEquals(start+3, d[0].w);
+}
+
+testInt32x4ArrayViewValues();
+
+function testViewOnInt32x4Array() {
+ var a = new Int32x4Array(4);
+ a[0] = SIMD.int32x4(1, 2, 3, 4);
+ a[1] = SIMD.int32x4(5, 6, 7, 8);
+ a[2] = SIMD.int32x4(9, 10, 11, 12);
+ a[3] = SIMD.int32x4(13, 14, 15, 16);
+ assertEquals(a[0].x, 1);
+ assertEquals(a[0].y, 2);
+ assertEquals(a[0].z, 3);
+ assertEquals(a[0].w, 4);
+
+ assertEquals(a[1].x, 5);
+ assertEquals(a[1].y, 6);
+ assertEquals(a[1].z, 7);
+ assertEquals(a[1].w, 8);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+ assertEquals(a[2].z, 11);
+ assertEquals(a[2].w, 12);
+
+ assertEquals(a[3].x, 13);
+ assertEquals(a[3].y, 14);
+ assertEquals(a[3].z, 15);
+ assertEquals(a[3].w, 16);
+
+ // Create view on a.
+ var b = new Uint32Array(a.buffer);
+ assertEquals(b.length, 16);
+ assertEquals(b.byteLength, 64);
+ b[2] = 99.0;
+ b[6] = 1.0;
+
+ // Observe changes in "a"
+ assertEquals(a[0].x, 1);
+ assertEquals(a[0].y, 2);
+ assertEquals(a[0].z, 99);
+ assertEquals(a[0].w, 4);
+
+ assertEquals(a[1].x, 5);
+ assertEquals(a[1].y, 6);
+ assertEquals(a[1].z, 1);
+ assertEquals(a[1].w, 8);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+ assertEquals(a[2].z, 11);
+ assertEquals(a[2].w, 12);
+
+ assertEquals(a[3].x, 13);
+ assertEquals(a[3].y, 14);
+ assertEquals(a[3].z, 15);
+ assertEquals(a[3].w, 16);
+}
+
+testViewOnInt32x4Array();
+
+function testArrayOfInt32x4() {
+ var a = [];
+ var a4 = new Int32x4Array(2);
+ for (var i = 0; i < a4.length; i++) {
+ a[i] = SIMD.int32x4(i, i + 1, i + 2, i + 3);
+ a4[i] = SIMD.int32x4(i, i + 1, i + 2, i + 3);
+ }
+
+ for (var i = 0; i < a4.length; i++) {
+ assertEquals(a[i].x, a4[i].x);
+ assertEquals(a[i].y, a4[i].y);
+ assertEquals(a[i].z, a4[i].z);
+ assertEquals(a[i].w, a4[i].w);
+ }
+}
+
+testArrayOfInt32x4();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object
+
+function testSIMDAbs() {
+ var a4 = SIMD.float32x4(1.0, -2.0, 3.0, -4.0);
+ var b4;
+ for (var i = 0; i < 100000; i++) {
+ b4 = SIMD.float32x4.abs(a4);
+ }
+
+ assertEquals(1.0, b4.x);
+ assertEquals(2.0, b4.y);
+ assertEquals(3.0, b4.z);
+ assertEquals(4.0, b4.w);
+}
+
+testSIMDAbs();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax
+
+function testFloat32x4Prototype() {
+ var a4 = SIMD.float32x4(1.0, -2.0, 3.0, -4.0);
+ SIMD.float32x4.prototype = {};
+ try {
+ var x = a4.x;
+ } catch (o) {
+ assertEquals(o instanceof TypeError, true);
+ assertEquals(o.message, "<unknown message this is not a float32x4 object.>");
+ }
+}
+
+testFloat32x4Prototype();
+testFloat32x4Prototype();
+%OptimizeFunctionOnNextCall(testFloat32x4Prototype);
+testFloat32x4Prototype();
+
+function testInt32x4Prototype() {
+ var a4 = SIMD.int32x4(1.0, -2.0, 3.0, -4.0);
+ SIMD.int32x4.prototype = {};
+ try {
+ var x = a4.x;
+ } catch (o) {
+ assertEquals(o instanceof TypeError, true);
+ assertEquals(o.message, "<unknown message this is not a int32x4 object.>");
+ }
+}
+
+testInt32x4Prototype();
+testInt32x4Prototype();
+%OptimizeFunctionOnNextCall(testInt32x4Prototype);
+testInt32x4Prototype();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax
+
+function testSIMDAbs(i) {
+ var a;
+ if (i < 3) {
+ a = SIMD.float32x4(1, 1, 1, 1);
+ } else {
+ a = SIMD.int32x4(2, 2, 2, 2);
+ }
+ return SIMD.float32x4.abs(a);
+}
+
+function tryTestSIMDAbs(i) {
+ var r = 0;
+ try {
+ r = testSIMDAbs(i);
+ } catch (o) {
+ assertEquals(o instanceof TypeError, true);
+ assertEquals(o.message, "<unknown message this is not a float32x4 object.>");
+ }
+}
+
+tryTestSIMDAbs(1);
+tryTestSIMDAbs(2);
+%OptimizeFunctionOnNextCall(testSIMDAbs);
+tryTestSIMDAbs(3);
# that the parser doesn't bit-rot. Change the values as needed when you add,
# remove or change runtime functions, but make sure we don't lose our ability
# to parse them!
-EXPECTED_FUNCTION_COUNT = 358
-EXPECTED_FUZZABLE_COUNT = 326
+EXPECTED_FUNCTION_COUNT = 373
+EXPECTED_FUZZABLE_COUNT = 335
EXPECTED_CCTEST_COUNT = 6
-EXPECTED_UNKNOWN_COUNT = 4
-EXPECTED_BUILTINS_COUNT = 798
+EXPECTED_UNKNOWN_COUNT = 10
+EXPECTED_BUILTINS_COUNT = 918
# Don't call these at all.
fallback="new DataView(new ArrayBuffer(8))")
return result
+ def _Float32x4(self, name, recursion_budget):
+ x = random.random()
+ y = random.random()
+ z = random.random()
+ w = random.random()
+ return self._Variable(name, "SIMD.float32x4(%s, %s, %s, %s)" %(x, y, z, w))
+
+ def _Float64x2(self, name, recursion_budget):
+ x = random.random()
+ y = random.random()
+ return self._Variable(name, "SIMD.float64x2(%s, %s)" %(x, y))
+
+ def _Int32x4(self, name, recursion_budget):
+ x = random.randint(-0x40000000, 0x3fffffff)
+ y = random.randint(-0x40000000, 0x3fffffff)
+ z = random.randint(-0x40000000, 0x3fffffff)
+ w = random.randint(-0x40000000, 0x3fffffff)
+ return self._Variable(name, "SIMD.int32x4(%s, %s, %s, %s)" %(x, y, z, w))
+
def _JSDate(self, name, recursion_budget):
die = random.random()
if die < 0.25:
"String": ["\"foo\"", _String],
"Symbol": ["Symbol(\"symbol\")", _Symbol],
"Uint32": ["32", _Uint32],
+ "Float32x4": ["SIMD.float32x4(0.0, 0.0, 0.0, 0.0)", _Float32x4],
+ "Float64x2": ["SIMD.float64x2(0.0, 0.0)", _Float64x2],
+ "Int32x4": ["SIMD.int32x4(0, 0, 0, 0)", _Int32x4],
}
s = ["// Copyright 2014 the V8 project authors. All rights reserved.",
"// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY",
"// Flags: --allow-natives-syntax --harmony"] + definitions
+ if function.name.find("Float32x4") != -1 or function.name.find("Float64x2") != -1 or function.name.find("Int32x4") != -1:
+ s = ["// Copyright 2014 the V8 project authors. All rights reserved.",
+ "// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY",
+ "// Flags: --allow-natives-syntax --harmony --simd-object"] + definitions
call = "%%%s%s(%s);" % (function.inline, function.name, ", ".join(argslist))
if throws:
s.append("try {")
'../../src/array-iterator.js',
'../../src/harmony-string.js',
'../../src/harmony-array.js',
- '../../src/harmony-math.js'
+ '../../src/harmony-math.js',
+ '../../src/simd128.js',
],
},
'actions': [
if macro_filename:
(consts, macros) = ReadMacros(ReadFile(macro_filename))
filter_chain.append(lambda l: ExpandConstants(l, consts))
+ filter_chain.append(lambda l: ExpandInlineMacros(l))
filter_chain.append(lambda l: ExpandMacros(l, macros))
filter_chain.extend([
RemoveCommentsAndTrailingWhitespace,
- ExpandInlineMacros,
Validate,
jsmin.JavaScriptMinifier().JSMinify
])
chromium_crosswalk_rev = 'b93afe0192aa9888d1246e8933d31272afecb7f0'
blink_crosswalk_rev = 'b656b39cc2eb71a9f4b70f8439c2d0a1ca54d619'
-v8_crosswalk_rev = '8baa2b8fb1a66d5842294840aed969164c52d978'
+v8_crosswalk_rev = '402ca0b81254dd9d7b376485d4582c6b6eac2185'
ozone_wayland_rev = '0a8caf9bc740d767464b2d1d16fec08ff2f91d1f'
crosswalk_git = 'https://github.com/crosswalk-project'
MAJOR=8
MINOR=37
-BUILD=183
+BUILD=186
PATCH=0
self.fullscreen_flag = ''
self.icon = ''
# android_name is only composed of alphabetic characters,
- # generated from the last segment of input package name.
- # It will be used for Android project name,
+ # it will be used for Android project name,
# APK file name and Activity name.
self.android_name = 'AppTemplate'
self.orientation = 'unspecified'
from xml.dom import minidom
-def VerifyPackageName(value):
- regex = r'^[a-z][a-z0-9_]*(\.[a-z][a-z0-9_]*)+$'
- descrpt = 'Each part of package'
- sample = 'org.xwalk.example, org.xwalk.example_'
+def VerifyAppName(value, mode='default'):
+ descrpt = 'The app'
+ sample = 'helloworld, hello world, hello_world, hello_world1'
+ regex = r'[a-zA-Z][\w ]*$'
if len(value) >= 128:
print('To be safe, the length of package name or app name '
'should be less than 128.')
sys.exit(6)
+ if mode == 'packagename':
+ regex = r'^[a-z][a-z0-9_]*(\.[a-z][a-z0-9_]*)+$'
+ descrpt = 'Each part of package'
+ sample = 'org.xwalk.example, org.xwalk.example_'
if not re.match(regex, value):
print('Error: %s name should be started with letters and should not '
orientation = app_info.orientation
package = app_info.package
app_name = app_info.app_name
- # Chinese character with unicode get from 'manifest.json' will cause
- # 'UnicodeEncodeError' when finally wrote to 'AndroidManifest.xml'.
- if isinstance(app_name, unicode):
- app_name = app_name.encode("utf-8")
- # If string start with '@' or '?', it will be treated as Android resource,
- # which will cause 'No resource found' error,
- # append a space before '@' or '?' to fix that.
- if app_name.startswith('@') or app_name.startswith('?'):
- app_name = ' ' + app_name
manifest_path = os.path.join(name, 'AndroidManifest.xml')
if not os.path.isfile(manifest_path):
print ('Please make sure AndroidManifest.xml'
sys.path.append('scripts/gyp')
from app_info import AppInfo
-from customize import VerifyPackageName, CustomizeAll, \
- ParseParameterForCompressor
+from customize import VerifyAppName, CustomizeAll, \
+ ParseParameterForCompressor, ReplaceSpaceWithUnderscore
from dex import AddExeExtensions
from handle_permissions import permission_mapping_table
from manifest_json_parser import HandlePermissionList
def Customize(options, app_info, manifest):
app_info.package = options.package
app_info.app_name = options.name
- # 'org.xwalk.my_first_app' => 'MyFirstApp'
- android_name = options.package.split('.')[-1].split('_')
- app_info.android_name = ''.join([i.capitalize() for i in android_name if i])
+ app_info.android_name = ReplaceSpaceWithUnderscore(options.name)
if options.app_version:
app_info.app_version = options.app_version
app_info.app_versionCode = MakeVersionCode(options)
if not options.name:
parser.error('An APK name is required. Please use the "--name" option.')
+ VerifyAppName(options.name)
if not options.package:
parser.error('A package name is required. Please use the "--package" '
'option.')
- VerifyPackageName(options.package)
+ VerifyAppName(options.package, 'packagename')
if (options.app_root and options.app_local_path and
not os.path.isfile(os.path.join(options.app_root,
#!/usr/bin/env python
-# coding: UTF-8
# Copyright (c) 2013, 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
import unittest
import warnings
+from customize import ReplaceSpaceWithUnderscore
+
def Clean(name, app_version):
if os.path.exists(name):
'--package=org.xwalk.example', self._mode]
out = RunCommand(cmd)
self.assertNotIn('An APK name is required', out)
- Clean('Example', '1.0.0')
+ Clean('Test_Example', '1.0.0')
+
+ invalid_chars = '\/:.*?"<>|-'
+ for c in invalid_chars:
+ invalid_name = '--name=Example' + c
+ cmd = ['python', 'make_apk.py', invalid_name,
+ '--app-version=1.0.0', '--package=org.xwalk.example',
+ '--app-url=http://www.intel.com', self._mode]
+ out = RunCommand(cmd)
+ self.assertTrue(out.find('invalid characters') != -1)
def testToolVersion(self):
cmd = ['python', 'make_apk.py', '--version']
Clean(name, '1.0.0')
-
- def verifyResultForAppName(self, app_name):
- android_manifest = 'Example/AndroidManifest.xml'
- self.assertTrue(os.path.exists(android_manifest))
- with open(android_manifest, 'r') as content_file:
+ def testInvalidCharacter(self):
+ version = '1.0.0'
+ start_with_letters = ' should be started with letters'
+ app_name_error = 'app name' + start_with_letters
+ package_name_error = 'package name' + start_with_letters
+ parse_error = 'parser error in manifest.json file'
+ directory = os.path.join('test_data', 'manifest', 'invalidchars')
+
+ manifest_path = os.path.join(directory, 'manifest_with_space_name.json')
+ result = GetResultWithOption(self._mode, manifest_path)
+ self.assertTrue(result.find(app_name_error) != -1)
+
+ manifest_path = os.path.join(directory, 'manifest_with_chinese_name.json')
+ result = GetResultWithOption(self._mode, manifest_path)
+ self.assertTrue(result.find(app_name_error) != -1)
+
+ manifest_path = os.path.join(directory, 'manifest_parse_error.json')
+ result = GetResultWithOption(self._mode, manifest_path)
+ self.assertTrue(result.find(parse_error) != -1)
+
+ manifest_path = os.path.join(directory, 'manifest_with_invalid_name.json')
+ result = GetResultWithOption(self._mode, manifest_path)
+ self.assertTrue(result.find(app_name_error) != -1)
+
+ manifest_path = os.path.join(directory, 'manifest_contain_space_name.json')
+ result = GetResultWithOption(self._mode, manifest_path)
+ self.assertTrue(result.find(app_name_error) == -1)
+
+ package = 'org.xwalk.example'
+ name = '_hello'
+ result = GetResultWithOption(self._mode, name=name, package=package)
+ self.assertTrue(result.find(app_name_error) != -1)
+
+ name = '123hello'
+ result = GetResultWithOption(self._mode, name=name, package=package)
+ self.assertTrue(result.find(app_name_error) != -1)
+
+ name = 'hello_'
+ result = GetResultWithOption(self._mode, name=name, package=package)
+ self.assertTrue(result.find(app_name_error) == -1)
+ Clean(name, version)
+
+
+ def VerifyResultForAppNameWithSpace(self, manifest=None, name=None,
+ package=None):
+ version = '1.0.0'
+ GetResultWithOption(manifest=manifest, name=name, package=package)
+ if name is None:
+ name = 'app name '
+ replaced_name = ReplaceSpaceWithUnderscore(name)
+ manifest = replaced_name + '/AndroidManifest.xml'
+ with open(manifest, 'r') as content_file:
content = content_file.read()
- label_name = 'android:label="%s"' % app_name
- self.assertIn(label_name, content)
- Clean('Example', '1.0.0')
+ self.assertTrue(os.path.exists(manifest))
+ self.assertTrue(name in content)
+ Clean(replaced_name, version)
- def testAppNameWithNonASCII (self):
-    cmd = ['python', 'make_apk.py', '--name=你好', '--app-version=1.0.0',
- '--package=org.xwalk.example', '--app-url=http://www.intel.com']
- RunCommand(cmd)
-    self.verifyResultForAppName('你好')
+ def testAppNameWithSpace(self):
+ name = 'app name'
+ package = 'org.xwalk.app_name'
- manifest_path = os.path.join('test_data', 'manifest', 'invalidchars',
- 'manifest_with_chinese_name.json')
- cmd = ['python', 'make_apk.py', '--package=org.xwalk.example',
- '--manifest=%s' % manifest_path]
- RunCommand(cmd)
-    self.verifyResultForAppName('你好')
+ self.VerifyResultForAppNameWithSpace(name=name, package=package)
+
+ name = 'app name '
+ self.VerifyResultForAppNameWithSpace(name=name, package=package)
+
+ directory = os.path.join('test_data', 'manifest', 'invalidchars')
+ manifest_path = os.path.join(directory, 'manifest_contain_space_name.json')
+ self.VerifyResultForAppNameWithSpace(manifest=manifest_path,
+ package=package)
def SuiteWithModeOption():
test_suite.addTest(TestMakeApk('testFullscreen'))
test_suite.addTest(TestMakeApk('testIconByOption'))
test_suite.addTest(TestMakeApk('testIconByManifest'))
+ test_suite.addTest(TestMakeApk('testInvalidCharacter'))
test_suite.addTest(TestMakeApk('testKeystore'))
test_suite.addTest(TestMakeApk('testManifest'))
test_suite.addTest(TestMakeApk('testManifestWithDeprecatedField'))
def SuiteWithEmptyModeOption():
# Gather all the tests for empty mode option.
test_suite = unittest.TestSuite()
- test_suite.addTest(TestMakeApk('testAppNameWithNonASCII'))
+ test_suite.addTest(TestMakeApk('testAppNameWithSpace'))
test_suite.addTest(TestMakeApk('testCompressor'))
test_suite.addTest(TestMakeApk('testCustomizeFile'))
test_suite.addTest(TestMakeApk('testEmptyMode'))
}
GURL Application::GetStartURL(const LaunchParams& params,
- LaunchEntryPoint* used) {
+ LaunchEntryPoint* used) {
if (params.entry_points & StartURLKey) {
- GURL url = GetURLFromRelativePathKey(keys::kStartURLKey);
+ GURL url = GetAbsoluteURLFromKey(keys::kStartURLKey);
if (url.is_valid()) {
*used = StartURLKey;
return url;
}
if (params.entry_points & LaunchLocalPathKey) {
- GURL url = GetURLFromRelativePathKey(
+ GURL url = GetAbsoluteURLFromKey(
GetLaunchLocalPathKey(data_->GetPackageType()));
if (url.is_valid()) {
*used = LaunchLocalPathKey;
}
if (params.entry_points & URLKey) {
- GURL url = GetURLFromURLKey();
+ LOG(WARNING) << "Deprecated key '" << keys::kDeprecatedURLKey
+ << "' found. Please migrate to using '" << keys::kStartURLKey
+ << "' instead.";
+ GURL url = GetAbsoluteURLFromKey(keys::kDeprecatedURLKey);
if (url.is_valid()) {
*used = URLKey;
return url;
}
}
- LOG(WARNING) << "Failed to find a valid launch URL for the app.";
+ LOG(WARNING) << "Failed to find a valid start URL in the manifest.";
return GURL();
}
return ui::SHOW_STATE_DEFAULT;
}
-GURL Application::GetURLFromURLKey() {
+GURL Application::GetAbsoluteURLFromKey(const std::string& key) {
const Manifest* manifest = data_->GetManifest();
- std::string url_string;
- if (!manifest->GetString(keys::kURLKey, &url_string))
- return GURL();
+ std::string source;
- return GURL(url_string);
-}
-
-GURL Application::GetURLFromRelativePathKey(const std::string& key) {
- const Manifest* manifest = data_->GetManifest();
- std::string entry_page;
- if (!manifest->GetString(key, &entry_page)
- || entry_page.empty()) {
+ if (!manifest->GetString(key, &source) || source.empty()) {
if (data_->GetPackageType() == Package::XPK)
return GURL();
+ // FIXME: Refactor this Widget specific code out.
base::ThreadRestrictions::SetIOAllowed(true);
- base::FileEnumerator iter(data_->Path(), true,
- base::FileEnumerator::FILES,
- FILE_PATH_LITERAL("index.*"));
- int priority = arraysize(kDefaultWidgetEntryPage);
+ base::FileEnumerator iter(
+ data_->Path(), true,
+ base::FileEnumerator::FILES,
+ FILE_PATH_LITERAL("index.*"));
+ size_t priority = arraysize(kDefaultWidgetEntryPage);
for (base::FilePath file = iter.Next(); !file.empty(); file = iter.Next()) {
for (size_t i = 0; i < arraysize(kDefaultWidgetEntryPage); ++i) {
if (file.BaseName().MaybeAsASCII() == kDefaultWidgetEntryPage[i] &&
i < priority) {
- entry_page = kDefaultWidgetEntryPage[i];
+ source = kDefaultWidgetEntryPage[i];
priority = i;
}
}
}
- if (entry_page.empty())
+ if (source.empty())
return GURL();
}
- return data_->GetResourceURL(entry_page);
+  std::size_t found = source.find("://");
+ if (found == std::string::npos)
+ return data_->GetResourceURL(source);
+ return GURL(source);
}
void Application::Terminate() {
ui::WindowShowState GetWindowShowStateWGT(const LaunchParams& params);
ui::WindowShowState GetWindowShowStateXPK(const LaunchParams& params);
- GURL GetURLFromURLKey();
-
- GURL GetURLFromRelativePathKey(const std::string& key);
+ GURL GetAbsoluteURLFromKey(const std::string& key);
void NotifyTermination();
const base::CommandLine::StringVector& args = cmd_line.GetArgs();
if (!args.empty()) {
std::string app_id = std::string(args[0].begin(), args[0].end());
- if (ApplicationData::IsIDValid(app_id)) {
+ if (IsValidApplicationID(app_id)) {
run_default_message_loop = LaunchWithCommandLineParam(app_id, cmd_line);
return true;
}
namespace xwalk {
namespace application {
+// The [tizen_app_id] contains a dot, making it an invalid object path.
+// For this reason we replace it with an underscore '_'.
+std::string GetAppObjectPathFromAppId(const std::string& app_id) {
+#if defined(OS_TIZEN)
+ std::string ret(app_id);
+ std::replace(ret.begin(), ret.end(), '.', '_');
+ return ret;
+#else
+ return app_id;
+#endif
+}
+
dbus::ObjectPath GetRunningPathForAppID(const std::string& app_id) {
- return dbus::ObjectPath(kRunningManagerDBusPath.value() + "/" + app_id);
+ return dbus::ObjectPath(kRunningManagerDBusPath.value() + "/" +
+ GetAppObjectPathFromAppId(app_id));
}
RunningApplicationsManager::RunningApplicationsManager(
// FIXME(cmarcelo): ApplicationService will tell us when new applications
// appear (with DidLaunchApplication()) and we create new managed objects
// in D-Bus based on that.
- dbus::ObjectPath path = AddObject(application->id(), method_call->GetSender(),
- application);
+ dbus::ObjectPath path =
+ AddObject(GetAppObjectPathFromAppId(application->id()),
+ method_call->GetSender(),
+ application);
scoped_ptr<dbus::Response> response =
dbus::Response::FromMethodCall(method_call);
#include "xwalk/application/common/manifest_handler.h"
#include "xwalk/application/common/manifest_handlers/permissions_handler.h"
#include "xwalk/application/common/manifest_handlers/widget_handler.h"
+#include "xwalk/application/common/manifest_handlers/tizen_application_handler.h"
#include "xwalk/application/common/permission_policy_manager.h"
#include "content/public/common/url_constants.h"
#include "url/url_util.h"
new xwalk::application::Manifest(source_type,
scoped_ptr<base::DictionaryValue>(manifest_data.DeepCopy())));
- if (!InitApplicationID(manifest.get(), path, explicit_id, &error)) {
- *error_message = base::UTF16ToUTF8(error);
- return NULL;
- }
-
std::vector<InstallWarning> install_warnings;
if (!manifest->ValidateManifest(error_message, &install_warnings)) {
return NULL;
manifest.Pass());
application->install_warnings_.swap(install_warnings);
- if (!application->Init(&error)) {
+ if (!application->Init(explicit_id, &error)) {
*error_message = base::UTF16ToUTF8(error);
return NULL;
}
base::DictionaryValue manifest;
// FIXME: define permissions!
- manifest.SetString(application_manifest_keys::kURLKey, url_spec);
+ manifest.SetString(application_manifest_keys::kStartURLKey, url_spec);
+ // FIXME: Why use URL as name?
manifest.SetString(application_manifest_keys::kNameKey, url_spec);
- manifest.SetString(application_manifest_keys::kVersionKey, "0");
+ manifest.SetString(application_manifest_keys::kXWalkVersionKey, "0");
scoped_refptr<ApplicationData> application_data =
ApplicationData::Create(base::FilePath(), Manifest::COMMAND_LINE,
manifest, app_id, error_message);
}
// static
-bool ApplicationData::IsIDValid(const std::string& id) {
- std::string temp = StringToLowerASCII(id);
-
-#if defined(OS_TIZEN)
- // An ID with 10 characters is most likely a legacy Tizen ID.
- if (temp.size() == kLegacyTizenIdSize) {
- for (size_t i = 0; i < kLegacyTizenIdSize; ++i) {
- const char c = temp[i];
- const bool valid = (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z');
- if (!valid)
- return false;
- }
-
- return true;
- }
-#endif
-
- // Verify that the id is legal.
- if (temp.size() != (kIdSize * 2))
- return false;
-
- // We only support lowercase IDs, because IDs can be used as URL components
- // (where GURL will lowercase it).
- for (size_t i = 0; i < temp.size(); ++i)
- if (temp[i] < 'a' || temp[i] > 'p')
- return false;
-
- return true;
-}
-
-// static
GURL ApplicationData::GetBaseURLFromApplicationId(
const std::string& application_id) {
return GURL(std::string(xwalk::application::kApplicationScheme) +
return GetManifest()->IsHosted();
}
-// static
-bool ApplicationData::InitApplicationID(xwalk::application::Manifest* manifest,
- const base::FilePath& path,
- const std::string& explicit_id,
- base::string16* error) {
- std::string application_id;
-#if defined(OS_TIZEN)
- if (manifest->HasKey(keys::kTizenAppIdKey)) {
- if (!manifest->GetString(keys::kTizenAppIdKey, &application_id)) {
- NOTREACHED() << "Could not get Tizen application key";
- return false;
- }
- }
-
- if (!application_id.empty()) {
- manifest->SetApplicationID(application_id);
- return true;
- }
-#endif
-
- if (!explicit_id.empty()) {
- manifest->SetApplicationID(explicit_id);
- return true;
- }
-
- application_id = GenerateIdForPath(path);
- if (application_id.empty()) {
- NOTREACHED() << "Could not create ID from path.";
- return false;
- }
- manifest->SetApplicationID(application_id);
- return true;
-}
-
ApplicationData::ApplicationData(const base::FilePath& path,
scoped_ptr<xwalk::application::Manifest> manifest)
: manifest_version_(0),
return manifest_->GetType();
}
-bool ApplicationData::Init(base::string16* error) {
+bool ApplicationData::Init(const std::string& explicit_id,
+ base::string16* error) {
DCHECK(error);
+ ManifestHandlerRegistry* registry =
+ ManifestHandlerRegistry::GetInstance(GetPackageType());
+ if (!registry->ParseAppManifest(this, error))
+ return false;
+ if (!LoadID(explicit_id, error))
+ return false;
if (!LoadName(error))
return false;
if (!LoadVersion(error))
- return false;
+ return false;
if (!LoadDescription(error))
- return false;
- if (!LoadManifestVersion(error))
return false;
application_url_ = ApplicationData::GetBaseURLFromApplicationId(ID());
- ManifestHandlerRegistry* registry =
- ManifestHandlerRegistry::GetInstance(GetPackageType());
- if (!registry->ParseAppManifest(this, error))
- return false;
-
finished_parsing_manifest_ = true;
return true;
}
+bool ApplicationData::LoadID(const std::string& explicit_id,
+ base::string16* error) {
+ std::string application_id;
+#if defined(OS_TIZEN)
+ if (GetPackageType() == Package::WGT) {
+ const TizenApplicationInfo* tizen_app_info =
+ static_cast<TizenApplicationInfo*>(GetManifestData(
+ widget_keys::kTizenApplicationKey));
+ CHECK(tizen_app_info);
+ application_id = tizen_app_info->id();
+ } else if (manifest_->HasKey(keys::kTizenAppIdKey)) {
+ if (!manifest_->GetString(keys::kTizenAppIdKey, &application_id)) {
+ NOTREACHED() << "Could not get Tizen application key";
+ return false;
+ }
+ }
+
+ if (!application_id.empty()) {
+ manifest_->SetApplicationID(application_id);
+ return true;
+ }
+#endif
+
+ if (!explicit_id.empty()) {
+ manifest_->SetApplicationID(explicit_id);
+ return true;
+ }
+
+ application_id = GenerateIdForPath(path_);
+ if (application_id.empty()) {
+ NOTREACHED() << "Could not create ID from path.";
+ return false;
+ }
+ manifest_->SetApplicationID(application_id);
+ return true;
+}
+
bool ApplicationData::LoadName(base::string16* error) {
DCHECK(error);
base::string16 localized_name;
bool ApplicationData::LoadVersion(base::string16* error) {
DCHECK(error);
std::string version_str;
- std::string version_key(GetVersionKey(GetPackageType()));
- if (!manifest_->GetString(version_key, &version_str) &&
- package_type_ == Package::XPK) {
- *error = base::ASCIIToUTF16(errors::kInvalidVersion);
- return false;
+ version_.reset(new base::Version());
+
+ if (package_type_ == Package::WGT) {
+ bool ok = manifest_->GetString(widget_keys::kVersionKey, &version_str);
+ if (!ok) {
+ *error = base::ASCIIToUTF16(errors::kInvalidVersion);
+ return false;
+ }
+
+ version_.reset(new base::Version(version_str));
+ return true;
+ }
+
+ // W3C Manifest (XPK and hosted):
+
+ bool hasDeprecatedKey = manifest_->HasKey(keys::kDeprecatedVersionKey);
+ bool hasKey = manifest_->HasKey(keys::kXWalkVersionKey);
+
+ if (!hasKey && !hasDeprecatedKey) {
+ // xwalk_version is optional.
+ return true;
+ }
+
+ bool ok = false;
+ if (hasKey) {
+ if (hasDeprecatedKey) {
+ LOG(WARNING) << "Deprecated key '" << keys::kDeprecatedVersionKey
+ << "' found in addition to '" << keys::kXWalkVersionKey
+ << "'. Consider removing.";
+ }
+ ok = manifest_->GetString(keys::kXWalkVersionKey, &version_str);
}
+
+ if (!hasKey && hasDeprecatedKey) {
+ LOG(WARNING) << "Deprecated key '" << keys::kDeprecatedVersionKey
+ << "' found. Please migrate to using '" << keys::kXWalkVersionKey
+ << "' instead.";
+ ok = manifest_->GetString(keys::kDeprecatedVersionKey, &version_str);
+ }
+
version_.reset(new base::Version(version_str));
- if (package_type_ == Package::XPK &&
- (!version_->IsValid() || version_->components().size() > 4)) {
+
+ if (!ok || !version_->IsValid() || version_->components().size() > 4) {
*error = base::ASCIIToUTF16(errors::kInvalidVersion);
+ version_.reset(new base::Version());
return false;
}
- return true;
+
+ return ok;
}
bool ApplicationData::LoadDescription(base::string16* error) {
DCHECK(error);
- if (manifest_->HasKey(keys::kDescriptionKey) &&
- !manifest_->GetString(keys::kDescriptionKey, &description_) &&
- package_type_ == Package::XPK) {
- *error = base::ASCIIToUTF16(errors::kInvalidDescription);
- return false;
+ // FIXME: Better to assert on use from Widget.
+ if (package_type_ != Package::XPK)
+ return true; // No error.
+
+ bool hasDeprecatedKey = manifest_->HasKey(keys::kDeprecatedDescriptionKey);
+ bool hasKey = manifest_->HasKey(keys::kXWalkDescriptionKey);
+
+ if (hasKey) {
+ if (hasDeprecatedKey) {
+ LOG(WARNING) << "Deprecated key '" << keys::kDeprecatedDescriptionKey
+ << "' found in addition to '" << keys::kXWalkDescriptionKey
+ << "'. Consider removing.";
+ }
+ bool ok = manifest_->GetString(keys::kXWalkDescriptionKey, &description_);
+ if (!ok)
+ *error = base::ASCIIToUTF16(errors::kInvalidDescription);
+ return ok;
}
- return true;
-}
-bool ApplicationData::LoadManifestVersion(base::string16* error) {
- DCHECK(error);
- // Get the original value out of the dictionary so that we can validate it
- // more strictly.
- if (manifest_->value()->HasKey(keys::kManifestVersionKey)) {
- int manifest_version = 1;
- if (!manifest_->GetInteger(keys::kManifestVersionKey, &manifest_version) ||
- manifest_version < 1) {
- if (package_type_ == Package::XPK) {
- *error = base::ASCIIToUTF16(errors::kInvalidManifestVersion);
- return false;
- }
- }
+ if (hasDeprecatedKey) {
+ LOG(WARNING) << "Deprecated key '" << keys::kDeprecatedDescriptionKey
+ << "' found. Please migrate to using '" << keys::kXWalkDescriptionKey
+ << "' instead.";
+ bool ok = manifest_->GetString(
+ keys::kDeprecatedDescriptionKey, &description_);
+ if (!ok)
+ *error = base::ASCIIToUTF16(errors::kInvalidDescription);
+ return ok;
}
- manifest_version_ = manifest_->GetManifestVersion();
+ // No error but also no description found.
return true;
}
static scoped_refptr<ApplicationData> Create(const GURL& url,
std::string* error_message);
- // Checks to see if the application has a valid ID.
- static bool IsIDValid(const std::string& id);
-
Manifest::Type GetType() const;
// Returns an absolute url to a resource inside of an application. The
const std::string& Name() const { return name_; }
const std::string& NonLocalizedName() const { return non_localized_name_; }
const std::string& Description() const { return description_; }
- int ManifestVersion() const { return manifest_version_; }
const Manifest* GetManifest() const {
return manifest_.get();
friend class base::RefCountedThreadSafe<ApplicationData>;
friend class ApplicationStorageImpl;
- // Chooses the application ID for an application based on a variety of
- // criteria. The chosen ID will be set in |manifest|.
- static bool InitApplicationID(Manifest* manifest,
- const base::FilePath& path,
- const std::string& explicit_id,
- base::string16* error);
-
ApplicationData(const base::FilePath& path,
scoped_ptr<Manifest> manifest);
virtual ~ApplicationData();
// Initialize the application from a parsed manifest.
- bool Init(base::string16* error);
+ bool Init(const std::string& explicit_id, base::string16* error);
+  // Chooses the application ID for an application based on a variety of
+  // criteria. The chosen ID will be stored in the application's manifest.
+  bool LoadID(const std::string& explicit_id, base::string16* error);
// The following are helpers for InitFromValue to load various features of the
// application from the manifest.
bool LoadName(base::string16* error);
bool LoadVersion(base::string16* error);
bool LoadDescription(base::string16* error);
- bool LoadManifestVersion(base::string16* error);
// The application's human-readable name. Name is used for display purpose. It
// might be wrapped with unicode bidi control characters so that it is
namespace xwalk {
namespace application_manifest_keys {
-const char kAppKey[] = "app";
-const char kCSPKey[] = "csp";
-const char kCSPKeyLegacy[] = "content_security_policy";
-const char kDescriptionKey[] = "description";
+
+// Official fields (ordered as in the spec):
+
+const char kNameKey[] = "name";
const char kDisplay[] = "display";
+const char kStartURLKey[] = "start_url";
+const char kCSPKey[] = "csp";
+
+// Deprecated fields:
+
+const char kAppKey[] = "app";
const char kLaunchLocalPathKey[] = "app.launch.local_path";
+const char kLaunchWebURLKey[] = "app.launch.web_url";
+const char kDeprecatedURLKey[] = "url";
+const char kDeprecatedVersionKey[] = "version";
+const char kDeprecatedDescriptionKey[] = "description";
+const char kDeprecatedCSPKey[] = "content_security_policy";
const char kLaunchScreen[] = "launch_screen";
const char kLaunchScreenDefault[] = "launch_screen.default";
const char kLaunchScreenImageBorderDefault[] =
"launch_screen.landscape.image_border";
const char kLaunchScreenImageBorderPortrait[] =
"launch_screen.portrait.image_border";
-const char kLaunchScreenLandscape[] = "launch_screen.landscape";
-const char kLaunchScreenPortrait[] = "launch_screen.portrait";
-const char kLaunchScreenReadyWhen[] = "launch_screen.ready_when";
-const char kLaunchWebURLKey[] = "app.launch.web_url";
-const char kManifestVersionKey[] = "manifest_version";
-const char kNameKey[] = "name";
+const char kLaunchScreenLandscape[] =
+ "launch_screen.landscape";
+const char kLaunchScreenPortrait[] =
+ "launch_screen.portrait";
+const char kLaunchScreenReadyWhen[] =
+ "launch_screen.ready_when";
+
+// XWalk W3C Manifest (XPK) extensions:
+
const char kPermissionsKey[] = "permissions";
-const char kStartURLKey[] = "start_url";
-const char kURLKey[] = "url";
-const char kVersionKey[] = "version";
-const char kWebURLsKey[] = "app.urls";
+const char kXWalkVersionKey[] = "xwalk_version";
+const char kXWalkDescriptionKey[] = "xwalk_description";
const char kXWalkHostsKey[] = "xwalk_hosts";
const char kXWalkLaunchScreen[] = "xwalk_launch_screen";
const char kXWalkLaunchScreenDefault[] = "xwalk_launch_screen.default";
"xwalk_launch_screen.landscape.image_border";
const char kXWalkLaunchScreenImageBorderPortrait[] =
"xwalk_launch_screen.portrait.image_border";
-const char kXWalkLaunchScreenLandscape[] = "xwalk_launch_screen.landscape";
-const char kXWalkLaunchScreenPortrait[] = "xwalk_launch_screen.portrait";
-const char kXWalkLaunchScreenReadyWhen[] = "xwalk_launch_screen.ready_when";
+const char kXWalkLaunchScreenLandscape[] =
+ "xwalk_launch_screen.landscape";
+const char kXWalkLaunchScreenPortrait[] =
+ "xwalk_launch_screen.portrait";
+const char kXWalkLaunchScreenReadyWhen[] =
+ "xwalk_launch_screen.ready_when";
#if defined(OS_TIZEN)
const char kTizenAppIdKey[] = "tizen_app_id";
// manifest keys for widget applications.
namespace application_widget_keys {
+
const char kNamespaceKey[] = "@namespace";
const char kXmlLangKey[] = "@lang";
const char kDefaultLocaleKey[] = "widget.@defaultlocale";
const char kTizenSplashScreenKey[] = "widget.splash-screen";
const char kTizenSplashScreenSrcKey[] = "@src";
#endif
+
} // namespace application_widget_keys
#if defined(OS_TIZEN)
"Invalid value for 'description'.";
const char kInvalidKey[] =
"Value 'key' is missing or invalid.";
-const char kInvalidManifestVersion[] =
- "Invalid value for 'manifest_version'. Must be an integer greater than "
- "zero.";
const char kInvalidName[] =
"Required value 'name' is missing or invalid.";
const char kInvalidVersion[] =
"Manifest is not valid JSON.";
const char kManifestUnreadable[] =
"Manifest file is missing or unreadable.";
-const char kPlatformAppNeedsManifestVersion2[] =
- "Packaged apps need manifest_version set to >= 2";
} // namespace application_manifest_errors
namespace application {
if (package_type == Package::WGT)
return application_widget_keys::kVersionKey;
- return application_manifest_keys::kVersionKey;
-}
-
-const char* GetWebURLsKey(Package::Type package_type) {
- if (package_type == Package::WGT)
- return application_widget_keys::kWebURLsKey;
-
- return application_manifest_keys::kWebURLsKey;
+ return application_manifest_keys::kXWalkVersionKey;
}
const char* GetLaunchLocalPathKey(Package::Type package_type) {
// Keys used in JSON representation of applications.
namespace xwalk {
namespace application_manifest_keys {
- extern const char kAppKey[];
- extern const char kCSPKey[];
- extern const char kCSPKeyLegacy[];
- extern const char kDescriptionKey[];
+  // Official fields (ordered as in the spec):
+
+ extern const char kNameKey[];
+ // extern const char kShortName[];
+ // extern const char kIcons[];
extern const char kDisplay[];
+ // extern const char kOrientation[];
+ extern const char kStartURLKey[];
+ extern const char kCSPKey[];
+
+ // Deprecated fields:
+
+ extern const char kAppKey[];
+ extern const char kLaunchWebURLKey[];
extern const char kLaunchLocalPathKey[];
+ extern const char kDeprecatedURLKey[];
+ extern const char kDeprecatedVersionKey[];
+ extern const char kDeprecatedDescriptionKey[];
+ extern const char kDeprecatedCSPKey[];
extern const char kLaunchScreen[];
extern const char kLaunchScreenDefault[];
extern const char kLaunchScreenImageBorderDefault[];
extern const char kLaunchScreenLandscape[];
extern const char kLaunchScreenPortrait[];
extern const char kLaunchScreenReadyWhen[];
- extern const char kLaunchWebURLKey[];
- extern const char kManifestVersionKey[];
- extern const char kNameKey[];
+
+ // XWalk extensions:
+
extern const char kPermissionsKey[];
- extern const char kStartURLKey[];
- extern const char kURLKey[];
- extern const char kVersionKey[];
- extern const char kWebURLsKey[];
+ extern const char kXWalkVersionKey[];
+ extern const char kXWalkDescriptionKey[];
extern const char kXWalkHostsKey[];
extern const char kXWalkLaunchScreen[];
extern const char kXWalkLaunchScreenDefault[];
namespace application_manifest_errors {
extern const char kInvalidDescription[];
extern const char kInvalidKey[];
- extern const char kInvalidManifestVersion[];
extern const char kInvalidName[];
extern const char kInvalidVersion[];
extern const char kManifestParseError[];
extern const char kManifestUnreadable[];
- extern const char kPlatformAppNeedsManifestVersion2[];
} // namespace application_manifest_errors
namespace application {
typedef application::Manifest Manifest;
const char* GetNameKey(Package::Type type);
-const char* GetVersionKey(Package::Type type);
-const char* GetWebURLsKey(Package::Type type);
const char* GetLaunchLocalPathKey(Package::Type type);
const char* GetCSPKey(Package::Type type);
#if defined(OS_TIZEN)
#include "sql/transaction.h"
#include "xwalk/application/common/application_storage.h"
#include "xwalk/application/common/application_storage_constants.h"
+#include "xwalk/application/common/id_util.h"
namespace db_fields = xwalk::application_storage_constants;
namespace xwalk {
while (smt.Step()) {
const std::string& id = smt.ColumnString(0);
- if (!ApplicationData::IsIDValid(id)) {
+ if (!IsValidApplicationID(id)) {
LOG(ERROR) << "Failed to obtain Application ID from SQL query";
return false;
}
}
base::FilePath GetApplicationPath(const std::string& app_id) {
- std::string ail_id = RawAppIdToAppIdForTizenPkgmgrDB(app_id);
ail_filter_h filter;
ail_error_e ret = ail_filter_new(&filter);
if (ret != AIL_ERROR_OK) {
return base::FilePath();
}
- ret = ail_filter_add_str(filter, AIL_PROP_X_SLP_APPID_STR, ail_id.c_str());
+ ret = ail_filter_add_str(filter, AIL_PROP_X_SLP_APPID_STR, app_id.c_str());
if (ret != AIL_ERROR_OK) {
LOG(ERROR) << "Failed to init AIL filter.";
ail_filter_destroy(filter);
base::FilePath app_path = GetApplicationPath(app_id);
std::string error_str;
- return LoadApplication(app_path, RawAppIdToCrosswalkAppId(app_id),
- Manifest::INTERNAL, &error_str);
+ return LoadApplication(app_path, app_id, Manifest::INTERNAL, &error_str);
}
namespace {
-int pkgmgrinfo_app_list_cb(pkgmgrinfo_appinfo_h handle, void *user_data) {
+ail_cb_ret_e appinfo_get_app_id_cb(
+ const ail_appinfo_h appinfo, void* user_data) {
std::vector<std::string>* app_ids =
static_cast<std::vector<std::string>*>(user_data);
- char* appid = NULL;
- pkgmgrinfo_appinfo_get_appid(handle, &appid);
- CHECK(appid);
+ char* app_id;
+ ail_appinfo_get_str(appinfo, AIL_PROP_X_SLP_APPID_STR, &app_id);
+ if (app_id)
+ app_ids->push_back(app_id);
- app_ids->push_back(TizenPkgmgrDBAppIdToRawAppId(appid));
- return 0;
+ return AIL_CB_RET_CONTINUE;
}
+const char kXWalkPackageType[] = "wgt";
+
} // namespace
bool ApplicationStorageImpl::GetInstalledApplicationIDs(
- std::vector<std::string>& app_ids) { // NOLINT
- pkgmgrinfo_appinfo_filter_h handle;
- int ret = pkgmgrinfo_appinfo_filter_create(&handle);
- if (ret != PMINFO_R_OK) {
- LOG(ERROR) << "Failed to create pkgmgrinfo filter.";
+ std::vector<std::string>& app_ids) { // NOLINT
+ ail_filter_h filter;
+ ail_error_e ret = ail_filter_new(&filter);
+ if (ret != AIL_ERROR_OK) {
+ LOG(ERROR) << "Failed to create AIL filter.";
return false;
}
-
- ret = pkgmgrinfo_appinfo_filter_add_string(
- handle, PMINFO_APPINFO_PROP_APP_TYPE, "webapp");
- if (ret != PMINFO_R_OK) {
- LOG(ERROR) << "Failed to init pkgmgrinfo filter.";
- pkgmgrinfo_appinfo_filter_destroy(handle);
+  // Restrict the listing to web apps (installed from WGT and XPK packages).
+ ret = ail_filter_add_str(
+ filter, AIL_PROP_X_SLP_PACKAGETYPE_STR, kXWalkPackageType);
+ if (ret != AIL_ERROR_OK) {
+ LOG(ERROR) << "Failed to init AIL filter.";
+ ail_filter_destroy(filter);
return false;
}
- ret = pkgmgrinfo_appinfo_filter_foreach_appinfo(
- handle, pkgmgrinfo_app_list_cb, &app_ids);
- if (ret != PMINFO_R_OK) {
- LOG(ERROR) << "Failed to apply pkgmgrinfo filter.";
- pkgmgrinfo_appinfo_filter_destroy(handle);
+ int count;
+ ret = ail_filter_count_appinfo(filter, &count);
+ if (ret != AIL_ERROR_OK) {
+ LOG(ERROR) << "Failed to count AIL app info.";
+ ail_filter_destroy(filter);
return false;
}
- pkgmgrinfo_appinfo_filter_destroy(handle);
+ if (count > 0)
+ ail_filter_list_appinfo_foreach(filter, appinfo_get_app_id_cb, &app_ids);
+ ail_filter_destroy(filter);
return true;
}
TestInit();
base::DictionaryValue manifest;
manifest.SetString(keys::kNameKey, "no name");
- manifest.SetString(keys::kVersionKey, "0");
+ manifest.SetString(keys::kXWalkVersionKey, "0");
manifest.SetString("a", "b");
std::string error;
scoped_refptr<ApplicationData> application = ApplicationData::Create(
TestInit();
base::DictionaryValue manifest;
manifest.SetString(keys::kNameKey, "no name");
- manifest.SetString(keys::kVersionKey, "0");
+ manifest.SetString(keys::kXWalkVersionKey, "0");
manifest.SetString("a", "b");
std::string error;
scoped_refptr<ApplicationData> application =
base::DictionaryValue* manifest = new base::DictionaryValue;
manifest->SetString("a", "b");
manifest->SetString(keys::kNameKey, "no name");
- manifest->SetString(keys::kVersionKey, "0");
+ manifest->SetString(keys::kXWalkVersionKey, "0");
value->Set("manifest", manifest);
value->SetDouble("install_time", 0);
db_value->Set("test_id", value.release());
TestInit();
base::DictionaryValue manifest;
manifest.SetString(keys::kNameKey, "no name");
- manifest.SetString(keys::kVersionKey, "0");
+ manifest.SetString(keys::kXWalkVersionKey, "0");
manifest.SetString("a", "b");
std::string error;
scoped_refptr<ApplicationData> application =
TestInit();
base::DictionaryValue manifest;
manifest.SetString(keys::kNameKey, "no name");
- manifest.SetString(keys::kVersionKey, "0");
+ manifest.SetString(keys::kXWalkVersionKey, "0");
manifest.SetString("a", "b");
std::string error;
scoped_refptr<ApplicationData> application =
ASSERT_EQ(2, Manifest::COMMAND_LINE);
}
-TEST(ApplicationTest, IsIDValid) {
- EXPECT_TRUE(ApplicationData::IsIDValid("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"));
- EXPECT_TRUE(ApplicationData::IsIDValid("pppppppppppppppppppppppppppppppp"));
- EXPECT_TRUE(ApplicationData::IsIDValid("abcdefghijklmnopabcdefghijklmnop"));
- EXPECT_TRUE(ApplicationData::IsIDValid("ABCDEFGHIJKLMNOPABCDEFGHIJKLMNOP"));
- EXPECT_FALSE(ApplicationData::IsIDValid("abcdefghijklmnopabcdefghijklmno"));
- EXPECT_FALSE(ApplicationData::IsIDValid("abcdefghijklmnopabcdefghijklmnopa"));
- EXPECT_FALSE(ApplicationData::IsIDValid("0123456789abcdef0123456789abcdef"));
- EXPECT_FALSE(ApplicationData::IsIDValid("abcdefghijklmnopabcdefghijklmnoq"));
- EXPECT_FALSE(ApplicationData::IsIDValid("abcdefghijklmnopabcdefghijklmno0"));
-}
-
} // namespace application
} // namespace xwalk
namespace application {
namespace {
#if defined(OS_TIZEN)
-const char kTizenAppIdPattern[] = "\\A[0-9a-zA-Z]{10}[.][0-9a-zA-Z]{1,52}\\z";
+const char kWGTAppIdPattern[] = "\\A([0-9a-zA-Z]{10})[.][0-9a-zA-Z]{1,52}\\z";
+const char kXPKAppIdPattern[] = "\\Axwalk[.]([a-p]{32})\\z";
const std::string kAppIdPrefix("xwalk.");
#endif
+const size_t kIdSize = 16;
} // namespace
// Converts a normal hexadecimal string into the alphabet used by applications.
}
}
-// First 16 bytes of SHA256 hashed public key.
-const size_t kIdSize = 16;
-
-#if defined(OS_TIZEN)
-const size_t kLegacyTizenIdSize = 10;
-#endif
-
std::string GenerateId(const std::string& input) {
uint8 hash[kIdSize];
crypto::SHA256HashString(input, hash, sizeof(hash));
std::string output = StringToLowerASCII(base::HexEncode(hash, sizeof(hash)));
ConvertHexadecimalToIDAlphabet(&output);
+#if defined(OS_TIZEN)
+ return kAppIdPrefix + output;
+#else
return output;
+#endif
}
std::string GenerateIdForPath(const base::FilePath& path) {
return GenerateId(path_bytes);
}
+bool IsValidApplicationID(const std::string& id) {
#if defined(OS_TIZEN)
-std::string RawAppIdToCrosswalkAppId(const std::string& id) {
- if (RE2::PartialMatch(id, kTizenAppIdPattern))
- return GenerateId(id);
- return id;
-}
+ if (RE2::FullMatch(id, kWGTAppIdPattern) ||
+ RE2::FullMatch(id, kXPKAppIdPattern))
+ return true;
+ return false;
+#endif
-std::string RawAppIdToAppIdForTizenPkgmgrDB(const std::string& id) {
- if (RE2::PartialMatch(id, kTizenAppIdPattern))
- return id;
- return kAppIdPrefix + id;
-}
+ std::string temp = StringToLowerASCII(id);
+ // Verify that the id is legal.
+ if (temp.size() != (kIdSize * 2))
+ return false;
-std::string TizenPkgmgrDBAppIdToRawAppId(const std::string& id) {
- std::string raw_id;
- if (RE2::FullMatch(id, "xwalk.(\\w+)", &raw_id))
- return raw_id;
- return id;
-}
+ // We only support lowercase IDs, because IDs can be used as URL components
+ // (where GURL will lowercase it).
+ for (size_t i = 0; i < temp.size(); ++i)
+ if (temp[i] < 'a' || temp[i] > 'p')
+ return false;
-std::string GetTizenAppId(ApplicationData* application) {
- if (application->GetPackageType() == xwalk::application::Package::XPK)
- return application->ID();
+ return true;
+}
- const TizenApplicationInfo* tizen_app_info =
- static_cast<TizenApplicationInfo*>(application->GetManifestData(
- application_widget_keys::kTizenApplicationKey));
- return tizen_app_info->id();
+#if defined(OS_TIZEN)
+std::string GetPackageIdFromAppId(const std::string& app_id) {
+ std::string package_id;
+ if (RE2::FullMatch(app_id, kWGTAppIdPattern, &package_id) ||
+ RE2::FullMatch(app_id, kXPKAppIdPattern, &package_id)) {
+ return package_id;
+ } else {
+ LOG(ERROR) << "Cannot get package_id from invalid app id";
+ return app_id;
+ }
}
#endif
namespace xwalk {
namespace application {
-
-// The number of bytes in a legal id.
-extern const size_t kIdSize;
-
-#if defined(OS_TIZEN)
-// The number of bytes in a legal legacy Tizen id.
-extern const size_t kLegacyTizenIdSize;
-#endif
-
// Generates an application ID from arbitrary input. The same input string will
// always generate the same output ID.
std::string GenerateId(const std::string& input);
// Used while developing applications, before they have a key.
std::string GenerateIdForPath(const base::FilePath& path);
+// Returns true if |id| is a valid application ID.
+bool IsValidApplicationID(const std::string& id);
+
#if defined(OS_TIZEN)
-// If this appid is a xpk app id(crosswalk_32bytes_app_id), return itself.
-// If this appid is a wgt app id(tizen_app_id), convert it to
-// crosswalk_32bytes_app_id and return it.
-std::string RawAppIdToCrosswalkAppId(const std::string& id);
-
-// If this appid is a xpk app id(crosswalk_32bytes_app_id), return
-// xwalk.crosswalk_32bytes_app_id.
-// If this appid is a wgt app id(tizen_app_id), return itself.
-// It is better to storage crosswalk_32bytes_app_id on tizen pkgmgr db
-// for xpk, but it must be an "." on appid or it cannot insert to tizen pkgmgr
-// db, so we have to have a "xwalk." as it's prefix.
-std::string RawAppIdToAppIdForTizenPkgmgrDB(const std::string& id);
-// Does the opposite to the above function.
-std::string TizenPkgmgrDBAppIdToRawAppId(const std::string& id);
-
-// For xpk, app_id == crosswalk_32bytes_app_id == this->ID(),
-// For wgt, app_id == tizen_wrt_10bytes_package_id.app_name,
-std::string GetTizenAppId(ApplicationData* application);
+std::string GetPackageIdFromAppId(const std::string& app_id);
#endif
} // namespace application
"this_string_is_longer_than_a_single_sha256_hash_digest"));
}
+TEST(IDUtilTest, IsValidApplicationID) {
+ EXPECT_TRUE(IsValidApplicationID("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"));
+ EXPECT_TRUE(IsValidApplicationID("pppppppppppppppppppppppppppppppp"));
+ EXPECT_TRUE(IsValidApplicationID("abcdefghijklmnopabcdefghijklmnop"));
+ EXPECT_TRUE(IsValidApplicationID("ABCDEFGHIJKLMNOPABCDEFGHIJKLMNOP"));
+ EXPECT_FALSE(IsValidApplicationID("abcdefghijklmnopabcdefghijklmno"));
+ EXPECT_FALSE(IsValidApplicationID("abcdefghijklmnopabcdefghijklmnopa"));
+ EXPECT_FALSE(IsValidApplicationID("0123456789abcdef0123456789abcdef"));
+ EXPECT_FALSE(IsValidApplicationID("abcdefghijklmnopabcdefghijklmnoq"));
+ EXPECT_FALSE(IsValidApplicationID("abcdefghijklmnopabcdefghijklmno0"));
+}
+
} // namespace application
} // namespace xwalk
#include "xwalk/application/common/application_manifest_constants.h"
#include "xwalk/application/common/permission_policy_manager.h"
#include "xwalk/application/common/application_storage.h"
+#include "xwalk/application/common/id_util.h"
#include "xwalk/application/common/installer/tizen/packageinfo_constants.h"
#include "xwalk/runtime/common/xwalk_paths.h"
bool PackageInstaller::Update(const std::string& app_id,
const base::FilePath& path) {
- if (!ApplicationData::IsIDValid(app_id)) {
+ if (!IsValidApplicationID(app_id)) {
LOG(ERROR) << "The given application id " << app_id << " is invalid.";
return false;
}
return true;
}
-bool PackageInstaller::Uninstall(const std::string& id) {
+bool PackageInstaller::Uninstall(const std::string& app_id) {
+ if (!IsValidApplicationID(app_id)) {
+ LOG(ERROR) << "The given application id " << app_id << " is invalid.";
+ return false;
+ }
+
bool result = true;
- scoped_refptr<ApplicationData> app_data = storage_->GetApplicationData(id);
+ scoped_refptr<ApplicationData> app_data =
+ storage_->GetApplicationData(app_id);
if (!app_data) {
- LOG(ERROR) << "Failed to find application with id " << id
+ LOG(ERROR) << "Failed to find application with id " << app_id
<< " among the installed ones.";
result = false;
}
- if (!storage_->RemoveApplication(id)) {
- LOG(ERROR) << "Cannot uninstall application with id " << id
+ if (!storage_->RemoveApplication(app_id)) {
+ LOG(ERROR) << "Cannot uninstall application with id " << app_id
<< "; application is not installed.";
result = false;
}
base::FilePath resources;
CHECK(PathService::Get(xwalk::DIR_DATA_PATH, &resources));
- resources = resources.Append(kApplicationsDir).AppendASCII(id);
+ resources = resources.Append(kApplicationsDir).AppendASCII(app_id);
if (base::DirectoryExists(resources) &&
!base::DeleteFile(resources, true)) {
LOG(ERROR) << "Error occurred while trying to remove application with id "
- << id << "; Cannot remove all resources.";
+ << app_id << "; Cannot remove all resources.";
result = false;
}
}
}
-// For xpk, package_id => [crosswalk_32bytes_app_id]
-// For wgt, package_id => [tizen_wrt_10bytes_package_id]
-std::string GetTizenPackageId(
- xwalk::application::ApplicationData* application) {
- if (application->GetPackageType() == xwalk::application::Package::XPK)
- return application->ID();
-
- const xwalk::application::TizenApplicationInfo* tizen_app_info =
- static_cast<xwalk::application::TizenApplicationInfo*>(
- application->GetManifestData(widget_keys::kTizenApplicationKey));
- return tizen_app_info->package();
-}
-
bool GeneratePkgInfoXml(xwalk::application::ApplicationData* application,
const std::string& icon_name,
const base::FilePath& app_dir,
!base::CreateDirectory(app_dir))
return false;
- std::string package_id = GetTizenPackageId(application);
- std::string tizen_app_id = GetTizenAppId(application);
+ std::string package_id =
+ xwalk::application::GetPackageIdFromAppId(application->ID());
base::FilePath execute_path =
- app_dir.AppendASCII("bin/").AppendASCII(tizen_app_id);
+ app_dir.AppendASCII("bin/").AppendASCII(application->ID());
std::string stripped_name = application->Name();
FILE* file = base::OpenFile(xml_path, "w");
xml_writer.WriteElement("description", application->Description());
xml_writer.StartElement("ui-application");
- xml_writer.AddAttribute("appid",
- xwalk::application::RawAppIdToAppIdForTizenPkgmgrDB(tizen_app_id));
+ xml_writer.AddAttribute("appid", application->ID());
xml_writer.AddAttribute("exec", execute_path.MaybeAsASCII());
xml_writer.AddAttribute("type", "webapp");
xml_writer.AddAttribute("taskmanage", "true");
}
bool CreateAppSymbolicLink(const base::FilePath& app_dir,
- const std::string& tizen_app_id) {
+ const std::string& app_id) {
base::FilePath execute_path =
- app_dir.AppendASCII("bin/").AppendASCII(tizen_app_id);
+ app_dir.AppendASCII("bin/").AppendASCII(app_id);
if (!base::CreateDirectory(execute_path.DirName())) {
LOG(ERROR) << "Could not create directory '"
base::FilePath data_dir;
CHECK(PathService::Get(xwalk::DIR_DATA_PATH, &data_dir));
- std::string tizen_app_id = GetTizenAppId(app_data);
base::FilePath app_dir =
data_dir.AppendASCII(info::kAppDir).AppendASCII(app_id);
base::FilePath xml_path = data_dir.AppendASCII(info::kAppDir).AppendASCII(
return false;
}
- if (!CreateAppSymbolicLink(app_dir, tizen_app_id)) {
+ if (!CreateAppSymbolicLink(app_dir, app_id)) {
LOG(ERROR) << "Failed to create symbolic link for " << app_id;
return false;
}
base::FilePath data_dir;
CHECK(PathService::Get(xwalk::DIR_DATA_PATH, &data_dir));
- std::string tizen_app_id = GetTizenAppId(app_data);
base::FilePath app_dir =
data_dir.AppendASCII(info::kAppDir).AppendASCII(app_id);
base::FilePath new_xml_path = data_dir.AppendASCII(info::kAppDir).AppendASCII(
return false;
}
- if (!CreateAppSymbolicLink(app_dir, tizen_app_id))
+ if (!CreateAppSymbolicLink(app_dir, app_id))
return false;
base::FilePath icon =
ReferenceData reference_data;
std::set<std::string> reference_set;
ReferenceHashMap reference_hash_map;
- for (int i = 0; i < reference_vec.size(); ++i) {
+ for (size_t i = 0; i < reference_vec.size(); ++i) {
refer_node = reference_vec[i];
uri = GetAttribute(refer_node, kTokenURI);
if (uri.empty()) {
GetChildren(properties_node, ns, kTokenSignatureProperty);
std::string Id, uri, element_name, profile_uri, role_uri;
xmlNodePtr sign_property_node, child;
- for (int i = 0; i < prop_vec.size(); i++) {
+ for (size_t i = 0; i < prop_vec.size(); i++) {
sign_property_node = prop_vec[i];
Id = GetAttribute(sign_property_node, kTokenID);
child = sign_property_node->children;
}
if (!value.empty())
+#if defined(OS_TIZEN)
+ id_ = value;
+#else
id_ = GenerateId(value);
+#endif
is_valid_ = true;
data_(value.Pass()),
i18n_data_(new base::DictionaryValue),
type_(TYPE_UNKNOWN) {
+ // FIXME: Hosted apps can contain start_url. Below is wrong.
if (data_->Get(keys::kStartURLKey, NULL)) {
type_ = TYPE_PACKAGED_APP;
} else if (data_->HasKey(keys::kAppKey)) {
- if (data_->Get(keys::kWebURLsKey, NULL) ||
- data_->Get(keys::kLaunchWebURLKey, NULL)) {
+ if (data_->Get(keys::kLaunchWebURLKey, NULL)) {
type_ = TYPE_HOSTED_APP;
} else if (data_->Get(keys::kLaunchLocalPathKey, NULL)) {
type_ = TYPE_PACKAGED_APP;
data_->Get(widget_keys::kWidgetKey, NULL))
ParseWGTI18n();
+ // FIXME: Sounds like a setter calling a getter for the same value.
SetSystemLocale(GetSystemLocale());
}
bool Manifest::ValidateManifest(
std::string* error,
std::vector<InstallWarning>* warnings) const {
- // TODO(changbin): field 'manifest_version' of manifest.json is not clearly
- // defined at present. Temporarily disable check of this field.
- /*
- *error = "";
- if (type_ == Manifest::TYPE_PACKAGED_APP && GetManifestVersion() < 2) {
- *error = errors::kPlatformAppNeedsManifestVersion2;
- return false;
- }
- */
-
// TODO(xiang): support features validation
return true;
}
return other && data_->Equals(other->value());
}
-int Manifest::GetManifestVersion() const {
- int manifest_version = 1;
- data_->GetInteger(keys::kManifestVersionKey, &manifest_version);
- return manifest_version;
-}
-
bool Manifest::CanAccessPath(const std::string& path) const {
return true;
}
bool ValidateManifest(std::string* error,
std::vector<InstallWarning>* warnings) const;
- // The version of this application's manifest. We increase the manifest
- // version when making breaking changes to the application system. If the
- // manifest contains no explicit manifest version, this returns the current
- // system default.
- int GetManifestVersion() const;
-
// Returns the manifest type.
Type GetType() const { return type_; }
// are temporally removed, since they had affected some tests and legacy apps.
TEST_F(CSPHandlerTest, DISABLED_NoCSP) {
manifest.SetString(keys::kNameKey, "no name");
- manifest.SetString(keys::kVersionKey, "0");
+ manifest.SetString(keys::kXWalkVersionKey, "0");
scoped_refptr<ApplicationData> application = CreateApplication();
EXPECT_TRUE(application.get());
EXPECT_EQ(GetCSPInfo(application)->GetDirectives().size(), 2);
TEST_F(CSPHandlerTest, EmptyCSP) {
manifest.SetString(keys::kNameKey, "no name");
- manifest.SetString(keys::kVersionKey, "0");
+ manifest.SetString(keys::kXWalkVersionKey, "0");
manifest.SetString(keys::kCSPKey, "");
scoped_refptr<ApplicationData> application = CreateApplication();
EXPECT_TRUE(application.get());
TEST_F(CSPHandlerTest, CSP) {
manifest.SetString(keys::kNameKey, "no name");
- manifest.SetString(keys::kVersionKey, "0");
+ manifest.SetString(keys::kXWalkVersionKey, "0");
manifest.SetString(keys::kCSPKey, "default-src 'self' ");
scoped_refptr<ApplicationData> application = CreateApplication();
EXPECT_TRUE(application.get());
#if defined(OS_TIZEN)
TEST_F(CSPHandlerTest, WGTEmptyCSP) {
manifest.SetString(widget_keys::kNameKey, "no name");
- manifest.SetString(widget_keys::kVersionKey, "0");
+ manifest.SetString(widget_keys::kXWalkVersionKey, "0");
manifest.SetString(widget_keys::kCSPKey, "");
scoped_refptr<ApplicationData> application = CreateApplication();
EXPECT_TRUE(application.get());
TEST_F(CSPHandlerTest, WGTCSP) {
manifest.SetString(widget_keys::kNameKey, "no name");
- manifest.SetString(widget_keys::kVersionKey, "0");
+ manifest.SetString(widget_keys::kXWalkVersionKey, "0");
manifest.SetString(widget_keys::kCSPKey, "default-src 'self' ");
scoped_refptr<ApplicationData> application = CreateApplication();
EXPECT_TRUE(application.get());
public:
virtual void SetUp() OVERRIDE {
manifest.SetString(keys::kNameKey, "no name");
- manifest.SetString(keys::kVersionKey, "0");
+ manifest.SetString(keys::kXWalkVersionKey, "0");
}
scoped_refptr<ApplicationData> CreateApplication() {
TEST_F(PermissionsHandlerTest, NonePermission) {
base::DictionaryValue manifest;
manifest.SetString(keys::kNameKey, "no name");
- manifest.SetString(keys::kVersionKey, "0");
+ manifest.SetString(keys::kXWalkVersionKey, "0");
std::string error;
scoped_refptr<ApplicationData> application = ApplicationData::Create(
base::FilePath(),
TEST_F(PermissionsHandlerTest, EmptyPermission) {
base::DictionaryValue manifest;
manifest.SetString(keys::kNameKey, "no name");
- manifest.SetString(keys::kVersionKey, "0");
+ manifest.SetString(keys::kXWalkVersionKey, "0");
base::ListValue* permissions = new base::ListValue;
manifest.Set(keys::kPermissionsKey, permissions);
std::string error;
TEST_F(PermissionsHandlerTest, DeviceAPIPermission) {
base::DictionaryValue manifest;
manifest.SetString(keys::kNameKey, "no name");
- manifest.SetString(keys::kVersionKey, "0");
+ manifest.SetString(keys::kXWalkVersionKey, "0");
base::ListValue* permissions = new base::ListValue;
permissions->AppendString("geolocation");
manifest.Set(keys::kPermissionsKey, permissions);
TEST_F(ManifestTest, ApplicationData) {
scoped_ptr<base::DictionaryValue> manifest_value(new base::DictionaryValue());
manifest_value->SetString(keys::kNameKey, "extension");
- manifest_value->SetString(keys::kVersionKey, "1");
+ manifest_value->SetString(keys::kXWalkVersionKey, "1");
manifest_value->SetString("unknown_key", "foo");
scoped_ptr<Manifest> manifest(
MutateManifest(
&manifest, "foo", new base::StringValue("blah"));
EXPECT_FALSE(manifest->Equals(manifest2.get()));
-};
+}
// Verifies that key restriction based on type works.
TEST_F(ManifestTest, ApplicationTypes) {
scoped_ptr<base::DictionaryValue> value(new base::DictionaryValue());
value->SetString(keys::kNameKey, "extension");
- value->SetString(keys::kVersionKey, "1");
+ value->SetString(keys::kXWalkVersionKey, "1");
scoped_ptr<Manifest> manifest(
new Manifest(Manifest::COMMAND_LINE, value.Pass()));
// Hosted app.
MutateManifest(
- &manifest, keys::kWebURLsKey, new base::ListValue());
- AssertType(manifest.get(), Manifest::TYPE_HOSTED_APP);
- MutateManifest(
- &manifest, keys::kWebURLsKey, NULL);
- MutateManifest(
&manifest, keys::kLaunchWebURLKey, new base::StringValue("foo"));
AssertType(manifest.get(), Manifest::TYPE_HOSTED_APP);
MutateManifest(
&manifest, keys::kLaunchWebURLKey, NULL);
-};
+}
} // namespace application
} // namespace xwalk
int xwalk_change_cmdline(int argc, char** argv, const char* app_id) {
// Change /proc/<pid>/cmdline to app exec path. See XWALK-1722 for details.
- char* app_id_for_db = strdup(
- xwalk::application::RawAppIdToAppIdForTizenPkgmgrDB(app_id).c_str());
pkgmgrinfo_appinfo_h handle;
char* exec_path = NULL;
- if (pkgmgrinfo_appinfo_get_appinfo(app_id_for_db, &handle) != PMINFO_R_OK ||
+ if (pkgmgrinfo_appinfo_get_appinfo(app_id, &handle) != PMINFO_R_OK ||
pkgmgrinfo_appinfo_get_exec(handle, &exec_path) != PMINFO_R_OK ||
!exec_path) {
- fprintf(stderr, "Couldn't find exec path for application: %s\n",
- app_id_for_db);
+ fprintf(stderr, "Couldn't find exec path for application: %s\n", app_id);
return -1;
}
for (int i = 0; i < argc; ++i)
memset(argv[i], 0, strlen(argv[i]));
strncpy(argv[0], exec_path, strlen(exec_path)+1);
- g_free(app_id_for_db);
pkgmgrinfo_appinfo_destroy_appinfo(handle);
return 0;
}
app_ids.at(i).c_str());
continue;
}
-#if defined(OS_TIZEN)
- g_print("%s %s\n",
- GetTizenAppId(app_data).c_str(),
- app_data->Name().c_str());
-#else
- g_print("%s %s\n", app_data->ID().c_str(), app_data->Name().c_str());
-#endif
+ g_print("%s %s\n", app_ids.at(i).c_str(), app_data->Name().c_str());
}
g_print("-----------------------------------------------------\n");
#if defined(SHARED_PROCESS_MODE)
TerminateIfRunning(uninstall_appid);
#endif
-#if defined(OS_TIZEN)
- std::string crosswalk_app_id =
- xwalk::application::RawAppIdToCrosswalkAppId(uninstall_appid);
- uninstall_appid = strdup(crosswalk_app_id.c_str());
-#endif
success = installer->Uninstall(uninstall_appid);
} else {
success = list_applications(storage.get());
// Useful values might be "valgrind" or "xterm -e gdb --args".
const char kXWalkExtensionCmdPrefix[] = "xwalk-extension-cmd-prefix";
-// Disable XWalkExtensionSystem and all extensions only for Android OS
+// Disable XWalkExtensionSystem and all extensions
const char kXWalkDisableExtensions[] = "disable-xwalk-extensions";
} // namespace switches
%endif
Name: crosswalk
-Version: 8.37.183.0
+Version: 8.37.186.0
Release: 0
Summary: Crosswalk is an app runtime based on Chromium
License: (BSD-3-Clause and LGPL-2.1+)
# version bump.
tar --update --file "${TAR_FILE}" \
--exclude-vcs --exclude=LayoutTests \
- --exclude=src/out --directory="${BASE_SRC_DIR}" \
+ --exclude=src/out --exclude=src/third_party/android_tools \
+ --directory="${BASE_SRC_DIR}" \
--transform="s:^:crosswalk/:S" \
src
private void onActivityStateChange(Activity activity, int newState) {
assert(getActivity() == activity);
switch (newState) {
+ case ActivityState.STARTED:
+ onShow();
+ break;
case ActivityState.PAUSED:
pauseTimers();
- onHide();
break;
case ActivityState.RESUMED:
- onShow();
resumeTimers();
break;
case ActivityState.DESTROYED:
onDestroy();
break;
+ case ActivityState.STOPPED:
+ onHide();
+ break;
default:
break;
}
render_view_host_ext_->SetOriginAccessWhitelist(url, match_patterns);
std::string csp;
- ManifestGetString(manifest, keys::kCSPKey, keys::kCSPKeyLegacy, &csp);
+ ManifestGetString(manifest, keys::kCSPKey, keys::kDeprecatedCSPKey, &csp);
RuntimeContext* runtime_context =
XWalkRunner::GetInstance()->runtime_context();
CHECK(runtime_context);
command_line->AppendSwitch(switches::kAllowFileAccessFromFiles);
// Enable SIMD.JS API by default.
- /*std::string js_flags("--simd_object");
+ std::string js_flags("--simd_object");
if (command_line->HasSwitch(switches::kJavaScriptFlags)) {
js_flags += " ";
js_flags +=
command_line->GetSwitchValueASCII(switches::kJavaScriptFlags);
}
- command_line->AppendSwitchASCII(switches::kJavaScriptFlags, js_flags);*/
+ command_line->AppendSwitchASCII(switches::kJavaScriptFlags, js_flags);
startup_url_ = GetURLFromCommandLine(*command_line);
}
runtime_context_.reset(new RuntimeContext);
app_extension_bridge_.reset(new XWalkAppExtensionBridge());
-#if defined(OS_ANDROID)
CommandLine* cmd_line = CommandLine::ForCurrentProcess();
if (!cmd_line->HasSwitch(switches::kXWalkDisableExtensions))
-#endif
- {
extension_service_.reset(new extensions::XWalkExtensionService(
app_extension_bridge_.get()));
- }
CreateComponents();
app_extension_bridge_->SetApplicationSystem(app_component_->app_system());
// These variables are used to export some values from the browser process
// side to the extension side, such as application IDs and whatnot.
- virtual void InitializeRuntimeVariablesForExtensions(
+ void InitializeRuntimeVariablesForExtensions(
const content::RenderProcessHost* host,
base::ValueMap* runtime_variables);
#endif
}
-void XWalkRunnerTizen::InitializeRuntimeVariablesForExtensions(
- const content::RenderProcessHost* host,
- base::ValueMap* variables) {
- application::Application* app = app_system()->application_service()->
- GetApplicationByRenderHostID(host->GetID());
-
- if (app) {
- (*variables)["app_id"] = base::Value::CreateStringValue(app->id());
- (*variables)["tizen_app_id"] = base::Value::CreateStringValue(
- application::RawAppIdToAppIdForTizenPkgmgrDB(
- application::GetTizenAppId(app->data())));
- }
-}
-
} // namespace xwalk
friend class XWalkRunner;
XWalkRunnerTizen();
- virtual void InitializeRuntimeVariablesForExtensions(
- const content::RenderProcessHost* host,
- base::ValueMap* runtime_variables) OVERRIDE;
-
TizenLocaleListener tizen_locale_listener_;
};
render_frame()->GetWebFrame(), context);
}
+#if defined(OS_TIZEN)
+ virtual void DidCommitProvisionalLoad(bool is_new_navigation) OVERRIDE {
+ blink::WebFrame* frame = render_frame()->GetWebFrame();
+ GURL url(frame->document().url());
+ if (url.SchemeIs(application::kApplicationScheme)) {
+ blink::WebSecurityOrigin origin = frame->document().securityOrigin();
+ origin.grantLoadLocalResources();
+ }
+ }
+#endif
+
private:
extensions::XWalkExtensionRendererController* extension_controller_;
}
void XWalkContentRendererClient::RenderThreadStarted() {
-#if defined(OS_ANDROID)
CommandLine* cmd_line = CommandLine::ForCurrentProcess();
if (!cmd_line->HasSwitch(switches::kXWalkDisableExtensions))
-#endif
- {
extension_controller_.reset(
new extensions::XWalkExtensionRendererController(this));
- }
blink::WebString application_scheme(
base::ASCIIToUTF16(application::kApplicationScheme));