// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_HYDROGEN_INSTRUCTIONS_H_
#define V8_HYDROGEN_INSTRUCTIONS_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "allocation.h"
-#include "code-stubs.h"
-#include "data-flow.h"
-#include "deoptimizer.h"
-#include "small-pointer-list.h"
-#include "string-stream.h"
-#include "unique.h"
-#include "v8conversions.h"
-#include "v8utils.h"
-#include "zone.h"
+#include "src/allocation.h"
+#include "src/code-stubs.h"
+#include "src/conversions.h"
+#include "src/data-flow.h"
+#include "src/deoptimizer.h"
+#include "src/hydrogen-types.h"
+#include "src/small-pointer-list.h"
+#include "src/string-stream.h"
+#include "src/unique.h"
+#include "src/utils.h"
+#include "src/zone.h"
namespace v8 {
namespace internal {
V(AbnormalExit) \
V(AccessArgumentsAt) \
V(Add) \
+ V(AllocateBlockContext) \
V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(OsrEntry) \
V(Parameter) \
V(Power) \
- V(PushArgument) \
+ V(PushArguments) \
V(RegExpLiteral) \
V(Return) \
V(Ror) \
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
+ V(StoreFrameContext) \
V(StoreGlobalCell) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(WrapReceiver)
#define GVN_TRACKED_FLAG_LIST(V) \
- V(Maps) \
V(NewSpacePromotion)
#define GVN_UNTRACKED_FLAG_LIST(V) \
V(ElementsPointer) \
V(GlobalVars) \
V(InobjectFields) \
+ V(Maps) \
V(OsrEntries) \
V(ExternalMemory) \
V(StringChars) \
};
-class HType V8_FINAL {
- public:
- static HType None() { return HType(kNone); }
- static HType Tagged() { return HType(kTagged); }
- static HType TaggedPrimitive() { return HType(kTaggedPrimitive); }
- static HType TaggedNumber() { return HType(kTaggedNumber); }
- static HType Smi() { return HType(kSmi); }
- static HType HeapNumber() { return HType(kHeapNumber); }
- static HType Float32x4() { return HType(kFloat32x4); }
- static HType Int32x4() { return HType(kInt32x4); }
- static HType String() { return HType(kString); }
- static HType Boolean() { return HType(kBoolean); }
- static HType NonPrimitive() { return HType(kNonPrimitive); }
- static HType JSArray() { return HType(kJSArray); }
- static HType JSObject() { return HType(kJSObject); }
-
- // Return the weakest (least precise) common type.
- HType Combine(HType other) {
- return HType(static_cast<Type>(type_ & other.type_));
- }
-
- bool Equals(const HType& other) const {
- return type_ == other.type_;
- }
-
- bool IsSubtypeOf(const HType& other) {
- return Combine(other).Equals(other);
- }
-
- bool IsTaggedPrimitive() const {
- return ((type_ & kTaggedPrimitive) == kTaggedPrimitive);
- }
-
- bool IsTaggedNumber() const {
- return ((type_ & kTaggedNumber) == kTaggedNumber);
- }
-
- bool IsSmi() const {
- return ((type_ & kSmi) == kSmi);
- }
-
- bool IsHeapNumber() const {
- return ((type_ & kHeapNumber) == kHeapNumber);
- }
-
- bool IsFloat32x4() const {
- return ((type_ & kFloat32x4) == kFloat32x4);
- }
-
- bool IsInt32x4() const {
- return ((type_ & kInt32x4) == kInt32x4);
- }
-
- bool IsSIMD128() const {
- return IsFloat32x4() || IsInt32x4();
- }
-
- bool IsString() const {
- return ((type_ & kString) == kString);
- }
-
- bool IsNonString() const {
- return IsTaggedPrimitive() || IsSmi() || IsHeapNumber() || IsSIMD128() ||
- IsBoolean() || IsJSArray();
- }
-
- bool IsBoolean() const {
- return ((type_ & kBoolean) == kBoolean);
- }
-
- bool IsNonPrimitive() const {
- return ((type_ & kNonPrimitive) == kNonPrimitive);
- }
-
- bool IsJSArray() const {
- return ((type_ & kJSArray) == kJSArray);
- }
-
- bool IsJSObject() const {
- return ((type_ & kJSObject) == kJSObject);
- }
-
- bool IsHeapObject() const {
- return IsHeapNumber() || IsSIMD128() || IsString() ||
- IsBoolean() || IsNonPrimitive();
- }
-
- bool ToStringOrToNumberCanBeObserved(Representation representation) {
- switch (type_) {
- case kTaggedPrimitive: // fallthru
- case kTaggedNumber: // fallthru
- case kSmi: // fallthru
- case kHeapNumber: // fallthru
- case kFloat32x4: // fallthru
- case kInt32x4: // fallthru
- case kString: // fallthru
- case kBoolean:
- return false;
- case kJSArray: // fallthru
- case kJSObject:
- return true;
- case kTagged:
- break;
- }
- return !representation.IsSmiOrInteger32() && !representation.IsDouble();
- }
-
- static HType TypeFromValue(Handle<Object> value);
- static HType TypeFromRepresentation(Representation representation);
-
- const char* ToString();
-
- private:
- enum Type {
- kNone = 0x0, // 0000 0000 0000 0000
- kTagged = 0x1, // 0000 0000 0000 0001
- kTaggedPrimitive = 0x5, // 0000 0000 0000 0101
- kTaggedNumber = 0xd, // 0000 0000 0000 1101
- kSmi = 0x1d, // 0000 0000 0001 1101
- kHeapNumber = 0x2d, // 0000 0000 0010 1101
- kFloat32x4 = 0x45, // 0000 0000 0100 0101
- kInt32x4 = 0x85, // 0000 0000 1000 0101
- kString = 0x105, // 0000 0001 0000 0101
- kBoolean = 0x205, // 0000 0010 1000 0101
- kNonPrimitive = 0x401, // 0000 0100 0000 0001
- kJSObject = 0xc01, // 0000 1100 0000 0001
- kJSArray = 0x1c01 // 0001 1100 0000 0001
- };
-
- // Make sure type fits in int16.
- STATIC_ASSERT(kJSArray < (1 << (2 * kBitsPerByte)));
-
- explicit HType(Type t) : type_(t) { }
-
- int16_t type_;
-};
-
-
class HUseListNode: public ZoneObject {
public:
HUseListNode(HValue* value, int index, HUseListNode* tail)
// flag.
kUint32,
kHasNoObservableSideEffects,
+ // Indicates an instruction shouldn't be replaced by optimization, this flag
+ // is useful to set in cases where recomputing a value is cheaper than
+ // extending the value's live range and spilling it.
+ kCantBeReplaced,
// Indicates the instruction is live during dead code elimination.
kIsLive,
HYDROGEN_ABSTRACT_INSTRUCTION_LIST(DECLARE_PREDICATE)
#undef DECLARE_PREDICATE
+ bool IsBitwiseBinaryShift() {
+ return IsShl() || IsShr() || IsSar();
+ }
+
HValue(HType type = HType::Tagged())
: block_(NULL),
id_(kNoNumber),
type_(type),
use_list_(NULL),
range_(NULL),
+#ifdef DEBUG
+ range_poisoned_(false),
+#endif
flags_(0) {}
virtual ~HValue() {}
if (t.IsSmi()) return Representation::Smi();
if (t.IsHeapNumber()) return Representation::Double();
if (t.IsFloat32x4()) return Representation::Float32x4();
+ if (t.IsFloat64x2()) return Representation::Float64x2();
if (t.IsInt32x4()) return Representation::Int32x4();
if (t.IsHeapObject()) return r;
return Representation::None();
HType type() const { return type_; }
void set_type(HType new_type) {
- ASSERT(new_type.IsSubtypeOf(type_));
+ // TODO(ningxin): for SIMD ops, the initial type is None which
+ // hit the following ASSERT.
+ // ASSERT(new_type.IsSubtypeOf(type_));
type_ = new_type;
}
- bool IsHeapObject() {
- return representation_.IsHeapObject() || type_.IsHeapObject();
- }
-
// There are HInstructions that do not really change a value, they
// only add pieces of information to it (like bounds checks, map checks,
// smi checks...).
return result;
}
- Range* range() const { return range_; }
- // TODO(svenpanne) We should really use the null object pattern here.
- bool HasRange() const { return range_ != NULL; }
+ Range* range() const {
+ ASSERT(!range_poisoned_);
+ return range_;
+ }
+ bool HasRange() const {
+ ASSERT(!range_poisoned_);
+ return range_ != NULL;
+ }
+#ifdef DEBUG
+ void PoisonRange() { range_poisoned_ = true; }
+#endif
void AddNewRange(Range* r, Zone* zone);
void RemoveLastAddedRange();
void ComputeInitialRange(Zone* zone);
virtual void PrintTo(StringStream* stream) = 0;
void PrintNameTo(StringStream* stream);
void PrintTypeTo(StringStream* stream);
- void PrintRangeTo(StringStream* stream);
void PrintChangesTo(StringStream* stream);
const char* Mnemonic() const;
// Returns true conservatively if the program might be able to observe a
// ToString() operation on this value.
bool ToStringCanBeObserved() const {
- return type().ToStringOrToNumberCanBeObserved(representation());
+ return ToStringOrToNumberCanBeObserved();
}
// Returns true conservatively if the program might be able to observe a
// ToNumber() operation on this value.
bool ToNumberCanBeObserved() const {
- return type().ToStringOrToNumberCanBeObserved(representation());
+ return ToStringOrToNumberCanBeObserved();
}
MinusZeroMode GetMinusZeroMode() {
return false;
}
+ bool ToStringOrToNumberCanBeObserved() const {
+ if (type().IsTaggedPrimitive()) return false;
+ if (type().IsJSObject()) return true;
+ return !representation().IsSmiOrInteger32() && !representation().IsDouble();
+ }
+
virtual Representation RepresentationFromInputs() {
return representation();
}
- Representation RepresentationFromUses();
+ virtual Representation RepresentationFromUses();
Representation RepresentationFromUseRequirements();
bool HasNonSmiUse();
virtual void UpdateRepresentation(Representation new_rep,
HType type_;
HUseListNode* use_list_;
Range* range_;
+#ifdef DEBUG
+ bool range_poisoned_;
+#endif
int flags_;
GVNFlagSet changes_flags_;
GVNFlagSet depends_on_flags_;
return new(zone) I(p1, p2, p3, p4, p5); \
}
+#define DECLARE_INSTRUCTION_FACTORY_P6(I, P1, P2, P3, P4, P5, P6) \
+ static I* New(Zone* zone, \
+ HValue* context, \
+ P1 p1, \
+ P2 p2, \
+ P3 p3, \
+ P4 p4, \
+ P5 p5, \
+ P6 p6) { \
+ return new(zone) I(p1, p2, p3, p4, p5, p6); \
+ }
+
#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(I) \
static I* New(Zone* zone, HValue* context) { \
return new(zone) I(context); \
position_.set_operand_position(index, pos);
}
+ bool Dominates(HInstruction* other);
+ bool CanTruncateToSmi() const { return CheckFlag(kTruncatingToSmi); }
bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); }
virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
ToBooleanStub::Types expected_input_types() const {
return expected_input_types_;
}
}
Unique<Map> map() const { return map_; }
+ bool map_is_stable() const { return map_is_stable_; }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
HBasicBlock* true_target = NULL,
HBasicBlock* false_target = NULL)
: HUnaryControlInstruction(value, true_target, false_target),
- known_successor_index_(kNoKnownSuccessorIndex), map_(Unique<Map>(map)) {
- ASSERT(!map.is_null());
+ known_successor_index_(kNoKnownSuccessorIndex),
+ map_is_stable_(map->is_stable()),
+ map_(Unique<Map>::CreateImmovable(map)) {
set_representation(Representation::Tagged());
}
- int known_successor_index_;
+ int known_successor_index_ : 31;
+ bool map_is_stable_ : 1;
Unique<Map> map_;
};
set_representation(to);
SetFlag(kUseGVN);
SetFlag(kCanOverflow);
- if (is_truncating_to_smi) {
+ if (is_truncating_to_smi && to.IsSmi()) {
SetFlag(kTruncatingToSmi);
SetFlag(kTruncatingToInt32);
}
} else {
if (to.IsFloat32x4()) {
set_type(HType::Float32x4());
+ } else if (to.IsFloat64x2()) {
+ set_type(HType::Float64x2());
} else if (to.IsInt32x4()) {
set_type(HType::Int32x4());
} else {
public:
static HEnterInlined* New(Zone* zone,
HValue* context,
+ BailoutId return_id,
Handle<JSFunction> closure,
int arguments_count,
FunctionLiteral* function,
InliningKind inlining_kind,
Variable* arguments_var,
HArgumentsObject* arguments_object) {
- return new(zone) HEnterInlined(closure, arguments_count, function,
- inlining_kind, arguments_var,
+ return new(zone) HEnterInlined(return_id, closure, arguments_count,
+ function, inlining_kind, arguments_var,
arguments_object, zone);
}
void set_arguments_pushed() { arguments_pushed_ = true; }
FunctionLiteral* function() const { return function_; }
InliningKind inlining_kind() const { return inlining_kind_; }
+ BailoutId ReturnId() const { return return_id_; }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
DECLARE_CONCRETE_INSTRUCTION(EnterInlined)
private:
- HEnterInlined(Handle<JSFunction> closure,
+ HEnterInlined(BailoutId return_id,
+ Handle<JSFunction> closure,
int arguments_count,
FunctionLiteral* function,
InliningKind inlining_kind,
Variable* arguments_var,
HArgumentsObject* arguments_object,
Zone* zone)
- : closure_(closure),
+ : return_id_(return_id),
+ closure_(closure),
arguments_count_(arguments_count),
arguments_pushed_(false),
function_(function),
return_targets_(2, zone) {
}
+ BailoutId return_id_;
Handle<JSFunction> closure_;
int arguments_count_;
bool arguments_pushed_;
};
-class HPushArgument V8_FINAL : public HUnaryOperation {
+class HPushArguments V8_FINAL : public HInstruction {
public:
- DECLARE_INSTRUCTION_FACTORY_P1(HPushArgument, HValue*);
+ static HPushArguments* New(Zone* zone, HValue* context) {
+ return new(zone) HPushArguments(zone);
+ }
+ static HPushArguments* New(Zone* zone, HValue* context, HValue* arg1) {
+ HPushArguments* instr = new(zone) HPushArguments(zone);
+ instr->AddInput(arg1);
+ return instr;
+ }
+ static HPushArguments* New(Zone* zone, HValue* context, HValue* arg1,
+ HValue* arg2) {
+ HPushArguments* instr = new(zone) HPushArguments(zone);
+ instr->AddInput(arg1);
+ instr->AddInput(arg2);
+ return instr;
+ }
+ static HPushArguments* New(Zone* zone, HValue* context, HValue* arg1,
+ HValue* arg2, HValue* arg3) {
+ HPushArguments* instr = new(zone) HPushArguments(zone);
+ instr->AddInput(arg1);
+ instr->AddInput(arg2);
+ instr->AddInput(arg3);
+ return instr;
+ }
+ static HPushArguments* New(Zone* zone, HValue* context, HValue* arg1,
+ HValue* arg2, HValue* arg3, HValue* arg4) {
+ HPushArguments* instr = new(zone) HPushArguments(zone);
+ instr->AddInput(arg1);
+ instr->AddInput(arg2);
+ instr->AddInput(arg3);
+ instr->AddInput(arg4);
+ return instr;
+ }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual int argument_delta() const V8_OVERRIDE { return 1; }
- HValue* argument() { return OperandAt(0); }
+ virtual int argument_delta() const V8_OVERRIDE { return inputs_.length(); }
+ HValue* argument(int i) { return OperandAt(i); }
- DECLARE_CONCRETE_INSTRUCTION(PushArgument)
+ virtual int OperandCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+ virtual HValue* OperandAt(int i) const V8_FINAL V8_OVERRIDE {
+ return inputs_[i];
+ }
+
+ void AddInput(HValue* value);
+
+ DECLARE_CONCRETE_INSTRUCTION(PushArguments)
+
+ protected:
+ virtual void InternalSetOperandAt(int i, HValue* value) V8_FINAL V8_OVERRIDE {
+ inputs_[i] = value;
+ }
private:
- explicit HPushArgument(HValue* value) : HUnaryOperation(value) {
+ explicit HPushArguments(Zone* zone)
+ : HInstruction(HType::Tagged()), inputs_(4, zone) {
set_representation(Representation::Tagged());
}
+
+ ZoneList<HValue*> inputs_;
};
HValue* target,
int argument_count,
const CallInterfaceDescriptor* descriptor,
- Vector<HValue*>& operands) {
+ const Vector<HValue*>& operands) {
ASSERT(operands.length() == descriptor->environment_length());
HCallWithDescriptor* res =
new(zone) HCallWithDescriptor(target, argument_count,
HCallWithDescriptor(HValue* target,
int argument_count,
const CallInterfaceDescriptor* descriptor,
- Vector<HValue*>& operands,
+ const Vector<HValue*>& operands,
Zone* zone)
: descriptor_(descriptor),
values_(descriptor->environment_length() + 1, zone) {
virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
virtual HValue* Canonicalize() V8_OVERRIDE;
+ virtual Representation RepresentationFromUses() V8_OVERRIDE;
virtual Representation RepresentationFromInputs() V8_OVERRIDE;
BuiltinFunctionId op() const { return op_; }
}
private:
+ // Indicates if we support a double (and int32) output for Math.floor and
+ // Math.round.
+ bool SupportsFlexibleFloorAndRound() const {
+#ifdef V8_TARGET_ARCH_ARM64
+ return true;
+#else
+ return false;
+#endif
+ }
HUnaryMathOperation(HValue* context, HValue* value, BuiltinFunctionId op)
: HTemplateInstruction<2>(HType::TaggedNumber()), op_(op) {
SetOperandAt(0, context);
switch (op) {
case kMathFloor:
case kMathRound:
+ if (SupportsFlexibleFloorAndRound()) {
+ SetFlag(kFlexibleRepresentation);
+ } else {
+ set_representation(Representation::Integer32());
+ }
+ break;
case kMathClz32:
set_representation(Representation::Integer32());
break;
class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
public:
static HCheckMaps* New(Zone* zone, HValue* context, HValue* value,
- Handle<Map> map, CompilationInfo* info,
- HValue* typecheck = NULL);
+ Handle<Map> map, HValue* typecheck = NULL) {
+ return new(zone) HCheckMaps(value, new(zone) UniqueSet<Map>(
+ Unique<Map>::CreateImmovable(map), zone), typecheck);
+ }
static HCheckMaps* New(Zone* zone, HValue* context,
- HValue* value, SmallMapList* maps,
+ HValue* value, SmallMapList* map_list,
HValue* typecheck = NULL) {
- HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
- for (int i = 0; i < maps->length(); i++) {
- check_map->Add(maps->at(i), zone);
+ UniqueSet<Map>* maps = new(zone) UniqueSet<Map>(map_list->length(), zone);
+ for (int i = 0; i < map_list->length(); ++i) {
+ maps->Add(Unique<Map>::CreateImmovable(map_list->at(i)), zone);
}
- return check_map;
+ return new(zone) HCheckMaps(value, maps, typecheck);
}
- bool CanOmitMapChecks() { return omit_; }
+ bool IsStabilityCheck() const { return is_stability_check_; }
+ void MarkAsStabilityCheck() {
+ maps_are_stable_ = true;
+ has_migration_target_ = false;
+ is_stability_check_ = true;
+ ClearChangesFlag(kNewSpacePromotion);
+ ClearDependsOnFlag(kElementsKind);
+ ClearDependsOnFlag(kMaps);
+ }
virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual bool HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) V8_OVERRIDE;
+
+ virtual HType CalculateInferredType() V8_OVERRIDE {
+ if (value()->type().IsHeapObject()) return value()->type();
+ return HType::HeapObject();
+ }
+
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- HValue* value() { return OperandAt(0); }
- HValue* typecheck() { return OperandAt(1); }
+ HValue* value() const { return OperandAt(0); }
+ HValue* typecheck() const { return OperandAt(1); }
- Unique<Map> first_map() const { return map_set_.at(0); }
- UniqueSet<Map> map_set() const { return map_set_; }
+ const UniqueSet<Map>* maps() const { return maps_; }
+ void set_maps(const UniqueSet<Map>* maps) { maps_ = maps; }
- void set_map_set(UniqueSet<Map>* maps, Zone *zone) {
- map_set_.Clear();
- for (int i = 0; i < maps->size(); i++) {
- map_set_.Add(maps->at(i), zone);
- }
+ bool maps_are_stable() const { return maps_are_stable_; }
+
+ bool HasMigrationTarget() const { return has_migration_target_; }
+
+ virtual HValue* Canonicalize() V8_OVERRIDE;
+
+ static HCheckMaps* CreateAndInsertAfter(Zone* zone,
+ HValue* value,
+ Unique<Map> map,
+ bool map_is_stable,
+ HInstruction* instr) {
+ return instr->Append(new(zone) HCheckMaps(
+ value, new(zone) UniqueSet<Map>(map, zone), map_is_stable));
}
- bool has_migration_target() const {
- return has_migration_target_;
+ static HCheckMaps* CreateAndInsertBefore(Zone* zone,
+ HValue* value,
+ const UniqueSet<Map>* maps,
+ bool maps_are_stable,
+ HInstruction* instr) {
+ return instr->Prepend(new(zone) HCheckMaps(value, maps, maps_are_stable));
}
DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
- return this->map_set_.Equals(&HCheckMaps::cast(other)->map_set_);
+ return this->maps()->Equals(HCheckMaps::cast(other)->maps());
}
virtual int RedefinedOperandIndex() { return 0; }
private:
- void Add(Handle<Map> map, Zone* zone) {
- map_set_.Add(Unique<Map>(map), zone);
+ HCheckMaps(HValue* value, const UniqueSet<Map>* maps, bool maps_are_stable)
+ : HTemplateInstruction<2>(HType::HeapObject()), maps_(maps),
+ has_migration_target_(false), is_stability_check_(false),
+ maps_are_stable_(maps_are_stable) {
+ ASSERT_NE(0, maps->size());
+ SetOperandAt(0, value);
+ // Use the object value for the dependency.
+ SetOperandAt(1, value);
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
SetDependsOnFlag(kMaps);
SetDependsOnFlag(kElementsKind);
-
- if (!has_migration_target_ && map->is_migration_target()) {
- has_migration_target_ = true;
- SetChangesFlag(kNewSpacePromotion);
- }
}
- // Clients should use one of the static New* methods above.
- HCheckMaps(HValue* value, Zone *zone, HValue* typecheck)
- : HTemplateInstruction<2>(value->type()),
- omit_(false), has_migration_target_(false) {
+ HCheckMaps(HValue* value, const UniqueSet<Map>* maps, HValue* typecheck)
+ : HTemplateInstruction<2>(HType::HeapObject()), maps_(maps),
+ has_migration_target_(false), is_stability_check_(false),
+ maps_are_stable_(true) {
+ ASSERT_NE(0, maps->size());
SetOperandAt(0, value);
// Use the object value for the dependency if NULL is passed.
- SetOperandAt(1, typecheck != NULL ? typecheck : value);
+ SetOperandAt(1, typecheck ? typecheck : value);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetFlag(kTrackSideEffectDominators);
+ SetDependsOnFlag(kMaps);
+ SetDependsOnFlag(kElementsKind);
+ for (int i = 0; i < maps->size(); ++i) {
+ Handle<Map> map = maps->at(i).handle();
+ if (map->is_migration_target()) has_migration_target_ = true;
+ if (!map->is_stable()) maps_are_stable_ = false;
+ }
+ if (has_migration_target_) SetChangesFlag(kNewSpacePromotion);
}
- bool omit_;
- bool has_migration_target_;
- UniqueSet<Map> map_set_;
+ const UniqueSet<Map>* maps_;
+ bool has_migration_target_ : 1;
+ bool is_stability_check_ : 1;
+ bool maps_are_stable_ : 1;
};
return Representation::Tagged();
}
+ virtual HType CalculateInferredType() V8_OVERRIDE {
+ switch (check_) {
+ case IS_SPEC_OBJECT: return HType::JSObject();
+ case IS_JS_ARRAY: return HType::JSArray();
+ case IS_STRING: return HType::String();
+ case IS_INTERNALIZED_STRING: return HType::String();
+ }
+ UNREACHABLE();
+ return HType::Tagged();
+ }
+
virtual HValue* Canonicalize() V8_OVERRIDE;
bool is_interval_check() const { return check_ <= LAST_INTERVAL_CHECK; }
void GetCheckInterval(InstanceType* first, InstanceType* last);
void GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag);
+ Check check() const { return check_; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType)
protected:
const char* GetCheckName();
HCheckInstanceType(HValue* value, Check check)
- : HUnaryOperation(value), check_(check) {
+ : HUnaryOperation(value, HType::HeapObject()), check_(check) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
return Representation::Tagged();
}
+ virtual HType CalculateInferredType() V8_OVERRIDE {
+ if (value()->type().IsHeapObject()) return value()->type();
+ return HType::HeapObject();
+ }
+
#ifdef DEBUG
virtual void Verify() V8_OVERRIDE;
#endif
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- explicit HCheckHeapObject(HValue* value)
- : HUnaryOperation(value, HType::NonPrimitive()) {
+ explicit HCheckHeapObject(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
set_representation(Representation::Tagged());
SetFlag(kIsArguments);
}
-
- virtual bool IsDeletable() const V8_FINAL V8_OVERRIDE { return true; }
};
}
static HConstant* CreateAndInsertBefore(Zone* zone,
- Unique<Object> unique,
- bool is_not_in_new_space,
+ Unique<Map> map,
+ bool map_is_stable,
HInstruction* instruction) {
return instruction->Prepend(new(zone) HConstant(
- unique, Representation::Tagged(), HType::Tagged(),
- is_not_in_new_space, false, false, kUnknownInstanceType));
+ map, Unique<Map>(Handle<Map>::null()), map_is_stable,
+ Representation::Tagged(), HType::HeapObject(), true,
+ false, false, MAP_TYPE));
+ }
+
+ static HConstant* CreateAndInsertAfter(Zone* zone,
+ Unique<Map> map,
+ bool map_is_stable,
+ HInstruction* instruction) {
+ return instruction->Append(new(zone) HConstant(
+ map, Unique<Map>(Handle<Map>::null()), map_is_stable,
+ Representation::Tagged(), HType::HeapObject(), true,
+ false, false, MAP_TYPE));
}
Handle<Object> handle(Isolate* isolate) {
return object_.handle();
}
- bool HasMap(Handle<Map> map) {
- Handle<Object> constant_object = handle(map->GetIsolate());
- return constant_object->IsHeapObject() &&
- Handle<HeapObject>::cast(constant_object)->map() == *map;
- }
-
bool IsSpecialDouble() const {
return has_double_value_ &&
(BitCast<int64_t>(double_value_) == BitCast<int64_t>(-0.0) ||
return instance_type_ == CELL_TYPE || instance_type_ == PROPERTY_CELL_TYPE;
}
+ bool IsMap() const {
+ return instance_type_ == MAP_TYPE;
+ }
+
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
bool IsUndetectable() const { return is_undetectable_; }
InstanceType GetInstanceType() const { return instance_type_; }
+ bool HasMapValue() const { return instance_type_ == MAP_TYPE; }
+ Unique<Map> MapValue() const {
+ ASSERT(HasMapValue());
+ return Unique<Map>::cast(GetUnique());
+ }
+ bool HasStableMapValue() const {
+ ASSERT(HasMapValue() || !has_stable_map_value_);
+ return has_stable_map_value_;
+ }
+
+ bool HasObjectMap() const { return !object_map_.IsNull(); }
+ Unique<Map> ObjectMap() const {
+ ASSERT(HasObjectMap());
+ return object_map_;
+ }
+
virtual intptr_t Hashcode() V8_OVERRIDE {
if (has_int32_value_) {
return static_cast<intptr_t>(int32_value_);
Representation r = Representation::None(),
bool is_not_in_new_space = true,
Unique<Object> optional = Unique<Object>(Handle<Object>::null()));
- HConstant(Unique<Object> unique,
+ HConstant(Unique<Object> object,
+ Unique<Map> object_map,
+ bool has_stable_map_value,
Representation r,
HType type,
bool is_not_in_new_space,
// constant HeapObject.
Unique<Object> object_;
+ // If object_ is a heap object, this points to the stable map of the object.
+ Unique<Map> object_map_;
+
+ // If object_ is a map, this indicates whether the map is stable.
+ bool has_stable_map_value_ : 1;
+
// We store the HConstant in the most specific form safely possible.
// The two flags, has_int32_value_ and has_double_value_ tell us if
// int32_value_ and double_value_ hold valid, safe representations
bool RightIsPowerOf2() {
if (!right()->IsInteger32Constant()) return false;
int32_t value = right()->GetInteger32Constant();
- return value != 0 && (IsPowerOf2(value) || IsPowerOf2(-value));
+ return IsPowerOf2(value) || IsPowerOf2(-value);
}
DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
class HBitwiseBinaryOperation : public HBinaryOperation {
public:
HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right,
- HType type = HType::Tagged())
+ HType type = HType::TaggedNumber())
: HBinaryOperation(context, left, right, type) {
SetFlag(kFlexibleRepresentation);
SetFlag(kTruncatingToInt32);
virtual Representation observed_input_representation(int index) V8_OVERRIDE {
return observed_input_representation_[index];
}
+
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
void SetOperandPositions(Zone* zone,
virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+ static const int kNoKnownSuccessorIndex = -1;
+ int known_successor_index() const { return known_successor_index_; }
+ void set_known_successor_index(int known_successor_index) {
+ known_successor_index_ = known_successor_index;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch)
protected:
HIsStringAndBranch(HValue* value,
HBasicBlock* true_target = NULL,
HBasicBlock* false_target = NULL)
- : HUnaryControlInstruction(value, true_target, false_target) {}
+ : HUnaryControlInstruction(value, true_target, false_target),
+ known_successor_index_(kNoKnownSuccessorIndex) { }
+
+ int known_successor_index_;
};
Token::Value op,
HValue* left,
HValue* right)
- : HBitwiseBinaryOperation(context, left, right, HType::TaggedNumber()),
+ : HBitwiseBinaryOperation(context, left, right),
op_(op) {
ASSERT(op == Token::BIT_AND || op == Token::BIT_OR || op == Token::BIT_XOR);
// BIT_AND with a smi-range positive value will always unset the
HValue* context() { return OperandAt(0); }
HValue* size() { return OperandAt(1); }
+ bool has_size_upper_bound() { return size_upper_bound_ != NULL; }
+ HConstant* size_upper_bound() { return size_upper_bound_; }
+ void set_size_upper_bound(HConstant* value) {
+ ASSERT(size_upper_bound_ == NULL);
+ size_upper_bound_ = value;
+ }
+
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
if (index == 0) {
return Representation::Tagged();
: HTemplateInstruction<2>(type),
flags_(ComputeFlags(pretenure_flag, instance_type)),
dominating_allocate_(NULL),
- filler_free_space_size_(NULL) {
+ filler_free_space_size_(NULL),
+ size_upper_bound_(NULL) {
SetOperandAt(0, context);
- SetOperandAt(1, size);
+ UpdateSize(size);
set_representation(Representation::Tagged());
SetFlag(kTrackSideEffectDominators);
SetChangesFlag(kNewSpacePromotion);
void UpdateSize(HValue* size) {
SetOperandAt(1, size);
+ if (size->IsInteger32Constant()) {
+ size_upper_bound_ = HConstant::cast(size);
+ } else {
+ size_upper_bound_ = NULL;
+ }
}
HAllocate* GetFoldableDominator(HAllocate* dominator);
Handle<Map> known_initial_map_;
HAllocate* dominating_allocate_;
HStoreNamedField* filler_free_space_size_;
+ HConstant* size_upper_bound_;
};
HValue* context,
HValue* value,
HValue* offset,
- HType type = HType::Tagged()) {
+ HType type) {
return new(zone) HInnerAllocatedObject(value, offset, type);
}
private:
HInnerAllocatedObject(HValue* value,
HValue* offset,
- HType type = HType::Tagged())
- : HTemplateInstruction<2>(type) {
+ HType type) : HTemplateInstruction<2>(type) {
ASSERT(value->IsAllocate());
+ ASSERT(type.IsHeapObject());
SetOperandAt(0, value);
SetOperandAt(1, offset);
- set_type(type);
set_representation(Representation::Tagged());
}
};
// Returns whether storing |value| into a heap object requires a write
// barrier.  Smis, null, booleans, undefined, and immortal immovable
// constants are exempt.
inline bool StoringValueNeedsWriteBarrier(HValue* value) {
  HType t = value->type();
  if (t.IsSmi() || t.IsNull() || t.IsBoolean() || t.IsUndefined()) {
    return false;
  }
  if (value->IsConstant() && HConstant::cast(value)->ImmortalImmovable()) {
    return false;
  }
  return true;
}
inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
HValue* value,
- HValue* new_space_dominator) {
+ HValue* dominator) {
while (object->IsInnerAllocatedObject()) {
object = HInnerAllocatedObject::cast(object)->base_object();
}
// Stores to external references require no write barriers
return false;
}
- if (object != new_space_dominator) return true;
- if (object->IsAllocate()) {
- // Stores to new space allocations require no write barriers if the object
- // is the new space dominator.
+ // We definitely need a write barrier unless the object is the allocation
+ // dominator.
+ if (object == dominator && object->IsAllocate()) {
+ // Stores to new space allocations require no write barriers.
if (HAllocate::cast(object)->IsNewSpaceAllocation()) {
return false;
}
- // Likewise we don't need a write barrier if we store a value that
- // originates from the same allocation (via allocation folding).
+ // Stores to old space allocations require no write barriers if the value is
+ // a constant provably not in new space.
+ if (value->IsConstant() && HConstant::cast(value)->NotInNewSpace()) {
+ return false;
+ }
+ // Stores to old space allocations require no write barriers if the value is
+ // an old space allocation.
while (value->IsInnerAllocatedObject()) {
value = HInnerAllocatedObject::cast(value)->base_object();
}
- return object != value;
+ if (value->IsAllocate() &&
+ !HAllocate::cast(value)->IsNewSpaceAllocation()) {
+ return false;
+ }
}
return true;
}
+// Computes the PointersToHereCheck for |object|: walks up through any
+// HInnerAllocatedObject wrappers to the base allocation; if that base is
+// the side-effect dominator AND is a new-space allocation, reports
+// kPointersToHereAreAlwaysInteresting, otherwise falls back to the
+// conservative kPointersToHereMaybeInteresting.
+inline PointersToHereCheck PointersToHereCheckForObject(HValue* object,
+ HValue* dominator) {
+ while (object->IsInnerAllocatedObject()) {
+ object = HInnerAllocatedObject::cast(object)->base_object();
+ }
+ if (object == dominator &&
+ object->IsAllocate() &&
+ HAllocate::cast(object)->IsNewSpaceAllocation()) {
+ return kPointersToHereAreAlwaysInteresting;
+ }
+ return kPointersToHereMaybeInteresting;
+}
+
+
class HStoreGlobalCell V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P3(HStoreGlobalCell, HValue*,
kCheckReturnUndefined
};
- HLoadContextSlot(HValue* context, Variable* var)
- : HUnaryOperation(context), slot_index_(var->index()) {
- ASSERT(var->IsContextSlot());
- switch (var->mode()) {
- case LET:
- case CONST:
- mode_ = kCheckDeoptimize;
- break;
- case CONST_LEGACY:
- mode_ = kCheckReturnUndefined;
- break;
- default:
- mode_ = kNoCheck;
- }
+ HLoadContextSlot(HValue* context, int slot_index, Mode mode)
+ : HUnaryOperation(context), slot_index_(slot_index), mode_(mode) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetDependsOnFlag(kContextSlots);
return portion() == kStringLengths;
}
+ // True if this access targets the map portion of the object.
+ inline bool IsMap() const {
+ return portion() == kMaps;
+ }
+
inline int offset() const {
return OffsetField::decode(value_);
}
Representation::Integer32());
}
+ // Access to the first 8 bytes of a SIMD128 value (at
+ // Float32x4::kValueOffset), viewed with Double representation.
+ static HObjectAccess ForSIMD128Double0() {
+ return HObjectAccess(
+ kDouble, Float32x4::kValueOffset, Representation::Double());
+ }
+
+ // Access to the second 8 bytes of a SIMD128 value, one kDoubleSize past
+ // ForSIMD128Double0, viewed with Double representation.
+ static HObjectAccess ForSIMD128Double1() {
+ return HObjectAccess(kDouble,
+ Float32x4::kValueOffset + kDoubleSize,
+ Representation::Double());
+ }
+
static HObjectAccess ForElementsPointer() {
return HObjectAccess(kElementsPointer, JSObject::kElementsOffset);
}
return HObjectAccess(kMaps, JSObject::kMapOffset);
}
- static HObjectAccess ForMapInstanceSize() {
+ static HObjectAccess ForMapAsInteger32() {
+ return HObjectAccess(kMaps, JSObject::kMapOffset,
+ Representation::Integer32());
+ }
+
+ static HObjectAccess ForMapInObjectProperties() {
return HObjectAccess(kInobject,
- Map::kInstanceSizeOffset,
+ Map::kInObjectPropertiesOffset,
Representation::UInteger8());
}
Representation::UInteger8());
}
+ static HObjectAccess ForMapInstanceSize() {
+ return HObjectAccess(kInobject,
+ Map::kInstanceSizeOffset,
+ Representation::UInteger8());
+ }
+
+ static HObjectAccess ForMapBitField() {
+ return HObjectAccess(kInobject,
+ Map::kBitFieldOffset,
+ Representation::UInteger8());
+ }
+
+ static HObjectAccess ForMapBitField2() {
+ return HObjectAccess(kInobject,
+ Map::kBitField2Offset,
+ Representation::UInteger8());
+ }
+
+ static HObjectAccess ForNameHashField() {
+ return HObjectAccess(kInobject,
+ Name::kHashFieldOffset,
+ Representation::Integer32());
+ }
+
+ static HObjectAccess ForMapInstanceTypeAndBitField() {
+ STATIC_ASSERT((Map::kInstanceTypeOffset & 1) == 0);
+ STATIC_ASSERT(Map::kBitFieldOffset == Map::kInstanceTypeOffset + 1);
+ return HObjectAccess(kInobject,
+ Map::kInstanceTypeOffset,
+ Representation::UInteger16());
+ }
+
static HObjectAccess ForMapPrototype() {
return HObjectAccess(kInobject, Map::kPrototypeOffset);
}
JSArrayBuffer::kBackingStoreOffset, Representation::External());
}
+ static HObjectAccess ForJSArrayBufferByteLength() {
+ return HObjectAccess::ForObservableJSObjectOffset(
+ JSArrayBuffer::kByteLengthOffset, Representation::Tagged());
+ }
+
static HObjectAccess ForExternalArrayExternalPointer() {
return HObjectAccess::ForObservableJSObjectOffset(
ExternalArray::kExternalPointerOffset, Representation::External());
class HLoadNamedField V8_FINAL : public HTemplateInstruction<2> {
public:
- DECLARE_INSTRUCTION_FACTORY_P3(HLoadNamedField, HValue*, HValue*,
- HObjectAccess);
+ DECLARE_INSTRUCTION_FACTORY_P3(HLoadNamedField, HValue*,
+ HValue*, HObjectAccess);
+ DECLARE_INSTRUCTION_FACTORY_P5(HLoadNamedField, HValue*, HValue*,
+ HObjectAccess, const UniqueSet<Map>*, HType);
HValue* object() { return OperandAt(0); }
HValue* dependency() {
return access_.representation();
}
+ const UniqueSet<Map>* maps() const { return maps_; }
+
virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
virtual bool HasOutOfBoundsAccess(int size) V8_OVERRIDE {
return !access().IsInobject() || access().offset() >= size;
virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ // Whether this load may be replaced by |other|. Requires identical HType
+ // and representation; for another HLoadNamedField, additionally requires
+ // this load's map set to be a subset of the other's (NULL meaning no map
+ // information is available).
+ // NOTE(review): the early `return false` when kCantBeReplaced is NOT set
+ // reads as inverted given the flag's name -- confirm the intended
+ // semantics of HValue::kCantBeReplaced at its definition site.
+ bool CanBeReplacedWith(HValue* other) const {
+ if (!CheckFlag(HValue::kCantBeReplaced)) return false;
+ if (!type().Equals(other->type())) return false;
+ if (!representation().Equals(other->representation())) return false;
+ if (!other->IsLoadNamedField()) return true;
+ HLoadNamedField* that = HLoadNamedField::cast(other);
+ if (this->maps_ == that->maps_) return true;
+ if (this->maps_ == NULL || that->maps_ == NULL) return false;
+ return this->maps_->IsSubset(that->maps_);
+ }
+
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField)
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ // Two loads are equal iff they have the same field access and the same
+ // map information: both map sets NULL, pointer-identical, or
+ // element-wise equal.
- HLoadNamedField* b = HLoadNamedField::cast(other);
- return access_.Equals(b->access_);
+ HLoadNamedField* that = HLoadNamedField::cast(other);
+ if (!this->access_.Equals(that->access_)) return false;
+ if (this->maps_ == that->maps_) return true;
+ return (this->maps_ != NULL &&
+ that->maps_ != NULL &&
+ this->maps_->Equals(that->maps_));
}
private:
HLoadNamedField(HValue* object,
HValue* dependency,
- HObjectAccess access) : access_(access) {
- ASSERT(object != NULL);
+ HObjectAccess access)
+ : access_(access), maps_(NULL) {
+ ASSERT_NOT_NULL(object);
SetOperandAt(0, object);
- SetOperandAt(1, dependency != NULL ? dependency : object);
+ SetOperandAt(1, dependency ? dependency : object);
Representation representation = access.representation();
if (representation.IsInteger8() ||
representation.IsInteger32()) {
set_representation(representation);
} else if (representation.IsHeapObject()) {
- set_type(HType::NonPrimitive());
+ set_type(HType::HeapObject());
set_representation(Representation::Tagged());
} else {
set_representation(Representation::Tagged());
access.SetGVNFlags(this, LOAD);
}
+ // Constructor carrying static map information: |maps| must be a
+ // non-NULL, non-empty set (presumably the maps |object| is known to have
+ // at this point -- confirm at the call sites) and |type| the statically
+ // known HeapObject type of the loaded value. The access representation
+ // must itself be a heap object.
+ HLoadNamedField(HValue* object,
+ HValue* dependency,
+ HObjectAccess access,
+ const UniqueSet<Map>* maps,
+ HType type)
+ : HTemplateInstruction<2>(type), access_(access), maps_(maps) {
+ ASSERT_NOT_NULL(maps);
+ ASSERT_NE(0, maps->size());
+
+ ASSERT_NOT_NULL(object);
+ SetOperandAt(0, object);
+ // Operand 1 defaults to the object itself when no explicit dependency
+ // is supplied.
+ SetOperandAt(1, dependency ? dependency : object);
+
+ ASSERT(access.representation().IsHeapObject());
+ ASSERT(type.IsHeapObject());
+ set_representation(Representation::Tagged());
+
+ access.SetGVNFlags(this, LOAD);
+ }
+
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
HObjectAccess access_;
+ const UniqueSet<Map>* maps_;
};
public:
virtual HValue* GetKey() = 0;
virtual void SetKey(HValue* key) = 0;
- virtual void SetIndexOffset(uint32_t index_offset) = 0;
- virtual int MaxIndexOffsetBits() = 0;
+ virtual ElementsKind elements_kind() const = 0;
+ virtual void IncreaseBaseOffset(uint32_t base_offset) = 0;
+ virtual int MaxBaseOffsetBits() = 0;
virtual bool IsDehoisted() = 0;
virtual void SetDehoisted(bool is_dehoisted) = 0;
- virtual ~ArrayInstructionInterface() { };
+ virtual ~ArrayInstructionInterface() { }
static Representation KeyedAccessIndexRequirement(Representation r) {
return r.IsInteger32() || SmiValuesAre32Bits()
};
+static const int kDefaultKeyedHeaderOffsetSentinel = -1;
+
enum LoadKeyedHoleMode {
NEVER_RETURN_HOLE,
ALLOW_RETURN_HOLE
ElementsKind);
DECLARE_INSTRUCTION_FACTORY_P5(HLoadKeyed, HValue*, HValue*, HValue*,
ElementsKind, LoadKeyedHoleMode);
+ DECLARE_INSTRUCTION_FACTORY_P6(HLoadKeyed, HValue*, HValue*, HValue*,
+ ElementsKind, LoadKeyedHoleMode, int);
bool is_external() const {
return IsExternalArrayElementsKind(elements_kind());
return OperandAt(2);
}
bool HasDependency() const { return OperandAt(0) != OperandAt(2); }
- uint32_t index_offset() { return IndexOffsetField::decode(bit_field_); }
- void SetIndexOffset(uint32_t index_offset) {
- bit_field_ = IndexOffsetField::update(bit_field_, index_offset);
- }
- virtual int MaxIndexOffsetBits() {
- return kBitsForIndexOffset;
+ // Current base offset (decoded from the packed bit_field_).
+ uint32_t base_offset() { return BaseOffsetField::decode(bit_field_); }
+ // Adds |base_offset| to the offset already encoded in bit_field_.
+ void IncreaseBaseOffset(uint32_t base_offset) {
+ // The base offset is usually simply the size of the array header,
+ // except when dehoisting adds an additional offset due to an array
+ // index key manipulation, in which case it becomes (array header size +
+ // constant-offset-from-key * kPointerSize).
+ base_offset += BaseOffsetField::decode(bit_field_);
+ bit_field_ = BaseOffsetField::update(bit_field_, base_offset);
+ }
+ virtual int MaxBaseOffsetBits() {
+ return kBitsForBaseOffset;
}
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
void SetDehoisted(bool is_dehoisted) {
bit_field_ = IsDehoistedField::update(bit_field_, is_dehoisted);
}
- ElementsKind elements_kind() const {
+ virtual ElementsKind elements_kind() const V8_OVERRIDE {
return ElementsKindField::decode(bit_field_);
}
LoadKeyedHoleMode hole_mode() const {
if (!other->IsLoadKeyed()) return false;
HLoadKeyed* other_load = HLoadKeyed::cast(other);
- if (IsDehoisted() && index_offset() != other_load->index_offset())
+ if (IsDehoisted() && base_offset() != other_load->base_offset())
return false;
return elements_kind() == other_load->elements_kind();
}
HValue* key,
HValue* dependency,
ElementsKind elements_kind,
- LoadKeyedHoleMode mode = NEVER_RETURN_HOLE)
+ LoadKeyedHoleMode mode = NEVER_RETURN_HOLE,
+ int offset = kDefaultKeyedHeaderOffsetSentinel)
: bit_field_(0) {
+ offset = offset == kDefaultKeyedHeaderOffsetSentinel
+ ? GetDefaultHeaderSizeForElementsKind(elements_kind)
+ : offset;
bit_field_ = ElementsKindField::encode(elements_kind) |
- HoleModeField::encode(mode);
+ HoleModeField::encode(mode) |
+ BaseOffsetField::encode(offset);
SetOperandAt(0, obj);
SetOperandAt(1, key);
elements_kind == FLOAT64_ELEMENTS) {
set_representation(Representation::Double());
} else if (IsFloat32x4ElementsKind(elements_kind)) {
- set_representation(CPU::SupportsSIMD128InCrankshaft() ?
+ set_representation(CpuFeatures::SupportsSIMD128InCrankshaft() ?
Representation::Float32x4() : Representation::Tagged());
+ } else if (IsFloat64x2ElementsKind(elements_kind)) {
+ set_representation(CpuFeatures::SupportsSIMD128InCrankshaft() ?
+ Representation::Float64x2() : Representation::Tagged());
} else if (IsInt32x4ElementsKind(elements_kind)) {
- set_representation(CPU::SupportsSIMD128InCrankshaft() ?
+ set_representation(CpuFeatures::SupportsSIMD128InCrankshaft() ?
Representation::Int32x4() : Representation::Tagged());
} else {
set_representation(Representation::Integer32());
+ // Layout of the packed bit_field_ (32 bits total):
+ // elements kind (5) | hole mode (1) | base offset (25) | dehoisted (1).
enum LoadKeyedBits {
kBitsForElementsKind = 5,
kBitsForHoleMode = 1,
- kBitsForIndexOffset = 25,
+ kBitsForBaseOffset = 25,
kBitsForIsDehoisted = 1,
kStartElementsKind = 0,
kStartHoleMode = kStartElementsKind + kBitsForElementsKind,
- kStartIndexOffset = kStartHoleMode + kBitsForHoleMode,
- kStartIsDehoisted = kStartIndexOffset + kBitsForIndexOffset
+ kStartBaseOffset = kStartHoleMode + kBitsForHoleMode,
+ kStartIsDehoisted = kStartBaseOffset + kBitsForBaseOffset
};
- STATIC_ASSERT((kBitsForElementsKind + kBitsForIndexOffset +
+ STATIC_ASSERT((kBitsForElementsKind + kBitsForBaseOffset +
kBitsForIsDehoisted) <= sizeof(uint32_t)*8);
STATIC_ASSERT(kElementsKindCount <= (1 << kBitsForElementsKind));
class ElementsKindField:
class HoleModeField:
public BitField<LoadKeyedHoleMode, kStartHoleMode, kBitsForHoleMode>
{}; // NOLINT
- class IndexOffsetField:
- public BitField<uint32_t, kStartIndexOffset, kBitsForIndexOffset>
+ class BaseOffsetField:
+ public BitField<uint32_t, kStartBaseOffset, kBitsForBaseOffset>
{}; // NOLINT
class IsDehoistedField:
public BitField<bool, kStartIsDehoisted, kBitsForIsDehoisted>
HValue* dominator) V8_OVERRIDE {
ASSERT(side_effect == kNewSpacePromotion);
if (!FLAG_use_write_barrier_elimination) return false;
- new_space_dominator_ = dominator;
+ dominator_ = dominator;
return false;
}
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- void SkipWriteBarrier() { write_barrier_mode_ = SKIP_WRITE_BARRIER; }
- bool IsSkipWriteBarrier() const {
- return write_barrier_mode_ == SKIP_WRITE_BARRIER;
- }
-
HValue* object() const { return OperandAt(0); }
HValue* value() const { return OperandAt(1); }
HValue* transition() const { return OperandAt(2); }
HObjectAccess access() const { return access_; }
- HValue* new_space_dominator() const { return new_space_dominator_; }
+ HValue* dominator() const { return dominator_; }
bool has_transition() const { return has_transition_; }
StoreFieldOrKeyedMode store_mode() const { return store_mode_; }
}
}
- void SetTransition(HConstant* map_constant, CompilationInfo* info) {
+ void SetTransition(HConstant* transition) {
ASSERT(!has_transition()); // Only set once.
- Handle<Map> map = Handle<Map>::cast(map_constant->handle(info->isolate()));
- if (map->CanBeDeprecated()) {
- map->AddDependentCompilationInfo(DependentCode::kTransitionGroup, info);
- }
- SetOperandAt(2, map_constant);
+ SetOperandAt(2, transition);
has_transition_ = true;
+ SetChangesFlag(kMaps);
}
bool NeedsWriteBarrier() {
ASSERT(!field_representation().IsDouble() || !has_transition());
- if (IsSkipWriteBarrier()) return false;
if (field_representation().IsDouble()) return false;
if (field_representation().IsSmi()) return false;
if (field_representation().IsInteger32()) return false;
if (field_representation().IsExternal()) return false;
return StoringValueNeedsWriteBarrier(value()) &&
- ReceiverObjectNeedsWriteBarrier(object(), value(),
- new_space_dominator());
+ ReceiverObjectNeedsWriteBarrier(object(), value(), dominator());
}
bool NeedsWriteBarrierForMap() {
- if (IsSkipWriteBarrier()) return false;
return ReceiverObjectNeedsWriteBarrier(object(), transition(),
- new_space_dominator());
+ dominator());
+ }
+
+ // If either the field's representation or the stored value's static type
+ // guarantees a heap object, the write barrier need not re-check for a
+ // smi at runtime.
+ SmiCheck SmiCheckForWriteBarrier() const {
+ if (field_representation().IsHeapObject()) return OMIT_SMI_CHECK;
+ if (value()->type().IsHeapObject()) return OMIT_SMI_CHECK;
+ return INLINE_SMI_CHECK;
+ }
+
+ // Classifies the stored value's base allocation relative to the
+ // side-effect dominator (see PointersToHereCheckForObject).
+ PointersToHereCheck PointersToHereCheckForValue() const {
+ return PointersToHereCheckForObject(value(), dominator());
+ }
Representation field_representation() const {
SetOperandAt(1, value);
}
+ // Whether this store and |that| target the same field and are therefore
+ // interchangeable, modulo the smi-field exception documented below.
+ bool CanBeReplacedWith(HStoreNamedField* that) const {
+ if (!this->access().Equals(that->access())) return false;
+ if (SmiValuesAre32Bits() &&
+ this->field_representation().IsSmi() &&
+ this->store_mode() == INITIALIZING_STORE &&
+ that->store_mode() == STORE_TO_INITIALIZED_ENTRY) {
+ // We cannot replace an initializing store to a smi field with a store to
+ // an initialized entry on 64-bit architectures (with 32-bit smis).
+ return false;
+ }
+ return true;
+ }
+
private:
HStoreNamedField(HValue* obj,
HObjectAccess access,
HValue* val,
StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE)
: access_(access),
- new_space_dominator_(NULL),
- write_barrier_mode_(UPDATE_WRITE_BARRIER),
+ dominator_(NULL),
has_transition_(false),
store_mode_(store_mode) {
// Stores to a non existing in-object property are allowed only to the
}
HObjectAccess access_;
- HValue* new_space_dominator_;
- WriteBarrierMode write_barrier_mode_ : 1;
+ HValue* dominator_;
bool has_transition_ : 1;
StoreFieldOrKeyedMode store_mode_ : 1;
};
ElementsKind);
DECLARE_INSTRUCTION_FACTORY_P5(HStoreKeyed, HValue*, HValue*, HValue*,
ElementsKind, StoreFieldOrKeyedMode);
+ DECLARE_INSTRUCTION_FACTORY_P6(HStoreKeyed, HValue*, HValue*, HValue*,
+ ElementsKind, StoreFieldOrKeyedMode, int);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
// kind_fast: tagged[int32] = tagged
}
ASSERT_EQ(index, 2);
+ return RequiredValueRepresentation(elements_kind_, store_mode_);
+ }
- if (IsDoubleOrFloatElementsKind(elements_kind())) {
+ // Maps an elements kind (plus store mode) to the representation the
+ // stored value must have. Static so it can be queried without an
+ // instruction instance.
+ static Representation RequiredValueRepresentation(
+ ElementsKind kind, StoreFieldOrKeyedMode mode) {
+ if (IsDoubleOrFloatElementsKind(kind)) {
return Representation::Double();
}
- if (IsFloat32x4ElementsKind(elements_kind())) {
- return CPU::SupportsSIMD128InCrankshaft() ?
+
+ // With 32-bit smis on 64-bit targets, overwriting an initialized smi
+ // entry takes a raw Integer32 value.
+ if (kind == FAST_SMI_ELEMENTS && SmiValuesAre32Bits() &&
+ mode == STORE_TO_INITIALIZED_ENTRY) {
+ return Representation::Integer32();
+ }
+
+ if (IsFloat32x4ElementsKind(kind)) {
+ return CpuFeatures::SupportsSIMD128InCrankshaft() ?
Representation::Float32x4() : Representation::Tagged();
}
- if (IsInt32x4ElementsKind(elements_kind())) {
- return CPU::SupportsSIMD128InCrankshaft() ?
- return CPU::SupportsSIMD128InCrankshaft() ?
- Representation::Int32x4() : Representation::Tagged();
+ if (IsFloat64x2ElementsKind(kind)) {
+ return CpuFeatures::SupportsSIMD128InCrankshaft() ?
+ Representation::Float64x2() : Representation::Tagged();
}
- if (SmiValuesAre32Bits() && store_mode_ == STORE_TO_INITIALIZED_ENTRY) {
- return Representation::Integer32();
+ if (IsInt32x4ElementsKind(kind)) {
+ return CpuFeatures::SupportsSIMD128InCrankshaft() ?
+ Representation::Int32x4() : Representation::Tagged();
}
- if (IsFastSmiElementsKind(elements_kind())) {
+
+ if (IsFastSmiElementsKind(kind)) {
return Representation::Smi();
}
- return is_external() || is_fixed_typed_array()
- ? Representation::Integer32()
- : Representation::Tagged();
+ return IsExternalArrayElementsKind(kind) ||
+ IsFixedTypedArrayElementsKind(kind)
+ ? Representation::Integer32()
+ : Representation::Tagged();
}
bool is_external() const {
if (IsUninitialized()) {
return Representation::None();
}
- if (IsDoubleOrFloatElementsKind(elements_kind())) {
- return Representation::Double();
- }
- if (IsFloat32x4ElementsKind(elements_kind())) {
- return CPU::SupportsSIMD128InCrankshaft() ?
- Representation::Float32x4() : Representation::Tagged();
- }
- if (IsInt32x4ElementsKind(elements_kind())) {
- return CPU::SupportsSIMD128InCrankshaft() ?
- Representation::Int32x4() : Representation::Tagged();
- }
- if (SmiValuesAre32Bits() && store_mode_ == STORE_TO_INITIALIZED_ENTRY) {
- return Representation::Integer32();
- }
- if (IsFastSmiElementsKind(elements_kind())) {
- return Representation::Smi();
- }
- if (is_typed_elements()) {
- return Representation::Integer32();
- }
+ Representation r = RequiredValueRepresentation(elements_kind_, store_mode_);
// For fast object elements kinds, don't assume anything.
- return Representation::None();
+ if (r.IsTagged()) return Representation::None();
+ return r;
}
- HValue* elements() { return OperandAt(0); }
- HValue* key() { return OperandAt(1); }
- HValue* value() { return OperandAt(2); }
+ HValue* elements() const { return OperandAt(0); }
+ HValue* key() const { return OperandAt(1); }
+ HValue* value() const { return OperandAt(2); }
bool value_is_smi() const {
return IsFastSmiElementsKind(elements_kind_);
}
StoreFieldOrKeyedMode store_mode() const { return store_mode_; }
ElementsKind elements_kind() const { return elements_kind_; }
- uint32_t index_offset() { return index_offset_; }
- void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
- virtual int MaxIndexOffsetBits() {
+ // Current base offset of keyed stores through this instruction.
+ uint32_t base_offset() { return base_offset_; }
+ // Adds |base_offset| to the accumulated base offset.
+ void IncreaseBaseOffset(uint32_t base_offset) {
+ // The base offset is usually simply the size of the array header,
+ // except when dehoisting adds an additional offset due to an array
+ // index key manipulation, in which case it becomes (array header size +
+ // constant-offset-from-key * kPointerSize).
+ base_offset_ += base_offset;
+ }
+ virtual int MaxBaseOffsetBits() {
return 31 - ElementsKindToShiftSize(elements_kind_);
}
HValue* GetKey() { return key(); }
virtual bool HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) V8_OVERRIDE {
ASSERT(side_effect == kNewSpacePromotion);
- new_space_dominator_ = dominator;
+ dominator_ = dominator;
return false;
}
- HValue* new_space_dominator() const { return new_space_dominator_; }
+ HValue* dominator() const { return dominator_; }
bool NeedsWriteBarrier() {
if (value_is_smi()) {
return false;
} else {
return StoringValueNeedsWriteBarrier(value()) &&
- ReceiverObjectNeedsWriteBarrier(elements(), value(),
- new_space_dominator());
+ ReceiverObjectNeedsWriteBarrier(elements(), value(), dominator());
}
}
+ // Classifies the stored value's base allocation relative to the
+ // side-effect dominator (see PointersToHereCheckForObject).
+ PointersToHereCheck PointersToHereCheckForValue() const {
+ return PointersToHereCheckForObject(value(), dominator());
+ }
+
bool NeedsCanonicalization();
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
private:
HStoreKeyed(HValue* obj, HValue* key, HValue* val,
ElementsKind elements_kind,
- StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE)
+ StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE,
+ int offset = kDefaultKeyedHeaderOffsetSentinel)
: elements_kind_(elements_kind),
- index_offset_(0),
+ base_offset_(offset == kDefaultKeyedHeaderOffsetSentinel
+ ? GetDefaultHeaderSizeForElementsKind(elements_kind)
+ : offset),
is_dehoisted_(false),
is_uninitialized_(false),
store_mode_(store_mode),
- new_space_dominator_(NULL) {
+ dominator_(NULL) {
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, val);
- ASSERT(store_mode != STORE_TO_INITIALIZED_ENTRY ||
- elements_kind == FAST_SMI_ELEMENTS);
-
if (IsFastObjectElementsKind(elements_kind)) {
SetFlag(kTrackSideEffectDominators);
SetDependsOnFlag(kNewSpacePromotion);
}
ElementsKind elements_kind_;
- uint32_t index_offset_;
+ uint32_t base_offset_;
bool is_dehoisted_ : 1;
bool is_uninitialized_ : 1;
StoreFieldOrKeyedMode store_mode_: 1;
- HValue* new_space_dominator_;
+ HValue* dominator_;
};
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
virtual HType CalculateInferredType() V8_OVERRIDE {
- return HType::Tagged();
+ if (value()->type().IsHeapObject()) return value()->type();
+ return HType::HeapObject();
}
- HValue* value() { return OperandAt(0); }
- HValue* map() { return OperandAt(1); }
+ HValue* value() const { return OperandAt(0); }
+ HValue* map() const { return OperandAt(1); }
+
+ virtual HValue* Canonicalize() V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(CheckMapValue)
}
private:
- HCheckMapValue(HValue* value,
- HValue* map) {
+ HCheckMapValue(HValue* value, HValue* map)
+ : HTemplateInstruction<2>(HType::HeapObject()) {
SetOperandAt(0, value);
SetOperandAt(1, map);
set_representation(Representation::Tagged());
class HLoadFieldByIndex V8_FINAL : public HTemplateInstruction<2> {
public:
+ DECLARE_INSTRUCTION_FACTORY_P2(HLoadFieldByIndex, HValue*, HValue*);
+
+ // Loads a field of |object| selected by the runtime value |index|.
HLoadFieldByIndex(HValue* object,
HValue* index) {
SetOperandAt(0, object);
SetOperandAt(1, index);
+ // This load may allocate -- presumably when boxing a double field into
+ // a HeapNumber; TODO(review): confirm against the lowering.
+ SetChangesFlag(kNewSpacePromotion);
set_representation(Representation::Tagged());
}
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ // Operand 1 is the field index and must be a smi; the object operand
+ // stays tagged.
- return Representation::Tagged();
+ if (index == 1) {
+ return Representation::Smi();
+ } else {
+ return Representation::Tagged();
+ }
}
HValue* object() { return OperandAt(0); }
};
+// Installs a new context value; its single operand is the context to store.
+class HStoreFrameContext: public HUnaryOperation {
+ public:
+ DECLARE_INSTRUCTION_FACTORY_P1(HStoreFrameContext, HValue*);
+
+ // The context value being installed.
+ HValue* context() { return OperandAt(0); }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext)
+ private:
+ explicit HStoreFrameContext(HValue* context)
+ : HUnaryOperation(context) {
+ set_representation(Representation::Tagged());
+ // Marks context slots as changed so passes that depend on
+ // kContextSlots cannot be reordered or reused across this store.
+ SetChangesFlag(kContextSlots);
+ }
+};
+
+
+// Allocates a block context described by |scope_info| within the given
+// context, for the given function. Operands: 0 = context, 1 = function.
+class HAllocateBlockContext: public HTemplateInstruction<2> {
+ public:
+ DECLARE_INSTRUCTION_FACTORY_P3(HAllocateBlockContext, HValue*,
+ HValue*, Handle<ScopeInfo>);
+ HValue* context() { return OperandAt(0); }
+ HValue* function() { return OperandAt(1); }
+ // Scope metadata for the block context being allocated.
+ Handle<ScopeInfo> scope_info() { return scope_info_; }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Tagged();
+ }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext)
+
+ private:
+ HAllocateBlockContext(HValue* context,
+ HValue* function,
+ Handle<ScopeInfo> scope_info)
+ : scope_info_(scope_info) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, function);
+ set_representation(Representation::Tagged());
+ }
+
+ Handle<ScopeInfo> scope_info_;
+};
+
+
class HNullarySIMDOperation V8_FINAL : public HTemplateInstruction<1> {
public:
static HInstruction* New(Zone* zone,
#define SIMD_NULLARY_OPERATION_CASE_ITEM(p1, p2, name, representation) \
case k##name: \
set_representation(Representation::representation()); \
- set_type(HType::TypeFromRepresentation(representation_)); \
+ set_type(HType::FromRepresentation(representation_)); \
break;
SIMD_NULLARY_OPERATIONS(SIMD_NULLARY_OPERATION_CASE_ITEM)
#undef SIMD_NULLARY_OPERATION_CASE_ITEM
switch (op) {
case kSIMD128Change:
set_representation(to);
- set_type(HType::TypeFromRepresentation(to));
+ set_type(HType::FromRepresentation(to));
break;
#define SIMD_UNARY_OPERATION_CASE_ITEM(p1, p2, name, representation, p5) \
case k##name: \
set_representation(Representation::representation()); \
- set_type(HType::TypeFromRepresentation(representation_)); \
+ set_type(HType::FromRepresentation(representation_)); \
+ if (Representation::p5().IsInteger32()) { \
+ SetFlag(kTruncatingToInt32); \
+ } \
break;
SIMD_UNARY_OPERATIONS(SIMD_UNARY_OPERATION_CASE_ITEM)
SIMD_UNARY_OPERATIONS_FOR_PROPERTY_ACCESS(SIMD_UNARY_OPERATION_CASE_ITEM)
#define SIMD_BINARY_OPERATION_CASE_ITEM(p1, p2, name, representation, p5, p6) \
case k##name: \
set_representation(Representation::representation()); \
- set_type(HType::TypeFromRepresentation(representation_)); \
+ set_type(HType::FromRepresentation(representation_)); \
+ if (Representation::p5().IsInteger32() || \
+ Representation::p6().IsInteger32()) { \
+ SetFlag(kTruncatingToInt32); \
+ } \
break;
SIMD_BINARY_OPERATIONS(SIMD_BINARY_OPERATION_CASE_ITEM)
#undef SIMD_BINARY_OPERATION_CASE_ITEM
p6, p7) \
case k##name: \
set_representation(Representation::representation()); \
- set_type(HType::TypeFromRepresentation(representation_)); \
+ set_type(HType::FromRepresentation(representation_)); \
+ if (Representation::p5().IsInteger32() || \
+ Representation::p6().IsInteger32() || \
+ Representation::p7().IsInteger32()) { \
+ SetFlag(kTruncatingToInt32); \
+ } \
break;
SIMD_TERNARY_OPERATIONS(SIMD_TERNARY_OPERATION_CASE_ITEM)
#undef SIMD_TERNARY_OPERATION_CASE_ITEM
p6, p7, p8) \
case k##name: \
set_representation(Representation::representation()); \
- set_type(HType::TypeFromRepresentation(representation_)); \
+ set_type(HType::FromRepresentation(representation_)); \
+ if (Representation::p5().IsInteger32() || \
+ Representation::p6().IsInteger32() || \
+ Representation::p7().IsInteger32() || \
+ Representation::p8().IsInteger32()) { \
+ SetFlag(kTruncatingToInt32); \
+ } \
break;
SIMD_QUARTERNARY_OPERATIONS(SIMD_QUARTERNARY_OPERATION_CASE_ITEM)
#undef SIMD_QUARTERNARY_OPERATION_CASE_ITEM