}
-void Assembler::RecordDeoptReason(const int reason, const int raw_position) {
- if (FLAG_trace_deopt) {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::POSITION, raw_position);
- RecordRelocInfo(RelocInfo::DEOPT_REASON, reason);
- }
-}
-
-
void Assembler::RecordConstPool(int size) {
// We only need this for debugger support, to correctly compute offsets in the
// code.
// Use --code-comments to enable.
void RecordComment(const char* msg);
- // Record a deoptimization reason that can be used by a log or cpu profiler.
- // Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const int raw_position);
-
// Record the emission of a constant pool.
//
// The emission of the constant pool depends on the size of the code generated and
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ const char* detail,
Deoptimizer::BailoutType bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
}
Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ instr->Mnemonic(), detail);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ const char* detail) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(condition, instr, deopt_reason, bailout_type);
+ DeoptimizeIf(condition, instr, detail, bailout_type);
}
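After the revert, every deopt site passes a plain C string that is threaded into the jump-table entry through Deoptimizer::Reason. A minimal sketch of the record this builds, assuming a simple aggregate (field names here are illustrative, not V8's exact declaration):

// Hypothetical shape of the per-site data DeoptimizeIf() assembles:
struct Reason {
  int raw_position;      // raw source position of the hydrogen value
  const char* mnemonic;  // lithium instruction mnemonic
  const char* detail;    // free-form string that replaced the DeoptReason enum
};
// Construction mirrors the call above:
//   Reason reason(instr->hydrogen_value()->position().raw(),
//                 instr->Mnemonic(), detail);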
__ and_(dividend, dividend, Operand(mask));
__ rsb(dividend, dividend, Operand::Zero(), SetCC);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
__ b(&done);
}
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, "division by zero");
return;
}
Label remainder_not_zero;
__ b(ne, &remainder_not_zero);
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, "minus zero");
__ bind(&remainder_not_zero);
}
}
// case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, "division by zero");
}
// Check for kMinInt % -1; sdiv will return kMinInt, which is not what we
__ b(ne, &no_overflow_possible);
__ cmp(right_reg, Operand(-1));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
} else {
__ b(ne, &no_overflow_possible);
__ mov(result_reg, Operand::Zero());
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, "minus zero");
}
__ bind(&done);
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, "division by zero");
}
__ Move(result_reg, left_reg);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
- DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(mi, instr, "minus zero");
}
__ bind(&done);
}
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, "overflow");
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ tst(dividend, Operand(mask));
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(ne, instr, "lost precision");
}
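The mask trick above works only because |divisor| is a power of two: with divisor = ±2^k, the low k bits of the dividend are exactly the magnitude of the truncating-division remainder, so any nonzero value under the mask means precision would be lost. A self-contained illustration of the same arithmetic (hypothetical values, not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  int32_t divisor = -8;  // |divisor| == 2^3
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  assert(mask == 7);
  assert((16 & mask) == 0);  // 16 is a multiple of 8: division is exact
  assert((17 & mask) != 0);  // 17 is not: the deopt above would fire
  return 0;
}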
if (divisor == -1) { // Nice shortcut, not needed for correctness.
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, "division by zero");
return;
}
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
__ TruncatingDiv(result, dividend, Abs(divisor));
__ mov(ip, Operand(divisor));
__ smull(scratch0(), ip, result, ip);
__ sub(scratch0(), scratch0(), dividend, SetCC);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(ne, instr, "lost precision");
}
}
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
}
__ b(pl, &positive);
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
__ bind(&positive);
}
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(dividend, Operand(kMinInt));
__ cmp(divisor, Operand(-1), eq);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, "overflow");
}
if (CpuFeatures::IsSupported(SUDIV)) {
Register remainder = scratch0();
__ Mls(remainder, result, divisor, dividend);
__ cmp(remainder, Operand::Zero());
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(ne, instr, "lost precision");
}
}
// If the divisor is negative, we have to negate and handle edge cases.
__ rsb(result, dividend, Operand::Zero(), SetCC);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
}
return;
}
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, "division by zero");
return;
}
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
// Easy case: We need no dynamic check for the dividend and the flooring
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
}
__ b(pl, &positive);
__ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
__ bind(&positive);
}
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(left, Operand(kMinInt));
__ cmp(right, Operand(-1), eq);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, "overflow");
}
if (CpuFeatures::IsSupported(SUDIV)) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
__ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
switch (constant) {
case -1:
if (overflow) {
__ rsb(result, left, Operand::Zero(), SetCC);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ rsb(result, left, Operand::Zero());
}
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
__ cmp(left, Operand::Zero());
- DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(mi, instr, "minus zero");
}
__ mov(result, Operand::Zero());
break;
__ smull(result, scratch, left, right);
}
__ cmp(scratch, Operand(result, ASR, 31));
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, "overflow");
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
__ b(pl, &done);
// Bail out if the result is minus zero.
__ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
__ bind(&done);
}
}
case Token::SHR:
if (instr->can_deopt()) {
__ mov(result, Operand(left, LSR, scratch), SetCC);
- DeoptimizeIf(mi, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(mi, instr, "negative value");
} else {
__ mov(result, Operand(left, LSR, scratch));
}
} else {
if (instr->can_deopt()) {
__ tst(left, Operand(0x80000000));
- DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(ne, instr, "negative value");
}
__ Move(result, left);
}
} else {
__ SmiTag(result, left, SetCC);
}
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ mov(result, Operand(left, LSL, shift_count));
}
}
if (can_overflow) {
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
}
}
}
if (can_overflow) {
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
}
}
DCHECK(!scratch.is(object));
__ SmiTst(object);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(eq, instr, "Smi");
__ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
+ DeoptimizeIf(ne, instr, "not a date object");
if (index->value() == 0) {
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
}
if (can_overflow) {
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
}
}
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(eq, instr, "Smi");
}
const Register map = scratch0();
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
+ DeoptimizeIf(al, instr, "unexpected object");
}
}
}
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, "hole");
}
}
Register payload = ToRegister(instr->temp());
__ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, "hole");
}
// Store the value.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, "hole");
} else {
__ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
}
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, "hole");
} else {
__ b(ne, &skip_assignment);
}
// Check that the function has a prototype or an initial map.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, "hole");
// If the function does not have an initial map, we're done.
Label done;
__ ldr(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ cmp(result, Operand(0x80000000));
- DeoptimizeIf(cs, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(cs, instr, "negative value");
}
break;
case FLOAT32_ELEMENTS:
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, "hole");
}
}
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(ne, instr, "not a Smi");
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ cmp(result, scratch);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, "hole");
}
}
}
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(eq, instr, "Smi");
__ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
- DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
+ DeoptimizeIf(lt, instr, "not a JavaScript object");
__ b(&result_in_receiver);
__ bind(&global_object);
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, Operand(kArgumentsLimit));
- DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
+ DeoptimizeIf(hi, instr, "too many arguments");
// Push the receiver and use the register to keep the original
// number of arguments.
__ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, Operand(ip));
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, "not a heap number");
Label done;
Register exponent = scratch0();
// if input is positive.
__ rsb(result, input, Operand::Zero(), SetCC, mi);
// Deoptimize on overflow.
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
}
Label done, exact;
__ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
- DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(al, instr, "lost precision or NaN");
__ bind(&exact);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(result, Operand::Zero());
__ b(ne, &done);
__ cmp(input_high, Operand::Zero());
- DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(mi, instr, "minus zero");
}
__ bind(&done);
}
__ VmovHigh(input_high, input);
__ cmp(input_high, Operand::Zero());
// [-0.5, -0].
- DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(mi, instr, "minus zero");
}
__ VFPCompareAndSetFlags(input, dot_five);
__ mov(result, Operand(1), LeaveCC, eq); // +0.5.
// Reuse dot_five (double_scratch0) as we no longer need this value.
__ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
&done, &done);
- DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(al, instr, "lost precision or NaN");
__ bind(&done);
}
__ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r6, Operand(ip));
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, "not a heap number");
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(cc, instr, "out of bounds");
}
}
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
+ DeoptimizeIf(eq, instr, "memento found");
__ bind(&no_memento_found);
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ tst(input, Operand(0xc0000000));
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, "overflow");
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTag(output, input, SetCC);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ SmiTag(output, input);
}
STATIC_ASSERT(kHeapObjectTag == 1);
// If the input is a HeapObject, SmiUntag will set the carry flag.
__ SmiUntag(result, input, SetCC);
- DeoptimizeIf(cs, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(cs, instr, "not a Smi");
} else {
__ SmiUntag(result, input);
}
if (can_convert_undefined_to_nan) {
__ b(ne, &convert);
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, "not a heap number");
}
// load heap number
__ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
__ b(ne, &done);
__ VmovHigh(scratch, result_reg);
__ cmp(scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
__ jmp(&done);
if (can_convert_undefined_to_nan) {
// Convert undefined (and hole) to NaN.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(ne, instr, "not a heap number/undefined");
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
__ jmp(&done);
__ bind(&check_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(scratch2, Operand(ip));
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+ DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false");
__ mov(input_reg, Operand::Zero());
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, "not a heap number");
__ sub(ip, scratch2, Operand(kHeapObjectTag));
__ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
__ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(input_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_scratch2);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(ne, instr, "minus zero");
}
}
__ bind(&done);
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't an int32 (inside a double).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(ne, instr, "minus zero");
__ bind(&done);
}
}
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't an int32 (inside a double).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(ne, instr, "minus zero");
__ bind(&done);
}
}
__ SmiTag(result_reg, SetCC);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input));
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(ne, instr, "not a Smi");
}
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input));
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(eq, instr, "Smi");
}
}
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, "wrong instance type");
} else {
- DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(lo, instr, "wrong instance type");
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmp(scratch, Operand(last));
- DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(hi, instr, "wrong instance type");
}
}
} else {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ tst(scratch, Operand(mask));
- DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type");
} else {
__ and_(scratch, scratch, Operand(mask));
__ cmp(scratch, Operand(tag));
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, "wrong instance type");
}
}
}
} else {
__ cmp(reg, Operand(object));
}
- DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
+ DeoptimizeIf(ne, instr, "value mismatch");
}
__ StoreToSafepointRegisterSlot(r0, scratch0());
}
__ tst(scratch0(), Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed);
+ DeoptimizeIf(eq, instr, "instance migration failed");
}
if (instr->hydrogen()->HasMigrationTarget()) {
__ b(ne, deferred->entry());
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, "wrong map");
}
__ bind(&success);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, Operand(factory()->undefined_value()));
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(ne, instr, "not a heap number/undefined");
__ mov(result_reg, Operand::Zero());
__ jmp(&done);
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
- DeoptimizeIf(eq, instr, Deoptimizer::kUndefined);
+ DeoptimizeIf(eq, instr, "undefined");
Register null_value = r5;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ cmp(r0, null_value);
- DeoptimizeIf(eq, instr, Deoptimizer::kNull);
+ DeoptimizeIf(eq, instr, "null");
__ SmiTst(r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(eq, instr, "Smi");
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
- DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(le, instr, "wrong instance type");
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
__ cmp(r1, ip);
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, "wrong map");
__ bind(&use_cache);
}
__ ldr(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
+ DeoptimizeIf(eq, instr, "no cache");
__ bind(&done);
}
Register map = ToRegister(instr->map());
__ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
__ cmp(map, scratch0());
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, "wrong map");
}
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
- Deoptimizer::BailoutType bailout_type);
+ const char* detail, Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ const char* detail);
void AddToTranslation(LEnvironment* environment,
Translation* translation,
}
-void Assembler::RecordDeoptReason(const int reason, const int raw_position) {
- if (FLAG_trace_deopt) {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::POSITION, raw_position);
- RecordRelocInfo(RelocInfo::DEOPT_REASON, reason);
- }
-}
-
-
int Assembler::buffer_space() const {
return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_);
}
// Debugging ----------------------------------------------------------------
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
void RecordComment(const char* msg);
-
- // Record a deoptimization reason that can be used by a log or cpu profiler.
- // Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const int raw_position);
-
int buffer_space() const;
// Mark address of the ExitJSFrame code.
void LCodeGen::DeoptimizeBranch(
- LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
- BranchType branch_type, Register reg, int bit,
- Deoptimizer::BailoutType* override_bailout_type) {
+ LInstruction* instr, const char* detail, BranchType branch_type,
+ Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
Deoptimizer::BailoutType bailout_type =
}
Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ instr->Mnemonic(), detail);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to build frame, or restore caller doubles.
if (branch_type == always &&
}
-void LCodeGen::Deoptimize(LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+void LCodeGen::Deoptimize(LInstruction* instr, const char* detail,
Deoptimizer::BailoutType* override_bailout_type) {
- DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1,
- override_bailout_type);
+ DeoptimizeBranch(instr, detail, always, NoReg, -1, override_bailout_type);
}
void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
- DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond));
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, static_cast<BranchType>(cond));
}
void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
- DeoptimizeBranch(instr, deopt_reason, reg_zero, rt);
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, reg_zero, rt);
}
void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
- DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt);
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, reg_not_zero, rt);
}
void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ const char* detail) {
int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
- DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason);
+ DeoptimizeIfBitSet(rt, sign_bit, instr, detail);
}
void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
- DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
+ const char* detail) {
+ DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, detail);
}
void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
- DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
+ const char* detail) {
+ DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, detail);
}
void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ LInstruction* instr, const char* detail) {
__ CompareRoot(rt, index);
- DeoptimizeIf(eq, instr, deopt_reason);
+ DeoptimizeIf(eq, instr, detail);
}
void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ LInstruction* instr, const char* detail) {
__ CompareRoot(rt, index);
- DeoptimizeIf(ne, instr, deopt_reason);
+ DeoptimizeIf(ne, instr, detail);
}
void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ const char* detail) {
__ TestForMinusZero(input);
- DeoptimizeIf(vs, instr, deopt_reason);
+ DeoptimizeIf(vs, instr, detail);
}
void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
__ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotHeapNumber);
+ DeoptimizeIf(ne, instr, "not heap number");
}
void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
- DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit);
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, reg_bit_set, rt, bit);
}
void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
- DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit);
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, reg_bit_clear, rt, bit);
}
if (can_overflow) {
__ Adds(result, left, right);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Add(result, left, right);
}
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Adds(result, left, right);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Add(result, left, right);
}
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ Cmp(length, kArgumentsLimit);
- DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
+ DeoptimizeIf(hi, instr, "too many arguments");
// Push the receiver and use the register to keep the original
// number of arguments.
if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
__ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
} else {
- DeoptimizeIf(cond, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(cond, instr, "out of bounds");
}
}
__ JumpIfSmi(value, true_label);
} else if (expected.NeedsMap()) {
// If we need a map later and have a smi, deopt.
- DeoptimizeIfSmi(value, instr, Deoptimizer::kSmi);
+ DeoptimizeIfSmi(value, instr, "Smi");
}
Register map = NoReg;
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- Deoptimize(instr, Deoptimizer::kUnexpectedObject);
+ Deoptimize(instr, "unexpected object");
}
}
}
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(x0, temp);
}
- DeoptimizeIfSmi(temp, instr, Deoptimizer::kInstanceMigrationFailed);
+ DeoptimizeIfSmi(temp, instr, "instance migration failed");
}
if (instr->hydrogen()->HasMigrationTarget()) {
__ B(ne, deferred->entry());
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, "wrong map");
}
__ Bind(&success);
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- DeoptimizeIfSmi(ToRegister(instr->value()), instr, Deoptimizer::kSmi);
+ DeoptimizeIfSmi(ToRegister(instr->value()), instr, "Smi");
}
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
Register value = ToRegister(instr->value());
DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
- DeoptimizeIfNotSmi(value, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIfNotSmi(value, instr, "not a Smi");
}
__ Cmp(scratch, first);
if (first == last) {
// If there is only one type in the interval check for equality.
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, "wrong instance type");
} else if (last == LAST_TYPE) {
// We don't need to compare with the higher bound of the interval.
- DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(lo, instr, "wrong instance type");
} else {
// If we are below the lower bound, set the C flag and clear the Z flag
// to force a deopt.
__ Ccmp(scratch, last, CFlag, hs);
- DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(hi, instr, "wrong instance type");
}
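The Cmp/Ccmp pair fuses a two-sided range check into a single conditional branch: the conditional compare performs the upper-bound test only when the lower bound passed, and otherwise forces flags (CFlag, hs) that also take the deopt. Written out sequentially, the predicate being tested is just (a sketch, not the generated code):

// What the cmp/ccmp sequence above decides:
bool WrongInstanceType(unsigned type, unsigned first, unsigned last) {
  return type < first || type > last;  // either bound failing deopts
}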
} else {
uint8_t mask;
DCHECK((tag == 0) || (tag == mask));
if (tag == 0) {
DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
- Deoptimizer::kWrongInstanceType);
+ "wrong instance type");
} else {
DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
- Deoptimizer::kWrongInstanceType);
+ "wrong instance type");
}
} else {
if (tag == 0) {
__ And(scratch, scratch, mask);
__ Cmp(scratch, tag);
}
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, "wrong instance type");
}
}
}
// Check for undefined. Undefined is converted to zero for clamping conversions.
DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
- Deoptimizer::kNotAHeapNumberUndefined);
+ "not a heap number/undefined");
__ Mov(result, 0);
__ B(&done);
} else {
__ Cmp(reg, Operand(object));
}
- DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
+ DeoptimizeIf(ne, instr, "value mismatch");
}
DCHECK(object.is(result) && object.Is(x0));
DCHECK(instr->IsMarkedAsCall());
- DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
+ DeoptimizeIfSmi(object, instr, "Smi");
__ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
+ DeoptimizeIf(ne, instr, "not a date object");
if (index->value() == 0) {
__ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIfZero(dividend, instr, "division by zero");
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
// Test dividend for kMinInt by subtracting one (cmp) and checking for
// overflow.
__ Cmp(dividend, 1);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
}
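The "subtract one and check for overflow" trick relies on kMinInt being the only 32-bit value whose decrement underflows, so Cmp(dividend, 1) sets the V flag exactly when dividend == kMinInt. The same condition, computed in wider arithmetic to avoid undefined behavior (illustrative sketch):

#include <cstdint>
#include <limits>

// True exactly when the 32-bit subtraction dividend - 1 would overflow,
// i.e. when dividend == kMinInt == INT32_MIN.
bool DecrementOverflows(int32_t dividend) {
  int64_t wide = static_cast<int64_t>(dividend) - 1;
  return wide < std::numeric_limits<int32_t>::min();
}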
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ Tst(dividend, mask);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(ne, instr, "lost precision");
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
- Deoptimize(instr, Deoptimizer::kDivisionByZero);
+ Deoptimize(instr, "division by zero");
return;
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfZero(dividend, instr, "minus zero");
}
__ TruncatingDiv(result, dividend, Abs(divisor));
__ Sxtw(dividend.X(), dividend);
__ Mov(temp, divisor);
__ Smsubl(temp.X(), result, temp, dividend.X());
- DeoptimizeIfNotZero(temp, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIfNotZero(temp, instr, "lost precision");
}
}
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIfZero(divisor, instr, "division by zero");
}
// Check for (0 / -x) as that will produce negative zero.
// If the divisor >= 0 (pl, the opposite of mi) set the flags to
// condition ne, so we don't deopt, i.e. a positive divisor doesn't deopt.
__ Ccmp(dividend, 0, NoFlag, mi);
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
// Check for (kMinInt / -1).
// -1. If overflow is clear, set the flags for condition ne, as the
// dividend isn't -1, and thus we shouldn't deopt.
__ Ccmp(divisor, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, "overflow");
}
// Compute remainder and deopt if it's not zero.
Register remainder = ToRegister32(instr->temp());
__ Msub(remainder, result, divisor, dividend);
- DeoptimizeIfNotZero(remainder, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIfNotZero(remainder, instr, "lost precision");
}
Register result = ToRegister32(instr->result());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfMinusZero(input, instr, "minus zero");
}
__ TryRepresentDoubleAsInt32(result, input, double_scratch());
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->tag_result()) {
__ SmiTag(result.X());
__ LoadInstanceDescriptors(map, result);
__ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIfZero(result, instr, Deoptimizer::kNoCache);
+ DeoptimizeIfZero(result, instr, "no cache");
__ Bind(&done);
}
DCHECK(instr->IsMarkedAsCall());
DCHECK(object.Is(x0));
- DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr,
- Deoptimizer::kUndefined);
+ DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr, "undefined");
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ Cmp(object, null_value);
- DeoptimizeIf(eq, instr, Deoptimizer::kNull);
+ DeoptimizeIf(eq, instr, "null");
- DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
+ DeoptimizeIfSmi(object, instr, "Smi");
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
- DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject);
+ DeoptimizeIf(le, instr, "not a JavaScript object");
Label use_cache, call_runtime;
__ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
__ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
- DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr,
- Deoptimizer::kWrongMap);
+ DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr, "wrong map");
__ Bind(&use_cache);
}
__ Ldr(result, ContextMemOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
} else {
Label not_the_hole;
__ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, ¬_the_hole);
JSFunction::kPrototypeOrInitialMapOffset));
// Check that the function has a prototype or an initial map.
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
// If the function does not have an initial map, we're done.
Label done;
__ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
__ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
}
}
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
// Deopt if value > 0x80000000.
__ Tst(result, 0xFFFFFFFF80000000);
- DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(ne, instr, "negative value");
}
break;
case FLOAT32_ELEMENTS:
Register scratch = ToRegister(instr->temp());
__ Fmov(scratch, result);
__ Eor(scratch, scratch, kHoleNanInt64);
- DeoptimizeIfZero(scratch, instr, Deoptimizer::kHole);
+ DeoptimizeIfZero(scratch, instr, "hole");
}
}
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- DeoptimizeIfNotSmi(result, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIfNotSmi(result, instr, "not a Smi");
} else {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
}
}
}
Register result = r.IsSmi() ? ToRegister(instr->result())
: ToRegister32(instr->result());
__ Abs(result, input);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
}
}
Register result = ToRegister(instr->result());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfMinusZero(input, instr, "minus zero");
}
__ Fcvtms(result, input);
__ Cmp(result, Operand(result, SXTW));
// - The input was not NaN.
__ Fccmp(input, input, NoFlag, eq);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
}
// If the divisor is negative, we have to negate and handle edge cases.
__ Negs(result, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
}
return;
}
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
- Deoptimize(instr, Deoptimizer::kDivisionByZero);
+ Deoptimize(instr, "division by zero");
return;
}
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfZero(dividend, instr, "minus zero");
}
// Easy case: We need no dynamic check for the dividend and the flooring
__ Sdiv(result, dividend, divisor);
// Check for x / 0.
- DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIfZero(divisor, instr, "division by zero");
// Check for (kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
// The V flag will be set iff dividend == kMinInt.
__ Cmp(dividend, 1);
__ Ccmp(divisor, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, "overflow");
}
// Check for (0 / -x) that will produce negative zero.
// "divisor" can't be null because the code would have already been
// deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
// In this case we need to deoptimize to produce a -0.
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
Label done;
// Deoptimize if the result > 1, as it must be larger than 32 bits.
__ Cmp(result, 1);
- DeoptimizeIf(hi, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(hi, instr, "overflow");
// Deoptimize for negative inputs, which at this point are only numbers in
// the range [-0.5, -0.0].
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Fmov(result, input);
- DeoptimizeIfNegative(result, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfNegative(result, instr, "minus zero");
}
// Deoptimize if the input was NaN.
__ Fcmp(input, dot_five);
- DeoptimizeIf(vs, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(vs, instr, "NaN");
// Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
// if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
__ And(dividend, dividend, mask);
__ Negs(dividend, dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
__ B(&done);
}
DCHECK(!AreAliased(dividend, result, temp));
if (divisor == 0) {
- Deoptimize(instr, Deoptimizer::kDivisionByZero);
+ Deoptimize(instr, "division by zero");
return;
}
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Cbnz(result, &remainder_not_zero);
- DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfNegative(dividend, instr, "minus zero");
__ bind(&remainder_not_zero);
}
}
// modulo = dividend - quotient * divisor
__ Sdiv(result, dividend, divisor);
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIfZero(divisor, instr, "division by zero");
}
__ Msub(result, result, divisor, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cbnz(result, &done);
- DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfNegative(dividend, instr, "minus zero");
}
__ Bind(&done);
}
if (bailout_on_minus_zero) {
if (right < 0) {
// The result is -0 if right is negative and left is zero.
- DeoptimizeIfZero(left, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfZero(left, instr, "minus zero");
} else if (right == 0) {
// The result is -0 if the right is zero and the left is negative.
- DeoptimizeIfNegative(left, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfNegative(left, instr, "minus zero");
}
}
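Both deopts exist because JavaScript distinguishes -0 from +0 while Smi integers cannot represent -0: multiplying zero by a negative number, in either operand order, yields -0 as a double, so the integer fast path has to bail out. The same cases in plain IEEE double arithmetic (illustrative only):

#include <cassert>
#include <cmath>

int main() {
  double a = 0.0 * -5.0;  // left zero, right negative  -> -0
  double b = -5.0 * 0.0;  // left negative, right zero  -> -0
  assert(std::signbit(a) && std::signbit(b));
  assert(a == 0.0);  // -0 == +0, so an integer 0 would silently drop the sign
  return 0;
}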
if (can_overflow) {
// Only 0x80000000 can overflow here.
__ Negs(result, left);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Neg(result, left);
}
case 2:
if (can_overflow) {
__ Adds(result, left, left);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Add(result, left, left);
}
DCHECK(!AreAliased(scratch, left));
__ Cls(scratch, left);
__ Cmp(scratch, right_log2);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(lt, instr, "overflow");
}
if (right >= 0) {
// result = -left << log2(-right)
if (can_overflow) {
__ Negs(result, Operand(left, LSL, right_log2));
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Neg(result, Operand(left, LSL, right_log2));
}
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(mi, instr, "minus zero");
}
if (can_overflow) {
__ Smull(result.X(), left, right);
__ Cmp(result.X(), Operand(result, SXTW));
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, "overflow");
} else {
__ Mul(result, left, right);
}
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(mi, instr, "minus zero");
}
STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
__ Smulh(result, left, right);
__ Cmp(result, Operand(result.W(), SXTW));
__ SmiTag(result);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, "overflow");
} else {
if (AreAliased(result, left, right)) {
// All three registers are the same: half untag the input and then
// Load heap number.
__ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
if (instr->hydrogen()->deoptimize_on_minus_zero()) {
- DeoptimizeIfMinusZero(result, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfMinusZero(result, instr, "minus zero");
}
__ B(&done);
if (can_convert_undefined_to_nan) {
__ Bind(&convert_undefined);
DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
- Deoptimizer::kNotAHeapNumberUndefined);
+ "not a heap number/undefined");
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
Register output = ToRegister(instr->result());
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIfNegative(input.W(), instr, Deoptimizer::kOverflow);
+ DeoptimizeIfNegative(input.W(), instr, "overflow");
}
__ SmiTag(output, input);
}
Label done, untag;
if (instr->needs_check()) {
- DeoptimizeIfNotSmi(input, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIfNotSmi(input, instr, "not a Smi");
}
__ Bind(&untag);
if (instr->can_deopt()) {
// If `left >>> right` >= 0x80000000, the result is not representable
// in a signed 32-bit smi.
- DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIfNegative(result, instr, "negative value");
}
break;
default: UNREACHABLE();
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIfNegative(left, instr, "negative value");
}
__ Mov(result, left, kDiscardForSameWReg);
} else {
if (instr->can_deopt()) {
// If `left >>> right` >= 0x80000000, the result is not representable
// in a signed 32-bit smi.
- DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIfNegative(result, instr, "negative value");
}
break;
default: UNREACHABLE();
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIfNegative(left, instr, "negative value");
}
__ Mov(result, left);
} else {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ Ldr(scratch, target);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
+ DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr, "hole");
} else {
__ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
}
if (instr->hydrogen()->RequiresHoleCheck()) {
Register payload = ToRegister(instr->temp2());
__ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
- DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
+ DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr, "hole");
}
// Store the value.
if (can_overflow) {
__ Subs(result, left, right);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Sub(result, left, right);
}
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Subs(result, left, right);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Sub(result, left, right);
}
// Output contains zero; undefined is converted to zero for truncating
// conversions.
DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
- Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+ "not a heap number/undefined/true/false");
} else {
Register output = ToRegister32(instr->result());
DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
// function. If the result is out of range, branch to deoptimize.
__ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
__ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cmp(output, 0);
__ B(ne, &done);
__ Fmov(scratch1, dbl_scratch1);
- DeoptimizeIfNegative(scratch1, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfNegative(scratch1, instr, "minus zero");
}
}
__ Bind(&done);
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
- DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
+ DeoptimizeIf(eq, instr, "memento found");
__ Bind(&no_memento_found);
}
Register temp = ToRegister(instr->temp());
__ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
__ Cmp(map, temp);
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, "wrong map");
}
__ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
// Deoptimize if the receiver is not a JS object.
- DeoptimizeIfSmi(receiver, instr, Deoptimizer::kSmi);
+ DeoptimizeIfSmi(receiver, instr, "Smi");
__ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
__ B(ge, ©_receiver);
- Deoptimize(instr, Deoptimizer::kNotAJavaScriptObject);
+ Deoptimize(instr, "not a JavaScript object");
__ Bind(&global_object);
__ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
Register temp,
LOperand* index,
String::Encoding encoding);
- void DeoptimizeBranch(LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ void DeoptimizeBranch(LInstruction* instr, const char* detail,
BranchType branch_type, Register reg = NoReg,
int bit = -1,
Deoptimizer::BailoutType* override_bailout_type = NULL);
- void Deoptimize(LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
+ void Deoptimize(LInstruction* instr, const char* detail,
Deoptimizer::BailoutType* override_bailout_type = NULL);
- void DeoptimizeIf(Condition cond, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
- void DeoptimizeIfZero(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ void DeoptimizeIf(Condition cond, LInstruction* instr, const char* detail);
+ void DeoptimizeIfZero(Register rt, LInstruction* instr, const char* detail);
void DeoptimizeIfNotZero(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ const char* detail);
void DeoptimizeIfNegative(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
- void DeoptimizeIfSmi(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
- void DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ const char* detail);
+ void DeoptimizeIfSmi(Register rt, LInstruction* instr, const char* detail);
+ void DeoptimizeIfNotSmi(Register rt, LInstruction* instr, const char* detail);
void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ LInstruction* instr, const char* detail);
void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ LInstruction* instr, const char* detail);
void DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr);
void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ const char* detail);
void DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ const char* detail);
void DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ const char* detail);
MemOperand PrepareKeyedExternalArrayOperand(Register key,
Register base,
// position: 01
// statement_position: 10
// comment: 11 (not used in short_data_record)
-// deopt_reason: 11 (not used in long_data_record)
//
// Long record format:
// 4-bit middle_tag:
const int kStatementPositionTag = 2;
const int kCommentTag = 3;
-// Reuse the same value for deopt reason tag in short record format.
-// It is possible because we use kCommentTag only for the long record format.
-const int kDeoptReasonTag = 3;
-
const int kPoolExtraTag = kPCJumpExtraTag - 2;
const int kConstPoolTag = 0;
const int kVeneerPoolTag = 1;
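The short ("tagged") record format packs its payload into the high bits of a single byte, with the tag in the low bits; that is why the ReadTaggedData deleted further down simply shifts the byte right by kTagBits. A minimal encode/decode sketch under that assumption (not the full RelocInfo writer; the two-bit tag width is implied by the 01/10/11 tag values listed above):

#include <cstdint>

// Hedged sketch of the compact-record byte layout:
//   byte = [ payload : (8 - kTagBits) bits ][ tag : kTagBits bits ]
const int kTagBits = 2;

inline uint8_t EncodeTagged(uint8_t payload, uint8_t tag) {
  return static_cast<uint8_t>((payload << kTagBits) | tag);
}
inline uint8_t DecodeTaggedData(uint8_t b) {
  return static_cast<uint8_t>(b >> kTagBits);  // what ReadTaggedData did
}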
WriteExtraTaggedIntData(id_delta, kCodeWithIdTag);
}
last_id_ = static_cast<int>(rinfo->data());
- } else if (rmode == RelocInfo::DEOPT_REASON) {
- DCHECK(rinfo->data() < (1 << kSmallDataBits));
- WriteTaggedPC(pc_delta, kLocatableTag);
- WriteTaggedData(rinfo->data(), kDeoptReasonTag);
} else if (RelocInfo::IsPosition(rmode)) {
// Use signed delta-encoding for position.
DCHECK(static_cast<int>(rinfo->data()) == rinfo->data());
}
-inline void RelocIterator::ReadTaggedData() {
- uint8_t unsigned_b = *pos_;
- rinfo_.data_ = unsigned_b >> kTagBits;
-}
-
-
static inline RelocInfo::Mode GetPositionModeFromTag(int tag) {
DCHECK(tag == kNonstatementPositionTag ||
tag == kStatementPositionTag);
ReadTaggedId();
return;
}
- } else if (locatable_tag == kDeoptReasonTag) {
- ReadTaggedData();
- if (SetMode(RelocInfo::DEOPT_REASON)) return;
} else {
+ // Compact encoding is never used for comments,
+ // so it must be a position.
DCHECK(locatable_tag == kNonstatementPositionTag ||
locatable_tag == kStatementPositionTag);
if (mode_mask_ & RelocInfo::kPositionMask) {
return "external reference";
case RelocInfo::INTERNAL_REFERENCE:
return "internal reference";
- case RelocInfo::DEOPT_REASON:
- return "deopt reason";
case RelocInfo::CONST_POOL:
return "constant pool";
case RelocInfo::VENEER_POOL:
os << static_cast<const void*>(pc_) << " " << RelocModeName(rmode_);
if (IsComment(rmode_)) {
os << " (" << reinterpret_cast<char*>(data_) << ")";
- } else if (rmode_ == DEOPT_REASON) {
- os << " (" << Deoptimizer::GetDeoptReason(
- static_cast<Deoptimizer::DeoptReason>(data_)) << ")";
} else if (rmode_ == EMBEDDED_OBJECT) {
os << " (" << Brief(target_object()) << ")";
} else if (rmode_ == EXTERNAL_REFERENCE) {
case STATEMENT_POSITION:
case EXTERNAL_REFERENCE:
case INTERNAL_REFERENCE:
- case DEOPT_REASON:
case CONST_POOL:
case VENEER_POOL:
case DEBUG_BREAK_SLOT:
CODE_TARGET, // Code target which is not any of the above.
CODE_TARGET_WITH_ID,
CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
-  DEBUG_BREAK,     // Code target for the debugger statement.
+  DEBUG_BREAK,  // Code target for the debugger statement.
EMBEDDED_OBJECT,
CELL,
RUNTIME_ENTRY,
JS_RETURN, // Marks start of the ExitJSFrame code.
COMMENT,
-  DEOPT_REASON,        // Deoptimization reason index.
-  POSITION,            // See comment for kNoPosition above.
+  POSITION,  // See comment for kNoPosition above.
STATEMENT_POSITION, // See comment for kNoPosition above.
-  DEBUG_BREAK_SLOT,    // Additional code inserted for debug break slot.
+  DEBUG_BREAK_SLOT,  // Additional code inserted for debug break slot.
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
// add more as needed
// Pseudo-types
-  NUMBER_OF_MODES,    // There are at most 15 modes with noncompact encoding.
-  NONE32,             // never recorded 32-bit value
-  NONE64,             // never recorded 64-bit value
+  NUMBER_OF_MODES,  // There are at most 15 modes with noncompact encoding.
+  NONE32,           // never recorded 32-bit value
+  NONE64,           // never recorded 64-bit value
  CODE_AGE_SEQUENCE,  // Not stored in RelocInfo array, used explicitly by
// code aging.
FIRST_REAL_RELOC_MODE = CODE_TARGET,
int GetLocatableTypeTag();
void ReadTaggedId();
void ReadTaggedPosition();
- void ReadTaggedData();
// If the given mode is wanted, set it in rinfo_ and return true.
// Else return false. Used for efficiently skipping unwanted modes.
IfBuilder builder(this);
builder.IfNot<HCompareObjectEqAndBranch, HValue*>(undefined, undefined);
builder.Then();
- builder.ElseDeopt(Deoptimizer::kForcedDeoptToRuntime);
+ builder.ElseDeopt("Forced deopt to runtime");
return undefined;
}
if_fixed_cow.End();
zero_capacity.End();
- checker.ElseDeopt(Deoptimizer::kUninitializedBoilerplateLiterals);
+ checker.ElseDeopt("Uninitialized boilerplate literals");
checker.End();
return environment()->Pop();
}
environment()->Push(object);
- checker.ElseDeopt(Deoptimizer::kUninitializedBoilerplateInFastClone);
+ checker.ElseDeopt("Uninitialized boilerplate in fast clone");
checker.End();
return environment()->Pop();
result = Add<HLoadKeyed>(backing_store, key, nullptr, FAST_HOLEY_ELEMENTS,
NEVER_RETURN_HOLE);
}
- in_unmapped_range.ElseDeopt(Deoptimizer::kOutsideOfRange);
+ in_unmapped_range.ElseDeopt("Outside of range");
in_unmapped_range.End();
return result;
}
IfBuilder positive_smi(this);
positive_smi.If<HCompareNumericAndBranch>(key, graph()->GetConstant0(),
Token::LT);
- positive_smi.ThenDeopt(Deoptimizer::kKeyIsNegative);
+ positive_smi.ThenDeopt("key is negative");
positive_smi.End();
HValue* constant_two = Add<HConstant>(2);
Add<HLoadNamedField>(global, nullptr, HObjectAccess::ForMap());
IfBuilder map_check(this);
map_check.IfNot<HCompareObjectEqAndBranch>(expected_map, map);
- map_check.ThenDeopt(Deoptimizer::kUnknownMap);
+ map_check.ThenDeopt("Unknown map");
map_check.End();
}
IfBuilder builder(this);
builder.If<HCompareObjectEqAndBranch>(cell_contents, value);
builder.Then();
- builder.ElseDeopt(
- Deoptimizer::kUnexpectedCellContentsInConstantGlobalStore);
+ builder.ElseDeopt("Unexpected cell contents in constant global store");
builder.End();
} else {
// Load the payload of the global parameter cell. A hole indicates that the
HValue* hole_value = graph()->GetConstantHole();
builder.If<HCompareObjectEqAndBranch>(cell_contents, hole_value);
builder.Then();
- builder.Deopt(Deoptimizer::kUnexpectedCellContentsInGlobalStore);
+ builder.Deopt("Unexpected cell contents in global store");
builder.Else();
HStoreNamedField* store = Add<HStoreNamedField>(cell, access, value);
store->MarkReceiverAsCell();
if (FLAG_trace_elements_transitions) {
// Tracing elements transitions is the job of the runtime.
- Add<HDeoptimize>(Deoptimizer::kTracingElementsTransitions,
- Deoptimizer::EAGER);
+ Add<HDeoptimize>("Tracing elements transitions", Deoptimizer::EAGER);
} else {
info()->MarkAsSavesCallerDoubles();
BuildElementsKindLimitCheck(&kind_if, bit_field2,
SLOPPY_ARGUMENTS_ELEMENTS);
// Non-strict elements are not handled.
- Add<HDeoptimize>(Deoptimizer::kNonStrictElementsInKeyedLoadGenericStub,
+ Add<HDeoptimize>("non-strict elements in KeyedLoadGenericStub",
Deoptimizer::EAGER);
Push(graph()->GetConstant0());
BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
EXTERNAL_UINT8_CLAMPED_ELEMENTS);
- kind_if.ElseDeopt(
- Deoptimizer::kElementsKindUnhandledInKeyedLoadGenericStub);
+ kind_if.ElseDeopt("ElementsKind unhandled in KeyedLoadGenericStub");
kind_if.End();
}
v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
}
-
-const char* Deoptimizer::GetDeoptReason(DeoptReason deopt_reason) {
- DCHECK(deopt_reason < kLastDeoptReason);
-#define DEOPT_MESSAGES_TEXTS(C, T) T,
- static const char* deopt_messages_[] = {
- DEOPT_MESSAGES_LIST(DEOPT_MESSAGES_TEXTS)};
-#undef DEOPT_MESSAGES_TEXTS
- return deopt_messages_[deopt_reason];
-}
} } // namespace v8::internal
};
-#define DEOPT_MESSAGES_LIST(V) \
- V(kNoReason, "no reason") \
- V(kConstantGlobalVariableAssignment, "Constant global variable assignment") \
- V(kConversionOverflow, "conversion overflow") \
- V(kDivisionByZero, "division by zero") \
- V(kElementsKindUnhandledInKeyedLoadGenericStub, \
- "ElementsKind unhandled in KeyedLoadGenericStub") \
- V(kExpectedHeapNumber, "Expected heap number") \
- V(kExpectedSmi, "Expected smi") \
- V(kForcedDeoptToRuntime, "Forced deopt to runtime") \
- V(kHole, "hole") \
- V(kHoleyArrayDespitePackedElements_kindFeedback, \
- "Holey array despite packed elements_kind feedback") \
- V(kInstanceMigrationFailed, "instance migration failed") \
- V(kInsufficientTypeFeedbackForCallWithArguments, \
- "Insufficient type feedback for call with arguments") \
- V(kInsufficientTypeFeedbackForCombinedTypeOfBinaryOperation, \
- "Insufficient type feedback for combined type of binary operation") \
- V(kInsufficientTypeFeedbackForGenericNamedAccess, \
- "Insufficient type feedback for generic named access") \
- V(kInsufficientTypeFeedbackForKeyedLoad, \
- "Insufficient type feedback for keyed load") \
- V(kInsufficientTypeFeedbackForKeyedStore, \
- "Insufficient type feedback for keyed store") \
- V(kInsufficientTypeFeedbackForLHSOfBinaryOperation, \
- "Insufficient type feedback for LHS of binary operation") \
- V(kInsufficientTypeFeedbackForRHSOfBinaryOperation, \
- "Insufficient type feedback for RHS of binary operation") \
- V(kKeyIsNegative, "key is negative") \
- V(kLostPrecision, "lost precision") \
- V(kLostPrecisionOrNaN, "lost precision or NaN") \
- V(kMementoFound, "memento found") \
- V(kMinusZero, "minus zero") \
- V(kNaN, "NaN") \
- V(kNegativeKeyEncountered, "Negative key encountered") \
- V(kNegativeValue, "negative value") \
- V(kNoCache, "no cache") \
- V(kNonStrictElementsInKeyedLoadGenericStub, \
- "non-strict elements in KeyedLoadGenericStub") \
- V(kNotADateObject, "not a date object") \
- V(kNotAHeapNumber, "not a heap number") \
- V(kNotAHeapNumberUndefinedBoolean, "not a heap number/undefined/true/false") \
- V(kNotAHeapNumberUndefined, "not a heap number/undefined") \
- V(kNotAJavaScriptObject, "not a JavaScript object") \
- V(kNotASmi, "not a Smi") \
- V(kNotHeapNumber, "not heap number") \
- V(kNull, "null") \
- V(kOutOfBounds, "out of bounds") \
- V(kOutsideOfRange, "Outside of range") \
- V(kOverflow, "overflow") \
- V(kReceiverWasAGlobalObject, "receiver was a global object") \
- V(kSmi, "Smi") \
- V(kTooManyArguments, "too many arguments") \
- V(kTooManyUndetectableTypes, "Too many undetectable types") \
- V(kTracingElementsTransitions, "Tracing elements transitions") \
- V(kTypeMismatchBetweenFeedbackAndConstant, \
- "Type mismatch between feedback and constant") \
- V(kUndefined, "undefined") \
- V(kUnexpectedCellContentsInConstantGlobalStore, \
- "Unexpected cell contents in constant global store") \
- V(kUnexpectedCellContentsInGlobalStore, \
- "Unexpected cell contents in global store") \
- V(kUnexpectedObject, "unexpected object") \
- V(kUnexpectedRHSOfBinaryOperation, "Unexpected RHS of binary operation") \
- V(kUninitializedBoilerplateInFastClone, \
- "Uninitialized boilerplate in fast clone") \
- V(kUninitializedBoilerplateLiterals, "Uninitialized boilerplate literals") \
- V(kUnknownMapInPolymorphicAccess, "Unknown map in polymorphic access") \
- V(kUnknownMapInPolymorphicCall, "Unknown map in polymorphic call") \
- V(kUnknownMapInPolymorphicElementAccess, \
- "Unknown map in polymorphic element access") \
- V(kUnknownMap, "Unknown map") \
- V(kValueMismatch, "value mismatch") \
- V(kWrongInstanceType, "wrong instance type") \
- V(kWrongMap, "wrong map")
-
-
class Deoptimizer : public Malloced {
public:
enum BailoutType {
kBailoutTypesWithCodeEntry = SOFT + 1
};
-#define DEOPT_MESSAGES_CONSTANTS(C, T) C,
- enum DeoptReason {
- DEOPT_MESSAGES_LIST(DEOPT_MESSAGES_CONSTANTS) kLastDeoptReason
- };
-#undef DEOPT_MESSAGES_CONSTANTS
-
- static const char* GetDeoptReason(DeoptReason deopt_reason);
-
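+  // Bundles what LCodeGenBase::DeoptComment prints for a deopt site: the
+  // raw source position, the lithium instruction mnemonic, and a free-form
+  // detail string such as "minus zero".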
struct Reason {
- Reason(int r, const char* m, DeoptReason d)
- : raw_position(r), mnemonic(m), deopt_reason(d) {}
+ Reason(int r, const char* m, const char* d)
+ : raw_position(r), mnemonic(m), detail(d) {}
bool operator==(const Reason& other) const {
return raw_position == other.raw_position &&
CStringEquals(mnemonic, other.mnemonic) &&
- deopt_reason == other.deopt_reason;
+ CStringEquals(detail, other.detail);
}
bool operator!=(const Reason& other) const { return !(*this == other); }
int raw_position;
const char* mnemonic;
- DeoptReason deopt_reason;
+ const char* detail;
};
struct JumpTableEntry : public ZoneObject {
} else {
out.AddFormatted(" ;; debug: position %d", relocinfo.data());
}
- } else if (rmode == RelocInfo::DEOPT_REASON) {
- Deoptimizer::DeoptReason reason =
- static_cast<Deoptimizer::DeoptReason>(relocinfo.data());
- out.AddFormatted(" ;; debug: deopt reason '%s'",
- Deoptimizer::GetDeoptReason(reason));
} else if (rmode == RelocInfo::EMBEDDED_OBJECT) {
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
class HDeoptimize FINAL : public HTemplateControlInstruction<1, 0> {
public:
static HDeoptimize* New(Isolate* isolate, Zone* zone, HValue* context,
- Deoptimizer::DeoptReason reason,
- Deoptimizer::BailoutType type,
+ const char* reason, Deoptimizer::BailoutType type,
HBasicBlock* unreachable_continuation) {
return new(zone) HDeoptimize(reason, type, unreachable_continuation);
}
return Representation::None();
}
- Deoptimizer::DeoptReason reason() const { return reason_; }
+ const char* reason() const { return reason_; }
Deoptimizer::BailoutType type() { return type_; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
private:
- explicit HDeoptimize(Deoptimizer::DeoptReason reason,
+ explicit HDeoptimize(const char* reason,
Deoptimizer::BailoutType type,
HBasicBlock* unreachable_continuation)
: reason_(reason), type_(type) {
SetSuccessorAt(0, unreachable_continuation);
}
- Deoptimizer::DeoptReason reason_;
+ const char* reason_;
Deoptimizer::BailoutType type_;
};
}
-void HGraphBuilder::IfBuilder::Deopt(Deoptimizer::DeoptReason reason) {
+void HGraphBuilder::IfBuilder::Deopt(const char* reason) {
DCHECK(did_then_);
builder()->Add<HDeoptimize>(reason, Deoptimizer::EAGER);
AddMergeAtJoinBlock(true);
}
-void HGraphBuilder::FinishExitWithHardDeoptimization(
- Deoptimizer::DeoptReason reason) {
+void HGraphBuilder::FinishExitWithHardDeoptimization(const char* reason) {
Add<HDeoptimize>(reason, Deoptimizer::EAGER);
FinishExitCurrentBlock(New<HAbnormalExit>());
}
if_global_object.If<HCompareNumericAndBranch>(instance_type,
min_global_type,
Token::GTE);
- if_global_object.ThenDeopt(Deoptimizer::kReceiverWasAGlobalObject);
+ if_global_object.ThenDeopt("receiver was a global object");
if_global_object.End();
}
if_objectissmi.Else();
{
if (type->Is(Type::SignedSmall())) {
- if_objectissmi.Deopt(Deoptimizer::kExpectedSmi);
+ if_objectissmi.Deopt("Expected smi");
} else {
// Check if the object is a heap number.
IfBuilder if_objectisnumber(this);
if_objectisnumber.Else();
{
if (type->Is(Type::Number())) {
- if_objectisnumber.Deopt(Deoptimizer::kExpectedHeapNumber);
+ if_objectisnumber.Deopt("Expected heap number");
}
}
if_objectisnumber.JoinContinuation(&found);
negative_checker.Then();
HInstruction* result = AddElementAccess(
backing_store, key, val, bounds_check, elements_kind, access_type);
- negative_checker.ElseDeopt(Deoptimizer::kNegativeKeyEncountered);
+ negative_checker.ElseDeopt("Negative key encountered");
negative_checker.End();
length_checker.End();
return result;
if (!(top_info()->IsStub()) &&
IsFastPackedElementsKind(array_builder->kind())) {
// We'll come back later with better (holey) feedback.
- if_builder.Deopt(
- Deoptimizer::kHoleyArrayDespitePackedElements_kindFeedback);
+ if_builder.Deopt("Holey array despite packed elements_kind feedback");
} else {
Push(checked_length); // capacity
Push(checked_length); // length
Add<HLoadNamedField>(value, nullptr, HObjectAccess::ForMap());
IfBuilder map_check(this);
map_check.IfNot<HCompareObjectEqAndBranch>(expected_map, map);
- map_check.ThenDeopt(Deoptimizer::kUnknownMap);
+ map_check.ThenDeopt("Unknown map");
map_check.End();
} else {
DCHECK(map_embedding == kEmbedMapsDirectly);
Add<HCheckMaps>(value, type->Classes().Current());
}
} else {
- if_nil.Deopt(Deoptimizer::kTooManyUndetectableTypes);
+ if_nil.Deopt("Too many undetectable types");
}
}
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
- FinishExitWithHardDeoptimization(
- Deoptimizer::kUnknownMapInPolymorphicAccess);
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic access");
} else {
HInstruction* instr = BuildNamedGeneric(access_type, expr, object, name,
value);
if (value->IsConstant()) {
HConstant* c_value = HConstant::cast(value);
if (!constant.is_identical_to(c_value->handle(isolate()))) {
- Add<HDeoptimize>(Deoptimizer::kConstantGlobalVariableAssignment,
+ Add<HDeoptimize>("Constant global variable assignment",
Deoptimizer::EAGER);
}
} else {
}
builder.Then();
builder.Else();
- Add<HDeoptimize>(Deoptimizer::kConstantGlobalVariableAssignment,
+ Add<HDeoptimize>("Constant global variable assignment",
Deoptimizer::EAGER);
builder.End();
}
HValue* value,
bool is_uninitialized) {
if (is_uninitialized) {
- Add<HDeoptimize>(
- Deoptimizer::kInsufficientTypeFeedbackForGenericNamedAccess,
- Deoptimizer::SOFT);
+ Add<HDeoptimize>("Insufficient type feedback for generic named access",
+ Deoptimizer::SOFT);
}
if (access_type == LOAD) {
HLoadNamedGeneric* result = New<HLoadNamedGeneric>(object, name);
DCHECK(join->predecessors()->length() > 0);
// Deopt if none of the cases matched.
NoObservableSideEffectsScope scope(this);
- FinishExitWithHardDeoptimization(
- Deoptimizer::kUnknownMapInPolymorphicElementAccess);
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic element access");
set_current_block(join);
return access_type == STORE ? val : Pop();
}
if (access_type == STORE) {
if (expr->IsAssignment() &&
expr->AsAssignment()->HasNoTypeInformation()) {
- Add<HDeoptimize>(Deoptimizer::kInsufficientTypeFeedbackForKeyedStore,
+ Add<HDeoptimize>("Insufficient type feedback for keyed store",
Deoptimizer::SOFT);
}
} else {
if (expr->AsProperty()->HasNoTypeInformation()) {
- Add<HDeoptimize>(Deoptimizer::kInsufficientTypeFeedbackForKeyedLoad,
+ Add<HDeoptimize>("Insufficient type feedback for keyed load",
Deoptimizer::SOFT);
}
}
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (ordered_functions == types->length() && FLAG_deoptimize_uncommon_cases) {
- FinishExitWithHardDeoptimization(Deoptimizer::kUnknownMapInPolymorphicCall);
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic call");
} else {
Property* prop = expr->expression()->AsProperty();
HInstruction* function = BuildNamedGeneric(
if (CanBeFunctionApplyArguments(expr) && expr->is_uninitialized()) {
// We have to use EAGER deoptimization here because Deoptimizer::SOFT
// gets ignored by the always-opt flag, which leads to incorrect code.
- Add<HDeoptimize>(
- Deoptimizer::kInsufficientTypeFeedbackForCallWithArguments,
- Deoptimizer::EAGER);
+ Add<HDeoptimize>("Insufficient type feedback for call with arguments",
+ Deoptimizer::EAGER);
arguments_flag = ARGUMENTS_FAKED;
}
right_type->Maybe(Type::Receiver()));
if (!left_type->IsInhabited()) {
- Add<HDeoptimize>(
- Deoptimizer::kInsufficientTypeFeedbackForLHSOfBinaryOperation,
- Deoptimizer::SOFT);
+ Add<HDeoptimize>("Insufficient type feedback for LHS of binary operation",
+ Deoptimizer::SOFT);
// TODO(rossberg): we should be able to get rid of non-continuous
// defaults.
left_type = Type::Any(zone());
}
if (!right_type->IsInhabited()) {
- Add<HDeoptimize>(
- Deoptimizer::kInsufficientTypeFeedbackForRHSOfBinaryOperation,
- Deoptimizer::SOFT);
+ Add<HDeoptimize>("Insufficient type feedback for RHS of binary operation",
+ Deoptimizer::SOFT);
right_type = Type::Any(zone());
} else {
if (!maybe_string_add) right = TruncateToNumber(right, &right_type);
IfBuilder if_same(this);
if_same.If<HCompareNumericAndBranch>(right, fixed_right, Token::EQ);
if_same.Then();
- if_same.ElseDeopt(Deoptimizer::kUnexpectedRHSOfBinaryOperation);
+ if_same.ElseDeopt("Unexpected RHS of binary operation");
right = fixed_right;
}
instr = AddUncasted<HMod>(left, right);
// Cases handled below depend on collected type feedback. They should
// soft deoptimize when there is no type feedback.
if (!combined_type->IsInhabited()) {
- Add<HDeoptimize>(
- Deoptimizer::kInsufficientTypeFeedbackForCombinedTypeOfBinaryOperation,
- Deoptimizer::SOFT);
+ Add<HDeoptimize>("Insufficient type feedback for combined type "
+ "of binary operation",
+ Deoptimizer::SOFT);
combined_type = left_type = right_type = Type::Any(zone());
}
HConstant::cast(left)->HasNumberValue()) ||
(right->IsConstant() &&
HConstant::cast(right)->HasNumberValue())) {
- Add<HDeoptimize>(Deoptimizer::kTypeMismatchBetweenFeedbackAndConstant,
+ Add<HDeoptimize>("Type mismatch between feedback and constant",
Deoptimizer::SOFT);
// The caller expects a branch instruction, so make it happy.
return New<HBranch>(graph()->GetConstantTrue());
!HConstant::cast(left)->HasInternalizedStringValue()) ||
(right->IsConstant() &&
!HConstant::cast(right)->HasInternalizedStringValue())) {
- Add<HDeoptimize>(Deoptimizer::kTypeMismatchBetweenFeedbackAndConstant,
+ Add<HDeoptimize>("Type mismatch between feedback and constant",
Deoptimizer::SOFT);
// The caller expects a branch instruction, so make it happy.
return New<HBranch>(graph()->GetConstantTrue());
HValue* EnforceNumberType(HValue* number, Type* expected);
HValue* TruncateToNumber(HValue* value, Type** expected);
- void FinishExitWithHardDeoptimization(Deoptimizer::DeoptReason reason);
+ void FinishExitWithHardDeoptimization(const char* reason);
void AddIncrementCounter(StatsCounter* counter);
void Else();
void End();
- void Deopt(Deoptimizer::DeoptReason reason);
- void ThenDeopt(Deoptimizer::DeoptReason reason) {
+ void Deopt(const char* reason);
+ void ThenDeopt(const char* reason) {
Then();
Deopt(reason);
}
- void ElseDeopt(Deoptimizer::DeoptReason reason) {
+ void ElseDeopt(const char* reason) {
Else();
Deopt(reason);
}
};
-template <>
+template<>
inline HDeoptimize* HGraphBuilder::Add<HDeoptimize>(
- Deoptimizer::DeoptReason reason, Deoptimizer::BailoutType type) {
+ const char* reason, Deoptimizer::BailoutType type) {
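+  // A soft deopt is a request to reoptimize later with better type feedback;
+  // count it here and, under --always-opt, suppress the deopt entirely.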
if (type == Deoptimizer::SOFT) {
isolate()->counters()->soft_deopts_requested()->Increment();
if (FLAG_always_opt) return NULL;
}
-template <>
+template<>
inline HInstruction* HGraphBuilder::AddUncasted<HDeoptimize>(
- Deoptimizer::DeoptReason reason, Deoptimizer::BailoutType type) {
+ const char* reason, Deoptimizer::BailoutType type) {
return Add<HDeoptimize>(reason, type);
}
}
-void Assembler::RecordDeoptReason(const int reason, const int raw_position) {
- if (FLAG_trace_deopt) {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::POSITION, raw_position);
- RecordRelocInfo(RelocInfo::DEOPT_REASON, reason);
- }
-}
-
-
void Assembler::GrowBuffer() {
DCHECK(buffer_overflow());
if (!own_buffer_) FATAL("external code buffer is too small");
// write a comment.
void RecordComment(const char* msg, bool force = false);
- // Record a deoptimization reason that can be used by a log or cpu profiler.
- // Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const int raw_position);
-
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ const char* detail,
Deoptimizer::BailoutType bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
}
Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ instr->Mnemonic(), detail);
DCHECK(info()->IsStub() || frame_is_built_);
if (cc == no_condition && frame_is_built_) {
DeoptComment(reason);
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ const char* detail) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
+ DeoptimizeIf(cc, instr, detail, bailout_type);
}
__ and_(dividend, mask);
__ neg(dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, "minus zero");
}
__ jmp(&done, Label::kNear);
}
DCHECK(ToRegister(instr->result()).is(eax));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
Label remainder_not_zero;
__ j(not_zero, &remainder_not_zero, Label::kNear);
__ cmp(dividend, Immediate(0));
- DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(less, instr, "minus zero");
__ bind(&remainder_not_zero);
}
}
// deopt in this case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(right_reg, Operand(right_reg));
- DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(zero, instr, "division by zero");
}
// Check for kMinInt % -1, idiv would signal a divide error. We
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ cmp(right_reg, -1);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(equal, instr, "minus zero");
} else {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ Move(result_reg, Immediate(0));
__ j(not_sign, &positive_left, Label::kNear);
__ idiv(right_reg);
__ test(result_reg, Operand(result_reg));
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, "minus zero");
__ jmp(&done, Label::kNear);
__ bind(&positive_left);
}
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, "minus zero");
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, kMinInt);
- DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(zero, instr, "overflow");
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ test(dividend, Immediate(mask));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(not_zero, instr, "lost precision");
}
__ Move(result, dividend);
int32_t shift = WhichPowerOf2Abs(divisor);
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, "minus zero");
}
__ TruncatingDiv(dividend, Abs(divisor));
__ mov(eax, edx);
__ imul(eax, eax, divisor);
__ sub(eax, dividend);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(not_equal, instr, "lost precision");
}
}
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
- DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(zero, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
__ test(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
- DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(sign, instr, "minus zero");
    __ bind(&dividend_not_zero);
}
__ cmp(dividend, kMinInt);
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
- DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(zero, instr, "overflow");
    __ bind(&dividend_not_min_int);
}
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ test(remainder, remainder);
- DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(not_zero, instr, "lost precision");
}
}
// If the divisor is negative, we have to negate and handle edge cases.
__ neg(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, "minus zero");
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
}
return;
}
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, "minus zero");
}
// Easy case: We need no dynamic check for the dividend and the flooring
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
- DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(zero, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
__ test(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
- DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(sign, instr, "minus zero");
    __ bind(&dividend_not_zero);
}
__ cmp(dividend, kMinInt);
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
- DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(zero, instr, "overflow");
    __ bind(&dividend_not_min_int);
}
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(no_condition, instr, "minus zero");
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmp(ToRegister(instr->temp()), Immediate(0));
- DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(less, instr, "minus zero");
}
} else {
// Test the non-zero operand for negative sign.
__ or_(ToRegister(instr->temp()), ToOperand(right));
- DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(sign, instr, "minus zero");
}
__ bind(&done);
}
__ shr_cl(ToRegister(left));
if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(sign, instr, "negative value");
}
break;
case Token::SHL:
case Token::ROR:
if (shift_count == 0 && instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(sign, instr, "negative value");
} else {
__ ror(ToRegister(left), shift_count);
}
__ shr(ToRegister(left), shift_count);
} else if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(sign, instr, "negative value");
}
break;
case Token::SHL:
__ shl(ToRegister(left), shift_count - 1);
}
__ SmiTag(ToRegister(left));
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
} else {
__ shl(ToRegister(left), shift_count);
}
__ sub(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
}
}
DCHECK(object.is(eax));
__ test(object, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(zero, instr, "Smi");
__ CmpObjectType(object, JS_DATE_TYPE, scratch);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotADateObject);
+ DeoptimizeIf(not_equal, instr, "not a date object");
if (index->value() == 0) {
__ mov(result, FieldOperand(object, JSDate::kValueOffset));
__ add(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
}
}
}
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ test(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(zero, instr, "Smi");
}
Register map = no_reg; // Keep the compiler happy.
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
+ DeoptimizeIf(no_condition, instr, "unexpected object");
}
}
}
__ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, "hole");
}
}
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, "hole");
}
// Store the value.
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, "hole");
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(target, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, "hole");
} else {
__ j(not_equal, &skip_assignment, Label::kNear);
}
// Check that the function has a prototype or an initial map.
__ cmp(Operand(result), Immediate(factory()->the_hole_value()));
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, "hole");
// If the function does not have an initial map, we're done.
Label done;
__ mov(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ test(result, Operand(result));
- DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(negative, instr, "negative value");
}
break;
case EXTERNAL_FLOAT32_ELEMENTS:
FAST_DOUBLE_ELEMENTS,
instr->base_offset() + sizeof(kHoleNanLower32));
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, "hole");
}
Operand double_load_operand = BuildFastArrayOperand(
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(not_equal, instr, "not a Smi");
} else {
__ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, "hole");
}
}
}
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
- DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(equal, instr, "Smi");
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
- DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
+ DeoptimizeIf(below, instr, "not a JavaScript object");
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, kArgumentsLimit);
- DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
+ DeoptimizeIf(above, instr, "too many arguments");
__ push(receiver);
__ mov(receiver, length);
Register input_reg = ToRegister(instr->value());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
Label slow, allocated, done;
Register tmp = input_reg.is(eax) ? ecx : eax;
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ neg(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(negative, instr, "overflow");
__ bind(&is_positive);
}
__ j(not_equal, &non_zero, Label::kNear);
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, "minus zero");
__ bind(&non_zero);
}
__ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x1);
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
} else {
Label negative_sign, done;
// Deoptimize on unordered.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(parity_even, instr, "NaN");
__ j(below, &negative_sign, Label::kNear);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ j(above, &positive_sign, Label::kNear);
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, "minus zero");
__ Move(output_reg, Immediate(0));
__ jmp(&done, Label::kNear);
__ bind(&positive_sign);
__ cvttsd2si(output_reg, Operand(input_reg));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x1);
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ sub(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
__ bind(&done);
}
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x1);
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
__ jmp(&done, dist);
__ bind(&below_one_half);
__ cvttsd2si(output_reg, Operand(input_temp));
// Catch minint due to overflow, and to prevent overflow when compensating.
__ cmp(output_reg, 0x1);
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
__ Cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
// If the sign is positive, we return +0.
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, "minus zero");
}
__ Move(output_reg, Immediate(0));
__ bind(&done);
__ JumpIfSmi(tagged_exponent, &no_deopt);
DCHECK(!ecx.is(tagged_exponent));
__ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, ecx);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
__ int3();
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(cc, instr, "out of bounds");
}
}
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
+ DeoptimizeIf(equal, instr, "memento found");
__ bind(&no_memento_found);
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ test(input, Immediate(0xc0000000));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(not_zero, instr, "overflow");
}
__ SmiTag(input);
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
}
}
DCHECK(input->IsRegister() && input->Equals(instr->result()));
if (instr->needs_check()) {
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(not_zero, instr, "not a Smi");
} else {
__ AssertSmi(result);
}
if (can_convert_undefined_to_nan) {
__ j(not_equal, &convert, Label::kNear);
} else {
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
}
// Heap number to XMM conversion.
__ j(not_zero, &done, Label::kNear);
__ movmskpd(temp_reg, result_reg);
__ test_b(temp_reg, 1);
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, "minus zero");
}
__ jmp(&done, Label::kNear);
// Convert undefined to NaN.
__ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
__ pcmpeqd(result_reg, result_reg);
__ jmp(&done, Label::kNear);
__ bind(&check_false);
__ cmp(input_reg, factory()->false_value());
- DeoptimizeIf(not_equal, instr,
- Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+ DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false");
__ Move(input_reg, Immediate(0));
} else {
XMMRegister scratch = ToDoubleRegister(instr->temp());
DCHECK(!scratch.is(xmm0));
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
__ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, Operand(xmm0));
__ Cvtsi2sd(scratch, Operand(input_reg));
__ ucomisd(xmm0, scratch);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
- DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(not_equal, instr, "lost precision");
+ DeoptimizeIf(parity_even, instr, "NaN");
if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
__ test(input_reg, Operand(input_reg));
__ j(not_zero, done);
__ movmskpd(input_reg, xmm0);
__ and_(input_reg, 1);
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, "minus zero");
}
}
}
&is_nan, &minus_zero, dist);
__ jmp(&done, dist);
__ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(no_condition, instr, "lost precision");
__ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(no_condition, instr, "NaN");
__ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(no_condition, instr, "minus zero");
__ bind(&done);
}
}
&minus_zero, dist);
__ jmp(&done, dist);
__ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(no_condition, instr, "lost precision");
__ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(no_condition, instr, "NaN");
__ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(no_condition, instr, "minus zero");
__ bind(&done);
__ SmiTag(result_reg);
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(not_zero, instr, "not a Smi");
}
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(zero, instr, "Smi");
}
}
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(not_equal, instr, "wrong instance type");
} else {
- DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(below, instr, "wrong instance type");
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
static_cast<int8_t>(last));
- DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(above, instr, "wrong instance type");
}
}
} else {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
- DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
- Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type");
} else {
__ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
__ and_(temp, mask);
__ cmp(temp, tag);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(not_equal, instr, "wrong instance type");
}
}
}
Operand operand = ToOperand(instr->value());
__ cmp(operand, object);
}
- DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
+ DeoptimizeIf(not_equal, instr, "value mismatch");
}
__ test(eax, Immediate(kSmiTagMask));
}
- DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
+ DeoptimizeIf(zero, instr, "instance migration failed");
}
if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(not_equal, instr, "wrong map");
}
__ bind(&success);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
__ mov(input_reg, 0);
__ jmp(&done, Label::kNear);
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
__ cmp(eax, isolate()->factory()->undefined_value());
- DeoptimizeIf(equal, instr, Deoptimizer::kUndefined);
+ DeoptimizeIf(equal, instr, "undefined");
__ cmp(eax, isolate()->factory()->null_value());
- DeoptimizeIf(equal, instr, Deoptimizer::kNull);
+ DeoptimizeIf(equal, instr, "null");
__ test(eax, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(zero, instr, "Smi");
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
- DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(below_equal, instr, "wrong instance type");
Label use_cache, call_runtime;
__ CheckEnumCache(&call_runtime);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(not_equal, instr, "wrong map");
__ bind(&use_cache);
}
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
__ test(result, result);
- DeoptimizeIf(equal, instr, Deoptimizer::kNoCache);
+ DeoptimizeIf(equal, instr, "no cache");
}
Register object = ToRegister(instr->value());
__ cmp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(not_equal, instr, "wrong map");
}
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail,
Deoptimizer::BailoutType bailout_type);
- void DeoptimizeIf(Condition cc, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
void LCodeGenBase::DeoptComment(const Deoptimizer::Reason& reason) {
- masm()->RecordDeoptReason(reason.deopt_reason, reason.raw_position);
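+  // Emits a code comment of the form
+  // ";;; deoptimize at <position> <mnemonic>: <detail>".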
+ std::ostringstream os;
+ os << ";;; deoptimize at " << HSourcePosition(reason.raw_position) << " "
+ << reason.mnemonic;
+ if (reason.detail != NULL) os << ": " << reason.detail;
+ Comment("%s", os.str().c_str());
}
}
-void Assembler::RecordDeoptReason(const int reason, const int raw_position) {
- if (FLAG_trace_deopt) {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::POSITION, raw_position);
- RecordRelocInfo(RelocInfo::DEOPT_REASON, reason);
- }
-}
-
-
int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
Instr instr = instr_at(pc);
DCHECK(IsJ(instr) || IsLui(instr));
// Use --code-comments to enable.
void RecordComment(const char* msg);
- // Record a deoptimization reason that can be used by a log or cpu profiler.
- // Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const int raw_position);
-
-
static int RelocateInternalReference(byte* pc, intptr_t pc_delta);
// Writes a single byte or word of data in the code stream. Used for
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
Deoptimizer::BailoutType bailout_type,
- Register src1, const Operand& src2) {
+ const char* detail, Register src1,
+ const Operand& src2) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
}
Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ instr->Mnemonic(), detail);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
- Register src1, const Operand& src2) {
+ const char* detail, Register src1,
+ const Operand& src2) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2);
+ DeoptimizeIf(condition, instr, bailout_type, detail, src1, src2);
}
__ subu(dividend, zero_reg, dividend);
__ And(dividend, dividend, Operand(mask));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
}
__ Branch(USE_DELAY_SLOT, &done);
__ subu(dividend, zero_reg, dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend,
- Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "minus zero", dividend, Operand(zero_reg));
__ bind(&remainder_not_zero);
}
}
// Check for x % 0, we have to deopt in this case because we can't return a
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "division by zero", right_reg, Operand(zero_reg));
}
// Check for kMinInt % -1, div will return kMinInt, which is not what we
Label no_overflow_possible;
__ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1));
+ DeoptimizeIf(eq, instr, "minus zero", right_reg, Operand(-1));
} else {
__ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
__ Branch(USE_DELAY_SLOT, &done);
// If we care about -0, test if the dividend is <0 and the result is 0.
__ Branch(&done, ge, left_reg, Operand(zero_reg));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", result_reg, Operand(zero_reg));
}
__ bind(&done);
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr, "overflow", dividend, Operand(kMinInt));
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ And(at, dividend, Operand(mask));
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "lost precision", at, Operand(zero_reg));
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
}
__ TruncatingDiv(result, dividend, Abs(divisor));
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
__ Mul(scratch0(), result, Operand(divisor));
__ Subu(scratch0(), scratch0(), dividend);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(),
- Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "lost precision", scratch0(), Operand(zero_reg));
}
}
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg));
}
// Check for (0 / -x) that will produce negative zero.
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
- Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg));
__ bind(&left_not_zero);
}
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1));
__ bind(&left_not_min_int);
}
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder,
- Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "lost precision", remainder, Operand(zero_reg));
}
}
__ Subu(result, zero_reg, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg));
}
// Dividing by -1 is basically negation, unless we overflow.
__ Xor(scratch, scratch, result);
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
- Operand(zero_reg));
+ DeoptimizeIf(ge, instr, "overflow", scratch, Operand(zero_reg));
}
return;
}
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
}
// Easy case: We need no dynamic check for the dividend and the flooring
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg));
}
// Check for (0 / -x) that will produce negative zero.
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
- Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg));
__ bind(&left_not_zero);
}
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1));
__ bind(&left_not_min_int);
}
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", left, Operand(zero_reg));
}
switch (constant) {
case -1:
if (overflow) {
__ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
- Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", scratch, Operand(zero_reg));
} else {
__ Subu(result, zero_reg, left);
}
if (bailout_on_minus_zero) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
- Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "minus zero", left, Operand(zero_reg));
}
__ mov(result, zero_reg);
break;
__ Mul(scratch, result, left, right);
}
__ sra(at, result, 31);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
+ DeoptimizeIf(ne, instr, "overflow", scratch, Operand(at));
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
__ Xor(at, left, right);
__ Branch(&done, ge, at, Operand(zero_reg));
// Bail out if the result is minus zero.
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg));
__ bind(&done);
}
}
case Token::SHR:
__ srlv(result, left, ToRegister(right_op));
if (instr->can_deopt()) {
- DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result,
- Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "negative value", result, Operand(zero_reg));
}
break;
case Token::SHL:
} else {
if (instr->can_deopt()) {
__ And(at, left, Operand(0x80000000));
- DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at,
- Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "negative value", at, Operand(zero_reg));
}
__ Move(result, left);
}
} else {
__ SmiTagCheckOverflow(result, left, scratch);
}
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
- Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", scratch, Operand(zero_reg));
} else {
__ sll(result, left, shift_count);
}
ToRegister(right),
overflow); // Reg at also used as scratch.
}
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
- Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg));
}
}
DCHECK(!scratch.is(object));
__ SmiTst(object, at);
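  // SmiTst ands the value with kSmiTagMask; a zero result means the tag bits
  // are clear, i.e. the value is a Smi rather than a heap object pointer.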
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
__ GetObjectType(object, scratch, scratch);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject, scratch,
- Operand(JS_DATE_TYPE));
+ DeoptimizeIf(ne, instr, "not a date object", scratch, Operand(JS_DATE_TYPE));
if (index->value() == 0) {
__ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
ToRegister(right),
overflow); // Reg at also used as scratch.
}
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
- Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg));
}
}
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg, at);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
}
const Register map = scratch0();
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg,
+ DeoptimizeIf(al, instr, "unexpected object", zero_reg,
Operand(zero_reg));
}
}
__ lw(result, FieldMemOperand(at, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
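    // The hole is a one-off sentinel marking uninitialized or deleted slots;
    // it must never escape as an ordinary value.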
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
+ DeoptimizeIf(eq, instr, "hole", result, Operand(at));
}
}
Register payload = ToRegister(instr->temp());
__ lw(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, payload, Operand(at));
+ DeoptimizeIf(eq, instr, "hole", payload, Operand(at));
}
// Store the value.
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
+ DeoptimizeIf(eq, instr, "hole", result, Operand(at));
} else {
Label is_not_hole;
__ Branch(&is_not_hole, ne, result, Operand(at));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at));
+ DeoptimizeIf(eq, instr, "hole", scratch, Operand(at));
} else {
__ Branch(&skip_assignment, ne, scratch, Operand(at));
}
// Check that the function has a prototype or an initial map.
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
+ DeoptimizeIf(eq, instr, "hole", result, Operand(at));
// If the function does not have an initial map, we're done.
Label done;
case UINT32_ELEMENTS:
__ lw(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
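        // An unsigned value >= 2^31 has no int32 representation; deopt
        // unless every use of the value is known to treat it as uint32.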
- DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue,
- result, Operand(0x80000000));
+ DeoptimizeIf(Ugreater_equal, instr, "negative value", result,
+ Operand(0x80000000));
}
break;
case FLOAT32_ELEMENTS:
if (instr->hydrogen()->RequiresHoleCheck()) {
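    // In double arrays the hole is encoded as a NaN with a distinguished
    // upper word, so comparing against kHoleNanUpper32 detects it.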
__ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch,
- Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr, "hole", scratch, Operand(kHoleNanUpper32));
}
}
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result, scratch);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch,
- Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg));
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch));
+ DeoptimizeIf(eq, instr, "hole", result, Operand(scratch));
}
}
}
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver, scratch);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "Smi", scratch, Operand(zero_reg));
__ GetObjectType(receiver, scratch, scratch);
- DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
+ DeoptimizeIf(lt, instr, "not a JavaScript object", scratch,
Operand(FIRST_SPEC_OBJECT_TYPE));
__ Branch(&result_in_receiver);
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
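  // 1 * KB == 1024 arguments; copying more than that onto the stack here
  // could overflow it, so fall back to the runtime via deopt.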
- DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length,
+ DeoptimizeIf(hi, instr, "too many arguments", length,
Operand(kArgumentsLimit));
// Push the receiver and use the register to keep the original
// Deoptimize if not a heap number.
__ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at));
Label done;
Register exponent = scratch0();
__ mov(result, input);
__ subu(result, zero_reg, input);
// Overflow if result is still negative, i.e. 0x80000000.
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", result, Operand(zero_reg));
__ bind(&done);
}
except_flag);
// Deopt if the operation did not succeed.
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+ DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Branch(&done, ne, result, Operand(zero_reg));
__ Mfhc1(scratch1, input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
- Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
__ bind(&done);
}
}
// The following conversion will not work with numbers
// outside of ]-2^32, 2^32[.
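  // (A biased exponent of kExponentBias + 32 or more means |value| >= 2^32.)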
- DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
+ DeoptimizeIf(ge, instr, "overflow", scratch,
Operand(HeapNumber::kExponentBias + 32));
// Save the original sign for later comparison.
__ Xor(result, result, Operand(scratch));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// ARM uses 'mi' here, which is 'lt'
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "minus zero", result, Operand(zero_reg));
} else {
Label skip2;
// ARM uses 'mi' here, which is 'lt'
double_scratch1,
except_flag);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+ DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ bind(&check_sign_on_zero);
__ Mfhc1(scratch, input);
__ And(scratch, scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch,
- Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "minus zero", scratch, Operand(zero_reg));
}
__ bind(&done);
}
DCHECK(!t3.is(tagged_exponent));
__ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, t3, Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number", t3, Operand(at));
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand);
+ DeoptimizeIf(cc, instr, "out of bounds", reg, operand);
}
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ And(at, input, Operand(0xc0000000));
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "overflow", at, Operand(zero_reg));
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTagCheckOverflow(output, input, at);
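    // SmiTagCheckOverflow leaves (input XOR tagged) in 'at'; it is negative
    // exactly when tagging flipped the sign bit, i.e. the value did not fit.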
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", at, Operand(zero_reg));
} else {
__ SmiTag(output, input);
}
// If the input is a HeapObject, value of scratch won't be zero.
__ And(scratch, input, Operand(kHeapObjectTag));
__ SmiUntag(result, input);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg));
} else {
__ SmiUntag(result, input);
}
if (can_convert_undefined_to_nan) {
__ Branch(&convert, ne, scratch, Operand(at));
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch,
- Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at));
}
// Load heap number.
__ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
__ mfc1(at, result_reg.low());
__ Branch(&done, ne, at, Operand(zero_reg));
__ Mfhc1(scratch, result_reg);
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch,
+ DeoptimizeIf(eq, instr, "minus zero", scratch,
Operand(HeapNumber::kSignMask));
}
__ Branch(&done);
__ bind(&convert);
// Convert undefined (and hole) to NaN.
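  // ToNumber(undefined) is NaN, so undefined may legitimately flow into a
  // double context; anything else that is not a heap number deopts below.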
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
+ DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg,
Operand(at));
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
__ bind(&check_false);
__ LoadRoot(at, Heap::kFalseValueRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean,
- scratch2, Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false", scratch2,
+ Operand(at));
__ Branch(USE_DELAY_SLOT, &done);
__ mov(input_reg, zero_reg); // In delay slot.
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1,
- Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number", scratch1, Operand(at));
// Load the double value.
__ ldc1(double_scratch,
except_flag,
kCheckForInexactConversion);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+ DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Mfhc1(scratch1, double_scratch);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
- Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
}
}
__ bind(&done);
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+ DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ Mfhc1(scratch1, double_input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
- Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
__ bind(&done);
}
}
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+ DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ Mfhc1(scratch1, double_input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
- Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
__ bind(&done);
}
}
__ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch1, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", scratch1, Operand(zero_reg));
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "not a Smi", at, Operand(zero_reg));
}
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
}
}
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
- Operand(first));
+ DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(first));
} else {
- DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch,
- Operand(first));
+ DeoptimizeIf(lo, instr, "wrong instance type", scratch, Operand(first));
// Omit check for the last type.
if (last != LAST_TYPE) {
- DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch,
- Operand(last));
+ DeoptimizeIf(hi, instr, "wrong instance type", scratch, Operand(last));
}
}
} else {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
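      // A power-of-two mask isolates a single bit, so one And suffices:
      // deopt when that bit disagrees with the expected tag.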
__ And(at, scratch, mask);
- DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
- at, Operand(zero_reg));
+ DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type", at,
+ Operand(zero_reg));
} else {
__ And(scratch, scratch, Operand(mask));
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
- Operand(tag));
+ DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(tag));
}
}
}
Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ li(at, Operand(Handle<Object>(cell)));
__ lw(at, FieldMemOperand(at, Cell::kValueOffset));
- DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at));
+ DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(at));
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object));
+ DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(object));
}
}
__ StoreToSafepointRegisterSlot(v0, scratch0());
}
__ SmiTst(scratch0(), at);
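  // The TryMigrateInstance runtime call signals failure by returning a Smi,
  // so a Smi in the result slot means the instance could not be migrated.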
- DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "instance migration failed", at, Operand(zero_reg));
}
if (instr->hydrogen()->HasMigrationTarget()) {
__ Branch(deferred->entry(), ne, map_reg, Operand(map));
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map));
+ DeoptimizeIf(ne, instr, "wrong map", map_reg, Operand(map));
}
__ bind(&success);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
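  // (For Uint8ClampedArray stores, ToNumber(undefined) is NaN and NaN
  // clamps to 0.)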
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
+ DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg,
Operand(factory()->undefined_value()));
__ mov(result_reg, zero_reg);
__ jmp(&done);
type = Deoptimizer::LAZY;
}
- DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg,
+ DeoptimizeIf(al, instr, type, instr->hydrogen()->reason(), zero_reg,
Operand(zero_reg));
}
Register result = ToRegister(instr->result());
Register object = ToRegister(instr->object());
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kUndefined, object, Operand(at));
+ DeoptimizeIf(eq, instr, "undefined", object, Operand(at));
Register null_value = t1;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kNull, object, Operand(null_value));
+ DeoptimizeIf(eq, instr, "null", object, Operand(null_value));
__ And(at, object, kSmiTagMask);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ GetObjectType(object, a1, a1);
- DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1,
+ DeoptimizeIf(le, instr, "not a JavaScript object", a1,
Operand(LAST_JS_PROXY_TYPE));
Label use_cache, call_runtime;
__ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
DCHECK(result.is(v0));
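  // Every map's map is the meta map, so this comparison distinguishes a
  // returned map (enum cache usable) from a returned fixed array.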
__ LoadRoot(at, Heap::kMetaMapRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, a1, Operand(at));
+ DeoptimizeIf(ne, instr, "wrong map", a1, Operand(at));
__ bind(&use_cache);
}
FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ lw(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "no cache", result, Operand(zero_reg));
__ bind(&done);
}
Register object = ToRegister(instr->value());
Register map = ToRegister(instr->map());
__ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0()));
+ DeoptimizeIf(ne, instr, "wrong map", map, Operand(scratch0()));
}
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
- Deoptimizer::BailoutType bailout_type,
+ Deoptimizer::BailoutType bailout_type, const char* detail,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
- void DeoptimizeIf(
- Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason = Deoptimizer::kNoReason,
- Register src1 = zero_reg, const Operand& src2 = Operand(zero_reg));
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
+ const char* detail = NULL, Register src1 = zero_reg,
+ const Operand& src2 = Operand(zero_reg));
void AddToTranslation(LEnvironment* environment,
Translation* translation,
__ dsubu(dividend, zero_reg, dividend);
__ And(dividend, dividend, Operand(mask));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
}
__ Branch(USE_DELAY_SLOT, &done);
__ dsubu(dividend, zero_reg, dividend);
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, "division by zero");
return;
}
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend,
- Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "minus zero", dividend, Operand(zero_reg));
__ bind(&remainder_not_zero);
}
}
// Check for x % 0, we have to deopt in this case because we can't return a
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "division by zero", right_reg, Operand(zero_reg));
}
// Check for kMinInt % -1, div will return kMinInt, which is not what we
Label no_overflow_possible;
__ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1));
+ DeoptimizeIf(eq, instr, "minus zero", right_reg, Operand(-1));
} else {
__ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
__ Branch(USE_DELAY_SLOT, &done);
__ Branch(&done, ge, left_reg, Operand(zero_reg));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", result_reg, Operand(zero_reg));
}
__ bind(&done);
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr, "overflow", dividend, Operand(kMinInt));
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ And(at, dividend, Operand(mask));
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "lost precision", at, Operand(zero_reg));
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, "division by zero");
return;
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
}
__ TruncatingDiv(result, dividend, Abs(divisor));
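  // TruncatingDiv computes the quotient via a magic-number multiply; the
  // multiply-back-and-compare below catches any nonzero remainder.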
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
__ Dmul(scratch0(), result, Operand(divisor));
__ Dsubu(scratch0(), scratch0(), dividend);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(),
- Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "lost precision", scratch0(), Operand(zero_reg));
}
}
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg));
}
// Check for (0 / -x) that will produce negative zero.
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
- Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg));
__ bind(&left_not_zero);
}
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1));
__ bind(&left_not_min_int);
}
} else {
__ dmod(remainder, dividend, divisor);
}
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder,
- Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "lost precision", remainder, Operand(zero_reg));
}
}
__ Dsubu(result, zero_reg, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg));
}
__ Xor(scratch, scratch, result);
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, result, Operand(kMaxInt));
+ DeoptimizeIf(gt, instr, "overflow", result, Operand(kMaxInt));
}
return;
}
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, "division by zero");
return;
}
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
}
// Easy case: We need no dynamic check for the dividend and the flooring
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg));
}
// Check for (0 / -x) that will produce negative zero.
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
- Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg));
__ bind(&left_not_zero);
}
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1));
__ bind(&left_not_min_int);
}
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", left, Operand(zero_reg));
}
switch (constant) {
case -1:
if (overflow) {
__ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
- DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, scratch,
- Operand(kMaxInt));
+ DeoptimizeIf(gt, instr, "overflow", scratch, Operand(kMaxInt));
} else {
__ Dsubu(result, zero_reg, left);
}
if (bailout_on_minus_zero) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
- Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "minus zero", left, Operand(zero_reg));
}
__ mov(result, zero_reg);
break;
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiTag(result);
}
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
+ DeoptimizeIf(ne, instr, "overflow", scratch, Operand(at));
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
__ Xor(at, left, right);
__ Branch(&done, ge, at, Operand(zero_reg));
// Bail out if the result is minus zero.
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg));
__ bind(&done);
}
}
__ srlv(result, left, ToRegister(right_op));
if (instr->can_deopt()) {
// TODO(yy): (-1) >>> 0. anything else?
- DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result,
- Operand(zero_reg));
- DeoptimizeIf(gt, instr, Deoptimizer::kNegativeValue, result,
- Operand(kMaxInt));
+ DeoptimizeIf(lt, instr, "negative value", result, Operand(zero_reg));
+ DeoptimizeIf(gt, instr, "negative value", result, Operand(kMaxInt));
}
break;
case Token::SHL:
} else {
if (instr->can_deopt()) {
__ And(at, left, Operand(0x80000000));
- DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at,
- Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "negative value", at, Operand(zero_reg));
}
__ Move(result, left);
}
ToRegister(right),
overflow); // Reg at also used as scratch.
}
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
- Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg));
if (!instr->hydrogen()->representation().IsSmi()) {
- DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, ToRegister(result),
- Operand(kMaxInt));
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, ToRegister(result),
- Operand(kMinInt));
+ DeoptimizeIf(gt, instr, "overflow", ToRegister(result), Operand(kMaxInt));
+ DeoptimizeIf(lt, instr, "overflow", ToRegister(result), Operand(kMinInt));
}
}
}
DCHECK(!scratch.is(object));
__ SmiTst(object, at);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
__ GetObjectType(object, scratch, scratch);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject, scratch,
- Operand(JS_DATE_TYPE));
+ DeoptimizeIf(ne, instr, "not a date object", scratch, Operand(JS_DATE_TYPE));
if (index->value() == 0) {
__ ld(result, FieldMemOperand(object, JSDate::kValueOffset));
ToRegister(right),
overflow); // Reg at also used as scratch.
}
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
- Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg));
  // If not a Smi, it must be an int32.
if (!instr->hydrogen()->representation().IsSmi()) {
- DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, ToRegister(result),
- Operand(kMaxInt));
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, ToRegister(result),
- Operand(kMinInt));
+ DeoptimizeIf(gt, instr, "overflow", ToRegister(result), Operand(kMaxInt));
+ DeoptimizeIf(lt, instr, "overflow", ToRegister(result), Operand(kMinInt));
}
}
}
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg, at);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
}
const Register map = scratch0();
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg,
+ DeoptimizeIf(al, instr, "unexpected object", zero_reg,
Operand(zero_reg));
}
}
__ ld(result, FieldMemOperand(at, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
+ DeoptimizeIf(eq, instr, "hole", result, Operand(at));
}
}
Register payload = ToRegister(instr->temp());
__ ld(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, payload, Operand(at));
+ DeoptimizeIf(eq, instr, "hole", payload, Operand(at));
}
// Store the value.
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
+ DeoptimizeIf(eq, instr, "hole", result, Operand(at));
} else {
Label is_not_hole;
__ Branch(&is_not_hole, ne, result, Operand(at));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at));
+ DeoptimizeIf(eq, instr, "hole", scratch, Operand(at));
} else {
__ Branch(&skip_assignment, ne, scratch, Operand(at));
}
// Check that the function has a prototype or an initial map.
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
+ DeoptimizeIf(eq, instr, "hole", result, Operand(at));
// If the function does not have an initial map, we're done.
Label done;
case UINT32_ELEMENTS:
__ lw(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue,
- result, Operand(0x80000000));
+ DeoptimizeIf(Ugreater_equal, instr, "negative value", result,
+ Operand(0x80000000));
}
break;
case FLOAT32_ELEMENTS:
if (instr->hydrogen()->RequiresHoleCheck()) {
__ lwu(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
-    DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch,
-                 Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr, "hole", scratch, Operand(kHoleNanUpper32));
}
}
if (hinstr->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result, scratch);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch,
- Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg));
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch));
+ DeoptimizeIf(eq, instr, "hole", result, Operand(scratch));
}
}
}
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver, scratch);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "Smi", scratch, Operand(zero_reg));
__ GetObjectType(receiver, scratch, scratch);
- DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
+ DeoptimizeIf(lt, instr, "not a JavaScript object", scratch,
Operand(FIRST_SPEC_OBJECT_TYPE));
__ Branch(&result_in_receiver);
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
- DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length,
+ DeoptimizeIf(hi, instr, "too many arguments", length,
Operand(kArgumentsLimit));
// Push the receiver and use the register to keep the original
// Deoptimize if not a heap number.
__ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at));
Label done;
Register exponent = scratch0();
__ mov(result, input);
__ dsubu(result, zero_reg, input);
// Overflow if result is still negative, i.e. 0x80000000.
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", result, Operand(zero_reg));
__ bind(&done);
}
except_flag);
// Deopt if the operation did not succeed.
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+ DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Branch(&done, ne, result, Operand(zero_reg));
__ mfhc1(scratch1, input); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
- Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
__ bind(&done);
}
}
// The following conversion will not work with numbers
// outside of ]-2^32, 2^32[.
- DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
+ DeoptimizeIf(ge, instr, "overflow", scratch,
Operand(HeapNumber::kExponentBias + 32));
// Save the original sign for later comparison.
__ Xor(result, result, Operand(scratch));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// ARM uses 'mi' here, which is 'lt'
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "minus zero", result, Operand(zero_reg));
} else {
Label skip2;
// ARM uses 'mi' here, which is 'lt'
double_scratch1,
except_flag);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+ DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ bind(&check_sign_on_zero);
__ mfhc1(scratch, input); // Get exponent/sign bits.
__ And(scratch, scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch,
- Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "minus zero", scratch, Operand(zero_reg));
}
__ bind(&done);
}
DCHECK(!a7.is(tagged_exponent));
__ lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, a7, Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number", a7, Operand(at));
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand);
+ DeoptimizeIf(cc, instr, "out of bounds", reg, operand);
}
}
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
ne, &no_memento_found);
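  // A trailing AllocationMemento means allocation-site tracking is still
  // active for this array; deopt rather than transition it behind the
  // tracker's back.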
- DeoptimizeIf(al, instr, Deoptimizer::kMementoFound);
+ DeoptimizeIf(al, instr, "memento found");
__ bind(&no_memento_found);
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ And(at, input, Operand(0x80000000));
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "overflow", at, Operand(zero_reg));
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTagCheckOverflow(output, input, at);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, "overflow", at, Operand(zero_reg));
} else {
__ SmiTag(output, input);
}
// If the input is a HeapObject, value of scratch won't be zero.
__ And(scratch, input, Operand(kHeapObjectTag));
__ SmiUntag(result, input);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg));
} else {
__ SmiUntag(result, input);
}
if (can_convert_undefined_to_nan) {
__ Branch(&convert, ne, scratch, Operand(at));
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch,
- Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at));
}
// Load heap number.
__ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
__ mfc1(at, result_reg);
__ Branch(&done, ne, at, Operand(zero_reg));
__ mfhc1(scratch, result_reg); // Get exponent/sign bits.
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch,
+ DeoptimizeIf(eq, instr, "minus zero", scratch,
Operand(HeapNumber::kSignMask));
}
__ Branch(&done);
__ bind(&convert);
// Convert undefined (and hole) to NaN.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
+ DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg,
Operand(at));
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
__ bind(&check_false);
__ LoadRoot(at, Heap::kFalseValueRootIndex);
-  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean,
-               scratch2, Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false", scratch2,
+ Operand(at));
__ Branch(USE_DELAY_SLOT, &done);
__ mov(input_reg, zero_reg); // In delay slot.
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1,
- Operand(at));
+ DeoptimizeIf(ne, instr, "not a heap number", scratch1, Operand(at));
// Load the double value.
__ ldc1(double_scratch,
except_flag,
kCheckForInexactConversion);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+ DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ mfhc1(scratch1, double_scratch); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
- Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
}
}
__ bind(&done);
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+ DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ mfhc1(scratch1, double_input); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
- Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
__ bind(&done);
}
}
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+ DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ mfhc1(scratch1, double_input); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
- Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
__ bind(&done);
}
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, "not a Smi", at, Operand(zero_reg));
}
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
}
}
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
- Operand(first));
+ DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(first));
} else {
- DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch,
- Operand(first));
+ DeoptimizeIf(lo, instr, "wrong instance type", scratch, Operand(first));
// Omit check for the last type.
if (last != LAST_TYPE) {
- DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch,
- Operand(last));
+ DeoptimizeIf(hi, instr, "wrong instance type", scratch, Operand(last));
}
}
} else {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ And(at, scratch, mask);
- DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
- at, Operand(zero_reg));
+ DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type", at,
+ Operand(zero_reg));
} else {
__ And(scratch, scratch, Operand(mask));
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
- Operand(tag));
+ DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(tag));
}
}
}
Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ li(at, Operand(Handle<Object>(cell)));
__ ld(at, FieldMemOperand(at, Cell::kValueOffset));
- DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at));
+ DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(at));
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object));
+ DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(object));
}
}
__ StoreToSafepointRegisterSlot(v0, scratch0());
}
__ SmiTst(scratch0(), at);
- DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "instance migration failed", at, Operand(zero_reg));
}
if (instr->hydrogen()->HasMigrationTarget()) {
__ Branch(deferred->entry(), ne, map_reg, Operand(map));
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map));
+ DeoptimizeIf(ne, instr, "wrong map", map_reg, Operand(map));
}
__ bind(&success);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
+ DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg,
Operand(factory()->undefined_value()));
__ mov(result_reg, zero_reg);
__ jmp(&done);
Register result = ToRegister(instr->result());
Register object = ToRegister(instr->object());
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kUndefined, object, Operand(at));
+ DeoptimizeIf(eq, instr, "undefined", object, Operand(at));
Register null_value = a5;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kNull, object, Operand(null_value));
+ DeoptimizeIf(eq, instr, "null", object, Operand(null_value));
__ And(at, object, kSmiTagMask);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ GetObjectType(object, a1, a1);
- DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1,
+ DeoptimizeIf(le, instr, "not a JavaScript object", a1,
Operand(LAST_JS_PROXY_TYPE));
Label use_cache, call_runtime;
__ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
DCHECK(result.is(v0));
__ LoadRoot(at, Heap::kMetaMapRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, a1, Operand(at));
+ DeoptimizeIf(ne, instr, "wrong map", a1, Operand(at));
__ bind(&use_cache);
}
FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ ld(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, "no cache", result, Operand(zero_reg));
__ bind(&done);
}
Register object = ToRegister(instr->value());
Register map = ToRegister(instr->map());
__ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0()));
+ DeoptimizeIf(ne, instr, "wrong map", map, Operand(scratch0()));
}
void Code::PrintDeoptLocation(FILE* out, int bailout_id) {
- int last_position = 0;
- Deoptimizer::DeoptReason last_reason = Deoptimizer::kNoReason;
- int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_REASON) |
- RelocInfo::ModeMask(RelocInfo::POSITION) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+ const char* last_comment = NULL;
+ int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
+ | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
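+  // With the dedicated POSITION/DEOPT_REASON reloc modes reverted, the
+  // deopt detail travels as an ordinary code comment again, so scan the
+  // COMMENT entries alongside the runtime entries.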
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
- if (info->rmode() == RelocInfo::POSITION) {
- last_position = static_cast<int>(info->data());
- } else if (info->rmode() == RelocInfo::DEOPT_REASON) {
- last_reason = static_cast<Deoptimizer::DeoptReason>(info->data());
- } else if (last_reason != Deoptimizer::kNoReason) {
+ if (info->rmode() == RelocInfo::COMMENT) {
+ last_comment = reinterpret_cast<const char*>(info->data());
+ } else if (last_comment != NULL) {
if ((bailout_id == Deoptimizer::GetDeoptimizationId(
GetIsolate(), info->target_address(), Deoptimizer::EAGER)) ||
         (bailout_id == Deoptimizer::GetDeoptimizationId(
              GetIsolate(), info->target_address(), Deoptimizer::SOFT)) ||
         (bailout_id == Deoptimizer::GetDeoptimizationId(
GetIsolate(), info->target_address(), Deoptimizer::LAZY))) {
CHECK(RelocInfo::IsRuntimeEntry(info->rmode()));
- PrintF(out, " ;;; deoptimize at %d: %s\n", last_position,
- Deoptimizer::GetDeoptReason(last_reason));
+ PrintF(out, " %s\n", last_comment);
return;
}
}
__ ExtractBitRange(dividend, dividend, shift - 1, 0);
__ neg(dividend, dividend, LeaveOE, SetRC);
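      // Negative dividend: the mask was applied to |dividend| and the sign
      // restored by this negation; a zero result here stands for -0.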
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
+ DeoptimizeIf(eq, instr, "minus zero", cr0);
}
} else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ li(dividend, Operand::Zero());
} else {
- DeoptimizeIf(al, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(al, instr, "minus zero");
}
__ b(&done);
}
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, "division by zero");
return;
}
Label remainder_not_zero;
__ bne(&remainder_not_zero, cr0);
__ cmpwi(dividend, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, "minus zero");
__ bind(&remainder_not_zero);
}
}
// Check for x % 0.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmpwi(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, "division by zero");
}
// Check for kMinInt % -1, divw will return undefined, which is not what we
if (hmod->CheckFlag(HValue::kCanOverflow)) {
Label no_overflow_possible;
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero, cr0);
+ DeoptimizeIf(overflow, instr, "minus zero", cr0);
} else {
__ bnooverflow(&no_overflow_possible, cr0);
__ li(result_reg, Operand::Zero());
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ bne(&done, cr0);
__ cmpwi(left_reg, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, "minus zero");
}
__ bind(&done);
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmpwi(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
__ cmpw(dividend, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, "overflow");
}
int32_t shift = WhichPowerOf2Abs(divisor);
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
__ TestBitRange(dividend, shift - 1, 0, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0);
+ DeoptimizeIf(ne, instr, "lost precision", cr0);
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, "division by zero");
return;
}
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmpwi(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
__ TruncatingDiv(result, dividend, Abs(divisor));
__ mov(ip, Operand(divisor));
__ mullw(scratch, result, ip);
__ cmpw(scratch, dividend);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(ne, instr, "lost precision");
}
}
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmpwi(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
__ cmpwi(dividend, Operand::Zero());
    __ bne(&dividend_not_zero);
__ cmpwi(divisor, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, "minus zero");
    __ bind(&dividend_not_zero);
}
if (hdiv->CheckFlag(HValue::kCanOverflow)) {
Label no_overflow_possible;
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(overflow, instr, "overflow", cr0);
} else {
// When truncating, we want kMinInt / -1 = kMinInt.
__ bnooverflow(&no_overflow_possible, cr0);
Register scratch = scratch0();
__ mullw(scratch, divisor, result);
__ cmpw(dividend, scratch);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(ne, instr, "lost precision");
}
}
if (divisor == -1 && hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) {
__ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
__ cmpw(dividend, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, "overflow");
}
#else
if (hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) {
__ neg(result, dividend, oe, SetRC);
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
+ DeoptimizeIf(eq, instr, "minus zero", cr0);
}
// If the negation could not overflow, simply shifting is OK.
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(overflow, instr, "overflow", cr0);
return;
}
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, "division by zero");
return;
}
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmpwi(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
// Easy case: We need no dynamic check for the dividend and the flooring
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmpwi(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
__ cmpwi(dividend, Operand::Zero());
    __ bne(&dividend_not_zero);
__ cmpwi(divisor, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, "minus zero");
    __ bind(&dividend_not_zero);
}
if (hdiv->CheckFlag(HValue::kCanOverflow)) {
Label no_overflow_possible;
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(overflow, instr, "overflow", cr0);
} else {
// When truncating, we want kMinInt / -1 = kMinInt.
__ bnooverflow(&no_overflow_possible, cr0);
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
__ cmpi(left, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
switch (constant) {
__ li(r0, Operand::Zero()); // clear xer
__ mtxer(r0);
__ neg(result, left, SetOE, SetRC);
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(overflow, instr, "overflow", cr0);
#if V8_TARGET_ARCH_PPC64
} else {
__ neg(result, left);
__ TestIfInt32(result, scratch, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, "overflow");
}
#endif
} else {
__ cmpwi(left, Operand::Zero());
}
#endif
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, "minus zero");
}
__ li(result, Operand::Zero());
break;
__ Mul(result, left, right);
}
__ TestIfInt32(result, scratch, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, "overflow");
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiTag(result);
}
__ mullw(result, left, right);
}
__ TestIfInt32(scratch, result, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, "overflow");
#endif
} else {
if (instr->hydrogen()->representation().IsSmi()) {
#endif
// Bail out if the result is minus zero.
__ cmpi(result, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
__ bind(&done);
}
}
#if V8_TARGET_ARCH_PPC64
__ extsw(result, result, SetRC);
#endif
- DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0);
+ DeoptimizeIf(lt, instr, "negative value", cr0);
} else {
__ srw(result, left, scratch);
}
} else {
if (instr->can_deopt()) {
__ cmpwi(left, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(lt, instr, "negative value");
}
__ Move(result, left);
}
} else {
__ SmiTagCheckOverflow(result, left, scratch);
}
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(lt, instr, "overflow", cr0);
#endif
} else {
__ slwi(result, left, Operand(shift_count));
__ extsw(scratch0(), scratch0(), SetRC);
}
#endif
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(lt, instr, "overflow", cr0);
}
#if V8_TARGET_ARCH_PPC64
DCHECK(!scratch.is(object));
__ TestIfSmi(object, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ DeoptimizeIf(eq, instr, "Smi", cr0);
__ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
+ DeoptimizeIf(ne, instr, "not a date object");
if (index->value() == 0) {
__ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset));
__ extsw(scratch0(), scratch0(), SetRC);
}
#endif
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(lt, instr, "overflow", cr0);
}
#if V8_TARGET_ARCH_PPC64
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ TestIfSmi(reg, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ DeoptimizeIf(eq, instr, "Smi", cr0);
}
const Register map = scratch0();
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
+ DeoptimizeIf(al, instr, "unexpected object");
}
}
}
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, "hole");
}
}
Register payload = ToRegister(instr->temp());
__ LoadP(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, "hole");
}
// Store the value.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, "hole");
} else {
Label skip;
__ bne(&skip);
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, "hole");
} else {
__ bne(&skip_assignment);
}
// Check that the function has a prototype or an initial map.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, "hole");
// If the function does not have an initial map, we're done.
Label done;
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
__ cmplw(result, r0);
- DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(ge, instr, "negative value");
}
break;
case FLOAT32_ELEMENTS:
__ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
}
__ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, "hole");
}
}
if (requires_hole_check) {
if (IsFastSmiElementsKind(hinstr->elements_kind())) {
__ TestIfSmi(result, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+ DeoptimizeIf(ne, instr, "not a Smi", cr0);
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ cmp(result, scratch);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, "hole");
}
}
}
// Deoptimize if the receiver is not a JS object.
__ TestIfSmi(receiver, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(eq, instr, "Smi");
__ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
- DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
+ DeoptimizeIf(lt, instr, "not a JavaScript object");
__ b(&result_in_receiver);
__ bind(&global_object);
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmpli(length, Operand(kArgumentsLimit));
- DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments);
+ DeoptimizeIf(gt, instr, "too many arguments");
// Push the receiver and use the register to keep the original
// number of arguments.
__ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, ip);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, "not a heap number");
Label done;
Register exponent = scratch0();
__ mtxer(r0);
__ neg(result, result, SetOE, SetRC);
// Deoptimize on overflow.
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(overflow, instr, "overflow", cr0);
__ bind(&done);
}
// Deoptimize on overflow.
__ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
__ cmpw(input, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, "overflow");
__ neg(result, result);
__ bind(&done);
__ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
&exact);
- DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(al, instr, "lost precision or NaN");
__ bind(&exact);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmpi(result, Operand::Zero());
__ bne(&done);
__ cmpwi(input_high, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, "minus zero");
}
__ bind(&done);
}
__ LoadDoubleLiteral(dot_five, 0.5, r0);
__ fabs(double_scratch1, input);
__ fcmpu(double_scratch1, dot_five);
- DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(unordered, instr, "lost precision or NaN");
// If input is in [-0.5, -0], the result is -0.
// If input is in [+0, +0.5[, the result is +0.
// If the input is +0.5, the result is 1.
#endif
__ cmpi(scratch1, Operand::Zero());
// [-0.5, -0].
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, "minus zero");
}
Label return_zero;
__ fcmpu(input, dot_five);
// Reuse dot_five (double_scratch0) as we no longer need this value.
__ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
double_scratch0(), &done, &done);
- DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(al, instr, "lost precision or NaN");
__ bind(&done);
}
__ LoadP(r10, FieldMemOperand(r5, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r10, ip);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, "not a heap number");
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(cc, instr, "out of bounds");
}
}
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
+ DeoptimizeIf(eq, instr, "memento found");
__ bind(&no_memento_found);
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ TestUnsignedSmiCandidate(input, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(ne, instr, "overflow", cr0);
}
#if !V8_TARGET_ARCH_PPC64
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTagCheckOverflow(output, input, r0);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(lt, instr, "overflow", cr0);
} else {
#endif
__ SmiTag(output, input);
// If the input is a HeapObject, the value of scratch won't be zero.
__ andi(scratch, input, Operand(kHeapObjectTag));
__ SmiUntag(result, input);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+ DeoptimizeIf(ne, instr, "not a Smi", cr0);
} else {
__ SmiUntag(result, input);
}
if (can_convert_undefined_to_nan) {
__ bne(&convert);
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, "not a heap number");
}
// load heap number
__ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
__ bne(&done);
__ Cmpi(scratch, Operand(HeapNumber::kSignMask), r0);
#endif
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
__ b(&done);
if (can_convert_undefined_to_nan) {
// Convert undefined (and hole) to NaN.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(input_reg, ip);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(ne, instr, "not a heap number/undefined");
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
__ b(&done);
__ bind(&check_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(input_reg, ip);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedTrueFalse,
- cr7);
+ DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false", cr7);
__ li(input_reg, Operand::Zero());
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, cr7);
+ DeoptimizeIf(ne, instr, "not a heap number", cr7);
__ lfd(double_scratch2,
FieldMemOperand(input_reg, HeapNumber::kValueOffset));
}
__ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
double_scratch);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, cr7);
+ DeoptimizeIf(ne, instr, "lost precision or NaN", cr7);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmpi(input_reg, Operand::Zero());
FieldMemOperand(scratch2, HeapNumber::kValueOffset +
Register::kExponentOffset));
__ cmpwi(scratch1, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, cr7);
+ DeoptimizeIf(lt, instr, "minus zero", cr7);
}
}
__ bind(&done);
__ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
double_scratch);
// Deoptimize if the input wasn't an int32 (inside a double).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmpi(result_reg, Operand::Zero());
__ MovDoubleHighToInt(scratch1, double_input);
#endif
__ cmpi(scratch1, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, "minus zero");
__ bind(&done);
}
}
__ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
double_scratch);
// Deoptimize if the input wasn't an int32 (inside a double).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmpi(result_reg, Operand::Zero());
__ MovDoubleHighToInt(scratch1, double_input);
#endif
__ cmpi(scratch1, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, "minus zero");
__ bind(&done);
}
}
__ SmiTag(result_reg);
#else
__ SmiTagCheckOverflow(result_reg, r0);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(lt, instr, "overflow", cr0);
#endif
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ TestIfSmi(ToRegister(input), r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+ DeoptimizeIf(ne, instr, "not a Smi", cr0);
}
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ TestIfSmi(ToRegister(input), r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ DeoptimizeIf(eq, instr, "Smi", cr0);
}
}
// If there is only one type in the interval, check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, "wrong instance type");
} else {
- DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(lt, instr, "wrong instance type");
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpli(scratch, Operand(last));
- DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(gt, instr, "wrong instance type");
}
}
} else {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ andi(r0, scratch, Operand(mask));
- DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
- cr0);
+ DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type", cr0);
} else {
__ andi(scratch, scratch, Operand(mask));
__ cmpi(scratch, Operand(tag));
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, "wrong instance type");
}
}
}
} else {
__ Cmpi(reg, Operand(object), r0);
}
- DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
+ DeoptimizeIf(ne, instr, "value mismatch");
}
__ StoreToSafepointRegisterSlot(r3, scratch0());
}
__ TestIfSmi(scratch0(), r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
+ DeoptimizeIf(eq, instr, "instance migration failed", cr0);
}
if (instr->hydrogen()->HasMigrationTarget()) {
__ bne(deferred->entry());
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, "wrong map");
}
__ bind(&success);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(ne, instr, "not a heap number/undefined");
__ li(result_reg, Operand::Zero());
__ b(&done);
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r3, ip);
- DeoptimizeIf(eq, instr, Deoptimizer::kUndefined);
+ DeoptimizeIf(eq, instr, "undefined");
Register null_value = r8;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ cmp(r3, null_value);
- DeoptimizeIf(eq, instr, Deoptimizer::kNull);
+ DeoptimizeIf(eq, instr, "null");
__ TestIfSmi(r3, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ DeoptimizeIf(eq, instr, "Smi", cr0);
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE);
- DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(le, instr, "wrong instance type");
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
__ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
__ cmp(r4, ip);
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, "wrong map");
__ bind(&use_cache);
}
__ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmpi(result, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
+ DeoptimizeIf(eq, instr, "no cache");
__ bind(&done);
}
Register map = ToRegister(instr->map());
__ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
__ cmp(map, scratch0());
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, "wrong map");
}
}
-void Assembler::RecordDeoptReason(const int reason, const int raw_position) {
- if (FLAG_trace_deopt) {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::POSITION, raw_position);
- RecordRelocInfo(RelocInfo::DEOPT_REASON, reason);
- }
-}
-
-
Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
// No out-of-line constant pool support.
DCHECK(!FLAG_enable_ool_constant_pool);
// Use --code-comments to enable.
void RecordComment(const char* msg, bool force = false);
- // Record a deoptimization reason that can be used by a log or cpu profiler.
- // Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const int raw_position);
-
// Allocate a constant pool of the correct size for the generated code.
Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ const char* detail,
Deoptimizer::BailoutType bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
}
Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ instr->Mnemonic(), detail);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ const char* detail) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
+ DeoptimizeIf(cc, instr, detail, bailout_type);
}
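
The two hunks above restore the pre-enum interface: the full DeoptimizeIf takes an explicit bailout type, while the two-argument overload derives it from whether the current compilation is a stub. A minimal standalone sketch of that delegation pattern (the types here are compilable stand-ins, not the real V8 declarations):

#include <cstdio>

enum class BailoutType { EAGER, LAZY };

struct CodeInfo {
  bool is_stub;
  bool IsStub() const { return is_stub; }
};

struct Codegen {
  CodeInfo* info_;
  CodeInfo* info() { return info_; }

  // Full form: the caller picks the bailout type explicitly.
  void DeoptimizeIf(bool condition, const char* detail, BailoutType type) {
    if (condition) {
      std::printf("deopt (%s): %s\n",
                  type == BailoutType::LAZY ? "lazy" : "eager", detail);
    }
  }

  // Convenience overload: stubs bail out lazily, optimized JS eagerly.
  void DeoptimizeIf(bool condition, const char* detail) {
    BailoutType type =
        info()->IsStub() ? BailoutType::LAZY : BailoutType::EAGER;
    DeoptimizeIf(condition, detail, type);
  }
};

int main() {
  CodeInfo info{false};
  Codegen cg{&info};
  cg.DeoptimizeIf(true, "division by zero");  // prints an eager deopt
}
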
__ andl(dividend, Immediate(mask));
__ negl(dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, "minus zero");
}
__ jmp(&done, Label::kNear);
}
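
The masked-modulus hunk above computes x % 2^k for a negative dividend with a mask-and-negate sequence; a zero remainder then means the JavaScript result would be -0, which an int32 cannot encode, hence the "minus zero" deopt. A standalone sketch of the same logic (hypothetical helper, assuming mask = 2^k - 1 and a dividend other than kMinInt):

#include <cassert>
#include <cstdint>

// Returns false to signal the "minus zero" bailout: a zero remainder from a
// negative dividend is -0 in JavaScript, which an int32 cannot represent.
bool ModByPowerOfTwo(int32_t x, int32_t mask, int32_t* out) {
  if (x < 0) {
    int32_t r = (-x) & mask;   // neg, and: remainder of the magnitude
    if (r == 0) return false;  // result would be -0 -> deopt
    *out = -r;                 // neg back: sign follows the dividend
    return true;
  }
  *out = x & mask;
  return true;
}

int main() {
  int32_t r;
  assert(ModByPowerOfTwo(5, 3, &r) && r == 1);    // 5 % 4
  assert(ModByPowerOfTwo(-5, 3, &r) && r == -1);  // -5 % 4 is -1 in JS
  assert(!ModByPowerOfTwo(-4, 3, &r));            // -4 % 4 is -0 -> bailout
}
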
DCHECK(ToRegister(instr->result()).is(rax));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
Label remainder_not_zero;
__ j(not_zero, &remainder_not_zero, Label::kNear);
__ cmpl(dividend, Immediate(0));
- DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(less, instr, "minus zero");
__ bind(&remainder_not_zero);
}
}
// deopt in this case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(right_reg, right_reg);
- DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(zero, instr, "division by zero");
}
// Check for kMinInt % -1; idiv would signal a divide error. We
__ j(not_zero, &no_overflow_possible, Label::kNear);
__ cmpl(right_reg, Immediate(-1));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(equal, instr, "minus zero");
} else {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ Set(result_reg, 0);
__ j(not_sign, &positive_left, Label::kNear);
__ idivl(right_reg);
__ testl(result_reg, result_reg);
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, "minus zero");
__ jmp(&done, Label::kNear);
__ bind(&positive_left);
}
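
The kMinInt % -1 special case above exists because x86 idiv produces quotient and remainder together: INT32_MIN / -1 overflows the quotient and raises a divide error even though the remainder (0) is representable. With kBailoutOnMinusZero set, the code deopts instead, since a zero remainder from a negative dividend is -0 in JavaScript. A sketch of the guard (hypothetical helper, not V8 code):

#include <cassert>
#include <cstdint>

// INT32_MIN / -1 overflows the quotient, so x86 idiv faults on it; the
// remainder has to be produced without dividing.
int32_t SafeMod32(int32_t left, int32_t right) {
  if (left == INT32_MIN && right == -1) return 0;  // idiv would raise #DE
  return left % right;  // quotient fits in int32, idiv is safe here
}

int main() {
  assert(SafeMod32(INT32_MIN, -1) == 0);
  assert(SafeMod32(-7, 3) == -1);  // sign of the result follows the dividend
}
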
// If the divisor is negative, we have to negate and handle edge cases.
__ negl(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, "minus zero");
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
}
return;
}
DCHECK(ToRegister(instr->result()).is(rdx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, "minus zero");
}
// Easy case: We need no dynamic check for the dividend and the flooring
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(divisor, divisor);
- DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(zero, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
__ testl(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ testl(divisor, divisor);
- DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(sign, instr, "minus zero");
__ bind(&dividend_not_zero);
}
__ cmpl(dividend, Immediate(kMinInt));
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmpl(divisor, Immediate(-1));
- DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(zero, instr, "overflow");
__ bind(&dividend_not_min_int);
}
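
The two generic-divisor guards above cover the int32 edge cases of JavaScript division: 0 divided by a negative number is -0, and kMinInt / -1 is 2^31, one past INT32_MAX. A standalone sketch (hypothetical helper, not V8 code):

#include <cassert>
#include <cstdint>

enum class Deopt { kNone, kMinusZero, kOverflow };

// The two checks the generated code emits before a generic int32 division.
Deopt CheckDiv32(int32_t dividend, int32_t divisor) {
  if (dividend == 0 && divisor < 0) return Deopt::kMinusZero;  // 0 / -x == -0
  if (dividend == INT32_MIN && divisor == -1) return Deopt::kOverflow;  // 2^31
  return Deopt::kNone;
}

int main() {
  assert(CheckDiv32(0, -8) == Deopt::kMinusZero);
  assert(CheckDiv32(INT32_MIN, -1) == Deopt::kOverflow);
  assert(CheckDiv32(6, -3) == Deopt::kNone);
}
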
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, "minus zero");
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmpl(dividend, Immediate(kMinInt));
- DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(zero, instr, "overflow");
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ testl(dividend, Immediate(mask));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(not_zero, instr, "lost precision");
}
__ Move(result, dividend);
int32_t shift = WhichPowerOf2Abs(divisor);
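
For a power-of-two divisor, the low bits of the dividend are exactly the remainder, so the mask test above detects lost precision with a single test instruction; writing |divisor| - 1 as -(divisor + 1) for negative divisors keeps the computation in range even when divisor == kMinInt. A sketch:

#include <cassert>
#include <cstdint>

// For divisor = ±2^k the low k bits of the dividend are the remainder, so
// any set bit under the mask means the division would lose precision.
bool DividesExactly(int32_t dividend, int32_t divisor) {
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);  // |divisor|-1
  return (dividend & mask) == 0;
}

int main() {
  assert(DividesExactly(24, 8));
  assert(!DividesExactly(25, 8));
  assert(DividesExactly(-16, -4));
  assert(DividesExactly(INT32_MIN, INT32_MIN));  // mask stays at INT32_MAX
}
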
DCHECK(ToRegister(instr->result()).is(rdx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, "minus zero");
}
__ TruncatingDiv(dividend, Abs(divisor));
__ movl(rax, rdx);
__ imull(rax, rax, Immediate(divisor));
__ subl(rax, dividend);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(not_equal, instr, "lost precision");
}
}
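
TruncatingDiv computes the quotient with a reciprocal multiply-and-shift; when the result has non-truncating uses, the imull/subl pair above multiplies the quotient back and deopts unless it reproduces the dividend. A sketch of that verification step (hypothetical helper, not V8 code):

#include <cassert>
#include <cstdint>

// After a reciprocal-based truncating division q = x / d, q * d reproduces x
// exactly when nothing was lost; otherwise the code deopts "lost precision".
bool ExactQuotient(int32_t x, int32_t d, int32_t* q) {
  *q = x / d;          // stands in for TruncatingDiv's multiply-and-shift
  return *q * d == x;  // the imull/subl check from the hunk above
}

int main() {
  int32_t q;
  assert(ExactQuotient(21, 7, &q) && q == 3);
  assert(!ExactQuotient(22, 7, &q));  // remainder 1 -> deopt
}
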
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(divisor, divisor);
- DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(zero, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
__ testl(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ testl(divisor, divisor);
- DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(sign, instr, "minus zero");
__ bind(&dividend_not_zero);
}
__ cmpl(dividend, Immediate(kMinInt));
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmpl(divisor, Immediate(-1));
- DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(zero, instr, "overflow");
__ bind(&dividend_not_min_int);
}
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ testl(remainder, remainder);
- DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(not_zero, instr, "lost precision");
}
}
}
if (can_overflow) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
? !instr->hydrogen_value()->representation().IsSmi()
: SmiValuesAre31Bits());
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(no_condition, instr, "minus zero");
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmpl(kScratchRegister, Immediate(0));
- DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(less, instr, "minus zero");
}
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
} else {
__ orl(kScratchRegister, ToOperand(right));
}
- DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(sign, instr, "minus zero");
} else {
// Test the non-zero operand for negative sign.
if (instr->hydrogen_value()->representation().IsSmi()) {
} else {
__ orl(kScratchRegister, ToRegister(right));
}
- DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(sign, instr, "minus zero");
}
__ bind(&done);
}
__ shrl_cl(ToRegister(left));
if (instr->can_deopt()) {
__ testl(ToRegister(left), ToRegister(left));
- DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(negative, instr, "negative value");
}
break;
case Token::SHL:
__ shrl(ToRegister(left), Immediate(shift_count));
} else if (instr->can_deopt()) {
__ testl(ToRegister(left), ToRegister(left));
- DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(negative, instr, "negative value");
}
break;
case Token::SHL:
__ shll(ToRegister(left), Immediate(shift_count - 1));
}
__ Integer32ToSmi(ToRegister(left), ToRegister(left));
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
} else {
__ shll(ToRegister(left), Immediate(shift_count));
}
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
}
}
DCHECK(object.is(rax));
Condition cc = masm()->CheckSmi(object);
- DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(cc, instr, "Smi");
__ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotADateObject);
+ DeoptimizeIf(not_equal, instr, "not a date object");
if (index->value() == 0) {
__ movp(result, FieldOperand(object, JSDate::kValueOffset));
}
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
}
}
}
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ testb(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(zero, instr, "Smi");
}
const Register map = kScratchRegister;
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
+ DeoptimizeIf(no_condition, instr, "unexpected object");
}
}
}
__ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, "hole");
}
}
DCHECK(!value.is(cell));
__ Move(cell, cell_handle, RelocInfo::CELL);
__ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, "hole");
// Store the value.
__ movp(Operand(cell, 0), value);
} else {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, "hole");
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(target, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, "hole");
} else {
__ j(not_equal, &skip_assignment);
}
// Check that the function has a prototype or an initial map.
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, "hole");
// If the function does not have an initial map, we're done.
Label done;
__ movl(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ testl(result, result);
- DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(negative, instr, "negative value");
}
break;
case EXTERNAL_FLOAT32_ELEMENTS:
FAST_DOUBLE_ELEMENTS,
instr->base_offset() + sizeof(kHoleNanLower32));
__ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, "hole");
}
Operand double_load_operand = BuildFastArrayOperand(
if (requires_hole_check) {
if (IsFastSmiElementsKind(hinstr->elements_kind())) {
Condition smi = __ CheckSmi(result);
- DeoptimizeIf(NegateCondition(smi), instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(NegateCondition(smi), instr, "not a Smi");
} else {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, "hole");
}
}
}
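
Both hole checks above rely on how holes are stored: tagged fast arrays use the the_hole sentinel object, while FAST_DOUBLE arrays encode the hole as one specific NaN bit pattern, so comparing only the upper 32 bits of the slot against kHoleNanUpper32 suffices. A sketch of the double-slot check (the constant here is an illustrative stand-in, not the real kHoleNanUpper32 value):

#include <cassert>
#include <cstdint>
#include <cstring>

// Assumption: an example hole encoding, not V8's actual constant.
const uint32_t kExampleHoleNanUpper32 = 0x7FF7FFFF;

// A double slot is a hole iff its upper word matches the hole-NaN pattern;
// no ordinary computed double carries that exact encoding.
bool IsHoleSlot(uint64_t raw_slot_bits) {
  return static_cast<uint32_t>(raw_slot_bits >> 32) == kExampleHoleNanUpper32;
}

int main() {
  double d = 1.5;
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  assert(!IsHoleSlot(bits));
  assert(IsHoleSlot(uint64_t{kExampleHoleNanUpper32} << 32));
}
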
// The receiver should be a JS object.
Condition is_smi = __ CheckSmi(receiver);
- DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(is_smi, instr, "Smi");
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
- DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
+ DeoptimizeIf(below, instr, "not a JavaScript object");
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmpp(length, Immediate(kArgumentsLimit));
- DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
+ DeoptimizeIf(above, instr, "too many arguments");
__ Push(receiver);
__ movp(receiver, length);
Register input_reg = ToRegister(instr->value());
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
Label slow, allocated, done;
Register tmp = input_reg.is(rax) ? rcx : rax;
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ negl(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(negative, instr, "overflow");
__ bind(&is_positive);
}
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ negp(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(negative, instr, "overflow");
__ bind(&is_positive);
}
// Deoptimize if minus zero.
__ movq(output_reg, input_reg);
__ subq(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(overflow, instr, "minus zero");
}
__ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
__ cvttsd2si(output_reg, xmm_scratch);
__ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
} else {
Label negative_sign, done;
// Deoptimize on unordered.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(parity_even, instr, "NaN");
__ j(below, &negative_sign, Label::kNear);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ j(above, &positive_sign, Label::kNear);
__ movmskpd(output_reg, input_reg);
__ testq(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, "minus zero");
__ Set(output_reg, 0);
__ jmp(&done);
__ bind(&positive_sign);
__ cvttsd2si(output_reg, input_reg);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ subl(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
__ bind(&done);
}
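
Two idioms recur in the floor/round code above. First, cvttsd2si reports NaN and out-of-range inputs by producing INT32_MIN ("minint"), and cmpl output, 1 sets the overflow flag for exactly that value, so one conditional deopt catches every bad conversion (conservatively including a genuine -2^31). Second, a zero result is disambiguated into +0 versus -0 by reading the input's sign bit with movmskpd. A sketch of both (hypothetical helper, not V8 code):

#include <cassert>
#include <cmath>
#include <cstdint>

// Mirrors cvttsd2si: NaN and out-of-range doubles produce INT32_MIN, and the
// cmp-with-1 trick makes exactly that value set the overflow flag.
bool TruncateChecked(double input, int32_t* out, bool* minus_zero) {
  double t = std::trunc(input);
  int32_t r = (std::isnan(t) || t < -2147483648.0 || t > 2147483647.0)
                  ? INT32_MIN  // the "minint" indefinite result
                  : static_cast<int32_t>(t);
  if (r == INT32_MIN) return false;  // deopt "overflow" (includes real -2^31)
  *out = r;
  *minus_zero = (r == 0) && std::signbit(input);  // the movmskpd sign check
  return true;
}

int main() {
  int32_t r;
  bool mz;
  assert(TruncateChecked(-0.25, &r, &mz) && r == 0 && mz);  // truncates to -0
  assert(!TruncateChecked(3e9, &r, &mz));                   // out of range
}
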
__ cvttsd2si(output_reg, xmm_scratch);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
__ jmp(&done, dist);
__ bind(&below_one_half);
__ cvttsd2si(output_reg, input_temp);
// Catch minint produced by overflow, and prevent overflow when compensating.
__ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
__ Cvtlsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ movq(output_reg, input_reg);
__ testq(output_reg, output_reg);
- DeoptimizeIf(negative, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(negative, instr, "minus zero");
}
__ Set(output_reg, 0);
__ bind(&done);
Label no_deopt;
__ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
__ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
__ int3();
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(cc, instr, "out of bounds");
}
}
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
+ DeoptimizeIf(equal, instr, "memento found");
__ bind(&no_memento_found);
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
- DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(NegateCondition(is_smi), instr, "overflow");
}
__ Integer32ToSmi(output, input);
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
}
}
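
On configurations with 31-bit smis, tagging an int32 shifts it left by one, which overflows exactly when the value is outside [-2^30, 2^30 - 1]; that is what Integer32ToSmi plus the overflow deopt above detect. A sketch (hypothetical helper, assuming a 31-bit smi payload):

#include <cassert>
#include <cstdint>

// With a 31-bit smi payload, tagging doubles the value (low tag bit 0); it
// overflows exactly when the value is outside [-2^30, 2^30 - 1].
bool TagSmi31(int32_t value, int32_t* smi) {
  if (value < -(1 << 30) || value > (1 << 30) - 1) return false;  // deopt
  *smi = value * 2;  // same as value << 1, with the smi tag bit clear
  return true;
}

int main() {
  int32_t s;
  assert(TagSmi31(42, &s) && s == 84);
  assert(!TagSmi31(1 << 30, &s));  // needs 32 bits once shifted
}
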
Register input = ToRegister(instr->value());
if (instr->needs_check()) {
Condition is_smi = __ CheckSmi(input);
- DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(NegateCondition(is_smi), instr, "not a Smi");
} else {
__ AssertSmi(input);
}
if (can_convert_undefined_to_nan) {
__ j(not_equal, &convert, Label::kNear);
} else {
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
}
if (deoptimize_on_minus_zero) {
__ j(not_equal, &done, Label::kNear);
__ movmskpd(kScratchRegister, result_reg);
__ testq(kScratchRegister, Immediate(1));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, "minus zero");
}
__ jmp(&done, Label::kNear);
// Convert undefined (and hole) to NaN. Compute NaN as 0/0.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
__ pcmpeqd(result_reg, result_reg);
__ jmp(&done, Label::kNear);
__ bind(&check_false);
__ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
- DeoptimizeIf(not_equal, instr,
- Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+ DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false");
__ Set(input_reg, 0);
} else {
XMMRegister scratch = ToDoubleRegister(instr->temp());
DCHECK(!scratch.is(xmm0));
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
__ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, xmm0);
__ Cvtlsi2sd(scratch, input_reg);
__ ucomisd(xmm0, scratch);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
- DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(not_equal, instr, "lost precision");
+ DeoptimizeIf(parity_even, instr, "NaN");
if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
__ testl(input_reg, input_reg);
__ j(not_zero, done);
__ movmskpd(input_reg, xmm0);
__ andl(input_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, "minus zero");
}
}
}
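
The deferred tagged-to-int32 path above uses the truncate/convert-back/compare idiom: cvttsd2si truncates, Cvtlsi2sd widens the result again, and ucomisd then flags any mismatch as lost precision and any NaN via the parity bit. A standalone sketch (hypothetical helper, not V8 code):

#include <cassert>
#include <cmath>
#include <cstdint>

enum class TaggedToI { kOk, kLostPrecision, kNaN };

// The int32 -> double round trip is lossless only for doubles that already
// hold an exact int32 value; anything else deopts.
TaggedToI RoundTrip(double input, int32_t* out) {
  if (std::isnan(input)) return TaggedToI::kNaN;  // ucomisd -> parity_even
  if (input < -2147483648.0 || input > 2147483647.0)
    return TaggedToI::kLostPrecision;  // cvttsd2si would yield minint
  int32_t truncated = static_cast<int32_t>(input);
  if (static_cast<double>(truncated) != input)
    return TaggedToI::kLostPrecision;  // a fractional part was dropped
  *out = truncated;
  return TaggedToI::kOk;
}

int main() {
  int32_t v;
  assert(RoundTrip(42.0, &v) == TaggedToI::kOk && v == 42);
  assert(RoundTrip(42.5, &v) == TaggedToI::kLostPrecision);
  assert(RoundTrip(std::nan(""), &v) == TaggedToI::kNaN);
}
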
&is_nan, &minus_zero, dist);
__ jmp(&done, dist);
__ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(no_condition, instr, "lost precision");
__ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(no_condition, instr, "NaN");
__ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(no_condition, instr, "minus zero");
__ bind(&done);
}
}
&minus_zero, dist);
__ jmp(&done, dist);
__ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(no_condition, instr, "lost precision");
__ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(no_condition, instr, "NaN");
__ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(no_condition, instr, "minus zero");
__ bind(&done);
__ Integer32ToSmi(result_reg, result_reg);
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(NegateCondition(cc), instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(NegateCondition(cc), instr, "not a Smi");
}
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(cc, instr, "Smi");
}
}
// If there is only one type in the interval, check for equality.
if (first == last) {
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(not_equal, instr, "wrong instance type");
} else {
- DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(below, instr, "wrong instance type");
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
Immediate(static_cast<int8_t>(last)));
- DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(above, instr, "wrong instance type");
}
}
} else {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
Immediate(mask));
- DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
- Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type");
} else {
__ movzxbl(kScratchRegister,
FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
__ andb(kScratchRegister, Immediate(mask));
__ cmpb(kScratchRegister, Immediate(tag));
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(not_equal, instr, "wrong instance type");
}
}
}
void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
__ Cmp(reg, instr->hydrogen()->object().handle());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
+ DeoptimizeIf(not_equal, instr, "value mismatch");
}
__ testp(rax, Immediate(kSmiTagMask));
}
- DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
+ DeoptimizeIf(zero, instr, "instance migration failed");
}
if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(not_equal, instr, "wrong map");
}
__ bind(&success);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ Cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
__ xorl(input_reg, input_reg);
__ jmp(&done, Label::kNear);
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(equal, instr, Deoptimizer::kUndefined);
+ DeoptimizeIf(equal, instr, "undefined");
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ cmpp(rax, null_value);
- DeoptimizeIf(equal, instr, Deoptimizer::kNull);
+ DeoptimizeIf(equal, instr, "null");
Condition cc = masm()->CheckSmi(rax);
- DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(cc, instr, "Smi");
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
- DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(below_equal, instr, "wrong instance type");
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kMetaMapRootIndex);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(not_equal, instr, "wrong map");
__ bind(&use_cache);
}
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
Condition cc = masm()->CheckSmi(result);
- DeoptimizeIf(cc, instr, Deoptimizer::kNoCache);
+ DeoptimizeIf(cc, instr, "no cache");
}
Register object = ToRegister(instr->value());
__ cmpp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(not_equal, instr, "wrong map");
}
int argc);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail,
Deoptimizer::BailoutType bailout_type);
- void DeoptimizeIf(Condition cc, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
__ and_(dividend, mask);
__ neg(dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, "minus zero");
}
__ jmp(&done, Label::kNear);
}
DCHECK(ToRegister(instr->result()).is(eax));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
Label remainder_not_zero;
__ j(not_zero, &remainder_not_zero, Label::kNear);
__ cmp(dividend, Immediate(0));
- DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(less, instr, "minus zero");
__ bind(&remainder_not_zero);
}
}
// deopt in this case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(right_reg, Operand(right_reg));
- DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(zero, instr, "division by zero");
}
// Check for kMinInt % -1; idiv would signal a divide error. We
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ cmp(right_reg, -1);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(equal, instr, "minus zero");
} else {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ Move(result_reg, Immediate(0));
__ j(not_sign, &positive_left, Label::kNear);
__ idiv(right_reg);
__ test(result_reg, Operand(result_reg));
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, "minus zero");
__ jmp(&done, Label::kNear);
__ bind(&positive_left);
}
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, "minus zero");
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, kMinInt);
- DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(zero, instr, "overflow");
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ test(dividend, Immediate(mask));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(not_zero, instr, "lost precision");
}
__ Move(result, dividend);
int32_t shift = WhichPowerOf2Abs(divisor);
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, "minus zero");
}
__ TruncatingDiv(dividend, Abs(divisor));
__ mov(eax, edx);
__ imul(eax, eax, divisor);
__ sub(eax, dividend);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(not_equal, instr, "lost precision");
}
}
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
- DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(zero, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
__ test(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
- DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(sign, instr, "minus zero");
__ bind(&dividend_not_zero);
}
__ cmp(dividend, kMinInt);
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
- DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(zero, instr, "overflow");
__ bind(&dividend_not_min_int);
}
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ test(remainder, remainder);
- DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(not_zero, instr, "lost precision");
}
}
// If the divisor is negative, we have to negate and handle edge cases.
__ neg(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, "minus zero");
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
}
return;
}
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(no_condition, instr, "division by zero");
return;
}
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, "minus zero");
}
// Easy case: We need no dynamic check for the dividend and the flooring
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
- DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(zero, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
__ test(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
- DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(sign, instr, "minus zero");
__ bind(&dividend_not_zero);
}
__ cmp(dividend, kMinInt);
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
- DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(zero, instr, "overflow");
__ bind(&dividend_not_min_int);
}
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ j(not_zero, &done);
if (right->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(no_condition, instr, "minus zero");
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmp(ToRegister(instr->temp()), Immediate(0));
- DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(less, instr, "minus zero");
}
} else {
// Test the non-zero operand for negative sign.
__ or_(ToRegister(instr->temp()), ToOperand(right));
- DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(sign, instr, "minus zero");
}
__ bind(&done);
}
__ shr_cl(ToRegister(left));
if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(sign, instr, "negative value");
}
break;
case Token::SHL:
case Token::ROR:
if (shift_count == 0 && instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(sign, instr, "negative value");
} else {
__ ror(ToRegister(left), shift_count);
}
__ shr(ToRegister(left), shift_count);
} else if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(sign, instr, "negative value");
}
break;
case Token::SHL:
__ shl(ToRegister(left), shift_count - 1);
}
__ SmiTag(ToRegister(left));
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
} else {
__ shl(ToRegister(left), shift_count);
}
__ sub(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
}
}
DCHECK(object.is(eax));
__ test(object, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(zero, instr, "Smi");
__ CmpObjectType(object, JS_DATE_TYPE, scratch);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotADateObject);
+ DeoptimizeIf(not_equal, instr, "not a date object");
if (index->value() == 0) {
__ mov(result, FieldOperand(object, JSDate::kValueOffset));
__ add(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
}
}
}
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ test(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(zero, instr, "Smi");
}
Register map = no_reg; // Keep the compiler happy.
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
+ DeoptimizeIf(no_condition, instr, "unexpected object");
}
}
}
__ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, "hole");
}
}
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, "hole");
}
// Store the value.
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, "hole");
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(target, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, "hole");
} else {
__ j(not_equal, &skip_assignment, Label::kNear);
}
// Check that the function has a prototype or an initial map.
__ cmp(Operand(result), Immediate(factory()->the_hole_value()));
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, "hole");
// If the function does not have an initial map, we're done.
Label done;
__ mov(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ test(result, Operand(result));
- DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(negative, instr, "negative value");
}
break;
case EXTERNAL_FLOAT32_ELEMENTS:
FAST_DOUBLE_ELEMENTS,
instr->base_offset() + sizeof(kHoleNanLower32));
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, "hole");
}
Operand double_load_operand = BuildFastArrayOperand(
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(not_equal, instr, "not a Smi");
} else {
__ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, "hole");
}
}
}
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
- DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(equal, instr, "Smi");
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
- DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
+ DeoptimizeIf(below, instr, "not a JavaScript object");
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, kArgumentsLimit);
- DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
+ DeoptimizeIf(above, instr, "too many arguments");
__ push(receiver);
__ mov(receiver, length);
Register input_reg = ToRegister(instr->value());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
Label slow, allocated, done;
Register tmp = input_reg.is(eax) ? ecx : eax;
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ neg(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(negative, instr, "overflow");
__ bind(&is_positive);
}
__ fldz();
__ fld(1);
__ FCmp();
- DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(parity_even, instr, "NaN");
__ j(below, &not_minus_zero, Label::kNear);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// +- 0.0.
__ fld(0);
__ FXamSign();
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, "minus zero");
__ Move(output_reg, Immediate(0));
__ jmp(&done, Label::kFar);
}
__ fist_s(Operand(esp, 0));
__ pop(output_reg);
__ X87CheckIA();
- DeoptimizeIf(equal, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(equal, instr, "overflow");
__ fnclex();
__ X87SetRC(0x0000);
__ bind(&done);
// Check overflow.
__ X87CheckIA();
__ pop(result);
- DeoptimizeIf(equal, instr, Deoptimizer::kConversionOverflow);
+ DeoptimizeIf(equal, instr, "conversion overflow");
__ fnclex();
// Restore round mode.
__ X87SetRC(0x0000);
// If the sign is positive, we return +0.
__ fld(0);
__ FXamSign();
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, "minus zero");
}
__ Move(result, Immediate(0));
__ jmp(&done);
// Check overflow.
__ X87CheckIA();
__ pop(result);
- DeoptimizeIf(equal, instr, Deoptimizer::kConversionOverflow);
+ DeoptimizeIf(equal, instr, "conversion overflow");
__ fnclex();
// Restore round mode.
__ X87SetRC(0x0000);
X87LoadForUsage(base);
__ JumpIfSmi(exponent, &no_deopt);
__ CmpObjectType(exponent, HEAP_NUMBER_TYPE, temp);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
// Heap number (double)
__ fld_d(FieldOperand(exponent, HeapNumber::kValueOffset));
__ jmp(&done);
__ int3();
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(cc, instr, "out of bounds");
}
}
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
+ DeoptimizeIf(equal, instr, "memento found");
__ bind(&no_memento_found);
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ test(input, Immediate(0xc0000000));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(not_zero, instr, "overflow");
}
__ SmiTag(input);
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
}
}
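
The test against 0xc0000000 above is the uint32 smi-candidate check: with a 31-bit smi payload the largest positive smi is 2^30 - 1, so a uint32 fits exactly when its top two bits are clear. A sketch (assuming the 31-bit smi layout used on ia32):

#include <cassert>
#include <cstdint>

// A uint32 fits a 31-bit smi payload iff its top two bits are clear --
// exactly what "test input, 0xc0000000" checks before tagging.
bool Uint32FitsSmi31(uint32_t value) {
  return (value & 0xC0000000u) == 0;
}

int main() {
  assert(Uint32FitsSmi31(0x3FFFFFFFu));   // 2^30 - 1: largest positive smi
  assert(!Uint32FitsSmi31(0x40000000u));  // 2^30: needs a wider payload
  assert(!Uint32FitsSmi31(0x80000000u));  // would look negative once tagged
}
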
DCHECK(input->IsRegister() && input->Equals(instr->result()));
if (instr->needs_check()) {
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(not_zero, instr, "not a Smi");
} else {
__ AssertSmi(result);
}
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
} else {
Label heap_number, convert;
__ j(equal, &heap_number);
// Convert undefined (or hole) to NaN.
__ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
__ bind(&convert);
__ push(Immediate(0xffffffff));
// Pop FPU stack before deoptimizing.
__ fstp(0);
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, "minus zero");
}
__ jmp(&done, Label::kNear);
} else {
__ bind(&check_false);
__ cmp(input_reg, factory()->false_value());
- DeoptimizeIf(not_equal, instr,
- Deoptimizer::kNotAHeapNumberUndefinedTrueFalse);
+ DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false");
__ Move(input_reg, Immediate(0));
} else {
// TODO(olivf) Converting a number on the fpu is actually quite slow. We
// should first try a fast conversion and then bail out to this slow case.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, "not a heap number");
__ sub(esp, Immediate(kPointerSize));
__ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
__ j(equal, &no_precision_lost, Label::kNear);
__ fstp(0);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(no_condition, instr, "lost precision");
__ bind(&no_precision_lost);
__ j(parity_odd, &not_nan);
__ fstp(0);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(no_condition, instr, "NaN");
__ bind(&not_nan);
__ test(input_reg, Operand(input_reg));
__ fstp_s(Operand(esp, 0));
__ pop(input_reg);
__ test(input_reg, Operand(input_reg));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, "minus zero");
} else {
__ fist_s(MemOperand(esp, 0));
__ fild_s(MemOperand(esp, 0));
__ FCmp();
__ pop(input_reg);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
- DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(not_equal, instr, "lost precision");
+ DeoptimizeIf(parity_even, instr, "NaN");
}
}
}
&lost_precision, &is_nan, &minus_zero);
__ jmp(&done);
__ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(no_condition, instr, "lost precision");
__ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(no_condition, instr, "NaN");
__ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(no_condition, instr, "minus zero");
__ bind(&done);
}
}
&lost_precision, &is_nan, &minus_zero);
__ jmp(&done);
__ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(no_condition, instr, "lost precision");
__ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(no_condition, instr, "NaN");
__ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(no_condition, instr, "minus zero");
__ bind(&done);
__ SmiTag(result_reg);
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, "overflow");
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(not_zero, instr, "not a Smi");
}
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(zero, instr, "Smi");
}
}
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(not_equal, instr, "wrong instance type");
} else {
- DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(below, instr, "wrong instance type");
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
static_cast<int8_t>(last));
- DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(above, instr, "wrong instance type");
}
}
} else {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
- DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
- Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type");
} else {
__ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
__ and_(temp, mask);
__ cmp(temp, tag);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(not_equal, instr, "wrong instance type");
}
}
}
Operand operand = ToOperand(instr->value());
__ cmp(operand, object);
}
- DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
+ DeoptimizeIf(not_equal, instr, "value mismatch");
}
__ test(eax, Immediate(kSmiTagMask));
}
- DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
+ DeoptimizeIf(zero, instr, "instance migration failed");
}
if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(not_equal, instr, "wrong map");
}
__ bind(&success);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
__ jmp(&zero_result, Label::kNear);
// Heap number
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
__ cmp(eax, isolate()->factory()->undefined_value());
- DeoptimizeIf(equal, instr, Deoptimizer::kUndefined);
+ DeoptimizeIf(equal, instr, "undefined");
__ cmp(eax, isolate()->factory()->null_value());
- DeoptimizeIf(equal, instr, Deoptimizer::kNull);
+ DeoptimizeIf(equal, instr, "null");
__ test(eax, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(zero, instr, "Smi");
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
- DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(below_equal, instr, "wrong instance type");
Label use_cache, call_runtime;
__ CheckEnumCache(&call_runtime);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(not_equal, instr, "wrong map");
__ bind(&use_cache);
}
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
__ test(result, result);
- DeoptimizeIf(equal, instr, Deoptimizer::kNoCache);
+ DeoptimizeIf(equal, instr, "no cache");
}
Register object = ToRegister(instr->value());
__ cmp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(not_equal, instr, "wrong map");
}