}
-void LCodeGen::DeoptimizeIf(Condition condition,
- LEnvironment* environment,
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::BailoutType bailout_type) {
+ LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
}
-void LCodeGen::DeoptimizeIf(Condition condition,
- LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(condition, environment, bailout_type);
+ DeoptimizeIf(condition, instr, bailout_type);
}
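For orientation while reading the call sites that follow: both overloads above obtain the deoptimization environment from the instruction itself. A minimal sketch of the call pattern the hunks below converge on, reusing names from the surrounding code (illustrative only, not part of the patch):

  // A caller used to spell out the environment explicitly:
  //   DeoptimizeIf(eq, instr->environment());
  // It now hands over the instruction, and the overload registers
  // instr->environment() for deoptimization internally:
  //   DeoptimizeIf(eq, instr);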
__ and_(dividend, dividend, Operand(mask));
__ rsb(dividend, dividend, Operand::Zero(), SetCC);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
__ b(&done);
}
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
return;
}
Label remainder_not_zero;
__ b(ne, &remainder_not_zero);
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(lt, instr->environment());
+ DeoptimizeIf(lt, instr);
__ bind(&remainder_not_zero);
}
}
// case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
__ b(ne, &no_overflow_possible);
__ cmp(right_reg, Operand(-1));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
} else {
__ b(ne, &no_overflow_possible);
__ mov(result_reg, Operand::Zero());
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
- DeoptimizeIf(lt, instr->environment());
+ DeoptimizeIf(lt, instr);
}
__ bind(&done);
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
__ Move(result_reg, left_reg);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr);
}
__ bind(&done);
}
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ tst(dividend, Operand(mask));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
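A short aside on the mask expression above, since the same arithmetic reappears in the arm64 and ia32 hunks further down. The helper and asserts below are purely illustrative (not part of the patch); they only check that both branches reduce to Abs(divisor) - 1 for a power-of-two divisor, which is what makes the tst a remainder check:

  // Illustrative only: mirrors the mask computation used by the check above.
  constexpr int RemainderMask(int divisor) {
    return divisor < 0 ? -(divisor + 1) : (divisor - 1);
  }
  static_assert(RemainderMask(8) == 7, "+8 -> 0b0111");
  static_assert(RemainderMask(-8) == 7, "-8 -> 0b0111");
  static_assert((20 & RemainderMask(8)) != 0, "20 % 8 != 0 -> would deopt");
  static_assert((16 & RemainderMask(8)) == 0, "exact multiple -> no deopt");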
if (divisor == -1) { // Nice shortcut, not needed for correctness.
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
return;
}
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
__ TruncatingDiv(result, dividend, Abs(divisor));
__ mov(ip, Operand(divisor));
__ smull(scratch0(), ip, result, ip);
__ sub(scratch0(), scratch0(), dividend, SetCC);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
}
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Check for (0 / -x) that will produce negative zero.
}
__ b(pl, &positive);
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ bind(&positive);
}
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(dividend, Operand(kMinInt));
__ cmp(divisor, Operand(-1), eq);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
if (CpuFeatures::IsSupported(SUDIV)) {
Register remainder = scratch0();
__ Mls(remainder, result, divisor, dividend);
__ cmp(remainder, Operand::Zero());
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
}
// If the divisor is negative, we have to negate and handle edge cases.
__ rsb(result, dividend, Operand::Zero(), SetCC);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
return;
}
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
return;
}
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Easy case: We need no dynamic check for the dividend and the flooring
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Check for (0 / -x) that will produce negative zero.
}
__ b(pl, &positive);
__ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ bind(&positive);
}
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(left, Operand(kMinInt));
__ cmp(right, Operand(-1), eq);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
if (CpuFeatures::IsSupported(SUDIV)) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
__ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
switch (constant) {
case -1:
if (overflow) {
__ rsb(result, left, Operand::Zero(), SetCC);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
} else {
__ rsb(result, left, Operand::Zero());
}
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
__ cmp(left, Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr);
}
__ mov(result, Operand::Zero());
break;
__ smull(result, scratch, left, right);
}
__ cmp(scratch, Operand(result, ASR, 31));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
__ b(pl, &done);
// Bail out if the result is minus zero.
__ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ bind(&done);
}
}
case Token::SHR:
if (instr->can_deopt()) {
__ mov(result, Operand(left, LSR, scratch), SetCC);
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr);
} else {
__ mov(result, Operand(left, LSR, scratch));
}
} else {
if (instr->can_deopt()) {
__ tst(left, Operand(0x80000000));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
__ Move(result, left);
}
} else {
__ SmiTag(result, left, SetCC);
}
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
} else {
__ mov(result, Operand(left, LSL, shift_count));
}
}
if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
}
}
if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
}
DCHECK(!scratch.is(object));
__ SmiTst(object);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
if (index->value() == 0) {
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
}
if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
}
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
const Register map = scratch0();
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
}
}
}
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
}
Register payload = ToRegister(instr->temp());
__ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Store the value.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
} else {
__ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
}
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
} else {
__ b(ne, &skip_assignment);
}
// Check that the function has a prototype or an initial map.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
// If the function does not have an initial map, we're done.
Label done;
__ ldr(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ cmp(result, Operand(0x80000000));
- DeoptimizeIf(cs, instr->environment());
+ DeoptimizeIf(cs, instr);
}
break;
case FLOAT32_ELEMENTS:
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
}
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ cmp(result, scratch);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
}
}
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
- DeoptimizeIf(lt, instr->environment());
+ DeoptimizeIf(lt, instr);
__ b(&result_in_receiver);
__ bind(&global_object);
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, Operand(kArgumentsLimit));
- DeoptimizeIf(hi, instr->environment());
+ DeoptimizeIf(hi, instr);
// Push the receiver and use the register to keep the original
// number of arguments.
__ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
Label done;
Register exponent = scratch0();
// if input is positive.
__ rsb(result, input, Operand::Zero(), SetCC, mi);
// Deoptimize on overflow.
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
Label done, exact;
__ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
__ bind(&exact);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(result, Operand::Zero());
__ b(ne, &done);
__ cmp(input_high, Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr);
}
__ bind(&done);
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ VmovHigh(input_high, input);
__ cmp(input_high, Operand::Zero());
- DeoptimizeIf(mi, instr->environment()); // [-0.5, -0].
+ DeoptimizeIf(mi, instr); // [-0.5, -0].
}
__ VFPCompareAndSetFlags(input, dot_five);
__ mov(result, Operand(1), LeaveCC, eq); // +0.5.
// Reuse dot_five (double_scratch0) as we no longer need this value.
__ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
&done, &done);
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
__ bind(&done);
}
__ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r6, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr->environment());
+ DeoptimizeIf(cc, instr);
}
}
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ bind(&no_memento_found);
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ tst(input, Operand(0xc0000000));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTag(output, input, SetCC);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
} else {
__ SmiTag(output, input);
}
STATIC_ASSERT(kHeapObjectTag == 1);
// If the input is a HeapObject, SmiUntag will set the carry flag.
__ SmiUntag(result, input, SetCC);
- DeoptimizeIf(cs, instr->environment());
+ DeoptimizeIf(cs, instr);
} else {
__ SmiUntag(result, input);
}
}
-void LCodeGen::EmitNumberUntagD(Register input_reg,
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
DwVfpRegister result_reg,
- bool can_convert_undefined_to_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
NumberUntagDMode mode) {
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+ bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
Register scratch = scratch0();
SwVfpRegister flt_scratch = double_scratch0().low();
DCHECK(!result_reg.is(double_scratch0()));
if (can_convert_undefined_to_nan) {
__ b(ne, &convert);
} else {
- DeoptimizeIf(ne, env);
+ DeoptimizeIf(ne, instr);
}
// load heap number
__ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
__ b(ne, &done);
__ VmovHigh(scratch, result_reg);
__ cmp(scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(eq, env);
+ DeoptimizeIf(eq, instr);
}
__ jmp(&done);
if (can_convert_undefined_to_nan) {
// Convert undefined (and hole) to NaN.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, env);
+ DeoptimizeIf(ne, instr);
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
__ jmp(&done);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(scratch2, Operand(ip));
__ RecordComment("Deferred TaggedToI: cannot truncate");
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
__ mov(input_reg, Operand::Zero());
} else {
__ RecordComment("Deferred TaggedToI: not a heap number");
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
__ sub(ip, scratch2, Operand(kHeapObjectTag));
__ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
__ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
__ RecordComment("Deferred TaggedToI: lost precision or NaN");
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(input_reg, Operand::Zero());
__ VmovHigh(scratch1, double_scratch2);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
__ RecordComment("Deferred TaggedToI: minus zero");
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
}
__ bind(&done);
NumberUntagDMode mode = value->representation().IsSmi()
? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
- EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->can_convert_undefined_to_nan(),
- instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment(),
- mode);
+ EmitNumberUntagD(instr, input_reg, result_reg, mode);
}
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
__ bind(&done);
}
}
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
__ bind(&done);
}
}
__ SmiTag(result_reg, SetCC);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input));
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
}
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
} else {
- DeoptimizeIf(lo, instr->environment());
+ DeoptimizeIf(lo, instr);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmp(scratch, Operand(last));
- DeoptimizeIf(hi, instr->environment());
+ DeoptimizeIf(hi, instr);
}
}
} else {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ tst(scratch, Operand(mask));
- DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
+ DeoptimizeIf(tag == 0 ? ne : eq, instr);
} else {
__ and_(scratch, scratch, Operand(mask));
__ cmp(scratch, Operand(tag));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
}
}
} else {
__ cmp(reg, Operand(object));
}
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
__ StoreToSafepointRegisterSlot(r0, scratch0());
}
__ tst(scratch0(), Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
if (instr->hydrogen()->HasMigrationTarget()) {
__ b(ne, deferred->entry());
} else {
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
__ bind(&success);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, Operand(factory()->undefined_value()));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
__ mov(result_reg, Operand::Zero());
__ jmp(&done);
}
Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
- DeoptimizeIf(al, instr->environment(), type);
+ DeoptimizeIf(al, instr, type);
}
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
Register null_value = r5;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ cmp(r0, null_value);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ SmiTst(r0);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
- DeoptimizeIf(le, instr->environment());
+ DeoptimizeIf(le, instr);
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
__ cmp(r1, ip);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
__ bind(&use_cache);
}
__ ldr(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ bind(&done);
}
Register map = ToRegister(instr->map());
__ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
__ cmp(map, scratch0());
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition condition,
- LEnvironment* environment,
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::BailoutType bailout_type);
- void DeoptimizeIf(Condition condition, LEnvironment* environment);
+ void DeoptimizeIf(Condition condition, LInstruction* instr);
void AddToTranslation(LEnvironment* environment,
Translation* translation,
void EmitBranch(InstrType instr, Condition condition);
template<class InstrType>
void EmitFalseBranch(InstrType instr, Condition condition);
- void EmitNumberUntagD(Register input,
- DwVfpRegister result,
- bool allow_undefined_as_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode);
+ void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+ DwVfpRegister result, NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
void LCodeGen::DeoptimizeBranch(
- LEnvironment* environment,
- BranchType branch_type, Register reg, int bit,
+ LInstruction* instr, BranchType branch_type, Register reg, int bit,
Deoptimizer::BailoutType* override_bailout_type) {
+ LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
Deoptimizer::BailoutType bailout_type =
info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
}
-void LCodeGen::Deoptimize(LEnvironment* environment,
+void LCodeGen::Deoptimize(LInstruction* instr,
Deoptimizer::BailoutType* override_bailout_type) {
- DeoptimizeBranch(environment, always, NoReg, -1, override_bailout_type);
+ DeoptimizeBranch(instr, always, NoReg, -1, override_bailout_type);
}
-void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) {
- DeoptimizeBranch(environment, static_cast<BranchType>(cond));
+void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr) {
+ DeoptimizeBranch(instr, static_cast<BranchType>(cond));
}
-void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) {
- DeoptimizeBranch(environment, reg_zero, rt);
+void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr) {
+ DeoptimizeBranch(instr, reg_zero, rt);
}
-void LCodeGen::DeoptimizeIfNotZero(Register rt, LEnvironment* environment) {
- DeoptimizeBranch(environment, reg_not_zero, rt);
+void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr) {
+ DeoptimizeBranch(instr, reg_not_zero, rt);
}
-void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) {
+void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr) {
int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
- DeoptimizeIfBitSet(rt, sign_bit, environment);
+ DeoptimizeIfBitSet(rt, sign_bit, instr);
}
-void LCodeGen::DeoptimizeIfSmi(Register rt,
- LEnvironment* environment) {
- DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), environment);
+void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr) {
+ DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr);
}
-void LCodeGen::DeoptimizeIfNotSmi(Register rt, LEnvironment* environment) {
- DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), environment);
+void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr) {
+ DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr);
}
-void LCodeGen::DeoptimizeIfRoot(Register rt,
- Heap::RootListIndex index,
- LEnvironment* environment) {
+void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
+ LInstruction* instr) {
__ CompareRoot(rt, index);
- DeoptimizeIf(eq, environment);
+ DeoptimizeIf(eq, instr);
}
-void LCodeGen::DeoptimizeIfNotRoot(Register rt,
- Heap::RootListIndex index,
- LEnvironment* environment) {
+void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
+ LInstruction* instr) {
__ CompareRoot(rt, index);
- DeoptimizeIf(ne, environment);
+ DeoptimizeIf(ne, instr);
}
void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input,
- LEnvironment* environment) {
+ LInstruction* instr) {
__ TestForMinusZero(input);
- DeoptimizeIf(vs, environment);
+ DeoptimizeIf(vs, instr);
}
-void LCodeGen::DeoptimizeIfBitSet(Register rt,
- int bit,
- LEnvironment* environment) {
- DeoptimizeBranch(environment, reg_bit_set, rt, bit);
+void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr) {
+ DeoptimizeBranch(instr, reg_bit_set, rt, bit);
}
-void LCodeGen::DeoptimizeIfBitClear(Register rt,
- int bit,
- LEnvironment* environment) {
- DeoptimizeBranch(environment, reg_bit_clear, rt, bit);
+void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr) {
+ DeoptimizeBranch(instr, reg_bit_clear, rt, bit);
}
if (can_overflow) {
__ Adds(result, left, right);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
} else {
__ Add(result, left, right);
}
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Adds(result, left, right);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
} else {
__ Add(result, left, right);
}
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ Cmp(length, kArgumentsLimit);
- DeoptimizeIf(hi, instr->environment());
+ DeoptimizeIf(hi, instr);
// Push the receiver and use the register to keep the original
// number of arguments.
if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
__ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
} else {
- DeoptimizeIf(cond, instr->environment());
+ DeoptimizeIf(cond, instr);
}
}
__ JumpIfSmi(value, true_label);
} else if (expected.NeedsMap()) {
// If we need a map later and have a smi, deopt.
- DeoptimizeIfSmi(value, instr->environment());
+ DeoptimizeIfSmi(value, instr);
}
Register map = NoReg;
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- Deoptimize(instr->environment());
+ Deoptimize(instr);
}
}
}
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(x0, temp);
}
- DeoptimizeIfSmi(temp, instr->environment());
+ DeoptimizeIfSmi(temp, instr);
}
if (instr->hydrogen()->HasMigrationTarget()) {
__ B(ne, deferred->entry());
} else {
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
__ Bind(&success);
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment());
+ DeoptimizeIfSmi(ToRegister(instr->value()), instr);
}
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
Register value = ToRegister(instr->value());
DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
- DeoptimizeIfNotSmi(value, instr->environment());
+ DeoptimizeIfNotSmi(value, instr);
}
__ Cmp(scratch, first);
if (first == last) {
// If there is only one type in the interval check for equality.
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
} else if (last == LAST_TYPE) {
// We don't need to compare with the higher bound of the interval.
- DeoptimizeIf(lo, instr->environment());
+ DeoptimizeIf(lo, instr);
} else {
// If we are below the lower bound, set the C flag and clear the Z flag
// to force a deopt.
__ Ccmp(scratch, last, CFlag, hs);
- DeoptimizeIf(hi, instr->environment());
+ DeoptimizeIf(hi, instr);
}
} else {
uint8_t mask;
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK((tag == 0) || (tag == mask));
if (tag == 0) {
- DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr->environment());
+ DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr);
} else {
- DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr->environment());
+ DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr);
}
} else {
if (tag == 0) {
__ And(scratch, scratch, mask);
__ Cmp(scratch, tag);
}
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
}
}
__ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number);
  // Check for undefined. Undefined is converted to zero for clamping conversion.
- DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
- instr->environment());
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr);
__ Mov(result, 0);
__ B(&done);
} else {
__ Cmp(reg, Operand(object));
}
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
DCHECK(object.is(result) && object.Is(x0));
DCHECK(instr->IsMarkedAsCall());
- DeoptimizeIfSmi(object, instr->environment());
+ DeoptimizeIfSmi(object, instr);
__ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
if (index->value() == 0) {
__ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
}
Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
- Deoptimize(instr->environment(), &type);
+ Deoptimize(instr, &type);
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr->environment());
+ DeoptimizeIfZero(dividend, instr);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
// Test dividend for kMinInt by subtracting one (cmp) and checking for
// overflow.
__ Cmp(dividend, 1);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ Tst(dividend, mask);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
- Deoptimize(instr->environment());
+ Deoptimize(instr);
return;
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr->environment());
+ DeoptimizeIfZero(dividend, instr);
}
__ TruncatingDiv(result, dividend, Abs(divisor));
__ Sxtw(dividend.X(), dividend);
__ Mov(temp, divisor);
__ Smsubl(temp.X(), result, temp, dividend.X());
- DeoptimizeIfNotZero(temp, instr->environment());
+ DeoptimizeIfNotZero(temp, instr);
}
}
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr->environment());
+ DeoptimizeIfZero(divisor, instr);
}
// Check for (0 / -x) as that will produce negative zero.
// If the divisor >= 0 (pl, the opposite of mi) set the flags to
// condition ne, so we don't deopt, ie. positive divisor doesn't deopt.
__ Ccmp(dividend, 0, NoFlag, mi);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
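The conditional-compare idiom above is easy to misread; a brief sketch of the flag flow, assuming the (elided) preceding compare sets the flags from the divisor as the comments say (illustrative only, not part of the patch):

  // Ccmp(dividend, 0, NoFlag, mi):
  //   divisor < 0  (mi holds) -> flags = compare(dividend, 0); eq iff dividend == 0
  //   divisor >= 0 (mi fails) -> flags = NoFlag, so Z is clear and eq cannot hold
  // DeoptimizeIf(eq, instr) therefore fires only for 0 / negative-divisor,
  // the one case whose mathematically correct result is -0.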
// Check for (kMinInt / -1).
// -1. If overflow is clear, set the flags for condition ne, as the
// dividend isn't -1, and thus we shouldn't deopt.
__ Ccmp(divisor, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Compute remainder and deopt if it's not zero.
Register remainder = ToRegister32(instr->temp());
__ Msub(remainder, result, divisor, dividend);
- DeoptimizeIfNotZero(remainder, instr->environment());
+ DeoptimizeIfNotZero(remainder, instr);
}
Register result = ToRegister32(instr->result());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIfMinusZero(input, instr->environment());
+ DeoptimizeIfMinusZero(input, instr);
}
__ TryRepresentDoubleAsInt32(result, input, double_scratch());
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
if (instr->tag_result()) {
__ SmiTag(result.X());
__ LoadInstanceDescriptors(map, result);
__ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIfZero(result, instr->environment());
+ DeoptimizeIfZero(result, instr);
__ Bind(&done);
}
DCHECK(instr->IsMarkedAsCall());
DCHECK(object.Is(x0));
- DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex,
- instr->environment());
+ DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr);
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ Cmp(object, null_value);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
- DeoptimizeIfSmi(object, instr->environment());
+ DeoptimizeIfSmi(object, instr);
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
- DeoptimizeIf(le, instr->environment());
+ DeoptimizeIf(le, instr);
Label use_cache, call_runtime;
__ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
__ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
- DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr->environment());
+ DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr);
__ Bind(&use_cache);
}
__ Ldr(result, ContextMemOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
- instr->environment());
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr);
} else {
Label not_the_hole;
      __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
JSFunction::kPrototypeOrInitialMapOffset));
// Check that the function has a prototype or an initial map.
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
- instr->environment());
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr);
// If the function does not have an initial map, we're done.
Label done;
__ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
__ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
- DeoptimizeIfRoot(
- result, Heap::kTheHoleValueRootIndex, instr->environment());
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr);
}
}
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
// Deopt if value > 0x80000000.
__ Tst(result, 0xFFFFFFFF80000000);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
break;
case FLOAT32_ELEMENTS:
STATIC_ASSERT(kHoleNanInt64 == 0x7fffffffffffffff);
__ Ldr(scratch, mem_op);
__ Cmn(scratch, 1);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
}
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- DeoptimizeIfNotSmi(result, instr->environment());
+ DeoptimizeIfNotSmi(result, instr);
} else {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
- instr->environment());
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr);
}
}
}
Register result = r.IsSmi() ? ToRegister(instr->result())
: ToRegister32(instr->result());
__ Abs(result, input);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
}
// Deoptimize if the input is not a HeapNumber.
__ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
- DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex,
- instr->environment());
+ DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex, instr);
// If the argument is positive, we can return it as-is, without any need to
// allocate a new HeapNumber for the result. We have to do this in integer
Register result = ToRegister(instr->result());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIfMinusZero(input, instr->environment());
+ DeoptimizeIfMinusZero(input, instr);
}
__ Fcvtms(result, input);
__ Cmp(result, Operand(result, SXTW));
// - The input was not NaN.
__ Fccmp(input, input, NoFlag, eq);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
// If the divisor is negative, we have to negate and handle edge cases.
__ Negs(result, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
}
return;
}
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
- Deoptimize(instr->environment());
+ Deoptimize(instr);
return;
}
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr->environment());
+ DeoptimizeIfZero(dividend, instr);
}
// Easy case: We need no dynamic check for the dividend and the flooring
__ Sdiv(result, dividend, divisor);
// Check for x / 0.
- DeoptimizeIfZero(divisor, instr->environment());
+ DeoptimizeIfZero(divisor, instr);
// Check for (kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
// The V flag will be set iff dividend == kMinInt.
__ Cmp(dividend, 1);
__ Ccmp(divisor, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
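The kMinInt / -1 check above relies on a subtract-one trick; a sketch of why it is sufficient, assuming 32-bit operands as in the surrounding code (illustrative only, not part of the patch):

  // Cmp(dividend, 1) computes dividend - 1; the V flag is set only when that
  // subtraction overflows, i.e. only for dividend == kMinInt.
  // Ccmp(divisor, -1, NoFlag, vs) then compares divisor with -1 only if V was
  // set, and otherwise loads NoFlag so eq cannot hold.
  // DeoptimizeIf(eq, instr) thus deopts exactly for kMinInt / -1, the one
  // quotient that does not fit in an int32.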
// Check for (0 / -x) that will produce negative zero.
// "divisor" can't be null because the code would have already been
// deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
// In this case we need to deoptimize to produce a -0.
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
Label done;
__ JumpIfSmi(tagged_exponent, &no_deopt);
DCHECK(!x0.is(tagged_exponent));
__ Ldr(x0, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
- DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex,
- instr->environment());
+ DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex, instr);
__ Bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
// Deoptimize if the result > 1, as it must be larger than 32 bits.
__ Cmp(result, 1);
- DeoptimizeIf(hi, instr->environment());
+ DeoptimizeIf(hi, instr);
// Deoptimize for negative inputs, which at this point are only numbers in
// the range [-0.5, -0.0]
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Fmov(result, input);
- DeoptimizeIfNegative(result, instr->environment());
+ DeoptimizeIfNegative(result, instr);
}
// Deoptimize if the input was NaN.
__ Fcmp(input, dot_five);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
// Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
// if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
__ And(dividend, dividend, mask);
__ Negs(dividend, dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
}
__ B(&done);
}
DCHECK(!AreAliased(dividend, result, temp));
if (divisor == 0) {
- Deoptimize(instr->environment());
+ Deoptimize(instr);
return;
}
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Cbnz(result, &remainder_not_zero);
- DeoptimizeIfNegative(dividend, instr->environment());
+ DeoptimizeIfNegative(dividend, instr);
__ bind(&remainder_not_zero);
}
}
// modulo = dividend - quotient * divisor
__ Sdiv(result, dividend, divisor);
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr->environment());
+ DeoptimizeIfZero(divisor, instr);
}
__ Msub(result, result, divisor, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cbnz(result, &done);
- DeoptimizeIfNegative(dividend, instr->environment());
+ DeoptimizeIfNegative(dividend, instr);
}
__ Bind(&done);
}
if (bailout_on_minus_zero) {
if (right < 0) {
// The result is -0 if right is negative and left is zero.
- DeoptimizeIfZero(left, instr->environment());
+ DeoptimizeIfZero(left, instr);
} else if (right == 0) {
// The result is -0 if the right is zero and the left is negative.
- DeoptimizeIfNegative(left, instr->environment());
+ DeoptimizeIfNegative(left, instr);
}
}
if (can_overflow) {
// Only 0x80000000 can overflow here.
__ Negs(result, left);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
} else {
__ Neg(result, left);
}
case 2:
if (can_overflow) {
__ Adds(result, left, left);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
} else {
__ Add(result, left, left);
}
DCHECK(!AreAliased(scratch, left));
__ Cls(scratch, left);
__ Cmp(scratch, right_log2);
- DeoptimizeIf(lt, instr->environment());
+ DeoptimizeIf(lt, instr);
}
if (right >= 0) {
// result = -left << log2(-right)
if (can_overflow) {
__ Negs(result, Operand(left, LSL, right_log2));
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
} else {
__ Neg(result, Operand(left, LSL, right_log2));
}
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr);
}
if (can_overflow) {
__ Smull(result.X(), left, right);
__ Cmp(result.X(), Operand(result, SXTW));
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
} else {
__ Mul(result, left, right);
}
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(mi, instr);
}
STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
__ Smulh(result, left, right);
__ Cmp(result, Operand(result.W(), SXTW));
__ SmiTag(result);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
} else {
if (AreAliased(result, left, right)) {
// All three registers are the same: half untag the input and then
__ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
&convert_undefined);
} else {
- DeoptimizeIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
- instr->environment());
+ DeoptimizeIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex, instr);
}
// Load heap number.
__ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
if (instr->hydrogen()->deoptimize_on_minus_zero()) {
- DeoptimizeIfMinusZero(result, instr->environment());
+ DeoptimizeIfMinusZero(result, instr);
}
__ B(&done);
if (can_convert_undefined_to_nan) {
__ Bind(&convert_undefined);
- DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
- instr->environment());
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr);
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
Register output = ToRegister(instr->result());
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIfNegative(input.W(), instr->environment());
+ DeoptimizeIfNegative(input.W(), instr);
}
__ SmiTag(output, input);
}
Label done, untag;
if (instr->needs_check()) {
- DeoptimizeIfNotSmi(input, instr->environment());
+ DeoptimizeIfNotSmi(input, instr);
}
__ Bind(&untag);
if (instr->can_deopt()) {
// If `left >>> right` >= 0x80000000, the result is not representable
// in a signed 32-bit smi.
- DeoptimizeIfNegative(result, instr->environment());
+ DeoptimizeIfNegative(result, instr);
}
break;
default: UNREACHABLE();
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr->environment());
+ DeoptimizeIfNegative(left, instr);
}
__ Mov(result, left, kDiscardForSameWReg);
} else {
if (instr->can_deopt()) {
// If `left >>> right` >= 0x80000000, the result is not representable
// in a signed 32-bit smi.
- DeoptimizeIfNegative(result, instr->environment());
+ DeoptimizeIfNegative(result, instr);
}
break;
default: UNREACHABLE();
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr->environment());
+ DeoptimizeIfNegative(left, instr);
}
__ Mov(result, left);
} else {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ Ldr(scratch, target);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex,
- instr->environment());
+ DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr);
} else {
__ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
}
if (instr->hydrogen()->RequiresHoleCheck()) {
Register payload = ToRegister(instr->temp2());
__ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
- DeoptimizeIfRoot(
- payload, Heap::kTheHoleValueRootIndex, instr->environment());
+ DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr);
}
// Store the value.
if (can_overflow) {
__ Subs(result, left, right);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
} else {
__ Sub(result, left, right);
}
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Subs(result, left, right);
- DeoptimizeIf(vs, instr->environment());
+ DeoptimizeIf(vs, instr);
} else {
__ Sub(result, left, right);
}
// Output contains zero, undefined is converted to zero for truncating
// conversions.
- DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
- instr->environment());
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr);
} else {
Register output = ToRegister32(instr->result());
DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
__ RecordComment("Deferred TaggedToI: not a heap number");
- DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex,
- instr->environment());
+ DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, instr);
// A heap number: load value and convert to int32 using non-truncating
// function. If the result is out of range, branch to deoptimize.
__ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
__ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
__ RecordComment("Deferred TaggedToI: lost precision or NaN");
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cmp(output, 0);
__ B(ne, &done);
__ Fmov(scratch1, dbl_scratch1);
__ RecordComment("Deferred TaggedToI: minus zero");
- DeoptimizeIfNegative(scratch1, instr->environment());
+ DeoptimizeIfNegative(scratch1, instr);
}
}
__ Bind(&done);
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
- DeoptimizeIf(eq, instr->environment());
+ DeoptimizeIf(eq, instr);
__ Bind(&no_memento_found);
}
Register temp = ToRegister(instr->temp());
__ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
__ Cmp(map, temp);
- DeoptimizeIf(ne, instr->environment());
+ DeoptimizeIf(ne, instr);
}
__ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
// Deoptimize if the receiver is not a JS object.
- DeoptimizeIfSmi(receiver, instr->environment());
+ DeoptimizeIfSmi(receiver, instr);
__ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
  __ B(ge, &copy_receiver);
- Deoptimize(instr->environment());
+ Deoptimize(instr);
__ Bind(&global_object);
__ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
Register temp,
LOperand* index,
String::Encoding encoding);
- void DeoptimizeBranch(
- LEnvironment* environment,
- BranchType branch_type, Register reg = NoReg, int bit = -1,
- Deoptimizer::BailoutType* override_bailout_type = NULL);
- void Deoptimize(LEnvironment* environment,
+ void DeoptimizeBranch(LInstruction* instr, BranchType branch_type,
+ Register reg = NoReg, int bit = -1,
+ Deoptimizer::BailoutType* override_bailout_type = NULL);
+ void Deoptimize(LInstruction* instr,
Deoptimizer::BailoutType* override_bailout_type = NULL);
- void DeoptimizeIf(Condition cond, LEnvironment* environment);
- void DeoptimizeIfZero(Register rt, LEnvironment* environment);
- void DeoptimizeIfNotZero(Register rt, LEnvironment* environment);
- void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
- void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
- void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
- void DeoptimizeIfRoot(Register rt,
- Heap::RootListIndex index,
- LEnvironment* environment);
- void DeoptimizeIfNotRoot(Register rt,
- Heap::RootListIndex index,
- LEnvironment* environment);
- void DeoptimizeIfMinusZero(DoubleRegister input, LEnvironment* environment);
- void DeoptimizeIfBitSet(Register rt, int bit, LEnvironment* environment);
- void DeoptimizeIfBitClear(Register rt, int bit, LEnvironment* environment);
+ void DeoptimizeIf(Condition cond, LInstruction* instr);
+ void DeoptimizeIfZero(Register rt, LInstruction* instr);
+ void DeoptimizeIfNotZero(Register rt, LInstruction* instr);
+ void DeoptimizeIfNegative(Register rt, LInstruction* instr);
+ void DeoptimizeIfSmi(Register rt, LInstruction* instr);
+ void DeoptimizeIfNotSmi(Register rt, LInstruction* instr);
+ void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
+ LInstruction* instr);
+ void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
+ LInstruction* instr);
+ void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr);
+ void DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr);
+ void DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr);
MemOperand PrepareKeyedExternalArrayOperand(Register key,
Register base,
}
-void LCodeGen::DeoptimizeIf(Condition cc,
- LEnvironment* environment,
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
Deoptimizer::BailoutType bailout_type) {
+ LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
}
-void LCodeGen::DeoptimizeIf(Condition cc,
- LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(cc, environment, bailout_type);
+ DeoptimizeIf(cc, instr, bailout_type);
}
__ and_(dividend, mask);
__ neg(dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
__ jmp(&done, Label::kNear);
}
DCHECK(ToRegister(instr->result()).is(eax));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
return;
}
Label remainder_not_zero;
__ j(not_zero, &remainder_not_zero, Label::kNear);
__ cmp(dividend, Immediate(0));
- DeoptimizeIf(less, instr->environment());
+ DeoptimizeIf(less, instr);
__ bind(&remainder_not_zero);
}
}
// deopt in this case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(right_reg, Operand(right_reg));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
// Check for kMinInt % -1, idiv would signal a divide error. We
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ cmp(right_reg, -1);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
} else {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ Move(result_reg, Immediate(0));
__ j(not_sign, &positive_left, Label::kNear);
__ idiv(right_reg);
__ test(result_reg, Operand(result_reg));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
__ jmp(&done, Label::kNear);
__ bind(&positive_left);
}
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, kMinInt);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ test(dividend, Immediate(mask));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr);
}
__ Move(result, dividend);
int32_t shift = WhichPowerOf2Abs(divisor);
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
return;
}
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
__ TruncatingDiv(dividend, Abs(divisor));
__ mov(eax, edx);
__ imul(eax, eax, divisor);
__ sub(eax, dividend);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
}
}
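A brief note on the multiply-back check above, assuming TruncatingDiv leaves its quotient in edx as the surrounding moves imply (illustrative only, not part of the patch):

  // eax = quotient; then eax = quotient * divisor; then eax -= dividend.
  // The result is zero exactly when the constant division was exact, so a
  // non-zero result (not_equal after the sub) means a remainder was dropped
  // and DeoptimizeIf(not_equal, instr) bails out.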
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
// Check for (0 / -x) that will produce negative zero.
__ test(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr);
    __ bind(&dividend_not_zero);
}
__ cmp(dividend, kMinInt);
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
    __ bind(&dividend_not_min_int);
}
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ test(remainder, remainder);
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr);
}
}
// If the divisor is negative, we have to negate and handle edge cases.
__ neg(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
}
return;
}
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
return;
}
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
// Easy case: We need no dynamic check for the dividend and the flooring
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
// Check for (0 / -x) that will produce negative zero.
__ test(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr);
    __ bind(&dividend_not_zero);
}
__ cmp(dividend, kMinInt);
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
    __ bind(&dividend_not_min_int);
}
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmp(ToRegister(instr->temp()), Immediate(0));
- DeoptimizeIf(less, instr->environment());
+ DeoptimizeIf(less, instr);
}
} else {
// Test the non-zero operand for negative sign.
__ or_(ToRegister(instr->temp()), ToOperand(right));
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr);
}
__ bind(&done);
}
__ ror_cl(ToRegister(left));
if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr);
}
break;
case Token::SAR:
__ shr_cl(ToRegister(left));
if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr);
}
break;
case Token::SHL:
case Token::ROR:
if (shift_count == 0 && instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr);
} else {
__ ror(ToRegister(left), shift_count);
}
__ shr(ToRegister(left), shift_count);
} else if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr);
}
break;
case Token::SHL:
__ shl(ToRegister(left), shift_count - 1);
}
__ SmiTag(ToRegister(left));
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
} else {
__ shl(ToRegister(left), shift_count);
}
__ sub(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
}
}
DCHECK(object.is(eax));
__ test(object, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
__ CmpObjectType(object, JS_DATE_TYPE, scratch);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
if (index->value() == 0) {
__ mov(result, FieldOperand(object, JSDate::kValueOffset));
__ add(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
}
}
}
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ test(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
Register map = no_reg; // Keep the compiler happy.
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
}
}
}
__ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
}
}
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
}
// Store the value.
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(target, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
} else {
__ j(not_equal, &skip_assignment, Label::kNear);
}
// Check that the function has a prototype or an initial map.
__ cmp(Operand(result), Immediate(factory()->the_hole_value()));
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
// If the function does not have an initial map, we're done.
Label done;
__ mov(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ test(result, Operand(result));
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr);
}
break;
case EXTERNAL_FLOAT32_ELEMENTS:
FAST_DOUBLE_ELEMENTS,
instr->base_offset() + sizeof(kHoleNanLower32));
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
}
Operand double_load_operand = BuildFastArrayOperand(
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
} else {
__ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
}
}
}
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
- DeoptimizeIf(below, instr->environment());
+ DeoptimizeIf(below, instr);
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, kArgumentsLimit);
- DeoptimizeIf(above, instr->environment());
+ DeoptimizeIf(above, instr);
__ push(receiver);
__ mov(receiver, length);
Register input_reg = ToRegister(instr->value());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
Label slow, allocated, done;
Register tmp = input_reg.is(eax) ? ecx : eax;
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ neg(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr);
__ bind(&is_positive);
}
__ j(not_equal, &non_zero, Label::kNear);
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr);
__ bind(&non_zero);
}
__ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x1);
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
} else {
Label negative_sign, done;
// Deoptimize on unordered.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(parity_even, instr->environment());
+ DeoptimizeIf(parity_even, instr);
__ j(below, &negative_sign, Label::kNear);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ j(above, &positive_sign, Label::kNear);
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr);
__ Move(output_reg, Immediate(0));
__ jmp(&done, Label::kNear);
__ bind(&positive_sign);
__ cvttsd2si(output_reg, Operand(input_reg));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x1);
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ sub(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
__ bind(&done);
}
// Overflow is signalled with minint.
__ cmp(output_reg, 0x1);
__ RecordComment("D2I conversion overflow");
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
__ jmp(&done, dist);
__ bind(&below_one_half);
// Catch minint due to overflow, and to prevent overflow when compensating.
__ cmp(output_reg, 0x1);
__ RecordComment("D2I conversion overflow");
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
__ Cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
__ RecordComment("Minus zero");
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr);
}
__ Move(output_reg, Immediate(0));
__ bind(&done);
__ JumpIfSmi(tagged_exponent, &no_deopt);
DCHECK(!ecx.is(tagged_exponent));
__ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, ecx);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
__ int3();
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr->environment());
+ DeoptimizeIf(cc, instr);
}
}
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
__ bind(&no_memento_found);
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ test(input, Immediate(0xc0000000));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr);
}
__ SmiTag(input);
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
}
}
DCHECK(input->IsRegister() && input->Equals(instr->result()));
if (instr->needs_check()) {
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr);
} else {
__ AssertSmi(result);
}
}
-void LCodeGen::EmitNumberUntagD(Register input_reg,
- Register temp_reg,
- XMMRegister result_reg,
- bool can_convert_undefined_to_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
+ Register temp_reg, XMMRegister result_reg,
NumberUntagDMode mode) {
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+ bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
if (can_convert_undefined_to_nan) {
__ j(not_equal, &convert, Label::kNear);
} else {
- DeoptimizeIf(not_equal, env);
+ DeoptimizeIf(not_equal, instr);
}
// Heap number to XMM conversion.
__ j(not_zero, &done, Label::kNear);
__ movmskpd(temp_reg, result_reg);
__ test_b(temp_reg, 1);
- DeoptimizeIf(not_zero, env);
+ DeoptimizeIf(not_zero, instr);
}
__ jmp(&done, Label::kNear);
// Convert undefined (and hole) to NaN.
__ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, env);
+ DeoptimizeIf(not_equal, instr);
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
__ bind(&check_false);
__ cmp(input_reg, factory()->false_value());
__ RecordComment("Deferred TaggedToI: cannot truncate");
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
__ Move(input_reg, Immediate(0));
} else {
XMMRegister scratch = ToDoubleRegister(instr->temp());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
__ RecordComment("Deferred TaggedToI: not a heap number");
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
__ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, Operand(xmm0));
__ Cvtsi2sd(scratch, Operand(input_reg));
__ ucomisd(xmm0, scratch);
__ RecordComment("Deferred TaggedToI: lost precision");
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
__ RecordComment("Deferred TaggedToI: NaN");
- DeoptimizeIf(parity_even, instr->environment());
+ DeoptimizeIf(parity_even, instr);
if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
__ test(input_reg, Operand(input_reg));
__ j(not_zero, done);
__ movmskpd(input_reg, xmm0);
__ and_(input_reg, 1);
__ RecordComment("Deferred TaggedToI: minus zero");
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr);
}
}
}
DCHECK(result->IsDoubleRegister());
Register input_reg = ToRegister(input);
- bool deoptimize_on_minus_zero =
- instr->hydrogen()->deoptimize_on_minus_zero();
Register temp_reg = ToRegister(temp);
HValue* value = instr->hydrogen()->value();
? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
XMMRegister result_reg = ToDoubleRegister(result);
- EmitNumberUntagD(input_reg,
- temp_reg,
- result_reg,
- instr->hydrogen()->can_convert_undefined_to_nan(),
- deoptimize_on_minus_zero,
- instr->environment(),
- mode);
+ EmitNumberUntagD(instr, input_reg, temp_reg, result_reg, mode);
}
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
__ jmp(&done, Label::kNear);
__ bind(&bailout);
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
__ bind(&done);
}
}
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
__ jmp(&done, Label::kNear);
__ bind(&bailout);
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
__ bind(&done);
__ SmiTag(result_reg);
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr);
}
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
}
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
} else {
- DeoptimizeIf(below, instr->environment());
+ DeoptimizeIf(below, instr);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
static_cast<int8_t>(last));
- DeoptimizeIf(above, instr->environment());
+ DeoptimizeIf(above, instr);
}
}
} else {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
- DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
+ DeoptimizeIf(tag == 0 ? not_zero : zero, instr);
} else {
__ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
__ and_(temp, mask);
__ cmp(temp, tag);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
}
}
}
Operand operand = ToOperand(instr->value());
__ cmp(operand, object);
}
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
}
__ test(eax, Immediate(kSmiTagMask));
}
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
}
__ bind(&success);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
__ mov(input_reg, 0);
__ jmp(&done, Label::kNear);
type = Deoptimizer::LAZY;
}
Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
- DeoptimizeIf(no_condition, instr->environment(), type);
+ DeoptimizeIf(no_condition, instr, type);
}
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
__ cmp(eax, isolate()->factory()->undefined_value());
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
__ cmp(eax, isolate()->factory()->null_value());
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
__ test(eax, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
- DeoptimizeIf(below_equal, instr->environment());
+ DeoptimizeIf(below_equal, instr);
Label use_cache, call_runtime;
__ CheckEnumCache(&call_runtime);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
__ bind(&use_cache);
}
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
__ test(result, result);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
}
Register object = ToRegister(instr->value());
__ cmp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
}
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc,
- LEnvironment* environment,
+ void DeoptimizeIf(Condition cc, LInstruction* instr,
Deoptimizer::BailoutType bailout_type);
- void DeoptimizeIf(Condition cc, LEnvironment* environment);
+ void DeoptimizeIf(Condition cc, LInstruction* instr);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
void EmitBranch(InstrType instr, Condition cc);
template<class InstrType>
void EmitFalseBranch(InstrType instr, Condition cc);
- void EmitNumberUntagD(
- Register input,
- Register temp,
- XMMRegister result,
- bool allow_undefined_as_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
+ void EmitNumberUntagD(LNumberUntagD* instr, Register input, Register temp,
+ XMMRegister result, NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
}
-void LCodeGen::DeoptimizeIf(Condition condition,
- LEnvironment* environment,
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::BailoutType bailout_type,
- Register src1,
- const Operand& src2) {
+ Register src1, const Operand& src2) {
+ LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
}
-void LCodeGen::DeoptimizeIf(Condition condition,
- LEnvironment* environment,
- Register src1,
- const Operand& src2) {
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+ Register src1, const Operand& src2) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(condition, environment, bailout_type, src1, src2);
+ DeoptimizeIf(condition, instr, bailout_type, src1, src2);
}
__ subu(dividend, zero_reg, dividend);
__ And(dividend, dividend, Operand(mask));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
}
__ Branch(USE_DELAY_SLOT, &done);
__ subu(dividend, zero_reg, dividend);
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
return;
}
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
- DeoptimizeIf(lt, instr->environment(), dividend, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, dividend, Operand(zero_reg));
__ bind(&remainder_not_zero);
}
}
// Check for x % 0, we have to deopt in this case because we can't return a
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, right_reg, Operand(zero_reg));
}
// Check for kMinInt % -1, div will return kMinInt, which is not what we
Label no_overflow_possible;
__ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
+ DeoptimizeIf(eq, instr, right_reg, Operand(-1));
} else {
__ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
__ Branch(USE_DELAY_SLOT, &done);
// If we care about -0, test if the dividend is <0 and the result is 0.
__ Branch(&done, ge, left_reg, Operand(zero_reg));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, result_reg, Operand(zero_reg));
}
__ bind(&done);
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
- DeoptimizeIf(eq, instr->environment(), dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr, dividend, Operand(kMinInt));
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ And(at, dividend, Operand(mask));
- DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, at, Operand(zero_reg));
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
return;
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
}
__ TruncatingDiv(result, dividend, Abs(divisor));
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
__ Mul(scratch0(), result, Operand(divisor));
__ Subu(scratch0(), scratch0(), dividend);
- DeoptimizeIf(ne, instr->environment(), scratch0(), Operand(zero_reg));
+ DeoptimizeIf(ne, instr, scratch0(), Operand(zero_reg));
}
}
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, divisor, Operand(zero_reg));
}
// Check for (0 / -x) that will produce negative zero.
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, divisor, Operand(zero_reg));
__ bind(&left_not_zero);
}
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, divisor, Operand(-1));
__ bind(&left_not_min_int);
}
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- DeoptimizeIf(ne, instr->environment(), remainder, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, remainder, Operand(zero_reg));
}
}
__ Subu(result, zero_reg, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, result, Operand(zero_reg));
}
// Dividing by -1 is basically negation, unless we overflow.
__ Xor(scratch, scratch, result);
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(ge, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(ge, instr, scratch, Operand(zero_reg));
}
return;
}
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
return;
}
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
}
// Easy case: We need no dynamic check for the dividend and the flooring
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, divisor, Operand(zero_reg));
}
// Check for (0 / -x) that will produce negative zero.
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, divisor, Operand(zero_reg));
__ bind(&left_not_zero);
}
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, divisor, Operand(-1));
__ bind(&left_not_min_int);
}
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
- DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, left, Operand(zero_reg));
}
switch (constant) {
case -1:
if (overflow) {
__ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
- DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, scratch, Operand(zero_reg));
} else {
__ Subu(result, zero_reg, left);
}
if (bailout_on_minus_zero) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
- DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, left, Operand(zero_reg));
}
__ mov(result, zero_reg);
break;
__ Mul(scratch, result, left, right);
}
__ sra(at, result, 31);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+ DeoptimizeIf(ne, instr, scratch, Operand(at));
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
__ Xor(at, left, right);
__ Branch(&done, ge, at, Operand(zero_reg));
// Bail out if the result is minus zero.
- DeoptimizeIf(eq,
- instr->environment(),
- result,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, result, Operand(zero_reg));
__ bind(&done);
}
}
case Token::SHR:
__ srlv(result, left, ToRegister(right_op));
if (instr->can_deopt()) {
- DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, result, Operand(zero_reg));
}
break;
case Token::SHL:
} else {
if (instr->can_deopt()) {
__ And(at, left, Operand(0x80000000));
- DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, at, Operand(zero_reg));
}
__ Move(result, left);
}
} else {
__ SmiTagCheckOverflow(result, left, scratch);
}
- DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, scratch, Operand(zero_reg));
} else {
__ sll(result, left, shift_count);
}
ToRegister(right),
overflow); // Reg at also used as scratch.
}
- DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, overflow, Operand(zero_reg));
}
}
DCHECK(!scratch.is(object));
__ SmiTst(object, at);
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, at, Operand(zero_reg));
__ GetObjectType(object, scratch, scratch);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));
+ DeoptimizeIf(ne, instr, scratch, Operand(JS_DATE_TYPE));
if (index->value() == 0) {
__ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
ToRegister(right),
overflow); // Reg at also used as scratch.
}
- DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, overflow, Operand(zero_reg));
}
}
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg, at);
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, at, Operand(zero_reg));
}
const Register map = scratch0();
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+ DeoptimizeIf(al, instr, zero_reg, Operand(zero_reg));
}
}
}
__ lw(result, FieldMemOperand(at, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+ DeoptimizeIf(eq, instr, result, Operand(at));
}
}
Register payload = ToRegister(instr->temp());
__ lw(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
+ DeoptimizeIf(eq, instr, payload, Operand(at));
}
// Store the value.
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+ DeoptimizeIf(eq, instr, result, Operand(at));
} else {
Label is_not_hole;
__ Branch(&is_not_hole, ne, result, Operand(at));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
+ DeoptimizeIf(eq, instr, scratch, Operand(at));
} else {
__ Branch(&skip_assignment, ne, scratch, Operand(at));
}
// Check that the function has a prototype or an initial map.
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+ DeoptimizeIf(eq, instr, result, Operand(at));
// If the function does not have an initial map, we're done.
Label done;
case UINT32_ELEMENTS:
__ lw(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- DeoptimizeIf(Ugreater_equal, instr->environment(),
- result, Operand(0x80000000));
+ DeoptimizeIf(Ugreater_equal, instr, result, Operand(0x80000000));
}
break;
case FLOAT32_ELEMENTS:
if (instr->hydrogen()->RequiresHoleCheck()) {
__ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr, scratch, Operand(kHoleNanUpper32));
}
}
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result, scratch);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, scratch, Operand(zero_reg));
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
+ DeoptimizeIf(eq, instr, result, Operand(scratch));
}
}
}
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver, scratch);
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, scratch, Operand(zero_reg));
__ GetObjectType(receiver, scratch, scratch);
- DeoptimizeIf(lt, instr->environment(),
- scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
+ DeoptimizeIf(lt, instr, scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
__ Branch(&result_in_receiver);
__ bind(&global_object);
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
- DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
+ DeoptimizeIf(hi, instr, length, Operand(kArgumentsLimit));
// Push the receiver and use the register to keep the original
// number of arguments.
// Deoptimize if not a heap number.
__ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+ DeoptimizeIf(ne, instr, scratch, Operand(at));
Label done;
Register exponent = scratch0();
__ mov(result, input);
__ subu(result, zero_reg, input);
// Overflow if result is still negative, i.e. 0x80000000.
- DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, result, Operand(zero_reg));
__ bind(&done);
}
except_flag);
// Deopt if the operation did not succeed.
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
__ Branch(&done, ne, result, Operand(zero_reg));
__ Mfhc1(scratch1, input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
__ bind(&done);
}
}
// The following conversion will not work with numbers
// outside of ]-2^32, 2^32[.
- DeoptimizeIf(ge, instr->environment(), scratch,
- Operand(HeapNumber::kExponentBias + 32));
+ DeoptimizeIf(ge, instr, scratch, Operand(HeapNumber::kExponentBias + 32));
// Save the original sign for later comparison.
__ And(scratch, result, Operand(HeapNumber::kSignMask));
__ Xor(result, result, Operand(scratch));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// ARM uses 'mi' here, which is 'lt'
- DeoptimizeIf(lt, instr->environment(), result,
- Operand(zero_reg));
+ DeoptimizeIf(lt, instr, result, Operand(zero_reg));
} else {
Label skip2;
// ARM uses 'mi' here, which is 'lt'
double_scratch1,
except_flag);
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
__ bind(&check_sign_on_zero);
__ Mfhc1(scratch, input);
__ And(scratch, scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, scratch, Operand(zero_reg));
}
__ bind(&done);
}
DCHECK(!t3.is(tagged_exponent));
__ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
+ DeoptimizeIf(ne, instr, t3, Operand(at));
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr->environment(), reg, operand);
+ DeoptimizeIf(cc, instr, reg, operand);
}
}
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
ne, &no_memento_found);
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
__ bind(&no_memento_found);
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ And(at, input, Operand(0xc0000000));
- DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, at, Operand(zero_reg));
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTagCheckOverflow(output, input, at);
- DeoptimizeIf(lt, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, at, Operand(zero_reg));
} else {
__ SmiTag(output, input);
}
// If the input is a HeapObject, value of scratch won't be zero.
__ And(scratch, input, Operand(kHeapObjectTag));
__ SmiUntag(result, input);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, scratch, Operand(zero_reg));
} else {
__ SmiUntag(result, input);
}
}
-void LCodeGen::EmitNumberUntagD(Register input_reg,
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
DoubleRegister result_reg,
- bool can_convert_undefined_to_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
NumberUntagDMode mode) {
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+ bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
Register scratch = scratch0();
Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
if (can_convert_undefined_to_nan) {
__ Branch(&convert, ne, scratch, Operand(at));
} else {
- DeoptimizeIf(ne, env, scratch, Operand(at));
+ DeoptimizeIf(ne, instr, scratch, Operand(at));
}
// Load heap number.
__ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
__ mfc1(at, result_reg.low());
__ Branch(&done, ne, at, Operand(zero_reg));
__ Mfhc1(scratch, result_reg);
- DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(eq, instr, scratch, Operand(HeapNumber::kSignMask));
}
__ Branch(&done);
if (can_convert_undefined_to_nan) {
__ bind(&convert);
// Convert undefined (and hole) to NaN.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, env, input_reg, Operand(at));
+ DeoptimizeIf(ne, instr, input_reg, Operand(at));
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
__ Branch(&done);
__ bind(&check_false);
__ LoadRoot(at, Heap::kFalseValueRootIndex);
__ RecordComment("Deferred TaggedToI: cannot truncate");
- DeoptimizeIf(ne, instr->environment(), scratch2, Operand(at));
+ DeoptimizeIf(ne, instr, scratch2, Operand(at));
__ Branch(USE_DELAY_SLOT, &done);
__ mov(input_reg, zero_reg); // In delay slot.
} else {
__ RecordComment("Deferred TaggedToI: not a heap number");
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
+ DeoptimizeIf(ne, instr, scratch1, Operand(at));
// Load the double value.
__ ldc1(double_scratch,
kCheckForInexactConversion);
__ RecordComment("Deferred TaggedToI: lost precision or NaN");
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Branch(&done, ne, input_reg, Operand(zero_reg));
__ Mfhc1(scratch1, double_scratch);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
__ RecordComment("Deferred TaggedToI: minus zero");
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
}
}
__ bind(&done);
NumberUntagDMode mode = value->representation().IsSmi()
? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
- EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->can_convert_undefined_to_nan(),
- instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment(),
- mode);
+ EmitNumberUntagD(instr, input_reg, result_reg, mode);
}
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ Mfhc1(scratch1, double_input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
__ bind(&done);
}
}
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ Mfhc1(scratch1, double_input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
__ bind(&done);
}
}
__ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
- DeoptimizeIf(lt, instr->environment(), scratch1, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, scratch1, Operand(zero_reg));
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
- DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, at, Operand(zero_reg));
}
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, at, Operand(zero_reg));
}
}
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
+ DeoptimizeIf(ne, instr, scratch, Operand(first));
} else {
- DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
+ DeoptimizeIf(lo, instr, scratch, Operand(first));
// Omit check for the last type.
if (last != LAST_TYPE) {
- DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
+ DeoptimizeIf(hi, instr, scratch, Operand(last));
}
}
} else {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ And(at, scratch, mask);
- DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
- at, Operand(zero_reg));
+ DeoptimizeIf(tag == 0 ? ne : eq, instr, at, Operand(zero_reg));
} else {
__ And(scratch, scratch, Operand(mask));
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
+ DeoptimizeIf(ne, instr, scratch, Operand(tag));
}
}
}
Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ li(at, Operand(Handle<Object>(cell)));
__ lw(at, FieldMemOperand(at, Cell::kValueOffset));
- DeoptimizeIf(ne, instr->environment(), reg,
- Operand(at));
+ DeoptimizeIf(ne, instr, reg, Operand(at));
} else {
- DeoptimizeIf(ne, instr->environment(), reg,
- Operand(object));
+ DeoptimizeIf(ne, instr, reg, Operand(object));
}
}
__ StoreToSafepointRegisterSlot(v0, scratch0());
}
__ SmiTst(scratch0(), at);
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, at, Operand(zero_reg));
}
if (instr->hydrogen()->HasMigrationTarget()) {
__ Branch(deferred->entry(), ne, map_reg, Operand(map));
} else {
- DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map));
+ DeoptimizeIf(ne, instr, map_reg, Operand(map));
}
__ bind(&success);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
- DeoptimizeIf(ne, instr->environment(), input_reg,
- Operand(factory()->undefined_value()));
+ DeoptimizeIf(ne, instr, input_reg, Operand(factory()->undefined_value()));
__ mov(result_reg, zero_reg);
__ jmp(&done);
}
Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
- DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
+ DeoptimizeIf(al, instr, type, zero_reg, Operand(zero_reg));
}
Register result = ToRegister(instr->result());
Register object = ToRegister(instr->object());
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), object, Operand(at));
+ DeoptimizeIf(eq, instr, object, Operand(at));
Register null_value = t1;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));
+ DeoptimizeIf(eq, instr, object, Operand(null_value));
__ And(at, object, kSmiTagMask);
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, at, Operand(zero_reg));
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ GetObjectType(object, a1, a1);
- DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));
+ DeoptimizeIf(le, instr, a1, Operand(LAST_JS_PROXY_TYPE));
Label use_cache, call_runtime;
DCHECK(object.is(a0));
__ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
DCHECK(result.is(v0));
__ LoadRoot(at, Heap::kMetaMapRootIndex);
- DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
+ DeoptimizeIf(ne, instr, a1, Operand(at));
__ bind(&use_cache);
}
FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ lw(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, result, Operand(zero_reg));
__ bind(&done);
}
Register object = ToRegister(instr->value());
Register map = ToRegister(instr->map());
__ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
+ DeoptimizeIf(ne, instr, map, Operand(scratch0()));
}
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition condition,
- LEnvironment* environment,
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::BailoutType bailout_type,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
- void DeoptimizeIf(Condition condition,
- LEnvironment* environment,
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
FPURegister src1,
FPURegister src2);
void EmitCmpI(LOperand* left, LOperand* right);
- void EmitNumberUntagD(Register input,
- DoubleRegister result,
- bool allow_undefined_as_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode);
+ void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+ DoubleRegister result, NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
}
-void LCodeGen::DeoptimizeIf(Condition condition,
- LEnvironment* environment,
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::BailoutType bailout_type,
- Register src1,
- const Operand& src2) {
+ Register src1, const Operand& src2) {
+ LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
}
-void LCodeGen::DeoptimizeIf(Condition condition,
- LEnvironment* environment,
- Register src1,
- const Operand& src2) {
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+ Register src1, const Operand& src2) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(condition, environment, bailout_type, src1, src2);
+ DeoptimizeIf(condition, instr, bailout_type, src1, src2);
}
__ dsubu(dividend, zero_reg, dividend);
__ And(dividend, dividend, Operand(mask));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
}
__ Branch(USE_DELAY_SLOT, &done);
__ dsubu(dividend, zero_reg, dividend);
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
return;
}
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
- DeoptimizeIf(lt, instr->environment(), dividend, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, dividend, Operand(zero_reg));
__ bind(&remainder_not_zero);
}
}
// Check for x % 0, we have to deopt in this case because we can't return a
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, right_reg, Operand(zero_reg));
}
// Check for kMinInt % -1, div will return kMinInt, which is not what we
Label no_overflow_possible;
__ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
+ DeoptimizeIf(eq, instr, right_reg, Operand(-1));
} else {
__ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
__ Branch(USE_DELAY_SLOT, &done);
__ Branch(&done, ge, left_reg, Operand(zero_reg));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, result_reg, Operand(zero_reg));
}
__ bind(&done);
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
- DeoptimizeIf(eq, instr->environment(), dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr, dividend, Operand(kMinInt));
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ And(at, dividend, Operand(mask));
- DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, at, Operand(zero_reg));
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
return;
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
}
__ TruncatingDiv(result, dividend, Abs(divisor));
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
__ Dmul(scratch0(), result, Operand(divisor));
__ Dsubu(scratch0(), scratch0(), dividend);
- DeoptimizeIf(ne, instr->environment(), scratch0(), Operand(zero_reg));
+ DeoptimizeIf(ne, instr, scratch0(), Operand(zero_reg));
}
}
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, divisor, Operand(zero_reg));
}
// Check for (0 / -x) that will produce negative zero.
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, divisor, Operand(zero_reg));
__ bind(&left_not_zero);
}
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, divisor, Operand(-1));
__ bind(&left_not_min_int);
}
} else {
__ dmod(remainder, dividend, divisor);
}
- DeoptimizeIf(ne, instr->environment(), remainder, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, remainder, Operand(zero_reg));
}
}
__ Dsubu(result, zero_reg, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, result, Operand(zero_reg));
}
__ Xor(scratch, scratch, result);
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(gt, instr->environment(), result, Operand(kMaxInt));
+ DeoptimizeIf(gt, instr, result, Operand(kMaxInt));
}
return;
}
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
return;
}
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
}
// Easy case: We need no dynamic check for the dividend and the flooring
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, divisor, Operand(zero_reg));
}
// Check for (0 / -x) that will produce negative zero.
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, divisor, Operand(zero_reg));
__ bind(&left_not_zero);
}
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, divisor, Operand(-1));
__ bind(&left_not_min_int);
}
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
- DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, left, Operand(zero_reg));
}
switch (constant) {
case -1:
if (overflow) {
__ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
- DeoptimizeIf(gt, instr->environment(), scratch, Operand(kMaxInt));
+ DeoptimizeIf(gt, instr, scratch, Operand(kMaxInt));
} else {
__ Dsubu(result, zero_reg, left);
}
if (bailout_on_minus_zero) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
- DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, left, Operand(zero_reg));
}
__ mov(result, zero_reg);
break;
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiTag(result);
}
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+ DeoptimizeIf(ne, instr, scratch, Operand(at));
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
__ Xor(at, left, right);
__ Branch(&done, ge, at, Operand(zero_reg));
// Bail out if the result is minus zero.
- DeoptimizeIf(eq,
- instr->environment(),
- result,
- Operand(zero_reg));
+ DeoptimizeIf(eq, instr, result, Operand(zero_reg));
__ bind(&done);
}
}
__ srlv(result, left, ToRegister(right_op));
if (instr->can_deopt()) {
// TODO(yy): (-1) >>> 0. anything else?
- DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
- DeoptimizeIf(gt, instr->environment(), result, Operand(kMaxInt));
+ DeoptimizeIf(lt, instr, result, Operand(zero_reg));
+ DeoptimizeIf(gt, instr, result, Operand(kMaxInt));
}
break;
case Token::SHL:
} else {
if (instr->can_deopt()) {
__ And(at, left, Operand(0x80000000));
- DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, at, Operand(zero_reg));
}
__ Move(result, left);
}
ToRegister(right),
overflow); // Reg at also used as scratch.
}
- DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, overflow, Operand(zero_reg));
if (!instr->hydrogen()->representation().IsSmi()) {
- DeoptimizeIf(gt, instr->environment(),
- ToRegister(result), Operand(kMaxInt));
- DeoptimizeIf(lt, instr->environment(),
- ToRegister(result), Operand(kMinInt));
+ DeoptimizeIf(gt, instr, ToRegister(result), Operand(kMaxInt));
+ DeoptimizeIf(lt, instr, ToRegister(result), Operand(kMinInt));
}
}
}
DCHECK(!scratch.is(object));
__ SmiTst(object, at);
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, at, Operand(zero_reg));
__ GetObjectType(object, scratch, scratch);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));
+ DeoptimizeIf(ne, instr, scratch, Operand(JS_DATE_TYPE));
if (index->value() == 0) {
__ ld(result, FieldMemOperand(object, JSDate::kValueOffset));
ToRegister(right),
overflow); // Reg at also used as scratch.
}
- DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, overflow, Operand(zero_reg));
// if not smi, it must int32.
if (!instr->hydrogen()->representation().IsSmi()) {
- DeoptimizeIf(gt, instr->environment(),
- ToRegister(result), Operand(kMaxInt));
- DeoptimizeIf(lt, instr->environment(),
- ToRegister(result), Operand(kMinInt));
+ DeoptimizeIf(gt, instr, ToRegister(result), Operand(kMaxInt));
+ DeoptimizeIf(lt, instr, ToRegister(result), Operand(kMinInt));
}
}
}
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg, at);
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, at, Operand(zero_reg));
}
const Register map = scratch0();
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+ DeoptimizeIf(al, instr, zero_reg, Operand(zero_reg));
}
}
}
__ ld(result, FieldMemOperand(at, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+ DeoptimizeIf(eq, instr, result, Operand(at));
}
}
Register payload = ToRegister(instr->temp());
__ ld(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
+ DeoptimizeIf(eq, instr, payload, Operand(at));
}
// Store the value.
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+ DeoptimizeIf(eq, instr, result, Operand(at));
} else {
Label is_not_hole;
__ Branch(&is_not_hole, ne, result, Operand(at));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
+ DeoptimizeIf(eq, instr, scratch, Operand(at));
} else {
__ Branch(&skip_assignment, ne, scratch, Operand(at));
}
// Check that the function has a prototype or an initial map.
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+ DeoptimizeIf(eq, instr, result, Operand(at));
// If the function does not have an initial map, we're done.
Label done;
case UINT32_ELEMENTS:
__ lw(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- DeoptimizeIf(Ugreater_equal, instr->environment(),
- result, Operand(0x80000000));
+ DeoptimizeIf(Ugreater_equal, instr, result, Operand(0x80000000));
}
break;
case FLOAT32_ELEMENTS:
if (instr->hydrogen()->RequiresHoleCheck()) {
__ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr, scratch, Operand(kHoleNanUpper32));
}
}
if (hinstr->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result, scratch);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, scratch, Operand(zero_reg));
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
+ DeoptimizeIf(eq, instr, result, Operand(scratch));
}
}
}
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver, scratch);
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, scratch, Operand(zero_reg));
__ GetObjectType(receiver, scratch, scratch);
- DeoptimizeIf(lt, instr->environment(),
- scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
+ DeoptimizeIf(lt, instr, scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
__ Branch(&result_in_receiver);
__ bind(&global_object);
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
- DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
+ DeoptimizeIf(hi, instr, length, Operand(kArgumentsLimit));
// Push the receiver and use the register to keep the original
// number of arguments.
// Deoptimize if not a heap number.
__ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+ DeoptimizeIf(ne, instr, scratch, Operand(at));
Label done;
Register exponent = scratch0();
__ mov(result, input);
__ dsubu(result, zero_reg, input);
// Overflow if result is still negative, i.e. 0x80000000.
- DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, result, Operand(zero_reg));
__ bind(&done);
}
except_flag);
// Deopt if the operation did not succeed.
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
__ Branch(&done, ne, result, Operand(zero_reg));
__ mfhc1(scratch1, input); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
__ bind(&done);
}
}
// The following conversion will not work with numbers
// outside of ]-2^32, 2^32[.
- DeoptimizeIf(ge, instr->environment(), scratch,
- Operand(HeapNumber::kExponentBias + 32));
+ DeoptimizeIf(ge, instr, scratch, Operand(HeapNumber::kExponentBias + 32));
// Save the original sign for later comparison.
__ And(scratch, result, Operand(HeapNumber::kSignMask));
__ Xor(result, result, Operand(scratch));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// ARM uses 'mi' here, which is 'lt'
- DeoptimizeIf(lt, instr->environment(), result,
- Operand(zero_reg));
+ DeoptimizeIf(lt, instr, result, Operand(zero_reg));
} else {
Label skip2;
// ARM uses 'mi' here, which is 'lt'
double_scratch1,
except_flag);
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
__ bind(&check_sign_on_zero);
__ mfhc1(scratch, input); // Get exponent/sign bits.
__ And(scratch, scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, scratch, Operand(zero_reg));
}
__ bind(&done);
}
DCHECK(!a7.is(tagged_exponent));
__ lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr->environment(), a7, Operand(at));
+ DeoptimizeIf(ne, instr, a7, Operand(at));
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr->environment(), reg, operand);
+ DeoptimizeIf(cc, instr, reg, operand);
}
}
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
ne, &no_memento_found);
- DeoptimizeIf(al, instr->environment());
+ DeoptimizeIf(al, instr);
__ bind(&no_memento_found);
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ And(at, input, Operand(0x80000000));
- DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, at, Operand(zero_reg));
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTagCheckOverflow(output, input, at);
- DeoptimizeIf(lt, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, at, Operand(zero_reg));
} else {
__ SmiTag(output, input);
}
// If the input is a HeapObject, the value of scratch won't be zero.
__ And(scratch, input, Operand(kHeapObjectTag));
__ SmiUntag(result, input);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, scratch, Operand(zero_reg));
} else {
__ SmiUntag(result, input);
}
}
-void LCodeGen::EmitNumberUntagD(Register input_reg,
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
DoubleRegister result_reg,
- bool can_convert_undefined_to_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
NumberUntagDMode mode) {
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+ bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
Register scratch = scratch0();
Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
if (can_convert_undefined_to_nan) {
__ Branch(&convert, ne, scratch, Operand(at));
} else {
- DeoptimizeIf(ne, env, scratch, Operand(at));
+ DeoptimizeIf(ne, instr, scratch, Operand(at));
}
// Load heap number.
__ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
__ mfc1(at, result_reg);
__ Branch(&done, ne, at, Operand(zero_reg));
__ mfhc1(scratch, result_reg); // Get exponent/sign bits.
- DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(eq, instr, scratch, Operand(HeapNumber::kSignMask));
}
__ Branch(&done);
if (can_convert_undefined_to_nan) {
__ bind(&convert);
// Convert undefined (and hole) to NaN.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, env, input_reg, Operand(at));
+ DeoptimizeIf(ne, instr, input_reg, Operand(at));
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
__ Branch(&done);
__ bind(&check_false);
__ LoadRoot(at, Heap::kFalseValueRootIndex);
__ RecordComment("Deferred TaggedToI: cannot truncate");
- DeoptimizeIf(ne, instr->environment(), scratch2, Operand(at));
+ DeoptimizeIf(ne, instr, scratch2, Operand(at));
__ Branch(USE_DELAY_SLOT, &done);
__ mov(input_reg, zero_reg); // In delay slot.
} else {
__ RecordComment("Deferred TaggedToI: not a heap number");
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
+ DeoptimizeIf(ne, instr, scratch1, Operand(at));
// Load the double value.
__ ldc1(double_scratch,
kCheckForInexactConversion);
__ RecordComment("Deferred TaggedToI: lost precision or NaN");
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Branch(&done, ne, input_reg, Operand(zero_reg));
__ mfhc1(scratch1, double_scratch); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
__ RecordComment("Deferred TaggedToI: minus zero");
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
}
}
__ bind(&done);
NumberUntagDMode mode = value->representation().IsSmi()
? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
- EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->can_convert_undefined_to_nan(),
- instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment(),
- mode);
+ EmitNumberUntagD(instr, input_reg, result_reg, mode);
}
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ mfhc1(scratch1, double_input); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
__ bind(&done);
}
}
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ mfhc1(scratch1, double_input); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
__ bind(&done);
}
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
- DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, at, Operand(zero_reg));
}
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, at, Operand(zero_reg));
}
}
// If there is only one type in the interval, check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
+ DeoptimizeIf(ne, instr, scratch, Operand(first));
} else {
- DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
+ DeoptimizeIf(lo, instr, scratch, Operand(first));
// Omit check for the last type.
if (last != LAST_TYPE) {
- DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
+ DeoptimizeIf(hi, instr, scratch, Operand(last));
}
}
} else {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ And(at, scratch, mask);
- DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
- at, Operand(zero_reg));
+ DeoptimizeIf(tag == 0 ? ne : eq, instr, at, Operand(zero_reg));
} else {
__ And(scratch, scratch, Operand(mask));
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
+ DeoptimizeIf(ne, instr, scratch, Operand(tag));
}
}
}
Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ li(at, Operand(Handle<Object>(cell)));
__ ld(at, FieldMemOperand(at, Cell::kValueOffset));
- DeoptimizeIf(ne, instr->environment(), reg,
- Operand(at));
+ DeoptimizeIf(ne, instr, reg, Operand(at));
} else {
- DeoptimizeIf(ne, instr->environment(), reg,
- Operand(object));
+ DeoptimizeIf(ne, instr, reg, Operand(object));
}
}
__ StoreToSafepointRegisterSlot(v0, scratch0());
}
__ SmiTst(scratch0(), at);
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, at, Operand(zero_reg));
}
if (instr->hydrogen()->HasMigrationTarget()) {
__ Branch(deferred->entry(), ne, map_reg, Operand(map));
} else {
- DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map));
+ DeoptimizeIf(ne, instr, map_reg, Operand(map));
}
__ bind(&success);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
- DeoptimizeIf(ne, instr->environment(), input_reg,
- Operand(factory()->undefined_value()));
+ DeoptimizeIf(ne, instr, input_reg, Operand(factory()->undefined_value()));
__ mov(result_reg, zero_reg);
__ jmp(&done);
}
Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
- DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
+ DeoptimizeIf(al, instr, type, zero_reg, Operand(zero_reg));
}
Register result = ToRegister(instr->result());
Register object = ToRegister(instr->object());
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), object, Operand(at));
+ DeoptimizeIf(eq, instr, object, Operand(at));
Register null_value = a5;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));
+ DeoptimizeIf(eq, instr, object, Operand(null_value));
__ And(at, object, kSmiTagMask);
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, at, Operand(zero_reg));
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ GetObjectType(object, a1, a1);
- DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));
+ DeoptimizeIf(le, instr, a1, Operand(LAST_JS_PROXY_TYPE));
Label use_cache, call_runtime;
DCHECK(object.is(a0));
__ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
DCHECK(result.is(v0));
__ LoadRoot(at, Heap::kMetaMapRootIndex);
- DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
+ DeoptimizeIf(ne, instr, a1, Operand(at));
__ bind(&use_cache);
}
FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ ld(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, result, Operand(zero_reg));
__ bind(&done);
}
Register object = ToRegister(instr->value());
Register map = ToRegister(instr->map());
__ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
+ DeoptimizeIf(ne, instr, map, Operand(scratch0()));
}
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition condition,
- LEnvironment* environment,
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
Deoptimizer::BailoutType bailout_type,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
- void DeoptimizeIf(Condition condition,
- LEnvironment* environment,
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
FPURegister src1,
FPURegister src2);
void EmitCmpI(LOperand* left, LOperand* right);
- void EmitNumberUntagD(Register input,
- DoubleRegister result,
- bool allow_undefined_as_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode);
+ void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+ DoubleRegister result, NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
}
-void LCodeGen::DeoptimizeIf(Condition cc,
- LEnvironment* environment,
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
Deoptimizer::BailoutType bailout_type) {
+ LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
}
-void LCodeGen::DeoptimizeIf(Condition cc,
- LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(cc, environment, bailout_type);
+ DeoptimizeIf(cc, instr, bailout_type);
}
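// ---------------------------------------------------------------------------
// Illustrative sketch only, with hypothetical names (Env, Instr,
// CodeGenSketch), not the real v8 classes. It shows the forwarding-overload
// pattern the hunks above converge on: call sites hand over the instruction,
// and the two-argument overload derives both the environment and the bailout
// type before delegating to the full overload.
// ---------------------------------------------------------------------------
#include <cassert>
#include <iostream>

struct Env { int deoptimization_index = 42; };

struct Instr {
  explicit Instr(Env* env) : env_(env) {}
  Env* environment() const { return env_; }
  Env* env_;
};

enum class BailoutType { EAGER, LAZY };

class CodeGenSketch {
 public:
  explicit CodeGenSketch(bool is_stub) : is_stub_(is_stub) {}

  // Full overload: everything it needs now comes from the instruction.
  void DeoptimizeIf(bool condition, Instr* instr, BailoutType type) {
    Env* environment = instr->environment();
    assert(environment != nullptr);
    if (condition) {
      std::cout << "deopt id " << environment->deoptimization_index
                << (type == BailoutType::LAZY ? " (lazy)" : " (eager)")
                << std::endl;
    }
  }

  // Short overload: picks the bailout type and forwards.
  void DeoptimizeIf(bool condition, Instr* instr) {
    BailoutType type = is_stub_ ? BailoutType::LAZY : BailoutType::EAGER;
    DeoptimizeIf(condition, instr, type);
  }

 private:
  bool is_stub_;
};

int main() {
  Env env;
  Instr instr(&env);
  CodeGenSketch cgen(false /* is_stub */);
  cgen.DeoptimizeIf(true, &instr);  // The call site no longer touches the env.
  return 0;
}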
__ andl(dividend, Immediate(mask));
__ negl(dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
__ jmp(&done, Label::kNear);
}
DCHECK(ToRegister(instr->result()).is(rax));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
return;
}
Label remainder_not_zero;
__ j(not_zero, &remainder_not_zero, Label::kNear);
__ cmpl(dividend, Immediate(0));
- DeoptimizeIf(less, instr->environment());
+ DeoptimizeIf(less, instr);
__ bind(&remainder_not_zero);
}
}
// deopt in this case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(right_reg, right_reg);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
// Check for kMinInt % -1, idiv would signal a divide error. We
__ j(not_zero, &no_overflow_possible, Label::kNear);
__ cmpl(right_reg, Immediate(-1));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
} else {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ Set(result_reg, 0);
__ j(not_sign, &positive_left, Label::kNear);
__ idivl(right_reg);
__ testl(result_reg, result_reg);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
__ jmp(&done, Label::kNear);
__ bind(&positive_left);
}
// If the divisor is negative, we have to negate and handle edge cases.
__ negl(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
}
return;
}
DCHECK(ToRegister(instr->result()).is(rdx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
return;
}
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
// Easy case: We need no dynamic check for the dividend and the flooring
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(divisor, divisor);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
// Check for (0 / -x) that will produce negative zero.
__ testl(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ testl(divisor, divisor);
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr);
__ bind(&dividend_not_zero);
}
__ cmpl(dividend, Immediate(kMinInt));
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmpl(divisor, Immediate(-1));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
__ bind(&dividend_not_min_int);
}
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmpl(dividend, Immediate(kMinInt));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ testl(dividend, Immediate(mask));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr);
}
__ Move(result, dividend);
int32_t shift = WhichPowerOf2Abs(divisor);
DCHECK(ToRegister(instr->result()).is(rdx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
return;
}
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
__ TruncatingDiv(dividend, Abs(divisor));
__ movl(rax, rdx);
__ imull(rax, rax, Immediate(divisor));
__ subl(rax, dividend);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
}
}
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(divisor, divisor);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
// Check for (0 / -x) that will produce negative zero.
__ testl(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ testl(divisor, divisor);
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr);
__ bind(&dividend_not_zero);
}
__ cmpl(dividend, Immediate(kMinInt));
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmpl(divisor, Immediate(-1));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
__ bind(&dividend_not_min_int);
}
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ testl(remainder, remainder);
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr);
}
}
}
if (can_overflow) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
? !instr->hydrogen_value()->representation().IsSmi()
: SmiValuesAre31Bits());
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmpl(kScratchRegister, Immediate(0));
- DeoptimizeIf(less, instr->environment());
+ DeoptimizeIf(less, instr);
}
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
} else {
__ orl(kScratchRegister, ToOperand(right));
}
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr);
} else {
// Test the non-zero operand for negative sign.
if (instr->hydrogen_value()->representation().IsSmi()) {
} else {
__ orl(kScratchRegister, ToRegister(right));
}
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr);
}
__ bind(&done);
}
__ shrl_cl(ToRegister(left));
if (instr->can_deopt()) {
__ testl(ToRegister(left), ToRegister(left));
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr);
}
break;
case Token::SHL:
__ shrl(ToRegister(left), Immediate(shift_count));
} else if (instr->can_deopt()) {
__ testl(ToRegister(left), ToRegister(left));
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr);
}
break;
case Token::SHL:
__ shll(ToRegister(left), Immediate(shift_count - 1));
}
__ Integer32ToSmi(ToRegister(left), ToRegister(left));
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
} else {
__ shll(ToRegister(left), Immediate(shift_count));
}
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
}
}
DCHECK(object.is(rax));
Condition cc = masm()->CheckSmi(object);
- DeoptimizeIf(cc, instr->environment());
+ DeoptimizeIf(cc, instr);
__ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
if (index->value() == 0) {
__ movp(result, FieldOperand(object, JSDate::kValueOffset));
}
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
}
}
}
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ testb(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
const Register map = kScratchRegister;
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
}
}
}
__ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
}
}
DCHECK(!value.is(cell));
__ Move(cell, cell_handle, RelocInfo::CELL);
__ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
// Store the value.
__ movp(Operand(cell, 0), value);
} else {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(target, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
} else {
__ j(not_equal, &skip_assignment);
}
// Check that the function has a prototype or an initial map.
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
// If the function does not have an initial map, we're done.
Label done;
__ movl(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ testl(result, result);
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr);
}
break;
case EXTERNAL_FLOAT32_ELEMENTS:
FAST_DOUBLE_ELEMENTS,
instr->base_offset() + sizeof(kHoleNanLower32));
__ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
}
Operand double_load_operand = BuildFastArrayOperand(
if (requires_hole_check) {
if (IsFastSmiElementsKind(hinstr->elements_kind())) {
Condition smi = __ CheckSmi(result);
- DeoptimizeIf(NegateCondition(smi), instr->environment());
+ DeoptimizeIf(NegateCondition(smi), instr);
} else {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
}
}
}
// The receiver should be a JS object.
Condition is_smi = __ CheckSmi(receiver);
- DeoptimizeIf(is_smi, instr->environment());
+ DeoptimizeIf(is_smi, instr);
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
- DeoptimizeIf(below, instr->environment());
+ DeoptimizeIf(below, instr);
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmpp(length, Immediate(kArgumentsLimit));
- DeoptimizeIf(above, instr->environment());
+ DeoptimizeIf(above, instr);
__ Push(receiver);
__ movp(receiver, length);
Register input_reg = ToRegister(instr->value());
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
Label slow, allocated, done;
Register tmp = input_reg.is(rax) ? rcx : rax;
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ negl(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr);
__ bind(&is_positive);
}
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ negp(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr);
__ bind(&is_positive);
}
// Deoptimize if minus zero.
__ movq(output_reg, input_reg);
__ subq(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
}
__ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
__ cvttsd2si(output_reg, xmm_scratch);
__ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
} else {
Label negative_sign, done;
// Deoptimize on unordered.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(parity_even, instr->environment());
+ DeoptimizeIf(parity_even, instr);
__ j(below, &negative_sign, Label::kNear);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ j(above, &positive_sign, Label::kNear);
__ movmskpd(output_reg, input_reg);
__ testq(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr);
__ Set(output_reg, 0);
__ jmp(&done);
__ bind(&positive_sign);
__ cvttsd2si(output_reg, input_reg);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ subl(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
__ bind(&done);
}
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x1));
__ RecordComment("D2I conversion overflow");
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
__ jmp(&done, dist);
__ bind(&below_one_half);
// Catch minint due to overflow, and to prevent overflow when compensating.
__ cmpl(output_reg, Immediate(0x1));
__ RecordComment("D2I conversion overflow");
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
__ Cvtlsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
__ movq(output_reg, input_reg);
__ testq(output_reg, output_reg);
__ RecordComment("Minus zero");
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr);
}
__ Set(output_reg, 0);
__ bind(&done);
Label no_deopt;
__ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
__ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
__ int3();
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr->environment());
+ DeoptimizeIf(cc, instr);
}
}
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
__ bind(&no_memento_found);
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
- DeoptimizeIf(NegateCondition(is_smi), instr->environment());
+ DeoptimizeIf(NegateCondition(is_smi), instr);
}
__ Integer32ToSmi(output, input);
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
}
}
Register input = ToRegister(instr->value());
if (instr->needs_check()) {
Condition is_smi = __ CheckSmi(input);
- DeoptimizeIf(NegateCondition(is_smi), instr->environment());
+ DeoptimizeIf(NegateCondition(is_smi), instr);
} else {
__ AssertSmi(input);
}
}
-void LCodeGen::EmitNumberUntagD(Register input_reg,
- XMMRegister result_reg,
- bool can_convert_undefined_to_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode) {
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
+ XMMRegister result_reg, NumberUntagDMode mode) {
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+ bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
if (can_convert_undefined_to_nan) {
__ j(not_equal, &convert, Label::kNear);
} else {
- DeoptimizeIf(not_equal, env);
+ DeoptimizeIf(not_equal, instr);
}
if (deoptimize_on_minus_zero) {
__ j(not_equal, &done, Label::kNear);
__ movmskpd(kScratchRegister, result_reg);
__ testq(kScratchRegister, Immediate(1));
- DeoptimizeIf(not_zero, env);
+ DeoptimizeIf(not_zero, instr);
}
__ jmp(&done, Label::kNear);
// Convert undefined (and hole) to NaN. Compute NaN as 0/0.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, env);
+ DeoptimizeIf(not_equal, instr);
__ xorps(result_reg, result_reg);
__ divsd(result_reg, result_reg);
__ bind(&check_false);
__ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
__ RecordComment("Deferred TaggedToI: cannot truncate");
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
__ Set(input_reg, 0);
} else {
XMMRegister scratch = ToDoubleRegister(instr->temp());
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ RecordComment("Deferred TaggedToI: not a heap number");
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
__ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, xmm0);
__ Cvtlsi2sd(scratch, input_reg);
__ ucomisd(xmm0, scratch);
__ RecordComment("Deferred TaggedToI: lost precision");
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
__ RecordComment("Deferred TaggedToI: NaN");
- DeoptimizeIf(parity_even, instr->environment());
+ DeoptimizeIf(parity_even, instr);
if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
__ testl(input_reg, input_reg);
__ j(not_zero, done);
__ movmskpd(input_reg, xmm0);
__ andl(input_reg, Immediate(1));
__ RecordComment("Deferred TaggedToI: minus zero");
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr);
}
}
}
NumberUntagDMode mode = value->representation().IsSmi()
? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
- EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->can_convert_undefined_to_nan(),
- instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment(),
- mode);
+ EmitNumberUntagD(instr, input_reg, result_reg, mode);
}
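// ---------------------------------------------------------------------------
// Illustrative sketch only, with hypothetical types, not the v8 API. It shows
// the companion pattern used for EmitNumberUntagD above: a helper that used
// to take several booleans plus the environment now receives the instruction
// and reads those flags from its hydrogen value, so the call site shrinks to
// a single line.
// ---------------------------------------------------------------------------
#include <iostream>

struct HydrogenSketch {
  bool can_convert_undefined_to_nan() const { return true; }
  bool deoptimize_on_minus_zero() const { return false; }
};

struct NumberUntagInstrSketch {
  const HydrogenSketch* hydrogen() const { return &hydrogen_; }
  HydrogenSketch hydrogen_;
};

// After the refactoring: the flags are derived inside the helper.
void EmitNumberUntagSketch(const NumberUntagInstrSketch* instr, double input) {
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();
  bool deoptimize_on_minus_zero =
      instr->hydrogen()->deoptimize_on_minus_zero();
  std::cout << "untag " << input
            << ", undefined->NaN allowed: " << can_convert_undefined_to_nan
            << ", deopt on -0: " << deoptimize_on_minus_zero << std::endl;
}

int main() {
  NumberUntagInstrSketch instr;
  EmitNumberUntagSketch(&instr, 1.5);  // Call site passes only the instruction.
  return 0;
}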
__ jmp(&done, Label::kNear);
__ bind(&bailout);
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
__ bind(&done);
}
}
__ jmp(&done, Label::kNear);
__ bind(&bailout);
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
__ bind(&done);
__ Integer32ToSmi(result_reg, result_reg);
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(NegateCondition(cc), instr->environment());
+ DeoptimizeIf(NegateCondition(cc), instr);
}
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(cc, instr->environment());
+ DeoptimizeIf(cc, instr);
}
}
// If there is only one type in the interval, check for equality.
if (first == last) {
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
} else {
- DeoptimizeIf(below, instr->environment());
+ DeoptimizeIf(below, instr);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
Immediate(static_cast<int8_t>(last)));
- DeoptimizeIf(above, instr->environment());
+ DeoptimizeIf(above, instr);
}
}
} else {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
Immediate(mask));
- DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
+ DeoptimizeIf(tag == 0 ? not_zero : zero, instr);
} else {
__ movzxbl(kScratchRegister,
FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
__ andb(kScratchRegister, Immediate(mask));
__ cmpb(kScratchRegister, Immediate(tag));
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
}
}
}
void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
__ Cmp(reg, instr->hydrogen()->object().handle());
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
}
__ testp(rax, Immediate(kSmiTagMask));
}
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
}
__ bind(&success);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ Cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
__ xorl(input_reg, input_reg);
__ jmp(&done, Label::kNear);
}
Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
- DeoptimizeIf(no_condition, instr->environment(), type);
+ DeoptimizeIf(no_condition, instr, type);
}
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ cmpp(rax, null_value);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
Condition cc = masm()->CheckSmi(rax);
- DeoptimizeIf(cc, instr->environment());
+ DeoptimizeIf(cc, instr);
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
- DeoptimizeIf(below_equal, instr->environment());
+ DeoptimizeIf(below_equal, instr);
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kMetaMapRootIndex);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
__ bind(&use_cache);
}
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
Condition cc = masm()->CheckSmi(result);
- DeoptimizeIf(cc, instr->environment());
+ DeoptimizeIf(cc, instr);
}
Register object = ToRegister(instr->value());
__ cmpp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
}
int argc);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc,
- LEnvironment* environment,
+ void DeoptimizeIf(Condition cc, LInstruction* instr,
Deoptimizer::BailoutType bailout_type);
- void DeoptimizeIf(Condition cc, LEnvironment* environment);
+ void DeoptimizeIf(Condition cc, LInstruction* instr);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
void EmitBranch(InstrType instr, Condition cc);
template<class InstrType>
void EmitFalseBranch(InstrType instr, Condition cc);
- void EmitNumberUntagD(
- Register input,
- XMMRegister result,
- bool allow_undefined_as_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
+ void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+ XMMRegister result, NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
}
-void LCodeGen::DeoptimizeIf(Condition cc,
- LEnvironment* environment,
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
Deoptimizer::BailoutType bailout_type) {
+ LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
}
-void LCodeGen::DeoptimizeIf(Condition cc,
- LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(cc, environment, bailout_type);
+ DeoptimizeIf(cc, instr, bailout_type);
}
__ and_(dividend, mask);
__ neg(dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
__ jmp(&done, Label::kNear);
}
DCHECK(ToRegister(instr->result()).is(eax));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
return;
}
Label remainder_not_zero;
__ j(not_zero, &remainder_not_zero, Label::kNear);
__ cmp(dividend, Immediate(0));
- DeoptimizeIf(less, instr->environment());
+ DeoptimizeIf(less, instr);
__ bind(&remainder_not_zero);
}
}
// deopt in this case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(right_reg, Operand(right_reg));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
// Check for kMinInt % -1, idiv would signal a divide error. We
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ cmp(right_reg, -1);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
} else {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ Move(result_reg, Immediate(0));
__ j(not_sign, &positive_left, Label::kNear);
__ idiv(right_reg);
__ test(result_reg, Operand(result_reg));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
__ jmp(&done, Label::kNear);
__ bind(&positive_left);
}
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, kMinInt);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ test(dividend, Immediate(mask));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr);
}
__ Move(result, dividend);
int32_t shift = WhichPowerOf2Abs(divisor);
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
return;
}
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
__ TruncatingDiv(dividend, Abs(divisor));
__ mov(eax, edx);
__ imul(eax, eax, divisor);
__ sub(eax, dividend);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
}
}
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
// Check for (0 / -x) that will produce negative zero.
__ test(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr);
__ bind(&dividend_not_zero);
}
__ cmp(dividend, kMinInt);
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
__ bind(&dividend_not_min_int);
}
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ test(remainder, remainder);
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr);
}
}
// If the divisor is negative, we have to negate and handle edge cases.
__ neg(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
}
return;
}
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
return;
}
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
// Easy case: We need no dynamic check for the dividend and the flooring
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
// Check for (0 / -x) that will produce negative zero.
__ test(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr);
__ bind(&dividend_not_zero);
}
__ cmp(dividend, kMinInt);
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
__ bind(&dividend_not_min_int);
}
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmp(ToRegister(instr->temp()), Immediate(0));
- DeoptimizeIf(less, instr->environment());
+ DeoptimizeIf(less, instr);
}
} else {
// Test the non-zero operand for negative sign.
__ or_(ToRegister(instr->temp()), ToOperand(right));
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr);
}
__ bind(&done);
}
__ ror_cl(ToRegister(left));
if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr);
}
break;
case Token::SAR:
__ shr_cl(ToRegister(left));
if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr);
}
break;
case Token::SHL:
case Token::ROR:
if (shift_count == 0 && instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr);
} else {
__ ror(ToRegister(left), shift_count);
}
__ shr(ToRegister(left), shift_count);
} else if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr->environment());
+ DeoptimizeIf(sign, instr);
}
break;
case Token::SHL:
__ shl(ToRegister(left), shift_count - 1);
}
__ SmiTag(ToRegister(left));
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
} else {
__ shl(ToRegister(left), shift_count);
}
__ sub(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
}
}
DCHECK(object.is(eax));
__ test(object, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
__ CmpObjectType(object, JS_DATE_TYPE, scratch);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
if (index->value() == 0) {
__ mov(result, FieldOperand(object, JSDate::kValueOffset));
__ add(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
}
}
}
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ test(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
Register map = no_reg; // Keep the compiler happy.
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
}
}
}
__ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
}
}
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
}
// Store the value.
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(target, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
} else {
__ j(not_equal, &skip_assignment, Label::kNear);
}
// Check that the function has a prototype or an initial map.
__ cmp(Operand(result), Immediate(factory()->the_hole_value()));
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
// If the function does not have an initial map, we're done.
Label done;
__ mov(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ test(result, Operand(result));
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr);
}
break;
case EXTERNAL_FLOAT32_ELEMENTS:
FAST_DOUBLE_ELEMENTS,
instr->base_offset() + sizeof(kHoleNanLower32));
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
}
Operand double_load_operand = BuildFastArrayOperand(
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
} else {
__ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
}
}
}
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
- DeoptimizeIf(below, instr->environment());
+ DeoptimizeIf(below, instr);
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, kArgumentsLimit);
- DeoptimizeIf(above, instr->environment());
+ DeoptimizeIf(above, instr);
__ push(receiver);
__ mov(receiver, length);
Register input_reg = ToRegister(instr->value());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
Label slow, allocated, done;
Register tmp = input_reg.is(eax) ? ecx : eax;
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ neg(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr->environment());
+ DeoptimizeIf(negative, instr);
__ bind(&is_positive);
}
__ int3();
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr->environment());
+ DeoptimizeIf(cc, instr);
}
}
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
__ bind(&no_memento_found);
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ test(input, Immediate(0xc0000000));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr);
}
__ SmiTag(input);
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
}
}
DCHECK(input->IsRegister() && input->Equals(instr->result()));
if (instr->needs_check()) {
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr);
} else {
__ AssertSmi(result);
}
}
-void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
- Register temp_reg,
- X87Register res_reg,
- bool can_convert_undefined_to_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
+void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg,
+ Register temp_reg, X87Register res_reg,
NumberUntagDMode mode) {
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+ bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
Label load_smi, done;
X87PrepareToWrite(res_reg);
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(not_equal, env);
+ DeoptimizeIf(not_equal, instr);
} else {
Label heap_number, convert;
__ j(equal, &heap_number, Label::kNear);
// Convert undefined (or hole) to NaN.
__ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, env);
+ DeoptimizeIf(not_equal, instr);
__ bind(&convert);
ExternalReference nan =
// Pop FPU stack before deoptimizing.
__ fstp(0);
- DeoptimizeIf(not_zero, env);
+ DeoptimizeIf(not_zero, instr);
}
__ jmp(&done, Label::kNear);
} else {
__ bind(&check_false);
__ cmp(input_reg, factory()->false_value());
__ RecordComment("Deferred TaggedToI: cannot truncate");
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
__ Move(input_reg, Immediate(0));
} else {
// TODO(olivf) Converting a number on the fpu is actually quite slow. We
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
__ RecordComment("Deferred TaggedToI: not a heap number");
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
__ sub(esp, Immediate(kPointerSize));
__ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
__ j(equal, &no_precision_lost, Label::kNear);
__ fstp(0);
__ RecordComment("Deferred TaggedToI: lost precision");
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
__ bind(&no_precision_lost);
__ j(parity_odd, &not_nan);
__ fstp(0);
__ RecordComment("Deferred TaggedToI: NaN");
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
__ bind(&not_nan);
__ test(input_reg, Operand(input_reg));
__ pop(input_reg);
__ test(input_reg, Operand(input_reg));
__ RecordComment("Deferred TaggedToI: minus zero");
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr);
} else {
__ fist_s(MemOperand(esp, 0));
__ fild_s(MemOperand(esp, 0));
__ FCmp();
__ pop(input_reg);
__ RecordComment("Deferred TaggedToI: lost precision");
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
__ RecordComment("Deferred TaggedToI: NaN");
- DeoptimizeIf(parity_even, instr->environment());
+ DeoptimizeIf(parity_even, instr);
}
}
}
DCHECK(result->IsDoubleRegister());
Register input_reg = ToRegister(input);
- bool deoptimize_on_minus_zero =
- instr->hydrogen()->deoptimize_on_minus_zero();
Register temp_reg = ToRegister(temp);
HValue* value = instr->hydrogen()->value();
NumberUntagDMode mode = value->representation().IsSmi()
? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
- EmitNumberUntagDNoSSE2(input_reg,
- temp_reg,
- ToX87Register(result),
- instr->hydrogen()->can_convert_undefined_to_nan(),
- deoptimize_on_minus_zero,
- instr->environment(),
+ EmitNumberUntagDNoSSE2(instr, input_reg, temp_reg, ToX87Register(result),
mode);
}
&bailout, Label::kNear);
__ jmp(&done, Label::kNear);
__ bind(&bailout);
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
__ bind(&done);
}
}
&bailout, Label::kNear);
__ jmp(&done, Label::kNear);
__ bind(&bailout);
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(no_condition, instr);
__ bind(&done);
__ SmiTag(result_reg);
- DeoptimizeIf(overflow, instr->environment());
+ DeoptimizeIf(overflow, instr);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr->environment());
+ DeoptimizeIf(not_zero, instr);
}
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
}
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
} else {
- DeoptimizeIf(below, instr->environment());
+ DeoptimizeIf(below, instr);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
static_cast<int8_t>(last));
- DeoptimizeIf(above, instr->environment());
+ DeoptimizeIf(above, instr);
}
}
} else {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
- DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
+ DeoptimizeIf(tag == 0 ? not_zero : zero, instr);
} else {
__ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
__ and_(temp, mask);
__ cmp(temp, tag);
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
}
}
}
Operand operand = ToOperand(instr->value());
__ cmp(operand, object);
}
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
}
__ test(eax, Immediate(kSmiTagMask));
}
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
}
if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
}
__ bind(&success);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
__ jmp(&zero_result, Label::kNear);
// Heap number
type = Deoptimizer::LAZY;
}
Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
- DeoptimizeIf(no_condition, instr->environment(), type);
+ DeoptimizeIf(no_condition, instr, type);
}
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
__ cmp(eax, isolate()->factory()->undefined_value());
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
__ cmp(eax, isolate()->factory()->null_value());
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
__ test(eax, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
+ DeoptimizeIf(zero, instr);
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
- DeoptimizeIf(below_equal, instr->environment());
+ DeoptimizeIf(below_equal, instr);
Label use_cache, call_runtime;
__ CheckEnumCache(&call_runtime);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
__ bind(&use_cache);
}
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
__ test(result, result);
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(equal, instr);
}
Register object = ToRegister(instr->value());
__ cmp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(not_equal, instr);
}
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc,
- LEnvironment* environment,
+ void DeoptimizeIf(Condition cc, LInstruction* instr,
Deoptimizer::BailoutType bailout_type);
- void DeoptimizeIf(Condition cc, LEnvironment* environment);
+ void DeoptimizeIf(Condition cc, LInstruction* instr);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
void EmitBranch(InstrType instr, Condition cc);
template<class InstrType>
void EmitFalseBranch(InstrType instr, Condition cc);
- void EmitNumberUntagDNoSSE2(
- Register input,
- Register temp,
- X87Register res_reg,
- bool allow_undefined_as_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
+ void EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input,
+ Register temp, X87Register res_reg,
+ NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to