From: jkummerow@chromium.org
Date: Fri, 8 Nov 2013 10:52:07 +0000 (+0000)
Subject: Use nearlabel AFAP in lithium codegen
X-Git-Tag: upstream/4.7.83~11830
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=a556b7c99bb3e7acaf6ff2eef2f2ce5fb0a4a47e;p=platform%2Fupstream%2Fv8.git

Use nearlabel AFAP in lithium codegen

BUG=
R=jkummerow@chromium.org

Review URL: https://codereview.chromium.org/47533002

Patch from Weiliang Lin.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@17583 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---

diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 44b952c..f457da0 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -1043,7 +1043,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
     return;
   }
 
-  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+  if (DeoptEveryNTimes()) {
     ExternalReference count = ExternalReference::stress_deopt_count(isolate());
     Label no_deopt;
     __ pushfd();
@@ -2047,7 +2047,7 @@ void LCodeGen::DoDateField(LDateField* instr) {
       __ j(not_equal, &runtime, Label::kNear);
       __ mov(result, FieldOperand(object, JSDate::kValueOffset +
                                           kPointerSize * index->value()));
-      __ jmp(&done);
+      __ jmp(&done, Label::kNear);
     }
     __ bind(&runtime);
     __ PrepareCallCFunction(2, scratch);
@@ -2647,7 +2647,7 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
   __ fld(0);
   __ FCmp();
   Label ok;
-  __ j(parity_even, &ok);
+  __ j(parity_even, &ok, Label::kNear);
   __ fstp(0);
   EmitFalseBranch(instr, no_condition);
   __ bind(&ok);
@@ -2971,7 +2971,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   Register temp = ToRegister(instr->temp());
 
   // A Smi is not an instance of anything.
-  __ JumpIfSmi(object, &false_result);
+  __ JumpIfSmi(object, &false_result, Label::kNear);
 
   // This is the inlined call site instanceof cache. The two occurences of the
   // hole value will be patched to the last map/result pair generated by the
@@ -2984,18 +2984,18 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   __ cmp(map, Operand::ForCell(cache_cell));  // Patched to cached map.
   __ j(not_equal, &cache_miss, Label::kNear);
   __ mov(eax, factory()->the_hole_value());  // Patched to either true or false.
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   // The inlined call site cache did not match. Check for null and string
   // before calling the deferred code.
   __ bind(&cache_miss);
 
   // Null is not an instance of anything.
   __ cmp(object, factory()->null_value());
-  __ j(equal, &false_result);
+  __ j(equal, &false_result, Label::kNear);
 
   // String values are not instances of anything.
   Condition is_string = masm_->IsObjectStringType(object, temp, temp);
-  __ j(is_string, &false_result);
+  __ j(is_string, &false_result, Label::kNear);
 
   // Go to the deferred code.
   __ jmp(deferred->entry());
@@ -3140,7 +3140,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
   if (dynamic_frame_alignment_) {
     Label no_padding;
     __ cmp(edx, Immediate(kNoAlignmentPadding));
-    __ j(equal, &no_padding);
+    __ j(equal, &no_padding, Label::kNear);
 
     EmitReturn(instr, true);
     __ bind(&no_padding);
@@ -3658,6 +3658,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   // object as a receiver to normal functions. Values have to be
   // passed unchanged to builtins and strict-mode functions.
   Label global_object, receiver_ok;
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
 
   // Do not transform the receiver to object for strict mode
   // functions.
@@ -3665,12 +3666,12 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
          FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
             1 << SharedFunctionInfo::kStrictModeBitWithinByte);
-  __ j(not_equal, &receiver_ok);  // A near jump is not sufficient here!
+  __ j(not_equal, &receiver_ok, dist);
 
   // Do not transform the receiver to object for builtins.
   __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
             1 << SharedFunctionInfo::kNativeBitWithinByte);
-  __ j(not_equal, &receiver_ok);
+  __ j(not_equal, &receiver_ok, dist);
 
   // Normal function. Replace undefined or null with global receiver.
   __ cmp(receiver, factory()->null_value());
@@ -3879,7 +3880,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
   // |result| are the same register and |input| will be restored
   // unchanged by popping safepoint registers.
   __ test(tmp, Immediate(HeapNumber::kSignMask));
-  __ j(zero, &done);
+  __ j(zero, &done, Label::kNear);
 
   __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
   __ jmp(&allocated, Label::kNear);
@@ -4033,9 +4034,11 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
       ExternalReference::address_of_minus_one_half();
 
   Label done, round_to_zero, below_one_half, do_not_compensate;
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+
   __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
   __ ucomisd(xmm_scratch, input_reg);
-  __ j(above, &below_one_half);
+  __ j(above, &below_one_half, Label::kNear);
 
   // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
   __ addsd(xmm_scratch, input_reg);
@@ -4044,12 +4047,12 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
   __ cmp(output_reg, 0x80000000u);
   __ RecordComment("D2I conversion overflow");
   DeoptimizeIf(equal, instr->environment());
-  __ jmp(&done);
+  __ jmp(&done, dist);
 
   __ bind(&below_one_half);
   __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
   __ ucomisd(xmm_scratch, input_reg);
-  __ j(below_equal, &round_to_zero);
+  __ j(below_equal, &round_to_zero, Label::kNear);
 
   // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
   // compare and compensate.
@@ -4063,10 +4066,10 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
 
   __ Cvtsi2sd(xmm_scratch, output_reg);
   __ ucomisd(xmm_scratch, input_temp);
-  __ j(equal, &done);
+  __ j(equal, &done, dist);
   __ sub(output_reg, Immediate(1));
   // No overflow because we already ruled out minint.
-  __ jmp(&done);
+  __ jmp(&done, dist);
 
   __ bind(&round_to_zero);
   // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
@@ -4421,13 +4424,13 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
     // look at the first argument
     __ mov(ecx, Operand(esp, 0));
     __ test(ecx, ecx);
-    __ j(zero, &packed_case);
+    __ j(zero, &packed_case, Label::kNear);
 
     ElementsKind holey_kind = GetHoleyElementsKind(kind);
     ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
                                             override_mode);
     CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
     __ bind(&packed_case);
   }
@@ -4724,7 +4727,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
 
     Label have_value;
     __ ucomisd(value, value);
-    __ j(parity_odd, &have_value);  // NaN.
+    __ j(parity_odd, &have_value, Label::kNear);  // NaN.
 
     __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
     __ bind(&have_value);
@@ -4760,15 +4763,15 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
 
       __ fld(0);
       __ FCmp();
-      __ j(parity_odd, &no_special_nan_handling);
+      __ j(parity_odd, &no_special_nan_handling, Label::kNear);
       __ sub(esp, Immediate(kDoubleSize));
       __ fst_d(MemOperand(esp, 0));
       __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
             Immediate(kHoleNanUpper32));
       __ add(esp, Immediate(kDoubleSize));
       Label canonicalize;
-      __ j(not_equal, &canonicalize);
-      __ jmp(&no_special_nan_handling);
+      __ j(not_equal, &canonicalize, Label::kNear);
+      __ jmp(&no_special_nan_handling, Label::kNear);
       __ bind(&canonicalize);
       __ fstp(0);
       __ fld_d(Operand::StaticVariable(canonical_nan_reference));
@@ -5748,12 +5751,12 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   Label success;
   for (int i = 0; i < map_set.size() - 1; i++) {
     Handle<Map> map = map_set.at(i).handle();
-    __ CompareMap(reg, map, &success);
-    __ j(equal, &success);
+    __ CompareMap(reg, map);
+    __ j(equal, &success, Label::kNear);
   }
 
   Handle<Map> map = map_set.at(map_set.size() - 1).handle();
-  __ CompareMap(reg, map, &success);
+  __ CompareMap(reg, map);
   if (instr->hydrogen()->has_migration_target()) {
     __ j(not_equal, deferred->entry());
   } else {
@@ -5831,13 +5834,13 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
     // Check for heap number
     __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
            factory()->heap_number_map());
-    __ j(equal, &heap_number, Label::kFar);
+    __ j(equal, &heap_number, Label::kNear);
 
     // Check for undefined. Undefined is converted to zero for clamping
     // conversions.
     __ cmp(input_reg, factory()->undefined_value());
     DeoptimizeIf(not_equal, instr->environment());
-    __ jmp(&zero_result);
+    __ jmp(&zero_result, Label::kNear);
 
     // Heap number
     __ bind(&heap_number);
@@ -5852,15 +5855,15 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
 
     // Test for negative values --> clamp to zero
     __ test(scratch, scratch);
-    __ j(negative, &zero_result);
+    __ j(negative, &zero_result, Label::kNear);
 
     // Get exponent alone in scratch2.
     __ mov(scratch2, scratch);
     __ and_(scratch2, HeapNumber::kExponentMask);
     __ shr(scratch2, HeapNumber::kExponentShift);
-    __ j(zero, &zero_result);
+    __ j(zero, &zero_result, Label::kNear);
     __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
-    __ j(negative, &zero_result);
+    __ j(negative, &zero_result, Label::kNear);
 
     const uint32_t non_int8_exponent = 7;
     __ cmp(scratch2, Immediate(non_int8_exponent + 1));
@@ -5891,18 +5894,18 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
     __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
     __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
     Label no_round;
-    __ j(less, &no_round);
+    __ j(less, &no_round, Label::kNear);
     Label round_up;
     __ mov(scratch2, Immediate(1 << one_half_bit_shift));
-    __ j(greater, &round_up);
+    __ j(greater, &round_up, Label::kNear);
     __ test(scratch3, scratch3);
-    __ j(not_zero, &round_up);
+    __ j(not_zero, &round_up, Label::kNear);
     __ mov(scratch2, scratch);
     __ and_(scratch2, Immediate(1 << one_bit_shift));
     __ shr(scratch2, 1);
     __ bind(&round_up);
     __ add(scratch, scratch2);
-    __ j(overflow, &largest_value);
+    __ j(overflow, &largest_value, Label::kNear);
     __ bind(&no_round);
     __ shr(scratch, 23);
     __ mov(result_reg, scratch);
@@ -5917,7 +5920,7 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
     // bit is set.
     __ and_(scratch, HeapNumber::kMantissaMask);
     __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
-    __ j(not_zero, &zero_result);  // M!=0 --> NaN
+    __ j(not_zero, &zero_result, Label::kNear);  // M!=0 --> NaN
     // Infinity -> Fall through to map to 255.
 
     __ bind(&largest_value);
@@ -5926,7 +5929,7 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
 
     __ bind(&zero_result);
     __ xor_(result_reg, result_reg);
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
 
     // smi
     __ bind(&is_smi);
@@ -6074,7 +6077,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
   int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
   Label allocated, runtime_allocate;
   __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
-  __ jmp(&allocated);
+  __ jmp(&allocated, Label::kNear);
 
   __ bind(&runtime_allocate);
   __ push(ebx);
@@ -6400,9 +6403,9 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
   Label load_cache, done;
   __ EnumLength(result, map);
   __ cmp(result, Immediate(Smi::FromInt(0)));
-  __ j(not_equal, &load_cache);
+  __ j(not_equal, &load_cache, Label::kNear);
   __ mov(result, isolate()->factory()->empty_fixed_array());
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   __ bind(&load_cache);
   __ LoadInstanceDescriptors(map, result);
@@ -6430,7 +6433,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
 
   Label out_of_object, done;
   __ cmp(index, Immediate(0));
-  __ j(less, &out_of_object);
+  __ j(less, &out_of_object, Label::kNear);
   __ mov(object, FieldOperand(object,
                               index,
                               times_half_pointer_size,
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 77a37a6..6c9cde3 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -268,6 +268,10 @@ class LCodeGen: public LCodeGenBase {
   void DeoptimizeIf(Condition cc, LEnvironment* environment);
   void ApplyCheckIf(Condition cc, LBoundsCheck* check);
 
+  bool DeoptEveryNTimes() {
+    return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
+  }
+
   void AddToTranslation(LEnvironment* environment,
                         Translation* translation,
                         LOperand* op,
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 8414f85..235e38c 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -867,9 +867,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
 }
 
 
-void MacroAssembler::CompareMap(Register obj,
-                                Handle<Map> map,
-                                Label* early_success) {
+void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
   cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
 }
 
@@ -882,10 +880,8 @@ void MacroAssembler::CheckMap(Register obj,
     JumpIfSmi(obj, fail);
   }
 
-  Label success;
-  CompareMap(obj, map, &success);
+  CompareMap(obj, map);
   j(not_equal, fail);
-  bind(&success);
 }
 
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 023a08b..5f1e428 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -417,13 +417,8 @@ class MacroAssembler: public Assembler {
                                    bool specialize_for_processor,
                                    int offset = 0);
 
-  // Compare an object's map with the specified map and its transitioned
-  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
-  // result of map compare. If multiple map compares are required, the compare
-  // sequences branches to early_success.
-  void CompareMap(Register obj,
-                  Handle<Map> map,
-                  Label* early_success);
+  // Compare an object's map with the specified map.
+  void CompareMap(Register obj, Handle<Map> map);
 
   // Check if the map of an object is equal to a specified map and branch to
   // label if not. Skip the smi check if not required (object is known to be a
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index be8160b..5a08f0e 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -2222,7 +2222,7 @@ static void CheckInputType(MacroAssembler* masm,
     __ JumpIfNotSmi(input, fail);
   } else if (expected == CompareIC::NUMBER) {
     __ JumpIfSmi(input, &ok);
-    __ CompareMap(input, masm->isolate()->factory()->heap_number_map(), NULL);
+    __ CompareMap(input, masm->isolate()->factory()->heap_number_map());
     __ j(not_equal, fail);
   }
   // We could be strict about internalized/non-internalized here, but as long as
@@ -3206,7 +3206,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
     //   __ j(not_equal, &cache_miss);
     //   __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
     // before the offset of the hole value in the root array.
-    static const unsigned int kWordBeforeResultValue = 0x458B4909;
+    static const unsigned int kWordBeforeResultValue = 0x458B4906;
     // Only the inline check flag is supported on X64.
     ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
     int extra_argument_offset = HasCallSiteInlineCheck() ? 1 : 0;
@@ -4543,7 +4543,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
   // Load left and right operand.
   Label done, left, left_smi, right_smi;
   __ JumpIfSmi(rax, &right_smi, Label::kNear);
-  __ CompareMap(rax, masm->isolate()->factory()->heap_number_map(), NULL);
+  __ CompareMap(rax, masm->isolate()->factory()->heap_number_map());
   __ j(not_equal, &maybe_undefined1, Label::kNear);
   __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
   __ jmp(&left, Label::kNear);
@@ -4553,7 +4553,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
 
   __ bind(&left);
   __ JumpIfSmi(rdx, &left_smi, Label::kNear);
-  __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map(), NULL);
+  __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map());
   __ j(not_equal, &maybe_undefined2, Label::kNear);
   __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
   __ jmp(&done);
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index ba186a8..be7d19a 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -649,7 +649,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
     return;
   }
 
-  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+  if (DeoptEveryNTimes()) {
     ExternalReference count = ExternalReference::stress_deopt_count(isolate());
     Label no_deopt;
     __ pushfq();
@@ -1639,7 +1639,7 @@ void LCodeGen::DoDateField(LDateField* instr) {
     __ j(not_equal, &runtime, Label::kNear);
     __ movq(result, FieldOperand(object, JSDate::kValueOffset +
                                          kPointerSize * index->value()));
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
   }
   __ bind(&runtime);
   __ PrepareCallCFunction(2);
@@ -2515,7 +2515,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   Register object = ToRegister(instr->value());
 
   // A Smi is not an instance of anything.
-  __ JumpIfSmi(object, &false_result);
+  __ JumpIfSmi(object, &false_result, Label::kNear);
 
   // This is the inlined call site instanceof cache. The two occurences of the
   // hole value will be patched to the last map/result pair generated by the
@@ -2537,7 +2537,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
     __ bind(&end_of_patched_code);
     ASSERT(true);
 #endif
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
 
     // The inlined call site cache did not match. Check for null and string
     // before calling the deferred code.
@@ -2592,9 +2592,9 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
   __ testq(kScratchRegister, kScratchRegister);
   Label load_false;
   Label done;
-  __ j(not_zero, &load_false);
+  __ j(not_zero, &load_false, Label::kNear);
   __ LoadRoot(rax, Heap::kTrueValueRootIndex);
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&load_false);
   __ LoadRoot(rax, Heap::kFalseValueRootIndex);
   __ bind(&done);
@@ -3159,6 +3159,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   // object as a receiver to normal functions. Values have to be
   // passed unchanged to builtins and strict-mode functions.
   Label global_object, receiver_ok;
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
 
   // Do not transform the receiver to object for strict mode
   // functions.
@@ -3167,13 +3168,13 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   __ testb(FieldOperand(kScratchRegister,
                         SharedFunctionInfo::kStrictModeByteOffset),
            Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
-  __ j(not_equal, &receiver_ok, Label::kNear);
+  __ j(not_equal, &receiver_ok, dist);
 
   // Do not transform the receiver to object for builtins.
   __ testb(FieldOperand(kScratchRegister,
                         SharedFunctionInfo::kNativeByteOffset),
            Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
-  __ j(not_equal, &receiver_ok, Label::kNear);
+  __ j(not_equal, &receiver_ok, dist);
 
   // Normal function. Replace undefined or null with global receiver.
   __ CompareRoot(receiver, Heap::kNullValueRootIndex);
@@ -3495,7 +3496,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
       __ testq(output_reg, Immediate(1));
       DeoptimizeIf(not_zero, instr->environment());
       __ Set(output_reg, 0);
-      __ jmp(&done);
+      __ jmp(&done, Label::kNear);
       __ bind(&positive_sign);
     }
@@ -3529,10 +3530,11 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
   static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000);  // -0.5
 
   Label done, round_to_zero, below_one_half, do_not_compensate, restore;
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
   __ movq(kScratchRegister, one_half);
   __ movq(xmm_scratch, kScratchRegister);
   __ ucomisd(xmm_scratch, input_reg);
-  __ j(above, &below_one_half);
+  __ j(above, &below_one_half, Label::kNear);
 
   // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
   __ addsd(xmm_scratch, input_reg);
@@ -3541,13 +3543,13 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
   __ cmpl(output_reg, Immediate(0x80000000));
   __ RecordComment("D2I conversion overflow");
   DeoptimizeIf(equal, instr->environment());
-  __ jmp(&done);
+  __ jmp(&done, dist);
 
   __ bind(&below_one_half);
   __ movq(kScratchRegister, minus_one_half);
   __ movq(xmm_scratch, kScratchRegister);
   __ ucomisd(xmm_scratch, input_reg);
-  __ j(below_equal, &round_to_zero);
+  __ j(below_equal, &round_to_zero, Label::kNear);
 
   // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
   // compare and compensate.
@@ -3566,7 +3568,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
   // No overflow because we already ruled out minint.
   __ bind(&restore);
   __ movq(input_reg, kScratchRegister);  // Restore input_reg.
-  __ jmp(&done);
+  __ jmp(&done, dist);
 
   __ bind(&round_to_zero);
   // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
@@ -3639,7 +3641,7 @@ void LCodeGen::DoPower(LPower* instr) {
     __ CallStub(&stub);
   } else if (exponent_type.IsTagged()) {
     Label no_deopt;
-    __ JumpIfSmi(exponent, &no_deopt);
+    __ JumpIfSmi(exponent, &no_deopt, Label::kNear);
     __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
     DeoptimizeIf(not_equal, instr->environment());
     __ bind(&no_deopt);
@@ -3902,13 +3904,13 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
     // look at the first argument
     __ movq(rcx, Operand(rsp, 0));
     __ testq(rcx, rcx);
-    __ j(zero, &packed_case);
+    __ j(zero, &packed_case, Label::kNear);
 
     ElementsKind holey_kind = GetHoleyElementsKind(kind);
     ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
                                             override_mode);
     CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
     __ bind(&packed_case);
   }
@@ -4204,7 +4206,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
 
     Label have_value;
     __ ucomisd(value, value);
-    __ j(parity_odd, &have_value);  // NaN.
+    __ j(parity_odd, &have_value, Label::kNear);  // NaN.
 
     __ Set(kScratchRegister, BitCast<uint64_t>(
         FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
@@ -4673,7 +4675,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
 
     __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
     if (can_convert_undefined_to_nan) {
-      __ j(not_equal, &convert);
+      __ j(not_equal, &convert, Label::kNear);
    } else {
      DeoptimizeIf(not_equal, env);
    }
@@ -4971,12 +4973,12 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   Label success;
   for (int i = 0; i < map_set.size() - 1; i++) {
     Handle<Map> map = map_set.at(i).handle();
-    __ CompareMap(reg, map, &success);
-    __ j(equal, &success);
+    __ CompareMap(reg, map);
+    __ j(equal, &success, Label::kNear);
   }
 
   Handle<Map> map = map_set.at(map_set.size() - 1).handle();
-  __ CompareMap(reg, map, &success);
+  __ CompareMap(reg, map);
   if (instr->hydrogen()->has_migration_target()) {
     __ j(not_equal, deferred->entry());
   } else {
@@ -5008,8 +5010,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
   XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
   XMMRegister xmm_scratch = double_scratch0();
   Label is_smi, done, heap_number;
-
-  __ JumpIfSmi(input_reg, &is_smi);
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+  __ JumpIfSmi(input_reg, &is_smi, dist);
 
   // Check for heap number
   __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -5166,7 +5168,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
   int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
   Label allocated, runtime_allocate;
   __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
-  __ jmp(&allocated);
+  __ jmp(&allocated, Label::kNear);
 
   __ bind(&runtime_allocate);
   __ push(rbx);
@@ -5499,9 +5501,9 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
   Label load_cache, done;
   __ EnumLength(result, map);
   __ Cmp(result, Smi::FromInt(0));
-  __ j(not_equal, &load_cache);
+  __ j(not_equal, &load_cache, Label::kNear);
   __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&load_cache);
   __ LoadInstanceDescriptors(map, result);
   __ movq(result,
@@ -5529,7 +5531,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
   Label out_of_object, done;
   __ SmiToInteger32(index, index);
   __ cmpl(index, Immediate(0));
-  __ j(less, &out_of_object);
+  __ j(less, &out_of_object, Label::kNear);
   __ movq(object, FieldOperand(object,
                                index,
                                times_pointer_size,
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 090f961..c58a37c 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -220,6 +220,10 @@ class LCodeGen: public LCodeGenBase {
   void DeoptimizeIf(Condition cc, LEnvironment* environment);
   void ApplyCheckIf(Condition cc, LBoundsCheck* check);
 
+  bool DeoptEveryNTimes() {
+    return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
+  }
+
   void AddToTranslation(LEnvironment* environment,
                         Translation* translation,
                         LOperand* op,
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 9ffc451..c8d67b7 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -3035,9 +3035,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
 }
 
 
-void MacroAssembler::CompareMap(Register obj,
-                                Handle<Map> map,
-                                Label* early_success) {
+void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
   Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
 }
 
@@ -3050,10 +3048,8 @@ void MacroAssembler::CheckMap(Register obj,
     JumpIfSmi(obj, fail);
   }
 
-  Label success;
-  CompareMap(obj, map, &success);
+  CompareMap(obj, map);
   j(not_equal, fail);
-  bind(&success);
 }
 
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 7e00d64..2f7bdd2 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -937,13 +937,8 @@ class MacroAssembler: public Assembler {
                                 Label* fail,
                                 int elements_offset = 0);
 
-  // Compare an object's map with the specified map and its transitioned
-  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
-  // result of map compare. If multiple map compares are required, the compare
-  // sequences branches to early_success.
-  void CompareMap(Register obj,
-                  Handle<Map> map,
-                  Label* early_success);
+  // Compare an object's map with the specified map.
+  void CompareMap(Register obj, Handle<Map> map);
 
   // Check if the map of an object is equal to a specified map and branch to
   // label if not. Skip the smi check if not required (object is known to be a
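
For context: on ia32 and x64, a jump or branch to a label bound with
Label::kNear is assembled with a signed 8-bit displacement (2 bytes for both
jmp and jcc), while the default far form takes a 32-bit displacement (5 bytes
for jmp, 6 for jcc). Using near labels wherever the target is provably within
the rel8 range is a straight code-size win, which is what this patch applies
throughout the lithium code generators. The toy assembler below is a minimal
sketch of the two encodings and of the range check a near jump must satisfy;
it is not V8's Assembler, and all names in it are illustrative.

  #include <cassert>
  #include <cstdint>
  #include <cstdio>
  #include <vector>

  enum Distance { kNear, kFar };

  class ToyAssembler {
   public:
    // Emit a backward conditional jump to an already-bound offset.
    void JccBackward(int target_offset, Distance dist) {
      if (dist == kNear) {
        int rel8 = target_offset - (Offset() + 2);  // relative to next insn
        assert(rel8 >= -128 && rel8 <= 127 && "near jump out of rel8 range");
        buf_.push_back(0x74);  // je rel8 (one example condition code)
        buf_.push_back(static_cast<uint8_t>(rel8));
      } else {
        int32_t rel32 = target_offset - (Offset() + 6);
        buf_.push_back(0x0F);  // je rel32 is a two-byte opcode...
        buf_.push_back(0x84);
        for (int i = 0; i < 4; i++) {  // ...plus a 32-bit displacement
          buf_.push_back(static_cast<uint8_t>(rel32 >> (8 * i)));
        }
      }
    }
    int Offset() const { return static_cast<int>(buf_.size()); }

   private:
    std::vector<uint8_t> buf_;
  };

  int main() {
    ToyAssembler masm;
    int label = masm.Offset();       // "bind" a label at offset 0
    masm.JccBackward(label, kNear);  // 2 bytes
    masm.JccBackward(label, kFar);   // 6 bytes
    std::printf("code size: %d bytes\n", masm.Offset());  // prints 8
    return 0;
  }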
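
The places where the patch deliberately keeps far jumps are branches that hop
over DeoptimizeIf() sites: under --deopt-every-n-times (the condition the new
DeoptEveryNTimes() helper factors out), every deopt check additionally emits a
stress sequence (pushfd, counter load, compare, and so on), so a branch that
fits in a rel8 displacement in the normal build can overflow it. That is why
DoWrapReceiver, DoMathRound, and DoClampTToUint8 now select
"DeoptEveryNTimes() ? Label::kFar : Label::kNear". The sketch below models
that size effect; the byte counts are assumptions chosen for illustration,
not measurements of V8's generated code.

  #include <cstdio>

  int main() {
    const int kDeoptSiteBytes = 10;    // assumed normal DeoptimizeIf() size
    const int kStressExtraBytes = 30;  // assumed growth under stress deopt
    const int kSitesSkipped = 4;       // deopt checks between jump and label

    for (int stress = 0; stress <= 1; stress++) {
      int span = kSitesSkipped *
                 (kDeoptSiteBytes + (stress ? kStressExtraBytes : 0));
      const char* dist = (span <= 127) ? "Label::kNear" : "Label::kFar";
      std::printf("stress=%d span=%d bytes -> %s\n", stress, span, dist);
    }
    return 0;
  }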
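
The CompareMap() change is a cleanup rather than an optimization: the
early_success label was a leftover from when CompareMap() also compared
transitioned elements maps, and no remaining caller needed it, so the function
now just sets the flags. A caller probing several maps, such as DoCheckMaps(),
binds its own local success label. Below is a minimal C++ model of that
caller-side pattern, written with toy types rather than V8's API.

  #include <cstdio>
  #include <vector>

  struct Map { int id; };
  struct Object { const Map* map; };

  // After the change, CompareMap is a pure comparison (in the real
  // assembler it only sets the flags); no early_success parameter.
  static bool CompareMap(const Object& obj, const Map* map) {
    return obj.map == map;
  }

  // Caller-side pattern from DoCheckMaps: try all but the last map and
  // short-circuit on a hit, then let the final compare decide.
  static bool CheckMaps(const Object& obj,
                        const std::vector<const Map*>& maps) {
    for (size_t i = 0; i + 1 < maps.size(); i++) {
      if (CompareMap(obj, maps[i])) return true;  // j(equal, &success, kNear)
    }
    return CompareMap(obj, maps.back());  // flags feed the deopt branch
  }

  int main() {
    Map a{1}, b{2};
    Object obj{&b};
    std::vector<const Map*> maps{&a, &b};
    std::printf("map check: %s\n", CheckMaps(obj, maps) ? "success" : "deopt");
    return 0;
  }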