From: haitao.feng@intel.com
Date: Thu, 7 Nov 2013 09:34:52 +0000 (+0000)
Subject: Refine CountOperation of FullCodeGen
X-Git-Tag: upstream/4.7.83~11853
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=e7ef18110de1aa92759c8956c7a55755c3d78447;p=platform%2Fupstream%2Fv8.git

Refine CountOperation of FullCodeGen

R=danno@chromium.org

Review URL: https://codereview.chromium.org/42973002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@17547 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---

diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 1ee612b..a773893 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -4394,14 +4394,44 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
     PrepareForBailoutForId(prop->LoadId(), TOS_REG);
   }
 
-  // Call ToNumber only if operand is not a smi.
-  Label no_conversion;
+  // Inline smi case if we are in a loop.
+  Label stub_call, done;
+  JumpPatchSite patch_site(masm_);
+
+  int count_value = expr->op() == Token::INC ? 1 : -1;
   if (ShouldInlineSmiCase(expr->op())) {
-    __ JumpIfSmi(r0, &no_conversion);
+    Label slow;
+    patch_site.EmitJumpIfNotSmi(r0, &slow);
+
+    // Save result for postfix expressions.
+    if (expr->is_postfix()) {
+      if (!context()->IsEffect()) {
+        // Save the result on the stack. If we have a named or keyed property
+        // we store the result under the receiver that is currently on top
+        // of the stack.
+        switch (assign_type) {
+          case VARIABLE:
+            __ push(r0);
+            break;
+          case NAMED_PROPERTY:
+            __ str(r0, MemOperand(sp, kPointerSize));
+            break;
+          case KEYED_PROPERTY:
+            __ str(r0, MemOperand(sp, 2 * kPointerSize));
+            break;
+        }
+      }
+    }
+
+    __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
+    __ b(vc, &done);
+    // Call stub. Undo operation first.
+    __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
+    __ jmp(&stub_call);
+    __ bind(&slow);
   }
   ToNumberStub convert_stub;
   __ CallStub(&convert_stub);
-  __ bind(&no_conversion);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -4424,22 +4454,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
     }
   }
 
-  // Inline smi case if we are in a loop.
-  Label stub_call, done;
-  JumpPatchSite patch_site(masm_);
-
-  int count_value = expr->op() == Token::INC ? 1 : -1;
-  if (ShouldInlineSmiCase(expr->op())) {
-    __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
-    __ b(vs, &stub_call);
-    // We could eliminate this smi check if we split the code at
-    // the first smi check before calling ToNumber.
-    patch_site.EmitJumpIfSmi(r0, &done);
-
-    __ bind(&stub_call);
-    // Call stub. Undo operation first.
-    __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
-  }
+  __ bind(&stub_call);
   __ mov(r1, r0);
   __ mov(r0, Operand(Smi::FromInt(count_value)));
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 09f5de1..82d23e7 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -4394,14 +4394,50 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
     PrepareForBailoutForId(prop->LoadId(), TOS_REG);
   }
 
-  // Call ToNumber only if operand is not a smi.
-  Label no_conversion;
+  // Inline smi case if we are in a loop.
+  Label done, stub_call;
+  JumpPatchSite patch_site(masm_);
   if (ShouldInlineSmiCase(expr->op())) {
-    __ JumpIfSmi(eax, &no_conversion, Label::kNear);
+    Label slow;
+    patch_site.EmitJumpIfNotSmi(eax, &slow, Label::kNear);
+
+    // Save result for postfix expressions.
+    if (expr->is_postfix()) {
+      if (!context()->IsEffect()) {
+        // Save the result on the stack. If we have a named or keyed property
+        // we store the result under the receiver that is currently on top
+        // of the stack.
+        switch (assign_type) {
+          case VARIABLE:
+            __ push(eax);
+            break;
+          case NAMED_PROPERTY:
+            __ mov(Operand(esp, kPointerSize), eax);
+            break;
+          case KEYED_PROPERTY:
+            __ mov(Operand(esp, 2 * kPointerSize), eax);
+            break;
+        }
+      }
+    }
+
+    if (expr->op() == Token::INC) {
+      __ add(eax, Immediate(Smi::FromInt(1)));
+    } else {
+      __ sub(eax, Immediate(Smi::FromInt(1)));
+    }
+    __ j(no_overflow, &done, Label::kNear);
+    // Call stub. Undo operation first.
+    if (expr->op() == Token::INC) {
+      __ sub(eax, Immediate(Smi::FromInt(1)));
+    } else {
+      __ add(eax, Immediate(Smi::FromInt(1)));
+    }
+    __ jmp(&stub_call, Label::kNear);
+    __ bind(&slow);
   }
   ToNumberStub convert_stub;
   __ CallStub(&convert_stub);
-  __ bind(&no_conversion);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -4423,34 +4459,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
     }
   }
 
-  // Inline smi case if we are in a loop.
-  Label done, stub_call;
-  JumpPatchSite patch_site(masm_);
-
-  if (ShouldInlineSmiCase(expr->op())) {
-    if (expr->op() == Token::INC) {
-      __ add(eax, Immediate(Smi::FromInt(1)));
-    } else {
-      __ sub(eax, Immediate(Smi::FromInt(1)));
-    }
-    __ j(overflow, &stub_call, Label::kNear);
-    // We could eliminate this smi check if we split the code at
-    // the first smi check before calling ToNumber.
-    patch_site.EmitJumpIfSmi(eax, &done, Label::kNear);
-
-    __ bind(&stub_call);
-    // Call stub. Undo operation first.
-    if (expr->op() == Token::INC) {
-      __ sub(eax, Immediate(Smi::FromInt(1)));
-    } else {
-      __ add(eax, Immediate(Smi::FromInt(1)));
-    }
-  }
-
   // Record position before stub call.
   SetSourcePosition(expr->position());
 
   // Call stub for +1/-1.
+  __ bind(&stub_call);
   __ mov(edx, eax);
   __ mov(eax, Immediate(Smi::FromInt(1)));
   BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index f69dfb9..2e988c8 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -4377,14 +4377,47 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
     PrepareForBailoutForId(prop->LoadId(), TOS_REG);
   }
 
-  // Call ToNumber only if operand is not a smi.
-  Label no_conversion;
+  // Inline smi case if we are in a loop.
+  Label done, stub_call;
+  JumpPatchSite patch_site(masm_);
   if (ShouldInlineSmiCase(expr->op())) {
-    __ JumpIfSmi(rax, &no_conversion, Label::kNear);
+    Label slow;
+    patch_site.EmitJumpIfNotSmi(rax, &slow, Label::kNear);
+
+    // Save result for postfix expressions.
+    if (expr->is_postfix()) {
+      if (!context()->IsEffect()) {
+        // Save the result on the stack. If we have a named or keyed property
+        // we store the result under the receiver that is currently on top
+        // of the stack.
+        switch (assign_type) {
+          case VARIABLE:
+            __ push(rax);
+            break;
+          case NAMED_PROPERTY:
+            __ movq(Operand(rsp, kPointerSize), rax);
+            break;
+          case KEYED_PROPERTY:
+            __ movq(Operand(rsp, 2 * kPointerSize), rax);
+            break;
+        }
+      }
+    }
+
+    SmiOperationExecutionMode mode;
+    mode.Add(PRESERVE_SOURCE_REGISTER);
+    mode.Add(BAILOUT_ON_NO_OVERFLOW);
+    if (expr->op() == Token::INC) {
+      __ SmiAddConstant(rax, rax, Smi::FromInt(1), mode, &done, Label::kNear);
+    } else {
+      __ SmiSubConstant(rax, rax, Smi::FromInt(1), mode, &done, Label::kNear);
+    }
+    __ jmp(&stub_call, Label::kNear);
+    __ bind(&slow);
   }
+
   ToNumberStub convert_stub;
   __ CallStub(&convert_stub);
-  __ bind(&no_conversion);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -4406,34 +4439,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
     }
   }
 
-  // Inline smi case if we are in a loop.
-  Label done, stub_call;
-  JumpPatchSite patch_site(masm_);
-
-  if (ShouldInlineSmiCase(expr->op())) {
-    if (expr->op() == Token::INC) {
-      __ SmiAddConstant(rax, rax, Smi::FromInt(1));
-    } else {
-      __ SmiSubConstant(rax, rax, Smi::FromInt(1));
-    }
-    __ j(overflow, &stub_call, Label::kNear);
-    // We could eliminate this smi check if we split the code at
-    // the first smi check before calling ToNumber.
-    patch_site.EmitJumpIfSmi(rax, &done, Label::kNear);
-
-    __ bind(&stub_call);
-    // Call stub. Undo operation first.
-    if (expr->op() == Token::INC) {
-      __ SmiSubConstant(rax, rax, Smi::FromInt(1));
-    } else {
-      __ SmiAddConstant(rax, rax, Smi::FromInt(1));
-    }
-  }
-
   // Record position before stub call.
   SetSourcePosition(expr->position());
 
   // Call stub for +1/-1.
+  __ bind(&stub_call);
   __ movq(rdx, rax);
   __ Move(rax, Smi::FromInt(1));
   BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 586e496..9ffc451 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -1516,7 +1516,8 @@ void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
 void MacroAssembler::SmiAddConstant(Register dst,
                                     Register src,
                                     Smi* constant,
-                                    Label* on_not_smi_result,
+                                    SmiOperationExecutionMode mode,
+                                    Label* bailout_label,
                                     Label::Distance near_jump) {
   if (constant->value() == 0) {
     if (!dst.is(src)) {
@@ -1524,19 +1525,32 @@ void MacroAssembler::SmiAddConstant(Register dst,
     }
   } else if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
-
-    Label done;
     LoadSmiConstant(kScratchRegister, constant);
     addq(dst, kScratchRegister);
-    j(no_overflow, &done, Label::kNear);
-    // Restore src.
-    subq(dst, kScratchRegister);
-    jmp(on_not_smi_result, near_jump);
-    bind(&done);
+    if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
+      j(no_overflow, bailout_label, near_jump);
+      ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
+      subq(dst, kScratchRegister);
+    } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
+      if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
+        Label done;
+        j(no_overflow, &done, Label::kNear);
+        subq(dst, kScratchRegister);
+        jmp(bailout_label, near_jump);
+        bind(&done);
+      } else {
+        // Bailout if overflow without preserving src.
+        j(overflow, bailout_label, near_jump);
+      }
+    } else {
+      CHECK(mode.IsEmpty());
+    }
   } else {
+    ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
+    ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
     LoadSmiConstant(dst, constant);
     addq(dst, src);
-    j(overflow, on_not_smi_result, near_jump);
+    j(overflow, bailout_label, near_jump);
   }
 }
 
@@ -1568,7 +1582,8 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
 void MacroAssembler::SmiSubConstant(Register dst,
                                     Register src,
                                     Smi* constant,
-                                    Label* on_not_smi_result,
+                                    SmiOperationExecutionMode mode,
+                                    Label* bailout_label,
                                     Label::Distance near_jump) {
   if (constant->value() == 0) {
     if (!dst.is(src)) {
@@ -1576,35 +1591,40 @@ void MacroAssembler::SmiSubConstant(Register dst,
     }
   } else if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
-    if (constant->value() == Smi::kMinValue) {
-      // Subtracting min-value from any non-negative value will overflow.
-      // We test the non-negativeness before doing the subtraction.
-      testq(src, src);
-      j(not_sign, on_not_smi_result, near_jump);
-      LoadSmiConstant(kScratchRegister, constant);
-      subq(dst, kScratchRegister);
+    LoadSmiConstant(kScratchRegister, constant);
+    subq(dst, kScratchRegister);
+    if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
+      j(no_overflow, bailout_label, near_jump);
+      ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
+      addq(dst, kScratchRegister);
+    } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
+      if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
+        Label done;
+        j(no_overflow, &done, Label::kNear);
+        addq(dst, kScratchRegister);
+        jmp(bailout_label, near_jump);
+        bind(&done);
+      } else {
+        // Bailout if overflow without preserving src.
+        j(overflow, bailout_label, near_jump);
+      }
     } else {
-      // Subtract by adding the negation.
-      LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
-      addq(kScratchRegister, dst);
-      j(overflow, on_not_smi_result, near_jump);
-      movq(dst, kScratchRegister);
+      CHECK(mode.IsEmpty());
     }
   } else {
+    ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
+    ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
     if (constant->value() == Smi::kMinValue) {
-      // Subtracting min-value from any non-negative value will overflow.
-      // We test the non-negativeness before doing the subtraction.
-      testq(src, src);
-      j(not_sign, on_not_smi_result, near_jump);
-      LoadSmiConstant(dst, constant);
-      // Adding and subtracting the min-value gives the same result, it only
-      // differs on the overflow bit, which we don't check here.
-      addq(dst, src);
+      ASSERT(!dst.is(kScratchRegister));
+      movq(dst, src);
+      LoadSmiConstant(kScratchRegister, constant);
+      subq(dst, kScratchRegister);
+      j(overflow, bailout_label, near_jump);
     } else {
       // Subtract by adding the negation.
       LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
       addq(dst, src);
-      j(overflow, on_not_smi_result, near_jump);
+      j(overflow, bailout_label, near_jump);
     }
   }
 }
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 3143d3d..7e00d64 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -53,6 +53,22 @@ typedef Operand MemOperand;
 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
 
+enum SmiOperationConstraint {
+  PRESERVE_SOURCE_REGISTER,
+  BAILOUT_ON_NO_OVERFLOW,
+  BAILOUT_ON_OVERFLOW,
+  NUMBER_OF_CONSTRAINTS
+};
+
+STATIC_ASSERT(NUMBER_OF_CONSTRAINTS <= 8);
+
+class SmiOperationExecutionMode : public EnumSet<SmiOperationConstraint, byte> {
+ public:
+  SmiOperationExecutionMode() : EnumSet<SmiOperationConstraint, byte>(0) { }
+  explicit SmiOperationExecutionMode(byte bits)
+      : EnumSet<SmiOperationConstraint, byte>(bits) { }
+};
+
 bool AreAliased(Register r1, Register r2, Register r3, Register r4);
 
 // Forward declaration.
@@ -547,7 +563,8 @@ class MacroAssembler: public Assembler {
   void SmiAddConstant(Register dst,
                       Register src,
                       Smi* constant,
-                      Label* on_not_smi_result,
+                      SmiOperationExecutionMode mode,
+                      Label* bailout_label,
                       Label::Distance near_jump = Label::kFar);
 
   // Subtract an integer constant from a tagged smi, giving a tagged smi as
@@ -560,7 +577,8 @@ class MacroAssembler: public Assembler {
   void SmiSubConstant(Register dst,
                       Register src,
                       Smi* constant,
-                      Label* on_not_smi_result,
+                      SmiOperationExecutionMode mode,
+                      Label* bailout_label,
                       Label::Distance near_jump = Label::kFar);
 
   // Negating a smi can give a negative zero or too large positive value.
diff --git a/test/cctest/test-macro-assembler-x64.cc b/test/cctest/test-macro-assembler-x64.cc
index abde4c0..8262ab6 100644
--- a/test/cctest/test-macro-assembler-x64.cc
+++ b/test/cctest/test-macro-assembler-x64.cc
@@ -35,51 +35,53 @@
 #include "serialize.h"
 #include "cctest.h"
 
-using v8::internal::Assembler;
-using v8::internal::CodeDesc;
-using v8::internal::Condition;
-using v8::internal::FUNCTION_CAST;
-using v8::internal::HandleScope;
-using v8::internal::Immediate;
-using v8::internal::Isolate;
-using v8::internal::Label;
-using v8::internal::MacroAssembler;
-using v8::internal::OS;
-using v8::internal::Operand;
-using v8::internal::RelocInfo;
-using v8::internal::Representation;
-using v8::internal::Smi;
-using v8::internal::SmiIndex;
-using v8::internal::byte;
-using v8::internal::carry;
-using v8::internal::greater;
-using v8::internal::greater_equal;
-using v8::internal::kIntSize;
-using v8::internal::kPointerSize;
-using v8::internal::kSmiTagMask;
-using v8::internal::kSmiValueSize;
-using v8::internal::less_equal;
-using v8::internal::negative;
-using v8::internal::not_carry;
-using v8::internal::not_equal;
-using v8::internal::not_zero;
-using v8::internal::positive;
-using v8::internal::r11;
-using v8::internal::r13;
-using v8::internal::r14;
-using v8::internal::r15;
-using v8::internal::r8;
-using v8::internal::r9;
-using v8::internal::rax;
-using v8::internal::rbp;
-using v8::internal::rbx;
-using v8::internal::rcx;
-using v8::internal::rdi;
-using v8::internal::rdx;
-using v8::internal::rsi;
-using v8::internal::rsp;
-using v8::internal::times_pointer_size;
-using v8::internal::Address;
+namespace i = v8::internal;
+using i::Address;
+using i::Assembler;
+using i::CodeDesc;
+using i::Condition;
+using i::FUNCTION_CAST;
+using i::HandleScope;
+using i::Immediate;
+using i::Isolate;
+using i::Label;
+using i::MacroAssembler;
+using i::OS;
+using i::Operand;
+using i::RelocInfo;
+using i::Representation;
+using i::Smi;
+using i::SmiIndex;
+using i::byte;
+using i::carry;
+using i::greater;
+using i::greater_equal;
+using i::kIntSize;
+using i::kPointerSize;
+using i::kSmiTagMask;
+using i::kSmiValueSize;
+using i::less_equal;
+using i::negative;
+using i::not_carry;
+using i::not_equal;
+using i::equal;
+using i::not_zero;
+using i::positive;
+using i::r11;
+using i::r13;
+using i::r14;
+using i::r15;
+using i::r8;
+using i::r9;
+using i::rax;
+using i::rbp;
+using i::rbx;
+using i::rcx;
+using i::rdi;
+using i::rdx;
+using i::rsi;
+using i::rsp;
+using i::times_pointer_size;
 
 // Test the x64 assembler by compiling some simple functions into
 // a buffer and executing them.  These tests do not initialize the
@@ -97,8 +99,8 @@ typedef int (*F0)();
 
 static void EntryCode(MacroAssembler* masm) {
   // Smi constant register is callee save.
-  __ push(v8::internal::kSmiConstantRegister);
-  __ push(v8::internal::kRootRegister);
+  __ push(i::kSmiConstantRegister);
+  __ push(i::kRootRegister);
   __ InitializeSmiConstantRegister();
   __ InitializeRootRegister();
 }
@@ -107,11 +109,11 @@ static void EntryCode(MacroAssembler* masm) {
 
 static void ExitCode(MacroAssembler* masm) {
   // Return -1 if kSmiConstantRegister was clobbered during the test.
   __ Move(rdx, Smi::FromInt(1));
-  __ cmpq(rdx, v8::internal::kSmiConstantRegister);
+  __ cmpq(rdx, i::kSmiConstantRegister);
   __ movq(rdx, Immediate(-1));
   __ cmovq(not_equal, rax, rdx);
-  __ pop(v8::internal::kRootRegister);
-  __ pop(v8::internal::kSmiConstantRegister);
+  __ pop(i::kRootRegister);
+  __ pop(i::kSmiConstantRegister);
 }
@@ -152,7 +154,7 @@ static void TestMoveSmi(MacroAssembler* masm, Label* exit, int id, Smi* value) {
 
 // Test that we can move a Smi value literally into a register.
 TEST(SmiMove) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
@@ -240,7 +242,7 @@ void TestSmiCompare(MacroAssembler* masm, Label* exit, int id, int x, int y) {
 
 // Test that we can compare smis for equality (and more).
 TEST(SmiCompare) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
@@ -292,7 +294,7 @@ TEST(SmiCompare) {
 
 
 TEST(Integer32ToSmi) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
@@ -421,7 +423,7 @@ void TestI64PlusConstantToSmi(MacroAssembler* masm,
 
 
 TEST(Integer64PlusConstantToSmi) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
@@ -466,7 +468,7 @@ TEST(Integer64PlusConstantToSmi) {
 
 
 TEST(SmiCheck) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
@@ -714,7 +716,7 @@ void TestSmiNeg(MacroAssembler* masm, Label* exit, int id, int x) {
 
 
 TEST(SmiNeg) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
@@ -771,7 +773,7 @@ static void SmiAddTest(MacroAssembler* masm,
   __ j(not_equal, exit);
 
   __ incq(rax);
-  __ SmiAdd(rcx, rcx, rdx, exit); \
+  __ SmiAdd(rcx, rcx, rdx, exit);
   __ cmpq(rcx, r8);
   __ j(not_equal, exit);
@@ -790,13 +792,30 @@ static void SmiAddTest(MacroAssembler* masm,
   __ movl(rcx, Immediate(first));
   __ Integer32ToSmi(rcx, rcx);
 
+  i::SmiOperationExecutionMode mode;
+  mode.Add(i::PRESERVE_SOURCE_REGISTER);
+  mode.Add(i::BAILOUT_ON_OVERFLOW);
   __ incq(rax);
-  __ SmiAddConstant(r9, rcx, Smi::FromInt(second), exit);
+  __ SmiAddConstant(r9, rcx, Smi::FromInt(second), mode, exit);
   __ cmpq(r9, r8);
   __ j(not_equal, exit);
 
   __ incq(rax);
-  __ SmiAddConstant(rcx, rcx, Smi::FromInt(second), exit);
+  __ SmiAddConstant(rcx, rcx, Smi::FromInt(second), mode, exit);
+  __ cmpq(rcx, r8);
+  __ j(not_equal, exit);
+
+  __ movl(rcx, Immediate(first));
+  __ Integer32ToSmi(rcx, rcx);
+
+  mode.RemoveAll();
+  mode.Add(i::PRESERVE_SOURCE_REGISTER);
+  mode.Add(i::BAILOUT_ON_NO_OVERFLOW);
+  Label done;
+  __ incq(rax);
+  __ SmiAddConstant(rcx, rcx, Smi::FromInt(second), mode, &done);
+  __ jmp(exit);
+  __ bind(&done);
   __ cmpq(rcx, r8);
   __ j(not_equal, exit);
 }
@@ -836,11 +855,14 @@ static void SmiAddOverflowTest(MacroAssembler* masm,
     __ j(not_equal, exit);
   }
 
+  i::SmiOperationExecutionMode mode;
+  mode.Add(i::PRESERVE_SOURCE_REGISTER);
+  mode.Add(i::BAILOUT_ON_OVERFLOW);
   __ movq(rcx, r11);
   {
     Label overflow_ok;
     __ incq(rax);
-    __ SmiAddConstant(r9, rcx, Smi::FromInt(y_min), &overflow_ok);
+    __ SmiAddConstant(r9, rcx, Smi::FromInt(y_min), mode, &overflow_ok);
     __ jmp(exit);
     __ bind(&overflow_ok);
     __ incq(rax);
@@ -851,7 +873,7 @@ static void SmiAddOverflowTest(MacroAssembler* masm,
   {
     Label overflow_ok;
     __ incq(rax);
-    __ SmiAddConstant(rcx, rcx, Smi::FromInt(y_min), &overflow_ok);
+    __ SmiAddConstant(rcx, rcx, Smi::FromInt(y_min), mode, &overflow_ok);
     __ jmp(exit);
     __ bind(&overflow_ok);
     __ incq(rax);
@@ -887,7 +909,7 @@ static void SmiAddOverflowTest(MacroAssembler* masm,
   {
     Label overflow_ok;
     __ incq(rax);
-    __ SmiAddConstant(r9, rcx, Smi::FromInt(y_max), &overflow_ok);
+    __ SmiAddConstant(r9, rcx, Smi::FromInt(y_max), mode, &overflow_ok);
     __ jmp(exit);
     __ bind(&overflow_ok);
     __ incq(rax);
@@ -895,21 +917,23 @@ static void SmiAddOverflowTest(MacroAssembler* masm,
     __ j(not_equal, exit);
   }
 
+  mode.RemoveAll();
+  mode.Add(i::BAILOUT_ON_OVERFLOW);
   {
     Label overflow_ok;
     __ incq(rax);
-    __ SmiAddConstant(rcx, rcx, Smi::FromInt(y_max), &overflow_ok);
+    __ SmiAddConstant(rcx, rcx, Smi::FromInt(y_max), mode, &overflow_ok);
     __ jmp(exit);
     __ bind(&overflow_ok);
     __ incq(rax);
     __ cmpq(rcx, r11);
-    __ j(not_equal, exit);
+    __ j(equal, exit);
   }
 }
 
 
 TEST(SmiAdd) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
@@ -988,15 +1012,30 @@ static void SmiSubTest(MacroAssembler* masm,
   __ cmpq(rcx, r8);
   __ j(not_equal, exit);
 
+  i::SmiOperationExecutionMode mode;
+  mode.Add(i::PRESERVE_SOURCE_REGISTER);
+  mode.Add(i::BAILOUT_ON_OVERFLOW);
   __ Move(rcx, Smi::FromInt(first));
-
   __ incq(rax);  // Test 4.
-  __ SmiSubConstant(r9, rcx, Smi::FromInt(second), exit);
-  __ cmpq(r9, r8);
+  __ SmiSubConstant(rcx, rcx, Smi::FromInt(second), mode, exit);
+  __ cmpq(rcx, r8);
   __ j(not_equal, exit);
 
+  __ Move(rcx, Smi::FromInt(first));
   __ incq(rax);  // Test 5.
-  __ SmiSubConstant(rcx, rcx, Smi::FromInt(second), exit);
+  __ SmiSubConstant(r9, rcx, Smi::FromInt(second), mode, exit);
+  __ cmpq(r9, r8);
+  __ j(not_equal, exit);
+
+  mode.RemoveAll();
+  mode.Add(i::PRESERVE_SOURCE_REGISTER);
+  mode.Add(i::BAILOUT_ON_NO_OVERFLOW);
+  __ Move(rcx, Smi::FromInt(first));
+  Label done;
+  __ incq(rax);  // Test 6.
+  __ SmiSubConstant(rcx, rcx, Smi::FromInt(second), mode, &done);
+  __ jmp(exit);
+  __ bind(&done);
   __ cmpq(rcx, r8);
   __ j(not_equal, exit);
 }
@@ -1036,11 +1075,15 @@ static void SmiSubOverflowTest(MacroAssembler* masm,
     __ j(not_equal, exit);
   }
 
+  i::SmiOperationExecutionMode mode;
+  mode.Add(i::PRESERVE_SOURCE_REGISTER);
+  mode.Add(i::BAILOUT_ON_OVERFLOW);
+
   __ movq(rcx, r11);
   {
     Label overflow_ok;
     __ incq(rax);
-    __ SmiSubConstant(r9, rcx, Smi::FromInt(y_min), &overflow_ok);
+    __ SmiSubConstant(r9, rcx, Smi::FromInt(y_min), mode, &overflow_ok);
     __ jmp(exit);
     __ bind(&overflow_ok);
     __ incq(rax);
@@ -1051,7 +1094,7 @@ static void SmiSubOverflowTest(MacroAssembler* masm,
   {
     Label overflow_ok;
     __ incq(rax);
-    __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_min), &overflow_ok);
+    __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_min), mode, &overflow_ok);
     __ jmp(exit);
     __ bind(&overflow_ok);
     __ incq(rax);
@@ -1087,7 +1130,7 @@ static void SmiSubOverflowTest(MacroAssembler* masm,
   {
     Label overflow_ok;
     __ incq(rax);
-    __ SmiSubConstant(r9, rcx, Smi::FromInt(y_max), &overflow_ok);
+    __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_max), mode, &overflow_ok);
     __ jmp(exit);
     __ bind(&overflow_ok);
     __ incq(rax);
@@ -1095,21 +1138,24 @@ static void SmiSubOverflowTest(MacroAssembler* masm,
     __ j(not_equal, exit);
   }
 
+  mode.RemoveAll();
+  mode.Add(i::BAILOUT_ON_OVERFLOW);
+
   __ movq(rcx, r11);
   {
     Label overflow_ok;
     __ incq(rax);
-    __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_max), &overflow_ok);
+    __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_max), mode, &overflow_ok);
     __ jmp(exit);
     __ bind(&overflow_ok);
     __ incq(rax);
     __ cmpq(rcx, r11);
-    __ j(not_equal, exit);
+    __ j(equal, exit);
   }
 }
 
 
 TEST(SmiSub) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
@@ -1201,7 +1247,7 @@ void TestSmiMul(MacroAssembler* masm, Label* exit, int id, int x, int y) {
 
 
 TEST(SmiMul) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
@@ -1307,7 +1353,7 @@ void TestSmiDiv(MacroAssembler* masm, Label* exit, int id, int x, int y) {
 
 
 TEST(SmiDiv) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
@@ -1418,7 +1464,7 @@ void TestSmiMod(MacroAssembler* masm, Label* exit, int id, int x, int y) {
 
 
 TEST(SmiMod) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
@@ -1516,7 +1562,7 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
 
 
 TEST(SmiIndex) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
@@ -1586,7 +1632,7 @@ void TestSelectNonSmi(MacroAssembler* masm, Label* exit, int id, int x, int y) {
 
 
 TEST(SmiSelectNonSmi) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
@@ -1666,7 +1712,7 @@ void TestSmiAnd(MacroAssembler* masm, Label* exit, int id, int x, int y) {
 
 
 TEST(SmiAnd) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
@@ -1748,7 +1794,7 @@ void TestSmiOr(MacroAssembler* masm, Label* exit, int id, int x, int y) {
 
 
 TEST(SmiOr) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
@@ -1832,7 +1878,7 @@ void TestSmiXor(MacroAssembler* masm, Label* exit, int id, int x, int y) {
 
 
 TEST(SmiXor) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
@@ -1900,7 +1946,7 @@ void TestSmiNot(MacroAssembler* masm, Label* exit, int id, int x) {
 
 
 TEST(SmiNot) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
@@ -1997,7 +2043,7 @@ void TestSmiShiftLeft(MacroAssembler* masm, Label* exit, int id, int x) {
 
 
 TEST(SmiShiftLeft) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
@@ -2104,7 +2150,7 @@ void TestSmiShiftLogicalRight(MacroAssembler* masm,
 
 
 TEST(SmiShiftLogicalRight) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
@@ -2174,7 +2220,7 @@ void TestSmiShiftArithmeticRight(MacroAssembler* masm,
 
 
 TEST(SmiShiftArithmeticRight) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
@@ -2239,7 +2285,7 @@ void TestPositiveSmiPowerUp(MacroAssembler* masm, Label* exit, int id, int x) {
 
 
 TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
@@ -2280,7 +2326,7 @@ TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
 
 
 TEST(OperandOffset) {
-  v8::internal::V8::Initialize(NULL);
+  i::V8::Initialize(NULL);
   int data[256];
   for (int i = 0; i < 256; i++) { data[i] = i * 0x01010101; }
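
The change turns the count operation's fast path inside out: the smi check now happens first via patch_site.EmitJumpIfNotSmi, the postfix result is saved while the operand is still known to be a smi, and the new SmiOperationExecutionMode flags tell SmiAddConstant/SmiSubConstant how to bail out. The standalone C++ sketch below models just those bailout semantics, with 32-bit integers standing in for tagged smis; SmiAddConstantModel, Mode, and the test harness are illustrative names assumed for this note, not V8 code.

// Minimal model of the SmiOperationExecutionMode semantics introduced above.
// Assumes two's-complement wrap-around on the int64 -> int32 narrowing, which
// mirrors what addq/subq do to the tagged value in a register.
#include <cassert>
#include <cstdint>
#include <set>

enum SmiOperationConstraint {
  PRESERVE_SOURCE_REGISTER,
  BAILOUT_ON_NO_OVERFLOW,
  BAILOUT_ON_OVERFLOW,
};

typedef std::set<SmiOperationConstraint> Mode;

// Returns true when control "jumps" to bailout_label; *dst plays the role of
// the destination register and is updated the way the emitted code would.
bool SmiAddConstantModel(int32_t* dst, int32_t src, int32_t constant,
                         const Mode& mode) {
  int64_t wide = static_cast<int64_t>(src) + constant;
  bool overflow = wide != static_cast<int32_t>(wide);
  *dst = static_cast<int32_t>(wide);        // the addq has already executed
  if (mode.count(BAILOUT_ON_NO_OVERFLOW)) {
    if (!overflow) return true;             // j(no_overflow, bailout_label)
    assert(mode.count(PRESERVE_SOURCE_REGISTER));
    *dst = src;                             // subq undoes the addition
    return false;                           // fall through to the slow path
  }
  if (mode.count(BAILOUT_ON_OVERFLOW)) {
    if (overflow) {
      if (mode.count(PRESERVE_SOURCE_REGISTER)) *dst = src;
      return true;                          // jump to bailout_label
    }
    return false;
  }
  assert(mode.empty());                     // CHECK(mode.IsEmpty())
  return false;
}

int main() {
  // Count-operation fast path: BAILOUT_ON_NO_OVERFLOW makes the common case
  // a single branch to 'done'; on overflow the source is restored so the
  // BinaryOpStub can redo the +1 against the original value.
  Mode mode;
  mode.insert(PRESERVE_SOURCE_REGISTER);
  mode.insert(BAILOUT_ON_NO_OVERFLOW);
  int32_t r = 0;
  assert(SmiAddConstantModel(&r, 41, 1, mode) && r == 42);
  assert(!SmiAddConstantModel(&r, INT32_MAX, 1, mode) && r == INT32_MAX);
  return 0;
}

Read against the x64 hunk in full-codegen-x64.cc, the &done label is the bailout target of the no-overflow case, and the jmp(&stub_call) only runs after the undo, which is why VisitCountOperation can bind stub_call once at the shared slow path.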