From 67ff68783a3c438dbbc363b651766fae52f12898 Mon Sep 17 00:00:00 2001
From: "rmcilroy@chromium.org"
Date: Tue, 6 May 2014 19:07:31 +0000
Subject: [PATCH] Arm64: Fix check errors on Arm64 debug after r21177.

r21177 added extra AssertStackConsistency() checks, which increased code size
in debug builds and caused the generated code to overflow some assembler
buffers. Increased the affected buffers to compensate.

Also, ProfileEntryHookStub could use the wrong number of instructions for
kProfileEntryHookCallSize, depending on whether debug code was being emitted
or ALWAYS_ALIGN_CSP was enabled. Fixed this by taking ALWAYS_ALIGN_CSP into
account and by ensuring that no debug code is emitted during
MaybeCallEntryHook().

TBR=ulan@chromium.org

Review URL: https://codereview.chromium.org/263213008

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@21179 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/arm64/code-stubs-arm64.cc        | 21 +++++++++++++++------
 src/builtins.cc                      | 10 +++++++---
 test/cctest/test-code-stubs-arm64.cc |  2 +-
 3 files changed, 23 insertions(+), 10 deletions(-)

diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc
index a2dd220..fec8936 100644
--- a/src/arm64/code-stubs-arm64.cc
+++ b/src/arm64/code-stubs-arm64.cc
@@ -4661,22 +4661,31 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
 }
 
 
-// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
-// a "Push lr" instruction, followed by a call.
-static const unsigned int kProfileEntryHookCallSize =
-    Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
+static unsigned int GetProfileEntryHookCallSize(MacroAssembler* masm) {
+  // The entry hook is a "BumpSystemStackPointer" instruction (sub),
+  // followed by a "Push lr" instruction, followed by a call.
+  unsigned int size =
+      Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
+  if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+    // If ALWAYS_ALIGN_CSP then there will be an extra bic instruction in
+    // "BumpSystemStackPointer".
+    size += kInstructionSize;
+  }
+  return size;
+}
 
 
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     ProfileEntryHookStub stub(masm->isolate());
     Assembler::BlockConstPoolScope no_const_pools(masm);
+    DontEmitDebugCodeScope no_debug_code(masm);
     Label entry_hook_call_start;
     __ Bind(&entry_hook_call_start);
     __ Push(lr);
     __ CallStub(&stub);
     ASSERT(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
-           kProfileEntryHookCallSize);
+           GetProfileEntryHookCallSize(masm));
 
     __ Pop(lr);
   }
@@ -4694,7 +4703,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
   const int kNumSavedRegs = kCallerSaved.Count();
 
   // Compute the function's address as the first argument.
-  __ Sub(x0, lr, kProfileEntryHookCallSize);
+  __ Sub(x0, lr, GetProfileEntryHookCallSize(masm));
 
 #if V8_HOST_ARCH_ARM64
   uintptr_t entry_hook =
diff --git a/src/builtins.cc b/src/builtins.cc
index d0c1a44..b523450 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1604,9 +1604,13 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
   // For now we generate builtin adaptor code into a stack-allocated
   // buffer, before copying it into individual code objects. Be careful
   // with alignment, some platforms don't like unaligned code.
-  // TODO(jbramley): I had to increase the size of this buffer from 8KB because
-  // we can generate a lot of debug code on ARM64.
-  union { int force_alignment; byte buffer[16*KB]; } u;
+#ifdef DEBUG
+  // We can generate a lot of debug code on Arm64.
+  const size_t buffer_size = 32*KB;
+#else
+  const size_t buffer_size = 8*KB;
+#endif
+  union { int force_alignment; byte buffer[buffer_size]; } u;
 
   // Traverse the list of builtins and generate an adaptor in a
   // separate code object for each one.
diff --git a/test/cctest/test-code-stubs-arm64.cc b/test/cctest/test-code-stubs-arm64.cc
index 3ad07bf..ad8c82d 100644
--- a/test/cctest/test-code-stubs-arm64.cc
+++ b/test/cctest/test-code-stubs-arm64.cc
@@ -46,7 +46,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
                                               Register destination_reg,
                                               bool inline_fastpath) {
   // Allocate an executable page of memory.
-  size_t actual_size = 2 * Assembler::kMinimalBufferSize;
+  size_t actual_size = 4 * Assembler::kMinimalBufferSize;
   byte* buffer = static_cast<byte*>(OS::Allocate(actual_size,
                                                  &actual_size,
                                                  true));
-- 
2.7.4
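
For reference (placed below the signature delimiter, so git am ignores it): a
minimal standalone C++ sketch of the call-site size arithmetic that
GetProfileEntryHookCallSize() in the first hunk encodes. It assumes
Assembler::kCallSizeWithRelocation is two A64 instructions; the real constant
is defined in V8's arm64 assembler headers, so treat the concrete numbers as
illustrative only.

// Standalone sketch, not V8 source. It mirrors the patch's logic: the call
// site is a sub (BumpSystemStackPointer), a "Push lr", and the call itself,
// plus one extra bic instruction when ALWAYS_ALIGN_CSP is enabled.
#include <cstdio>

namespace {

const unsigned kInstructionSize = 4;  // Every A64 instruction is 4 bytes.
const unsigned kCallSizeWithRelocation = 2 * kInstructionSize;  // Assumed value.

unsigned ProfileEntryHookCallSize(bool always_align_csp) {
  // sub (BumpSystemStackPointer) + "Push lr" + the call sequence.
  unsigned size = kCallSizeWithRelocation + 2 * kInstructionSize;
  if (always_align_csp) {
    // BumpSystemStackPointer emits an extra bic when ALWAYS_ALIGN_CSP is set.
    size += kInstructionSize;
  }
  return size;
}

}  // namespace

int main() {
  std::printf("expected call-site size, ALWAYS_ALIGN_CSP off: %u bytes\n",
              ProfileEntryHookCallSize(false));
  std::printf("expected call-site size, ALWAYS_ALIGN_CSP on:  %u bytes\n",
              ProfileEntryHookCallSize(true));
  return 0;
}

With the assumed constant this yields two different sizes (16 vs. 20 bytes),
which is why a single compile-time constant could not describe both
configurations and a runtime helper is used instead.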