# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 arm a64 mipsel
+ARCHES = ia32 x64 arm arm64 mipsel
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
-ANDROID_ARCHES = android_ia32 android_arm android_a64 android_mipsel
+ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel
NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
--arch-and-mode=. $(TESTFLAGS)
SUPERFASTTESTMODES = ia32.release
-FASTTESTMODES = $(SUPERFASTTESTMODES),x64.release,ia32.optdebug,x64.optdebug,arm.optdebug,a64.release
-FASTCOMPILEMODES = $(FASTTESTMODES),a64.optdebug
+FASTTESTMODES = $(SUPERFASTTESTMODES),x64.release,ia32.optdebug,x64.optdebug,arm.optdebug,arm64.release
+FASTCOMPILEMODES = $(FASTTESTMODES),arm64.optdebug
COMMA = ,
EMPTY =
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Those definitions should be consistent with the main Makefile
-ANDROID_ARCHES = android_ia32 android_arm android_a64 android_mipsel
+ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel
MODES = release debug
# Generates all combinations of ANDROID ARCHES and MODES,
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.6
else
- ifeq ($(ARCH), android_a64)
- DEFINES = target_arch=a64 v8_target_arch=a64 android_target_arch=arm64
+ ifeq ($(ARCH), android_arm64)
+ DEFINES = target_arch=arm64 v8_target_arch=arm64 android_target_arch=arm64
TOOLCHAIN_ARCH = aarch64-linux-android
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.8
'-L<(android_stlport_libs)/x86',
],
}],
- ['target_arch=="a64"', {
+ ['target_arch=="arm64"', {
'ldflags': [
'-L<(android_stlport_libs)/arm64',
],
'target_conditions': [
['_type=="executable"', {
'conditions': [
- ['target_arch=="a64"', {
+ ['target_arch=="arm64"', {
'ldflags': [
'-Wl,-dynamic-linker,/system/bin/linker64',
],
'<!(uname -m | sed -e "s/i.86/ia32/;\
s/x86_64/x64/;\
s/amd64/x64/;\
- s/aarch64/a64/;\
+ s/aarch64/arm64/;\
s/arm.*/arm/;\
s/mips.*/mipsel/")',
}, {
'conditions': [
['(v8_target_arch=="arm" and host_arch!="arm") or \
- (v8_target_arch=="a64" and host_arch!="a64") or \
(v8_target_arch=="arm64" and host_arch!="arm64") or \
(v8_target_arch=="mipsel" and host_arch!="mipsel") or \
(v8_target_arch=="x64" and host_arch!="x64") or \
}], # _toolset=="target"
],
}], # v8_target_arch=="arm"
- ['v8_target_arch=="a64" or v8_target_arch=="arm64"', {
+ ['v8_target_arch=="arm64"', {
'defines': [
- 'V8_TARGET_ARCH_A64',
+ 'V8_TARGET_ARCH_ARM64',
],
}],
['v8_target_arch=="ia32"', {
],
}],
['(OS=="linux" or OS=="android") and \
- (v8_target_arch=="x64" or v8_target_arch=="a64" or \
- v8_target_arch=="arm64")', {
+ (v8_target_arch=="x64" or v8_target_arch=="arm64")', {
# Check whether the host compiler and target compiler support the
# '-m64' option and set it if so.
'target_conditions': [
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_A64_ASSEMBLER_A64_INL_H_
-#define V8_A64_ASSEMBLER_A64_INL_H_
+#ifndef V8_ARM64_ASSEMBLER_ARM64_INL_H_
+#define V8_ARM64_ASSEMBLER_ARM64_INL_H_
-#include "a64/assembler-a64.h"
+#include "arm64/assembler-arm64.h"
#include "cpu.h"
#include "debug.h"
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
// be returned to after a call.
- // Call sequence on A64 is:
+ // Call sequence on ARM64 is:
// ldr ip0, #... @ load from literal pool
// blr ip0
Address candidate = pc - 2 * kInstructionSize;
Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
- UNREACHABLE(); // This should never be reached on A64.
+ UNREACHABLE(); // This should never be reached on ARM64.
return Handle<Object>();
}
// The sequence must be:
// ldr ip0, [pc, #offset]
// blr ip0
- // See a64/debug-a64.cc BreakLocationIterator::SetDebugBreakAtReturn().
+ // See arm64/debug-arm64.cc BreakLocationIterator::SetDebugBreakAtReturn().
Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
Instruction* i2 = i1->following();
return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
} } // namespace v8::internal
-#endif // V8_A64_ASSEMBLER_A64_INL_H_
+#endif // V8_ARM64_ASSEMBLER_ARM64_INL_H_
#include "v8.h"
-#if V8_TARGET_ARCH_A64
+#if V8_TARGET_ARCH_ARM64
-#define A64_DEFINE_REG_STATICS
+#define ARM64_DEFINE_REG_STATICS
-#include "a64/assembler-a64-inl.h"
+#include "arm64/assembler-arm64-inl.h"
namespace v8 {
namespace internal {
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
- // specially coded on A64 means that it is a movz/movk sequence. We don't
+ // specially coded on ARM64 means that it is a movz/movk sequence. We don't
// generate those for relocatable pointers.
return false;
}
Label start;
bind(&start);
- // Refer to instructions-a64.h for a description of the marker and its
+ // Refer to instructions-arm64.h for a description of the marker and its
// arguments.
hlt(kImmExceptionIsDebug);
ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset);
// 1) Encode the size of the constant pool, for use by the disassembler.
// 2) Terminate the program, to try to prevent execution from accidentally
// flowing into the constant pool.
- // The header is therefore made of two a64 instructions:
+ // The header is therefore made of two arm64 instructions:
// ldr xzr, #<size of the constant pool in 32-bit words>
// blr xzr
// If executed the code will likely segfault and lr will point to the
} } // namespace v8::internal
-#endif // V8_TARGET_ARCH_A64
+#endif // V8_TARGET_ARCH_ARM64
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_A64_ASSEMBLER_A64_H_
-#define V8_A64_ASSEMBLER_A64_H_
+#ifndef V8_ARM64_ASSEMBLER_ARM64_H_
+#define V8_ARM64_ASSEMBLER_ARM64_H_
#include <list>
#include <map>
#include "utils.h"
#include "assembler.h"
#include "serialize.h"
-#include "a64/instructions-a64.h"
-#include "a64/cpu-a64.h"
+#include "arm64/instructions-arm64.h"
+#include "arm64/cpu-arm64.h"
namespace v8 {
STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));
-#if defined(A64_DEFINE_REG_STATICS)
+#if defined(ARM64_DEFINE_REG_STATICS)
#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
const CPURegister init_##register_class##_##name = {code, size, type}; \
const register_class& name = *reinterpret_cast<const register_class*>( \
extern const register_class& name
#define ALIAS_REGISTER(register_class, alias, name) \
extern const register_class& alias
-#endif // defined(A64_DEFINE_REG_STATICS)
+#endif // defined(ARM64_DEFINE_REG_STATICS)
// No*Reg is used to indicate an unused argument, or an error case. Note that
// these all compare equal (using the Is() method). The Register and FPRegister
// Pseudo-instructions ------------------------------------------------------
- // Parameters are described in a64/instructions-a64.h.
+ // Parameters are described in arm64/instructions-arm64.h.
void debug(const char* message, uint32_t code, Instr params = BREAK);
// Required by V8.
} } // namespace v8::internal
-#endif // V8_A64_ASSEMBLER_A64_H_
+#endif // V8_ARM64_ASSEMBLER_ARM64_H_
#include "v8.h"
-#if V8_TARGET_ARCH_A64
+#if V8_TARGET_ARCH_ARM64
#include "codegen.h"
#include "debug.h"
#include "v8.h"
-#if V8_TARGET_ARCH_A64
+#if V8_TARGET_ARCH_ARM64
#include "bootstrapper.h"
#include "code-stubs.h"
__ Fadd(base_double, base_double, fp_zero);
// The operation -0+0 results in +0 in all cases except where the
// FPCR rounding mode is 'round towards minus infinity' (RM). The
- // A64 simulator does not currently simulate FPCR (where the rounding
+ // ARM64 simulator does not currently simulate FPCR (where the rounding
// mode is set), so test the operation with some debug code.
if (masm->emit_debug_code()) {
UseScratchRegisterScope temps(masm);
// If base is -INFINITY, make it +INFINITY.
// * Calculate base - base: All infinities will become NaNs since both
- // -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in A64.
+ // -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in ARM64.
// * If the result is NaN, calculate abs(base).
__ Fsub(scratch0_double, base_double, base_double);
__ Fcmp(scratch0_double, 0.0);
void CodeStub::GenerateFPStubs(Isolate* isolate) {
- // Floating-point code doesn't get special handling in A64, so there's
+ // Floating-point code doesn't get special handling in ARM64, so there's
// nothing to do here.
USE(isolate);
}
bool CodeStub::CanUseFPRegisters() {
- // FP registers always available on A64.
+ // FP registers always available on ARM64.
return true;
}
// Compute the function's address as the first argument.
__ Sub(x0, lr, kReturnAddressDistanceFromFunctionStart);
-#if V8_HOST_ARCH_A64
+#if V8_HOST_ARCH_ARM64
uintptr_t entry_hook =
reinterpret_cast<uintptr_t>(masm->isolate()->function_entry_hook());
__ Mov(x10, entry_hook);
} } // namespace v8::internal
-#endif // V8_TARGET_ARCH_A64
+#endif // V8_TARGET_ARCH_ARM64
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_A64_CODE_STUBS_A64_H_
-#define V8_A64_CODE_STUBS_A64_H_
+#ifndef V8_ARM64_CODE_STUBS_ARM64_H_
+#define V8_ARM64_CODE_STUBS_ARM64_H_
#include "ic-inl.h"
} } // namespace v8::internal
-#endif // V8_A64_CODE_STUBS_A64_H_
+#endif // V8_ARM64_CODE_STUBS_ARM64_H_
#include "v8.h"
-#if V8_TARGET_ARCH_A64
+#if V8_TARGET_ARCH_ARM64
#include "codegen.h"
#include "macro-assembler.h"
-#include "simulator-a64.h"
+#include "simulator-arm64.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
#if defined(USE_SIMULATOR)
-byte* fast_exp_a64_machine_code = NULL;
+byte* fast_exp_arm64_machine_code = NULL;
double fast_exp_simulator(double x) {
Simulator * simulator = Simulator::current(Isolate::Current());
Simulator::CallArgument args[] = {
Simulator::CallArgument(x),
Simulator::CallArgument::End()
};
- return simulator->CallDouble(fast_exp_a64_machine_code, args);
+ return simulator->CallDouble(fast_exp_arm64_machine_code, args);
}
#endif
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
- fast_exp_a64_machine_code = buffer;
+ fast_exp_arm64_machine_code = buffer;
return &fast_exp_simulator;
#endif
}
// Continue the common case first. 'mi' tests N == 1.
__ B(&result_is_finite_non_zero, mi);
- // TODO(jbramley): Consider adding a +infinity register for A64.
+ // TODO(jbramley): Consider adding a +infinity register for ARM64.
__ Ldr(double_temp2, ExpConstant(constants, 2)); // Synthesize +infinity.
// Select between +0.0 and +infinity. 'lo' tests C == 0.
} } // namespace v8::internal
-#endif // V8_TARGET_ARCH_A64
+#endif // V8_TARGET_ARCH_ARM64
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_A64_CODEGEN_A64_H_
-#define V8_A64_CODEGEN_A64_H_
+#ifndef V8_ARM64_CODEGEN_ARM64_H_
+#define V8_ARM64_CODEGEN_ARM64_H_
#include "ast.h"
#include "ic-inl.h"
} } // namespace v8::internal
-#endif // V8_A64_CODEGEN_A64_H_
+#endif // V8_ARM64_CODEGEN_ARM64_H_
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_A64_CONSTANTS_A64_H_
-#define V8_A64_CONSTANTS_A64_H_
+#ifndef V8_ARM64_CONSTANTS_ARM64_H_
+#define V8_ARM64_CONSTANTS_ARM64_H_
// Assert that this is an LP64 system.
return eq;
default:
// In practice this function is only used with a condition coming from
- // TokenToCondition in lithium-codegen-a64.cc. Any other condition is
+ // TokenToCondition in lithium-codegen-arm64.cc. Any other condition is
// invalid as it doesn't necessary make sense to reverse it (consider
// 'mi' for instance).
UNREACHABLE();
} } // namespace v8::internal
-#endif // V8_A64_CONSTANTS_A64_H_
+#endif // V8_ARM64_CONSTANTS_ARM64_H_
#include "v8.h"
-#if V8_TARGET_ARCH_A64
+#if V8_TARGET_ARCH_ARM64
-#include "a64/cpu-a64.h"
-#include "a64/utils-a64.h"
+#include "arm64/cpu-arm64.h"
+#include "arm64/utils-arm64.h"
namespace v8 {
namespace internal {
} } // namespace v8::internal
-#endif // V8_TARGET_ARCH_A64
+#endif // V8_TARGET_ARCH_ARM64
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_A64_CPU_A64_H_
-#define V8_A64_CPU_A64_H_
+#ifndef V8_ARM64_CPU_ARM64_H_
+#define V8_ARM64_CPU_ARM64_H_
#include <stdio.h>
#include "serialize.h"
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
- // There are no optional features for A64.
+ // There are no optional features for ARM64.
return false;
};
static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
ASSERT(initialized_);
- // There are no optional features for A64.
+ // There are no optional features for ARM64.
return false;
}
static unsigned supported_;
static bool VerifyCrossCompiling() {
- // There are no optional features for A64.
+ // There are no optional features for ARM64.
ASSERT(cross_compile_ == 0);
return true;
}
static bool VerifyCrossCompiling(CpuFeature f) {
- // There are no optional features for A64.
+ // There are no optional features for ARM64.
USE(f);
ASSERT(cross_compile_ == 0);
return true;
} } // namespace v8::internal
-#endif // V8_A64_CPU_A64_H_
+#endif // V8_ARM64_CPU_ARM64_H_
#include "v8.h"
-#if V8_TARGET_ARCH_A64
+#if V8_TARGET_ARCH_ARM64
#include "codegen.h"
#include "debug.h"
void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-a64.cc).
+ // Register state for CallFunctionStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x1 : function
// -----------------------------------
void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-a64.cc).
+ // Register state for CallFunctionStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x1 : function
// -- x2 : feedback array
void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-a64.cc).
+ // Calling convention for CallConstructStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x0 : number of arguments (not smi)
// -- x1 : constructor function
void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-a64.cc).
+ // Calling convention for CallConstructStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x0 : number of arguments (not smi)
// -- x1 : constructor function
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnA64);
+ masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnARM64);
}
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnA64);
+ masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnARM64);
}
const bool Debug::kFrameDropperSupported = false;
} } // namespace v8::internal
-#endif // V8_TARGET_ARCH_A64
+#endif // V8_TARGET_ARCH_ARM64
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_A64_DECODER_A64_INL_H_
-#define V8_A64_DECODER_A64_INL_H_
+#ifndef V8_ARM64_DECODER_ARM64_INL_H_
+#define V8_ARM64_DECODER_ARM64_INL_H_
-#include "a64/decoder-a64.h"
+#include "arm64/decoder-arm64.h"
#include "globals.h"
#include "utils.h"
} } // namespace v8::internal
-#endif // V8_A64_DECODER_A64_INL_H_
+#endif // V8_ARM64_DECODER_ARM64_INL_H_
#include "v8.h"
-#if V8_TARGET_ARCH_A64
+#if V8_TARGET_ARCH_ARM64
#include "globals.h"
#include "utils.h"
-#include "a64/decoder-a64.h"
+#include "arm64/decoder-arm64.h"
namespace v8 {
} } // namespace v8::internal
-#endif // V8_TARGET_ARCH_A64
+#endif // V8_TARGET_ARCH_ARM64
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_A64_DECODER_A64_H_
-#define V8_A64_DECODER_A64_H_
+#ifndef V8_ARM64_DECODER_ARM64_H_
+#define V8_ARM64_DECODER_ARM64_H_
#include <list>
#include "globals.h"
-#include "a64/instructions-a64.h"
+#include "arm64/instructions-arm64.h"
namespace v8 {
namespace internal {
} } // namespace v8::internal
-#endif // V8_A64_DECODER_A64_H_
+#endif // V8_ARM64_DECODER_ARM64_H_
bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
- // There is no dynamic alignment padding on A64 in the input frame.
+ // There is no dynamic alignment padding on ARM64 in the input frame.
return false;
}
#include "v8.h"
-#if V8_TARGET_ARCH_A64
+#if V8_TARGET_ARCH_ARM64
#include "disasm.h"
-#include "a64/decoder-a64-inl.h"
-#include "a64/disasm-a64.h"
+#include "arm64/decoder-arm64-inl.h"
+#include "arm64/disasm-arm64.h"
#include "macro-assembler.h"
#include "platform.h"
const char* NameConverter::NameOfByteCPURegister(int reg) const {
- UNREACHABLE(); // A64 does not have the concept of a byte register
+ UNREACHABLE(); // ARM64 does not have the concept of a byte register
return "nobytereg";
}
const char* NameConverter::NameOfXMMRegister(int reg) const {
- UNREACHABLE(); // A64 does not have any XMM registers
+ UNREACHABLE(); // ARM64 does not have any XMM registers
return "noxmmreg";
}
} // namespace disasm
-#endif // V8_TARGET_ARCH_A64
+#endif // V8_TARGET_ARCH_ARM64
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_A64_DISASM_A64_H
-#define V8_A64_DISASM_A64_H
+#ifndef V8_ARM64_DISASM_ARM64_H
+#define V8_ARM64_DISASM_ARM64_H
#include "v8.h"
#include "globals.h"
#include "utils.h"
-#include "instructions-a64.h"
-#include "decoder-a64.h"
+#include "instructions-arm64.h"
+#include "decoder-arm64.h"
namespace v8 {
namespace internal {
} } // namespace v8::internal
-#endif // V8_A64_DISASM_A64_H
+#endif // V8_ARM64_DISASM_ARM64_H
#include "v8.h"
-#if V8_TARGET_ARCH_A64
+#if V8_TARGET_ARCH_ARM64
#include "assembler.h"
-#include "assembler-a64.h"
-#include "assembler-a64-inl.h"
+#include "assembler-arm64.h"
+#include "assembler-arm64-inl.h"
#include "frames.h"
namespace v8 {
} } // namespace v8::internal
-#endif // V8_TARGET_ARCH_A64
+#endif // V8_TARGET_ARCH_ARM64
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "a64/constants-a64.h"
-#include "a64/assembler-a64.h"
+#include "arm64/constants-arm64.h"
+#include "arm64/assembler-arm64.h"
-#ifndef V8_A64_FRAMES_A64_H_
-#define V8_A64_FRAMES_A64_H_
+#ifndef V8_ARM64_FRAMES_ARM64_H_
+#define V8_ARM64_FRAMES_ARM64_H_
namespace v8 {
namespace internal {
} } // namespace v8::internal
-#endif // V8_A64_FRAMES_A64_H_
+#endif // V8_ARM64_FRAMES_ARM64_H_
#include "v8.h"
-#if V8_TARGET_ARCH_A64
+#if V8_TARGET_ARCH_ARM64
#include "code-stubs.h"
#include "codegen.h"
#include "scopes.h"
#include "stub-cache.h"
-#include "a64/code-stubs-a64.h"
-#include "a64/macro-assembler-a64.h"
+#include "arm64/code-stubs-arm64.h"
+#include "arm64/macro-assembler-arm64.h"
namespace v8 {
namespace internal {
}
void EmitJumpIfNotSmi(Register reg, Label* target) {
- // This code will be patched by PatchInlinedSmiCode, in ic-a64.cc.
+ // This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc.
InstructionAccurateScope scope(masm_, 1);
ASSERT(!info_emitted_);
ASSERT(reg.Is64Bits());
}
void EmitJumpIfSmi(Register reg, Label* target) {
- // This code will be patched by PatchInlinedSmiCode, in ic-a64.cc.
+ // This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc.
InstructionAccurateScope scope(masm_, 1);
ASSERT(!info_emitted_);
ASSERT(reg.Is64Bits());
// Make sure that the constant pool is not emitted inside of the return
// sequence. This sequence can get patched when the debugger is used. See
- // debug-a64.cc:BreakLocationIterator::SetDebugBreakAtReturn().
+ // debug-arm64.cc:BreakLocationIterator::SetDebugBreakAtReturn().
{
InstructionAccurateScope scope(masm_,
Assembler::kJSRetSequenceInstructions);
// Try to generate an optimized comparison with a literal value.
// TODO(jbramley): This only checks common values like NaN or undefined.
- // Should it also handle A64 immediate operands?
+ // Should it also handle ARM64 immediate operands?
if (TryLiteralCompare(expr)) {
return;
}
} } // namespace v8::internal
-#endif // V8_TARGET_ARCH_A64
+#endif // V8_TARGET_ARCH_ARM64
#include "v8.h"
-#if V8_TARGET_ARCH_A64
+#if V8_TARGET_ARCH_ARM64
-#include "a64/assembler-a64.h"
+#include "arm64/assembler-arm64.h"
#include "code-stubs.h"
#include "codegen.h"
#include "disasm.h"
} } // namespace v8::internal
-#endif // V8_TARGET_ARCH_A64
+#endif // V8_TARGET_ARCH_ARM64
#include "v8.h"
-#if V8_TARGET_ARCH_A64
+#if V8_TARGET_ARCH_ARM64
-#define A64_DEFINE_FP_STATICS
+#define ARM64_DEFINE_FP_STATICS
-#include "a64/instructions-a64.h"
-#include "a64/assembler-a64-inl.h"
+#include "arm64/instructions-arm64.h"
+#include "arm64/assembler-arm64-inl.h"
namespace v8 {
namespace internal {
// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
-// instructions-a64-inl.h to work around this.
+// instructions-arm64-inl.h to work around this.
bool InstructionSequence::IsInlineData() const {
// Inline data is encoded as a single movz instruction which writes to xzr
// (x31).
// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
-// instructions-a64-inl.h to work around this.
+// instructions-arm64-inl.h to work around this.
uint64_t InstructionSequence::InlineData() const {
ASSERT(IsInlineData());
uint64_t payload = ImmMoveWide();
} } // namespace v8::internal
-#endif // V8_TARGET_ARCH_A64
+#endif // V8_TARGET_ARCH_ARM64
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_A64_INSTRUCTIONS_A64_H_
-#define V8_A64_INSTRUCTIONS_A64_H_
+#ifndef V8_ARM64_INSTRUCTIONS_ARM64_H_
+#define V8_ARM64_INSTRUCTIONS_ARM64_H_
#include "globals.h"
#include "utils.h"
-#include "a64/constants-a64.h"
-#include "a64/utils-a64.h"
+#include "arm64/constants-arm64.h"
+#include "arm64/utils-arm64.h"
namespace v8 {
namespace internal {
typedef uint32_t Instr;
// The following macros initialize a float/double variable with a bit pattern
-// without using static initializers: If A64_DEFINE_FP_STATICS is defined, the
+// without using static initializers: If ARM64_DEFINE_FP_STATICS is defined, the
// symbol is defined as uint32_t/uint64_t initialized with the desired bit
// pattern. Otherwise, the same symbol is declared as an external float/double.
-#if defined(A64_DEFINE_FP_STATICS)
+#if defined(ARM64_DEFINE_FP_STATICS)
#define DEFINE_FLOAT(name, value) extern const uint32_t name = value
#define DEFINE_DOUBLE(name, value) extern const uint64_t name = value
#else
#define DEFINE_FLOAT(name, value) extern const float name
#define DEFINE_DOUBLE(name, value) extern const double name
-#endif // defined(A64_DEFINE_FP_STATICS)
+#endif // defined(ARM64_DEFINE_FP_STATICS)
DEFINE_FLOAT(kFP32PositiveInfinity, 0x7f800000);
DEFINE_FLOAT(kFP32NegativeInfinity, 0xff800000);
// A pseudo 'printf' instruction. The arguments will be passed to the platform
// printf method.
const Instr kImmExceptionIsPrintf = 0xdeb1;
-// Parameters are stored in A64 registers as if the printf pseudo-instruction
+// Parameters are stored in ARM64 registers as if the printf pseudo-instruction
// was a call to the real printf method:
//
// x0: The format string, then either of:
} } // namespace v8::internal
-#endif // V8_A64_INSTRUCTIONS_A64_H_
+#endif // V8_ARM64_INSTRUCTIONS_ARM64_H_
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "a64/instrument-a64.h"
+#include "arm64/instrument-arm64.h"
namespace v8 {
namespace internal {
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_A64_INSTRUMENT_A64_H_
-#define V8_A64_INSTRUMENT_A64_H_
+#ifndef V8_ARM64_INSTRUMENT_ARM64_H_
+#define V8_ARM64_INSTRUMENT_ARM64_H_
#include "globals.h"
#include "utils.h"
-#include "a64/decoder-a64.h"
-#include "a64/constants-a64.h"
+#include "arm64/decoder-arm64.h"
+#include "arm64/constants-arm64.h"
namespace v8 {
namespace internal {
} } // namespace v8::internal
-#endif // V8_A64_INSTRUMENT_A64_H_
+#endif // V8_ARM64_INSTRUMENT_ARM64_H_
#include "v8.h"
#include "lithium-allocator-inl.h"
-#include "a64/lithium-a64.h"
-#include "a64/lithium-codegen-a64.h"
+#include "arm64/lithium-arm64.h"
+#include "arm64/lithium-codegen-arm64.h"
#include "hydrogen-osr.h"
namespace v8 {
case kMathFloor: {
ASSERT(instr->representation().IsInteger32());
ASSERT(instr->value()->representation().IsDouble());
- // TODO(jbramley): A64 can easily handle a double argument with frintm,
+ // TODO(jbramley): ARM64 can easily handle a double argument with frintm,
// but we're never asked for it here. At the moment, we fall back to the
// runtime if the result doesn't fit, like the other architectures.
LOperand* input = UseRegisterAtStart(instr->value());
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_A64_LITHIUM_A64_H_
-#define V8_A64_LITHIUM_A64_H_
+#ifndef V8_ARM64_LITHIUM_ARM64_H_
+#define V8_ARM64_LITHIUM_ARM64_H_
#include "hydrogen.h"
#include "lithium-allocator.h"
// register by the instruction implementation.
//
// This behaves identically to ARM's UseTempRegister. However, it is renamed
- // to discourage its use in A64, since in most cases it is better to allocate
- // a temporary register for the Lithium instruction.
+ // to discourage its use in ARM64, since in most cases it is better to
+ // allocate a temporary register for the Lithium instruction.
MUST_USE_RESULT LOperand* UseRegisterAndClobber(HValue* value);
// The operand created by UseRegisterAtStart is guaranteed to be live only at
} } // namespace v8::internal
-#endif // V8_A64_LITHIUM_A64_H_
+#endif // V8_ARM64_LITHIUM_ARM64_H_
#include "v8.h"
-#include "a64/lithium-codegen-a64.h"
-#include "a64/lithium-gap-resolver-a64.h"
+#include "arm64/lithium-codegen-arm64.h"
+#include "arm64/lithium-gap-resolver-arm64.h"
#include "code-stubs.h"
#include "stub-cache.h"
#include "hydrogen-osr.h"
// calling a helper function. With frintz (to produce the intermediate
// quotient) and fmsub (to calculate the remainder without loss of
// precision), it should be possible. However, we would need support for
- // fdiv in round-towards-zero mode, and the A64 simulator doesn't support
- // that yet.
+ // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
+ // support that yet.
ASSERT(left.Is(d0));
ASSERT(right.Is(d1));
__ CallCFunction(
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
// TODO(all): on ARM we use a call descriptor to specify a storage mode
- // but on A64 we only have one storage mode so it isn't necessary. Check
+ // but on ARM64 we only have one storage mode so it isn't necessary. Check
// this understanding is correct.
__ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
} else {
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_A64_LITHIUM_CODEGEN_A64_H_
-#define V8_A64_LITHIUM_CODEGEN_A64_H_
+#ifndef V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
+#define V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
-#include "a64/lithium-a64.h"
+#include "arm64/lithium-arm64.h"
-#include "a64/lithium-gap-resolver-a64.h"
+#include "arm64/lithium-gap-resolver-arm64.h"
#include "deoptimizer.h"
#include "lithium-codegen.h"
#include "safepoint-table.h"
// the branch when the inverted condition is verified.
//
// For actual examples of condition see the concrete implementation in
-// lithium-codegen-a64.cc (e.g. BranchOnCondition, CompareAndBranch).
+// lithium-codegen-arm64.cc (e.g. BranchOnCondition, CompareAndBranch).
class BranchGenerator BASE_EMBEDDED {
public:
explicit BranchGenerator(LCodeGen* codegen)
} } // namespace v8::internal
-#endif // V8_A64_LITHIUM_CODEGEN_A64_H_
+#endif // V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
#include "v8.h"
-#include "a64/lithium-gap-resolver-a64.h"
-#include "a64/lithium-codegen-a64.h"
+#include "arm64/lithium-gap-resolver-arm64.h"
+#include "arm64/lithium-codegen-arm64.h"
namespace v8 {
namespace internal {
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
-#define V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
+#ifndef V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
+#define V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
#include "v8.h"
} } // namespace v8::internal
-#endif // V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
+#endif // V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_A64_MACRO_ASSEMBLER_A64_INL_H_
-#define V8_A64_MACRO_ASSEMBLER_A64_INL_H_
+#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
+#define V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
#include <ctype.h>
#include "v8globals.h"
#include "globals.h"
-#include "a64/assembler-a64.h"
-#include "a64/assembler-a64-inl.h"
-#include "a64/macro-assembler-a64.h"
-#include "a64/instrument-a64.h"
+#include "arm64/assembler-arm64.h"
+#include "arm64/assembler-arm64-inl.h"
+#include "arm64/macro-assembler-arm64.h"
+#include "arm64/instrument-arm64.h"
namespace v8 {
} } // namespace v8::internal
-#endif // V8_A64_MACRO_ASSEMBLER_A64_INL_H_
+#endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
#include "v8.h"
-#if V8_TARGET_ARCH_A64
+#if V8_TARGET_ARCH_ARM64
#include "bootstrapper.h"
#include "codegen.h"
int MacroAssembler::ActivationFrameAlignment() {
-#if V8_HOST_ARCH_A64
+#if V8_HOST_ARCH_ARM64
// Running on the real platform. Use the alignment as mandated by the local
// environment.
// Note: This will break if we ever start generating snapshots on one ARM
// platform for another ARM platform with a different alignment.
return OS::ActivationFrameAlignment();
-#else // V8_HOST_ARCH_A64
+#else // V8_HOST_ARCH_ARM64
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
// if the target platform will need alignment, so this is controlled from a
// flag.
return FLAG_sim_stack_alignment;
-#endif // V8_HOST_ARCH_A64
+#endif // V8_HOST_ARCH_ARM64
}
}
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
- // the same alignment on A64.
+ // the same alignment on ARM64.
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
// Calculate new top and bail out if new space is exhausted.
}
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
- // the same alignment on A64.
+ // the same alignment on ARM64.
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
// Calculate new top and bail out if new space is exhausted
// Note: The ARM version of this clobbers elements_reg, but this version does
-// not. Some uses of this in A64 assume that elements_reg will be preserved.
+// not. Some uses of this in ARM64 assume that elements_reg will be preserved.
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register elements_reg,
__ LoadLiteral(ip0, kCodeAgeStubEntryOffset);
__ adr(x0, &start);
__ br(ip0);
- // IsCodeAgeSequence in codegen-a64.cc assumes that the code generated up
+ // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
// until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
__ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
if (stub) {
} } // namespace v8::internal
-#endif // V8_TARGET_ARCH_A64
+#endif // V8_TARGET_ARCH_ARM64
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_A64_MACRO_ASSEMBLER_A64_H_
-#define V8_A64_MACRO_ASSEMBLER_A64_H_
+#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
+#define V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
#include <vector>
#include "v8globals.h"
#include "globals.h"
-#include "a64/assembler-a64-inl.h"
+#include "arm64/assembler-arm64-inl.h"
namespace v8 {
namespace internal {
// On successful conversion, the least significant 32 bits of the result are
// equivalent to the ECMA-262 operation "ToInt32".
//
- // Only public for the test code in test-code-stubs-a64.cc.
+ // Only public for the test code in test-code-stubs-arm64.cc.
void TryConvertDoubleToInt64(Register result,
DoubleRegister input,
Label* done);
// Code ageing support functions.
- // Code ageing on A64 works similarly to on ARM. When V8 wants to mark a
+ // Code ageing on ARM64 works similarly to on ARM. When V8 wants to mark a
// function as old, it replaces some of the function prologue (generated by
// FullCodeGenerator::Generate) with a call to a special stub (ultimately
// generated by GenerateMakeCodeYoungAgainCommon). The stub restores the
#define ACCESS_MASM(masm) masm->
#endif
-#endif // V8_A64_MACRO_ASSEMBLER_A64_H_
+#endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
#include "v8.h"
-#if V8_TARGET_ARCH_A64
+#if V8_TARGET_ARCH_ARM64
#include "cpu-profiler.h"
#include "unicode.h"
#include "regexp-stack.h"
#include "macro-assembler.h"
#include "regexp-macro-assembler.h"
-#include "a64/regexp-macro-assembler-a64.h"
+#include "arm64/regexp-macro-assembler-arm64.h"
namespace v8 {
namespace internal {
* Isolate* isolate)
* The call is performed by NativeRegExpMacroAssembler::Execute()
* (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
- * in a64/simulator-a64.h.
+ * in arm64/simulator-arm64.h.
* When calling as a non-direct call (i.e., from C++ code), the return address
* area is overwritten with the LR register by the RegExp code. When doing a
* direct call from generated code, the return address is placed there by
#define __ ACCESS_MASM(masm_)
-RegExpMacroAssemblerA64::RegExpMacroAssemblerA64(
+RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(
Mode mode,
int registers_to_save,
Zone* zone)
}
-RegExpMacroAssemblerA64::~RegExpMacroAssemblerA64() {
+RegExpMacroAssemblerARM64::~RegExpMacroAssemblerARM64() {
delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
stack_overflow_label_.Unuse();
}
-int RegExpMacroAssemblerA64::stack_limit_slack() {
+int RegExpMacroAssemblerARM64::stack_limit_slack() {
return RegExpStack::kStackLimitSlack;
}
-void RegExpMacroAssemblerA64::AdvanceCurrentPosition(int by) {
+void RegExpMacroAssemblerARM64::AdvanceCurrentPosition(int by) {
if (by != 0) {
__ Add(current_input_offset(),
current_input_offset(), by * char_size());
}
-void RegExpMacroAssemblerA64::AdvanceRegister(int reg, int by) {
+void RegExpMacroAssemblerARM64::AdvanceRegister(int reg, int by) {
ASSERT((reg >= 0) && (reg < num_registers_));
if (by != 0) {
Register to_advance;
}
-void RegExpMacroAssemblerA64::Backtrack() {
+void RegExpMacroAssemblerARM64::Backtrack() {
CheckPreemption();
Pop(w10);
__ Add(x10, code_pointer(), Operand(w10, UXTW));
}
-void RegExpMacroAssemblerA64::Bind(Label* label) {
+void RegExpMacroAssemblerARM64::Bind(Label* label) {
__ Bind(label);
}
-void RegExpMacroAssemblerA64::CheckCharacter(uint32_t c, Label* on_equal) {
+void RegExpMacroAssemblerARM64::CheckCharacter(uint32_t c, Label* on_equal) {
CompareAndBranchOrBacktrack(current_character(), c, eq, on_equal);
}
-void RegExpMacroAssemblerA64::CheckCharacterGT(uc16 limit, Label* on_greater) {
+void RegExpMacroAssemblerARM64::CheckCharacterGT(uc16 limit,
+ Label* on_greater) {
CompareAndBranchOrBacktrack(current_character(), limit, hi, on_greater);
}
-void RegExpMacroAssemblerA64::CheckAtStart(Label* on_at_start) {
+void RegExpMacroAssemblerARM64::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the input string?
  CompareAndBranchOrBacktrack(start_offset(), 0, ne, &not_at_start);
}
-void RegExpMacroAssemblerA64::CheckNotAtStart(Label* on_not_at_start) {
+void RegExpMacroAssemblerARM64::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the input string?
CompareAndBranchOrBacktrack(start_offset(), 0, ne, on_not_at_start);
// If we did, are we still at the start of the input string?
}
-void RegExpMacroAssemblerA64::CheckCharacterLT(uc16 limit, Label* on_less) {
+void RegExpMacroAssemblerARM64::CheckCharacterLT(uc16 limit, Label* on_less) {
CompareAndBranchOrBacktrack(current_character(), limit, lo, on_less);
}
-void RegExpMacroAssemblerA64::CheckCharacters(Vector<const uc16> str,
+void RegExpMacroAssemblerARM64::CheckCharacters(Vector<const uc16> str,
int cp_offset,
Label* on_failure,
bool check_end_of_string) {
}
-void RegExpMacroAssemblerA64::CheckGreedyLoop(Label* on_equal) {
+void RegExpMacroAssemblerARM64::CheckGreedyLoop(Label* on_equal) {
__ Ldr(w10, MemOperand(backtrack_stackpointer()));
__ Cmp(current_input_offset(), w10);
__ Cset(x11, eq);
BranchOrBacktrack(eq, on_equal);
}
-void RegExpMacroAssemblerA64::CheckNotBackReferenceIgnoreCase(
+void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
int start_reg,
Label* on_no_match) {
Label fallthrough;
__ Bind(&fallthrough);
}
-void RegExpMacroAssemblerA64::CheckNotBackReference(
+void RegExpMacroAssemblerARM64::CheckNotBackReference(
int start_reg,
Label* on_no_match) {
Label fallthrough;
}
-void RegExpMacroAssemblerA64::CheckNotCharacter(unsigned c,
- Label* on_not_equal) {
+void RegExpMacroAssemblerARM64::CheckNotCharacter(unsigned c,
+ Label* on_not_equal) {
CompareAndBranchOrBacktrack(current_character(), c, ne, on_not_equal);
}
-void RegExpMacroAssemblerA64::CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal) {
+void RegExpMacroAssemblerARM64::CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal) {
__ And(w10, current_character(), mask);
CompareAndBranchOrBacktrack(w10, c, eq, on_equal);
}
-void RegExpMacroAssemblerA64::CheckNotCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_not_equal) {
+void RegExpMacroAssemblerARM64::CheckNotCharacterAfterAnd(unsigned c,
+ unsigned mask,
+ Label* on_not_equal) {
__ And(w10, current_character(), mask);
CompareAndBranchOrBacktrack(w10, c, ne, on_not_equal);
}
-void RegExpMacroAssemblerA64::CheckNotCharacterAfterMinusAnd(
+void RegExpMacroAssemblerARM64::CheckNotCharacterAfterMinusAnd(
uc16 c,
uc16 minus,
uc16 mask,
}
-void RegExpMacroAssemblerA64::CheckCharacterInRange(
+void RegExpMacroAssemblerARM64::CheckCharacterInRange(
uc16 from,
uc16 to,
Label* on_in_range) {
}
-void RegExpMacroAssemblerA64::CheckCharacterNotInRange(
+void RegExpMacroAssemblerARM64::CheckCharacterNotInRange(
uc16 from,
uc16 to,
Label* on_not_in_range) {
}
-void RegExpMacroAssemblerA64::CheckBitInTable(
+void RegExpMacroAssemblerARM64::CheckBitInTable(
Handle<ByteArray> table,
Label* on_bit_set) {
__ Mov(x11, Operand(table));
}
-bool RegExpMacroAssemblerA64::CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match) {
+bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned
// (c - min) <= (max - min) check
switch (type) {
}
-void RegExpMacroAssemblerA64::Fail() {
+void RegExpMacroAssemblerARM64::Fail() {
__ Mov(w0, FAILURE);
__ B(&exit_label_);
}
-Handle<HeapObject> RegExpMacroAssemblerA64::GetCode(Handle<String> source) {
+Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
Label return_w0;
// Finalize code - write the entry point code now we know how many
// registers we need.
}
-void RegExpMacroAssemblerA64::GoTo(Label* to) {
+void RegExpMacroAssemblerARM64::GoTo(Label* to) {
BranchOrBacktrack(al, to);
}
-void RegExpMacroAssemblerA64::IfRegisterGE(int reg,
- int comparand,
- Label* if_ge) {
+void RegExpMacroAssemblerARM64::IfRegisterGE(int reg, int comparand,
+ Label* if_ge) {
Register to_compare = GetRegister(reg, w10);
CompareAndBranchOrBacktrack(to_compare, comparand, ge, if_ge);
}
-void RegExpMacroAssemblerA64::IfRegisterLT(int reg,
- int comparand,
- Label* if_lt) {
+void RegExpMacroAssemblerARM64::IfRegisterLT(int reg, int comparand,
+ Label* if_lt) {
Register to_compare = GetRegister(reg, w10);
CompareAndBranchOrBacktrack(to_compare, comparand, lt, if_lt);
}
-void RegExpMacroAssemblerA64::IfRegisterEqPos(int reg,
- Label* if_eq) {
+void RegExpMacroAssemblerARM64::IfRegisterEqPos(int reg, Label* if_eq) {
Register to_compare = GetRegister(reg, w10);
__ Cmp(to_compare, current_input_offset());
BranchOrBacktrack(eq, if_eq);
}
RegExpMacroAssembler::IrregexpImplementation
- RegExpMacroAssemblerA64::Implementation() {
- return kA64Implementation;
+ RegExpMacroAssemblerARM64::Implementation() {
+ return kARM64Implementation;
}
-void RegExpMacroAssemblerA64::LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters) {
+void RegExpMacroAssemblerARM64::LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds,
+ int characters) {
// TODO(pielan): Make sure long strings are caught before this, and not
// just asserted in debug mode.
ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
}
-void RegExpMacroAssemblerA64::PopCurrentPosition() {
+void RegExpMacroAssemblerARM64::PopCurrentPosition() {
Pop(current_input_offset());
}
-void RegExpMacroAssemblerA64::PopRegister(int register_index) {
+void RegExpMacroAssemblerARM64::PopRegister(int register_index) {
Pop(w10);
StoreRegister(register_index, w10);
}
-void RegExpMacroAssemblerA64::PushBacktrack(Label* label) {
+void RegExpMacroAssemblerARM64::PushBacktrack(Label* label) {
if (label->is_bound()) {
int target = label->pos();
__ Mov(w10, target + Code::kHeaderSize - kHeapObjectTag);
}
-void RegExpMacroAssemblerA64::PushCurrentPosition() {
+void RegExpMacroAssemblerARM64::PushCurrentPosition() {
Push(current_input_offset());
}
-void RegExpMacroAssemblerA64::PushRegister(int register_index,
- StackCheckFlag check_stack_limit) {
+void RegExpMacroAssemblerARM64::PushRegister(int register_index,
+ StackCheckFlag check_stack_limit) {
Register to_push = GetRegister(register_index, w10);
Push(to_push);
if (check_stack_limit) CheckStackLimit();
}
-void RegExpMacroAssemblerA64::ReadCurrentPositionFromRegister(int reg) {
+void RegExpMacroAssemblerARM64::ReadCurrentPositionFromRegister(int reg) {
Register cached_register;
RegisterState register_state = GetRegisterState(reg);
switch (register_state) {
}
-void RegExpMacroAssemblerA64::ReadStackPointerFromRegister(int reg) {
+void RegExpMacroAssemblerARM64::ReadStackPointerFromRegister(int reg) {
Register read_from = GetRegister(reg, w10);
__ Ldr(x11, MemOperand(frame_pointer(), kStackBase));
__ Add(backtrack_stackpointer(), x11, Operand(read_from, SXTW));
}
-void RegExpMacroAssemblerA64::SetCurrentPositionFromEnd(int by) {
+void RegExpMacroAssemblerARM64::SetCurrentPositionFromEnd(int by) {
Label after_position;
__ Cmp(current_input_offset(), -by * char_size());
__ B(ge, &after_position);
}
-void RegExpMacroAssemblerA64::SetRegister(int register_index, int to) {
+void RegExpMacroAssemblerARM64::SetRegister(int register_index, int to) {
ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
Register set_to = wzr;
if (to != 0) {
}
-bool RegExpMacroAssemblerA64::Succeed() {
+bool RegExpMacroAssemblerARM64::Succeed() {
__ B(&success_label_);
return global();
}
-void RegExpMacroAssemblerA64::WriteCurrentPositionToRegister(int reg,
- int cp_offset) {
+void RegExpMacroAssemblerARM64::WriteCurrentPositionToRegister(int reg,
+ int cp_offset) {
Register position = current_input_offset();
if (cp_offset != 0) {
position = w10;
}
-void RegExpMacroAssemblerA64::ClearRegisters(int reg_from, int reg_to) {
+void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {
ASSERT(reg_from <= reg_to);
int num_registers = reg_to - reg_from + 1;
}
-void RegExpMacroAssemblerA64::WriteStackPointerToRegister(int reg) {
+void RegExpMacroAssemblerARM64::WriteStackPointerToRegister(int reg) {
__ Ldr(x10, MemOperand(frame_pointer(), kStackBase));
__ Sub(x10, backtrack_stackpointer(), x10);
if (masm_->emit_debug_code()) {
}
-int RegExpMacroAssemblerA64::CheckStackGuardState(Address* return_address,
+int RegExpMacroAssemblerARM64::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame,
int start_offset,
}
-void RegExpMacroAssemblerA64::CheckPosition(int cp_offset,
- Label* on_outside_input) {
+void RegExpMacroAssemblerARM64::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
CompareAndBranchOrBacktrack(current_input_offset(),
-cp_offset * char_size(),
ge,
}
-bool RegExpMacroAssemblerA64::CanReadUnaligned() {
+bool RegExpMacroAssemblerARM64::CanReadUnaligned() {
// TODO(pielan): See whether or not we should disable unaligned accesses.
return !slow_safe();
}
// Private methods:
-void RegExpMacroAssemblerA64::CallCheckStackGuardState(Register scratch) {
+void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
// Allocate space on the stack to store the return address. The
// CheckStackGuardState C++ function will override it if the code
// moved. Allocate extra space for 2 arguments passed by pointers.
__ Mov(code_pointer(), Operand(masm_->CodeObject()));
}
-void RegExpMacroAssemblerA64::BranchOrBacktrack(Condition condition,
- Label* to) {
+void RegExpMacroAssemblerARM64::BranchOrBacktrack(Condition condition,
+ Label* to) {
if (condition == al) { // Unconditional.
if (to == NULL) {
Backtrack();
__ Bind(&no_branch);
}
-void RegExpMacroAssemblerA64::CompareAndBranchOrBacktrack(Register reg,
- int immediate,
- Condition condition,
- Label* to) {
+void RegExpMacroAssemblerARM64::CompareAndBranchOrBacktrack(Register reg,
+ int immediate,
+ Condition condition,
+ Label* to) {
if ((immediate == 0) && ((condition == eq) || (condition == ne))) {
if (to == NULL) {
to = &backtrack_label_;
}
-void RegExpMacroAssemblerA64::CheckPreemption() {
+void RegExpMacroAssemblerARM64::CheckPreemption() {
// Check for preemption.
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
}
-void RegExpMacroAssemblerA64::CheckStackLimit() {
+void RegExpMacroAssemblerARM64::CheckStackLimit() {
ExternalReference stack_limit =
ExternalReference::address_of_regexp_stack_limit(isolate());
__ Mov(x10, stack_limit);
}
-void RegExpMacroAssemblerA64::Push(Register source) {
+void RegExpMacroAssemblerARM64::Push(Register source) {
ASSERT(source.Is32Bits());
ASSERT(!source.is(backtrack_stackpointer()));
__ Str(source,
}
-void RegExpMacroAssemblerA64::Pop(Register target) {
+void RegExpMacroAssemblerARM64::Pop(Register target) {
ASSERT(target.Is32Bits());
ASSERT(!target.is(backtrack_stackpointer()));
__ Ldr(target,
}
-Register RegExpMacroAssemblerA64::GetCachedRegister(int register_index) {
+Register RegExpMacroAssemblerARM64::GetCachedRegister(int register_index) {
ASSERT(register_index < kNumCachedRegisters);
return Register::Create(register_index / 2, kXRegSizeInBits);
}
-Register RegExpMacroAssemblerA64::GetRegister(int register_index,
- Register maybe_result) {
+Register RegExpMacroAssemblerARM64::GetRegister(int register_index,
+ Register maybe_result) {
ASSERT(maybe_result.Is32Bits());
ASSERT(register_index >= 0);
if (num_registers_ <= register_index) {
}
-void RegExpMacroAssemblerA64::StoreRegister(int register_index,
- Register source) {
+void RegExpMacroAssemblerARM64::StoreRegister(int register_index,
+ Register source) {
ASSERT(source.Is32Bits());
ASSERT(register_index >= 0);
if (num_registers_ <= register_index) {
}
-void RegExpMacroAssemblerA64::CallIf(Label* to, Condition condition) {
+void RegExpMacroAssemblerARM64::CallIf(Label* to, Condition condition) {
Label skip_call;
if (condition != al) __ B(&skip_call, InvertCondition(condition));
__ Bl(to);
}
-void RegExpMacroAssemblerA64::RestoreLinkRegister() {
+void RegExpMacroAssemblerARM64::RestoreLinkRegister() {
ASSERT(csp.Is(__ StackPointer()));
__ Pop(lr, xzr);
__ Add(lr, lr, Operand(masm_->CodeObject()));
}
-void RegExpMacroAssemblerA64::SaveLinkRegister() {
+void RegExpMacroAssemblerARM64::SaveLinkRegister() {
ASSERT(csp.Is(__ StackPointer()));
__ Sub(lr, lr, Operand(masm_->CodeObject()));
__ Push(xzr, lr);
}
-MemOperand RegExpMacroAssemblerA64::register_location(int register_index) {
+MemOperand RegExpMacroAssemblerARM64::register_location(int register_index) {
ASSERT(register_index < (1<<30));
ASSERT(register_index >= kNumCachedRegisters);
if (num_registers_ <= register_index) {
return MemOperand(frame_pointer(), offset);
}
-MemOperand RegExpMacroAssemblerA64::capture_location(int register_index,
+MemOperand RegExpMacroAssemblerARM64::capture_location(int register_index,
Register scratch) {
ASSERT(register_index < (1<<30));
ASSERT(register_index < num_saved_registers_);
}
}
-void RegExpMacroAssemblerA64::LoadCurrentCharacterUnchecked(int cp_offset,
- int characters) {
+void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
+ int characters) {
Register offset = current_input_offset();
// The ldr, str, ldrh, strh instructions can do unaligned accesses, if the CPU
}} // namespace v8::internal
-#endif // V8_TARGET_ARCH_A64
+#endif // V8_TARGET_ARCH_ARM64
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
-#define V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
+#ifndef V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
+#define V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
-#include "a64/assembler-a64.h"
-#include "a64/assembler-a64-inl.h"
+#include "arm64/assembler-arm64.h"
+#include "arm64/assembler-arm64-inl.h"
#include "macro-assembler.h"
namespace v8 {
#ifndef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerA64: public NativeRegExpMacroAssembler {
+class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
public:
- RegExpMacroAssemblerA64(Mode mode, int registers_to_save, Zone* zone);
- virtual ~RegExpMacroAssemblerA64();
+ RegExpMacroAssemblerARM64(Mode mode, int registers_to_save, Zone* zone);
+ virtual ~RegExpMacroAssemblerARM64();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
virtual void AdvanceRegister(int reg, int by);
}} // namespace v8::internal
-#endif // V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
+#endif // V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
#include <cstdarg>
#include "v8.h"
-#if V8_TARGET_ARCH_A64
+#if V8_TARGET_ARCH_ARM64
#include "disasm.h"
#include "assembler.h"
-#include "a64/decoder-a64-inl.h"
-#include "a64/simulator-a64.h"
+#include "arm64/decoder-arm64-inl.h"
+#include "arm64/simulator-arm64.h"
#include "macro-assembler.h"
namespace v8 {
} } // namespace v8::internal
-#endif // V8_TARGET_ARCH_A64
+#endif // V8_TARGET_ARCH_ARM64
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_A64_SIMULATOR_A64_H_
-#define V8_A64_SIMULATOR_A64_H_
+#ifndef V8_ARM64_SIMULATOR_ARM64_H_
+#define V8_ARM64_SIMULATOR_ARM64_H_
#include <stdarg.h>
#include <vector>
#include "utils.h"
#include "allocation.h"
#include "assembler.h"
-#include "a64/assembler-a64.h"
-#include "a64/decoder-a64.h"
-#include "a64/disasm-a64.h"
-#include "a64/instrument-a64.h"
+#include "arm64/assembler-arm64.h"
+#include "arm64/decoder-arm64.h"
+#include "arm64/disasm-arm64.h"
+#include "arm64/instrument-arm64.h"
#define REGISTER_CODE_LIST(R) \
R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
#if !defined(USE_SIMULATOR)
-// Running without a simulator on a native A64 platform.
+// Running without a simulator on a native ARM64 platform.
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
-typedef int (*a64_regexp_matcher)(String* input,
- int64_t start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- int64_t output_size,
- Address stack_base,
- int64_t direct_call,
- void* return_address,
- Isolate* isolate);
+typedef int (*arm64_regexp_matcher)(String* input,
+ int64_t start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ int* output,
+ int64_t output_size,
+ Address stack_base,
+ int64_t direct_call,
+ void* return_address,
+ Isolate* isolate);
// Call the generated regexp code directly. The code at the entry address
-// should act as a function matching the type a64_regexp_matcher.
+// should act as a function matching the type arm64_regexp_matcher.
// The ninth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<a64_regexp_matcher>(entry)( \
+ (FUNCTION_CAST<arm64_regexp_matcher>(entry)( \
p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
} } // namespace v8::internal
-#endif // V8_A64_SIMULATOR_A64_H_
+#endif // V8_ARM64_SIMULATOR_ARM64_H_
#include "v8.h"
-#if V8_TARGET_ARCH_A64
+#if V8_TARGET_ARCH_ARM64
#include "ic-inl.h"
#include "codegen.h"
} } // namespace v8::internal
-#endif // V8_TARGET_ARCH_A64
+#endif // V8_TARGET_ARCH_ARM64
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#if V8_TARGET_ARCH_A64
+#if V8_TARGET_ARCH_ARM64
-#include "a64/utils-a64.h"
+#include "arm64/utils-arm64.h"
namespace v8 {
int CountLeadingZeros(uint64_t value, int width) {
- // TODO(jbramley): Optimize this for A64 hosts.
+ // TODO(jbramley): Optimize this for ARM64 hosts.
ASSERT((width == 32) || (width == 64));
int count = 0;
uint64_t bit_test = 1UL << (width - 1);
int CountLeadingSignBits(int64_t value, int width) {
- // TODO(jbramley): Optimize this for A64 hosts.
+ // TODO(jbramley): Optimize this for ARM64 hosts.
ASSERT((width == 32) || (width == 64));
if (value >= 0) {
return CountLeadingZeros(value, width) - 1;
int CountTrailingZeros(uint64_t value, int width) {
- // TODO(jbramley): Optimize this for A64 hosts.
+ // TODO(jbramley): Optimize this for ARM64 hosts.
ASSERT((width == 32) || (width == 64));
int count = 0;
while ((count < width) && (((value >> count) & 1) == 0)) {
} } // namespace v8::internal
-#endif // V8_TARGET_ARCH_A64
+#endif // V8_TARGET_ARCH_ARM64
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_A64_UTILS_A64_H_
-#define V8_A64_UTILS_A64_H_
+#ifndef V8_ARM64_UTILS_ARM64_H_
+#define V8_ARM64_UTILS_ARM64_H_
#include <cmath>
#include "v8.h"
-#include "a64/constants-a64.h"
+#include "arm64/constants-arm64.h"
#define REGISTER_CODE_LIST(R) \
R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
} } // namespace v8::internal
-#endif // V8_A64_UTILS_A64_H_
+#endif // V8_ARM64_UTILS_ARM64_H_
#include "ia32/assembler-ia32-inl.h"
#elif V8_TARGET_ARCH_X64
#include "x64/assembler-x64-inl.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/assembler-a64-inl.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/assembler-arm-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/regexp-macro-assembler-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/regexp-macro-assembler-a64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/regexp-macro-assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
// 00 [4 bit middle_tag] 11 followed by
// 00 [6 bit pc delta]
//
-// 1101: constant or veneer pool. Used only on ARM and A64 for now.
+// 1101: constant or veneer pool. Used only on ARM and ARM64 for now.
// The format is: [2-bit sub-type] 1101 11
// signed int (size of the pool).
// The 2-bit sub-types are:
function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
#elif V8_TARGET_ARCH_IA32
function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
-#elif V8_TARGET_ARCH_A64
- function = FUNCTION_ADDR(RegExpMacroAssemblerA64::CheckStackGuardState);
+#elif V8_TARGET_ARCH_ARM64
+ function = FUNCTION_ADDR(RegExpMacroAssemblerARM64::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
friend class Displacement;
friend class RegExpMacroAssemblerIrregexp;
-#if V8_TARGET_ARCH_A64
- // On A64, the Assembler keeps track of pointers to Labels to resolve branches
- // to distant targets. Copying labels would confuse the Assembler.
+#if V8_TARGET_ARCH_ARM64
+ // On ARM64, the Assembler keeps track of pointers to Labels to resolve
+ // branches to distant targets. Copying labels would confuse the Assembler.
DISALLOW_COPY_AND_ASSIGN(Label); // NOLINT
#endif
};
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
- // Marks constant and veneer pools. Only used on ARM and A64.
+ // Marks constant and veneer pools. Only used on ARM and ARM64.
// They use a custom noncompact encoding.
CONST_POOL,
VENEER_POOL,
#include "atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__)
#include "atomicops_internals_mac.h"
-#elif defined(__GNUC__) && V8_HOST_ARCH_A64
-#include "atomicops_internals_a64_gcc.h"
+#elif defined(__GNUC__) && V8_HOST_ARCH_ARM64
+#include "atomicops_internals_arm64_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
#include "atomicops_internals_arm_gcc.h"
#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
// buffer, before copying it into individual code objects. Be careful
// with alignment, some platforms don't like unaligned code.
// TODO(jbramley): I had to increase the size of this buffer from 8KB because
- // we can generate a lot of debug code on A64.
+ // we can generate a lot of debug code on ARM64.
union { int force_alignment; byte buffer[16*KB]; } u;
// Traverse the list of builtins and generate an adaptor in a
#endif
// Simulator specific helpers.
-#if defined(USE_SIMULATOR) && defined(V8_TARGET_ARCH_A64)
+#if defined(USE_SIMULATOR) && defined(V8_TARGET_ARCH_ARM64)
// TODO(all): If possible automatically prepend an indicator like
// UNIMPLEMENTED or LOCATION.
#define ASM_UNIMPLEMENTED(message) \
V(KeyedStringLength)
// List of code stubs only used on ARM platforms.
-#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_A64)
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_ARM64)
#define CODE_STUB_LIST_ARM(V) \
V(GetProperty) \
V(SetProperty) \
#include "ia32/code-stubs-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/code-stubs-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/code-stubs-a64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/code-stubs-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/code-stubs-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "ia32/codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/codegen-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/codegen-a64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/codegen-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS
void EnableInterrupts();
void DisableInterrupts();
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_A64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
#else
DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
DEFINE_bool(trace_parse, false, "trace parsing and preparsing")
-// simulator-arm.cc, simulator-a64.cc and simulator-mips.cc
+// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "Trace simulator execution")
DEFINE_bool(debug_sim, false, "Enable debugging the simulator")
DEFINE_bool(check_icache, false,
"Check icache flushes in ARM and MIPS simulator")
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
-#ifdef V8_TARGET_ARCH_A64
+#ifdef V8_TARGET_ARCH_ARM64
DEFINE_int(sim_stack_alignment, 16,
"Stack alignment in bytes in simulator. This must be a power of two "
"and it must be at least 16. 16 is default.")
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")
#endif
DEFINE_int(sim_stack_size, 2 * MB / KB,
- "Stack size of the A64 simulator in kBytes (default is 2 MB)")
+ "Stack size of the ARM64 simulator in kBytes (default is 2 MB)")
DEFINE_bool(log_regs_modified, true,
"When logging register values, only print modified registers.")
DEFINE_bool(log_colour, true,
DEFINE_implication(log_timer_events, log_internal_timer_events)
DEFINE_implication(log_internal_timer_events, prof)
DEFINE_bool(log_instruction_stats, false, "Log AArch64 instruction statistics.")
-DEFINE_string(log_instruction_file, "a64_inst.csv",
+DEFINE_string(log_instruction_file, "arm64_inst.csv",
"AArch64 instruction statistics log file.")
DEFINE_int(log_instruction_period, 1 << 22,
"AArch64 instruction statistics logging period.")
#include "ia32/frames-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/frames-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/frames-a64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/frames-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/frames-arm.h"
#elif V8_TARGET_ARCH_MIPS
namespace v8 {
namespace internal {
-#if V8_TARGET_ARCH_A64
+#if V8_TARGET_ARCH_ARM64
typedef uint64_t RegList;
#else
typedef uint32_t RegList;
static const int kCodeSizeMultiplier = 162;
#elif V8_TARGET_ARCH_ARM
static const int kCodeSizeMultiplier = 142;
-#elif V8_TARGET_ARCH_A64
-// TODO(all): Copied ARM value. Check this is sensible for A64.
+#elif V8_TARGET_ARCH_ARM64
+// TODO(all): Copied ARM value. Check this is sensible for ARM64.
static const int kCodeSizeMultiplier = 142;
#elif V8_TARGET_ARCH_MIPS
static const int kCodeSizeMultiplier = 142;
#define V8_HOST_ARCH_32_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(__AARCH64EL__)
-#define V8_HOST_ARCH_A64 1
+#define V8_HOST_ARCH_ARM64 1
#define V8_HOST_ARCH_64_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(__ARMEL__)
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && \
- !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_A64 && !V8_TARGET_ARCH_MIPS
+ !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
#define V8_TARGET_ARCH_IA32 1
#elif defined(__AARCH64EL__)
-#define V8_TARGET_ARCH_A64 1
+#define V8_TARGET_ARCH_ARM64 1
#elif defined(__ARMEL__)
#define V8_TARGET_ARCH_ARM 1
#elif defined(__MIPSEL__)
#if (V8_TARGET_ARCH_ARM && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM))
#error Target architecture arm is only supported on arm and ia32 host
#endif
-#if (V8_TARGET_ARCH_A64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_A64))
-#error Target architecture a64 is only supported on a64 and x64 host
+#if (V8_TARGET_ARCH_ARM64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_ARM64))
+#error Target architecture arm64 is only supported on arm64 and x64 host
#endif
#if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS))
#error Target architecture mips is only supported on mips and ia32 host
// Setting USE_SIMULATOR explicitly from the build script will force
// the use of a simulated environment.
#if !defined(USE_SIMULATOR)
-#if (V8_TARGET_ARCH_A64 && !V8_HOST_ARCH_A64)
+#if (V8_TARGET_ARCH_ARM64 && !V8_HOST_ARCH_ARM64)
#define USE_SIMULATOR 1
#endif
#if (V8_TARGET_ARCH_ARM && !V8_HOST_ARCH_ARM)
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_ARM
#define V8_TARGET_LITTLE_ENDIAN 1
-#elif V8_TARGET_ARCH_A64
+#elif V8_TARGET_ARCH_ARM64
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_MIPS
#define V8_TARGET_LITTLE_ENDIAN 1
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/lithium-a64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/lithium-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "ia32/lithium-codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-codegen-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/lithium-codegen-a64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/lithium-codegen-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_A64 || V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS
Simulator::Initialize(this);
#endif
#endif
#endif
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
- !defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
class Redirection;
class Simulator;
#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
- V8_TARGET_ARCH_A64 && !defined(__aarch64__) || \
+ V8_TARGET_ARCH_ARM64 && !defined(__aarch64__) || \
V8_TARGET_ARCH_MIPS && !defined(__mips__)
#define ISOLATE_INIT_SIMULATOR_LIST(V) \
stack_limit_(0),
thread_state_(NULL),
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
- !defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
simulator_(NULL),
#endif
FIELD_ACCESSOR(ThreadState*, thread_state)
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
- !defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
FIELD_ACCESSOR(Simulator*, simulator)
#endif
ThreadState* thread_state_;
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
- !defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
Simulator* simulator_;
#endif
#include "ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/regexp-macro-assembler-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/regexp-macro-assembler-a64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/regexp-macro-assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
#elif V8_TARGET_ARCH_ARM
RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
-#elif V8_TARGET_ARCH_A64
- RegExpMacroAssemblerA64 macro_assembler(mode, (data->capture_count + 1) * 2,
- zone);
+#elif V8_TARGET_ARCH_ARM64
+ RegExpMacroAssemblerARM64 macro_assembler(mode, (data->capture_count + 1) * 2,
+ zone);
#elif V8_TARGET_ARCH_MIPS
RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/lithium-a64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/lithium-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/lithium-a64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/lithium-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#include "arm/lithium-codegen-arm.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/lithium-a64.h"
-#include "a64/lithium-codegen-a64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/lithium-arm64.h"
+#include "arm64/lithium-codegen-arm64.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#include "mips/lithium-codegen-mips.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#include "mips/lithium-codegen-mips.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/lithium-a64.h"
-#include "a64/lithium-codegen-a64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/lithium-arm64.h"
+#include "arm64/lithium-codegen-arm64.h"
#else
#error "Unknown architecture."
#endif
#include "x64/assembler-x64-inl.h"
#include "code.h" // must be after assembler_*.h
#include "x64/macro-assembler-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/constants-a64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/constants-arm64.h"
#include "assembler.h"
-#include "a64/assembler-a64.h"
-#include "a64/assembler-a64-inl.h"
+#include "arm64/assembler-arm64.h"
+#include "arm64/assembler-arm64-inl.h"
#include "code.h" // must be after assembler_*.h
-#include "a64/macro-assembler-a64.h"
-#include "a64/macro-assembler-a64-inl.h"
+#include "arm64/macro-assembler-arm64.h"
+#include "arm64/macro-assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
#include "assembler.h"
#include "property-details.h"
#include "smart-pointers.h"
#include "unicode-inl.h"
-#if V8_TARGET_ARCH_A64
-#include "a64/constants-a64.h"
+#if V8_TARGET_ARCH_ARM64
+#include "arm64/constants-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
#elif V8_TARGET_ARCH_MIPS
V(kLetBindingReInitialization, "Let binding re-initialization") \
V(kLhsHasBeenClobbered, "lhs has been clobbered") \
V(kLiveBytesCountOverflowChunkSize, "Live Bytes Count overflow chunk size") \
- V(kLiveEditFrameDroppingIsNotSupportedOnA64, \
- "LiveEdit frame dropping is not supported on a64") \
+ V(kLiveEditFrameDroppingIsNotSupportedOnARM64, \
+ "LiveEdit frame dropping is not supported on arm64") \
V(kLiveEditFrameDroppingIsNotSupportedOnArm, \
"LiveEdit frame dropping is not supported on arm") \
V(kLiveEditFrameDroppingIsNotSupportedOnMips, \
void OS::DebugBreak() {
#if V8_HOST_ARCH_ARM
asm("bkpt 0");
-#elif V8_HOST_ARCH_A64
+#elif V8_HOST_ARCH_ARM64
asm("brk 0");
#elif V8_HOST_ARCH_MIPS
asm("break");
assembler_(assembler) {
unsigned int type = assembler->Implementation();
ASSERT(type < 6);
- const char* impl_names[] = {"IA32", "ARM", "A64", "MIPS", "X64", "Bytecode"};
+ const char* impl_names[] = {"IA32", "ARM", "ARM64",
+ "MIPS", "X64", "Bytecode"};
PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
}
enum IrregexpImplementation {
kIA32Implementation,
kARMImplementation,
- kA64Implementation,
+ kARM64Implementation,
kMIPSImplementation,
kX64Implementation,
kBytecodeImplementation
Simulator::sp));
state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::r11));
-#elif V8_TARGET_ARCH_A64
+#elif V8_TARGET_ARCH_ARM64
if (simulator_->sp() == 0 || simulator_->fp() == 0) {
// It possible that the simulator is interrupted while it is updating
- // the sp or fp register. A64 simulator does this in two steps:
+ // the sp or fp register. ARM64 simulator does this in two steps:
// first setting it to zero and then setting it to the new value.
// Bailout if sp/fp doesn't contain the new value.
return;
if (!helper.Init(sampler, isolate)) return;
helper.FillRegisters(&state);
// It possible that the simulator is interrupted while it is updating
- // the sp or fp register. A64 simulator does this in two steps:
+ // the sp or fp register. ARM64 simulator does this in two steps:
// first setting it to zero and then setting it to the new value.
// Bailout if sp/fp doesn't contain the new value.
if (state.sp == 0 || state.fp == 0) return;
state.fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif // defined(__GLIBC__) && !defined(__UCLIBC__) &&
// (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
-#elif V8_HOST_ARCH_A64
+#elif V8_HOST_ARCH_ARM64
state.pc = reinterpret_cast<Address>(mcontext.pc);
state.sp = reinterpret_cast<Address>(mcontext.sp);
// FP is an alias for x29.
#include "ia32/simulator-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/simulator-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/simulator-a64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/simulator-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/simulator-arm.h"
#elif V8_TARGET_ARCH_MIPS
'test-macro-assembler-arm.cc'
],
}],
- ['v8_target_arch=="a64" or v8_target_arch=="arm64"', {
+ ['v8_target_arch=="arm64"', {
'sources': [
- 'test-utils-a64.cc',
- 'test-assembler-a64.cc',
+ 'test-utils-arm64.cc',
+ 'test-assembler-arm64.cc',
'test-code-stubs.cc',
- 'test-code-stubs-a64.cc',
- 'test-disasm-a64.cc',
- 'test-fuzz-a64.cc',
- 'test-javascript-a64.cc',
- 'test-js-a64-variables.cc'
+ 'test-code-stubs-arm64.cc',
+ 'test-disasm-arm64.cc',
+ 'test-fuzz-arm64.cc',
+ 'test-javascript-arm64.cc',
+ 'test-js-arm64-variables.cc'
],
}],
['v8_target_arch=="mipsel"', {
}], # ALWAYS
##############################################################################
-['arch == a64', {
+['arch == arm64', {
'test-api/Bug618': [PASS],
# BUG(v8:3155).
'test-strings/AsciiArrayJoin': [PASS, ['mode == debug', FAIL]],
-}], # 'arch == a64'
+}], # 'arch == arm64'
-['arch == a64 and simulator_run == True', {
+['arch == arm64 and simulator_run == True', {
# Pass but take too long with the simulator.
'test-api/ExternalArrays': [PASS, TIMEOUT],
'test-api/Threading1': [SKIP],
-}], # 'arch == a64 and simulator_run == True'
+}], # 'arch == arm64 and simulator_run == True'
-['arch == a64 and mode == debug and simulator_run == True', {
+['arch == arm64 and mode == debug and simulator_run == True', {
# Pass but take too long with the simulator in debug mode.
'test-api/ExternalDoubleArray': [SKIP],
'test-api/Float32Array': [SKIP],
'test-api/Float64Array': [SKIP],
'test-debug/DebugBreakLoop': [SKIP],
-}], # 'arch == a64 and mode == debug and simulator_run == True'
+}], # 'arch == arm64 and mode == debug and simulator_run == True'
##############################################################################
['asan == True', {
#include "v8.h"
#include "macro-assembler.h"
-#include "a64/simulator-a64.h"
-#include "a64/decoder-a64-inl.h"
-#include "a64/disasm-a64.h"
-#include "a64/utils-a64.h"
+#include "arm64/simulator-arm64.h"
+#include "arm64/decoder-arm64-inl.h"
+#include "arm64/disasm-arm64.h"
+#include "arm64/utils-arm64.h"
#include "cctest.h"
-#include "test-utils-a64.h"
+#include "test-utils-arm64.h"
using namespace v8::internal;
//
// Once the test has been run all integer and floating point registers as well
// as flags are accessible through a RegisterDump instance, see
-// utils-a64.cc for more info on RegisterDump.
+// utils-arm64.cc for more info on RegisterDump.
//
// We provide some helper assert to handle common cases:
//
ASSERT_EQUAL_FP64(FLT_MAX, d11);
ASSERT_EQUAL_FP64(FLT_MIN, d12);
- // Check that the NaN payload is preserved according to A64 conversion rules:
+ // Check that the NaN payload is preserved according to ARM64 conversion
+ // rules:
// - The sign bit is preserved.
// - The top bit of the mantissa is forced to 1 (making it a quiet NaN).
// - The remaining mantissa bits are copied until they run out.
#include "v8.h"
#include "macro-assembler.h"
-#include "a64/assembler-a64.h"
-#include "a64/macro-assembler-a64.h"
-#include "a64/decoder-a64-inl.h"
-#include "a64/disasm-a64.h"
-#include "a64/utils-a64.h"
+#include "arm64/assembler-arm64.h"
+#include "arm64/macro-assembler-arm64.h"
+#include "arm64/decoder-arm64-inl.h"
+#include "arm64/disasm-arm64.h"
+#include "arm64/utils-arm64.h"
using namespace v8::internal;
#include <stdlib.h>
#include "cctest.h"
-#include "a64/decoder-a64.h"
-#include "a64/decoder-a64-inl.h"
-#include "a64/disasm-a64.h"
+#include "arm64/decoder-arm64.h"
+#include "arm64/decoder-arm64-inl.h"
+#include "arm64/disasm-arm64.h"
using namespace v8::internal;
StringHelper::GenerateHashGetHash(masm, r0);
__ pop(kRootRegister);
__ mov(pc, Operand(lr));
-#elif V8_TARGET_ARCH_A64
- // The A64 assembler usually uses jssp (x28) as a stack pointer, but only csp
- // is initialized by the calling (C++) code.
+#elif V8_TARGET_ARCH_ARM64
+ // The ARM64 assembler usually uses jssp (x28) as a stack pointer, but only
+ // csp is initialized by the calling (C++) code.
Register old_stack_pointer = __ StackPointer();
__ SetStackPointer(csp);
__ Push(root, xzr);
__ GetNumberHash(r0, ip);
__ pop(kRootRegister);
__ mov(pc, Operand(lr));
-#elif V8_TARGET_ARCH_A64
- // The A64 assembler usually uses jssp (x28) as a stack pointer, but only csp
- // is initialized by the calling (C++) code.
+#elif V8_TARGET_ARCH_ARM64
+ // The ARM64 assembler usually uses jssp (x28) as a stack pointer, but only
+ // csp is initialized by the calling (C++) code.
Register old_stack_pointer = __ StackPointer();
__ SetStackPointer(csp);
__ Push(root, xzr);
CHECK(value->IsNumber());
CHECK_EQ(Smi::kMaxValue, Smi::cast(value)->value());
-#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_A64)
+#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_ARM64)
// TODO(lrn): We need a NumberFromIntptr function in order to test this.
value = heap->NumberFromInt32(Smi::kMinValue - 1)->ToObjectChecked();
CHECK(value->IsHeapNumber());
#include "arm/macro-assembler-arm.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
-#if V8_TARGET_ARCH_A64
-#include "a64/assembler-a64.h"
-#include "a64/macro-assembler-a64.h"
-#include "a64/regexp-macro-assembler-a64.h"
+#if V8_TARGET_ARCH_ARM64
+#include "arm64/assembler-arm64.h"
+#include "arm64/macro-assembler-arm64.h"
+#include "arm64/regexp-macro-assembler-arm64.h"
#endif
#if V8_TARGET_ARCH_MIPS
#include "mips/assembler-mips.h"
typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_ARM
typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler;
-#elif V8_TARGET_ARCH_A64
-typedef RegExpMacroAssemblerA64 ArchRegExpMacroAssembler;
+#elif V8_TARGET_ARCH_ARM64
+typedef RegExpMacroAssemblerARM64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_MIPS
typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
#endif
#include "v8.h"
#include "macro-assembler.h"
-#include "a64/utils-a64.h"
+#include "arm64/utils-arm64.h"
#include "cctest.h"
-#include "test-utils-a64.h"
+#include "test-utils-arm64.h"
using namespace v8::internal;
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_A64_TEST_UTILS_A64_H_
-#define V8_A64_TEST_UTILS_A64_H_
+#ifndef V8_ARM64_TEST_UTILS_ARM64_H_
+#define V8_ARM64_TEST_UTILS_ARM64_H_
#include "v8.h"
#include "macro-assembler.h"
-#include "a64/macro-assembler-a64.h"
-#include "a64/utils-a64.h"
+#include "arm64/macro-assembler-arm64.h"
+#include "arm64/utils-arm64.h"
#include "cctest.h"
// Clobber or ClobberFP functions.
void Clobber(MacroAssembler* masm, CPURegList reg_list);
-#endif // V8_A64_TEST_UTILS_A64_H_
+#endif // V8_ARM64_TEST_UTILS_ARM64_H_
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// We change the stack size for the A64 simulator because at one point this test
-// enters an infinite recursion which goes through the runtime and we overflow
-// the system stack before the simulator stack.
+// We change the stack size for the ARM64 simulator because at one point this
+// test enters an infinite recursion which goes through the runtime and we
+// overflow the system stack before the simulator stack.
// Flags: --harmony-proxies --sim-stack-size=500
}], # 'gc_stress == True'
##############################################################################
-['arch == a64', {
+['arch == arm64', {
# Requires bigger stack size in the Genesis and if stack size is increased,
# the test requires too much time to run. However, the problem test covers
'regress/regress-1132': [SKIP],
# Pass but take too long to run. Skip.
- # Some similar tests (with fewer iterations) may be included in a64-js tests.
+ # Some similar tests (with fewer iterations) may be included in arm64-js
+ # tests.
'compiler/regress-arguments': [SKIP],
'compiler/regress-gvn': [SKIP],
'compiler/regress-max-locals-for-osr': [SKIP],
'unicodelctest-no-optimization': [PASS, SLOW],
'unicodelctest': [PASS, SLOW],
'unicode-test': [PASS, SLOW],
-}], # 'arch == a64'
+}], # 'arch == arm64'
-['arch == a64 and mode == debug and simulator_run == True', {
+['arch == arm64 and mode == debug and simulator_run == True', {
# Pass but take too long with the simulator in debug mode.
'array-sort': [PASS, TIMEOUT],
'harmony/symbols': [SKIP],
# Issue 3219:
'getters-on-elements': [PASS, ['gc_stress == True', FAIL]],
-}], # 'arch == a64 and mode == debug and simulator_run == True'
+}], # 'arch == arm64 and mode == debug and simulator_run == True'
##############################################################################
['asan == True', {
}], # ALWAYS
-['arch == arm or arch == a64', {
+['arch == arm or arch == arm64', {
# BUG(3251229): Times out when running new crankshaft test script.
'ecma_3/RegExp/regress-311414': [SKIP],
# BUG(1040): Allow this test to timeout.
'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
-}], # 'arch == arm or arch == a64'
+}], # 'arch == arm or arch == arm64'
-['arch == a64', {
+['arch == arm64', {
# BUG(v8:3152): Runs out of stack in debug mode.
'js1_5/extensions/regress-355497': [FAIL_OK, ['mode == debug', SKIP]],
-}], # 'arch == a64'
+}], # 'arch == arm64'
['arch == mipsel', {
'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
}], # 'arch == mipsel'
-['arch == a64 and simulator_run == True', {
+['arch == arm64 and simulator_run == True', {
'js1_5/GC/regress-203278-2': [SKIP],
'js1_5/extensions/regress-330569': [SKIP],
'js1_5/extensions/regress-351448': [SKIP],
'js1_5/extensions/regress-336410-1': [SKIP],
-}], # 'arch == a64 and simulator_run == True'
+}], # 'arch == arm64 and simulator_run == True'
]
'S15.1.3.2_A2.5_T1': [PASS, ['mode == debug', SKIP]],
}], # ALWAYS
-['arch == arm or arch == mipsel or arch == a64', {
+['arch == arm or arch == mipsel or arch == arm64', {
# TODO(mstarzinger): Causes stack overflow on simulators due to eager
# compilation of parenthesized function literals. Needs investigation.
'S15.1.3.2_A2.5_T1': [SKIP],
'S15.1.3.3_A2.3_T1': [SKIP],
'S15.1.3.4_A2.3_T1': [SKIP],
-}], # 'arch == arm or arch == mipsel or arch == a64'
+}], # 'arch == arm or arch == mipsel or arch == arm64'
]
['simulator', {
'function-apply-aliased': [SKIP],
}], # 'simulator'
-['arch == a64 and simulator_run == True', {
+['arch == arm64 and simulator_run == True', {
'dfg-int-overflow-in-loop': [SKIP],
-}], # 'arch == a64 and simulator_run == True'
+}], # 'arch == arm64 and simulator_run == True'
]
# This script reads in CSV formatted instruction data, and draws a stacked
# graph in png format.
-defaultfile=a64_inst.csv
-defaultout=a64_inst.png
+defaultfile=arm64_inst.csv
+defaultout=arm64_inst.png
gnuplot=/usr/bin/gnuplot
'../../src/arm/stub-cache-arm.cc',
],
}],
- ['v8_target_arch=="a64" or v8_target_arch=="arm64"', {
- 'sources': [ ### gcmole(arch:a64) ###
- '../../src/a64/assembler-a64.cc',
- '../../src/a64/assembler-a64.h',
- '../../src/a64/assembler-a64-inl.h',
- '../../src/a64/builtins-a64.cc',
- '../../src/a64/codegen-a64.cc',
- '../../src/a64/codegen-a64.h',
- '../../src/a64/code-stubs-a64.cc',
- '../../src/a64/code-stubs-a64.h',
- '../../src/a64/constants-a64.h',
- '../../src/a64/cpu-a64.cc',
- '../../src/a64/cpu-a64.h',
- '../../src/a64/debug-a64.cc',
- '../../src/a64/decoder-a64.cc',
- '../../src/a64/decoder-a64.h',
- '../../src/a64/decoder-a64-inl.h',
- '../../src/a64/deoptimizer-a64.cc',
- '../../src/a64/disasm-a64.cc',
- '../../src/a64/disasm-a64.h',
- '../../src/a64/frames-a64.cc',
- '../../src/a64/frames-a64.h',
- '../../src/a64/full-codegen-a64.cc',
- '../../src/a64/ic-a64.cc',
- '../../src/a64/instructions-a64.cc',
- '../../src/a64/instructions-a64.h',
- '../../src/a64/instrument-a64.cc',
- '../../src/a64/instrument-a64.h',
- '../../src/a64/lithium-a64.cc',
- '../../src/a64/lithium-a64.h',
- '../../src/a64/lithium-codegen-a64.cc',
- '../../src/a64/lithium-codegen-a64.h',
- '../../src/a64/lithium-gap-resolver-a64.cc',
- '../../src/a64/lithium-gap-resolver-a64.h',
- '../../src/a64/macro-assembler-a64.cc',
- '../../src/a64/macro-assembler-a64.h',
- '../../src/a64/macro-assembler-a64-inl.h',
- '../../src/a64/regexp-macro-assembler-a64.cc',
- '../../src/a64/regexp-macro-assembler-a64.h',
- '../../src/a64/simulator-a64.cc',
- '../../src/a64/simulator-a64.h',
- '../../src/a64/stub-cache-a64.cc',
- '../../src/a64/utils-a64.cc',
- '../../src/a64/utils-a64.h',
+ ['v8_target_arch=="arm64"', {
+ 'sources': [ ### gcmole(arch:arm64) ###
+ '../../src/arm64/assembler-arm64.cc',
+ '../../src/arm64/assembler-arm64.h',
+ '../../src/arm64/assembler-arm64-inl.h',
+ '../../src/arm64/builtins-arm64.cc',
+ '../../src/arm64/codegen-arm64.cc',
+ '../../src/arm64/codegen-arm64.h',
+ '../../src/arm64/code-stubs-arm64.cc',
+ '../../src/arm64/code-stubs-arm64.h',
+ '../../src/arm64/constants-arm64.h',
+ '../../src/arm64/cpu-arm64.cc',
+ '../../src/arm64/cpu-arm64.h',
+ '../../src/arm64/debug-arm64.cc',
+ '../../src/arm64/decoder-arm64.cc',
+ '../../src/arm64/decoder-arm64.h',
+ '../../src/arm64/decoder-arm64-inl.h',
+ '../../src/arm64/deoptimizer-arm64.cc',
+ '../../src/arm64/disasm-arm64.cc',
+ '../../src/arm64/disasm-arm64.h',
+ '../../src/arm64/frames-arm64.cc',
+ '../../src/arm64/frames-arm64.h',
+ '../../src/arm64/full-codegen-arm64.cc',
+ '../../src/arm64/ic-arm64.cc',
+ '../../src/arm64/instructions-arm64.cc',
+ '../../src/arm64/instructions-arm64.h',
+ '../../src/arm64/instrument-arm64.cc',
+ '../../src/arm64/instrument-arm64.h',
+ '../../src/arm64/lithium-arm64.cc',
+ '../../src/arm64/lithium-arm64.h',
+ '../../src/arm64/lithium-codegen-arm64.cc',
+ '../../src/arm64/lithium-codegen-arm64.h',
+ '../../src/arm64/lithium-gap-resolver-arm64.cc',
+ '../../src/arm64/lithium-gap-resolver-arm64.h',
+ '../../src/arm64/macro-assembler-arm64.cc',
+ '../../src/arm64/macro-assembler-arm64.h',
+ '../../src/arm64/macro-assembler-arm64-inl.h',
+ '../../src/arm64/regexp-macro-assembler-arm64.cc',
+ '../../src/arm64/regexp-macro-assembler-arm64.h',
+ '../../src/arm64/simulator-arm64.cc',
+ '../../src/arm64/simulator-arm64.h',
+ '../../src/arm64/stub-cache-arm64.cc',
+ '../../src/arm64/utils-arm64.cc',
+ '../../src/arm64/utils-arm64.h',
],
}],
['v8_target_arch=="ia32" or v8_target_arch=="mac" or OS=="mac"', {
"nacl_ia32",
"nacl_x64",
"x64",
- "a64",
"arm64"]
# Double the timeout for these:
SLOW_ARCHS = ["android_arm",
"mipsel",
"nacl_ia32",
"nacl_x64",
- "a64"]
+ "arm64"]
def BuildOptions():
for (arch, mode) in options.arch_and_mode:
try:
- if arch == "arm64":
- arch = "a64"
code = Execute(arch, mode, args, options, suites, workspace)
except KeyboardInterrupt:
return 2
# TODO(all): Combine "simulator" and "simulator_run".
simulator_run = not options.dont_skip_simulator_slow_tests and \
- arch in ['a64', 'arm', 'mips'] and ARCH_GUESS and arch != ARCH_GUESS
+ arch in ['arm64', 'arm', 'mips'] and ARCH_GUESS and arch != ARCH_GUESS
# Find available test suites and read test cases from them.
variables = {
"arch": arch,
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
-for var in ["debug", "release", "android_arm", "android_ia32", "arm", "a64",
+for var in ["debug", "release", "android_arm", "android_ia32", "arm", "arm64",
"ia32", "mipsel", "x64", "nacl_ia32", "nacl_x64", "macos",
"windows", "linux"]:
VARIABLES[var] = var
def UseSimulator(arch):
machine = platform.machine()
return (machine and
- (arch == "mipsel" or arch == "arm" or arch == "a64") and
+ (arch == "mipsel" or arch == "arm" or arch == "arm64") and
not arch.startswith(machine))