static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ movq(kScratchRegister,
+ __ movp(kScratchRegister,
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(kScratchRegister,
+ __ movp(kScratchRegister,
FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset));
__ lea(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
__ jmp(kScratchRegister);
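// Editor's aside: movp is assumed to emit a pointer-sized move, so the same
// source serves 8-byte pointers (x64) and 4-byte pointers (x32). A minimal
// sketch of the idea, not V8's actual emitter:
void MacroAssembler::movp(Register dst, const Operand& src) {
  if (kPointerSize == kInt64Size) {
    movq(dst, src);  // pointer fields are 8 bytes wide
  } else {
    movl(dst, src);  // pointer fields are 4 bytes wide (x32 ABI)
  }
}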
// Verified that the constructor is a JSFunction.
// Load the initial map and verify that it is in fact a map.
// rdi: constructor
- __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi
ASSERT(kSmiTag == 0);
__ JumpIfSmi(rax, &rt_call);
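// JumpIfSmi relies on the tagging guaranteed by the ASSERT above: smis have
// a clear low bit, heap pointers a set one. Editor's sketch of the check it
// expands to (kSmiTagMask assumed to be 1):
//   __ testb(rax, Immediate(kSmiTagMask));
//   __ j(zero, &rt_call);  // low bit clear => smi (and NULL looks like one)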
if (count_constructions) {
Label allocate;
// Decrease generous allocation count.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ decb(FieldOperand(rcx,
SharedFunctionInfo::kConstructionCountOffset));
__ j(not_zero, &allocate);
// rax: initial map
// rbx: JSObject (not HeapObject tagged - the actual address).
// rdi: start of next object
- __ movq(Operand(rbx, JSObject::kMapOffset), rax);
+ __ movp(Operand(rbx, JSObject::kMapOffset), rax);
__ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
- __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
+ __ movp(Operand(rbx, JSObject::kPropertiesOffset), rcx);
+ __ movp(Operand(rbx, JSObject::kElementsOffset), rcx);
// Set extra fields in the newly allocated object.
// rbx: JSObject
// rdi: FixedArray
// rdx: number of elements
// rax: start of next object
__ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
- __ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
+ __ movp(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
__ Integer32ToSmi(rdx, rdx);
- __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
+ __ movp(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
// Initialize the fields to undefined.
// rbx: JSObject
__ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
__ jmp(&entry);
__ bind(&loop);
- __ movq(Operand(rcx, 0), rdx);
+ __ movp(Operand(rcx, 0), rdx);
__ addq(rcx, Immediate(kPointerSize));
__ bind(&entry);
__ cmpq(rcx, rax);
// rbx: JSObject
// rdi: FixedArray
__ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
- __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
+ __ movp(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
// Continue with JSObject being successfully allocated
// rdi: function (constructor)
__ bind(&rt_call);
// Must restore rdi (constructor) before calling runtime.
- __ movq(rdi, Operand(rsp, 0));
+ __ movp(rdi, Operand(rsp, 0));
__ push(rdi);
__ CallRuntime(Runtime::kNewObject, 1);
- __ movq(rbx, rax); // store result in rbx
+ __ movp(rbx, rax); // store result in rbx
// New object allocated.
// rbx: newly allocated object
__ pop(rdi);
// Retrieve smi-tagged arguments count from the stack.
- __ movq(rax, Operand(rsp, 0));
+ __ movp(rax, Operand(rsp, 0));
__ SmiToInteger32(rax, rax);
// Push the allocated receiver to the stack. We need two copies
// Copy arguments and receiver to the expression stack.
Label loop, entry;
- __ movq(rcx, rax);
+ __ movp(rcx, rax);
__ jmp(&entry);
__ bind(&loop);
__ push(Operand(rbx, rcx, times_pointer_size, 0));
// Call the function.
if (is_api_function) {
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
Handle<Code> code =
masm->isolate()->builtins()->HandleApiCallConstruct();
__ Call(code, RelocInfo::CODE_TARGET);
}
// Restore context from the frame.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid
// of the receiver and use the result; see ECMA-262 section 13.2.2-7
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
- __ movq(rax, Operand(rsp, 0));
+ __ movp(rax, Operand(rsp, 0));
// Restore the arguments count and leave the construct frame.
__ bind(&exit);
- __ movq(rbx, Operand(rsp, kPointerSize)); // Get arguments count.
+ __ movp(rbx, Operand(rsp, kPointerSize)); // Get arguments count.
// Leave construct frame.
}
FrameScope scope(masm, StackFrame::INTERNAL);
// Load the function context into rsi.
- __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
// Push the function and the receiver onto the stack.
__ push(rdx);
__ push(r8);
// Load the number of arguments and setup pointer to the arguments.
- __ movq(rax, r9);
+ __ movp(rax, r9);
// Load the previous frame pointer to access C arguments on the stack.
- __ movq(kScratchRegister, Operand(rbp, 0));
- __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
+ __ movp(kScratchRegister, Operand(rbp, 0));
+ __ movp(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
// Load the function pointer into rdi.
- __ movq(rdi, rdx);
+ __ movp(rdi, rdx);
#else // _WIN64
// GCC parameters in:
// rdi : entry (ignored)
// rcx : argc
// r8 : argv
- __ movq(rdi, rsi);
+ __ movp(rdi, rsi);
// rdi : function
// Clear the context before we push it when entering the internal frame.
// Push the function and receiver and setup the context.
__ push(rdi);
__ push(rdx);
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Load the number of arguments and setup pointer to the arguments.
- __ movq(rax, rcx);
- __ movq(rbx, r8);
+ __ movp(rax, rcx);
+ __ movp(rbx, r8);
#endif // _WIN64
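// Editor's note on the #ifdef above: the two ABIs pass integer arguments in
// different registers, which V8 abstracts as arg_reg_N. Sketch of the
// assumed definitions:
//   #ifdef _WIN64
//   const Register arg_reg_1 = rcx;  // Win64: rcx, rdx, r8, r9
//   const Register arg_reg_2 = rdx;
//   #else
//   const Register arg_reg_1 = rdi;  // System V: rdi, rsi, rdx, rcx, r8, r9
//   const Register arg_reg_2 = rsi;
//   #endif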
// Current stack contents:
__ Set(rcx, 0); // Set loop variable to 0.
__ jmp(&entry);
__ bind(&loop);
- __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
+ __ movp(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
__ push(Operand(kScratchRegister, 0)); // dereference handle
__ addq(rcx, Immediate(1));
__ bind(&entry);
__ subq(Operand(rsp, 0), Immediate(5));
__ Pushad();
__ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
- __ movq(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
+ __ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
{ // NOLINT
FrameScope scope(masm, StackFrame::MANUAL);
__ PrepareCallCFunction(2);
// We rely on make_code_young not doing any GC, which allows us to
// save/restore the registers without worrying about which of them contain
// pointers.
__ Pushad();
__ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
- __ movq(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
+ __ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
__ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
{ // NOLINT
FrameScope scope(masm, StackFrame::MANUAL);
// Perform prologue operations usually performed by the young code stub.
__ PopReturnAddressTo(kScratchRegister);
__ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
+ __ movp(rbp, rsp);
__ push(rsi); // Callee's context.
__ push(rdi); // Callee's JS Function.
__ PushReturnAddressFrom(kScratchRegister);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
- __ movq(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
+ __ movp(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
__ cmpq(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
// if it is a function.
Label slow, non_function;
StackArgumentsAccessor args(rsp, rax);
- __ movq(rdi, args.GetReceiverOperand());
+ __ movp(rdi, args.GetReceiverOperand());
__ JumpIfSmi(rdi, &non_function);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &slow);
__ Set(rdx, 0); // indicate regular JS_FUNCTION
{ Label convert_to_object, use_global_receiver, patch_receiver;
// Change context eagerly in case we need the global receiver.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Do not transform the receiver for strict mode functions.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testb(FieldOperand(rbx, SharedFunctionInfo::kStrictModeByteOffset),
Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_equal, &shift_arguments);
__ j(not_zero, &shift_arguments);
// Compute the receiver in non-strict mode.
- __ movq(rbx, args.GetArgumentOperand(1));
+ __ movp(rbx, args.GetArgumentOperand(1));
__ JumpIfSmi(rbx, &convert_to_object, Label::kNear);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
__ push(rbx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ Set(rdx, 0); // indicate regular JS_FUNCTION
__ pop(rax);
}
// Restore the function to rdi.
- __ movq(rdi, args.GetReceiverOperand());
+ __ movp(rdi, args.GetReceiverOperand());
__ jmp(&patch_receiver, Label::kNear);
__ bind(&use_global_receiver);
- __ movq(rbx,
+ __ movp(rbx,
Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ __ movp(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
- __ movq(args.GetArgumentOperand(1), rbx);
+ __ movp(args.GetArgumentOperand(1), rbx);
__ jmp(&shift_arguments);
}
// CALL_NON_FUNCTION builtin expects the non-function callee as
// receiver, so overwrite the first argument which will ultimately
// become the receiver.
- __ movq(args.GetArgumentOperand(1), rdi);
+ __ movp(args.GetArgumentOperand(1), rdi);
// 4. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
// the original first argument the new receiver.
__ bind(&shift_arguments);
{ Label loop;
- __ movq(rcx, rax);
+ __ movp(rcx, rax);
__ bind(&loop);
- __ movq(rbx, Operand(rsp, rcx, times_pointer_size, 0));
- __ movq(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
+ __ movp(rbx, Operand(rsp, rcx, times_pointer_size, 0));
+ __ movp(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
__ decq(rcx);
__ j(not_sign, &loop); // While non-negative (to copy return address).
__ pop(rbx); // Discard copy of return address.
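// Editor's worked example of the shift (rax == 2; addresses increase to the
// right from rsp):
//   before loop: [ret][arg2][arg1][recv]
//   after loop:  [ret][ret ][arg2][arg1]   each slot copied one slot up
//   after pop:   [ret][arg2][arg1]         arg1 is the new receiver and the
//                                          argument count drops to 1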
// 5b. Get the code to call from the function and check that the number of
// expected arguments matches what we're providing. If so, jump
// (tail-call) to the code in register rdx without checking arguments.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movsxlq(rbx,
FieldOperand(rdx,
SharedFunctionInfo::kFormalParameterCountOffset));
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ cmpq(rax, rbx);
__ j(not_equal,
     masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
     RelocInfo::CODE_TARGET);
// limit" is checked.
Label okay;
__ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
- __ movq(rcx, rsp);
+ __ movp(rcx, rsp);
// Make rcx the space we have left. The stack might already be overflowed
// here which will cause rcx to become negative.
__ subq(rcx, kScratchRegister);
__ push(Immediate(0)); // index
// Get the receiver.
- __ movq(rbx, Operand(rbp, kReceiverOffset));
+ __ movp(rbx, Operand(rbp, kReceiverOffset));
// Check that the function is a JS function (otherwise it must be a proxy).
Label push_receiver;
- __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ movp(rdi, Operand(rbp, kFunctionOffset));
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &push_receiver);
// Change context eagerly to get the right global object if necessary.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Do not transform the receiver for strict mode functions.
Label call_to_object, use_global_receiver;
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_equal, &push_receiver);
__ bind(&call_to_object);
__ push(rbx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ jmp(&push_receiver, Label::kNear);
__ bind(&use_global_receiver);
- __ movq(rbx,
+ __ movp(rbx,
Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ __ movp(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
// Push the receiver.
__ bind(&push_receiver);
// Copy all arguments from the array to the stack.
Label entry, loop;
- __ movq(rax, Operand(rbp, kIndexOffset));
+ __ movp(rax, Operand(rbp, kIndexOffset));
__ jmp(&entry);
__ bind(&loop);
- __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
+ __ movp(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
// Use inline caching to speed up access to arguments.
Handle<Code> ic =
    masm->isolate()->builtins()->KeyedLoadIC_Initialize();
__ push(rax);
// Update the index on the stack and in register rax.
- __ movq(rax, Operand(rbp, kIndexOffset));
+ __ movp(rax, Operand(rbp, kIndexOffset));
__ SmiAddConstant(rax, rax, Smi::FromInt(1));
- __ movq(Operand(rbp, kIndexOffset), rax);
+ __ movp(Operand(rbp, kIndexOffset), rax);
__ bind(&entry);
__ cmpq(rax, Operand(rbp, kLimitOffset));
Label call_proxy;
ParameterCount actual(rax);
__ SmiToInteger32(rax, rax);
- __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ movp(rdi, Operand(rbp, kFunctionOffset));
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &call_proxy);
__ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
Label no_arguments;
__ testq(rax, rax);
__ j(zero, &no_arguments);
- __ movq(rbx, args.GetArgumentOperand(1));
+ __ movp(rbx, args.GetArgumentOperand(1));
__ PopReturnAddressTo(rcx);
__ lea(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
__ PushReturnAddressFrom(rcx);
- __ movq(rax, rbx);
+ __ movp(rax, rbx);
// Lookup the argument in the number to string cache.
Label not_cached, argument_is_string;
__ cmpb(FieldOperand(rcx, Map::kUnusedPropertyFieldsOffset), Immediate(0));
__ Assert(equal, kUnexpectedUnusedPropertiesOfStringWrapper);
}
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+ __ movp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
// Set properties and elements.
__ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rcx);
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
+ __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rcx);
+ __ movp(FieldOperand(rax, JSObject::kElementsOffset), rcx);
// Set the value.
- __ movq(FieldOperand(rax, JSValue::kValueOffset), rbx);
+ __ movp(FieldOperand(rax, JSValue::kValueOffset), rbx);
// Ensure the object is fully initialized.
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
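// Editor's summary of the stores above, matching the assert: a JSValue is
// exactly four pointer-sized fields, [map][properties][elements][value].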
__ JumpIfSmi(rax, &convert_argument);
Condition is_string = masm->IsObjectStringType(rax, rbx, rcx);
__ j(NegateCondition(is_string), &convert_argument);
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ IncrementCounter(counters->string_ctor_string_value(), 1);
__ jmp(&argument_is_string);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
__ pop(rdi);
}
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ jmp(&argument_is_string);
// Load the empty string into rbx, remove the receiver from the
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(rbp);
- __ movq(rbp, rsp);
+ __ movp(rbp, rsp);
// Store the arguments adaptor context sentinel.
__ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
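// Frame being built here (editor's sketch; the function and argument-count
// pushes are elided in this excerpt, offsets per the frame constants):
//   [rbp+8]  return address
//   [rbp+0]  caller's rbp
//   [rbp-8]  ARGUMENTS_ADAPTOR sentinel, stored in the context slot
//   [rbp-16] function
//   [rbp-24] argument count (smi), read back via kLengthOffset below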
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Retrieve the number of arguments from the stack. Number is a Smi.
- __ movq(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movp(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
// Leave the frame.
- __ movq(rsp, rbp);
+ __ movp(rsp, rbp);
__ pop(rbp);
// Remove caller arguments from the stack.
__ IncrementCounter(counters->arguments_adaptors(), 1);
Label enough, too_few;
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ cmpq(rax, rbx);
__ j(less, &too_few);
__ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ j(less, &fill);
// Restore function pointer.
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
// Call the entry point.
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ bind(&skip);
// Load deoptimization data from the code object.
- __ movq(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ movp(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
__ SmiToInteger32(rbx, Operand(rbx, FixedArray::OffsetOfElementAt(
    DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
// Get the function from the stack.
StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, args.GetArgumentOperand(0));
+ __ movp(rcx, args.GetArgumentOperand(0));
// Set up the object header.
__ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
+ __ movp(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
__ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
// Set up the fixed slots.
__ Set(rbx, 0); // Set to NULL.
- __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
- __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi);
- __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
+ __ movp(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
+ __ movp(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi);
+ __ movp(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
// Copy the global object from the previous context.
- __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), rbx);
+ __ movp(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(Operand(rax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), rbx);
// Initialize the rest of the slots to undefined.
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
+ __ movp(Operand(rax, Context::SlotOffset(i)), rbx);
}
// Return and remove the on-stack parameter.
- __ movq(rsi, rax);
+ __ movp(rsi, rax);
__ ret(1 * kPointerSize);
// Need to collect. Call into runtime system.
// Get the function from the stack.
StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, args.GetArgumentOperand(1));
+ __ movp(rcx, args.GetArgumentOperand(1));
// Get the serialized scope info from the stack.
- __ movq(rbx, args.GetArgumentOperand(0));
+ __ movp(rbx, args.GetArgumentOperand(0));
// Set up the object header.
__ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
+ __ movp(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
__ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
// If this block context is nested in the native context we get a smi
__ cmpq(rcx, Immediate(0));
__ Assert(equal, kExpected0AsASmiSentinel);
}
- __ movq(rcx, GlobalObjectOperand());
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
- __ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
+ __ movp(rcx, GlobalObjectOperand());
+ __ movp(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
+ __ movp(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
// Set up the fixed slots.
- __ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx);
- __ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi);
- __ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);
+ __ movp(ContextOperand(rax, Context::CLOSURE_INDEX), rcx);
+ __ movp(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi);
+ __ movp(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);
// Copy the global object from the previous context.
- __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movq(ContextOperand(rax, Context::GLOBAL_OBJECT_INDEX), rbx);
+ __ movp(rbx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
+ __ movp(ContextOperand(rax, Context::GLOBAL_OBJECT_INDEX), rbx);
// Initialize the rest of the slots to the hole value.
__ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
for (int i = 0; i < slots_; i++) {
- __ movq(ContextOperand(rax, i + Context::MIN_CONTEXT_SLOTS), rbx);
+ __ movp(ContextOperand(rax, i + Context::MIN_CONTEXT_SLOTS), rbx);
}
// Return and remove the on-stack parameter.
- __ movq(rsi, rax);
+ __ movp(rsi, rax);
__ ret(2 * kPointerSize);
// Need to collect. Call into runtime system.
Label call_runtime, done, exponent_not_smi, int_exponent;
// Save 1 in double_result - we need this several times later on.
- __ movq(scratch, Immediate(1));
+ __ movp(scratch, Immediate(1));
__ Cvtlsi2sd(double_result, scratch);
if (exponent_type_ == ON_STACK) {
// This can only happen if the stub is called from non-optimized code.
// Load input parameters from stack.
StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(base, args.GetArgumentOperand(0));
- __ movq(exponent, args.GetArgumentOperand(1));
+ __ movp(base, args.GetArgumentOperand(0));
+ __ movp(exponent, args.GetArgumentOperand(1));
__ JumpIfSmi(base, &base_is_smi, Label::kNear);
__ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ bind(&int_exponent);
const XMMRegister double_scratch2 = double_exponent;
// Back up exponent as we need to check if exponent is negative later.
- __ movq(scratch, exponent); // Back up exponent.
+ __ movp(scratch, exponent); // Back up exponent.
__ movsd(double_scratch, double_base); // Back up base.
__ movsd(double_scratch2, double_result); // Load double_exponent with 1.
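// Editor's illustration (standalone C++, assuming <cstdint>): the
// int_exponent path is exponentiation by squaring, with a final reciprocal
// for negative exponents.
static double PowerSketch(double base, int64_t exponent) {
  bool negative = exponent < 0;
  uint64_t e = negative ? 0ULL - static_cast<uint64_t>(exponent)
                        : static_cast<uint64_t>(exponent);
  double result = 1.0;  // mirrors "Save 1 in double_result"
  while (e != 0) {
    if (e & 1) result *= base;  // fold in the bits that are set
    base *= base;               // square for the next bit
    e >>= 1;
  }
  return negative ? 1.0 / result : result;
}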
// Check that elements are FixedArray.
// We rely on StoreIC_ArrayLength below to deal with all types of
// fast elements (including COW).
- __ movq(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
+ __ movp(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
__ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
__ j(not_equal, &miss);
// Check that the array has fast properties, otherwise the length
// property might have been redefined.
- __ movq(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
+ __ movp(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
__ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(equal, &miss);
// Smi instead of the context. We can't use SmiCompare here, because that
// only works for comparing two smis.
Label adaptor;
- __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor);
__ SmiSub(rax, rax, rdx);
__ SmiToInteger32(rax, rax);
StackArgumentsAccessor args(rbp, rax, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rax, args.GetArgumentOperand(0));
+ __ movp(rax, args.GetArgumentOperand(0));
__ Ret();
// Arguments adaptor case: Check index against actual arguments
// limit found in the arguments adaptor frame. Use unsigned
// comparison to get negative check for free.
__ bind(&adaptor);
- __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ cmpq(rdx, rcx);
__ j(above_equal, &slow);
__ SmiToInteger32(rcx, rcx);
StackArgumentsAccessor adaptor_args(rbx, rcx,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rax, adaptor_args.GetArgumentOperand(0));
+ __ movp(rax, adaptor_args.GetArgumentOperand(0));
__ Ret();
// Slow-case: Handle non-smi or out-of-bounds access to arguments
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
Label adaptor_frame, try_allocate;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
__ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor_frame);
// No adaptor, parameter count = argument count.
- __ movq(rcx, rbx);
+ __ movp(rcx, rbx);
__ jmp(&try_allocate, Label::kNear);
// We have an adaptor frame. Patch the parameters pointer.
__ SmiToInteger64(rcx, Operand(rdx,
                               ArgumentsAdaptorFrameConstants::kLengthOffset));
__ lea(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(args.GetArgumentOperand(1), rdx);
+ __ movp(args.GetArgumentOperand(1), rdx);
// rbx = parameter count (untagged)
// rcx = argument count (untagged)
// Compute the mapped parameter count = min(rbx, rcx) in rbx.
__ cmpq(rbx, rcx);
__ j(less_equal, &try_allocate, Label::kNear);
- __ movq(rbx, rcx);
+ __ movp(rbx, rcx);
__ bind(&try_allocate);
// rcx = argument count (untagged)
// Get the arguments boilerplate from the current native context into rdi.
Label has_mapped_parameters, copy;
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
+ __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
__ testq(rbx, rbx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
const int kIndex = Context::ARGUMENTS_BOILERPLATE_INDEX;
- __ movq(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
+ __ movp(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
__ jmp(&copy, Label::kNear);
const int kAliasedIndex = Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX;
__ bind(&has_mapped_parameters);
- __ movq(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
+ __ movp(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
__ bind(&copy);
// rax = address of new object (tagged)
// rdi = address of boilerplate object (tagged)
// Copy the JS object part.
for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ movq(rdx, FieldOperand(rdi, i));
- __ movq(FieldOperand(rax, i), rdx);
+ __ movp(rdx, FieldOperand(rdi, i));
+ __ movp(FieldOperand(rax, i), rdx);
}
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ movq(rdx, args.GetArgumentOperand(0));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+ __ movp(rdx, args.GetArgumentOperand(0));
+ __ movp(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsCalleeIndex * kPointerSize),
rdx);
// Note: rcx is tagged from here on.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
__ Integer32ToSmi(rcx, rcx);
- __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+ __ movp(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize),
rcx);
// If we allocated a parameter map, rdi will point there, otherwise to the
// backing store.
__ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
+ __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
// rax = address of new object (tagged)
// rbx = mapped parameter count (untagged)
__ LoadRoot(kScratchRegister, Heap::kNonStrictArgumentsElementsMapRootIndex);
// rbx contains the untagged argument count. Add 2 and tag to write.
- __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+ __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
__ Integer64PlusConstantToSmi(r9, rbx, 2);
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
+ __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
+ __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
__ lea(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
+ __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
// Copy the parameter slots and the holes in the arguments.
// We need to fill in mapped_parameter_count slots. They index the context,
__ addq(r8, args.GetArgumentOperand(2));
__ subq(r8, r9);
__ Move(r11, factory->the_hole_value());
- __ movq(rdx, rdi);
+ __ movp(rdx, rdi);
__ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
// r9 = loop variable (tagged)
// r8 = mapping index (tagged)
__ bind(&parameters_loop);
__ SmiSubConstant(r9, r9, Smi::FromInt(1));
__ SmiToInteger64(kScratchRegister, r9);
- __ movq(FieldOperand(rdx, kScratchRegister,
+ __ movp(FieldOperand(rdx, kScratchRegister,
times_pointer_size,
kParameterMapHeaderSize),
r8);
- __ movq(FieldOperand(rdi, kScratchRegister,
+ __ movp(FieldOperand(rdi, kScratchRegister,
times_pointer_size,
FixedArray::kHeaderSize),
r11);
// Copy arguments header and remaining slots (if there are any).
__ Move(FieldOperand(rdi, FixedArray::kMapOffset),
factory->fixed_array_map());
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+ __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
Label arguments_loop, arguments_test;
- __ movq(r8, rbx);
- __ movq(rdx, args.GetArgumentOperand(1));
+ __ movp(r8, rbx);
+ __ movp(rdx, args.GetArgumentOperand(1));
// Untag rcx for the loop below.
__ SmiToInteger64(rcx, rcx);
__ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
__ bind(&arguments_loop);
__ subq(rdx, Immediate(kPointerSize));
- __ movq(r9, Operand(rdx, 0));
- __ movq(FieldOperand(rdi, r8,
+ __ movp(r9, Operand(rdx, 0));
+ __ movp(FieldOperand(rdi, r8,
times_pointer_size,
FixedArray::kHeaderSize),
r9);
// rcx = argument count (untagged)
__ bind(&runtime);
__ Integer32ToSmi(rcx, rcx);
- __ movq(args.GetArgumentOperand(2), rcx); // Patch argument count.
+ __ movp(args.GetArgumentOperand(2), rcx); // Patch argument count.
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
__ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &runtime);
// Patch the arguments.length and the parameters pointer.
StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(args.GetArgumentOperand(2), rcx);
+ __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movp(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
__ lea(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(args.GetArgumentOperand(1), rdx);
+ __ movp(args.GetArgumentOperand(1), rdx);
__ bind(&runtime);
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
__ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor_frame);
// Get the length from the frame.
StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, args.GetArgumentOperand(2));
+ __ movp(rcx, args.GetArgumentOperand(2));
__ SmiToInteger64(rcx, rcx);
__ jmp(&try_allocate);
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
- __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(args.GetArgumentOperand(2), rcx);
+ __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movp(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
__ lea(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(args.GetArgumentOperand(1), rdx);
+ __ movp(args.GetArgumentOperand(1), rdx);
// Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array.
__ Allocate(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
// Get the arguments boilerplate from the current native context.
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
+ __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
const int offset =
Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
- __ movq(rdi, Operand(rdi, offset));
+ __ movp(rdi, Operand(rdi, offset));
// Copy the JS object part.
for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rdi, i));
- __ movq(FieldOperand(rax, i), rbx);
+ __ movp(rbx, FieldOperand(rdi, i));
+ __ movp(FieldOperand(rax, i), rbx);
}
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ movq(rcx, args.GetArgumentOperand(2));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+ __ movp(rcx, args.GetArgumentOperand(2));
+ __ movp(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize),
rcx);
__ j(zero, &done);
// Get the parameters pointer from the stack.
- __ movq(rdx, args.GetArgumentOperand(1));
+ __ movp(rdx, args.GetArgumentOperand(1));
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
+ __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
__ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+ __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+ __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
// Untag the length for the loop below.
__ SmiToInteger64(rcx, rcx);
// Copy the fixed array slots.
Label loop;
__ bind(&loop);
- __ movq(rbx, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
+ __ movp(rbx, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
+ __ movp(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
__ addq(rdi, Immediate(kPointerSize));
__ subq(rdx, Immediate(kPointerSize));
__ decq(rcx);
__ j(zero, &runtime);
// Check that the first argument is a JSRegExp object.
- __ movq(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
+ __ movp(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
__ JumpIfSmi(rax, &runtime);
__ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
// Check that the RegExp has been compiled (data contains a fixed array).
- __ movq(rax, FieldOperand(rax, JSRegExp::kDataOffset));
+ __ movp(rax, FieldOperand(rax, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
Condition is_smi = masm->CheckSmi(rax);
__ Check(NegateCondition(is_smi),
         kUnexpectedTypeForRegExpDataFixedArrayExpected);
// Reset offset for possibly sliced string.
__ Set(r14, 0);
- __ movq(rdi, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
+ __ movp(rdi, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
__ JumpIfSmi(rdi, &runtime);
- __ movq(r15, rdi); // Make a copy of the original subject string.
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movp(r15, rdi); // Make a copy of the original subject string.
+ __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
// rax: RegExp data (FixedArray)
// rdi: subject string
__ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset),
Heap::kempty_stringRootIndex);
__ j(not_equal, &runtime);
- __ movq(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
+ __ movp(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
__ bind(&check_underlying);
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ movq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
// (5a) Is subject sequential two byte? If yes, go to (9).
__ testb(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
// (6) One byte sequential. Load regexp code for one byte.
__ bind(&seq_one_byte_string);
// rax: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
+ __ movp(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
__ Set(rcx, 1); // Type is one byte.
// (E) Carry on. String handling is done.
// We have to use r15 instead of rdi to load the length because rdi might
// have been only made to look like a sequential string when it actually
// is an external string.
- __ movq(rbx, args.GetArgumentOperand(PREVIOUS_INDEX_ARGUMENT_INDEX));
+ __ movp(rbx, args.GetArgumentOperand(PREVIOUS_INDEX_ARGUMENT_INDEX));
__ JumpIfNotSmi(rbx, &runtime);
__ SmiCompare(rbx, FieldOperand(r15, String::kLengthOffset));
__ j(above_equal, &runtime);
// Argument 7: Start (high end) of backtracking stack memory area.
__ Move(kScratchRegister, address_of_regexp_stack_memory_address);
- __ movq(r9, Operand(kScratchRegister, 0));
+ __ movp(r9, Operand(kScratchRegister, 0));
__ Move(kScratchRegister, address_of_regexp_stack_memory_size);
__ addq(r9, Operand(kScratchRegister, 0));
__ movq(Operand(rsp, (argument_slots_on_stack - 3) * kRegisterSize), r9);
// r15: original subject string
// Argument 2: Previous index.
- __ movq(arg_reg_2, rbx);
+ __ movp(arg_reg_2, rbx);
// Argument 4: End of string data
// Argument 3: Start of string data
// use rbp, which points exactly to one pointer size below the previous rsp.
// (Because creating a new stack frame pushes the previous rbp onto the stack
// and thereby moves up rsp by one kPointerSize.)
- __ movq(arg_reg_1, r15);
+ __ movp(arg_reg_1, r15);
// Locate the code entry and call it.
__ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
// Load RegExp data.
__ bind(&success);
- __ movq(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
- __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
+ __ movp(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
+ __ movp(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
__ SmiToInteger32(rax,
FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
// rdx: Number of capture registers
// Check that the fourth object is a JSArray object.
- __ movq(r15, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX));
+ __ movp(r15, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX));
__ JumpIfSmi(r15, &runtime);
__ CmpObjectType(r15, JS_ARRAY_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
// Check that the JSArray is in fast case.
- __ movq(rbx, FieldOperand(r15, JSArray::kElementsOffset));
- __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(r15, JSArray::kElementsOffset));
+ __ movp(rax, FieldOperand(rbx, HeapObject::kMapOffset));
__ CompareRoot(rax, Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &runtime);
// Check that the last match info has space for the capture registers and the
// rdx: number of capture registers
// Store the capture count.
__ Integer32ToSmi(kScratchRegister, rdx);
- __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
+ __ movp(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
kScratchRegister);
// Store last subject and last input.
- __ movq(rax, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
- __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
- __ movq(rcx, rax);
+ __ movp(rax, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
+ __ movp(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
+ __ movp(rcx, rax);
__ RecordWriteField(rbx,
RegExpImpl::kLastSubjectOffset,
rax,
rdi,
kDontSaveFPRegs);
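// RecordWriteField tells the store buffer / incremental marker that a
// pointer field of rbx changed; it may clobber the value and scratch
// registers, which is why rcx keeps a copy of the subject above and it is
// restored into rax below.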
- __ movq(rax, rcx);
- __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
+ __ movp(rax, rcx);
+ __ movp(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
__ RecordWriteField(rbx,
RegExpImpl::kLastInputOffset,
rax,
rdi,
kDontSaveFPRegs);
__ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
__ Integer32ToSmi(rdi, rdi);
// Store the smi value in the last match info.
- __ movq(FieldOperand(rbx,
+ __ movp(FieldOperand(rbx,
rdx,
times_pointer_size,
RegExpImpl::kFirstCaptureOffset),
rdi);
__ bind(&done);
// Return last match info.
- __ movq(rax, r15);
+ __ movp(rax, r15);
__ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);
__ bind(&exception);
ExternalReference pending_exception_address(
    Isolate::kPendingExceptionAddress, isolate);
Operand pending_exception_operand =
masm->ExternalOperand(pending_exception_address, rbx);
- __ movq(rax, pending_exception_operand);
+ __ movp(rax, pending_exception_operand);
__ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
__ cmpq(rax, rdx);
__ j(equal, &runtime);
- __ movq(pending_exception_operand, rdx);
+ __ movp(pending_exception_operand, rdx);
__ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
Label termination_exception;
// (8) External string. Short external strings have been ruled out.
__ bind(&external_string);
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
__ testb(rbx, Immediate(kIsIndirectStringMask));
__ Assert(zero, kExternalStringExpectedButNotFound);
}
- __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
+ __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
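// Editor's arithmetic for the subq above: sequential string data lives at
// string + SeqString::kHeaderSize - kHeapObjectTag, so pointing rdi at
// resource_data - (kHeaderSize - kHeapObjectTag) lets the sequential-string
// code below address the external data unchanged.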
// rax: RegExp data (FixedArray)
// (9) Two byte sequential. Load regexp code for one byte. Go to (E).
__ bind(&seq_two_byte_string);
- __ movq(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
+ __ movp(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
__ Set(rcx, 0); // Type is two byte.
__ jmp(&check_code); // Go to (E).
// (11) Sliced string. Replace subject with parent. Go to (5a).
// Load offset into r14 and replace subject string with parent.
__ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
- __ movq(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
+ __ movp(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
__ jmp(&check_underlying);
#endif // V8_INTERPRETED_REGEXP
}
Label slowcase;
Label done;
StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(r8, args.GetArgumentOperand(0));
+ __ movp(r8, args.GetArgumentOperand(0));
__ JumpIfNotSmi(r8, &slowcase);
__ SmiToInteger32(rbx, r8);
__ cmpl(rbx, Immediate(kMaxInlineLength));
// r8: Number of array elements as smi.
// Set JSArray map to global.regexp_result_map().
- __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
- __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
+ __ movp(rdx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
+ __ movp(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
+ __ movp(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
+ __ movp(FieldOperand(rax, HeapObject::kMapOffset), rdx);
// Set empty properties FixedArray.
__ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
+ __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
// Set elements to point to FixedArray allocated right after the JSArray.
__ lea(rcx, Operand(rax, JSRegExpResult::kSize));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
+ __ movp(FieldOperand(rax, JSObject::kElementsOffset), rcx);
// Set input, index and length fields from arguments.
- __ movq(r8, args.GetArgumentOperand(2));
- __ movq(FieldOperand(rax, JSRegExpResult::kInputOffset), r8);
- __ movq(r8, args.GetArgumentOperand(1));
- __ movq(FieldOperand(rax, JSRegExpResult::kIndexOffset), r8);
- __ movq(r8, args.GetArgumentOperand(0));
- __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
+ __ movp(r8, args.GetArgumentOperand(2));
+ __ movp(FieldOperand(rax, JSRegExpResult::kInputOffset), r8);
+ __ movp(r8, args.GetArgumentOperand(1));
+ __ movp(FieldOperand(rax, JSRegExpResult::kIndexOffset), r8);
+ __ movp(r8, args.GetArgumentOperand(0));
+ __ movp(FieldOperand(rax, JSArray::kLengthOffset), r8);
// Fill out the elements FixedArray.
// rax: JSArray.
// Set map.
__ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(rcx, HeapObject::kMapOffset), kScratchRegister);
+ __ movp(FieldOperand(rcx, HeapObject::kMapOffset), kScratchRegister);
// Set length.
__ Integer32ToSmi(rdx, rbx);
- __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
+ __ movp(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
// Fill contents of fixed-array with undefined.
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
__ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
__ bind(&loop);
__ j(less_equal, &done); // Jump if rcx is negative or zero.
__ subl(rbx, Immediate(1));
- __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
+ __ movp(Operand(rcx, rbx, times_pointer_size, 0), rdx);
__ jmp(&loop);
__ bind(&done);
Register object,
Register scratch) {
__ JumpIfSmi(object, label);
- __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movp(scratch, FieldOperand(object, HeapObject::kMapOffset));
__ movzxbq(scratch,
FieldOperand(scratch, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ j(no_overflow, &smi_done);
__ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
__ bind(&smi_done);
- __ movq(rax, rdx);
+ __ movp(rax, rdx);
__ ret(0);
__ bind(&non_smi);
// If heap number, handle it in the slow case.
__ j(equal, &slow);
// Return non-equal. ebx (the lower half of rbx) is not zero.
- __ movq(rax, rbx);
+ __ movp(rax, rbx);
__ ret(0);
__ bind(&not_smis);
Label initialize, done, miss, megamorphic, not_array_function;
// Load the cache state into rcx.
- __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
+ __ movp(rcx, FieldOperand(rbx, Cell::kValueOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
__ jmp(&done);
__ bind(&not_array_function);
- __ movq(FieldOperand(rbx, Cell::kValueOffset), rdi);
+ __ movp(FieldOperand(rbx, Cell::kValueOffset), rdi);
// No need for a write barrier here - cells are rescanned.
__ bind(&done);
// CALL_NON_FUNCTION expects the non-function callee as receiver (instead
// of the original receiver from the call site).
__ bind(&non_function);
- __ movq(args.GetReceiverOperand(), rdi);
+ __ movp(args.GetReceiverOperand(), rdi);
__ Set(rax, argc_);
__ Set(rbx, 0);
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
// Jump to the function-specific construct stub.
Register jmp_reg = rcx;
- __ movq(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(jmp_reg, FieldOperand(jmp_reg,
+ __ movp(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(jmp_reg, FieldOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
__ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
__ jmp(jmp_reg);
static void JumpIfOOM(MacroAssembler* masm,
                      Register value,
Register scratch,
Label* oom_label) {
- __ movq(scratch, value);
+ __ movp(scratch, value);
STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
STATIC_ASSERT(kFailureTag == 3);
__ and_(scratch, Immediate(0xf));
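// Editor's worked check, from the asserts above: a failure keeps
// kFailureTag (== 3) in its low two bits and the failure type in the next
// two, so an out-of-memory failure (type 3) has low nibble
// (3 << 2) | 3 == 0xf, which the mask-and-compare against 0xf detects.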
// stack is known to be aligned. This function takes one argument which is
// passed in register.
__ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
- __ movq(arg_reg_1, rax);
+ __ movp(arg_reg_1, rax);
__ Move(kScratchRegister,
ExternalReference::perform_gc_function(masm->isolate()));
__ call(kScratchRegister);
if (result_size_ < 2) {
// Pass a pointer to the Arguments object as the first argument.
// Return result in single register (rax).
- __ movq(rcx, r14); // argc.
- __ movq(rdx, r15); // argv.
+ __ movp(rcx, r14); // argc.
+ __ movp(rdx, r15); // argv.
__ Move(r8, ExternalReference::isolate_address(masm->isolate()));
} else {
ASSERT_EQ(2, result_size_);
// Pass a pointer to the result location as the first argument.
__ lea(rcx, StackSpaceOperand(2));
// Pass a pointer to the Arguments object as the second argument.
- __ movq(rdx, r14); // argc.
- __ movq(r8, r15); // argv.
+ __ movp(rdx, r14); // argc.
+ __ movp(r8, r15); // argv.
__ Move(r9, ExternalReference::isolate_address(masm->isolate()));
}
#else // _WIN64
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
- __ movq(rdi, r14); // argc.
- __ movq(rsi, r15); // argv.
+ __ movp(rdi, r14); // argc.
+ __ movp(rsi, r15); // argv.
__ Move(rdx, ExternalReference::isolate_address(masm->isolate()));
#endif
__ call(rbx);
ExternalReference pending_exception_address(
    Isolate::kPendingExceptionAddress, masm->isolate());
Operand pending_exception_operand =
masm->ExternalOperand(pending_exception_address);
- __ movq(rax, pending_exception_operand);
+ __ movp(rax, pending_exception_operand);
// See if we just retrieved an OOM exception.
JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
pending_exception_operand =
masm->ExternalOperand(pending_exception_address);
__ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ movq(pending_exception_operand, rdx);
+ __ movp(pending_exception_operand, rdx);
// Special handling of termination exceptions which are uncatchable
// by javascript code.
MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
// Set up frame.
__ push(rbp);
- __ movq(rbp, rsp);
+ __ movp(rbp, rsp);
// Push the stack frame type marker twice.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
__ testq(rax, rax);
__ j(not_zero, &not_outermost_js);
__ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
- __ movq(rax, rbp);
+ __ movp(rax, rbp);
__ Store(js_entry_sp, rax);
Label cont;
__ jmp(&cont);
__ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ j(not_equal, &not_outermost_js_2);
__ Move(kScratchRegister, js_entry_sp);
- __ movq(Operand(kScratchRegister, 0), Immediate(0));
+ __ movp(Operand(kScratchRegister, 0), Immediate(0));
__ bind(&not_outermost_js_2);
// Restore the top frame descriptor from the stack.
Label slow;
StackArgumentsAccessor args(rsp, 2 + extra_argument_offset,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rax, args.GetArgumentOperand(0));
+ __ movp(rax, args.GetArgumentOperand(0));
__ JumpIfSmi(rax, &slow);
// Check that the left hand is a JS object. Leave its map in rax.
__ j(above, &slow);
// Get the prototype of the function.
- __ movq(rdx, args.GetArgumentOperand(1));
+ __ movp(rdx, args.GetArgumentOperand(1));
// rdx is function, rax is map.
// If there is a call site cache don't look in the global cache, but do the
__ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
} else {
// Get return address and delta to inlined map check.
- __ movq(kScratchRegister, StackOperandForReturnAddress(0));
+ __ movp(kScratchRegister, StackOperandForReturnAddress(0));
__ subq(kScratchRegister, args.GetArgumentOperand(2));
if (FLAG_debug_code) {
__ movl(rdi, Immediate(kWordBeforeMapCheckValue));
__ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
__ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCheck);
}
- __ movq(kScratchRegister,
+ __ movp(kScratchRegister,
Operand(kScratchRegister, kOffsetToMapCheckValue));
- __ movq(Operand(kScratchRegister, 0), rax);
+ __ movp(Operand(kScratchRegister, 0), rax);
}
- __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
+ __ movp(rcx, FieldOperand(rax, Map::kPrototypeOffset));
// Loop through the prototype chain looking for the function prototype.
Label loop, is_instance, is_not_instance;
// The code at is_not_instance assumes that kScratchRegister contains a
// non-zero GCable value (the null object in this case).
__ j(equal, &is_not_instance, Label::kNear);
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
+ __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
__ jmp(&loop);
__ bind(&is_instance);
// Assert it is a 1-byte signed value.
ASSERT(true_offset >= 0 && true_offset < 0x100);
__ movl(rax, Immediate(true_offset));
- __ movq(kScratchRegister, StackOperandForReturnAddress(0));
+ __ movp(kScratchRegister, StackOperandForReturnAddress(0));
__ subq(kScratchRegister, args.GetArgumentOperand(2));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
// Assert it is a 1-byte signed value.
ASSERT(false_offset >= 0 && false_offset < 0x100);
__ movl(rax, Immediate(false_offset));
- __ movq(kScratchRegister, StackOperandForReturnAddress(0));
+ __ movp(kScratchRegister, StackOperandForReturnAddress(0));
__ subq(kScratchRegister, args.GetArgumentOperand(2));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
__ JumpIfSmi(object_, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
// If the receiver is not a string trigger the non-string case.
__ testb(result_, Immediate(kIsNotStringMask));
if (!index_.is(rax)) {
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
- __ movq(index_, rax);
+ __ movp(index_, rax);
}
__ pop(object_);
// Reload the instance type.
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
__ push(index_);
__ CallRuntime(Runtime::kStringCharCodeAt, 2);
if (!result_.is(rax)) {
- __ movq(result_, rax);
+ __ movp(result_, rax);
}
call_helper.AfterCall(masm);
__ jmp(&exit_);
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
- __ movq(result_, FieldOperand(result_, index.reg, index.scale,
+ __ movp(result_, FieldOperand(result_, index.reg, index.scale,
FixedArray::kHeaderSize));
__ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
__ j(equal, &slow_case_);
__ push(code_);
__ CallRuntime(Runtime::kCharFromCode, 1);
if (!result_.is(rax)) {
- __ movq(result_, rax);
+ __ movp(result_, rax);
}
call_helper.AfterCall(masm);
__ jmp(&exit_);
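// Fast path: Heap::kSingleCharacterStringCacheRootIndex is a FixedArray keyed
// by character code; an undefined entry means the one-character string has
// not been created yet, so the slow case funnels through
// Runtime::kCharFromCode and copies the answer into result_ only when it did
// not already land in rax.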
// Load the two arguments.
StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rax, args.GetArgumentOperand(0)); // First argument (left).
- __ movq(rdx, args.GetArgumentOperand(1)); // Second argument (right).
+ __ movp(rax, args.GetArgumentOperand(0)); // First argument (left).
+ __ movp(rdx, args.GetArgumentOperand(1)); // Second argument (right).
// Make sure that both arguments are strings if not known in advance.
// Otherwise, at least one of the arguments is definitely a string,
// rdx: second string
// Check if either of the strings are empty. In that case return the other.
Label second_not_zero_length, both_not_zero_length;
- __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
+ __ movp(rcx, FieldOperand(rdx, String::kLengthOffset));
__ SmiTest(rcx);
__ j(not_zero, &second_not_zero_length, Label::kNear);
// Second string is empty, result is first string which is already in rax.
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&second_not_zero_length);
- __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
+ __ movp(rbx, FieldOperand(rax, String::kLengthOffset));
__ SmiTest(rbx);
__ j(not_zero, &both_not_zero_length, Label::kNear);
// First string is empty, result is second string which is in rdx.
- __ movq(rax, rdx);
+ __ movp(rax, rdx);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
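// Both zero-length checks lean on smi tagging: String::kLengthOffset holds a
// smi, so SmiTest sets the flags without untagging. Returning the non-empty
// operand directly avoids allocating a cons string for "" + s or s + "".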
// If arguments were known to be strings, maps are not loaded to r8 and r9
// by the code above.
if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
- __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(r8, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(r9, FieldOperand(rdx, HeapObject::kMapOffset));
}
// Get the instance types of the two strings as they will be needed soon.
__ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
__ AllocateAsciiConsString(rcx, rdi, no_reg, &call_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
- __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
- __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
+ __ movp(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
+ __ movp(FieldOperand(rcx, ConsString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
Label skip_write_barrier, after_writing;
__ testb(rbx, Immediate(1));
__ j(zero, &skip_write_barrier);
- __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
+ __ movp(FieldOperand(rcx, ConsString::kFirstOffset), rax);
__ RecordWriteField(rcx,
ConsString::kFirstOffset,
rax,
rbx,
kDontSaveFPRegs);
- __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
+ __ movp(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
__ RecordWriteField(rcx,
ConsString::kSecondOffset,
rdx,
__ jmp(&after_writing);
__ bind(&skip_write_barrier);
- __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
- __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
+ __ movp(FieldOperand(rcx, ConsString::kFirstOffset), rax);
+ __ movp(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
__ bind(&after_writing);
- __ movq(rax, rcx);
+ __ movp(rax, rcx);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
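// The barrier split depends on where AllocateAsciiConsString put the result:
// if the flag tested in rbx is set (loaded by an elided sequence, presumably
// the new-space high-promotion-mode check), the cons string may be pretenured
// into old space and both child pointers must go through RecordWriteField;
// otherwise a new-space object takes the plain stores under
// skip_write_barrier.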
__ bind(&non_ascii);
STATIC_ASSERT(kShortExternalStringTag != 0);
__ testb(r8, Immediate(kShortExternalStringMask));
__ j(not_zero, &call_runtime);
- __ movq(rcx, FieldOperand(rax, ExternalString::kResourceDataOffset));
+ __ movp(rcx, FieldOperand(rax, ExternalString::kResourceDataOffset));
__ jmp(&first_prepared, Label::kNear);
__ bind(&first_is_sequential);
STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
STATIC_ASSERT(kShortExternalStringTag != 0);
__ testb(r9, Immediate(kShortExternalStringMask));
__ j(not_zero, &call_runtime);
- __ movq(rdx, FieldOperand(rdx, ExternalString::kResourceDataOffset));
+ __ movp(rdx, FieldOperand(rdx, ExternalString::kResourceDataOffset));
__ jmp(&second_prepared, Label::kNear);
__ bind(&second_is_sequential);
STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ bind(¬_string);
// Puts the cached result into scratch1.
__ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, slow);
- __ movq(arg, scratch1);
- __ movq(Operand(rsp, stack_offset), arg);
+ __ movp(arg, scratch1);
+ __ movp(Operand(rsp, stack_offset), arg);
__ bind(&done);
}
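// On a cache hit LookupNumberStringCache leaves the cached string in
// scratch1; it is then copied over the number both in the register and in its
// caller-visible stack slot, so later passes over the arguments see the
// converted value. A miss jumps straight to slow.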
// Load the entry from the string table.
STATIC_ASSERT(StringTable::kEntrySize == 1);
- __ movq(candidate,
+ __ movp(candidate,
FieldOperand(string_table,
scratch,
times_pointer_size,
Register result = candidate;
__ bind(&found_in_string_table);
if (!result.is(rax)) {
- __ movq(rax, result);
+ __ movp(rax, result);
}
}
ARGUMENTS_DONT_CONTAIN_RECEIVER);
// Make sure first argument is a string.
- __ movq(rax, args.GetArgumentOperand(STRING_ARGUMENT_INDEX));
+ __ movp(rax, args.GetArgumentOperand(STRING_ARGUMENT_INDEX));
STATIC_ASSERT(kSmiTag == 0);
__ testl(rax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
// rax: string
// rbx: instance type
// Calculate length of sub string using the smi values.
- __ movq(rcx, args.GetArgumentOperand(TO_ARGUMENT_INDEX));
- __ movq(rdx, args.GetArgumentOperand(FROM_ARGUMENT_INDEX));
+ __ movp(rcx, args.GetArgumentOperand(TO_ARGUMENT_INDEX));
+ __ movp(rdx, args.GetArgumentOperand(FROM_ARGUMENT_INDEX));
__ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
__ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
__ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
Heap::kempty_stringRootIndex);
__ j(not_equal, &runtime);
- __ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
+ __ movp(rdi, FieldOperand(rax, ConsString::kFirstOffset));
// Update instance type.
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
__ jmp(&underlying_unpacked, Label::kNear);
__ bind(&sliced_string);
// Sliced string. Fetch parent and correct start index by offset.
__ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
- __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
+ __ movp(rdi, FieldOperand(rax, SlicedString::kParentOffset));
// Update instance type.
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
__ jmp(&underlying_unpacked, Label::kNear);
__ bind(&seq_or_external_string);
// Sequential or external string. Just move string to the correct register.
- __ movq(rdi, rax);
+ __ movp(rdi, rax);
__ bind(&underlying_unpacked);
__ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
__ bind(&set_slice_header);
__ Integer32ToSmi(rcx, rcx);
- __ movq(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
- __ movq(FieldOperand(rax, SlicedString::kHashFieldOffset),
+ __ movp(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
+ __ movp(FieldOperand(rax, SlicedString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
- __ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
- __ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
+ __ movp(FieldOperand(rax, SlicedString::kParentOffset), rdi);
+ __ movp(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
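// A sliced string is just a (parent, offset, length) header over the parent's
// characters: nothing is copied. The unpacking above reduces cons and sliced
// inputs to a direct parent first, so slices never nest. Substrings shorter
// than SlicedString::kMinLength take the copying paths below instead, where a
// slice header would cost more than the characters themselves.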
STATIC_CHECK(kShortExternalStringTag != 0);
__ testb(rbx, Immediate(kShortExternalStringMask));
__ j(not_zero, &runtime);
- __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
+ __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// rax: result string
// rcx: result string length
- __ movq(r14, rsi); // esi used by following code.
+ __ movp(r14, rsi); // rsi used by following code.
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
__ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
// rsi: character of sub string start
// r14: original value of rsi
StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
- __ movq(rsi, r14); // Restore rsi.
+ __ movp(rsi, r14); // Restore rsi.
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
// rax: result string
// rcx: result string length
- __ movq(r14, rsi); // esi used by following code.
+ __ movp(r14, rsi); // rsi used by following code.
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
__ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
// rsi: character of sub string start
// r14: original value of rsi
StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
- __ movq(rsi, r14); // Restore esi.
+ __ movp(rsi, r14); // Restore rsi.
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
// Compare lengths.
Label check_zero_length;
- __ movq(length, FieldOperand(left, String::kLengthOffset));
+ __ movp(length, FieldOperand(left, String::kLengthOffset));
__ SmiCompare(length, FieldOperand(right, String::kLengthOffset));
__ j(equal, &check_zero_length, Label::kNear);
__ Move(rax, Smi::FromInt(NOT_EQUAL));
STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
// Find minimum length and length difference.
- __ movq(scratch1, FieldOperand(left, String::kLengthOffset));
- __ movq(scratch4, scratch1);
+ __ movp(scratch1, FieldOperand(left, String::kLengthOffset));
+ __ movp(scratch4, scratch1);
__ SmiSub(scratch4,
scratch4,
FieldOperand(right, String::kLengthOffset));
// rsp[16] : left string
StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rdx, args.GetArgumentOperand(0)); // left
- __ movq(rax, args.GetArgumentOperand(1)); // right
+ __ movp(rdx, args.GetArgumentOperand(0)); // left
+ __ movp(rax, args.GetArgumentOperand(1)); // right
// Check for identity.
Label not_same;
// Correct sign of result in case of overflow.
__ not_(rdx);
__ bind(&done);
- __ movq(rax, rdx);
+ __ movp(rax, rdx);
}
__ ret(0);
__ j(cond, &miss, Label::kNear);
// Check that both operands are internalized strings.
- __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
__ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
// Check that both operands are unique names. This leaves the instance
// types loaded in tmp1 and tmp2.
- __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
__ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
// Check that both operands are strings. This leaves the instance
// types loaded in tmp1 and tmp2.
- __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
__ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- __ movq(tmp3, tmp1);
+ __ movp(tmp3, tmp1);
STATIC_ASSERT(kNotStringTag != 0);
__ or_(tmp3, tmp2);
__ testb(tmp3, Immediate(kIsNotStringMask));
Condition either_smi = masm->CheckEitherSmi(rdx, rax);
__ j(either_smi, &miss, Label::kNear);
- __ movq(rcx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ Cmp(rcx, known_map_);
__ j(not_equal, &miss, Label::kNear);
__ Cmp(rbx, known_map_);
Register entity_name = r0;
// Having undefined at this place means the name is not contained.
ASSERT_EQ(kSmiTagSize, 1);
- __ movq(entity_name, Operand(properties,
+ __ movp(entity_name, Operand(properties,
index,
times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
__ j(equal, &good, Label::kNear);
// Check if the entry name is not a unique name.
- __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
+ __ movp(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
__ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
miss);
__ bind(&good);
kPointerSize);
for (int i = kInlinedProbes; i < kTotalProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ movq(scratch, args.GetArgumentOperand(1));
+ __ movp(scratch, args.GetArgumentOperand(1));
if (i > 0) {
__ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
}
__ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
// Having undefined at this place means the name is not contained.
- __ movq(scratch, Operand(dictionary_,
+ __ movp(scratch, Operand(dictionary_,
index_,
times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
// key we are looking for.
// Check if the entry name is not a unique name.
- __ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+ __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
__ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
&maybe_in_dictionary);
}
// treated as a lookup success. For positive lookup probing failure
// should be treated as lookup failure.
if (mode_ == POSITIVE_LOOKUP) {
- __ movq(scratch, Immediate(0));
+ __ movp(scratch, Immediate(0));
__ Drop(1);
__ ret(2 * kPointerSize);
}
__ bind(&in_dictionary);
- __ movq(scratch, Immediate(1));
+ __ movp(scratch, Immediate(1));
__ Drop(1);
__ ret(2 * kPointerSize);
__ bind(¬_in_dictionary);
- __ movq(scratch, Immediate(0));
+ __ movp(scratch, Immediate(0));
__ Drop(1);
__ ret(2 * kPointerSize);
}
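// Each probe recomputes a masked index from the original hash. A sketch of
// what the generated loop encodes, assuming HashTable's quadratic scheme
// (GetProbeOffset(i) == (i + i * i) / 2):
//
//   uint32_t index = (hash + NameDictionary::GetProbeOffset(i)) & mask;
//   uint32_t entry = index * NameDictionary::kEntrySize;  // the index *= 3
//
// kTotalProbes bounds the loop; exhausting it lands in maybe_in_dictionary,
// which the mode_ checks above fold into lookup success or failure.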
if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
- __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
+ __ movp(regs_.scratch0(), Operand(regs_.address(), 0));
__ JumpIfNotInNewSpace(regs_.scratch0(),
regs_.scratch0(),
&dont_need_remembered_set);
Label need_incremental;
Label need_incremental_pop_object;
- __ movq(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
+ __ movp(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
__ and_(regs_.scratch0(), regs_.object());
- __ movq(regs_.scratch1(),
+ __ movp(regs_.scratch1(),
Operand(regs_.scratch0(),
MemoryChunk::kWriteBarrierCounterOffset));
__ subq(regs_.scratch1(), Immediate(1));
- __ movq(Operand(regs_.scratch0(),
+ __ movp(Operand(regs_.scratch0(),
MemoryChunk::kWriteBarrierCounterOffset),
regs_.scratch1());
__ j(negative, &need_incremental);
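// The page header lives at the object address with the low bits masked off
// (~Page::kPageAlignmentMask); kWriteBarrierCounterOffset is a per-page
// budget. Once the decremented counter goes negative the stub stops merely
// recording slots and escalates to the incremental marking path.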
__ bind(&on_black);
// Get the value from the slot.
- __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
+ __ movp(regs_.scratch0(), Operand(regs_.address(), 0));
if (mode == INCREMENTAL_COMPACTION) {
Label ensure_not_white;
// Get array literal index, array literal and its map.
StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rdx, args.GetArgumentOperand(1));
- __ movq(rbx, args.GetArgumentOperand(0));
- __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
+ __ movp(rdx, args.GetArgumentOperand(1));
+ __ movp(rbx, args.GetArgumentOperand(0));
+ __ movp(rdi, FieldOperand(rbx, JSObject::kMapOffset));
__ CheckFastElements(rdi, &double_elements);
__ push(rbx);
__ push(rcx);
__ push(rax);
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ push(rdx);
__ PushReturnAddressFrom(rdi);
// Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ SmiToInteger32(kScratchRegister, rcx);
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
__ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
FixedArrayBase::kHeaderSize));
- __ movq(Operand(rcx, 0), rax);
+ __ movp(Operand(rcx, 0), rax);
// Update the write barrier for the array store.
__ RecordWrite(rbx, rcx, rax,
kDontSaveFPRegs,
// FAST_*_ELEMENTS, and value is Smi.
__ bind(&smi_element);
__ SmiToInteger32(kScratchRegister, rcx);
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- __ movq(FieldOperand(rbx, kScratchRegister, times_pointer_size,
+ __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ movp(FieldOperand(rbx, kScratchRegister, times_pointer_size,
FixedArrayBase::kHeaderSize), rax);
__ ret(0);
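// One arm per ElementsKind family: object values (first arm) store and then
// RecordWrite; smi values (just above) store with no barrier at all, since a
// smi is never a heap pointer; the double_elements arm below unboxes the
// number into the FixedDoubleArray via StoreNumberToDoubleElements.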
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
- __ movq(r9, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ movp(r9, FieldOperand(rbx, JSObject::kElementsOffset));
__ SmiToInteger32(r11, rcx);
__ StoreNumberToDoubleElements(rax,
r9,
__ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
- __ movq(rbx, MemOperand(rbp, parameter_count_offset));
+ __ movp(rbx, MemOperand(rbp, parameter_count_offset));
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
__ PopReturnAddressTo(rcx);
int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
void StubFailureTailCallTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
__ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- __ movq(rdi, rax);
+ __ movp(rdi, rax);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
- __ movq(rax, MemOperand(rbp, parameter_count_offset));
+ __ movp(rax, MemOperand(rbp, parameter_count_offset));
// The parameter count above includes the receiver for the arguments passed to
// the deoptimization handler. Subtract the receiver for the parameter count
// for the call.
Operand(rsp, kNumSavedRegisters * kRegisterSize + kPCOnStackSize));
// Calculate the function address to the first arg.
- __ movq(arg_reg_1, Operand(rsp, kNumSavedRegisters * kRegisterSize));
+ __ movp(arg_reg_1, Operand(rsp, kNumSavedRegisters * kRegisterSize));
__ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
// Save the remainder of the volatile registers.
// look at the first argument
StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, args.GetArgumentOperand(0));
+ __ movp(rcx, args.GetArgumentOperand(0));
__ testq(rcx, rcx);
__ j(zero, &normal_sequence);
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry (only if we have an allocation site in the cell).
__ incl(rdx);
- __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
+ __ movp(rcx, FieldOperand(rbx, Cell::kValueOffset));
if (FLAG_debug_code) {
Handle<Map> allocation_site_map =
masm->isolate()->factory()->allocation_site_map();
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
// AllocationSite, call an array constructor that doesn't use AllocationSites.
__ Cmp(rbx, undefined_sentinel);
__ j(equal, &no_info);
- __ movq(rdx, FieldOperand(rbx, Cell::kValueOffset));
+ __ movp(rdx, FieldOperand(rbx, Cell::kValueOffset));
__ Cmp(FieldOperand(rdx, 0),
masm->isolate()->factory()->allocation_site_map());
__ j(not_equal, &no_info);
// Only look at the lower 16 bits of the transition info.
- __ movq(rdx, FieldOperand(rdx, AllocationSite::kTransitionInfoOffset));
+ __ movp(rdx, FieldOperand(rdx, AllocationSite::kTransitionInfoOffset));
__ SmiToInteger32(rdx, rdx);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ and_(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
// We might need to create a holey array
// look at the first argument
StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, args.GetArgumentOperand(0));
+ __ movp(rcx, args.GetArgumentOperand(0));
__ testq(rcx, rcx);
__ j(zero, &normal_sequence);
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
}
// Figure out the right elements kind
- __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Load the map's "bit field 2" into |result|. We only need the first byte,
// but the following masking takes care of that anyway.
masm->push(scratch1_);
if (!address_.is(address_orig_)) {
masm->push(address_);
- masm->movq(address_, address_orig_);
+ masm->movp(address_, address_orig_);
}
if (!object_.is(object_orig_)) {
masm->push(object_);
- masm->movq(object_, object_orig_);
+ masm->movp(object_, object_orig_);
}
}
// them back. Only in one case is the orig_ reg different from the plain
// one, since only one of them can alias with rcx.
if (!object_.is(object_orig_)) {
- masm->movq(object_orig_, object_);
+ masm->movp(object_orig_, object_);
masm->pop(object_);
}
if (!address_.is(address_orig_)) {
- masm->movq(address_orig_, address_);
+ masm->movp(address_orig_, address_);
masm->pop(address_);
}
masm->pop(scratch1_);
__ fstp(0); // Drop result in st(0).
int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
__ movq(rcx, kNaNValue);
__ movq(Operand(rsp, kPointerSize), rcx);
__ movsd(xmm0, Operand(rsp, kPointerSize));
__ jmp(&return_result);
}
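// Both moves of kNaNValue above deliberately stay movq: the value is a raw
// 64-bit double image, not a pointer, and the movsd reads all eight bytes
// back. A pointer-sized store would truncate the pattern on a
// 32-bit-pointer target.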
// Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
HeapObject::kMapOffset,
rbx,
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
- __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
__ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
__ j(equal, &only_change_map);
// the same size.
__ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);
- __ movq(r14, r8); // Destination array equals source array.
+ __ movp(r14, r8); // Destination array equals source array.
// r8 : source FixedArray
// r9 : elements array length
// r14: destination FixedDoubleArray
// Set backing store's map
__ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
+ __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
__ bind(&allocated);
// Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
HeapObject::kMapOffset,
rbx,
__ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
// Set backing store's map
__ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
+ __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
// Set receiver's backing store.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r14);
- __ movq(r11, r14);
+ __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r14);
+ __ movp(r11, r14);
__ RecordWriteField(rdx,
JSObject::kElementsOffset,
r11,
OMIT_SMI_CHECK);
// Set backing store's length.
__ Integer32ToSmi(r11, r9);
- __ movq(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
+ __ movp(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
__ jmp(&allocated);
__ bind(&only_change_map);
// Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
HeapObject::kMapOffset,
rbx,
// Conversion loop.
__ bind(&loop);
- __ movq(rbx,
+ __ movp(rbx,
FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize));
// r9 : current element's index
// rbx: current element (smi-tagged)
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
- __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
__ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
__ j(equal, &only_change_map);
__ push(rax);
- __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
__ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
// r8 : source FixedDoubleArray
// r9 : number of elements
__ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
// r11: destination FixedArray
__ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(r11, HeapObject::kMapOffset), rdi);
+ __ movp(FieldOperand(r11, HeapObject::kMapOffset), rdi);
__ Integer32ToSmi(r14, r9);
- __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
+ __ movp(FieldOperand(r11, FixedArray::kLengthOffset), r14);
// Prepare for conversion loop.
__ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64));
// Call into runtime if GC is required.
__ bind(&gc_required);
__ pop(rax);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ jmp(fail);
// Box doubles into heap numbers.
__ AllocateHeapNumber(rax, r15, &gc_required);
// rax: new heap number
__ MoveDouble(FieldOperand(rax, HeapNumber::kValueOffset), r14);
- __ movq(FieldOperand(r11,
+ __ movp(FieldOperand(r11,
r9,
times_pointer_size,
FixedArray::kHeaderSize),
rax);
- __ movq(r15, r9);
+ __ movp(r15, r9);
__ RecordWriteArray(r11,
rax,
r15,
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
- __ movq(FieldOperand(r11,
+ __ movp(FieldOperand(r11,
r9,
times_pointer_size,
FixedArray::kHeaderSize),
__ j(not_sign, &loop);
// Replace receiver's backing store with newly created and filled FixedArray.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r11);
+ __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r11);
__ RecordWriteField(rdx,
JSObject::kElementsOffset,
r11,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ pop(rax);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&only_change_map);
// Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
HeapObject::kMapOffset,
rbx,
Register result,
Label* call_runtime) {
// Fetch the instance type of the receiver into result register.
- __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
__ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
// We need special handling for indirect strings.
Label indirect_string_loaded;
__ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
__ addq(index, result);
- __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
+ __ movp(string, FieldOperand(string, SlicedString::kParentOffset));
__ jmp(&indirect_string_loaded, Label::kNear);
// Handle cons strings.
__ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
Heap::kempty_stringRootIndex);
__ j(not_equal, call_runtime);
- __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
+ __ movp(string, FieldOperand(string, ConsString::kFirstOffset));
__ bind(&indirect_string_loaded);
- __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
__ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
// Distinguish sequential and external strings. Only these two string
// Check encoding.
STATIC_ASSERT(kTwoByteStringTag == 0);
__ testb(result, Immediate(kStringEncodingMask));
- __ movq(result, FieldOperand(string, ExternalString::kResourceDataOffset));
+ __ movp(result, FieldOperand(string, ExternalString::kResourceDataOffset));
__ j(not_equal, &ascii_external, Label::kNear);
// Two-byte string.
__ movzxwl(result, Operand(result, index, times_2, 0));
// FUNCTION and OPTIMIZED_FUNCTION code:
CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
patcher.masm()->push(rbp);
- patcher.masm()->movq(rbp, rsp);
+ patcher.masm()->movp(rbp, rsp);
patcher.masm()->push(rsi);
patcher.masm()->push(rdi);
initialized = true;
ExternalReference(Debug_Address::RestarterFrameFunctionPointer(),
masm->isolate());
__ Move(rax, restarter_frame_function_slot);
- __ movq(Operand(rax, 0), Immediate(0));
+ __ movp(Operand(rax, 0), Immediate(0));
// We do not know our frame height, but set rsp based on rbp.
__ lea(rsp, Operand(rbp, -1 * kPointerSize));
__ pop(rbp);
// Load context from the function.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Get function code.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
__ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
// Re-run JSFunction, rdi is function, rsi is context.
Register arg5 = r11;
// Get the bailout id from the stack.
- __ movq(arg_reg_3, Operand(rsp, kSavedRegistersAreaSize));
+ __ movp(arg_reg_3, Operand(rsp, kSavedRegistersAreaSize));
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5.
- __ movq(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize));
+ __ movp(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize));
__ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize +
kPCOnStackSize));
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6);
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(arg_reg_1, rax);
+ __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(arg_reg_1, rax);
__ Set(arg_reg_2, type());
// Args 3 and 4 are already in the right registers.
// On windows put the arguments on the stack (PrepareCallCFunction
// has created space for this). On linux pass the arguments in r8 and r9.
#ifdef _WIN64
- __ movq(Operand(rsp, 4 * kPointerSize), arg5);
+ __ movp(Operand(rsp, 4 * kPointerSize), arg5);
__ LoadAddress(arg5, ExternalReference::isolate_address(isolate()));
- __ movq(Operand(rsp, 5 * kPointerSize), arg5);
+ __ movp(Operand(rsp, 5 * kPointerSize), arg5);
#else
- __ movq(r8, arg5);
+ __ movp(r8, arg5);
__ LoadAddress(r9, ExternalReference::isolate_address(isolate()));
#endif
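// Calling-convention split: Win64 passes only four integer arguments in
// registers (rcx, rdx, r8, r9), so arguments 5 and 6 of the deoptimizer
// C call go into the stack slots that PrepareCallCFunction(6) reserved;
// the System V ABI has six register slots, hence the r8/r9 branch.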
}
// Preserve deoptimizer object in register rax and get the input
// frame descriptor pointer.
- __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
+ __ movp(rbx, Operand(rax, Deoptimizer::input_offset()));
// Fill in the input registers.
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
// Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame.
- __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ addq(rcx, rsp);
// Unwind the stack down to - but not including - the unwinding
// Compute the output frame in the deoptimizer.
__ push(rax);
__ PrepareCallCFunction(2);
- __ movq(arg_reg_1, rax);
+ __ movp(arg_reg_1, rax);
__ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate()));
{
AllowExternalCallThatCantCauseGC scope(masm());
// Outer loop state: rax = current FrameDescription**, rdx = one past the
// last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
- __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
+ __ movp(rax, Operand(rax, Deoptimizer::output_offset()));
__ lea(rdx, Operand(rax, rdx, times_pointer_size, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index.
- __ movq(rbx, Operand(rax, 0));
- __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ movp(rbx, Operand(rax, 0));
+ __ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ subq(rcx, Immediate(sizeof(intptr_t)));
Label ok;
// +1 for return address.
StackArgumentsAccessor args(rsp, info->scope()->num_parameters());
- __ movq(rcx, args.GetReceiverOperand());
+ __ movp(rcx, args.GetReceiverOperand());
__ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &ok, Label::kNear);
- __ movq(rcx, GlobalObjectOperand());
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
+ __ movp(rcx, GlobalObjectOperand());
+ __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
- __ movq(args.GetReceiverOperand(), rcx);
+ __ movp(args.GetReceiverOperand(), rcx);
__ bind(&ok);
}
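// Classic-mode (sloppy) functions must never see undefined as the receiver:
// the check above swaps it for the global receiver object before the body
// runs. The guard emitting this patching (elided above) only fires for
// classic-mode, non-native functions.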
function_in_register = false;
// Context is returned in both rax and rsi. It replaces the context
// passed to us. It's saved in the stack and kept live in rsi.
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+ __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
// Copy any necessary parameters into the context.
int num_parameters = info->scope()->num_parameters();
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
// Load parameter from stack.
- __ movq(rax, Operand(rbp, parameter_offset));
+ __ movp(rax, Operand(rbp, parameter_offset));
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
- __ movq(Operand(rsi, context_offset), rax);
+ __ movp(Operand(rsi, context_offset), rax);
// Update the write barrier. This clobbers rax and rbx.
__ RecordWriteContextSlot(
rsi, context_offset, rax, rbx, kDontSaveFPRegs);
int reset_value = FLAG_interrupt_budget;
__ Move(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
__ Move(kScratchRegister, Smi::FromInt(reset_value));
- __ movq(FieldOperand(rbx, Cell::kValueOffset), kScratchRegister);
+ __ movp(FieldOperand(rbx, Cell::kValueOffset), kScratchRegister);
}
__ RecordJSReturn();
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
- __ movq(rsp, rbp);
+ __ movp(rsp, rbp);
__ pop(rbp);
int no_frame_start = masm_->pc_offset();
Register reg) const {
ASSERT(count > 0);
if (count > 1) __ Drop(count - 1);
- __ movq(Operand(rsp, 0), reg);
+ __ movp(Operand(rsp, 0), reg);
}
void FullCodeGenerator::GetVar(Register dest, Variable* var) {
ASSERT(var->IsContextSlot() || var->IsStackAllocated());
MemOperand location = VarOperand(var, dest);
- __ movq(dest, location);
+ __ movp(dest, location);
}
ASSERT(!scratch0.is(scratch1));
ASSERT(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0);
- __ movq(location, src);
+ __ movp(location, src);
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
- __ movq(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
__ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
__ Check(not_equal, kDeclarationInWithContext);
__ CompareRoot(rbx, Heap::kCatchContextMapRootIndex);
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movq(StackOperand(variable), kScratchRegister);
+ __ movp(StackOperand(variable), kScratchRegister);
}
break;
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movq(ContextOperand(rsi, variable->index()), kScratchRegister);
+ __ movp(ContextOperand(rsi, variable->index()), kScratchRegister);
// No write barrier since the hole value is in old space.
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
}
case Variable::LOCAL: {
Comment cmnt(masm_, "[ FunctionDeclaration");
VisitForAccumulatorValue(declaration->fun());
- __ movq(StackOperand(variable), result_register());
+ __ movp(StackOperand(variable), result_register());
break;
}
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
- __ movq(ContextOperand(rsi, variable->index()), result_register());
+ __ movp(ContextOperand(rsi, variable->index()), result_register());
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(rsi,
// Load instance object.
__ LoadContext(rax, scope_->ContextChainLength(scope_->GlobalScope()));
- __ movq(rax, ContextOperand(rax, variable->interface()->Index()));
- __ movq(rax, ContextOperand(rax, Context::EXTENSION_INDEX));
+ __ movp(rax, ContextOperand(rax, variable->interface()->Index()));
+ __ movp(rax, ContextOperand(rax, Context::EXTENSION_INDEX));
// Assign it.
- __ movq(ContextOperand(rsi, variable->index()), rax);
+ __ movp(ContextOperand(rsi, variable->index()), rax);
// We know that we have written a module, which is not a smi.
__ RecordWriteContextSlot(rsi,
Context::SlotOffset(variable->index()),
VisitForAccumulatorValue(clause->label());
// Perform the comparison as if via '==='.
- __ movq(rdx, Operand(rsp, 0)); // Switch value.
+ __ movp(rdx, Operand(rsp, 0)); // Switch value.
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
- __ movq(rcx, rdx);
+ __ movp(rcx, rdx);
__ or_(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
Label use_cache;
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ jmp(&use_cache, Label::kNear);
// Get the set of properties to enumerate.
__ j(equal, &no_descriptors);
__ LoadInstanceDescriptors(rax, rcx);
- __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheOffset));
- __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ movp(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheOffset));
+ __ movp(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
__ push(rax); // Map.
Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker));
__ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check
- __ movq(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
+ __ movp(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(rcx, LAST_JS_PROXY_TYPE, rcx);
__ j(above, &non_proxy);
__ bind(&non_proxy);
__ push(rbx); // Smi
__ push(rax); // Array
- __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
+ __ movp(rax, FieldOperand(rax, FixedArray::kLengthOffset));
__ push(rax); // Fixed array length (as smi).
__ Push(Smi::FromInt(0)); // Initial index.
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
- __ movq(rax, Operand(rsp, 0 * kPointerSize)); // Get the current index.
+ __ movp(rax, Operand(rsp, 0 * kPointerSize)); // Get the current index.
__ cmpq(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
__ j(above_equal, loop_statement.break_label());
// Get the current entry of the array into register rbx.
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+ __ movp(rbx, Operand(rsp, 2 * kPointerSize));
SmiIndex index = masm()->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ movq(rbx, FieldOperand(rbx,
+ __ movp(rbx, FieldOperand(rbx,
index.reg,
index.scale,
FixedArray::kHeaderSize));
// Get the expected map from the stack or a smi in the
// permanent slow case into register rdx.
- __ movq(rdx, Operand(rsp, 3 * kPointerSize));
+ __ movp(rdx, Operand(rsp, 3 * kPointerSize));
// Check if the expected map still matches that of the enumerable.
// If not, we may have to filter the key.
Label update_each;
- __ movq(rcx, Operand(rsp, 4 * kPointerSize));
+ __ movp(rcx, Operand(rsp, 4 * kPointerSize));
__ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
__ Cmp(rax, Smi::FromInt(0));
__ j(equal, loop_statement.continue_label());
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
// Update the 'each' property or variable from the possibly filtered
// entry in register rbx.
__ bind(&update_each);
- __ movq(result_register(), rbx);
+ __ movp(result_register(), rbx);
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each());
__ j(not_equal, slow);
}
// Load next context in chain.
- __ movq(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ movp(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering rsi.
context = temp;
}
// safe to use raw labels here.
Label next, fast;
if (!context.is(temp)) {
- __ movq(temp, context);
+ __ movp(temp, context);
}
// Load map for comparison into register, outside loop.
__ LoadRoot(kScratchRegister, Heap::kNativeContextMapRootIndex);
__ cmpq(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// Load next context in chain.
- __ movq(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
+ __ movp(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
__ jmp(&next);
__ bind(&fast);
}
// All extension objects were empty and it is safe to use a global
// load IC call.
- __ movq(rax, GlobalObjectOperand());
+ __ movp(rax, GlobalObjectOperand());
__ Move(rcx, var->name());
ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
? NOT_CONTEXTUAL
Immediate(0));
__ j(not_equal, slow);
}
- __ movq(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ movp(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering rsi.
context = temp;
}
__ jmp(done);
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
- __ movq(rax, ContextSlotOperandCheckExtensions(local, slow));
+ __ movp(rax, ContextSlotOperandCheckExtensions(local, slow));
if (local->mode() == LET ||
local->mode() == CONST ||
local->mode() == CONST_HARMONY) {
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
__ Move(rcx, var->name());
- __ movq(rax, GlobalObjectOperand());
+ __ movp(rax, GlobalObjectOperand());
CallLoadIC(CONTEXTUAL);
context()->Plug(rax);
break;
// rcx = literals array.
// rbx = regexp literal.
// rax = regexp literal clone.
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
int literal_offset =
FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ movq(rbx, FieldOperand(rcx, literal_offset));
+ __ movp(rbx, FieldOperand(rcx, literal_offset));
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &materialized, Label::kNear);
__ Push(expr->pattern());
__ Push(expr->flags());
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
// Copy the content into the newly allocated memory.
// (Unroll copy loop once for better throughput).
for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ movq(rdx, FieldOperand(rbx, i));
- __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
- __ movq(FieldOperand(rax, i), rdx);
- __ movq(FieldOperand(rax, i + kPointerSize), rcx);
+ __ movp(rdx, FieldOperand(rbx, i));
+ __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
+ __ movp(FieldOperand(rax, i), rdx);
+ __ movp(FieldOperand(rax, i + kPointerSize), rcx);
}
if ((size % (2 * kPointerSize)) != 0) {
- __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
- __ movq(FieldOperand(rax, size - kPointerSize), rdx);
+ __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
+ __ movp(FieldOperand(rax, size - kPointerSize), rdx);
}
context()->Plug(rax);
}
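// The regexp clone is copied as straight-line code: the C++ loop here is
// fully unrolled at code-generation time, and using two scratch registers per
// pair of words (rdx and rcx) lets loads and stores overlap. The trailing if
// moves one odd final word when size is not a multiple of 2 * kPointerSize.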
expr->depth() > 1 || Serializer::enabled() ||
flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_properties);
__ Push(Smi::FromInt(flags));
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Move(rbx, Smi::FromInt(expr->literal_index()));
__ Move(rcx, constant_properties);
__ Move(rdx, Smi::FromInt(flags));
if (property->emit_store()) {
VisitForAccumulatorValue(value);
__ Move(rcx, key->value());
- __ movq(rdx, Operand(rsp, 0));
+ __ movp(rdx, Operand(rsp, 0));
CallStoreIC(NOT_CONTEXTUAL, key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Move(rbx, Smi::FromInt(expr->literal_index()));
__ Move(rcx, constant_elements);
FastCloneShallowArrayStub stub(
__ CallStub(&stub);
} else if (expr->depth() > 1 || Serializer::enabled() ||
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_elements);
mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
}
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Move(rbx, Smi::FromInt(expr->literal_index()));
__ Move(rcx, constant_elements);
FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
// Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
// cannot transition and don't need to call the runtime stub.
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ movq(rbx, Operand(rsp, kPointerSize)); // Copy of array literal.
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ movp(rbx, Operand(rsp, kPointerSize)); // Copy of array literal.
+ __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
// Store the subexpression value in the array's elements.
- __ movq(FieldOperand(rbx, offset), result_register());
+ __ movp(FieldOperand(rbx, offset), result_register());
// Update the write barrier for the array store.
__ RecordWriteField(rbx, offset, result_register(), rcx,
kDontSaveFPRegs,
if (expr->is_compound()) {
VisitForStackValue(property->obj());
VisitForAccumulatorValue(property->key());
- __ movq(rdx, Operand(rsp, 0));
+ __ movp(rdx, Operand(rsp, 0));
__ push(rax);
} else {
VisitForStackValue(property->obj());
ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
__ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(continuation.pos()));
- __ movq(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
- __ movq(rcx, rsi);
+ __ movp(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
+ __ movp(rcx, rsi);
__ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
kDontSaveFPRegs);
__ lea(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
__ j(equal, &post_runtime);
__ push(rax); // generator object
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ movq(context_register(),
+ __ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
__ jmp(&l_resume);
__ bind(&l_suspend);
const int generator_object_depth = kPointerSize + handler_size;
- __ movq(rax, Operand(rsp, generator_object_depth));
+ __ movp(rax, Operand(rsp, generator_object_depth));
__ push(rax); // g
ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(l_continuation.pos()));
- __ movq(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
- __ movq(rcx, rsi);
+ __ movp(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
+ __ movp(rcx, rsi);
__ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
kDontSaveFPRegs);
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ movq(context_register(),
+ __ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
__ pop(rax); // result
EmitReturnSequence();
__ bind(&l_call);
Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(1);
CallIC(ic);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ Drop(1); // The key is still on the stack; drop it.
// if (!result.done) goto l_try;
__ j(less, &wrong_state);
// Load suspended function and context.
- __ movq(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
- __ movq(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+ __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
+ __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
// Push receiver.
__ push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
// Push holes for arguments to generator function.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movsxlq(rdx,
FieldOperand(rdx,
SharedFunctionInfo::kFormalParameterCountOffset));
__ jmp(&done);
__ bind(&resume_frame);
__ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
+ __ movp(rbp, rsp);
__ push(rsi); // Callee's context.
__ push(rdi); // Callee's JS Function.
// Load the operand stack size.
- __ movq(rdx, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset));
- __ movq(rdx, FieldOperand(rdx, FixedArray::kLengthOffset));
+ __ movp(rdx, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset));
+ __ movp(rdx, FieldOperand(rdx, FixedArray::kLengthOffset));
__ SmiToInteger32(rdx, rdx);
// If we are sending a value and there is no operand stack, we can jump back
Label slow_resume;
__ cmpq(rdx, Immediate(0));
__ j(not_zero, &slow_resume);
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ SmiToInteger64(rcx,
FieldOperand(rbx, JSGeneratorObject::kContinuationOffset));
__ addq(rdx, rcx);
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ movq(context_register(),
+ __ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&allocated);
__ pop(rcx);
__ Move(rdx, isolate()->factory()->ToBoolean(done));
ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), rbx);
+ __ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx);
__ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
isolate()->factory()->empty_fixed_array());
__ Move(FieldOperand(rax, JSObject::kElementsOffset),
isolate()->factory()->empty_fixed_array());
- __ movq(FieldOperand(rax, JSGeneratorObject::kResultValuePropertyOffset),
+ __ movp(FieldOperand(rax, JSGeneratorObject::kResultValuePropertyOffset),
rcx);
- __ movq(FieldOperand(rax, JSGeneratorObject::kResultDonePropertyOffset),
+ __ movp(FieldOperand(rax, JSGeneratorObject::kResultDonePropertyOffset),
rdx);
// Only the value field needs a write barrier, as the other values are in the
// rcx to make the shifts easier.
Label done, stub_call, smi_case;
__ pop(rdx);
- __ movq(rcx, rax);
+ __ movp(rcx, rax);
__ or_(rax, rdx);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(rax, &smi_case, Label::kNear);
__ bind(&stub_call);
- __ movq(rax, rcx);
+ __ movp(rax, rcx);
BinaryOpICStub stub(op, mode);
CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
expr->BinaryOperationFeedbackId());
case NAMED_PROPERTY: {
__ push(rax); // Preserve value.
VisitForAccumulatorValue(prop->obj());
- __ movq(rdx, rax);
+ __ movp(rdx, rax);
__ pop(rax); // Restore value.
__ Move(rcx, prop->key()->AsLiteral()->value());
CallStoreIC(NOT_CONTEXTUAL);
__ push(rax); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
- __ movq(rcx, rax);
+ __ movp(rcx, rax);
__ pop(rdx);
__ pop(rax); // Restore value.
Handle<Code> ic = is_classic_mode()
if (var->IsUnallocated()) {
// Global var, const, or let.
__ Move(rcx, var->name());
- __ movq(rdx, GlobalObjectOperand());
+ __ movp(rdx, GlobalObjectOperand());
CallStoreIC(CONTEXTUAL);
} else if (op == Token::INIT_CONST) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
if (var->IsStackLocal()) {
Label skip;
- __ movq(rdx, StackOperand(var));
+ __ movp(rdx, StackOperand(var));
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &skip);
- __ movq(StackOperand(var), rax);
+ __ movp(StackOperand(var), rax);
__ bind(&skip);
} else {
ASSERT(var->IsContextSlot() || var->IsLookupSlot());
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
MemOperand location = VarOperand(var, rcx);
- __ movq(rdx, location);
+ __ movp(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &assign, Label::kNear);
__ Push(var->name());
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&assign);
- __ movq(location, rax);
+ __ movp(location, rax);
if (var->IsContextSlot()) {
- __ movq(rdx, rax);
+ __ movp(rdx, rax);
__ RecordWriteContextSlot(
rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
}
MemOperand location = VarOperand(var, rcx);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
- __ movq(rdx, location);
+ __ movp(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ Check(equal, kLetBindingReInitialization);
}
// Perform the assignment.
- __ movq(location, rax);
+ __ movp(location, rax);
if (var->IsContextSlot()) {
- __ movq(rdx, rax);
+ __ movp(rdx, rax);
__ RecordWriteContextSlot(
rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
}
CallIC(ic, mode, ast_id);
RecordJSReturnSite(expr);
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->Plug(rax);
}
// Call the IC initialization code.
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
- __ movq(rcx, Operand(rsp, (arg_count + 1) * kPointerSize)); // Key.
+ __ movp(rcx, Operand(rsp, (arg_count + 1) * kPointerSize)); // Key.
CallIC(ic, NOT_CONTEXTUAL, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, rax); // Drop the key still on the stack.
}
// Record call targets in unoptimized code.
CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
- __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
+ __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
context()->DropAndPlug(1, rax);
}
// The runtime call returns a pair of values in rax (function) and
// rdx (receiver). Touch up the stack with the right values.
- __ movq(Operand(rsp, (arg_count + 0) * kPointerSize), rdx);
- __ movq(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
+ __ movp(Operand(rsp, (arg_count + 0) * kPointerSize), rdx);
+ __ movp(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
}
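// A minimal standalone sketch (plain C++, not V8 code) of the rsp-relative
// layout the two stores above assume: arguments are pushed left to right on
// top of the receiver and the function, so rsp[arg_count * kPointerSize] is
// the receiver slot and rsp[(arg_count + 1) * kPointerSize] the function
// slot. All names below are illustrative.
#include <cstdio>
#include <vector>

int main() {
const int kPointerSize = 8;
const int arg_count = 2;
// Simulated expression stack; index 0 corresponds to rsp[0] (last argument).
std::vector<const char*> stack = {"arg1", "arg0", "receiver", "function"};
auto slot = [&](int byte_offset) { return stack[byte_offset / kPointerSize]; };
std::printf("rsp[%d] = %s\n", (arg_count + 0) * kPointerSize,
slot((arg_count + 0) * kPointerSize));  // receiver slot
std::printf("rsp[%d] = %s\n", (arg_count + 1) * kPointerSize,
slot((arg_count + 1) * kPointerSize));  // function slot
return 0;
}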
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
- __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
+ __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, rax);
} else if (call_type == Call::GLOBAL_CALL) {
// Call to a global variable. Push global object as receiver for the
// call IC lookup.
// Load function and argument count into rdi and rax.
__ Set(rax, arg_count);
- __ movq(rdi, Operand(rsp, arg_count * kPointerSize));
+ __ movp(rdi, Operand(rsp, arg_count * kPointerSize));
// Record call targets in unoptimized code, but not in the snapshot.
Handle<Object> uninitialized =
__ JumpIfSmi(rax, if_false);
__ CompareRoot(rax, Heap::kNullValueRootIndex);
__ j(equal, if_true);
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
// Undetectable objects behave like undefined when tested with typeof.
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
&if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
// Check whether this map has already been checked to be safe for default
// valueOf.
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitField2Offset),
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ j(not_zero, &skip_lookup);
// Check for fast case object. Generate false result for slow case object.
- __ movq(rcx, FieldOperand(rax, JSObject::kPropertiesOffset));
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rax, JSObject::kPropertiesOffset));
+ __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
__ CompareRoot(rcx, Heap::kHashTableMapRootIndex);
__ j(equal, if_false);
// If one of the keys in the descriptor array is the internalized string
// "valueOf", the result is false.
__ jmp(&entry);
__ bind(&loop);
- __ movq(rdx, FieldOperand(r8, 0));
+ __ movp(rdx, FieldOperand(r8, 0));
__ Cmp(rdx, isolate()->factory()->value_of_string());
__ j(equal, if_false);
__ addq(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
// If a valueOf property is not found on the object, check that its
// prototype is the unmodified String prototype. If not, the result is false.
- __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+ __ movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
__ testq(rcx, Immediate(kSmiTagMask));
__ j(zero, if_false);
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
+ __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movp(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
__ cmpq(rcx,
ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
&if_true, &if_false, &fall_through);
// Get the frame pointer for the calling frame.
- __ movq(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
__ Cmp(Operand(rax, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &check_frame_marker);
- __ movq(rax, Operand(rax, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rax, Operand(rax, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
// ArgumentsAccessStub expects the key in rdx and the formal
// parameter count in rax.
VisitForAccumulatorValue(args->at(0));
- __ movq(rdx, rax);
+ __ movp(rdx, rax);
__ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
__ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
// Check if the calling frame is an arguments adaptor frame.
- __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &exit, Label::kNear);
// Arguments adaptor case: Read the arguments length from the
// adaptor frame.
- __ movq(rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movp(rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ bind(&exit);
__ AssertSmi(rax);
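// A standalone sketch (plain C++, not V8 code) of the frame walk above:
// follow the caller frame pointer, and if the caller's context slot holds
// the smi-tagged ARGUMENTS_ADAPTOR marker, read the actual argument count
// from the adaptor frame; otherwise the formal parameter count applies.
// The marker value and struct layout are assumptions for illustration.
#include <cstdint>
#include <cstdio>

struct Frame {
Frame* caller_fp;            // StandardFrameConstants::kCallerFPOffset
intptr_t context_or_marker;  // smi marker in adaptor frames
intptr_t length;             // smi-tagged argument count (adaptor only)
};

const intptr_t kAdaptorMarker = 4 << 1;  // hypothetical smi-tagged marker

intptr_t ArgumentsLength(Frame* fp, intptr_t formal_count) {
Frame* caller = fp->caller_fp;
if (caller->context_or_marker == kAdaptorMarker)
return caller->length >> 1;  // untag the smi length
return formal_count;           // no adaptor frame: actual == formal
}

int main() {
Frame adaptor = {nullptr, kAdaptorMarker, 3 << 1};
Frame current = {&adaptor, 0, 0};
std::printf("%ld\n", static_cast<long>(ArgumentsLength(&current, 1)));
}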
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
- __ movq(rax, FieldOperand(rax, Map::kConstructorOffset));
+ __ movp(rax, FieldOperand(rax, Map::kConstructorOffset));
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
__ j(not_equal, &non_function_constructor);
// rax now contains the constructor function. Grab the
// instance class name from there.
- __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rax, FieldOperand(rax, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ movp(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rax, FieldOperand(rax, SharedFunctionInfo::kInstanceClassNameOffset));
__ jmp(&done);
// Functions have class 'Function'.
// If the object is not a value type, return the object.
__ CmpObjectType(rax, JS_VALUE_TYPE, rbx);
__ j(not_equal, &done);
- __ movq(rax, FieldOperand(rax, JSValue::kValueOffset));
+ __ movp(rax, FieldOperand(rax, JSValue::kValueOffset));
__ bind(&done);
context()->Plug(rax);
__ j(not_equal, &not_date_object);
if (index->value() == 0) {
- __ movq(result, FieldOperand(object, JSDate::kValueOffset));
+ __ movp(result, FieldOperand(object, JSDate::kValueOffset));
__ jmp(&done);
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
Operand stamp_operand = __ ExternalOperand(stamp);
- __ movq(scratch, stamp_operand);
+ __ movp(scratch, stamp_operand);
__ cmpq(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
- __ movq(result, FieldOperand(object, JSDate::kValueOffset +
+ __ movp(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
__ jmp(&done);
}
__ bind(&runtime);
__ PrepareCallCFunction(2);
- __ movq(arg_reg_1, object);
+ __ movp(arg_reg_1, object);
__ Move(arg_reg_2, index, RelocInfo::NONE64);
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
}
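// A standalone sketch (plain C++, not V8 code) of the cache-stamp scheme
// above: field 0 (the time value) is always valid, while the other cached
// fields are trusted only while the object's stamp matches the global date
// cache stamp; otherwise the runtime recomputes. Names are illustrative.
#include <cstdint>
#include <cstdio>

struct DateCacheLike { int64_t stamp; };
struct JSDateLike {
int64_t cache_stamp;  // JSDate::kCacheStampOffset analogue
int64_t fields[8];    // fields[0] is the time value
};

int64_t GetDateField(const JSDateLike& date, const DateCacheLike& cache,
int index, int64_t (*runtime)(const JSDateLike&, int)) {
if (index == 0) return date.fields[0];  // value field, never stale
if (date.cache_stamp == cache.stamp)    // the cmpq against the stamp operand
return date.fields[index];              // fast path: cached field
return runtime(date, index);            // __ bind(&runtime) path
}

int main() {
DateCacheLike cache = {7};
JSDateLike date = {7, {1234, 2013, 11, 0, 0, 0, 0, 0}};
auto runtime = [](const JSDateLike&, int) -> int64_t { return -1; };
std::printf("%lld\n",
static_cast<long long>(GetDateField(date, cache, 1, runtime)));
}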
__ j(not_equal, &done);
// Store the value.
- __ movq(FieldOperand(rbx, JSValue::kValueOffset), rax);
+ __ movp(FieldOperand(rbx, JSValue::kValueOffset), rax);
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
- __ movq(rdx, rax);
+ __ movp(rdx, rax);
__ RecordWriteField(rbx, JSValue::kValueOffset, rdx, rcx, kDontSaveFPRegs);
__ bind(&done);
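// A standalone sketch (plain C++, not V8 code) of why rax is copied to rdx
// before RecordWriteField above: the barrier may clobber the register that
// holds the value, and the value is still needed afterwards, so only the
// expendable copy is handed to the barrier. Names are illustrative.
#include <cstdio>

struct RememberedSet { void** slots[16]; int count = 0; };

// Like the generated barrier, this may clobber 'value'.
void RecordWrite(RememberedSet* set, void** slot, void** value) {
set->slots[set->count++] = slot;  // remember the written slot
*value = nullptr;                 // simulate clobbering the register
}

int main() {
RememberedSet set;
void* field = nullptr;
void* value = &set;        // the value being stored (in "rax")
void* value_copy = value;  // __ movp(rdx, rax)
field = value;             // the store itself
RecordWrite(&set, &field, &value_copy);
std::printf("field=%p, value register intact=%d\n", field, value == &set);
}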
__ j(not_equal, &runtime);
// InvokeFunction requires the function in rdi. Move it in there.
- __ movq(rdi, result_register());
+ __ movp(rdi, result_register());
ParameterCount count(arg_count);
__ InvokeFunction(rdi, count, CALL_FUNCTION, NullCallWrapper());
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
__ bind(&runtime);
Register key = rax;
Register cache = rbx;
Register tmp = rcx;
- __ movq(cache, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movq(cache,
+ __ movp(cache, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
+ __ movp(cache,
FieldOperand(cache, GlobalObject::kNativeContextOffset));
- __ movq(cache,
+ __ movp(cache,
ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ movq(cache,
+ __ movp(cache,
FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
Label done, not_found;
// Load the cache finger offset into tmp as a smi.
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ movq(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
+ __ movp(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
SmiIndex index =
__ SmiToIndex(kScratchRegister, tmp, kPointerSizeLog2);
__ cmpq(key, FieldOperand(cache,
index.reg,
index.scale,
FixedArray::kHeaderSize));
__ j(not_equal, &not_found, Label::kNear);
- __ movq(rax, FieldOperand(cache,
+ __ movp(rax, FieldOperand(cache,
index.reg,
index.scale,
FixedArray::kHeaderSize + kPointerSize));
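// A standalone sketch (plain C++, not V8 code) of the probe above: the
// function result cache is a flat array of key/value pairs plus a "finger"
// that points at the most recently hit key; the generated fast path only
// compares the key under the finger and loads its neighbouring value.
// The layout here is an assumption for illustration.
#include <cstdint>
#include <cstdio>
#include <vector>

struct ResultCacheLike {
size_t finger;                  // JSFunctionResultCache::kFingerOffset
std::vector<intptr_t> entries;  // key0, value0, key1, value1, ...
};

bool ProbeUnderFinger(const ResultCacheLike& cache, intptr_t key,
intptr_t* value) {
if (cache.entries[cache.finger] != key) return false;  // &not_found path
*value = cache.entries[cache.finger + 1];  // kHeaderSize + kPointerSize
return true;
}

int main() {
ResultCacheLike cache = {2, {10, 100, 20, 200}};
intptr_t v = 0;
std::printf("hit=%d value=%ld\n", ProbeUnderFinger(cache, 20, &v),
static_cast<long>(v));
}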
Condition either_smi = masm()->CheckEitherSmi(left, right, tmp);
__ j(either_smi, &fail, Label::kNear);
__ j(zero, &fail, Label::kNear);
- __ movq(tmp, FieldOperand(left, HeapObject::kMapOffset));
+ __ movp(tmp, FieldOperand(left, HeapObject::kMapOffset));
__ cmpb(FieldOperand(tmp, Map::kInstanceTypeOffset),
Immediate(JS_REGEXP_TYPE));
__ j(not_equal, &fail, Label::kNear);
__ cmpq(tmp, FieldOperand(right, HeapObject::kMapOffset));
__ j(not_equal, &fail, Label::kNear);
- __ movq(tmp, FieldOperand(left, JSRegExp::kDataOffset));
+ __ movp(tmp, FieldOperand(left, JSRegExp::kDataOffset));
__ cmpq(tmp, FieldOperand(right, JSRegExp::kDataOffset));
__ j(equal, &ok, Label::kNear);
__ bind(&fail);
// Array has fast elements, so its length must be a smi.
// If the array has length zero, return the empty string.
- __ movq(array_length, FieldOperand(array, JSArray::kLengthOffset));
+ __ movp(array_length, FieldOperand(array, JSArray::kLengthOffset));
__ SmiCompare(array_length, Smi::FromInt(0));
__ j(not_zero, &non_trivial_array);
__ LoadRoot(rax, Heap::kempty_stringRootIndex);
// Save the FixedArray containing array's elements.
// End of array's live range.
elements = array;
- __ movq(elements, FieldOperand(array, JSArray::kElementsOffset));
+ __ movp(elements, FieldOperand(array, JSArray::kElementsOffset));
array = no_reg;
__ Assert(below, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
}
__ bind(&loop);
- __ movq(string, FieldOperand(elements,
+ __ movp(string, FieldOperand(elements,
index,
times_pointer_size,
FixedArray::kHeaderSize));
__ JumpIfSmi(string, &bailout);
- __ movq(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movp(scratch, FieldOperand(string, HeapObject::kMapOffset));
__ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ andb(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
// If array_length is 1, return elements[0], a string.
__ cmpl(array_length, Immediate(1));
__ j(not_equal, &not_size_one_array);
- __ movq(rax, FieldOperand(elements, FixedArray::kHeaderSize));
+ __ movp(rax, FieldOperand(elements, FixedArray::kHeaderSize));
__ jmp(&return_result);
__ bind(&not_size_one_array);
// index: Array length.
// Check that the separator is a sequential ASCII string.
- __ movq(string, separator_operand);
+ __ movp(string, separator_operand);
__ JumpIfSmi(string, &bailout);
- __ movq(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movp(scratch, FieldOperand(string, HeapObject::kMapOffset));
__ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ andb(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
// elements: FixedArray of strings.
__ AllocateAsciiString(result_pos, string_length, scratch,
index, string, &bailout);
- __ movq(result_operand, result_pos);
+ __ movp(result_operand, result_pos);
__ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
- __ movq(string, separator_operand);
+ __ movp(string, separator_operand);
__ SmiCompare(FieldOperand(string, SeqOneByteString::kLengthOffset),
Smi::FromInt(1));
__ j(equal, &one_char_separator);
// scratch: array length.
// Get string = array[index].
- __ movq(string, FieldOperand(elements, index,
+ __ movp(string, FieldOperand(elements, index,
times_pointer_size,
FixedArray::kHeaderSize));
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
__ bind(&loop_2_entry);
// Get string = array[index].
- __ movq(string, FieldOperand(elements, index,
+ __ movp(string, FieldOperand(elements, index,
times_pointer_size,
FixedArray::kHeaderSize));
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
// Replace separator string with pointer to its first character, and
// make scratch be its length.
- __ movq(string, separator_operand);
+ __ movp(string, separator_operand);
__ SmiToInteger32(scratch,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ movq(separator_operand, string);
+ __ movp(separator_operand, string);
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator.
// separator_operand (rsp[0x10]): Address of first char of separator.
// Copy the separator to the result.
- __ movq(string, separator_operand);
+ __ movp(string, separator_operand);
__ movl(string_length, scratch);
__ CopyBytes(result_pos, string, string_length, 2);
__ bind(&loop_3_entry);
// Get string = array[index].
- __ movq(string, Operand(elements, index, times_pointer_size, 0));
+ __ movp(string, Operand(elements, index, times_pointer_size, 0));
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
__ j(not_equal, &loop_3); // Loop while (index < 0).
__ bind(&done);
- __ movq(rax, result_operand);
+ __ movp(rax, result_operand);
__ bind(&return_result);
// Drop temp values from the stack, and restore context register.
__ addq(rsp, Immediate(3 * kPointerSize));
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->Plug(rax);
}
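// A standalone sketch (plain C++, not V8 code) of the join strategy the
// code above implements: validate and measure everything first, allocate
// the result string once, then run a copy loop (the generated code further
// specializes the empty and one-character separator cases).
#include <cstdio>
#include <string>
#include <vector>

std::string FastJoin(const std::vector<std::string>& elements,
const std::string& separator) {
if (elements.empty()) return std::string();    // empty array => ""
if (elements.size() == 1) return elements[0];  // the length-one fast case
size_t total = separator.size() * (elements.size() - 1);
for (const std::string& s : elements) total += s.size();  // measuring pass
std::string result;
result.reserve(total);  // single allocation, like AllocateAsciiString
result += elements[0];  // first element has no separator before it
for (size_t i = 1; i < elements.size(); ++i) {  // loop_2 / loop_3 analogue
result += separator;
result += elements[i];
}
return result;
}

int main() {
std::printf("%s\n", FastJoin({"a", "b", "c"}, ", ").c_str());
}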
if (expr->is_jsruntime()) {
// Prepare for calling JS runtime function.
- __ movq(rax, GlobalObjectOperand());
+ __ movp(rax, GlobalObjectOperand());
__ push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
}
Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(arg_count);
CallIC(ic, NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
} else {
__ CallRuntime(expr->function(), arg_count);
}
} else {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
- __ movq(rdx, Operand(rsp, 0)); // Leave receiver on stack
+ __ movp(rdx, Operand(rsp, 0)); // Leave receiver on stack
__ push(rax); // Copy of key, needed for later store.
EmitKeyedPropertyLoad(prop);
}
__ push(rax);
break;
case NAMED_PROPERTY:
- __ movq(Operand(rsp, kPointerSize), rax);
+ __ movp(Operand(rsp, kPointerSize), rax);
break;
case KEYED_PROPERTY:
- __ movq(Operand(rsp, 2 * kPointerSize), rax);
+ __ movp(Operand(rsp, 2 * kPointerSize), rax);
break;
}
}
__ push(rax);
break;
case NAMED_PROPERTY:
- __ movq(Operand(rsp, kPointerSize), rax);
+ __ movp(Operand(rsp, kPointerSize), rax);
break;
case KEYED_PROPERTY:
- __ movq(Operand(rsp, 2 * kPointerSize), rax);
+ __ movp(Operand(rsp, 2 * kPointerSize), rax);
break;
}
}
// Call stub for +1/-1.
__ bind(&stub_call);
- __ movq(rdx, rax);
+ __ movp(rdx, rax);
__ Move(rax, Smi::FromInt(1));
BinaryOpICStub stub(expr->binary_op(), NO_OVERWRITE);
CallIC(stub.GetCode(isolate()),
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "Global variable");
__ Move(rcx, proxy->name());
- __ movq(rax, GlobalObjectOperand());
+ __ movp(rax, GlobalObjectOperand());
// Use a regular load, not a contextual load, to avoid a reference
// error.
CallLoadIC(NOT_CONTEXTUAL);
if (check->Equals(isolate()->heap()->number_string())) {
__ JumpIfSmi(rax, if_true);
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ CompareRoot(rax, Heap::kHeapNumberMapRootIndex);
Split(equal, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->string_string())) {
__ j(equal, if_true);
__ JumpIfSmi(rax, if_false);
// Check for undetectable objects => true.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rdx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
- __ movq(rcx, rdx);
+ __ movp(rcx, rdx);
__ or_(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
__ cmpq(rdx, rax);
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
context()->Plug(rax);
}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT(IsAligned(frame_offset, kPointerSize));
- __ movq(Operand(rbp, frame_offset), value);
+ __ movp(Operand(rbp, frame_offset), value);
}
void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ movq(dst, ContextOperand(rsi, context_index));
+ __ movp(dst, ContextOperand(rsi, context_index));
}
__ Drop(*stack_depth); // Down to the handler block.
if (*context_length > 0) {
// Restore the context to its dedicated register and the stack.
- __ movq(rsi, Operand(rsp, StackHandlerConstants::kContextOffset));
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+ __ movp(rsi, Operand(rsp, StackHandlerConstants::kContextOffset));
+ __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
}
__ PopTryHandler();
__ call(finally_entry_);
__ JumpIfSmi(receiver, miss);
// Check that the receiver is a valid JS object.
- __ movq(r1, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movp(r1, FieldOperand(receiver, HeapObject::kMapOffset));
__ movb(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
__ cmpb(r0, Immediate(FIRST_SPEC_OBJECT_TYPE));
__ j(below, miss);
(1 << Map::kHasNamedInterceptor)));
__ j(not_zero, miss);
- __ movq(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ __ movp(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
__ CompareRoot(FieldOperand(r0, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, miss);
// Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ movq(result,
+ __ movp(result,
Operand(elements, r1, times_pointer_size,
kValueOffset - kHeapObjectTag));
}
__ lea(scratch1, Operand(elements,
scratch1,
times_pointer_size,
kValueOffset - kHeapObjectTag));
- __ movq(Operand(scratch1, 0), value);
+ __ movp(Operand(scratch1, 0), value);
// Update write barrier. Make sure not to clobber the value.
- __ movq(scratch0, value);
+ __ movp(scratch0, value);
__ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
}
//
// scratch - used to hold elements of the receiver and the loaded value.
- __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
+ __ movp(elements, FieldOperand(receiver, JSObject::kElementsOffset));
if (not_fast_array != NULL) {
// Check that the object is in fast mode and writable.
__ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
__ j(above_equal, out_of_range);
// Fast case: Do the load.
SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
- __ movq(scratch, FieldOperand(elements,
+ __ movp(scratch, FieldOperand(elements,
index.reg,
index.scale,
FixedArray::kHeaderSize));
// to ensure the prototype chain is searched.
__ j(equal, out_of_range);
if (!result.is(scratch)) {
- __ movq(result, scratch);
+ __ movp(result, scratch);
}
}
__ bind(&check_number_dictionary);
__ SmiToInteger32(rbx, rax);
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check whether the elements is a number dictionary.
// rdx: receiver
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary leaving result in rcx.
- __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(equal, &probe_dictionary);
// Load the map of the receiver, compute the keyed lookup cache hash
// based on 32 bits of the map pointer and the string hash.
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ movl(rcx, rbx);
__ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift));
__ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
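// A standalone sketch (plain C++, not V8 code) of the hash computed above:
// low bits of the receiver's map pointer, shifted, are mixed with the
// name's hash field and masked into the cache's key table. The shift and
// capacity below are illustrative, not V8's exact constants.
#include <cstdint>
#include <cstdio>

const int kMapHashShift = 5;    // stand-in for KeyedLookupCache::kMapHashShift
const int kCacheCapacity = 64;  // power of two, so masking works

int CacheIndex(uintptr_t map_ptr, uint32_t name_hash_field) {
uint32_t hash = static_cast<uint32_t>(map_ptr) >> kMapHashShift;
hash ^= name_hash_field;             // combine map bits and name hash
return hash & (kCacheCapacity - 1);  // mask into the key table
}

int main() {
std::printf("%d\n", CacheIndex(0xcafe1230u, 0x1234u));
}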
for (int i = 0; i < kEntriesPerBucket - 1; i++) {
Label try_next_entry;
- __ movq(rdi, rcx);
+ __ movp(rdi, rcx);
__ shl(rdi, Immediate(kPointerSizeLog2 + 1));
__ LoadAddress(kScratchRegister, cache_keys);
int off = kPointerSize * i * 2;
__ bind(&load_in_object_property);
__ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ addq(rcx, rdi);
- __ movq(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
+ __ movp(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
// Load property array property.
__ bind(&property_array_property);
- __ movq(rax, FieldOperand(rdx, JSObject::kPropertiesOffset));
- __ movq(rax, FieldOperand(rax, rdi, times_pointer_size,
+ __ movp(rax, FieldOperand(rdx, JSObject::kPropertiesOffset));
+ __ movp(rax, FieldOperand(rax, rdi, times_pointer_size,
FixedArray::kHeaderSize));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
// rax: key
// rbx: elements
- __ movq(rcx, FieldOperand(rdx, JSObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rdx, JSObject::kMapOffset));
__ movb(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
GenerateGlobalInstanceTypeCheck(masm, rcx, &slow);
__ JumpUnlessNonNegativeSmi(rax, &slow);
// Get the map of the receiver.
- __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that it has indexed interceptor and access checks
// are not enabled for this object.
// rdx: receiver (a JSArray)
// r9: map of receiver
if (check_map == kCheckMap) {
- __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
__ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
__ j(not_equal, fast_double);
}
// We have to go to the runtime if the current value is the hole because
// there may be a callback on the element.
Label holecheck_passed1;
- __ movq(kScratchRegister, FieldOperand(rbx,
+ __ movp(kScratchRegister, FieldOperand(rbx,
rcx,
times_pointer_size,
FixedArray::kHeaderSize));
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
}
// It's irrelevant whether array is smi-only or not when writing a smi.
- __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ __ movp(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
__ ret(0);
__ leal(rdi, Operand(rcx, 1));
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
}
- __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ __ movp(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
- __ movq(rdx, rax); // Preserve the value which is returned.
+ __ movp(rdx, rax); // Preserve the value which is returned.
__ RecordWriteArray(
rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ ret(0);
__ ret(0);
__ bind(&transition_smi_elements);
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
// Transition the array appropriately depending on the value type.
- __ movq(r9, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(r9, FieldOperand(rax, HeapObject::kMapOffset));
__ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &non_double_value);
AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
__ bind(&transition_double_elements);
// Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
// HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
// transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
FAST_ELEMENTS,
rbx,
slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
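// A standalone sketch (plain C++, not V8 code) of the transition choice the
// three paths above implement: a smi-only array receiving a heap number
// moves to FAST_DOUBLE_ELEMENTS, receiving any other heap object moves to
// FAST_ELEMENTS, and a double array receiving a non-number object moves to
// FAST_ELEMENTS. Enum names mirror the kinds used above.
#include <cstdio>

enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };
enum ValueKind { SMI, HEAP_NUMBER, OTHER_HEAP_OBJECT };

ElementsKind TransitionFor(ElementsKind kind, ValueKind value) {
if (kind == FAST_SMI_ELEMENTS && value == HEAP_NUMBER)
return FAST_DOUBLE_ELEMENTS;  // transition_smi_elements, double value
if (kind == FAST_SMI_ELEMENTS && value == OTHER_HEAP_OBJECT)
return FAST_ELEMENTS;         // non_double_value path
if (kind == FAST_DOUBLE_ELEMENTS && value == OTHER_HEAP_OBJECT)
return FAST_ELEMENTS;         // transition_double_elements path
return kind;                  // no transition needed
}

int main() {
std::printf("%d\n", TransitionFor(FAST_SMI_ELEMENTS, HEAP_NUMBER));
}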
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow_with_tagged_index);
// Get the map from the receiver.
- __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(r9, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks and is not observed.
// The generic stub does not perform map checks or handle observed objects.
__ testb(FieldOperand(r9, Map::kBitFieldOffset),
// rax: value
// rdx: JSObject
// rcx: index
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check array bounds.
__ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
// rax: value
__ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
__ j(below_equal, &slow);
// Increment index to get new length.
- __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
__ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_if_double_array);
__ jmp(&fast_object_grow);
// rax: value
// rdx: receiver (a JSArray)
// rcx: index
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
Label miss;
StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
+ __ movp(rdx, args.GetReceiverOperand());
GenerateNameDictionaryReceiverCheck(masm, rdx, rax, rbx, &miss);
}
StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
+ __ movp(rdx, args.GetReceiverOperand());
// Enter an internal frame.
{
__ CallStub(&stub);
// Move result to rdi and exit the internal frame.
- __ movq(rdi, rax);
+ __ movp(rdi, rax);
}
// Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
if (id == IC::kCallIC_Miss) {
Label invoke, global;
- __ movq(rdx, args.GetReceiverOperand());
+ __ movp(rdx, args.GetReceiverOperand());
__ JumpIfSmi(rdx, &invoke);
__ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
__ j(equal, &global);
// Patch the receiver on the stack.
__ bind(&global);
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ movq(args.GetReceiverOperand(), rdx);
+ __ movp(args.GetReceiverOperand(), rdx);
__ bind(&invoke);
}
// -----------------------------------
StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
+ __ movp(rdx, args.GetReceiverOperand());
GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
GenerateMiss(masm, argc, extra_ic_state);
}
// -----------------------------------
StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
+ __ movp(rdx, args.GetReceiverOperand());
Label do_call, slow_call, slow_load;
Label check_number_dictionary, check_name, lookup_monomorphic_cache;
__ CallRuntime(Runtime::kKeyedGetProperty, 2);
__ pop(rcx); // restore the key
}
- __ movq(rdi, rax);
+ __ movp(rdi, rax);
__ jmp(&do_call);
__ bind(&check_name);
GenerateKeyedLoadReceiverCheck(
masm, rdx, rax, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
- __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, &lookup_monomorphic_cache);
// Load the elements into scratch1 and check its map. If not, jump
// to the unmapped lookup with the parameter map in scratch1.
Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
- __ movq(scratch1, FieldOperand(object, JSObject::kElementsOffset));
+ __ movp(scratch1, FieldOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
// Check if element is in the range of mapped arguments.
- __ movq(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
+ __ movp(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
__ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
__ cmpq(key, scratch2);
__ j(greater_equal, unmapped_case);
// Load element index and check whether it is the hole.
const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
__ SmiToInteger64(scratch3, key);
- __ movq(scratch2, FieldOperand(scratch1,
+ __ movp(scratch2, FieldOperand(scratch1,
scratch3,
times_pointer_size,
kHeaderSize));
// Load value from context and return it. We can reuse scratch1 because
// we do not jump to the unmapped lookup (which requires the parameter
// map in scratch1).
- __ movq(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
+ __ movp(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
__ SmiToInteger64(scratch3, scratch2);
return FieldOperand(scratch1,
scratch3,
times_pointer_size,
Context::kHeaderSize);
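// A standalone sketch (plain C++, not V8 code) of the aliased-arguments
// lookup above: in the parameter map, elements[0] is the context,
// elements[1] the backing store, and elements[2 + i] is either a context
// slot index (parameter i is aliased to a local) or the hole (fall through
// to the backing store). Plain ints stand in for tagged values; -1 plays
// the hole.
#include <cstdio>
#include <vector>

const int kHole = -1;

int LoadMappedArgument(const std::vector<int>& parameter_map,
const std::vector<int>& context,
const std::vector<int>& backing_store, int key) {
int mapped_count = static_cast<int>(parameter_map.size()) - 2;
if (key < mapped_count && parameter_map[2 + key] != kHole)
return context[parameter_map[2 + key]];  // mapped: read the context slot
return backing_store[key];                 // unmapped lookup path
}

int main() {
// Parameter 0 is aliased to context slot 5; parameter 1 is unmapped.
std::vector<int> parameter_map = {/*context*/ 0, /*store*/ 0, 5, kHole};
std::vector<int> context = {0, 0, 0, 0, 0, 42};
std::vector<int> backing_store = {7, 8};
std::printf("%d %d\n",
LoadMappedArgument(parameter_map, context, backing_store, 0),
LoadMappedArgument(parameter_map, context, backing_store, 1));
}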
// overwritten.
const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
Register backing_store = parameter_map;
- __ movq(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+ __ movp(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
- __ movq(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
+ __ movp(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
__ cmpq(key, scratch);
__ j(greater_equal, slow_case);
__ SmiToInteger64(scratch, key);
Operand mapped_location =
GenerateMappedArgumentsLookup(
masm, rdx, rax, rbx, rcx, rdi, &notin, &slow);
- __ movq(rax, mapped_location);
+ __ movp(rax, mapped_location);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in rbx.
Operand unmapped_location =
GenerateUnmappedArgumentsLookup(masm, rax, rbx, rcx, &slow);
__ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
__ j(equal, &slow);
- __ movq(rax, unmapped_location);
+ __ movp(rax, unmapped_location);
__ Ret();
__ bind(&slow);
GenerateMiss(masm);
Label slow, notin;
Operand mapped_location = GenerateMappedArgumentsLookup(
masm, rdx, rcx, rbx, rdi, r8, &notin, &slow);
- __ movq(mapped_location, rax);
+ __ movp(mapped_location, rax);
__ lea(r9, mapped_location);
- __ movq(r8, rax);
+ __ movp(r8, rax);
__ RecordWrite(rbx,
r9,
r8,
// The unmapped lookup expects that the parameter map is in rbx.
Operand unmapped_location =
GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rdi, &slow);
- __ movq(unmapped_location, rax);
+ __ movp(unmapped_location, rax);
__ lea(r9, unmapped_location);
- __ movq(r8, rax);
+ __ movp(r8, rax);
__ RecordWrite(rbx,
r9,
r8,
// -----------------------------------
Label slow, notin;
StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
+ __ movp(rdx, args.GetReceiverOperand());
Operand mapped_location = GenerateMappedArgumentsLookup(
masm, rdx, rcx, rbx, rax, r8, &notin, &slow);
- __ movq(rdi, mapped_location);
+ __ movp(rdi, mapped_location);
GenerateFunctionTailCall(masm, argc, &slow);
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in rbx.
Operand unmapped_location =
GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rax, &slow);
__ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
__ j(equal, &slow);
- __ movq(rdi, unmapped_location);
+ __ movp(rdi, unmapped_location);
GenerateFunctionTailCall(masm, argc, &slow);
__ bind(&slow);
GenerateMiss(masm, argc);
void LCodeGen::MakeSureStackPagesMapped(int offset) {
const int kPageSize = 4 * KB;
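// Touching one word per page below makes the OS commit each stack page in
// order (guard-page style stack growth, as e.g. Windows requires); this is
// the likely reason this helper exists.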
for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
- __ movq(Operand(rsp, offset), rax);
+ __ movp(Operand(rsp, offset), rax);
}
}
#endif
!info_->is_native()) {
Label ok;
StackArgumentsAccessor args(rsp, scope()->num_parameters());
- __ movq(rcx, args.GetReceiverOperand());
+ __ movp(rcx, args.GetReceiverOperand());
__ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &ok, Label::kNear);
- __ movq(rcx, GlobalObjectOperand());
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
+ __ movp(rcx, GlobalObjectOperand());
+ __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
- __ movq(args.GetReceiverOperand(), rcx);
+ __ movp(args.GetReceiverOperand(), rcx);
__ bind(&ok);
}
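// A standalone sketch (plain C++, not V8 code) of the prologue fix-up
// above: in classic (sloppy) mode, a non-native function whose receiver is
// undefined gets the global receiver patched in instead. Names are
// illustrative.
#include <cstdio>

struct ObjectLike { const char* name; };
ObjectLike undefined_sentinel = {"undefined"};
ObjectLike global_receiver = {"global receiver"};

ObjectLike* PatchReceiver(ObjectLike* receiver, bool is_classic_mode,
bool is_native) {
if (is_classic_mode && !is_native && receiver == &undefined_sentinel)
return &global_receiver;  // __ movp(args.GetReceiverOperand(), rcx)
return receiver;
}

int main() {
std::printf("%s\n", PatchReceiver(&undefined_sentinel, true, false)->name);
}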
__ movq(kScratchRegister, kSlotsZapValue);
Label loop;
__ bind(&loop);
- __ movq(MemOperand(rsp, rax, times_pointer_size, 0),
+ __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
kScratchRegister);
__ decl(rax);
__ j(not_zero, &loop);
RecordSafepoint(Safepoint::kNoLazyDeopt);
// Context is returned in both rax and rsi. It replaces the context
// passed to us. It's saved in the stack and kept live in rsi.
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+ __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
// Load parameter from stack.
- __ movq(rax, Operand(rbp, parameter_offset));
+ __ movp(rax, Operand(rbp, parameter_offset));
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
- __ movq(Operand(rsi, context_offset), rax);
+ __ movp(Operand(rsi, context_offset), rax);
// Update the write barrier. This clobbers rax and rbx.
__ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
}
__ jmp(&needs_frame);
} else {
__ bind(&needs_frame);
- __ movq(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
__ push(rbp);
- __ movq(rbp, rsp);
+ __ movp(rbp, rsp);
__ push(rsi);
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
ASSERT(info()->IsStub());
__ Move(rsi, Smi::FromInt(StackFrame::STUB));
__ push(rsi);
- __ movq(rsi, MemOperand(rsp, kPointerSize));
+ __ movp(rsi, MemOperand(rsp, kPointerSize));
__ call(kScratchRegister);
}
} else {
Comment(";;; Destroy frame");
ASSERT(frame_is_built_);
frame_is_built_ = false;
- __ movq(rsp, rbp);
+ __ movp(rsp, rbp);
__ pop(rbp);
}
__ jmp(code->exit());
void LCodeGen::LoadContextFromDeferred(LOperand* context) {
if (context->IsRegister()) {
if (!ToRegister(context).is(rsi)) {
- __ movq(rsi, ToRegister(context));
+ __ movp(rsi, ToRegister(context));
}
} else if (context->IsStackSlot()) {
- __ movq(rsi, ToOperand(context));
+ __ movp(rsi, ToOperand(context));
} else if (context->IsConstantOperand()) {
HConstant* constant =
chunk_->LookupConstant(LConstantOperand::cast(context));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ movq(kScratchRegister, left);
+ __ movp(kScratchRegister, left);
} else {
__ movl(kScratchRegister, left);
}
Register input = ToRegister(instr->value());
// Load map into |result|.
- __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
+ __ movp(result, FieldOperand(input, HeapObject::kMapOffset));
// Load the map's "bit field 2" into |result|. We only need the first byte.
__ movzxbq(result, FieldOperand(result, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
// If the object is not a value type, return the object.
__ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
__ j(not_equal, &done, Label::kNear);
- __ movq(result, FieldOperand(input, JSValue::kValueOffset));
+ __ movp(result, FieldOperand(input, JSValue::kValueOffset));
__ bind(&done);
}
DeoptimizeIf(not_equal, instr->environment());
if (index->value() == 0) {
- __ movq(result, FieldOperand(object, JSDate::kValueOffset));
+ __ movp(result, FieldOperand(object, JSDate::kValueOffset));
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
Operand stamp_operand = __ ExternalOperand(stamp);
- __ movq(kScratchRegister, stamp_operand);
+ __ movp(kScratchRegister, stamp_operand);
__ cmpq(kScratchRegister, FieldOperand(object,
JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
- __ movq(result, FieldOperand(object, JSDate::kValueOffset +
+ __ movp(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
__ jmp(&done, Label::kNear);
}
__ bind(&runtime);
__ PrepareCallCFunction(2);
- __ movq(arg_reg_1, object);
+ __ movp(arg_reg_1, object);
__ Move(arg_reg_2, index, RelocInfo::NONE64);
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
__ bind(&done);
if (FLAG_debug_code) {
__ push(string);
- __ movq(string, FieldOperand(string, HeapObject::kMapOffset));
+ __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
__ movzxbq(string, FieldOperand(string, Map::kInstanceTypeOffset));
__ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
ASSERT(!instr->hydrogen_value()->representation().IsSmi());
__ cmpl(left_reg, right_imm);
__ j(condition, &return_left, Label::kNear);
- __ movq(left_reg, right_imm);
+ __ movp(left_reg, right_imm);
} else if (right->IsRegister()) {
Register right_reg = ToRegister(right);
if (instr->hydrogen_value()->representation().IsSmi()) {
__ cmpl(left_reg, right_reg);
}
__ j(condition, &return_left, Label::kNear);
- __ movq(left_reg, right_reg);
+ __ movp(left_reg, right_reg);
} else {
Operand right_op = ToOperand(right);
if (instr->hydrogen_value()->representation().IsSmi()) {
__ cmpl(left_reg, right_op);
}
__ j(condition, &return_left, Label::kNear);
- __ movq(left_reg, right_op);
+ __ movp(left_reg, right_op);
}
__ bind(&return_left);
} else {
const Register map = kScratchRegister;
if (expected.NeedsMap()) {
- __ movq(map, FieldOperand(reg, HeapObject::kMapOffset));
+ __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
if (expected.CanBeUndetectable()) {
// Undetectable -> false.
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ j(equal, is_object);
- __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
+ __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
// Undetectable objects behave like undefined.
__ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
if (!instr->hydrogen()->value()->IsHeapObject()) {
__ JumpIfSmi(input, instr->FalseLabel(chunk_));
}
- __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
+ __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
__ testb(FieldOperand(temp, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
EmitBranch(instr, not_zero);
} else {
// Faster code path to avoid two compares: subtract lower bound from the
// actual type and do a signed compare with the width of the type range.
- __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
+ __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
__ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
__ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
- __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
+ __ movp(temp, FieldOperand(temp, Map::kConstructorOffset));
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
// temp now contains the constructor function. Grab the
// instance class name from there.
- __ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ movq(temp, FieldOperand(temp,
+ __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(temp, FieldOperand(temp,
SharedFunctionInfo::kInstanceClassNameOffset));
// The class name we are testing against is internalized since it's a literal.
// The name in the constructor is internalized because of the way the
// context is booted.
Label cache_miss;
// Use a temp register to avoid memory operands with variable lengths.
Register map = ToRegister(instr->temp());
- __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
+ __ movp(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
__ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
// Move result to a register that survives the end of the
// PushSafepointRegisterScope.
- __ movq(kScratchRegister, rax);
+ __ movp(kScratchRegister, rax);
}
__ testq(kScratchRegister, kScratchRegister);
Label load_false;
// managed by the register allocator and tearing down the frame, it's
// safe to write to the context register.
__ push(rax);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles()) {
}
int no_frame_start = -1;
if (NeedsEagerFrame()) {
- __ movq(rsp, rbp);
+ __ movp(rsp, rbp);
__ pop(rbp);
no_frame_start = masm_->pc_offset();
}
__ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr->environment());
// Store the value.
- __ movq(Operand(cell, 0), value);
+ __ movp(Operand(cell, 0), value);
} else {
// Store the value.
__ Move(kScratchRegister, cell_handle, RelocInfo::CELL);
- __ movq(Operand(kScratchRegister, 0), value);
+ __ movp(Operand(kScratchRegister, 0), value);
}
// Cells are always rescanned, so no write barrier here.
}
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ movq(result, ContextOperand(context, instr->slot_index()));
+ __ movp(result, ContextOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
__ j(not_equal, &skip_assignment);
}
}
- __ movq(target, value);
+ __ movp(target, value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
SmiCheck check_needed =
Register result = ToRegister(instr->result());
if (!access.IsInobject()) {
- __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
object = result;
}
__ j(not_zero, &non_instance, Label::kNear);
// Get the prototype or initial map from the function.
- __ movq(result,
+ __ movp(result,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
// Check that the function has a prototype or an initial map.
__ j(not_equal, &done, Label::kNear);
// Get the prototype from the initial map.
- __ movq(result, FieldOperand(result, Map::kPrototypeOffset));
+ __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
__ jmp(&done, Label::kNear);
// Non-instance prototype: Fetch prototype from constructor field
// in the function's map.
__ bind(&non_instance);
- __ movq(result, FieldOperand(result, Map::kConstructorOffset));
+ __ movp(result, FieldOperand(result, Map::kConstructorOffset));
// All done.
__ bind(&done);
LLoadExternalArrayPointer* instr) {
Register result = ToRegister(instr->result());
Register input = ToRegister(instr->object());
- __ movq(result, FieldOperand(input,
+ __ movp(result, FieldOperand(input,
ExternalPixelArray::kExternalPointerOffset));
}
int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
StackArgumentsAccessor args(arguments, const_length,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(result, args.GetArgumentOperand(const_index));
+ __ movp(result, args.GetArgumentOperand(const_index));
} else {
Register length = ToRegister(instr->length());
// There are two words between the frame pointer and the last argument.
}
StackArgumentsAccessor args(arguments, length,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(result, args.GetArgumentOperand(0));
+ __ movp(result, args.GetArgumentOperand(0));
}
}
} else {
// Check for arguments adapter frame.
Label done, adapted;
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adapted, Label::kNear);
// No arguments adaptor frame.
- __ movq(result, rbp);
+ __ movp(result, rbp);
__ jmp(&done, Label::kNear);
// Arguments adaptor frame present.
__ bind(&adapted);
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
__ j(equal, &done, Label::kNear);
// Arguments adaptor frame present. Get argument length from there.
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ SmiToInteger32(result,
Operand(result,
ArgumentsAdaptorFrameConstants::kLengthOffset));
// Do not transform the receiver to object for strict mode
// functions.
- __ movq(kScratchRegister,
+ __ movp(kScratchRegister,
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
__ testb(FieldOperand(kScratchRegister,
SharedFunctionInfo::kStrictModeByteOffset),
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
- __ movq(receiver, FieldOperand(function, JSFunction::kContextOffset));
- __ movq(receiver,
+ __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
+ __ movp(receiver,
Operand(receiver, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(receiver,
+ __ movp(receiver,
FieldOperand(receiver, GlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
}
DeoptimizeIf(above, instr->environment());
__ push(receiver);
- __ movq(receiver, length);
+ __ movp(receiver, length);
// Loop through the arguments pushing them onto the execution
// stack.
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
void LCodeGen::DoContext(LContext* instr) {
Register result = ToRegister(instr->result());
if (info()->IsOptimizing()) {
- __ movq(result, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
} else {
// If there is no frame, the context must be in rsi.
ASSERT(result.is(rsi));
void LCodeGen::DoOuterContext(LOuterContext* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ movq(result,
+ __ movp(result,
Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ movq(result,
+ __ movp(result,
Operand(context, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
}
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
Register global = ToRegister(instr->global());
Register result = ToRegister(instr->result());
- __ movq(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
+ __ movp(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
}
}
// Change context.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Set rax to arguments count if adaptation is not needed. Assumes that rax
// is available to write to at this point.
}
// Change context.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
CallRuntimeFromDeferred(
Runtime::kAllocateHeapNumber, 0, instr, instr->context());
// Set the pointer to the new heap number in tmp.
- if (!tmp.is(rax)) __ movq(tmp, rax);
+ if (!tmp.is(rax)) __ movp(tmp, rax);
// Restore input_reg after call to runtime.
__ LoadFromSafepointRegisterSlot(input_reg, input_reg);
Label packed_case;
// We might need a change here; look at the first argument.
- __ movq(rcx, Operand(rsp, 0));
+ __ movp(rcx, Operand(rsp, 0));
__ testq(rcx, rcx);
__ j(zero, &packed_case, Label::kNear);
Register function = ToRegister(instr->function());
Register code_object = ToRegister(instr->code_object());
__ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
- __ movq(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
+ __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
}
} else {
Register temp = ToRegister(instr->temp());
__ Move(kScratchRegister, transition);
- __ movq(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
+ __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
// Update the write barrier for the map field.
__ RecordWriteField(object,
HeapObject::kMapOffset,
Register write_register = object;
if (!access.IsInobject()) {
write_register = ToRegister(instr->temp());
- __ movq(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
}
if (representation.IsSmi() &&
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
Register new_map_reg = ToRegister(instr->new_map_temp());
__ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
- __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
+ __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
// Write barrier.
ASSERT_NE(instr->temp(), NULL);
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
ASSERT(ToRegister(instr->context()).is(rsi));
PushSafepointRegistersScope scope(this);
if (!object_reg.is(rax)) {
- __ movq(rax, object_reg);
+ __ movp(rax, object_reg);
}
__ Move(rbx, to_map);
TransitionElementsKindStub stub(from_kind, to_kind);
__ j(above, deferred->entry());
__ movsxlq(char_code, char_code);
__ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ movq(result, FieldOperand(result,
+ __ movp(result, FieldOperand(result,
char_code, times_pointer_size,
FixedArray::kHeaderSize));
__ CompareRoot(result, Heap::kUndefinedValueRootIndex);
// They only call Runtime::kAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- if (!reg.is(rax)) __ movq(reg, rax);
+ if (!reg.is(rax)) __ movp(reg, rax);
// Done. Put the value in temp_xmm into the value of the allocated heap
// number.
// They only call Runtime::kAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ movq(kScratchRegister, rax);
+ __ movp(kScratchRegister, rax);
}
- __ movq(reg, kScratchRegister);
+ __ movp(reg, kScratchRegister);
}
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
Register input = ToRegister(instr->value());
- __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
+ __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
if (instr->hydrogen()->is_interval_check()) {
InstanceType first;
// conversions.
__ Cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, instr->environment());
- __ movq(input_reg, Immediate(0));
+ __ movp(input_reg, Immediate(0));
__ jmp(&done, Label::kNear);
// Heap number
int literal_offset =
FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
__ Move(rcx, instr->hydrogen()->literals());
- __ movq(rbx, FieldOperand(rcx, literal_offset));
+ __ movp(rbx, FieldOperand(rcx, literal_offset));
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &materialized, Label::kNear);
__ Push(instr->hydrogen()->pattern());
__ Push(instr->hydrogen()->flags());
CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
// Copy the content into the newly allocated memory.
// (Unroll copy loop once for better throughput).
for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ movq(rdx, FieldOperand(rbx, i));
- __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
- __ movq(FieldOperand(rax, i), rdx);
- __ movq(FieldOperand(rax, i + kPointerSize), rcx);
+ __ movp(rdx, FieldOperand(rbx, i));
+ __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
+ __ movp(FieldOperand(rax, i), rdx);
+ __ movp(FieldOperand(rax, i + kPointerSize), rcx);
}
if ((size % (2 * kPointerSize)) != 0) {
- __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
- __ movq(FieldOperand(rax, size - kPointerSize), rdx);
+ __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
+ __ movp(FieldOperand(rax, size - kPointerSize), rdx);
}
}
__ j(equal, true_label, true_distance);
__ JumpIfSmi(input, false_label, false_distance);
// Check for undetectable objects => true.
- __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
+ __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
__ testb(FieldOperand(input, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = not_zero;
void LCodeGen::EmitIsConstructCall(Register temp) {
// Get the frame pointer for the calling frame.
- __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
__ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &check_frame_marker, Label::kNear);
- __ movq(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kStackGuard);
RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
ASSERT(instr->HasEnvironment());
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ jmp(&use_cache, Label::kNear);
// Get the set of properties to enumerate.
__ jmp(&done, Label::kNear);
__ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
- __ movq(result,
+ __ movp(result,
FieldOperand(result, DescriptorArray::kEnumCacheOffset));
- __ movq(result,
+ __ movp(result,
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
Condition cc = masm()->CheckSmi(result);
__ SmiToInteger32(index, index);
__ cmpl(index, Immediate(0));
__ j(less, &out_of_object, Label::kNear);
- __ movq(object, FieldOperand(object,
+ __ movp(object, FieldOperand(object,
index,
times_pointer_size,
JSObject::kHeaderSize));
__ jmp(&done, Label::kNear);
__ bind(&out_of_object);
- __ movq(object, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
__ negl(index);
// Index is now equal to the out-of-object property index plus 1.
- __ movq(object, FieldOperand(object,
+ __ movp(object, FieldOperand(object,
index,
times_pointer_size,
FixedArray::kHeaderSize - kPointerSize));
Register src = cgen_->ToRegister(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
- __ movq(dst, src);
+ __ movp(dst, src);
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
- __ movq(dst, src);
+ __ movp(dst, src);
}
} else if (source->IsStackSlot()) {
Operand src = cgen_->ToOperand(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
- __ movq(dst, src);
+ __ movp(dst, src);
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
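// x64 has no memory-to-memory mov, so a stack-to-stack move has to bounce
// through kScratchRegister.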
- __ movq(kScratchRegister, src);
- __ movq(dst, kScratchRegister);
+ __ movp(kScratchRegister, src);
+ __ movp(dst, kScratchRegister);
}
} else if (source->IsConstantOperand()) {
} else if (cgen_->IsInteger32Constant(constant_source)) {
// Zero the top 32 bits of a 64-bit spill slot that holds a 32-bit untagged
// value.
- __ movq(dst, Immediate(cgen_->ToInteger32(constant_source)));
+ __ movp(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
__ Move(kScratchRegister, cgen_->ToHandle(constant_source));
- __ movq(dst, kScratchRegister);
+ __ movp(dst, kScratchRegister);
}
}
cgen_->ToRegister(source->IsRegister() ? source : destination);
Operand mem =
cgen_->ToOperand(source->IsRegister() ? destination : source);
- __ movq(kScratchRegister, mem);
- __ movq(mem, reg);
- __ movq(reg, kScratchRegister);
+ __ movp(kScratchRegister, mem);
+ __ movp(mem, reg);
+ __ movp(reg, kScratchRegister);
} else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
(source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
Operand src = cgen_->ToOperand(source);
Operand dst = cgen_->ToOperand(destination);
__ movsd(xmm0, src);
- __ movq(kScratchRegister, dst);
+ __ movp(kScratchRegister, dst);
__ movsd(dst, xmm0);
- __ movq(src, kScratchRegister);
+ __ movp(src, kScratchRegister);
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
// Swap two double registers.
intptr_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
- movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
+ movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
}
}
load_rax(source);
} else {
Move(kScratchRegister, source);
- movq(destination, Operand(kScratchRegister, 0));
+ movp(destination, Operand(kScratchRegister, 0));
}
}
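// External-reference accesses (this load and the store below) go through
// kRootRegister-relative addressing when the target lies within a 32-bit
// displacement of the root array; failing that, they fall back to the
// rax-based fast path or to an absolute address held in kScratchRegister.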
intptr_t delta = RootRegisterDelta(destination);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
- movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
+ movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
return;
}
}
store_rax(destination);
} else {
Move(kScratchRegister, destination);
- movq(Operand(kScratchRegister, 0), source);
+ movp(Operand(kScratchRegister, 0), source);
}
}
return size;
}
}
- // Size of movq(destination, src);
+ // Size of movp(destination, src);
return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
}
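// That constant is the encoded size of a movp of a full pointer-width
// immediate address into a register: REX prefix, opcode byte, and
// kPointerSize address bytes, i.e. presumably 2 + kPointerSize in total.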
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
ASSERT(root_array_available_);
- movq(destination, Operand(kRootRegister,
+ movp(destination, Operand(kRootRegister,
(index << kPointerSizeLog2) - kRootRegisterBias));
}
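// kRootRegister points kRootRegisterBias bytes past the start of the root
// list, presumably so that more slots are reachable with short
// displacements; the bias is subtracted back out here and in the indexed
// and store variants below.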
Register variable_offset,
int fixed_offset) {
ASSERT(root_array_available_);
- movq(destination,
+ movp(destination,
Operand(kRootRegister,
variable_offset, times_pointer_size,
(fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
ASSERT(root_array_available_);
- movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
+ movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
source);
}
// Load store buffer top.
LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
// Store pointer to buffer.
- movq(Operand(scratch, 0), addr);
+ movp(Operand(scratch, 0), addr);
// Increment buffer top.
addq(scratch, Immediate(kPointerSize));
// Write back new top of buffer.
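// (The store buffer records the addresses of recent pointer stores; the GC
// drains it to find references that may point into new space.)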
Register prev_limit_reg = rbx;
Register base_reg = r15;
Move(base_reg, next_address);
- movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
- movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
+ movp(prev_next_address_reg, Operand(base_reg, kNextOffset));
+ movp(prev_limit_reg, Operand(base_reg, kLimitOffset));
addl(Operand(base_reg, kLevelOffset), Immediate(1));
if (FLAG_log_timer_events) {
}
// Load the value from ReturnValue
- movq(rax, return_value_operand);
+ movp(rax, return_value_operand);
bind(&prologue);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
subl(Operand(base_reg, kLevelOffset), Immediate(1));
- movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
+ movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
j(not_equal, &delete_allocated_handles);
bind(&leave_exit_frame);
Register map = rcx;
JumpIfSmi(return_value, &ok, Label::kNear);
- movq(map, FieldOperand(return_value, HeapObject::kMapOffset));
+ movp(map, FieldOperand(return_value, HeapObject::kMapOffset));
CmpInstanceType(map, FIRST_NONSTRING_TYPE);
j(below, &ok, Label::kNear);
bool restore_context = context_restore_operand != NULL;
if (restore_context) {
- movq(rsi, *context_restore_operand);
+ movp(rsi, *context_restore_operand);
}
LeaveApiExitFrame(!restore_context);
ret(stack_space * kPointerSize);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
- movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
- movq(prev_limit_reg, rax);
+ movp(Operand(base_reg, kLimitOffset), prev_limit_reg);
+ movp(prev_limit_reg, rax);
LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
LoadAddress(rax,
ExternalReference::delete_handle_scope_extensions(isolate()));
call(rax);
- movq(rax, prev_limit_reg);
+ movp(rax, prev_limit_reg);
jmp(&leave_exit_frame);
}
void MacroAssembler::GetBuiltinFunction(Register target,
Builtins::JavaScript id) {
// Load the builtins object into target register.
- movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
- movq(target, FieldOperand(target,
+ movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ movp(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
+ movp(target, FieldOperand(target,
JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}
ASSERT(!target.is(rdi));
// Load the JavaScript builtin function from the builtins object.
GetBuiltinFunction(rdi, id);
- movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}
} else if (r.IsInteger32()) {
movl(dst, src);
} else {
- movq(dst, src);
+ movp(dst, src);
}
}
} else if (r.IsInteger32()) {
movl(dst, src);
} else {
- movq(dst, src);
+ movp(dst, src);
}
}
lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
break;
case 1:
- movq(dst, kSmiConstantRegister);
+ movp(dst, kSmiConstantRegister);
break;
case 0:
UNREACHABLE();
void MacroAssembler::SmiToInteger32(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
shr(dst, Immediate(kSmiShift));
}
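// With the x64 smi layout assumed here (kSmiTag == 0, kSmiShift == 32), the
// 32-bit payload lives in the upper half of the word, so a logical shift
// right untags it as a 32-bit integer. SmiToInteger64 below uses an
// arithmetic shift instead, so the payload is sign-extended to 64 bits.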
void MacroAssembler::SmiToInteger64(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
sar(dst, Immediate(kSmiShift));
}
return;
}
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
if (power < kSmiShift) {
sar(dst, Immediate(kSmiShift - power));
if (dst.is(src1) || dst.is(src2)) {
ASSERT(!src1.is(kScratchRegister));
ASSERT(!src2.is(kScratchRegister));
- movq(kScratchRegister, src1);
+ movp(kScratchRegister, src1);
or_(kScratchRegister, src2);
JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
- movq(dst, kScratchRegister);
+ movp(dst, kScratchRegister);
} else {
- movq(dst, src1);
+ movp(dst, src1);
or_(dst, src2);
JumpIfNotSmi(dst, on_not_smis, near_jump);
}
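// (Or-ing the two words leaves the smi tag bit clear only if it is clear in
// both inputs, so a single JumpIfNotSmi covers both values.)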
Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
STATIC_ASSERT(kSmiTag == 0);
// Test that both bits of the mask 0x8000000000000001 are zero.
- movq(kScratchRegister, src);
+ movp(kScratchRegister, src);
rol(kScratchRegister, Immediate(1));
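// After rotating left by one, the sign bit sits in bit 0 and the smi tag
// bit in bit 1, so a single testb against 3 checks both conditions at once
// (the two-register variant below uses the same trick).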
testb(kScratchRegister, Immediate(3));
return zero;
if (first.is(second)) {
return CheckNonNegativeSmi(first);
}
- movq(kScratchRegister, first);
+ movp(kScratchRegister, first);
or_(kScratchRegister, second);
rol(kScratchRegister, Immediate(1));
testl(kScratchRegister, Immediate(3));
void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
return;
} else if (dst.is(src)) {
Label::Distance near_jump) {
if (constant->value() == 0) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Label::Distance near_jump) {
if (constant->value() == 0) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
if (constant->value() == Smi::kMinValue) {
ASSERT(!dst.is(kScratchRegister));
- movq(dst, src);
+ movp(dst, src);
LoadSmiConstant(kScratchRegister, constant);
subq(dst, kScratchRegister);
j(overflow, bailout_label, near_jump);
Label::Distance near_jump) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
- movq(kScratchRegister, src);
+ movp(kScratchRegister, src);
neg(dst); // Low 32 bits are retained as zero by negation.
// Test if result is zero or Smi::kMinValue.
cmpq(dst, kScratchRegister);
j(not_equal, on_smi_result, near_jump);
- movq(src, kScratchRegister);
+ movp(src, kScratchRegister);
} else {
- movq(dst, src);
+ movp(dst, src);
neg(dst);
cmpq(dst, src);
// If the result is zero or Smi::kMinValue, negation failed to create a smi.
masm->jmp(on_not_smi_result, near_jump);
masm->bind(&done);
} else {
- masm->movq(dst, src1);
+ masm->movp(dst, src1);
masm->addq(dst, src2);
masm->j(overflow, on_not_smi_result, near_jump);
}
// overflowing is impossible.
if (!dst.is(src1)) {
if (emit_debug_code()) {
- movq(kScratchRegister, src1);
+ movp(kScratchRegister, src1);
addq(kScratchRegister, src2);
Check(no_overflow, kSmiAdditionOverflow);
}
masm->jmp(on_not_smi_result, near_jump);
masm->bind(&done);
} else {
- masm->movq(dst, src1);
+ masm->movp(dst, src1);
masm->subq(dst, src2);
masm->j(overflow, on_not_smi_result, near_jump);
}
// No overflow checking. Use only when it's known that
// overflowing is impossible (e.g., subtracting two positive smis).
if (!dst.is(src1)) {
- masm->movq(dst, src1);
+ masm->movp(dst, src1);
}
masm->subq(dst, src2);
masm->Assert(no_overflow, kSmiSubtractionOverflow);
if (dst.is(src1)) {
Label failure, zero_correct_result;
- movq(kScratchRegister, src1); // Create backup for later testing.
+ movp(kScratchRegister, src1); // Create backup for later testing.
SmiToInteger64(dst, src1);
imul(dst, src2);
j(overflow, &failure, Label::kNear);
testq(dst, dst);
j(not_zero, &correct_result, Label::kNear);
- movq(dst, kScratchRegister);
+ movp(dst, kScratchRegister);
xor_(dst, src2);
// Result was positive zero.
j(positive, &zero_correct_result, Label::kNear);
bind(&failure); // Reused failure exit, restores src1.
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
jmp(on_not_smi_result, near_jump);
bind(&zero_correct_result);
j(not_zero, &correct_result, Label::kNear);
// One of src1 and src2 is zero; check whether the other is
// negative.
- movq(kScratchRegister, src1);
+ movp(kScratchRegister, src1);
xor_(kScratchRegister, src2);
j(negative, on_not_smi_result, near_jump);
bind(&correct_result);
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
- movq(kScratchRegister, src1);
+ movp(kScratchRegister, src1);
}
SmiToInteger32(rax, src1);
// We need to rule out dividing Smi::kMinValue by -1, since that would
// overflow in idiv and raise an exception.
testq(src2, src2);
if (src1.is(rax)) {
j(positive, &safe_div, Label::kNear);
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
jmp(on_not_smi_result, near_jump);
} else {
j(negative, on_not_smi_result, near_jump);
if (src1.is(rax)) {
Label smi_result;
j(zero, &smi_result, Label::kNear);
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
jmp(on_not_smi_result, near_jump);
bind(&smi_result);
} else {
j(not_zero, on_not_smi_result, near_jump);
}
if (!dst.is(src1) && src1.is(rax)) {
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
}
Integer32ToSmi(dst, rax);
}
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
- movq(kScratchRegister, src1);
+ movp(kScratchRegister, src1);
}
SmiToInteger32(rax, src1);
SmiToInteger32(src2, src2);
// Retag inputs and go slow case.
Integer32ToSmi(src2, src2);
if (src1.is(rax)) {
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
}
jmp(on_not_smi_result, near_jump);
bind(&safe_div);
// Restore smi tags on inputs.
Integer32ToSmi(src2, src2);
if (src1.is(rax)) {
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
}
// Check for a negative zero result. If the result is zero, and the
// dividend is negative, go slow to return a floating point negative zero.
void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
ASSERT(!dst.is(src2));
if (!dst.is(src1)) {
- movq(dst, src1);
+ movp(dst, src1);
}
and_(dst, src2);
}
void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
ASSERT(!src1.is(src2));
- movq(dst, src1);
+ movp(dst, src1);
}
or_(dst, src2);
}
void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
ASSERT(!src1.is(src2));
- movq(dst, src1);
+ movp(dst, src1);
}
xor_(dst, src2);
}
Register src,
int shift_value) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
if (shift_value > 0) {
shl(dst, Immediate(shift_value));
if (dst.is(src)) {
UNIMPLEMENTED(); // Not used.
} else {
- movq(dst, src);
+ movp(dst, src);
if (shift_value == 0) {
testq(dst, dst);
j(negative, on_not_smi_result, near_jump);
ASSERT(!src2.is(kScratchRegister));
ASSERT(!dst.is(rcx));
if (src1.is(rcx)) {
- movq(kScratchRegister, src1);
+ movp(kScratchRegister, src1);
} else if (src2.is(rcx)) {
- movq(kScratchRegister, src2);
+ movp(kScratchRegister, src2);
}
if (!dst.is(src1)) {
- movq(dst, src1);
+ movp(dst, src1);
}
SmiToInteger32(rcx, src2);
orl(rcx, Immediate(kSmiShift));
sar_cl(dst); // Shift 32 + original rcx & 0x1f.
shl(dst, Immediate(kSmiShift));
if (src1.is(rcx)) {
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
} else if (src2.is(rcx)) {
- movq(src2, kScratchRegister);
+ movp(src2, kScratchRegister);
}
}
// kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
subq(kScratchRegister, Immediate(1));
// If src1 is a smi, the scratch register is now all 1s; otherwise all 0s.
- movq(dst, src1);
+ movp(dst, src1);
xor_(dst, src2);
and_(dst, kScratchRegister);
// If src1 is a smi, dst holds src1 ^ src2, else it is zero.
void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
- movq(scratch, src);
+ movp(scratch, src);
// High bits.
shr(src, Immediate(64 - kSmiShift));
shl(src, Immediate(kSmiShift));
Register index = scratch;
Register probe = mask;
- movq(probe,
+ movp(probe,
FieldOperand(number_string_cache,
index,
times_1,
// Get the result from the cache.
bind(&load_result_from_cache);
- movq(result,
+ movp(result,
FieldOperand(number_string_cache,
index,
times_1,
j(either_smi, on_fail, near_jump);
// Load instance type for both strings.
- movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
- movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
+ movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
+ movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
Label* on_fail,
Label::Distance near_jump) {
// Load instance type for both strings.
- movq(scratch1, first_object_instance_type);
- movq(scratch2, second_object_instance_type);
+ movp(scratch1, first_object_instance_type);
+ movp(scratch2, second_object_instance_type);
// Check that both are flat ASCII strings.
ASSERT(kNotStringTag != 0);
void MacroAssembler::Move(Register dst, Register src) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
}
Move(dst, Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- movq(dst, kScratchRegister);
+ movp(dst, kScratchRegister);
}
}
if (isolate()->heap()->InNewSpace(*object)) {
Handle<Cell> cell = isolate()->factory()->NewCell(object);
Move(result, cell, RelocInfo::CELL);
- movq(result, Operand(result, 0));
+ movp(result, Operand(result, 0));
} else {
Move(result, object, RelocInfo::EMBEDDED_OBJECT);
}
load_rax(cell.location(), RelocInfo::CELL);
} else {
Move(dst, cell, RelocInfo::CELL);
- movq(dst, Operand(dst, 0));
+ movp(dst, Operand(dst, 0));
}
}
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
const Immediate& imm) {
- movq(SafepointRegisterSlot(dst), imm);
+ movp(SafepointRegisterSlot(dst), imm);
}
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
- movq(SafepointRegisterSlot(dst), src);
+ movp(SafepointRegisterSlot(dst), src);
}
void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
- movq(dst, SafepointRegisterSlot(src));
+ movp(dst, SafepointRegisterSlot(src));
}
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
push(ExternalOperand(handler_address));
// Set this new handler as the current one.
- movq(ExternalOperand(handler_address), rsp);
+ movp(ExternalOperand(handler_address), rsp);
}
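// (Handlers form a singly linked list threaded through the stack: the old
// list head was pushed into this handler record above, and rsp, which now
// points at the record, becomes the new head.)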
// Compute the handler entry address and jump to it. The handler table is
// a fixed array of (smi-tagged) code offsets.
// rax = exception, rdi = code object, rdx = state.
- movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
+ movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
shr(rdx, Immediate(StackHandler::kKindWidth));
- movq(rdx,
+ movp(rdx,
FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
SmiToInteger64(rdx, rdx);
lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
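// (Shifting out the kind bits leaves the handler-table index; the smi entry
// at that index is the handler's byte offset within the code object, which
// the lea above folds into rdi.)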
// The exception is expected in rax.
if (!value.is(rax)) {
- movq(rax, value);
+ movp(rax, value);
}
// Drop the stack pointer to the top of the top handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- movq(rsp, ExternalOperand(handler_address));
+ movp(rsp, ExternalOperand(handler_address));
// Restore the next handler.
pop(ExternalOperand(handler_address));
Label skip;
testq(rsi, rsi);
j(zero, &skip, Label::kNear);
- movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
bind(&skip);
JumpToHandlerEntry();
// The exception is expected in rax.
if (!value.is(rax)) {
- movq(rax, value);
+ movp(rax, value);
}
// Drop the stack pointer to the top of the top stack handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
Label fetch_next, check_kind;
jmp(&check_kind, Label::kNear);
bind(&fetch_next);
- movq(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
+ movp(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
bind(&check_kind);
STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
void MacroAssembler::CmpObjectType(Register heap_object,
InstanceType type,
Register map) {
- movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
CmpInstanceType(map, type);
}
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
- movq(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
+ movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
}
void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
- movq(dst, FieldOperand(map, Map::kBitField3Offset));
+ movp(dst, FieldOperand(map, Map::kBitField3Offset));
DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}
void MacroAssembler::EnumLength(Register dst, Register map) {
STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- movq(dst, FieldOperand(map, Map::kBitField3Offset));
+ movp(dst, FieldOperand(map, Map::kBitField3Offset));
Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask));
and_(dst, kScratchRegister);
}
testb(object, Immediate(kSmiTagMask));
Check(not_equal, kOperandIsASmiAndNotAString);
push(object);
- movq(object, FieldOperand(object, HeapObject::kMapOffset));
+ movp(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, FIRST_NONSTRING_TYPE);
pop(object);
Check(below, kOperandIsNotAString);
testb(object, Immediate(kSmiTagMask));
Check(not_equal, kOperandIsASmiAndNotAName);
push(object);
- movq(object, FieldOperand(object, HeapObject::kMapOffset));
+ movp(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, LAST_NAME_TYPE);
pop(object);
Check(below_equal, kOperandIsNotAName);
Condition MacroAssembler::IsObjectStringType(Register heap_object,
Register map,
Register instance_type) {
- movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
STATIC_ASSERT(kNotStringTag != 0);
testb(instance_type, Immediate(kIsNotStringMask));
Condition MacroAssembler::IsObjectNameType(Register heap_object,
Register map,
Register instance_type) {
- movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
return below_equal;
j(not_equal, miss);
if (miss_on_bound_function) {
- movq(kScratchRegister,
+ movp(kScratchRegister,
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
// It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
// field).
j(not_zero, &non_instance, Label::kNear);
// Get the prototype or initial map from the function.
- movq(result,
+ movp(result,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
// If the prototype or initial map is the hole, don't return it and
j(not_equal, &done, Label::kNear);
// Get the prototype from the initial map.
- movq(result, FieldOperand(result, Map::kPrototypeOffset));
+ movp(result, FieldOperand(result, Map::kPrototypeOffset));
jmp(&done, Label::kNear);
// Non-instance prototype: Fetch prototype from constructor field
// in initial map.
bind(&non_instance);
- movq(result, FieldOperand(result, Map::kConstructorOffset));
+ movp(result, FieldOperand(result, Map::kConstructorOffset));
// All done.
bind(&done);
ASSERT(flag == JUMP_FUNCTION || has_frame());
ASSERT(function.is(rdi));
- movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
+ movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
movsxlq(rbx,
FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
// Advances rdx to the end of the Code object header, to the start of
// the executable code.
- movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
ParameterCount expected(rbx);
InvokeCode(rdx, expected, actual, flag, call_wrapper);
ASSERT(flag == JUMP_FUNCTION || has_frame());
ASSERT(function.is(rdi));
- movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
+ movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
// Advances rdx to the end of the Code object header, to the start of
// the executable code.
- movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
InvokeCode(rdx, expected, actual, flag, call_wrapper);
}
Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
} else if (!code_register.is(rdx)) {
- movq(rdx, code_register);
+ movp(rdx, code_register);
}
if (flag == CALL_FUNCTION) {
void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
if (frame_mode == BUILD_STUB_FRAME) {
push(rbp); // Caller's frame pointer.
- movq(rbp, rsp);
+ movp(rbp, rsp);
push(rsi); // Callee's context.
Push(Smi::FromInt(StackFrame::STUB));
} else {
Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
} else {
push(rbp); // Caller's frame pointer.
- movq(rbp, rsp);
+ movp(rbp, rsp);
push(rsi); // Callee's context.
push(rdi); // Callee's JS function.
}
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(rbp);
- movq(rbp, rsp);
+ movp(rbp, rsp);
push(rsi); // Context.
Push(Smi::FromInt(type));
Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
Check(equal, kStackFrameTypesMustMatch);
}
- movq(rsp, rbp);
+ movp(rsp, rbp);
pop(rbp);
}
ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
push(rbp);
- movq(rbp, rsp);
+ movp(rbp, rsp);
// Reserve room for entry stack pointer and push the code object.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
// Save the frame pointer and the context in top.
if (save_rax) {
- movq(r14, rax); // Backup rax in callee-save register.
+ movp(r14, rax); // Backup rax in callee-save register.
}
Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
}
// Patch the saved entry sp.
- movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
+ movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
}
}
}
// Get the return address from the stack and restore the frame pointer.
- movq(rcx, Operand(rbp, kFPOnStackSize));
- movq(rbp, Operand(rbp, 0 * kPointerSize));
+ movp(rcx, Operand(rbp, kFPOnStackSize));
+ movp(rbp, Operand(rbp, 0 * kPointerSize));
// Drop everything up to and including the arguments and the receiver
// from the caller stack.
void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
- movq(rsp, rbp);
+ movp(rsp, rbp);
pop(rbp);
LeaveExitFrameEpilogue(restore_context);
ExternalReference context_address(Isolate::kContextAddress, isolate());
Operand context_operand = ExternalOperand(context_address);
if (restore_context) {
- movq(rsi, context_operand);
+ movp(rsi, context_operand);
}
#ifdef DEBUG
- movq(context_operand, Immediate(0));
+ movp(context_operand, Immediate(0));
#endif
// Clear the top frame.
ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
isolate());
Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
- movq(c_entry_fp_operand, Immediate(0));
+ movp(c_entry_fp_operand, Immediate(0));
}
ASSERT(!holder_reg.is(scratch));
ASSERT(!scratch.is(kScratchRegister));
// Load current lexical context from the stack frame.
- movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
+ movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
// Load the native context of the current context.
int offset =
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- movq(scratch, FieldOperand(scratch, offset));
- movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ movp(scratch, FieldOperand(scratch, offset));
+ movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
// Check the context is a native context.
if (emit_debug_code()) {
if (emit_debug_code()) {
// Preserve original value of holder_reg.
push(holder_reg);
- movq(holder_reg,
+ movp(holder_reg,
FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
CompareRoot(holder_reg, Heap::kNullValueRootIndex);
Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
// Read the first word and compare to native_context_map(),
- movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
+ movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
pop(holder_reg);
}
- movq(kScratchRegister,
+ movp(kScratchRegister,
FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
int token_offset =
Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
- movq(scratch, FieldOperand(scratch, token_offset));
+ movp(scratch, FieldOperand(scratch, token_offset));
cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
j(not_equal, miss);
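// (Access is permitted only when the two native contexts carry the same
// security token; any mismatch falls through to the miss label.)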
// Generate an unrolled loop that performs a few probes before giving up.
for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use r2 for index calculations and keep the hash intact in r0.
- movq(r2, r0);
+ movp(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
// Get the value at the masked, scaled index.
const int kValueOffset =
SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+ movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
// and keep address in scratch until call to UpdateAllocationTopHelper.
if (scratch.is_valid()) {
LoadAddress(scratch, allocation_top);
- movq(result, Operand(scratch, 0));
+ movp(result, Operand(scratch, 0));
} else {
Load(result, allocation_top);
}
// Update new top.
if (scratch.is_valid()) {
// Scratch already contains address of allocation top.
- movq(Operand(scratch, 0), result_end);
+ movp(Operand(scratch, 0), result_end);
} else {
Store(allocation_top, result_end);
}
Register top_reg = result_end.is_valid() ? result_end : result;
if (!top_reg.is(result)) {
- movq(top_reg, result);
+ movp(top_reg, result);
}
addq(top_reg, Immediate(object_size));
j(carry, gc_required);
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
if (!object_size.is(result_end)) {
- movq(result_end, object_size);
+ movp(result_end, object_size);
}
addq(result_end, result);
j(carry, gc_required);
cmpq(object, top_operand);
Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
- movq(top_operand, object);
+ movp(top_operand, object);
}
// Set the map.
LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
// Set the map, length and hash field.
LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
Integer32ToSmi(scratch1, length);
- movq(FieldOperand(result, String::kLengthOffset), scratch1);
- movq(FieldOperand(result, String::kHashFieldOffset),
+ movp(FieldOperand(result, String::kLengthOffset), scratch1);
+ movp(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
// Set the map, length and hash field.
LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
Integer32ToSmi(scratch1, length);
- movq(FieldOperand(result, String::kLengthOffset), scratch1);
- movq(FieldOperand(result, String::kHashFieldOffset),
+ movp(FieldOperand(result, String::kLengthOffset), scratch1);
+ movp(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
// Because source is 8-byte aligned in our uses of this function,
// we keep source aligned for the rep movs operation by copying the odd bytes
// at the end of the ranges.
- movq(scratch, length);
+ movp(scratch, length);
shrl(length, Immediate(kPointerSizeLog2));
repmovsq();
// Copy the remaining (length & (kPointerSize - 1)) bytes with one final
// pointer-size move that may overlap bytes already copied, which is safe.
andl(scratch, Immediate(kPointerSize - 1));
- movq(length, Operand(source, scratch, times_1, -kPointerSize));
- movq(Operand(destination, scratch, times_1, -kPointerSize), length);
+ movp(length, Operand(source, scratch, times_1, -kPointerSize));
+ movp(Operand(destination, scratch, times_1, -kPointerSize), length);
addq(destination, scratch);
if (min_length <= kLongStringLimit) {
jmp(&done, Label::kNear);
bind(&len24);
- movq(scratch, Operand(source, 2 * kPointerSize));
- movq(Operand(destination, 2 * kPointerSize), scratch);
+ movp(scratch, Operand(source, 2 * kPointerSize));
+ movp(Operand(destination, 2 * kPointerSize), scratch);
bind(&len16);
- movq(scratch, Operand(source, kPointerSize));
- movq(Operand(destination, kPointerSize), scratch);
+ movp(scratch, Operand(source, kPointerSize));
+ movp(Operand(destination, kPointerSize), scratch);
bind(&len8);
- movq(scratch, Operand(source, 0));
- movq(Operand(destination, 0), scratch);
+ movp(scratch, Operand(source, 0));
+ movp(Operand(destination, 0), scratch);
// Move the last kPointerSize bytes of the range; this copy may overlap
// bytes already moved above, which is harmless and covers any unaligned tail.
- movq(scratch, Operand(source, length, times_1, -kPointerSize));
- movq(Operand(destination, length, times_1, -kPointerSize), scratch);
+ movp(scratch, Operand(source, length, times_1, -kPointerSize));
+ movp(Operand(destination, length, times_1, -kPointerSize), scratch);
addq(destination, length);
jmp(&done, Label::kNear);
Label loop, entry;
jmp(&entry);
bind(&loop);
- movq(Operand(start_offset, 0), filler);
+ movp(Operand(start_offset, 0), filler);
addq(start_offset, Immediate(kPointerSize));
bind(&entry);
cmpq(start_offset, end_offset);
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
- movq(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
for (int i = 1; i < context_chain_length; i++) {
- movq(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
} else {
// Slot is in the current function context. Move it into the
// destination register in case we store into it (the write barrier
// cannot be allowed to destroy the context in rsi).
- movq(dst, rsi);
+ movp(dst, rsi);
}
// We should not have found a with context by walking the context
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
- movq(scratch,
+ movp(scratch,
Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
- movq(scratch, Operand(scratch,
+ movp(scratch, Operand(scratch,
Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
int offset = expected_kind * kPointerSize +
// Use the transitioned cached map.
offset = transitioned_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
- movq(map_in_out, FieldOperand(scratch, offset));
+ movp(map_in_out, FieldOperand(scratch, offset));
}
Register map_out, bool can_have_holes) {
ASSERT(!function_in.is(map_out));
Label done;
- movq(map_out, FieldOperand(function_in,
+ movp(map_out, FieldOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
- movq(function,
+ movp(function,
Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
- movq(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
+ movp(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
// Load the function from the native context.
- movq(function, Operand(function, Context::SlotOffset(index)));
+ movp(function, Operand(function, Context::SlotOffset(index)));
}
void MacroAssembler::LoadArrayFunction(Register function) {
- movq(function,
+ movp(function,
Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
- movq(function,
+ movp(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
+ movp(function,
Operand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
}
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map) {
// Load the initial map. The global functions all have initial maps.
- movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
if (emit_debug_code()) {
Label ok, fail;
CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
bind(&is_object);
push(value);
- movq(value, FieldOperand(string, HeapObject::kMapOffset));
+ movp(value, FieldOperand(string, HeapObject::kMapOffset));
movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
ASSERT(num_arguments >= 0);
// Make stack end at alignment and allocate space for arguments and old rsp.
- movq(kScratchRegister, rsp);
+ movp(kScratchRegister, rsp);
ASSERT(IsPowerOf2(frame_alignment));
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
subq(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
and_(rsp, Immediate(-frame_alignment));
- movq(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
+ movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
}
ASSERT(num_arguments >= 0);
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
- movq(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
+ movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
}
if (scratch.is(object)) {
and_(scratch, Immediate(~Page::kPageAlignmentMask));
} else {
- movq(scratch, Immediate(~Page::kPageAlignmentMask));
+ movp(scratch, Immediate(~Page::kPageAlignmentMask));
and_(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
Label* if_deprecated) {
if (map->CanBeDeprecated()) {
Move(scratch, map);
- movq(scratch, FieldOperand(scratch, Map::kBitField3Offset));
+ movp(scratch, FieldOperand(scratch, Map::kBitField3Offset));
SmiToInteger32(scratch, scratch);
and_(scratch, Immediate(Map::Deprecated::kMask));
j(not_zero, if_deprecated);
ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
// The mask_scratch register contains a 1 at the position of the first bit
// and a 0 at all other positions, including the position of the second bit.
- movq(rcx, mask_scratch);
+ movp(rcx, mask_scratch);
// Make rcx into a mask that covers both marking bits using the operation
// rcx = mask | (mask << 1).
lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
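// (lea computes mask + 2 * mask = 3 * mask, which for a one-bit mask equals
// mask | (mask << 1).)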
Label* not_data_object,
Label::Distance not_data_object_distance) {
Label is_data_object;
- movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
+ movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
j(equal, &is_data_object, Label::kNear);
ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
Register bitmap_reg,
Register mask_reg) {
ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
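// Compute, from addr_reg, the address of the marking-bitmap cell covering
// that word (into bitmap_reg) and a one-bit mask for the word's position
// within the cell (into mask_reg).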
- movq(bitmap_reg, addr_reg);
+ movp(bitmap_reg, addr_reg);
// Sign-extended 32-bit immediate.
and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
- movq(rcx, addr_reg);
+ movp(rcx, addr_reg);
int shift =
Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
shrl(rcx, Immediate(shift));
~(Bitmap::kBytesPerCell - 1)));
addq(bitmap_reg, rcx);
- movq(rcx, addr_reg);
+ movp(rcx, addr_reg);
shrl(rcx, Immediate(kPointerSizeLog2));
and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
movl(mask_reg, Immediate(1));
Label is_data_object;
// Check for heap-number
- movq(map, FieldOperand(value, HeapObject::kMapOffset));
+ movp(map, FieldOperand(value, HeapObject::kMapOffset));
CompareRoot(map, Heap::kHeapNumberMapRootIndex);
j(not_equal, &not_heap_number, Label::kNear);
- movq(length, Immediate(HeapNumber::kSize));
+ movp(length, Immediate(HeapNumber::kSize));
jmp(&is_data_object, Label::kNear);
bind(&not_heap_number);
ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
testb(instance_type, Immediate(kExternalStringTag));
j(zero, &not_external, Label::kNear);
- movq(length, Immediate(ExternalString::kSize));
+ movp(length, Immediate(ExternalString::kSize));
jmp(&is_data_object, Label::kNear);
bind(&not_external);
Label next, start;
Register empty_fixed_array_value = r8;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- movq(rcx, rax);
+ movp(rcx, rax);
// Check if the enum length field is properly initialized, indicating that
// there is an enum cache.
- movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
+ movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
EnumLength(rdx, rbx);
Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
bind(&next);
- movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
+ movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
// For all objects but the receiver, check that the cache is empty.
EnumLength(rdx, rbx);
FieldOperand(rcx, JSObject::kElementsOffset));
j(not_equal, call_runtime);
- movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+ movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
cmpq(rcx, null_value);
j(not_equal, &next);
}
Register current = scratch0;
Label loop_again;
- movq(current, object);
+ movp(current, object);
// Loop based on the map going up the prototype chain.
bind(&loop_again);
- movq(current, FieldOperand(current, HeapObject::kMapOffset));
- movq(scratch1, FieldOperand(current, Map::kBitField2Offset));
+ movp(current, FieldOperand(current, HeapObject::kMapOffset));
+ movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
and_(scratch1, Immediate(Map::kElementsKindMask));
shr(scratch1, Immediate(Map::kElementsKindShift));
cmpq(scratch1, Immediate(DICTIONARY_ELEMENTS));
j(equal, found);
- movq(current, FieldOperand(current, Map::kPrototypeOffset));
+ movp(current, FieldOperand(current, Map::kPrototypeOffset));
CompareRoot(current, Heap::kNullValueRootIndex);
j(not_equal, &loop_again);
}
void Move(const Operand& dst, Smi* source) {
Register constant = GetSmiConstant(source);
- movq(dst, constant);
+ movp(dst, constant);
}
void Push(Smi* smi);
__ j(below, &loop);
// Compute new value of character position after the matched part.
- __ movq(rdi, r11);
+ __ movp(rdi, r11);
__ subq(rdi, rsi);
} else {
ASSERT(mode_ == UC16);
// Set byte_offset2.
__ lea(rdx, Operand(rsi, rdi, times_1, 0));
// Set byte_length.
- __ movq(r8, rbx);
+ __ movp(r8, rbx);
// Isolate.
__ LoadAddress(r9, ExternalReference::isolate_address(isolate()));
#else // AMD64 calling convention
// Compute and set byte_offset1 (start of capture).
__ lea(rdi, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
- __ movq(rsi, rax);
+ __ movp(rsi, rax);
// Set byte_length.
- __ movq(rdx, rbx);
+ __ movp(rdx, rbx);
// Isolate.
__ LoadAddress(rcx, ExternalReference::isolate_address(isolate()));
#endif
// Success.
// Set current character position to position after match.
- __ movq(rdi, rbx);
+ __ movp(rdi, rbx);
__ subq(rdi, rsi);
__ bind(&fallthrough);
__ Move(rax, table);
Register index = current_character();
if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
- __ movq(rbx, current_character());
+ __ movp(rbx, current_character());
__ and_(rbx, Immediate(kTableMask));
index = rbx;
}
// Actually emit code to start a new stack frame.
__ push(rbp);
- __ movq(rbp, rsp);
+ __ movp(rbp, rsp);
// Save parameters and callee-save registers. Order here should correspond
// to order of kBackup_ebx etc.
#ifdef _WIN64
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
- __ movq(rcx, rsp);
+ __ movp(rcx, rsp);
__ Move(kScratchRegister, stack_limit);
__ subq(rcx, Operand(kScratchRegister, 0));
// Handle it if the stack pointer is already below the stack limit.
// Allocate space on stack for registers.
__ subq(rsp, Immediate(num_registers_ * kPointerSize));
// Load string length.
- __ movq(rsi, Operand(rbp, kInputEnd));
+ __ movp(rsi, Operand(rbp, kInputEnd));
// Load input position.
- __ movq(rdi, Operand(rbp, kInputStart));
+ __ movp(rdi, Operand(rbp, kInputStart));
// Set up rdi to be negative offset from string end.
__ subq(rdi, rsi);
// Set rax to address of char before start of the string
// (effectively string position -1).
- __ movq(rbx, Operand(rbp, kStartIndex));
+ __ movp(rbx, Operand(rbp, kStartIndex));
__ neg(rbx);
if (mode_ == UC16) {
__ lea(rax, Operand(rdi, rbx, times_2, -char_size()));
}
// Store this value in a local variable, for use when clearing
// position registers.
- __ movq(Operand(rbp, kInputStartMinusOne), rax);
+ __ movp(Operand(rbp, kInputStartMinusOne), rax);
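// (rax holds the address one character before the string start; capture
// registers cleared to this value read back as "no match".)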
#if V8_OS_WIN
// Ensure that we have written to each stack page, in order. Skipping a page
// on Windows can cause segmentation faults. Assuming page size is 4k.
for (int i = num_saved_registers_ + kRegistersPerPage - 1;
i < num_registers_;
i += kRegistersPerPage) {
- __ movq(register_location(i), rax); // One write every page.
+ __ movp(register_location(i), rax); // One write every page.
}
#endif // V8_OS_WIN
__ Set(rcx, kRegisterZero);
Label init_loop;
__ bind(&init_loop);
- __ movq(Operand(rbp, rcx, times_1, 0), rax);
+ __ movp(Operand(rbp, rcx, times_1, 0), rax);
__ subq(rcx, Immediate(kPointerSize));
__ cmpq(rcx,
Immediate(kRegisterZero - num_saved_registers_ * kPointerSize));
__ j(greater, &init_loop);
} else { // Unroll the loop.
for (int i = 0; i < num_saved_registers_; i++) {
- __ movq(register_location(i), rax);
+ __ movp(register_location(i), rax);
}
}
}
// Initialize backtrack stack pointer.
- __ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
+ __ movp(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
__ jmp(&start_label_);
__ bind(&success_label_);
if (num_saved_registers_ > 0) {
// copy captures to output
- __ movq(rdx, Operand(rbp, kStartIndex));
- __ movq(rbx, Operand(rbp, kRegisterOutput));
- __ movq(rcx, Operand(rbp, kInputEnd));
+ __ movp(rdx, Operand(rbp, kStartIndex));
+ __ movp(rbx, Operand(rbp, kRegisterOutput));
+ __ movp(rcx, Operand(rbp, kInputEnd));
__ subq(rcx, Operand(rbp, kInputStart));
if (mode_ == UC16) {
__ lea(rcx, Operand(rcx, rdx, times_2, 0));
__ movq(rax, register_location(i));
if (i == 0 && global_with_zero_length_check()) {
// Keep capture start in rdx for the zero-length check later.
- __ movq(rdx, rax);
+ __ movp(rdx, rax);
}
__ addq(rax, rcx); // Convert to index from start, not end.
if (mode_ == UC16) {
__ cmpq(rcx, Immediate(num_saved_registers_));
__ j(less, &exit_label_);
- __ movq(Operand(rbp, kNumOutputRegisters), rcx);
+ __ movp(Operand(rbp, kNumOutputRegisters), rcx);
// Advance the location for output.
__ addq(Operand(rbp, kRegisterOutput),
Immediate(num_saved_registers_ * kIntSize));
// Prepare rax to initialize registers with its value in the next run.
- __ movq(rax, Operand(rbp, kInputStartMinusOne));
+ __ movp(rax, Operand(rbp, kInputStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
__ jmp(&load_char_start_regexp);
} else {
- __ movq(rax, Immediate(SUCCESS));
+ __ movp(rax, Immediate(SUCCESS));
}
}
__ bind(&exit_label_);
if (global()) {
// Return the number of successful captures.
- __ movq(rax, Operand(rbp, kSuccessfulCaptures));
+ __ movp(rax, Operand(rbp, kSuccessfulCaptures));
}
__ bind(&return_rax);
// Stack now at rbp.
#else
// Restore callee save register.
- __ movq(rbx, Operand(rbp, kBackup_rbx));
+ __ movp(rbx, Operand(rbp, kBackup_rbx));
// Reset rsp to rbp.
- __ movq(rsp, rbp);
+ __ movp(rsp, rbp);
#endif
// Exit function frame, restore previous one.
__ pop(rbp);
__ pop(rdi);
__ pop(backtrack_stackpointer());
// String might have moved: Reload rsi from frame.
- __ movq(rsi, Operand(rbp, kInputEnd));
+ __ movp(rsi, Operand(rbp, kInputEnd));
SafeReturn();
}
__ LoadAddress(r8, ExternalReference::isolate_address(isolate()));
#else
// AMD64 ABI passes parameters in rdi, rsi, rdx.
- __ movq(rdi, backtrack_stackpointer()); // First argument.
+ __ movp(rdi, backtrack_stackpointer()); // First argument.
__ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
__ LoadAddress(rdx, ExternalReference::isolate_address(isolate()));
#endif
__ testq(rax, rax);
__ j(equal, &exit_with_exception);
// Otherwise use return value as new stack pointer.
- __ movq(backtrack_stackpointer(), rax);
+ __ movp(backtrack_stackpointer(), rax);
// Restore saved registers and continue.
__ Move(code_object_pointer(), masm_.CodeObject());
#ifndef _WIN64
void RegExpMacroAssemblerX64::PopRegister(int register_index) {
Pop(rax);
- __ movq(register_location(register_index), rax);
+ __ movp(register_location(register_index), rax);
}
void RegExpMacroAssemblerX64::PushRegister(int register_index,
StackCheckFlag check_stack_limit) {
- __ movq(rax, register_location(register_index));
+ __ movp(rax, register_location(register_index));
Push(rax);
if (check_stack_limit) CheckStackLimit();
}
void RegExpMacroAssemblerX64::SetRegister(int register_index, int to) {
ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
- __ movq(register_location(register_index), Immediate(to));
+ __ movp(register_location(register_index), Immediate(to));
}
void RegExpMacroAssemblerX64::WriteCurrentPositionToRegister(int reg,
int cp_offset) {
if (cp_offset == 0) {
- __ movq(register_location(reg), rdi);
+ __ movp(register_location(reg), rdi);
} else {
__ lea(rax, Operand(rdi, cp_offset * char_size()));
- __ movq(register_location(reg), rax);
+ __ movp(register_location(reg), rax);
}
}
void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) {
ASSERT(reg_from <= reg_to);
- __ movq(rax, Operand(rbp, kInputStartMinusOne));
+ __ movp(rax, Operand(rbp, kInputStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
- __ movq(register_location(reg), rax);
+ __ movp(register_location(reg), rax);
}
}
void RegExpMacroAssemblerX64::WriteStackPointerToRegister(int reg) {
- __ movq(rax, backtrack_stackpointer());
+ __ movp(rax, backtrack_stackpointer());
__ subq(rax, Operand(rbp, kStackHighEnd));
- __ movq(register_location(reg), rax);
+ __ movp(register_location(reg), rax);
}
__ PrepareCallCFunction(num_arguments);
#ifdef _WIN64
// Second argument: Code* of self. (Do this before overwriting r8).
- __ movq(rdx, code_object_pointer());
+ __ movp(rdx, code_object_pointer());
// Third argument: RegExp code frame pointer.
- __ movq(r8, rbp);
+ __ movp(r8, rbp);
// First argument: Next address on the stack (will be address of
// return address).
__ lea(rcx, Operand(rsp, -kPointerSize));
#else
// Third argument: RegExp code frame pointer.
- __ movq(rdx, rbp);
+ __ movp(rdx, rbp);
// Second argument: Code* of self.
- __ movq(rsi, code_object_pointer());
+ __ movp(rsi, code_object_pointer());
// First argument: Next address on the stack (will be address of
// return address).
__ lea(rdi, Operand(rsp, -kPointerSize));
// Get the map entry from the cache.
// Use key_offset + kPointerSize * 2, rather than loading map_offset.
- __ movq(kScratchRegister,
+ __ movp(kScratchRegister,
Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
__ cmpq(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
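// The "+ kPointerSize * 2" works because each stub-cache entry is a
// contiguous triple; a sketch of the layout implied by the comment above:
struct Entry {
  Name* key;    // at key_offset
  Code* value;  // at key_offset + kPointerSize
  Map* map;     // at key_offset + 2 * kPointerSize, compared above
};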
// Get the code entry from the cache.
__ LoadAddress(kScratchRegister, value_offset);
- __ movq(kScratchRegister,
+ __ movp(kScratchRegister,
Operand(kScratchRegister, offset, scale_factor, 0));
// Check that the flags match what we're looking for.
__ IncrementCounter(counters->negative_lookups(), 1);
__ IncrementCounter(counters->negative_lookups_miss(), 1);
- __ movq(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movp(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
const int kInterceptorOrAccessCheckNeededMask =
(1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
// Load properties array.
Register properties = scratch0;
- __ movq(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ __ movp(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
// Check that the properties array is a dictionary.
__ CompareRoot(FieldOperand(properties, HeapObject::kMapOffset),
int index,
Register prototype) {
// Load the global or builtins object from the current context.
- __ movq(prototype,
+ __ movp(prototype,
Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
- __ movq(prototype,
+ __ movp(prototype,
FieldOperand(prototype, GlobalObject::kNativeContextOffset));
// Load the function from the native context.
- __ movq(prototype, Operand(prototype, Context::SlotOffset(index)));
+ __ movp(prototype, Operand(prototype, Context::SlotOffset(index)));
// Load the initial map. The global functions all have initial maps.
- __ movq(prototype,
+ __ movp(prototype,
FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
// Load the prototype from the initial map.
- __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+ __ movp(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
}
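// The five loads above walk this chain at the object level (a sketch,
// assuming the usual accessor names on Context, JSFunction and Map):
Object* GetGlobalFunctionPrototype(Context* context, int index) {
  JSFunction* fn = JSFunction::cast(
      context->global_object()->native_context()->get(index));
  return fn->initial_map()->prototype();
}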
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
- __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+ __ movp(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
}
__ j(not_equal, miss_label);
// Load length directly from the JS array.
- __ movq(rax, FieldOperand(receiver, JSArray::kLengthOffset));
+ __ movp(rax, FieldOperand(receiver, JSArray::kLengthOffset));
__ ret(0);
}
__ JumpIfSmi(receiver, smi);
// Check that the object is a string.
- __ movq(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movp(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ movzxbq(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
STATIC_ASSERT(kNotStringTag != 0);
__ testl(scratch, Immediate(kNotStringTag));
GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
// Load length directly from the string.
- __ movq(rax, FieldOperand(receiver, String::kLengthOffset));
+ __ movp(rax, FieldOperand(receiver, String::kLengthOffset));
__ ret(0);
// Check if the object is a JSValue wrapper.
// Check if the wrapped value is a string and load the length
// directly if it is.
- __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+ __ movp(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
+ __ movp(rax, FieldOperand(scratch2, String::kLengthOffset));
__ ret(0);
}
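// Object-level equivalent of the fast string-length path above (a sketch;
// a JSValue wrapper is unwrapped first, and a non-string value misses):
int LoadStringLength(Object* receiver) {
  if (receiver->IsString()) return String::cast(receiver)->length();
  Object* value = JSValue::cast(receiver)->value();  // wrapper case
  return String::cast(value)->length();
}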
Register scratch,
Label* miss_label) {
__ TryGetFunctionPrototype(receiver, result, miss_label);
- if (!result.is(rax)) __ movq(rax, result);
+ if (!result.is(rax)) __ movp(rax, result);
__ ret(0);
}
if (!inobject) {
// Calculate the offset into the properties array.
offset = offset + FixedArray::kHeaderSize;
- __ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset));
+ __ movp(dst, FieldOperand(src, JSObject::kPropertiesOffset));
src = dst;
}
- __ movq(dst, FieldOperand(src, offset));
+ __ movp(dst, FieldOperand(src, offset));
}
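// The two cases above, at the object level (a sketch): in-object fields
// live inside the JSObject itself; the rest live in the properties backing
// store, which is why offset is rebased by FixedArray::kHeaderSize.
Object* LoadField(JSObject* obj, int offset, bool inobject) {
  HeapObject* source = inobject ? static_cast<HeapObject*>(obj)
                                : obj->properties();
  // FieldOperand == untagged address + offset; illustration only.
  return *reinterpret_cast<Object**>(source->address() + offset);
}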
StackArgumentsAccessor args(rsp, kFastApiCallArguments,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
for (int i = 0; i < kFastApiCallArguments; i++) {
- __ movq(args.GetArgumentOperand(i), scratch);
+ __ movp(args.GetArgumentOperand(i), scratch);
}
}
// Save calling context.
int offset = argc + kFastApiCallArguments;
- __ movq(args.GetArgumentOperand(offset - FCA::kContextSaveIndex), rsi);
+ __ movp(args.GetArgumentOperand(offset - FCA::kContextSaveIndex), rsi);
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
__ Move(rdi, function);
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Construct the FunctionCallbackInfo on the stack.
- __ movq(args.GetArgumentOperand(offset - FCA::kCalleeIndex), rdi);
+ __ movp(args.GetArgumentOperand(offset - FCA::kCalleeIndex), rdi);
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ Move(rcx, api_call_info);
- __ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
- __ movq(args.GetArgumentOperand(offset - FCA::kDataIndex), rbx);
+ __ movp(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
+ __ movp(args.GetArgumentOperand(offset - FCA::kDataIndex), rbx);
} else {
__ Move(args.GetArgumentOperand(offset - FCA::kDataIndex), call_data);
}
__ Move(kScratchRegister,
ExternalReference::isolate_address(masm->isolate()));
- __ movq(args.GetArgumentOperand(offset - FCA::kIsolateIndex),
+ __ movp(args.GetArgumentOperand(offset - FCA::kIsolateIndex),
kScratchRegister);
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(args.GetArgumentOperand(offset - FCA::kReturnValueDefaultValueIndex),
+ __ movp(args.GetArgumentOperand(offset - FCA::kReturnValueDefaultValueIndex),
kScratchRegister);
- __ movq(args.GetArgumentOperand(offset - FCA::kReturnValueOffset),
+ __ movp(args.GetArgumentOperand(offset - FCA::kReturnValueOffset),
kScratchRegister);
// Prepare arguments.
bool call_data_undefined = false;
if (isolate->heap()->InNewSpace(*call_data)) {
__ Move(scratch2, api_call_info);
- __ movq(scratch3, FieldOperand(scratch2, CallHandlerInfo::kDataOffset));
+ __ movp(scratch3, FieldOperand(scratch2, CallHandlerInfo::kDataOffset));
} else if (call_data->IsUndefined()) {
call_data_undefined = true;
__ LoadRoot(scratch3, Heap::kUndefinedValueRootIndex);
ASSERT(!scratch1.is(rax));
// Store the receiver address for GenerateFastApiCallBody.
- __ movq(rax, rsp);
+ __ movp(rax, rsp);
__ PushReturnAddressFrom(scratch1);
GenerateFastApiCallBody(masm, optimization, argc, true);
__ PrepareCallApiFunction(kApiStackSpace);
- __ movq(StackSpaceOperand(0), rax); // FunctionCallbackInfo::implicit_args_.
+ __ movp(StackSpaceOperand(0), rax); // FunctionCallbackInfo::implicit_args_.
__ addq(rax, Immediate((argc + kFastApiCallArguments - 1) * kPointerSize));
- __ movq(StackSpaceOperand(1), rax); // FunctionCallbackInfo::values_.
+ __ movp(StackSpaceOperand(1), rax); // FunctionCallbackInfo::values_.
__ Set(StackSpaceOperand(2), argc); // FunctionCallbackInfo::length_.
// FunctionCallbackInfo::is_construct_call_.
__ Set(StackSpaceOperand(3), 0);
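// The four stack slots written above mirror the callback-side view. A
// sketch of the assumed field order in the FunctionCallbackInfo glue:
struct FunctionCallbackInfoLayout {
  Object** implicit_args_;    // StackSpaceOperand(0)
  Object** values_;           // StackSpaceOperand(1)
  int length_;                // StackSpaceOperand(2)
  int is_construct_call_;     // StackSpaceOperand(3)
};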
// Update the map of the object.
__ Move(scratch1, transition);
- __ movq(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
+ __ movp(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
// Update the write barrier for the map field.
__ RecordWriteField(receiver_reg,
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
if (FLAG_track_double_fields && representation.IsDouble()) {
- __ movq(FieldOperand(receiver_reg, offset), storage_reg);
+ __ movp(FieldOperand(receiver_reg, offset), storage_reg);
} else {
- __ movq(FieldOperand(receiver_reg, offset), value_reg);
+ __ movp(FieldOperand(receiver_reg, offset), value_reg);
}
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
- __ movq(storage_reg, value_reg);
+ __ movp(storage_reg, value_reg);
}
__ RecordWriteField(
receiver_reg, offset, storage_reg, scratch1, kDontSaveFPRegs,
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
- __ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ movp(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
if (FLAG_track_double_fields && representation.IsDouble()) {
- __ movq(FieldOperand(scratch1, offset), storage_reg);
+ __ movp(FieldOperand(scratch1, offset), storage_reg);
} else {
- __ movq(FieldOperand(scratch1, offset), value_reg);
+ __ movp(FieldOperand(scratch1, offset), value_reg);
}
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
- __ movq(storage_reg, value_reg);
+ __ movp(storage_reg, value_reg);
}
__ RecordWriteField(
scratch1, offset, storage_reg, receiver_reg, kDontSaveFPRegs,
// Load the double storage.
if (index < 0) {
int offset = object->map()->instance_size() + (index * kPointerSize);
- __ movq(scratch1, FieldOperand(receiver_reg, offset));
+ __ movp(scratch1, FieldOperand(receiver_reg, offset));
} else {
- __ movq(scratch1,
+ __ movp(scratch1,
FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
int offset = index * kPointerSize + FixedArray::kHeaderSize;
- __ movq(scratch1, FieldOperand(scratch1, offset));
+ __ movp(scratch1, FieldOperand(scratch1, offset));
}
// Store the value into the storage.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- __ movq(FieldOperand(receiver_reg, offset), value_reg);
+ __ movp(FieldOperand(receiver_reg, offset), value_reg);
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
+ __ movp(name_reg, value_reg);
__ RecordWriteField(
receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, smi_check);
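// The recurring guard above, as straight C++ (a sketch): only smi stores
// may skip the write barrier, since smis are immediates, not heap pointers;
// double fields still need it because a HeapNumber pointer was stored.
bool NeedsWriteBarrier(Representation r) {
  return !FLAG_track_fields || !r.IsSmi();
}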
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
- __ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ movq(FieldOperand(scratch1, offset), value_reg);
+ __ movp(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ movp(FieldOperand(scratch1, offset), value_reg);
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
+ __ movp(name_reg, value_reg);
__ RecordWriteField(
scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, smi_check);
FunctionCallbackArguments::kHolderIndex;
if (save_at_depth == depth) {
- __ movq(args.GetArgumentOperand(kHolderIndex), object_reg);
+ __ movp(args.GetArgumentOperand(kHolderIndex), object_reg);
}
Handle<JSObject> current = Handle<JSObject>::null();
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
scratch1, scratch2);
- __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // From now on the object will be in holder_reg.
- __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ __ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
} else {
bool in_new_space = heap()->InNewSpace(*prototype);
if (in_new_space) {
// Save the map in scratch1 for later.
- __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
}
if (depth != 1 || check == CHECK_ALL_MAPS) {
__ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
if (in_new_space) {
// The prototype is in new space; we cannot store a reference to it
// in the code. Load it from the map.
- __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ __ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
} else {
// The prototype is in old space; load it directly.
__ Move(reg, prototype);
}
if (save_at_depth == depth) {
- __ movq(args.GetArgumentOperand(kHolderIndex), reg);
+ __ movp(args.GetArgumentOperand(kHolderIndex), reg);
}
// Go to the next object in the prototype chain.
// Load the properties dictionary.
Register dictionary = scratch4();
- __ movq(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));
+ __ movp(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));
// Probe the dictionary.
Label probe_done;
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ movq(scratch2(),
+ __ movp(scratch2(),
Operand(dictionary, index, times_pointer_size,
kValueOffset - kHeapObjectTag));
__ Move(scratch3(), callback, RelocInfo::EMBEDDED_OBJECT);
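// kValueOffset is one pointer past the entry's key slot: NameDictionary
// entries are (key, value, details) triples, and `index` above is already
// scaled to the entry's first element. Sketch (kEntrySize == 3 assumed):
const int kValueSlotInEntry = 1;  // key = 0, value = 1, details = 2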
Handle<JSObject> holder,
PropertyIndex field,
Representation representation) {
- if (!reg.is(receiver())) __ movq(receiver(), reg);
+ if (!reg.is(receiver())) __ movp(receiver(), reg);
if (kind() == Code::LOAD_IC) {
LoadFieldStub stub(field.is_inobject(holder),
field.translate(holder),
#endif
ASSERT(!name_arg.is(scratch4()));
- __ movq(name_arg, rsp);
+ __ movp(name_arg, rsp);
__ PushReturnAddressFrom(scratch4());
// v8::Arguments::values_ and handler for name.
__ lea(rax, Operand(name_arg, 1 * kPointerSize));
// v8::PropertyAccessorInfo::args_.
- __ movq(StackSpaceOperand(0), rax);
+ __ movp(StackSpaceOperand(0), rax);
// The context register (rsi) has been saved in PrepareCallApiFunction and
// could be used to pass arguments.
Label* miss) {
// Get the value from the cell.
__ Move(rdi, cell);
- __ movq(rdi, FieldOperand(rdi, Cell::kValueOffset));
+ __ movp(rdi, FieldOperand(rdi, Cell::kValueOffset));
// Check that the cell contains the same function.
if (heap()->InNewSpace(*function)) {
StackArgumentsAccessor args(rsp, argc);
if (argc == 0) {
// Noop, return the length.
- __ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
+ __ movp(rax, FieldOperand(rdx, JSArray::kLengthOffset));
__ ret((argc + 1) * kPointerSize);
} else {
Label call_builtin;
Label attempt_to_grow_elements, with_write_barrier, check_double;
// Get the elements array of the object.
- __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
+ __ movp(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
// Check that the elements are in fast mode and writable.
__ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
__ j(greater, &attempt_to_grow_elements);
// Check if value is a smi.
- __ movq(rcx, args.GetArgumentOperand(1));
+ __ movp(rcx, args.GetArgumentOperand(1));
__ JumpIfNotSmi(rcx, &with_write_barrier);
// Save new length.
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
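// Integer32ToSmiField relies on the x64 smi layout: the 32-bit payload
// occupies the upper half of the word and the tag bits in the lower half
// are zero. Sketch (assuming kSmiShift == 32):
int64_t ToSmiWord(int32_t value) {
  return static_cast<int64_t>(value) << 32;  // low 32 bits: smi tag == 0
}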
// Store the value.
- __ movq(FieldOperand(rdi,
+ __ movp(FieldOperand(rdi,
rax,
times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize),
__ cmpl(rax, rcx);
__ j(greater, &call_builtin);
- __ movq(rcx, args.GetArgumentOperand(1));
+ __ movp(rcx, args.GetArgumentOperand(1));
__ StoreNumberToDoubleElements(
rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize);
__ bind(&with_write_barrier);
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
Label fast_object, not_fast_object;
DONT_TRACK_ALLOCATION_SITE,
NULL);
// Restore rdi.
- __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
+ __ movp(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
__ jmp(&fast_object);
__ bind(&try_holey_map);
GenerateMapChangeElementsTransition(masm(),
DONT_TRACK_ALLOCATION_SITE,
NULL);
- __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
+ __ movp(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(rbx, &call_builtin);
__ lea(rdx, FieldOperand(rdi,
rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
- __ movq(Operand(rdx, 0), rcx);
+ __ movp(Operand(rdx, 0), rcx);
__ RecordWrite(rdi, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ jmp(&call_builtin);
}
- __ movq(rbx, args.GetArgumentOperand(1));
+ __ movp(rbx, args.GetArgumentOperand(1));
// Growing elements that are SMI-only requires special handling in case
// the new element is non-Smi. For now, delegate to the builtin.
Label no_fast_elements_check;
__ JumpIfSmi(rbx, &no_fast_elements_check);
- __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
__ CheckFastObjectElements(rcx, &call_builtin, Label::kFar);
__ bind(&no_fast_elements_check);
__ Store(new_space_allocation_top, rcx);
// Push the argument...
- __ movq(Operand(rdx, 0), rbx);
+ __ movp(Operand(rdx, 0), rbx);
// ... and fill the rest with holes.
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
- __ movq(Operand(rdx, i * kPointerSize), kScratchRegister);
+ __ movp(Operand(rdx, i * kPointerSize), kScratchRegister);
}
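// Hole-filling at the object level (a sketch; `elms` and `old_len` are
// hypothetical names): the first new slot takes the pushed argument, the
// rest of the kAllocationDelta block takes the hole sentinel so the array
// stays in a valid holey state.
for (int i = 1; i < kAllocationDelta; i++) {
  elms->set_the_hole(old_len + i);
}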
// We know the elements array is in new space so we don't need the
// remembered set (the RecordWrite below passes OMIT_REMEMBERED_SET).
__ RecordWrite(rdi, rdx, rbx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
// Restore receiver to rdx as finish sequence assumes it's here.
- __ movq(rdx, args.GetReceiverOperand());
+ __ movp(rdx, args.GetReceiverOperand());
// Increment element's and array's sizes.
__ SmiAddConstant(FieldOperand(rdi, FixedArray::kLengthOffset),
// Make new length a smi before returning it.
__ Integer32ToSmi(rax, rax);
- __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+ __ movp(FieldOperand(rdx, JSArray::kLengthOffset), rax);
__ ret((argc + 1) * kPointerSize);
}
HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
// Get the elements array of the object.
- __ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
// Check that the elements are in fast mode and writable.
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
// Get the last element.
__ LoadRoot(r9, Heap::kTheHoleValueRootIndex);
- __ movq(rax, FieldOperand(rbx,
+ __ movp(rax, FieldOperand(rbx,
rcx, times_pointer_size,
FixedArray::kHeaderSize));
// Check if element is already the hole.
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
// Fill with the hole and return original value.
- __ movq(FieldOperand(rbx,
+ __ movp(FieldOperand(rbx,
rcx, times_pointer_size,
FixedArray::kHeaderSize),
r9);
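// The fast pop path above, at the object level (a sketch with assumed
// helper names; a hole in the last slot falls back to the builtin):
Object* FastArrayPop(JSArray* array, FixedArray* elms, int len) {
  Object* last = elms->get(len - 1);
  if (last->IsTheHole()) return NULL;        // delegate to the builtin
  array->set_length(Smi::FromInt(len - 1));  // shrink first
  elms->set_the_hole(len - 1);               // then clear the slot
  return last;
}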
const int argc = arguments().immediate();
StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
+ __ movp(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss_before_stack_reserved);
if (object->IsGlobalObject()) {
StackArgumentsAccessor args(rsp, arguments());
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ movq(args.GetReceiverOperand(), rdx);
+ __ movp(args.GetReceiverOperand(), rdx);
}
}
Register reg = rdx;
StackArgumentsAccessor args(rsp, arguments());
- __ movq(reg, args.GetReceiverOperand());
+ __ movp(reg, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
// Check that the function really is a function.
GenerateFunctionCheck(function, rbx, miss);
- if (!function.is(rdi)) __ movq(rdi, function);
+ if (!function.is(rdi)) __ movp(rdi, function);
PatchImplicitReceiver(object);
// Invoke the function.
// Get the receiver from the stack.
StackArgumentsAccessor args(rsp, arguments());
- __ movq(rdx, args.GetReceiverOperand());
+ __ movp(rdx, args.GetReceiverOperand());
CallInterceptorCompiler compiler(this, arguments(), rcx);
compiler.Compile(masm(), object, holder, name, &lookup, rdx, rbx, rdi, rax,
&miss);
// Restore receiver.
- __ movq(rdx, args.GetReceiverOperand());
+ __ movp(rdx, args.GetReceiverOperand());
GenerateJumpFunction(object, rax, &miss);
__ pop(rax);
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
__ ret(0);
}
Label miss;
__ JumpIfSmi(receiver(), &miss, Label::kNear);
- __ movq(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
+ __ movp(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = receiver_maps->length();
for (int i = 0; i < receiver_count; ++i) {
// Check the map and tail call if there's a match.
}
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
__ ret(0);
}
// Get the value from the cell.
__ Move(rbx, cell);
- __ movq(rbx, FieldOperand(rbx, PropertyCell::kValueOffset));
+ __ movp(rbx, FieldOperand(rbx, PropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
- __ movq(rax, rbx);
+ __ movp(rax, rbx);
__ ret(0);
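// Object-level equivalent (a sketch): the cell holds the global's current
// value, and a deleted property is represented by the hole sentinel.
Object* LoadGlobal(PropertyCell* cell, bool is_dont_delete) {
  Object* value = cell->value();
  if (!is_dont_delete && value->IsTheHole()) return NULL;  // miss
  return value;
}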
// Return the generated code.
__ JumpIfSmi(receiver(), smi_target);
Register map_reg = scratch1();
- __ movq(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
+ __ movp(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = types->length();
int number_of_handled_maps = 0;
for (int current = 0; current < receiver_count; ++current) {
__ JumpIfNotSmi(rax, &miss);
__ SmiToInteger32(rbx, rax);
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check whether the elements is a number dictionary.
// rdx: receiver