__ Mov(x11, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
__ Ldr(x10, MemOperand(x11));
- // TODO(all): Pushing the marker twice seems unnecessary.
- // In this case perhaps we could push xzr in the slot for the context
- // (see MAsm::EnterFrame).
- __ Push(x13, x12, x12, x10);
+ __ Push(x13, xzr, x12, x10);
// Set up fp.
__ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
// field in the JSEnv and return a failure sentinel. Coming in here the
// fp will be invalid because the PushTryHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
- // TODO(jbramley): Do this in the Assembler.
__ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
}
__ Bind(&update_instance_type);
__ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset));
__ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset));
- // TODO(all): This generates "b #+0x4". Can these be optimised out?
- __ B(&underlying_unpacked);
+ // Control must now reach &underlying_unpacked. Since no code is generated
+ // before that label, we fall through instead of emitting a useless branch.
__ Bind(&seq_or_external_string);
// Sequential or external string. Registers unpacked_string and input_string
// alias, so there's nothing to do here.
+ // Note that if code is added here, the above code must be updated.
// x0 result_string pointer to result string object (uninit)
// x1 result_length length of substring result
input_string, from, result_length, x0,
&runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm);
- // TODO(jbramley): Why doesn't this jump to return_x0?
__ Drop(3);
__ Ret();
generator.SkipSlow(masm, &runtime);
Operand::UntagSmiAndScale(length, kPointerSizeLog2));
__ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
} else {
- // TODO(all): ARM has a redundant cmp here.
__ B(gt, &call_builtin);
__ Peek(value, (argc - 1) * kPointerSize);
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
- // TODO(all): Optimize this with stp and suchlike.
ASSERT(!AreAliased(x0, api_function_address));
// x0 = FunctionCallbackInfo&
// Arguments is after the return address.
ASSERT(!csp.Is(sp_));
// TODO(jbramley): Several callers rely on this not using scratch registers,
// so we use the assembler directly here. However, this means that large
- // immediate values of 'space' cannot be handled cleanly. Once we implement
- // our flexible scratch register idea, we could greatly simplify this
- // function.
+ // immediate values of 'space' cannot be handled cleanly. (Only 24-bit
+ // immediates or values of 'space' that can be encoded in one instruction are
+ // accepted.) Once we implement our flexible scratch register idea, we could
+ // greatly simplify this function.
InstructionAccurateScope scope(this);
if ((space.IsImmediate()) && !is_uint12(space.immediate())) {
// The subtract instruction supports a 12-bit immediate, shifted left by
void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
const CPURegister& src2, const CPURegister& src3) {
ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
- ASSERT(src0.IsValid());
int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
int size = src0.SizeInBytes();
}
+// Push up to eight registers of the same size and type onto the stack.
+// src0-src4 must be valid; src5-src7 default to NoReg and are optional.
+void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3,
+ const CPURegister& src4, const CPURegister& src5,
+ const CPURegister& src6, const CPURegister& src7) {
+ ASSERT(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
+
+ // Count the valid registers: the first five are mandatory, so only
+ // src5, src6 and src7 contribute conditionally. (The original counted
+ // src6 twice and ignored src7, over- or under-adjusting the stack.)
+ int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
+ int size = src0.SizeInBytes();
+
+ PrepareForPush(count, size);
+ // Push in two batches of at most four, matching the 4-register helper.
+ PushHelper(4, size, src0, src1, src2, src3);
+ PushHelper(count - 4, size, src4, src5, src6, src7);
+}
+
+
void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
const CPURegister& dst2, const CPURegister& dst3) {
// It is not valid to pop into the same register more than once in one
Register length,
Register scratch,
CopyHint hint) {
- ASSERT(!AreAliased(src, dst, length, scratch));
+ UseScratchRegisterScope temps(this);
+ Register tmp1 = temps.AcquireX();
+ Register tmp2 = temps.AcquireX();
+ ASSERT(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
+ ASSERT(!AreAliased(src, dst, csp));
- // TODO(all): Implement a faster copy function, and use hint to determine
- // which algorithm to use for copies.
if (emit_debug_code()) {
// Check copy length.
Cmp(length, 0);
Assert(le, kCopyBuffersOverlap);
}
- Label loop, done;
- Cbz(length, &done);
+ Label short_copy, short_loop, bulk_loop, done;
- Bind(&loop);
+ if ((hint == kCopyLong || hint == kCopyUnknown) && !FLAG_optimize_for_size) {
+ Register bulk_length = scratch;
+ int pair_size = 2 * kXRegSize;
+ int pair_mask = pair_size - 1;
+
+ Bic(bulk_length, length, pair_mask);
+ Cbz(bulk_length, &short_copy);
+ Bind(&bulk_loop);
+ Sub(bulk_length, bulk_length, pair_size);
+ Ldp(tmp1, tmp2, MemOperand(src, pair_size, PostIndex));
+ Stp(tmp1, tmp2, MemOperand(dst, pair_size, PostIndex));
+ Cbnz(bulk_length, &bulk_loop);
+
+ And(length, length, pair_mask);
+ }
+
+ Bind(&short_copy);
+ Cbz(length, &done);
+ Bind(&short_loop);
Sub(length, length, 1);
- Ldrb(scratch, MemOperand(src, 1, PostIndex));
- Strb(scratch, MemOperand(dst, 1, PostIndex));
- Cbnz(length, &loop);
+ Ldrb(tmp1, MemOperand(src, 1, PostIndex));
+ Strb(tmp1, MemOperand(dst, 1, PostIndex));
+ Cbnz(length, &short_loop);
+
+
Bind(&done);
}
}
-// TODO(jbramley): Check that we're handling the frame pointer correctly.
void MacroAssembler::EnterExitFrame(bool save_doubles,
const Register& scratch,
int extra_space) {
// fp -> fp[0]: CallerFP (old fp)
// fp[-8]: Space reserved for SPOffset.
// fp[-16]: CodeObject()
- // jssp[-16 - fp_size]: Saved doubles (if save_doubles is true).
+ // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
// jssp[8]: Extra space reserved for caller (if extra_space != 0).
// jssp -> jssp[0]: Space reserved for the return address.
// fp -> fp[0]: CallerFP (old fp)
// fp[-8]: Space reserved for SPOffset.
// fp[-16]: CodeObject()
- // csp[...]: Saved doubles, if saved_doubles is true.
+ // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
// csp[8]: Memory reserved for the caller if extra_space != 0.
// Alignment padding, if necessary.
// csp -> csp[0]: Space reserved for the return address.
// the system stack pointer, these methods do not modify any other registers.
void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
+ void Push(const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3,
+ const CPURegister& src4, const CPURegister& src5 = NoReg,
+ const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
//
// This method asserts that StackPointer() is not csp, since the call does
// not make sense in that context.
- //
- // TODO(jbramley): Currently, this method can only accept values of 'space'
- // that can be encoded in one instruction. Refer to the implementation for
- // details.
inline void BumpSystemStackPointer(const Operand& space);
// Helpers ------------------------------------------------------------------
// Extending the stack by 2 * 64 bits is required for stack alignment purposes.
-// TODO(all): Insert a marker in the extra space allocated on the stack.
uintptr_t Simulator::PushAddress(uintptr_t address) {
ASSERT(sizeof(uintptr_t) < 2 * kXRegSize);
intptr_t new_sp = sp() - 2 * kXRegSize;
+ // Fill the second (alignment-padding) slot with the zap value so that
+ // stale data there is easy to recognise when inspecting the stack.
+ uintptr_t* alignment_slot =
+ reinterpret_cast<uintptr_t*>(new_sp + kXRegSize);
+ memcpy(alignment_slot, &kSlotsZapValue, kPointerSize);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
- *stack_slot = address;
+ // memcpy instead of a direct store: avoids alignment/strict-aliasing
+ // assumptions about the simulated stack memory.
+ memcpy(stack_slot, &address, kPointerSize);
set_sp(new_sp);
return new_sp;
}
}
-void Simulator::CheckStackAlignment() {
- // TODO(aleram): The sp alignment check to perform depends on the processor
- // state. Check the specifications for more details.
-}
-
-
// When the generated code calls an external reference we need to catch that in
// the simulator. The external reference will be a function compiled for the
// host architecture. We need to call that function instead of trying to
void Simulator::VisitDataProcessing2Source(Instruction* instr) {
- // TODO(mcapewel) move these to a higher level file, as they are global
- // assumptions.
- ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
- ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7FFFFFFF);
-
Shift shift_op = NO_SHIFT;
int64_t result = 0;
switch (instr->Mask(DataProcessing2SourceMask)) {
} else if ((strcmp(cmd, "print") == 0) || (strcmp(cmd, "p") == 0)) {
if (argc == 2) {
if (strcmp(arg1, "all") == 0) {
- // TODO(all): better support for printing in the debugger.
PrintRegisters(true);
PrintFPRegisters(true);
} else {
Label miss;
Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
-
- // TODO(jbramely): HandlerFrontendHeader returns its result in scratch1(), so
- // we can't use it below, but that isn't very obvious. Is there a better way
- // of handling this?
+ // HandlerFrontendHeader may return its result in scratch1(), so that
+ // register must not be used below.
+ Register scratch2 = this->scratch2();
+ Register scratch3 = this->scratch3();
+ Register dictionary = this->scratch4();
+ ASSERT(!AreAliased(reg, scratch2, scratch3, dictionary));
if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
- ASSERT(!AreAliased(reg, scratch2(), scratch3(), scratch4()));
-
// Load the properties dictionary.
- Register dictionary = scratch4();
__ Ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
// Probe the dictionary.
&probe_done,
dictionary,
this->name(),
- scratch2(),
- scratch3());
+ scratch2,
+ scratch3);
__ Bind(&probe_done);
// If probing finds an entry in the dictionary, scratch3 contains the
// pointer into the dictionary. Check that the value is the callback.
- Register pointer = scratch3();
+ Register pointer = scratch3;
const int kElementsStartOffset = NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ Ldr(scratch2(), FieldMemOperand(pointer, kValueOffset));
- __ Cmp(scratch2(), Operand(callback));
+ __ Ldr(scratch2, FieldMemOperand(pointer, kValueOffset));
+ __ Cmp(scratch2, Operand(callback));
__ B(ne, &miss);
}
void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
- Label success;
+ UseScratchRegisterScope temps(masm());
// Check that the object is a boolean.
- // TODO(all): Optimize this like LCodeGen::DoDeferredTaggedToI.
- __ JumpIfRoot(object, Heap::kTrueValueRootIndex, &success);
- __ JumpIfNotRoot(object, Heap::kFalseValueRootIndex, miss);
- __ Bind(&success);
+ Register true_root = temps.AcquireX();
+ Register false_root = temps.AcquireX();
+ ASSERT(!AreAliased(object, true_root, false_root));
+ __ LoadTrueFalseRoots(true_root, false_root);
+ // Branch-free check against both roots: if object != true_root, the
+ // conditional compare tests object against false_root; if object ==
+ // true_root, Ccmp injects ZFlag (i.e. "equal") directly. Either way a
+ // final 'ne' branch fires only for non-boolean values.
+ __ Cmp(object, true_root);
+ __ Ccmp(object, false_root, ZFlag, ne);
+ __ B(ne, miss);
}
// Stub never generated for non-global objects that require access checks.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- // TODO(jbramley): Make Push take more than four arguments and combine these
- // two calls.
- __ Push(receiver(), holder_reg);
+ // receiver() and holder_reg can alias.
+ ASSERT(!AreAliased(receiver(), scratch1(), scratch2(), value()));
+ ASSERT(!AreAliased(holder_reg, scratch1(), scratch2(), value()));
__ Mov(scratch1(), Operand(callback));
__ Mov(scratch2(), Operand(name));
- __ Push(scratch1(), scratch2(), value());
+ __ Push(receiver(), holder_reg, scratch1(), scratch2(), value());
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
namespace v8 {
namespace internal {
+// These are global assumptions in v8.
+STATIC_ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
+STATIC_ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7FFFFFFF);
+
// Floating point representation.
static inline uint32_t float_to_rawbits(float value) {
uint32_t bits = 0;