// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "v8.h"
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
has_frame_(false) {
if (isolate() != NULL) {
// TODO(titzer): should we just use a null handle here instead?
}
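+// Loads a value of the given representation from src into dst, sign- or
+// zero-extending 8- and 16-bit integers to the full 32-bit register.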
+void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8()) {
+ movsx_b(dst, src);
+ } else if (r.IsUInteger8()) {
+ movzx_b(dst, src);
+ } else if (r.IsInteger16()) {
+ movsx_w(dst, src);
+ } else if (r.IsUInteger16()) {
+ movzx_w(dst, src);
+ } else {
+ mov(dst, src);
+ }
+}
+
+
+void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ mov_b(dst, src);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ mov_w(dst, src);
+ } else {
+ mov(dst, src);
+ }
+}
+
+
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
j(equal, &done, Label::kNear);
}
StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(save_fp);
+ StoreBufferOverflowStub(isolate(), save_fp);
CallStub(&store_buffer_overflow);
if (and_then == kReturnAtEnd) {
ret(0);
Register result_reg) {
Label done;
Label conv_failure;
- pxor(scratch_reg, scratch_reg);
+ xorps(scratch_reg, scratch_reg);
cvtsd2si(result_reg, input_reg);
test(result_reg, Immediate(0xFFFFFF00));
j(zero, &done, Label::kNear);
- cmp(result_reg, Immediate(0x80000000));
- j(equal, &conv_failure, Label::kNear);
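+  // cvtsd2si returns 0x80000000 (kMinInt) when the value is not
+  // representable; result - 1 overflows only for that value, so the
+  // cmp/j(overflow) pair below catches exactly the failed conversion.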
+ cmp(result_reg, Immediate(0x1));
+ j(overflow, &conv_failure, Label::kNear);
mov(result_reg, Immediate(0));
- setcc(above, result_reg);
+ setcc(sign, result_reg);
sub(result_reg, Immediate(1));
and_(result_reg, Immediate(255));
jmp(&done, Label::kNear);
bind(&conv_failure);
- Set(result_reg, Immediate(0));
+ Move(result_reg, Immediate(0));
ucomisd(input_reg, scratch_reg);
j(below, &done, Label::kNear);
- Set(result_reg, Immediate(255));
+ Move(result_reg, Immediate(255));
bind(&done);
}
void MacroAssembler::SlowTruncateToI(Register result_reg,
Register input_reg,
int offset) {
- DoubleToIStub stub(input_reg, result_reg, offset, true);
- call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+ DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
+ call(stub.GetCode(), RelocInfo::CODE_TARGET);
}
XMMRegister input_reg) {
Label done;
cvttsd2si(result_reg, Operand(input_reg));
- cmp(result_reg, 0x80000000u);
- j(not_equal, &done, Label::kNear);
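+  // cvttsd2si likewise returns 0x80000000 on failure; the same
+  // result - 1 overflow test distinguishes success from failure.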
+ cmp(result_reg, 0x1);
+ j(no_overflow, &done, Label::kNear);
sub(esp, Immediate(kDoubleSize));
movsd(MemOperand(esp, 0), input_reg);
CpuFeatureScope scope(this, SSE2);
movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, Operand(xmm0));
- cmp(result_reg, 0x80000000u);
- j(not_equal, &done, Label::kNear);
+ cmp(result_reg, 0x1);
+ j(no_overflow, &done, Label::kNear);
// Check if the input was 0x80000000 (kMinInt).
// If not, then the conversion overflowed and we deoptimize.
ExternalReference min_int = ExternalReference::address_of_min_int();
isolate()->factory()->heap_number_map());
j(not_equal, lost_precision, Label::kNear);
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
ASSERT(!temp.is(no_xmm_reg));
CpuFeatureScope scope(this, SSE2);
return;
}
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
// A single check of the map's pages interesting flag suffices, since it is
// only set during incremental collection, and then it's also guaranteed that
// the from object's page's interesting flag is also set. This optimization
// them.
lea(address, FieldOperand(object, HeapObject::kMapOffset));
mov(value, Immediate(map));
- RecordWriteStub stub(object, value, address, OMIT_REMEMBERED_SET, save_fp);
+ RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET,
+ save_fp);
CallStub(&stub);
bind(&done);
bind(&ok);
}
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
&done,
Label::kNear);
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+ fp_mode);
CallStub(&stub);
bind(&done);
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
- Set(eax, Immediate(0));
+ Move(eax, Immediate(0));
mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
- CEntryStub ces(1);
- call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
+ CEntryStub ces(isolate(), 1);
+ call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
-#endif
void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
}
-void MacroAssembler::Set(Register dst, const Immediate& x) {
- if (x.is_zero()) {
- xor_(dst, dst); // Shorter than mov.
- } else {
- mov(dst, x);
- }
-}
-
-
-void MacroAssembler::Set(const Operand& dst, const Immediate& x) {
- mov(dst, x);
-}
-
-
bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
static const int kMaxImmediateBits = 17;
if (!RelocInfo::IsNone(x.rmode_)) return false;
}
-void MacroAssembler::SafeSet(Register dst, const Immediate& x) {
+void MacroAssembler::SafeMove(Register dst, const Immediate& x) {
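+  // Emit attacker-controlled immediates XORed with the per-VM jit cookie
+  // and undo it with a second xor, so the raw value never appears in the
+  // instruction stream (a JIT-spraying mitigation).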
if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
- Set(dst, Immediate(x.x_ ^ jit_cookie()));
+ Move(dst, Immediate(x.x_ ^ jit_cookie()));
xor_(dst, jit_cookie());
} else {
- Set(dst, x);
+ Move(dst, x);
}
}
}
-void MacroAssembler::CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success) {
+void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}
JumpIfSmi(obj, fail);
}
- Label success;
- CompareMap(obj, map, &success);
+ CompareMap(obj, map);
j(not_equal, fail);
- bind(&success);
}
}
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ cmp(object, isolate()->factory()->undefined_value());
+ j(equal, &done_checking);
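+    // FieldOperand(object, 0) addresses the map, the first field of any
+    // heap object.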
+ cmp(FieldOperand(object, 0),
+ Immediate(isolate()->factory()->allocation_site_map()));
+ Assert(equal, kExpectedUndefinedOrCell);
+ bind(&done_checking);
+ }
+}
+
+
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
- ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
- isolate());
- ExternalReference context_address(Isolate::kContextAddress,
- isolate());
+ ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
+ ExternalReference context_address(Isolate::kContextAddress, isolate());
mov(Operand::StaticVariable(c_entry_fp_address), ebp);
mov(Operand::StaticVariable(context_address), esi);
}
// Optionally save all XMM registers.
if (save_doubles) {
CpuFeatureScope scope(this, SSE2);
- int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
+ int space = XMMRegister::kNumRegisters * kSIMD128Size +
+ argc * kPointerSize;
sub(esp, Immediate(space));
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
+ movups(Operand(ebp, offset - ((i + 1) * kSIMD128Size)), reg);
}
} else {
sub(esp, Immediate(argc * kPointerSize));
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
+ movups(reg, Operand(ebp, offset - ((i + 1) * kSIMD128Size)));
}
}
}
-// Compute the hash code from the untagged key. This must be kept in sync
-// with ComputeIntegerHash in utils.h.
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// code-stubs-hydrogen.cc.
//
// Note: r0 will contain hash code
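+//
+// For reference, a sketch of the C++ side (ComputeIntegerHash in utils.h);
+// the shift amounts and multiplier below must match it exactly:
+//   hash = key ^ seed;
+//   hash = ~hash + (hash << 15);
+//   hash = hash ^ (hash >> 12);
+//   hash = hash + (hash << 2);
+//   hash = hash ^ (hash >> 4);
+//   hash = hash * 2057;
+//   hash = hash ^ (hash >> 16);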
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
// Xor original key with a seed.
- if (Serializer::enabled()) {
+ if (Serializer::enabled(isolate())) {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(scratch, Immediate(Heap::kHashSeedRootIndex));
dec(r1);
// Generate an unrolled loop that performs a few probes before giving up.
- const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use r2 for index calculations and keep the hash intact in r0.
mov(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
r2,
times_pointer_size,
SeededNumberDictionary::kElementsStartOffset));
- if (i != (kProbes - 1)) {
+ if (i != (kNumberDictionaryProbes - 1)) {
j(equal, &done);
} else {
j(not_equal, miss);
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
}
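+// Allocates a heap object of the given byte size for a SIMD value and
+// installs the map named by map_index; on allocation failure, control
+// transfers to gc_required.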
+void MacroAssembler::AllocateSIMDHeapObject(int size,
+ Register result,
+ Register scratch,
+ Label* gc_required,
+ Heap::RootListIndex map_index) {
+ Allocate(size, result, scratch, no_reg, gc_required, TAG_OBJECT);
+
+ // Set the map.
+ switch (map_index) {
+ case Heap::kFloat32x4MapRootIndex:
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->float32x4_map()));
+ break;
+ case Heap::kFloat64x2MapRootIndex:
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->float64x2_map()));
+ break;
+ case Heap::kInt32x4MapRootIndex:
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->int32x4_map()));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
Register scratch1,
Register destination,
Register length,
Register scratch) {
- Label loop, done, short_string, short_loop;
- // Experimentation shows that the short string loop is faster if length < 10.
- cmp(length, Immediate(10));
- j(less_equal, &short_string);
-
+ Label short_loop, len4, len8, len12, done, short_string;
ASSERT(source.is(esi));
ASSERT(destination.is(edi));
ASSERT(length.is(ecx));
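+  // Strategy: lengths below 4 take the byte-wise loop; lengths of 4..16 are
+  // handled with overlapping 4-byte moves; longer ranges use rep_movs, with
+  // the (possibly unaligned) tail copied up front.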
+ cmp(length, Immediate(4));
+ j(below, &short_string, Label::kNear);
// Because source is 4-byte aligned in our uses of this function,
// we keep source aligned for the rep_movs call by copying the odd bytes
// at the end of the ranges.
mov(scratch, Operand(source, length, times_1, -4));
mov(Operand(destination, length, times_1, -4), scratch);
+
+ cmp(length, Immediate(8));
+ j(below_equal, &len4, Label::kNear);
+ cmp(length, Immediate(12));
+ j(below_equal, &len8, Label::kNear);
+ cmp(length, Immediate(16));
+ j(below_equal, &len12, Label::kNear);
+
mov(scratch, ecx);
shr(ecx, 2);
rep_movs();
and_(scratch, Immediate(0x3));
add(destination, scratch);
- jmp(&done);
+ jmp(&done, Label::kNear);
+
+ bind(&len12);
+ mov(scratch, Operand(source, 8));
+ mov(Operand(destination, 8), scratch);
+ bind(&len8);
+ mov(scratch, Operand(source, 4));
+ mov(Operand(destination, 4), scratch);
+ bind(&len4);
+ mov(scratch, Operand(source, 0));
+ mov(Operand(destination, 0), scratch);
+ add(destination, length);
+ jmp(&done, Label::kNear);
bind(&short_string);
test(length, length);
- j(zero, &done);
+ j(zero, &done, Label::kNear);
bind(&short_loop);
mov_b(scratch, Operand(source, 0));
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
- call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
+ call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ ||
- stub->CompilingCallsToThisStubIsGCSafe(isolate()));
- jmp(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
+ jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
}
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
}
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
- Set(eax, Immediate(num_arguments));
+ Move(eax, Immediate(num_arguments));
mov(ebx, Immediate(ExternalReference(f, isolate())));
- CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? save_doubles
- : kDontSaveFPRegs);
+ CEntryStub ces(isolate(),
+ 1,
+ CpuFeatures::IsSupported(SSE2) ? save_doubles
+ : kDontSaveFPRegs);
CallStub(&ces);
}
mov(eax, Immediate(num_arguments));
mov(ebx, Immediate(ref));
- CEntryStub stub(1);
+ CEntryStub stub(isolate(), 1);
CallStub(&stub);
}
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
- Set(eax, Immediate(num_arguments));
+ Move(eax, Immediate(num_arguments));
JumpToExternalReference(ext);
}
void MacroAssembler::CallApiFunctionAndReturn(
- Address function_address,
- Address thunk_address,
+ Register function_address,
+ ExternalReference thunk_ref,
Operand thunk_last_arg,
int stack_space,
Operand return_value_operand,
ExternalReference level_address =
ExternalReference::handle_scope_level_address(isolate());
+ ASSERT(edx.is(function_address));
// Allocate HandleScope in callee-save registers.
mov(ebx, Operand::StaticVariable(next_address));
mov(edi, Operand::StaticVariable(limit_address));
Label profiler_disabled;
Label end_profiler_check;
- bool* is_profiling_flag =
- isolate()->cpu_profiler()->is_profiling_address();
- STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
- mov(eax, Immediate(reinterpret_cast<Address>(is_profiling_flag)));
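+  // When the CPU profiler is running, route the call through a thunk so the
+  // profiler can log it; otherwise call the function directly.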
+ mov(eax, Immediate(ExternalReference::is_profiling_address(isolate())));
cmpb(Operand(eax, 0), 0);
j(zero, &profiler_disabled);
// Additional parameter is the address of the actual getter function.
- mov(thunk_last_arg, Immediate(function_address));
+ mov(thunk_last_arg, function_address);
// Call the api function.
- call(thunk_address, RelocInfo::RUNTIME_ENTRY);
+ mov(eax, Immediate(thunk_ref));
+ call(eax);
jmp(&end_profiler_check);
bind(&profiler_disabled);
// Call the api function.
- call(function_address, RelocInfo::RUNTIME_ENTRY);
+ call(function_address);
bind(&end_profiler_check);
if (FLAG_log_timer_events) {
bind(&promote_scheduled_exception);
{
FrameScope frame(this, StackFrame::INTERNAL);
- CallRuntime(Runtime::kPromoteScheduledException, 0);
+ CallRuntime(Runtime::kHiddenPromoteScheduledException, 0);
}
jmp(&exception_handled);
void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
// Set the entry point and jump to the C entry runtime stub.
mov(ebx, Immediate(ext));
- CEntryStub ces(1);
- jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET);
-}
-
-
-void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
- // This macro takes the dst register to make the code more readable
- // at the call sites. However, the dst register has to be ecx to
- // follow the calling convention which requires the call type to be
- // in ecx.
- ASSERT(dst.is(ecx));
- if (call_kind == CALL_AS_FUNCTION) {
- // Set to some non-zero smi by updating the least significant
- // byte.
- mov_b(dst, 1 << kSmiTagSize);
- } else {
- // Set to smi zero by clearing the register.
- xor_(dst, dst);
- }
+ CEntryStub ces(isolate(), 1);
+ jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
bool* definitely_mismatches,
InvokeFlag flag,
Label::Distance done_near,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
bool definitely_matches = false;
*definitely_mismatches = false;
Label invoke;
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
- SetCallKind(ecx, call_kind);
call(adaptor, RelocInfo::CODE_TARGET);
call_wrapper.AfterCall();
if (!*definitely_mismatches) {
jmp(done, done_near);
}
} else {
- SetCallKind(ecx, call_kind);
jmp(adaptor, RelocInfo::CODE_TARGET);
}
bind(&invoke);
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
bool definitely_mismatches = false;
InvokePrologue(expected, actual, Handle<Code>::null(), code,
&done, &definitely_mismatches, flag, Label::kNear,
- call_wrapper, call_kind);
+ call_wrapper);
if (!definitely_mismatches) {
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(ecx, call_kind);
call(code);
call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(ecx, call_kind);
jmp(code);
}
bind(&done);
}
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
- Operand dummy(eax, 0);
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, code, dummy, &done, &definitely_mismatches,
- flag, Label::kNear, call_wrapper, call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code, rmode));
- SetCallKind(ecx, call_kind);
- call(code, rmode);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(ecx, call_kind);
- jmp(code, rmode);
- }
- bind(&done);
- }
-}
-
-
void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
ParameterCount expected(ebx);
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, call_wrapper, call_kind);
+ expected, actual, flag, call_wrapper);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
- // Get the function and setup the context.
- LoadHeapObject(edi, function);
+ ASSERT(fun.is(edi));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, call_wrapper, call_kind);
+ expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ LoadHeapObject(edi, function);
+ InvokeFunction(edi, expected, actual, flag, call_wrapper);
}
ParameterCount expected(0);
GetBuiltinFunction(edi, id);
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, expected, flag, call_wrapper, CALL_AS_METHOD);
+ expected, expected, flag, call_wrapper);
}
}
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- mov(map_out, FieldOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
-
-void MacroAssembler::LoadGlobalContext(Register global_context) {
- // Load the global or builtins object from the current context.
- mov(global_context,
- Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- mov(global_context,
- FieldOperand(global_context, GlobalObject::kNativeContextOffset));
-}
-
-
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
mov(function,
}
+void MacroAssembler::Move(Register dst, const Immediate& x) {
+ if (x.is_zero()) {
+ xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
+ } else {
+ mov(dst, x);
+ }
+}
+
+
+void MacroAssembler::Move(const Operand& dst, const Immediate& x) {
+ mov(dst, x);
+}
+
+
+void MacroAssembler::Move(XMMRegister dst, double val) {
+ // TODO(titzer): recognize double constants with ExternalReferences.
+ CpuFeatureScope scope(this, SSE2);
+ uint64_t int_val = BitCast<uint64_t, double>(val);
+ if (int_val == 0) {
+ xorps(dst, dst);
+ } else {
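+    // Build the 8-byte value on the stack from its two 32-bit halves, then
+    // load it with a single movsd; this needs no scratch register.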
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
+ push(Immediate(upper));
+ push(Immediate(lower));
+ movsd(dst, Operand(esp, 0));
+ add(esp, Immediate(kDoubleSize));
+ }
+}
+
+
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
void MacroAssembler::Abort(BailoutReason reason) {
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
#endif
push(eax);
- push(Immediate(p0));
- push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
+ push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(reason))));
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// will not return here
int3();
}
+void MacroAssembler::Throw(BailoutReason reason) {
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ if (msg != NULL) {
+ RecordComment("Throw message: ");
+ RecordComment(msg);
+ }
+#endif
+
+ push(eax);
+ push(Immediate(Smi::FromInt(reason)));
+ // Disable stub call restrictions to always allow calls to throw.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
+ } else {
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
+ }
+ // will not return here
+ int3();
+}
+
+
+void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
+ Label L;
+ j(NegateCondition(cc), &L);
+ Throw(reason);
+ // will not return here
+ bind(&L);
+}
+
+
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
}
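+// Checks that string is a sequential string whose representation and
+// encoding match encoding_mask and that the untagged index is within
+// bounds; value is used as a scratch and restored, and index is
+// temporarily smi-tagged but restored before returning.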
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ Label is_object;
+ JumpIfNotSmi(string, &is_object, Label::kNear);
+ Abort(kNonObject);
+ bind(&is_object);
+
+ push(value);
+ mov(value, FieldOperand(string, HeapObject::kMapOffset));
+ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ cmp(value, Immediate(encoding_mask));
+ pop(value);
+ Check(equal, kUnexpectedStringType);
+
+  // The index comes in untagged. Tag it so it can be compared against the
+  // smi string length without using a temp register; it is restored at the
+  // end of this function.
+ SmiTag(index);
+ Check(no_overflow, kIndexIsTooLarge);
+
+ cmp(index, FieldOperand(string, String::kLengthOffset));
+ Check(less, kIndexIsTooLarge);
+
+ cmp(index, Immediate(Smi::FromInt(0)));
+ Check(greater_equal, kIndexIsNegative);
+
+ // Restore the index
+ SmiUntag(index);
+}
+
+
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
int frame_alignment = OS::ActivationFrameAlignment();
if (frame_alignment != 0) {
mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
EnumLength(edx, ebx);
- cmp(edx, Immediate(Smi::FromInt(Map::kInvalidEnumCache)));
+ cmp(edx, Immediate(Smi::FromInt(kInvalidEnumCacheSentinel)));
j(equal, call_runtime);
jmp(&start);
// Check that there are no elements. Register ecx contains the current JS
// object we've reached through the prototype chain.
+ Label no_elements;
mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
cmp(ecx, isolate()->factory()->empty_fixed_array());
+ j(equal, &no_elements);
+
+  // Second chance: the object may be using the empty slow element dictionary.
+ cmp(ecx, isolate()->factory()->empty_slow_element_dictionary());
j(not_equal, call_runtime);
+ bind(&no_elements);
mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
cmp(ecx, isolate()->factory()->null_value());
j(not_equal, &next);
}
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ ASSERT(!scratch1.is(scratch0));
+ Factory* factory = isolate()->factory();
+ Register current = scratch0;
+ Label loop_again;
+
+  // Walk up the prototype chain, starting from the object itself.
+ mov(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ bind(&loop_again);
+ mov(current, FieldOperand(current, HeapObject::kMapOffset));
+ mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
+ and_(scratch1, Map::kElementsKindMask);
+ shr(scratch1, Map::kElementsKindShift);
+ cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
+ j(equal, found);
+ mov(current, FieldOperand(current, Map::kPrototypeOffset));
+ cmp(current, Immediate(factory->null_value()));
+ j(not_equal, &loop_again);
+}
+
+
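+// Computes dividend / divisor truncated toward zero without a div
+// instruction, using the multiply-by-magic-number technique: imul leaves
+// the high half of dividend * multiplier in edx, the conditional add/sub
+// corrects for a multiplier whose sign differs from the divisor's, sar
+// applies the shift, and adding the dividend's sign bit (shr 31) rounds
+// the quotient toward zero. The result is left in edx; eax is clobbered.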
+void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
+ ASSERT(!dividend.is(eax));
+ ASSERT(!dividend.is(edx));
+ MultiplierAndShift ms(divisor);
+ mov(eax, Immediate(ms.multiplier()));
+ imul(dividend);
+ if (divisor > 0 && ms.multiplier() < 0) add(edx, dividend);
+ if (divisor < 0 && ms.multiplier() > 0) sub(edx, dividend);
+ if (ms.shift() > 0) sar(edx, ms.shift());
+ mov(eax, dividend);
+ shr(eax, 31);
+ add(edx, eax);
+}
+
+
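+// The SIMD helpers below combine the operand with a 16-byte-aligned
+// constant mask addressed as an absolute 32-bit immediate (ia32 only):
+// abs clears the per-lane sign bit, negate flips it, and notps flips
+// every bit.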
+void MacroAssembler::absps(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } float_absolute_constant =
+ { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF };
+ andps(dst,
+ Operand(reinterpret_cast<int32_t>(&float_absolute_constant),
+ RelocInfo::NONE32));
+}
+
+
+void MacroAssembler::abspd(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } double_absolute_constant =
+ { 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF };
+ andps(dst,
+ Operand(reinterpret_cast<int32_t>(&double_absolute_constant),
+ RelocInfo::NONE32));
+}
+
+
+void MacroAssembler::notps(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } float_not_constant =
+ { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+ xorps(dst,
+ Operand(reinterpret_cast<int32_t>(&float_not_constant),
+ RelocInfo::NONE32));
+}
+
+
+void MacroAssembler::negateps(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } float_negate_constant =
+ { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
+ xorps(dst,
+ Operand(reinterpret_cast<int32_t>(&float_negate_constant),
+ RelocInfo::NONE32));
+}
+
+
+void MacroAssembler::negatepd(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } double_negate_constant =
+ { 0x00000000, 0x80000000, 0x00000000, 0x80000000 };
+ xorpd(dst,
+ Operand(reinterpret_cast<int32_t>(&double_negate_constant),
+ RelocInfo::NONE32));
+}
+
+
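+// Negates each 32-bit integer lane via the two's complement identity
+// -x == ~x + 1: flip all bits with notps, then paddd a vector of ones.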
+void MacroAssembler::pnegd(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } int32_one_constant = { 0x1, 0x1, 0x1, 0x1 };
+ notps(dst);
+ paddd(dst,
+ Operand(reinterpret_cast<int32_t>(&int32_one_constant),
+ RelocInfo::NONE32));
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32