From: lrn@chromium.org
Date: Wed, 17 Jun 2009 11:50:33 +0000 (+0000)
Subject: X64: Implementation of a bunch of stubs, and some new opcodes.
X-Git-Tag: upstream/4.7.83~23878
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=e9b13d9c39d2bcde711a1b77ab86e586dd5f7aa3;p=platform%2Fupstream%2Fv8.git

X64: Implementation of a bunch of stubs, and some new opcodes.

Review URL: http://codereview.chromium.org/125185

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2203 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
diff --git a/src/globals.h b/src/globals.h
index 2b0fe15d1..bf83d0d75 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -120,8 +120,10 @@ const int kIntptrSize = sizeof(intptr_t);  // NOLINT
 
 #if V8_HOST_ARCH_64_BIT
 const int kPointerSizeLog2 = 3;
+const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
 #else
 const int kPointerSizeLog2 = 2;
+const intptr_t kIntptrSignBit = 0x80000000;
 #endif
 
 const int kObjectAlignmentBits = kPointerSizeLog2;
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 233d1851b..9c9806cbf 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -1815,7 +1815,7 @@ void Assembler::fcompp() {
 
 void Assembler::fnstsw_ax() {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  EMIT(0xdF);
+  EMIT(0xDF);
   EMIT(0xE0);
 }
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index cc6447135..eaeea194c 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -413,6 +413,16 @@ void Assembler::arithmetic_op(byte opcode, Register dst, Register src) {
   emit_modrm(dst, src);
 }
 
+
+void Assembler::arithmetic_op_32(byte opcode, Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst, src);
+  emit(opcode);
+  emit_modrm(dst, src);
+}
+
+
 void Assembler::immediate_arithmetic_op(byte subcode,
                                         Register dst,
                                         Immediate src) {
@@ -451,6 +461,27 @@ void Assembler::immediate_arithmetic_op(byte subcode,
 }
 
 
+void Assembler::immediate_arithmetic_op_32(byte subcode,
+                                           Register dst,
+                                           Immediate src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  if (is_int8(src.value_)) {
+    emit(0x83);  // Use a signed 8-bit immediate.
+    emit_modrm(subcode, dst);
+    emit(src.value_);
+  } else if (dst.is(rax)) {
+    emit(0x05 | (subcode << 3));  // Short form if the destination is rax.
+    emitl(src.value_);
+  } else {
+    emit(0x81);  // Use a full 32-bit immediate.
+    emit_modrm(subcode, dst);
+    emitl(src.value_);
+  }
+}
+
+
 void Assembler::immediate_arithmetic_op_32(byte subcode,
                                            const Operand& dst,
                                            Immediate src) {
@@ -508,6 +539,15 @@ void Assembler::shift(Register dst, int subcode) {
 }
 
 
+// Shift dst by cl % 32 bits.
+void Assembler::shift_32(Register dst, int subcode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xD3);
+  emit_modrm(subcode, dst);
+}
+
+
 void Assembler::bt(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -640,6 +680,16 @@ void Assembler::idiv(Register src) {
 }
 
 
+void Assembler::imul(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  emit(0x0F);
+  emit(0xAF);
+  emit_modrm(dst, src);
+}
+
+
 void Assembler::imul(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -666,6 +716,16 @@ void Assembler::imul(Register dst, Register src, Immediate imm) {
 }
 
 
+void Assembler::imull(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xAF);
+  emit_modrm(dst, src);
+}
+
+
 void Assembler::incq(Register dst) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
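The only difference between the new *_32 emitters above and their 64-bit counterparts is the REX prefix: the quadword forms always emit REX.W via emit_rex_64, while emit_optional_rex_32 emits a REX byte only when an extended register (r8-r15) forces one. A minimal standalone sketch of that rule, outside the patch; encode_rr and the numeric register codes are illustrative assumptions, not V8 APIs:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Encode "op dst, src" for the register-register forms the patch adds.
    std::vector<uint8_t> encode_rr(uint8_t opcode, int dst, int src, bool is64) {
      std::vector<uint8_t> out;
      uint8_t rex = 0x40 | (is64 ? 0x08 : 0)   // REX.W only for 64-bit operands
                  | ((dst >> 3) << 2)          // REX.R extends the reg field
                  | (src >> 3);                // REX.B extends the r/m field
      if (rex != 0x40) out.push_back(rex);     // REX is optional for 32-bit ops
      out.push_back(opcode);
      out.push_back(0xC0 | ((dst & 7) << 3) | (src & 7));  // ModRM, mod = 11
      return out;
    }

    int main() {
      // addq rax, rbx -> 48 03 C3 ; addl eax, ebx -> 03 C3
      for (uint8_t b : encode_rr(0x03, 0, 3, true)) printf("%02X ", b);
      printf("\n");
      for (uint8_t b : encode_rr(0x03, 0, 3, false)) printf("%02X ", b);
      printf("\n");
    }

Run, this prints 48 03 C3 for addq rax, rbx and the REX-free 03 C3 for addl eax, ebx, which matches what emit_optional_rex_32 plus emit_modrm produce for register codes below 8.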
@@ -971,6 +1031,25 @@ void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
 }
 
 
+void Assembler::movsxlq(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  emit(0x63);
+  emit_modrm(dst, src);
+}
+
+
+void Assembler::movzxbq(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  emit(0x0F);
+  emit(0xB6);
+  emit_operand(dst, src);
+}
+
+
 void Assembler::mul(Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1364,6 +1443,324 @@ void Assembler::testq(Register dst, Immediate mask) {
 }
 
 
+// FPU instructions
+
+
+void Assembler::fld(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xD9, 0xC0, i);
+}
+
+
+void Assembler::fld1() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xE8);
+}
+
+
+void Assembler::fldz() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xEE);
+}
+
+
+void Assembler::fld_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit_operand(0, adr);
+}
+
+
+void Assembler::fld_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xDD);
+  emit_operand(0, adr);
+}
+
+
+void Assembler::fstp_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit_operand(3, adr);
+}
+
+
+void Assembler::fstp_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xDD);
+  emit_operand(3, adr);
+}
+
+
+void Assembler::fild_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xDB);
+  emit_operand(0, adr);
+}
+
+
+void Assembler::fild_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xDF);
+  emit_operand(5, adr);
+}
+
+
+void Assembler::fistp_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xDB);
+  emit_operand(3, adr);
+}
+
+
+void Assembler::fisttp_s(const Operand& adr) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE3));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xDB);
+  emit_operand(1, adr);
+}
+
+
+void Assembler::fist_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xDB);
+  emit_operand(2, adr);
+}
+
+
+void Assembler::fistp_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xDF);
+  emit_operand(7, adr);  // fistp m64int is DF /7.
+}
+
+
+void Assembler::fabs() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xE1);
+}
+
+
+void Assembler::fchs() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xE0);
+}
+
+
+void Assembler::fcos() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xFF);
+}
+
+
+void Assembler::fsin() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xFE);
+}
+
+
+void Assembler::fadd(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDC, 0xC0, i);
+}
+
+
+void Assembler::fsub(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDC, 0xE8, i);
+}
+
+
+void Assembler::fisub_s(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xDA);
+  emit_operand(4, adr);
+}
+
+
+void Assembler::fmul(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDC, 0xC8, i);
+}
+
+
+void Assembler::fdiv(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDC, 0xF8, i);
+}
+
+
+void Assembler::faddp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xC0, i);
+}
+
+
+void Assembler::fsubp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xE8, i);
+}
+
+
+void Assembler::fsubrp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xE0, i);
+}
+
+
+void Assembler::fmulp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xC8, i);
+}
+
+
+void Assembler::fdivp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDE, 0xF8, i);
+}
+
+
+void Assembler::fprem() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xF8);
+}
+
+
+void Assembler::fprem1() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xF5);
+}
+
+
+void Assembler::fxch(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xD9, 0xC8, i);
+}
+
+
+void Assembler::fincstp() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xF7);
+}
+
+
+void Assembler::ffree(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDD, 0xC0, i);
+}
+
+
+void Assembler::ftst() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xE4);
+}
+
+
+void Assembler::fucomp(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDD, 0xE8, i);
+}
+
+
+void Assembler::fucompp() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xDA);
+  emit(0xE9);
+}
+
+
+void Assembler::fcompp() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xDE);
+  emit(0xD9);
+}
+
+
+void Assembler::fnstsw_ax() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xDF);
+  emit(0xE0);
+}
+
+
+void Assembler::fwait() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x9B);
+}
+
+
+void Assembler::frndint() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xFC);
+}
+
+
+void Assembler::fnclex() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xDB);
+  emit(0xE2);
+}
+
+
+void Assembler::emit_farith(int b1, int b2, int i) {
+  ASSERT(is_uint8(b1) && is_uint8(b2));  // wrong opcode
+  ASSERT(0 <= i && i < 8);  // illegal stack offset
+  emit(b1);
+  emit(b2 + i);
+}
+
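All of the register-stack FPU instructions above share the two-byte pattern emit_farith encodes: the first byte selects an instruction group and the second byte is a base opcode plus the ST(i) slot index. A hedged standalone sketch of that arithmetic, checked against the manual encodings fadd st, st(i) = DC C0+i and fxch st(i) = D9 C8+i; farith_bytes is an assumed helper, not a V8 API:

    #include <cassert>
    #include <cstdint>

    // Mirror of emit_farith's byte arithmetic for illustration.
    void farith_bytes(int b1, int b2, int i, uint8_t out[2]) {
      assert(0 <= i && i < 8);  // only ST(0)..ST(7) exist
      out[0] = static_cast<uint8_t>(b1);
      out[1] = static_cast<uint8_t>(b2 + i);
    }

    int main() {
      uint8_t buf[2];
      farith_bytes(0xDC, 0xC0, 3, buf);          // fadd(3)
      assert(buf[0] == 0xDC && buf[1] == 0xC3);  // fadd st, st(3)
      farith_bytes(0xD9, 0xC8, 1, buf);          // fxch(1)
      assert(buf[0] == 0xD9 && buf[1] == 0xC9);  // fxch st(1)
    }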
 
 // Relocation information implementations
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
@@ -1463,18 +1860,6 @@ const int RelocInfo::kApplyMask =
 
 namespace v8 {
 namespace internal {
 
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
 
 void BreakLocationIterator::ClearDebugBreakAtReturn() {
   UNIMPLEMENTED();
@@ -1501,31 +1886,6 @@ void CallIC::GenerateNormal(MacroAssembler* a, int b) {
   UNIMPLEMENTED();
 }
 
-Object* CallStubCompiler::CompileCallConstant(Object* a,
-                                              JSObject* b,
-                                              JSFunction* c,
-                                              StubCompiler::CheckType d,
-                                              Code::Flags flags) {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
-Object* CallStubCompiler::CompileCallField(Object* a,
-                                           JSObject* b,
-                                           int c,
-                                           String* d,
-                                           Code::Flags flags) {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
-Object* CallStubCompiler::CompileCallInterceptor(Object* a,
-                                                 JSObject* b,
-                                                 String* c) {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
 
 void JumpTarget::DoBind() {
   UNIMPLEMENTED();
 }
@@ -1538,60 +1898,5 @@ void JumpTarget::DoJump() {
   UNIMPLEMENTED();
 }
 
-Object* LoadStubCompiler::CompileLoadCallback(JSObject* a,
-                                              JSObject* b,
-                                              AccessorInfo* c,
-                                              String* d) {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
-Object* LoadStubCompiler::CompileLoadConstant(JSObject* a,
-                                              JSObject* b,
-                                              Object* c,
-                                              String* d) {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
-Object* LoadStubCompiler::CompileLoadField(JSObject* a,
-                                           JSObject* b,
-                                           int c,
-                                           String* d) {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
-Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* a,
-                                                 JSObject* b,
-                                                 String* c) {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
-Object* StoreStubCompiler::CompileStoreCallback(JSObject* a,
-                                                AccessorInfo* b,
-                                                String* c) {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
-Object* StoreStubCompiler::CompileStoreField(JSObject* a,
-                                             int b,
-                                             Map* c,
-                                             String* d) {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
-Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* a, String* b) {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
-Object* StubCompiler::CompileLazyCompile(Code::Flags a) {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
 
 } }  // namespace v8::internal
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 2010375ea..6a89f8dd2 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -115,6 +115,35 @@ extern Register r14;
 extern Register r15;
 extern Register no_reg;
 
+
+struct MMXRegister {
+  bool is_valid() const { return 0 <= code_ && code_ < 16; }
+  int code() const {
+    ASSERT(is_valid());
+    return code_;
+  }
+
+  int code_;
+};
+
+extern MMXRegister mm0;
+extern MMXRegister mm1;
+extern MMXRegister mm2;
+extern MMXRegister mm3;
+extern MMXRegister mm4;
+extern MMXRegister mm5;
+extern MMXRegister mm6;
+extern MMXRegister mm7;
+extern MMXRegister mm8;
+extern MMXRegister mm9;
+extern MMXRegister mm10;
+extern MMXRegister mm11;
+extern MMXRegister mm12;
+extern MMXRegister mm13;
+extern MMXRegister mm14;
+extern MMXRegister mm15;
+
+
 struct XMMRegister {
   bool is_valid() const { return 0 <= code_ && code_ < 2; }
   int code() const {
@@ -446,6 +475,9 @@ class Assembler : public Malloced {
   void movq(Register dst, ExternalReference ext);
   void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
 
+  void movsxlq(Register dst, Register src);
+  void movzxbq(Register dst, const Operand& src);
+
   // New x64 instruction to load from an immediate 64-bit pointer into RAX.
   void load_rax(void* ptr, RelocInfo::Mode rmode);
   void load_rax(ExternalReference ext);
@@ -461,6 +493,10 @@ class Assembler : public Malloced {
     arithmetic_op(0x03, dst, src);
   }
 
+  void addl(Register dst, Register src) {
+    arithmetic_op_32(0x03, dst, src);
+  }
+
   void addq(Register dst, const Operand& src) {
     arithmetic_op(0x03, dst, src);
   }
@@ -502,6 +538,10 @@ class Assembler : public Malloced {
     immediate_arithmetic_op(0x7, dst, src);
   }
 
+  void cmpl(Register dst, Immediate src) {
+    immediate_arithmetic_op_32(0x7, dst, src);
+  }
+
   void cmpq(const Operand& dst, Immediate src) {
     immediate_arithmetic_op(0x7, dst, src);
   }
@@ -540,6 +580,8 @@ class Assembler : public Malloced {
   void imul(Register dst, const Operand& src);
   // Performs the operation dst = src * imm.
   void imul(Register dst, Register src, Immediate imm);
+  // Multiply 32-bit registers.
+  void imull(Register dst, Register src);
 
   void incq(Register dst);
   void incq(const Operand& dst);
@@ -604,6 +646,10 @@ class Assembler : public Malloced {
     shift(dst, 0x4);
   }
 
+  void shll(Register dst) {
+    shift_32(dst, 0x4);
+  }
+
   void shr(Register dst, Immediate shift_amount) {
     shift(dst, shift_amount, 0x5);
   }
@@ -612,6 +658,10 @@ class Assembler : public Malloced {
     shift(dst, 0x5);
   }
 
+  void shrl(Register dst) {
+    shift_32(dst, 0x5);
+  }
+
   void store_rax(void* dst, RelocInfo::Mode mode);
   void store_rax(ExternalReference ref);
@@ -635,6 +685,10 @@ class Assembler : public Malloced {
     immediate_arithmetic_op(0x5, dst, src);
   }
 
+  void subl(Register dst, Register src) {
+    arithmetic_op_32(0x2B, dst, src);
+  }
+
   void subl(const Operand& dst, Immediate src) {
     immediate_arithmetic_op_32(0x5, dst, src);
   }
@@ -770,9 +824,12 @@ class Assembler : public Malloced {
   void fwait();
   void fnclex();
 
+  void fsin();
+  void fcos();
+  void frndint();
 
- // SSE2 instructions
+  // SSE2 instructions
   void cvttss2si(Register dst, const Operand& src);
   void cvttsd2si(Register dst, const Operand& src);
@@ -954,13 +1011,17 @@ class Assembler : public Malloced {
   // similar, differing just in the opcode or in the reg field of the
   // ModR/M byte.
   void arithmetic_op(byte opcode, Register dst, Register src);
+  void arithmetic_op_32(byte opcode, Register dst, Register src);
   void arithmetic_op(byte opcode, Register reg, const Operand& op);
   void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
   void immediate_arithmetic_op(byte subcode,
                                const Operand& dst,
                                Immediate src);
-  // Operate on a 32-bit word in memory.
+  // Operate on a 32-bit word in memory or register.
   void immediate_arithmetic_op_32(byte subcode,
                                   const Operand& dst,
                                   Immediate src);
+  void immediate_arithmetic_op_32(byte subcode,
+                                  Register dst,
+                                  Immediate src);
   // Operate on a byte in memory.
   void immediate_arithmetic_op_8(byte subcode,
                                  const Operand& dst,
                                  Immediate src);
@@ -969,8 +1030,9 @@ class Assembler : public Malloced {
   void shift(Register dst, Immediate shift_amount, int subcode);
   // Shift dst by cl % 64 bits.
   void shift(Register dst, int subcode);
+  void shift_32(Register dst, int subcode);
 
-  // void emit_farith(int b1, int b2, int i);
+  void emit_farith(int b1, int b2, int i);
 
   // labels
   // void print(Label* L);
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index dc32227b4..1629c6e8c 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -378,27 +378,27 @@ void CodeGenerator::VisitThisFunction(ThisFunction* a) {
   UNIMPLEMENTED();
 }
 
-void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* a) {
+void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
   UNIMPLEMENTED();
 }
 
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* a) {
-  UNIMPLEMENTED();
-}
+void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED();
+}
 
 void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* a) {
   UNIMPLEMENTED();
 }
 
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* a) {
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
   UNIMPLEMENTED();
 }
 
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* a) {
+void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
   UNIMPLEMENTED();
 }
 
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* a) {
+void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
   UNIMPLEMENTED();
 }
 
@@ -406,7 +406,7 @@ void CodeGenerator::GenerateLog(ZoneList<Expression*>* a) {
   UNIMPLEMENTED();
 }
 
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* a) {
+void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
   UNIMPLEMENTED();
 }
 
@@ -418,11 +418,11 @@ void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
   UNIMPLEMENTED();
 }
 
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* a) {
+void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
   UNIMPLEMENTED();
 }
 
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* a) {
+void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
   UNIMPLEMENTED();
 }
 
@@ -435,6 +435,483 @@ void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* a) {
 
 // Stub classes have public member named masm, not masm_.
 #define __ ACCESS_MASM(masm)
 
+class ToBooleanStub: public CodeStub {
+ public:
+  ToBooleanStub() { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Major MajorKey() { return ToBoolean; }
+  int MinorKey() { return 0; }
+};
+
+
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+  Label false_result, true_result, not_string;
+  __ movq(rax, Operand(rsp, 1 * kPointerSize));
+
+  // 'null' => false.
+  __ movq(kScratchRegister, Factory::null_value(), RelocInfo::EMBEDDED_OBJECT);
+  __ cmpq(rax, kScratchRegister);
+  __ j(equal, &false_result);
+
+  // Get the map and type of the heap object.
+  __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+  __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
+
+  // Undetectable => false.
+  __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
+  __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
+  __ j(not_zero, &false_result);
+
+  // JavaScript object => true.
+  __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
+  __ j(above_equal, &true_result);
+
+  // String value => false iff empty.
+  __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
+  __ j(above_equal, &not_string);
+  __ and_(rcx, Immediate(kStringSizeMask));
+  __ cmpq(rcx, Immediate(kShortStringTag));
+  __ j(not_equal, &true_result);  // Empty string is always short.
+  __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
+  __ shr(rdx, Immediate(String::kShortLengthShift));
+  __ j(zero, &false_result);
+  __ jmp(&true_result);
+
+  __ bind(&not_string);
+  // HeapNumber => false iff +0, -0, or NaN.
+  __ movq(kScratchRegister,
+          Factory::heap_number_map(),
+          RelocInfo::EMBEDDED_OBJECT);
+  __ cmpq(rdx, kScratchRegister);
+  __ j(not_equal, &true_result);
+  // TODO(x64): Don't use fp stack, use MMX registers?
+  __ fldz();  // Load zero onto fp stack.
+  // Load heap-number double value onto fp stack.
+  __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+  __ fucompp();  // Compare and pop both values.
+  __ movq(kScratchRegister, rax);
+  __ fnstsw_ax();  // Store fp status word in ax, no checking for exceptions.
+  __ testb(rax, Immediate(0x08));  // Test FP condition flag C3.
+  __ movq(rax, kScratchRegister);
+  __ j(zero, &false_result);
+  // Fall through to |true_result|.
+
+  // Return 1/0 for true/false in rax.
+  __ bind(&true_result);
+  __ movq(rax, Immediate(1));
+  __ ret(1 * kPointerSize);
+  __ bind(&false_result);
+  __ xor_(rax, rax);
+  __ ret(1 * kPointerSize);
+}
+
+
+// Flag that indicates whether or not the code that handles smi arguments
+// should be placed in the stub, inlined, or omitted entirely.
+enum GenericBinaryFlags {
+  SMI_CODE_IN_STUB,
+  SMI_CODE_INLINED
+};
+
+
+class GenericBinaryOpStub: public CodeStub {
+ public:
+  GenericBinaryOpStub(Token::Value op,
+                      OverwriteMode mode,
+                      GenericBinaryFlags flags)
+      : op_(op), mode_(mode), flags_(flags) {
+    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+  }
+
+  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+
+ private:
+  Token::Value op_;
+  OverwriteMode mode_;
+  GenericBinaryFlags flags_;
+
+  const char* GetName();
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
+           Token::String(op_),
+           static_cast<int>(mode_),
+           static_cast<int>(flags_));
+  }
+#endif
+
+  // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 13> {};
+  class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
+
+  Major MajorKey() { return GenericBinaryOp; }
+  int MinorKey() {
+    // Encode the parameters in a unique 16 bit value.
+    return OpBits::encode(op_)
+           | ModeBits::encode(mode_)
+           | FlagBits::encode(flags_);
+  }
+  void Generate(MacroAssembler* masm);
+};
+
+
+const char* GenericBinaryOpStub::GetName() {
+  switch (op_) {
+    case Token::ADD: return "GenericBinaryOpStub_ADD";
+    case Token::SUB: return "GenericBinaryOpStub_SUB";
+    case Token::MUL: return "GenericBinaryOpStub_MUL";
+    case Token::DIV: return "GenericBinaryOpStub_DIV";
+    case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
+    case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
+    case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
+    case Token::SAR: return "GenericBinaryOpStub_SAR";
+    case Token::SHL: return "GenericBinaryOpStub_SHL";
+    case Token::SHR: return "GenericBinaryOpStub_SHR";
+    default: return "GenericBinaryOpStub";
+  }
+}
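The FOOOOOOOOOOOOOMM comment above describes a 16-bit key with the mode in bits 0-1, the token in bits 2-14 and the flag in bit 15; the field widths in the BitField declarations are read off that comment, so treat the exact layout as an assumption. A plain shifts-and-masks sketch of the same packing, outside the patch:

    #include <cassert>

    // Pack (flag, op, mode) into 16 bits: FOOOOOOOOOOOOOMM.
    int MakeMinorKey(int op, int mode, int flag) {
      assert(mode < (1 << 2) && op < (1 << 13) && flag < (1 << 1));
      return (flag << 15) | (op << 2) | mode;
    }

    int main() {
      int key = MakeMinorKey(/* op */ 5, /* mode */ 2, /* flag */ 1);
      assert((key & 0x3) == 2);            // ModeBits, bits 0-1
      assert(((key >> 2) & 0x1FFF) == 5);  // OpBits, bits 2-14
      assert((key >> 15) == 1);            // FlagBits, bit 15
    }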
+
+
+void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
+  // Perform fast-case smi code for the operation (rax <op> rbx) and
+  // leave result in register rax.
+
+  // Prepare the smi check of both operands by or'ing them together
+  // before checking against the smi mask.
+  __ movq(rcx, rbx);
+  __ or_(rcx, rax);
+
+  switch (op_) {
+    case Token::ADD:
+      __ addl(rax, rbx);  // Add optimistically.
+      __ j(overflow, slow);
+      __ movsxlq(rax, rax);  // Sign extend eax into rax.
+      break;
+
+    case Token::SUB:
+      __ subl(rax, rbx);  // Subtract optimistically.
+      __ j(overflow, slow);
+      __ movsxlq(rax, rax);  // Sign extend eax into rax.
+      break;
+
+    case Token::DIV:
+    case Token::MOD:
+      // Sign extend rax into rdx:rax
+      // (also sign extends eax into edx if eax is Smi).
+      __ cqo();
+      // Check for 0 divisor.
+      __ testq(rbx, rbx);
+      __ j(zero, slow);
+      break;
+
+    default:
+      // Fall-through to smi check.
+      break;
+  }
+
+  // Perform the actual smi check.
+  ASSERT(kSmiTag == 0);  // adjust zero check if not the case
+  __ testl(rcx, Immediate(kSmiTagMask));
+  __ j(not_zero, slow);
+
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+      // Do nothing here.
+      break;
+
+    case Token::MUL:
+      // If the smi tag is 0 we can just leave the tag on one operand.
+      ASSERT(kSmiTag == 0);  // adjust code below if not the case
+      // Remove tag from one of the operands (but keep sign).
+      __ sar(rax, Immediate(kSmiTagSize));
+      // Do multiplication.
+      __ imull(rax, rbx);  // Multiplication of smis; result in eax.
+      // Go slow on overflows.
+      __ j(overflow, slow);
+      // Check for negative zero result.
+      __ movsxlq(rax, rax);  // Sign extend eax into rax.
+      __ NegativeZeroTest(rax, rcx, slow);  // use rcx = x | y
+      break;
+
+    case Token::DIV:
+      // Divide rdx:rax by rbx (where rdx:rax is equivalent to the smi in eax).
+      __ idiv(rbx);
+      // Check that the remainder is zero.
+      __ testq(rdx, rdx);
+      __ j(not_zero, slow);
+      // Check for the corner case of dividing the most negative smi
+      // by -1. We cannot use the overflow flag, since it is not set
+      // by the idiv instruction.
+      ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+      // TODO(X64): TODO(Smi): Smi implementation dependent constant.
+      // Value is Smi::FromInt(-(1 << 31)) / Smi::FromInt(-1).
+      __ cmpq(rax, Immediate(0x40000000));
+      __ j(equal, slow);
+      // Check for negative zero result.
+      __ NegativeZeroTest(rax, rcx, slow);  // use rcx = x | y
+      // Tag the result and store it in register rax.
+      ASSERT(kSmiTagSize == kTimes2);  // adjust code if not the case
+      __ lea(rax, Operand(rax, rax, kTimes1, kSmiTag));
+      break;
+
+    case Token::MOD:
+      // Divide rdx:rax by rbx.
+      __ idiv(rbx);
+      // Check for negative zero result.
+      __ NegativeZeroTest(rdx, rcx, slow);  // use rcx = x | y
+      // Move remainder to register rax.
+      __ movq(rax, rdx);
+      break;
+
+    case Token::BIT_OR:
+      __ or_(rax, rbx);
+      break;
+
+    case Token::BIT_AND:
+      __ and_(rax, rbx);
+      break;
+
+    case Token::BIT_XOR:
+      __ xor_(rax, rbx);
+      break;
+
+    case Token::SHL:
+    case Token::SHR:
+    case Token::SAR:
+      // Move the second operand into register rcx.
+      __ movq(rcx, rbx);
+      // Remove tags from operands (but keep sign).
+      __ sar(rax, Immediate(kSmiTagSize));
+      __ sar(rcx, Immediate(kSmiTagSize));
+      // Perform the operation.
+      switch (op_) {
+        case Token::SAR:
+          __ sar(rax);
+          // No checks of result necessary.
+          break;
+        case Token::SHR:
+          __ shrl(rax);  // rcx is implicit shift register.
+          // Check that the *unsigned* result fits in a smi.
+          // Neither of the two high-order bits can be set:
+          // - 0x80000000: high bit would be lost when smi tagging.
+          // - 0x40000000: this number would convert to negative when
+          //   smi tagging.
+          // These two cases can only happen with shifts
+          // by 0 or 1 when handed a valid smi.
+          __ testq(rax, Immediate(0xc0000000));
+          __ j(not_zero, slow);
+          break;
+        case Token::SHL:
+          __ shll(rax);
+          // TODO(Smi): Significant change if Smi changes.
+          // Check that the *signed* result fits in a smi.
+          // It does, if the 30th and 31st bits are equal, since then
+          // shifting the SmiTag in at the bottom doesn't change the sign.
+          ASSERT(kSmiTagSize == 1);
+          __ cmpl(rax, Immediate(0xc0000000));
+          __ j(sign, slow);
+          __ movsxlq(rax, rax);  // Extend new sign of eax into rax.
+          break;
+        default:
+          UNREACHABLE();
+      }
+      // Tag the result and store it in register rax.
+      ASSERT(kSmiTagSize == kTimes2);  // adjust code if not the case
+      __ lea(rax, Operand(rax, rax, kTimes1, kSmiTag));
+      break;
+
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
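The smi invariants this code leans on (kSmiTag == 0, kSmiTagSize == 1 in the 32-bit-payload scheme of this era) can be checked in plain C++. A hedged sketch, not V8 code; SmiTag/SmiUntag are illustrative helpers:

    #include <cassert>
    #include <cstdint>

    const int kSmiTag = 0;
    const int kSmiTagSize = 1;
    const int32_t kSmiTagMask = (1 << kSmiTagSize) - 1;

    int32_t SmiTag(int32_t n) { return n << kSmiTagSize; }
    int32_t SmiUntag(int32_t s) { return s >> kSmiTagSize; }

    int main() {
      int32_t a = SmiTag(7), b = SmiTag(5);
      // The stub or's the operands: the result is a smi iff both are.
      assert(((a | b) & kSmiTagMask) == 0);
      // ADD/SUB work directly on tagged values because the tag is zero.
      assert(a + b == SmiTag(12));
      // MUL must untag one operand first: (2x) * y == 2(x * y).
      assert(SmiUntag(a) * b == SmiTag(35));
      // Retagging via lea(rax, [rax + rax + kSmiTag]) is just doubling.
      int32_t shifted = SmiUntag(a) >> 1;  // e.g. a SAR result, untagged
      assert(shifted + shifted + kSmiTag == SmiTag(3));
    }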
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+}
+
+
+void UnarySubStub::Generate(MacroAssembler* masm) {
+}
+
+
+class CompareStub: public CodeStub {
+ public:
+  CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Condition cc_;
+  bool strict_;
+
+  Major MajorKey() { return Compare; }
+
+  int MinorKey() {
+    // Encode the two parameters in a unique 16 bit value.
+    ASSERT(static_cast<int>(cc_) < (1 << 15));
+    return (static_cast<int>(cc_) << 1) | (strict_ ? 1 : 0);
+  }
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("CompareStub (cc %d), (strict %s)\n",
+           static_cast<int>(cc_),
+           strict_ ? "true" : "false");
+  }
+#endif
+};
+
+
+void CompareStub::Generate(MacroAssembler* masm) {
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+}
+
+
+class CallFunctionStub: public CodeStub {
+ public:
+  CallFunctionStub(int argc, InLoopFlag in_loop)
+      : argc_(argc), in_loop_(in_loop) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  int argc_;
+  InLoopFlag in_loop_;
+
+#ifdef DEBUG
+  void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
+#endif
+
+  Major MajorKey() { return CallFunction; }
+  int MinorKey() { return argc_; }
+  InLoopFlag InLoop() { return in_loop_; }
+};
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+}
+
+
+void InstanceofStub::Generate(MacroAssembler* masm) {
+}
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+  // The displacement is used for skipping the return address and the
+  // frame pointer on the stack. It is the offset of the last
+  // parameter (if any) relative to the frame pointer.
+  static const int kDisplacement = 2 * kPointerSize;
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label runtime;
+  __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+  __ cmpq(rcx, Immediate(ArgumentsAdaptorFrame::SENTINEL));
+  __ j(not_equal, &runtime);
+  // Value in rcx is Smi encoded.
+
+  // Patch the arguments.length and the parameters pointer.
+  __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+  __ lea(rdx, Operand(rdx, rcx, kTimes4, kDisplacement));
+  __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+
+  // Do the runtime call to allocate the arguments object.
+  __ bind(&runtime);
+  __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3);
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+  // The key is in rdx and the parameter count is in rax.
+
+  // The displacement is used for skipping the frame pointer on the
+  // stack. It is the offset of the last parameter (if any) relative
+  // to the frame pointer.
+  static const int kDisplacement = 1 * kPointerSize;
+
+  // Check that the key is a smi.
+  Label slow;
+  __ testl(rdx, Immediate(kSmiTagMask));
+  __ j(not_zero, &slow);
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor;
+  __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+  __ movq(rcx, Operand(rbx, StandardFrameConstants::kContextOffset));
+  __ cmpq(rcx, Immediate(ArgumentsAdaptorFrame::SENTINEL));
+  __ j(equal, &adaptor);
+
+  // Check index against formal parameters count limit passed in
+  // through register rax. Use unsigned comparison to get negative
+  // check for free.
+  __ cmpq(rdx, rax);
+  __ j(above_equal, &slow);
+
+  // Read the argument from the stack and return it.
+  // Shifting code depends on SmiEncoding being equivalent to left shift:
+  // we multiply by four to get pointer alignment.
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ lea(rbx, Operand(rbp, rax, kTimes4, 0));
+  __ neg(rdx);
+  __ movq(rax, Operand(rbx, rdx, kTimes4, kDisplacement));
+  __ Ret();
+
+  // Arguments adaptor case: Check index against actual arguments
+  // limit found in the arguments adaptor frame. Use unsigned
+  // comparison to get negative check for free.
+  __ bind(&adaptor);
+  __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ cmpq(rdx, rcx);
+  __ j(above_equal, &slow);
+
+  // Read the argument from the stack and return it.
+  // Shifting code depends on SmiEncoding being equivalent to left shift:
+  // we multiply by four to get pointer alignment.
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ lea(rbx, Operand(rbx, rcx, kTimes4, 0));
+  __ neg(rdx);
+  __ movq(rax, Operand(rbx, rdx, kTimes4, kDisplacement));
+  __ Ret();
+
+  // Slow-case: Handle non-smi or out-of-bounds access to arguments
+  // by calling the runtime system.
+  __ bind(&slow);
+  __ pop(rbx);  // Return address.
+  __ push(rdx);
+  __ push(rbx);
+  __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1);
+}
+
+
+void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor;
+  __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+  __ cmpq(rcx, Immediate(ArgumentsAdaptorFrame::SENTINEL));
+  __ j(equal, &adaptor);
+
+  // Nothing to do: The formal number of parameters has already been
+  // passed in register rax by the calling function. Just return it.
+  __ ret(0);
+
+  // Arguments adaptor case: Read the arguments length from the
+  // adaptor frame and return it.
+  __ bind(&adaptor);
+  __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ ret(0);
+}
+
+
 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
   // Check that stack should contain frame pointer, code pointer, state and
   // return address in that order.
@@ -780,7 +1257,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   __ ret(0);
 }
 
-
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index c47bfccf1..27a1f01b9 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -59,6 +59,18 @@ void MacroAssembler::Check(Condition cc, const char* msg) {
 }
 
 
+void MacroAssembler::NegativeZeroTest(Register result,
+                                      Register op,
+                                      Label* then_label) {
+  Label ok;
+  testq(result, result);
+  j(not_zero, &ok);
+  testq(op, op);
+  j(sign, then_label);
+  bind(&ok);
+}
+
+
 void MacroAssembler::ConstructAndTestJSFunction() {
   const int initial_buffer_size = 4 * KB;
   char* buffer = new char[initial_buffer_size];
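NegativeZeroTest branches to then_label exactly when the integer result is zero but the or of the original operands has its sign bit set, i.e. when the mathematically correct result is -0, which cannot be represented as a smi and must become a HeapNumber. The same predicate in plain C++ (a sketch; the function name is made up):

    #include <cassert>

    // A zero result is a valid smi 0 only if neither input was negative;
    // x | y preserves the sign bit of either operand.
    bool NeedsSlowCaseForNegativeZero(int result, int ored_operands) {
      return result == 0 && ored_operands < 0;
    }

    int main() {
      assert(NeedsSlowCaseForNegativeZero(0, -4 | 0));   // -4 * 0 -> -0
      assert(!NeedsSlowCaseForNegativeZero(0, 4 | 0));   //  4 * 0 -> +0
      assert(!NeedsSlowCaseForNegativeZero(8, -4 | -2)); // nonzero result is fine
    }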
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 209aa2d30..3a2cb834b 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -25,3 +25,107 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+
+#include "v8.h"
+
+#include "ic-inl.h"
+#include "codegen-inl.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+Object* CallStubCompiler::CompileCallConstant(Object* a,
+                                              JSObject* b,
+                                              JSFunction* c,
+                                              StubCompiler::CheckType d,
+                                              Code::Flags flags) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+Object* CallStubCompiler::CompileCallField(Object* a,
+                                           JSObject* b,
+                                           int c,
+                                           String* d,
+                                           Code::Flags flags) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+Object* CallStubCompiler::CompileCallInterceptor(Object* a,
+                                                 JSObject* b,
+                                                 String* c) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+Object* LoadStubCompiler::CompileLoadCallback(JSObject* a,
+                                              JSObject* b,
+                                              AccessorInfo* c,
+                                              String* d) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+Object* LoadStubCompiler::CompileLoadConstant(JSObject* a,
+                                              JSObject* b,
+                                              Object* c,
+                                              String* d) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+Object* LoadStubCompiler::CompileLoadField(JSObject* a,
+                                           JSObject* b,
+                                           int c,
+                                           String* d) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* a,
+                                                 JSObject* b,
+                                                 String* c) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+Object* StoreStubCompiler::CompileStoreCallback(JSObject* a,
+                                                AccessorInfo* b,
+                                                String* c) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+Object* StoreStubCompiler::CompileStoreField(JSObject* a,
+                                             int b,
+                                             Map* c,
+                                             String* d) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* a, String* b) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+Object* StubCompiler::CompileLazyCompile(Code::Flags a) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+} }  // namespace v8::internal