From: whesse@chromium.org
Date: Tue, 21 Jul 2009 13:30:46 +0000 (+0000)
Subject: Make stub cache hash work on X64 platform. Stub cache now works.
X-Git-Tag: upstream/4.7.83~23604
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=ff242173c3cc57d656927096d718ac9fe16394d7;p=platform%2Fupstream%2Fv8.git

Make stub cache hash work on X64 platform. Stub cache now works.

Switch arguments of 32-bit arithmetic instructions so they are consistent
with 64-bit arithmetic instructions (all on X64 platforms).

Review URL: http://codereview.chromium.org/155849

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2516 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---

diff --git a/src/stub-cache.h b/src/stub-cache.h
index 8bee370..c6b002b 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -256,11 +256,14 @@ class StubCache : public AllStatic {
   }
 
   // Compute the entry for a given offset in exactly the same way as
-  // we done in generated code. This makes it a lot easier to avoid
-  // making mistakes in the hashed offset computations.
+  // we do in generated code. We generate a hash code that already
+  // ends in String::kHashShift 0s. Then we shift it so it is a multiple
+  // of sizeof(Entry). This makes it easier to avoid making mistakes
+  // in the hashed offset computations.
   static Entry* entry(Entry* table, int offset) {
+    const int shift_amount = kPointerSizeLog2 + 1 - String::kHashShift;
     return reinterpret_cast<Entry*>(
-        reinterpret_cast<Address>(table) + (offset << 1));
+        reinterpret_cast<Address>(table) + (offset << shift_amount));
   }
 };
 
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index f843078..e9a6f7f 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -456,13 +456,13 @@ void Assembler::arithmetic_op_32(byte opcode, Register dst, Register src) {
 
 
 void Assembler::arithmetic_op_32(byte opcode,
-                                 const Operand& dst,
-                                 Register src) {
+                                 Register reg,
+                                 const Operand& rm_reg) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  emit_optional_rex_32(src, dst);
+  emit_optional_rex_32(reg, rm_reg);
   emit(opcode);
-  emit_operand(src, dst);
+  emit_operand(reg, rm_reg);
 }
 
 
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index ab33736..1b2a35c 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -521,10 +521,6 @@ class Assembler : public Malloced {
   void xchg(Register dst, Register src);
 
   // Arithmetics
-  void addq(Register dst, Register src) {
-    arithmetic_op(0x03, dst, src);
-  }
-
   void addl(Register dst, Register src) {
     arithmetic_op_32(0x03, dst, src);
   }
@@ -533,14 +529,21 @@
     immediate_arithmetic_op_32(0x0, dst, src);
   }
 
+  void addl(Register dst, const Operand& src) {
+    arithmetic_op_32(0x03, dst, src);
+  }
+
   void addl(const Operand& dst, Immediate src) {
     immediate_arithmetic_op_32(0x0, dst, src);
   }
 
-  void addq(Register dst, const Operand& src) {
+  void addq(Register dst, Register src) {
     arithmetic_op(0x03, dst, src);
   }
 
+  void addq(Register dst, const Operand& src) {
+    arithmetic_op(0x03, dst, src);
+  }
   void addq(const Operand& dst, Register src) {
     arithmetic_op(0x01, src, dst);
   }
@@ -567,11 +570,11 @@
   }
 
   void cmpl(Register dst, const Operand& src) {
-    arithmetic_op_32(0x3B, src, dst);
+    arithmetic_op_32(0x3B, dst, src);
   }
 
   void cmpl(const Operand& dst, Register src) {
-    arithmetic_op_32(0x39, dst, src);
+    arithmetic_op_32(0x39, src, dst);
   }
 
   void cmpl(Register dst, Immediate src) {
@@ -1118,8 +1121,8 @@
   // ModR/M byte.
   void arithmetic_op(byte opcode, Register dst, Register src);
   void arithmetic_op_32(byte opcode, Register dst, Register src);
-  void arithmetic_op_32(byte opcode, const Operand& dst, Register src);
-  void arithmetic_op(byte opcode, Register reg, const Operand& op);
+  void arithmetic_op_32(byte opcode, Register reg, const Operand& rm_reg);
+  void arithmetic_op(byte opcode, Register reg, const Operand& rm_reg);
   void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
   void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
   // Operate on a 32-bit word in memory or register.
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 65d6a99..7f5c1ca 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -564,6 +564,36 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
 #define __ ACCESS_MASM(masm)
 
 
+static void ProbeTable(MacroAssembler* masm,
+                       Code::Flags flags,
+                       StubCache::Table table,
+                       Register name,
+                       Register offset) {
+  ExternalReference key_offset(SCTableReference::keyReference(table));
+  Label miss;
+
+  __ movq(kScratchRegister, key_offset);
+  // Check that the key in the entry matches the name.
+  __ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
+  __ j(not_equal, &miss);
+  // Get the code entry from the cache.
+  // Use key_offset + kPointerSize, rather than loading value_offset.
+  __ movq(kScratchRegister,
+          Operand(kScratchRegister, offset, times_4, kPointerSize));
+  // Check that the flags match what we're looking for.
+  __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
+  __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
+  __ cmpl(offset, Immediate(flags));
+  __ j(not_equal, &miss);
+
+  // Jump to the first instruction in the code stub.
+  __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ jmp(kScratchRegister);
+
+  __ bind(&miss);
+}
+
+
 void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
   ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
   Code* code = NULL;
@@ -625,7 +655,43 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
                               Register scratch,
                               Register extra) {
   Label miss;
-  // TODO(X64): Probe the primary and secondary StubCache tables.
+  USE(extra);  // The register extra is not used on the X64 platform.
+  // Make sure that code is valid. The shifting code relies on the
+  // entry size being 16.
+  ASSERT(sizeof(Entry) == 16);
+
+  // Make sure the flags do not name a specific type.
+  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Make sure that there are no register conflicts.
+  ASSERT(!scratch.is(receiver));
+  ASSERT(!scratch.is(name));
+
+  // Check that the receiver isn't a smi.
+  __ testl(receiver, Immediate(kSmiTagMask));
+  __ j(zero, &miss);
+
+  // Get the map of the receiver and compute the hash.
+  __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+  // Use only the low 32 bits of the map pointer.
+  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xor_(scratch, Immediate(flags));
+  __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+
+  // Probe the primary table.
+  ProbeTable(masm, flags, kPrimary, name, scratch);
+
+  // Primary miss: Compute hash for secondary probe.
+  __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xor_(scratch, Immediate(flags));
+  __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+  __ subl(scratch, name);
+  __ addl(scratch, Immediate(flags));
+  __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+
+  // Probe the secondary table.
+  ProbeTable(masm, flags, kSecondary, name, scratch);
 
   // Cache miss: Fall-through and let caller handle the miss by
   // entering the runtime system.
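
The new StubCache::entry() arithmetic can be sanity-checked outside the tree. The following is a minimal standalone sketch, not V8 source: it assumes the X64 value kPointerSizeLog2 == 3, a hash shift of 2 (so hashes arrive with their low kHeapObjectTagSize bits clear, matching the masks GenerateProbe applies), a primary table of 2048 entries, and a two-pointer Entry. Under those assumptions it checks that offset << (kPointerSizeLog2 + 1 - hash shift) always lands on a sizeof(Entry) == 16 boundary inside the table, which is exactly what lets the generated code index the key column with a times_4 scale.

// Standalone sketch, not V8 source. All constants below are assumptions
// chosen to model the X64 configuration described in the patch.
#include <cassert>
#include <cstdint>
#include <cstdio>

namespace {

struct Entry { void* key; void* value; };  // 16 bytes on a 64-bit build.

const int kPointerSizeLog2 = 3;    // log2(8) on X64.
const int kStringHashShift = 2;    // Assumed: hashes end in 2 zero bits.
const int kHeapObjectTagSize = 2;
const int kPrimaryTableSize = 2048;  // Assumed table size.

Entry* entry(Entry* table, int offset) {
  // Mirrors the patched StubCache::entry(): the offset already ends in
  // kStringHashShift zero bits, so this shift turns it into a byte offset
  // that is a multiple of sizeof(Entry).
  const int shift_amount = kPointerSizeLog2 + 1 - kStringHashShift;
  return reinterpret_cast<Entry*>(
      reinterpret_cast<uintptr_t>(table) + (offset << shift_amount));
}

}  // namespace

int main() {
  static Entry table[kPrimaryTableSize];
  // GenerateProbe masks the hash with
  // (kPrimaryTableSize - 1) << kHeapObjectTagSize, so every offset handed to
  // entry() has its low kHeapObjectTagSize bits clear.
  for (int offset = 0;
       offset <= ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
       offset += 1 << kHeapObjectTagSize) {
    Entry* e = entry(table, offset);
    uintptr_t byte_offset =
        reinterpret_cast<uintptr_t>(e) - reinterpret_cast<uintptr_t>(table);
    assert(byte_offset % sizeof(Entry) == 0);             // lands on an Entry
    assert(e >= table && e < table + kPrimaryTableSize);  // stays in range
    // Same element the generated code reaches with a times_4 index scale.
    assert(byte_offset == static_cast<uintptr_t>(offset) * 4);
  }
  std::puts("entry offsets are Entry-aligned and in range");
  return 0;
}

With the same formula, ia32 (kPointerSizeLog2 == 2) gives a shift of 1, which is the old hard-coded offset << 1; only the X64 shift changes.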
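The assembler change is about operand direction rather than behaviour: for opcode pairs such as 0x39/0x3B (cmp) and 0x01/0x03 (add), the register operand always fills the ModR/M reg field, and the opcode alone decides whether that register is the left- or right-hand operand. Making arithmetic_op_32 take (opcode, reg, rm_reg), like the 64-bit helper, is what forces the cmpl call sites above to swap their arguments. A stripped-down sketch, not the V8 assembler (it ignores REX, SIB and displacements and only handles a bare [base] operand), shows the two encodings:

// Standalone sketch of the opcode/ModR/M direction, not the V8 assembler.
#include <cstdint>
#include <cstdio>
#include <vector>

namespace {

// ModR/M register codes; code 0 is eax/rax, code 3 is ebx/rbx.
enum RegCode : uint8_t { kRegEax = 0, kRegEbx = 3 };

// Emits "opcode, ModR/M" for a register and a bare [base] memory operand,
// the shape the patched arithmetic_op_32(opcode, reg, rm_reg) produces:
// the register always goes into the ModR/M reg field.
void EmitRegMem(std::vector<uint8_t>* out, uint8_t opcode,
                RegCode reg, RegCode base) {
  out->push_back(opcode);
  out->push_back(static_cast<uint8_t>((reg << 3) | base));  // mod = 00
}

}  // namespace

int main() {
  std::vector<uint8_t> a, b;
  EmitRegMem(&a, 0x3B, kRegEax, kRegEbx);  // cmp eax, [rbx]: register on the left
  EmitRegMem(&b, 0x39, kRegEax, kRegEbx);  // cmp [rbx], eax: register on the right
  std::printf("cmp eax, [rbx]: %02X %02X\n", a[0], a[1]);  // 3B 03
  std::printf("cmp [rbx], eax: %02X %02X\n", b[0], b[1]);  // 39 03
  return 0;
}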
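For reference, the probe offsets that GenerateProbe computes in registers can be written as plain 32-bit integer arithmetic. The sketch below is not V8 source; the table sizes are assumptions chosen only to give the masks concrete values, and the helper names are invented for the example. It mirrors the instruction sequence above: add the name's hash field and the low 32 bits of the map pointer, xor in the flags, and mask into the primary table; on a primary miss, subtract the low 32 bits of the name pointer, add the flags back in, and mask into the secondary table.

// Standalone sketch of the probe hash arithmetic, not V8 source.
#include <cstdint>
#include <cstdio>

namespace {

const uint32_t kPrimaryTableSize = 2048;   // assumption
const uint32_t kSecondaryTableSize = 512;  // assumption
const uint32_t kHeapObjectTagSize = 2;

// name_hash_field models the 32-bit value loaded from String::kLengthOffset;
// map_low32 models the low 32 bits of the receiver's map pointer.
uint32_t PrimaryOffset(uint32_t name_hash_field, uint32_t map_low32,
                       uint32_t flags) {
  uint32_t scratch = name_hash_field + map_low32;  // movl + addl
  scratch ^= flags;                                // xor_
  return scratch & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);  // and_
}

// On a primary miss the hash is recomputed, mixed with the low 32 bits of
// the name pointer and the flags, and masked into the secondary table.
uint32_t SecondaryOffset(uint32_t name_hash_field, uint32_t map_low32,
                         uint32_t name_low32, uint32_t flags) {
  uint32_t scratch = PrimaryOffset(name_hash_field, map_low32, flags);
  scratch -= name_low32;  // subl (wraps like a 32-bit register)
  scratch += flags;       // addl
  return scratch & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
}

}  // namespace

int main() {
  // Arbitrary sample inputs, just to show both offsets stay 4-byte aligned
  // and inside their tables, as the times_4 indexing in ProbeTable expects.
  uint32_t p = PrimaryOffset(0x1234u << 2, 0xdeadbeefu, 0x00000300u);
  uint32_t s = SecondaryOffset(0x1234u << 2, 0xdeadbeefu, 0x08761230u, 0x00000300u);
  std::printf("primary offset:   %u\n", p);
  std::printf("secondary offset: %u\n", s);
  return (p % 4 == 0 && s % 4 == 0) ? 0 : 1;
}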