}
// Compute the entry for a given offset in exactly the same way as
- // we done in generated code. This makes it a lot easier to avoid
- // making mistakes in the hashed offset computations.
+ // we do in generated code. We generate a hash code that already
+ // ends in String::kHashShift 0s. Then we shift it so it is a multiple
+ // of sizeof(Entry). This makes it easier to avoid making mistakes
+ // in the hashed offset computations.
static Entry* entry(Entry* table, int offset) {
+ const int shift_amount = kPointerSizeLog2 + 1 - String::kHashShift;
return reinterpret_cast<Entry*>(
- reinterpret_cast<Address>(table) + (offset << 1));
+ reinterpret_cast<Address>(table) + (offset << shift_amount));
}
};
void Assembler::arithmetic_op_32(byte opcode,
- const Operand& dst,
- Register src) {
+ Register reg,
+ const Operand& rm_reg) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_optional_rex_32(src, dst);
+ emit_optional_rex_32(reg, rm_reg);
emit(opcode);
- emit_operand(src, dst);
+ emit_operand(reg, rm_reg);
}
void xchg(Register dst, Register src);
// Arithmetics
- void addq(Register dst, Register src) {
- arithmetic_op(0x03, dst, src);
- }
-
void addl(Register dst, Register src) {
arithmetic_op_32(0x03, dst, src);
}
immediate_arithmetic_op_32(0x0, dst, src);
}
+ void addl(Register dst, const Operand& src) {
+ arithmetic_op_32(0x03, dst, src);
+ }
+
void addl(const Operand& dst, Immediate src) {
immediate_arithmetic_op_32(0x0, dst, src);
}
- void addq(Register dst, const Operand& src) {
+ void addq(Register dst, Register src) {
arithmetic_op(0x03, dst, src);
}
+ void addq(Register dst, const Operand& src) {
+ arithmetic_op(0x03, dst, src);
+ }
void addq(const Operand& dst, Register src) {
arithmetic_op(0x01, src, dst);
}
void cmpl(Register dst, const Operand& src) {
- arithmetic_op_32(0x3B, src, dst);
+ arithmetic_op_32(0x3B, dst, src);
}
void cmpl(const Operand& dst, Register src) {
- arithmetic_op_32(0x39, dst, src);
+ arithmetic_op_32(0x39, src, dst);
}
void cmpl(Register dst, Immediate src) {
// ModR/M byte.
void arithmetic_op(byte opcode, Register dst, Register src);
void arithmetic_op_32(byte opcode, Register dst, Register src);
- void arithmetic_op_32(byte opcode, const Operand& dst, Register src);
- void arithmetic_op(byte opcode, Register reg, const Operand& op);
+ void arithmetic_op_32(byte opcode, Register reg, const Operand& rm_reg);
+ void arithmetic_op(byte opcode, Register reg, const Operand& rm_reg);
void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
// Operate on a 32-bit word in memory or register.
#define __ ACCESS_MASM(masm)
+static void ProbeTable(MacroAssembler* masm,
+ Code::Flags flags,
+ StubCache::Table table,
+ Register name,
+ Register offset) {
+ ExternalReference key_offset(SCTableReference::keyReference(table));
+ Label miss;
+
+ __ movq(kScratchRegister, key_offset);
+ // Check that the key in the entry matches the name.
+ __ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
+ __ j(not_equal, &miss);
+ // Get the code entry from the cache.
+ // Use key_offset + kPointerSize, rather than loading value_offset.
+ __ movq(kScratchRegister,
+ Operand(kScratchRegister, offset, times_4, kPointerSize));
+ // Check that the flags match what we're looking for.
+ __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
+ __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
+ __ cmpl(offset, Immediate(flags));
+ __ j(not_equal, &miss);
+
+ // Jump to the first instruction in the code stub.
+ __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(kScratchRegister);
+
+ __ bind(&miss);
+}
+
+
void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
Code* code = NULL;
Register scratch,
Register extra) {
Label miss;
- // TODO(X64): Probe the primary and secondary StubCache tables.
+ USE(extra); // The register extra is not used on the X64 platform.
+ // Make sure that code is valid. The shifting code relies on the
+ // entry size being 16.
+ ASSERT(sizeof(Entry) == 16);
+
+ // Make sure the flags do not name a specific type.
+ ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ ASSERT(!scratch.is(receiver));
+ ASSERT(!scratch.is(name));
+
+ // Check that the receiver isn't a smi.
+ __ testl(receiver, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+
+ // Get the map of the receiver and compute the hash.
+ __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+ // Use only the low 32 bits of the map pointer.
+ __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xor_(scratch, Immediate(flags));
+ __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+
+ // Probe the primary table.
+ ProbeTable(masm, flags, kPrimary, name, scratch);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+ __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xor_(scratch, Immediate(flags));
+ __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+ __ subl(scratch, name);
+ __ addl(scratch, Immediate(flags));
+ __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+
+ // Probe the secondary table.
+ ProbeTable(masm, flags, kSecondary, name, scratch);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.