}
+void Assembler::dext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+ // Should be called via MacroAssembler::Dext.
+ // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, DEXT);
+}
+
+
void Assembler::pref(int32_t hint, const MemOperand& rs) {
DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
void clz(Register rd, Register rs);
void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void dext_(Register rt, Register rs, uint16_t pos, uint16_t size);
// --------Coprocessor-instructions----------------
DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
}
- // JumpToExternalReference expects s0 to contain the number of arguments
+ // JumpToExternalReference expects a0 to contain the number of arguments
// including the receiver and the extra arguments.
- __ Daddu(s0, a0, num_extra_args + 1);
- __ dsll(s1, s0, kPointerSizeLog2);
- __ Dsubu(s1, s1, kPointerSize);
+ __ Daddu(a0, a0, num_extra_args + 1);
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
void CEntryStub::Generate(MacroAssembler* masm) {
// Called from JavaScript; parameters are on stack as if calling JS function
- // s0: number of arguments including receiver
- // s1: size of arguments excluding receiver
- // s2: pointer to builtin function
+ // a0: number of arguments including receiver
+ // a1: pointer to builtin function
// fp: frame pointer (restored after C call)
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
ProfileEntryHookStub::MaybeCallEntryHook(masm);
- // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
- // The reason for this is that these arguments would need to be saved anyway
- // so it's faster to set them up directly.
- // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
-
// Compute the argv pointer in a callee-saved register.
+ __ dsll(s1, a0, kPointerSizeLog2);
__ Daddu(s1, sp, s1);
+ __ Dsubu(s1, s1, kPointerSize);
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
// Prepare arguments for C routine.
// a0 = argc
- __ mov(a0, s0);
+ __ mov(s0, a0);
+ __ mov(s2, a1);
// a1 = argv (set in the delay slot after find_ra below).
// We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
void InstanceofStub::Generate(MacroAssembler* masm) {
// Call site inlining and patching implies arguments in registers.
DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
- // ReturnTrueFalse is only implemented for inlined call sites.
- DCHECK(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
// Fixed register usage throughout the stub:
const Register object = a0; // Object (lhs).
// If there is a call site cache don't look in the global cache, but do the
// real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck()) {
+ if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
Label miss;
__ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
__ Branch(&miss, ne, function, Operand(at));
if (!HasCallSiteInlineCheck()) {
__ mov(v0, zero_reg);
__ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+ if (ReturnTrueFalseObject()) {
+ __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+ }
} else {
// Patch the call site to return true.
__ LoadRoot(v0, Heap::kTrueValueRootIndex);
if (!HasCallSiteInlineCheck()) {
__ li(v0, Operand(Smi::FromInt(1)));
__ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+ if (ReturnTrueFalseObject()) {
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ }
} else {
// Patch the call site to return false.
__ LoadRoot(v0, Heap::kFalseValueRootIndex);
// Null is not instance of anything.
__ Branch(&object_not_null, ne, object,
Operand(isolate()->factory()->null_value()));
- __ li(v0, Operand(Smi::FromInt(1)));
+ if (ReturnTrueFalseObject()) {
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ } else {
+ __ li(v0, Operand(Smi::FromInt(1)));
+ }
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
__ bind(&object_not_null);
// Smi values are not instances of anything.
__ JumpIfNotSmi(object, &object_not_null_or_smi);
- __ li(v0, Operand(Smi::FromInt(1)));
+ if (ReturnTrueFalseObject()) {
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ } else {
+ __ li(v0, Operand(Smi::FromInt(1)));
+ }
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
__ bind(&object_not_null_or_smi);
// String values are not instances of anything.
__ IsObjectJSStringType(object, scratch, &slow);
- __ li(v0, Operand(Smi::FromInt(1)));
+ if (ReturnTrueFalseObject()) {
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ } else {
+ __ li(v0, Operand(Smi::FromInt(1)));
+ }
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
// Slow-case. Tail call builtin.
switch (FunctionFieldRaw()) {
case INS:
case EXT:
+ case DEXT:
return kRegisterType;
default:
return kUnsupported;
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
int params = descriptor->GetHandlerParameterCount();
- output_frame->SetRegister(s0.code(), params);
- output_frame->SetRegister(s1.code(), (params - 1) * kPointerSize);
- output_frame->SetRegister(s2.code(), handler);
+ output_frame->SetRegister(a0.code(), params);
+ output_frame->SetRegister(a1.code(), handler);
}
Format(instr, "ext 'rt, 'rs, 'sa, 'ss1");
break;
}
+ case DEXT: {
+ Format(instr, "dext 'rt, 'rs, 'sa, 'ss1");
+ break;
+ }
default:
UNREACHABLE();
}
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- has_frame_(false) {
+ has_frame_(false),
+ has_double_zero_reg_set_(false) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
isolate());
}
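+// Mulhu: rd = high 32 bits of the unsigned 32-bit product rs * rt.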
+void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (kArchVariant != kMips64r6) {
+ multu(rs, rt.rm());
+ mfhi(rd);
+ } else {
+ muhu(rd, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (kArchVariant != kMips64r6) {
+ multu(rs, at);
+ mfhi(rd);
+ } else {
+ muhu(rd, rs, at);
+ }
+ }
+}
+
+
void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant == kMips64r6) {
}
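+// Div: res = rs / rt (signed 32-bit quotient).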
+void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (kArchVariant != kMips64r6) {
+ div(rs, rt.rm());
+ mflo(res);
+ } else {
+ div(res, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (kArchVariant != kMips64r6) {
+ div(rs, at);
+ mflo(res);
+ } else {
+ div(res, rs, at);
+ }
+ }
+}
+
+
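+// Mod: rd = rs % rt (signed 32-bit remainder).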
+void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (kArchVariant != kMips64r6) {
+ div(rs, rt.rm());
+ mfhi(rd);
+ } else {
+ mod(rd, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (kArchVariant != kMips64r6) {
+ div(rs, at);
+ mfhi(rd);
+ } else {
+ mod(rd, rs, at);
+ }
+ }
+}
+
+
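+// Modu: rd = rs % rt (unsigned 32-bit remainder).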
+void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (kArchVariant != kMips64r6) {
+ divu(rs, rt.rm());
+ mfhi(rd);
+ } else {
+ modu(rd, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (kArchVariant != kMips64r6) {
+ divu(rs, at);
+ mfhi(rd);
+ } else {
+ modu(rd, rs, at);
+ }
+ }
+}
+
+
void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
if (rt.is_reg()) {
ddiv(rs, rt.rm());
}
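+// Divu: res = rs / rt (unsigned 32-bit quotient).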
+void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (kArchVariant != kMips64r6) {
+ divu(rs, rt.rm());
+ mflo(res);
+ } else {
+ divu(res, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (kArchVariant != kMips64r6) {
+ divu(rs, at);
+ mflo(res);
+ } else {
+ divu(res, rs, at);
+ }
+ }
+}
+
+
void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
ddivu(rs, rt.rm());
}
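+// Ddivu: res = rs / rt (unsigned 64-bit quotient).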
+void MacroAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (kArchVariant != kMips64r6) {
+ ddivu(rs, rt.rm());
+ mflo(res);
+ } else {
+ ddivu(res, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (kArchVariant != kMips64r6) {
+ ddivu(rs, at);
+ mflo(res);
+ } else {
+ ddivu(res, rs, at);
+ }
+ }
+}
+
+
void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
if (kArchVariant != kMips64r6) {
if (rt.is_reg()) {
}
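+// Dmodu: rd = rs % rt (unsigned 64-bit remainder).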
+void MacroAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
+ if (kArchVariant != kMips64r6) {
+ if (rt.is_reg()) {
+ ddivu(rs, rt.rm());
+ mfhi(rd);
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ ddivu(rs, at);
+ mfhi(rd);
+ }
+ } else {
+ if (rt.is_reg()) {
+ dmodu(rd, rs, rt.rm());
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ dmodu(rd, rs, at);
+ }
+ }
+}
+
+
void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
and_(rd, rs, rt.rm());
void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
- if (kArchVariant == kMips64r2) {
- if (rt.is_reg()) {
- rotrv(rd, rs, rt.rm());
- } else {
- rotr(rd, rs, rt.imm64_);
- }
+ if (rt.is_reg()) {
+ rotrv(rd, rs, rt.rm());
} else {
- if (rt.is_reg()) {
- subu(at, zero_reg, rt.rm());
- sllv(at, rs, at);
- srlv(rd, rs, rt.rm());
- or_(rd, rd, at);
- } else {
- if (rt.imm64_ == 0) {
- srl(rd, rs, 0);
- } else {
- srl(at, rs, rt.imm64_);
- sll(rd, rs, (0x20 - rt.imm64_) & 0x1f);
- or_(rd, rd, at);
- }
- }
+ rotr(rd, rs, rt.imm64_);
}
}
}
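+// Dext: extract 'size' bits of 'rs', starting at bit 'pos', into the low bits
+// of 'rt' (zero-extended).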
+void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
+ uint16_t size) {
+ DCHECK(pos < 32);
+ DCHECK(pos + size < 33);
+ dext_(rt, rs, pos, size);
+}
+
+
void MacroAssembler::Ins(Register rt,
Register rs,
uint16_t pos,
static const DoubleRepresentation zero(0.0);
DoubleRepresentation value_rep(imm);
// Handle special values first.
- bool force_load = dst.is(kDoubleRegZero);
- if (value_rep == zero && !force_load) {
+ if (value_rep == zero && has_double_zero_reg_set_) {
mov_d(dst, kDoubleRegZero);
- } else if (value_rep == minus_zero && !force_load) {
+ } else if (value_rep == minus_zero && has_double_zero_reg_set_) {
neg_d(dst, kDoubleRegZero);
} else {
uint32_t lo, hi;
} else {
mthc1(zero_reg, dst);
}
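+    // Remember that kDoubleRegZero now holds 0.0, so subsequent loads of
+    // 0.0 / -0.0 can reuse it via mov_d / neg_d above.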
+ if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
}
}
b(offset);
break;
case eq:
- // We don't want any other register but scratch clobbered.
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- beq(rs, r2, offset);
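+        // A comparison against the immediate 0 can use zero_reg directly;
+        // no scratch register load is needed.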
+ if (rt.imm64_ == 0) {
+ beq(rs, zero_reg, offset);
+ } else {
+ // We don't want any other register but scratch clobbered.
+ DCHECK(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ beq(rs, r2, offset);
+ }
break;
case ne:
- // We don't want any other register but scratch clobbered.
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- bne(rs, r2, offset);
+ if (rt.imm64_ == 0) {
+ bne(rs, zero_reg, offset);
+ } else {
+ // We don't want any other register but scratch clobbered.
+ DCHECK(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ bne(rs, r2, offset);
+ }
break;
// Signed comparison.
case greater:
b(offset);
break;
case eq:
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- offset = shifted_branch_offset(L, false);
- beq(rs, r2, offset);
+ if (rt.imm64_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ beq(rs, zero_reg, offset);
+ } else {
+ DCHECK(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ offset = shifted_branch_offset(L, false);
+ beq(rs, r2, offset);
+ }
break;
case ne:
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- offset = shifted_branch_offset(L, false);
- bne(rs, r2, offset);
+ if (rt.imm64_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ bne(rs, zero_reg, offset);
+ } else {
+ DCHECK(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ offset = shifted_branch_offset(L, false);
+ bne(rs, r2, offset);
+ }
break;
// Signed comparison.
case greater:
}
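+// Immediate-operand variant of AdduAndCheckForOverflow: dst = left + right,
+// with overflow_dst negative iff the signed addition overflowed. Clobbers t9
+// when 'right' is an immediate.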
+void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
+ const Operand& right,
+ Register overflow_dst,
+ Register scratch) {
+ if (right.is_reg()) {
+ AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+ } else {
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ daddiu(dst, left, right.immediate()); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ // Load right since xori takes uint16 as immediate.
+ daddiu(t9, zero_reg, right.immediate());
+ xor_(overflow_dst, dst, t9);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ daddiu(dst, left, right.immediate());
+ xor_(overflow_dst, dst, left);
+ // Load right since xori takes uint16 as immediate.
+ daddiu(t9, zero_reg, right.immediate());
+ xor_(scratch, dst, t9);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ }
+}
+
+
void MacroAssembler::AdduAndCheckForOverflow(Register dst,
Register left,
Register right,
}
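+// Immediate-operand variant of SubuAndCheckForOverflow: dst = left - right,
+// with overflow_dst negative iff the signed subtraction overflowed. Clobbers
+// t9 when 'right' is an immediate.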
+void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
+ const Operand& right,
+ Register overflow_dst,
+ Register scratch) {
+ if (right.is_reg()) {
+ SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+ } else {
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ daddiu(dst, left, -(right.immediate())); // Left is overwritten.
+ xor_(overflow_dst, dst, scratch); // scratch is original left.
+ // Load right since xori takes uint16 as immediate.
+ daddiu(t9, zero_reg, right.immediate());
+ xor_(scratch, scratch, t9); // scratch is original left.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else {
+ daddiu(dst, left, -(right.immediate()));
+ xor_(overflow_dst, dst, left);
+ // Load right since xori takes uint16 as immediate.
+ daddiu(t9, zero_reg, right.immediate());
+ xor_(scratch, left, t9);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ }
+}
+
+
void MacroAssembler::SubuAndCheckForOverflow(Register dst,
Register left,
Register right,
DEFINE_INSTRUCTION(Addu);
DEFINE_INSTRUCTION(Daddu);
+ DEFINE_INSTRUCTION(Div);
+ DEFINE_INSTRUCTION(Divu);
+ DEFINE_INSTRUCTION(Ddivu);
+ DEFINE_INSTRUCTION(Mod);
+ DEFINE_INSTRUCTION(Modu);
DEFINE_INSTRUCTION(Ddiv);
DEFINE_INSTRUCTION(Subu);
DEFINE_INSTRUCTION(Dsubu);
DEFINE_INSTRUCTION(Dmod);
+ DEFINE_INSTRUCTION(Dmodu);
DEFINE_INSTRUCTION(Mul);
DEFINE_INSTRUCTION(Mulh);
+ DEFINE_INSTRUCTION(Mulhu);
DEFINE_INSTRUCTION(Dmul);
DEFINE_INSTRUCTION(Dmulh);
DEFINE_INSTRUCTION2(Mult);
// MIPS64 R2 instruction macro.
void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Dext(Register rt, Register rs, uint16_t pos, uint16_t size);
// ---------------------------------------------------------------------------
// FPU macros. These do not handle special cases like NaN or +- inf.
Register overflow_dst,
Register scratch = at);
+ void AdduAndCheckForOverflow(Register dst, Register left,
+ const Operand& right, Register overflow_dst,
+ Register scratch = at);
+
void SubuAndCheckForOverflow(Register dst,
Register left,
Register right,
Register overflow_dst,
Register scratch = at);
+ void SubuAndCheckForOverflow(Register dst, Register left,
+ const Operand& right, Register overflow_dst,
+ Register scratch = at);
+
void BranchOnOverflow(Label* label,
Register overflow_check,
BranchDelaySlot bd = PROTECT) {
// Runtime calls.
// See comments at the beginning of CEntryStub::Generate.
- inline void PrepareCEntryArgs(int num_args) {
- li(s0, num_args);
- li(s1, (num_args - 1) * kPointerSize);
- }
+ inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
inline void PrepareCEntryFunction(const ExternalReference& ref) {
- li(s2, Operand(ref));
+ li(a1, Operand(ref));
}
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
bool generating_stub_;
bool has_frame_;
+ bool has_double_zero_reg_set_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
*i64hilo = rs * rt;
break;
case MULTU:
- *u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
+ *u64hilo = static_cast<uint64_t>(rs_u & 0xffffffff) *
+ static_cast<uint64_t>(rt_u & 0xffffffff);
break;
case DMULT: // DMULT == D_MUL_MUH.
if (kArchVariant != kMips64r6) {
// Interpret sa field as 5-bit lsb of insert.
uint16_t lsb = sa;
uint16_t size = msb - lsb + 1;
- uint32_t mask = (1 << size) - 1;
+ uint64_t mask = (1ULL << size) - 1;
*alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb);
break;
}
// Interpret sa field as 5-bit lsb of extract.
uint16_t lsb = sa;
uint16_t size = msb + 1;
- uint32_t mask = (1 << size) - 1;
- *alu_out = (rs_u & (mask << lsb)) >> lsb;
+ uint64_t mask = (1ULL << size) - 1;
+ *alu_out = static_cast<int32_t>((rs_u & (mask << lsb)) >> lsb);
+ break;
+ }
+ case DEXT: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa;
+ uint16_t size = msb + 1;
+ uint64_t mask = (1ULL << size) - 1;
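+ // E.g. msb = 7, lsb = 4 extracts bits [11:4] of rs into the low 8 bits of
+ // the result.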
+ *alu_out = static_cast<int64_t>((rs_u & (mask << lsb)) >> lsb);
break;
}
default:
TraceRegWr(alu_out);
break;
case EXT:
- // Ext instr leaves result in Rt, rather than Rd.
+ case DEXT:
+ // Dext/Ext instr leaves result in Rt, rather than Rd.
set_register(rt_reg, alu_out);
TraceRegWr(alu_out);
break;
int64_t ft = get_fpu_register(ft_reg);
// Zero extended immediate.
- uint32_t oe_imm16 = 0xffff & imm16;
+ uint64_t oe_imm16 = 0xffff & imm16;
// Sign extended immediate.
- int32_t se_imm16 = imm16;
+ int64_t se_imm16 = imm16;
// Get current pc.
int64_t current_pc = get_pc();