From 6130206c2f5e7d2bd210054cc5ed45b93401a647 Mon Sep 17 00:00:00 2001 From: "haitao.feng@intel.com" Date: Wed, 26 Mar 2014 12:15:35 +0000 Subject: [PATCH] Introduce andp, notp, orp and xorp for x64 port R=verwaest@chromium.org Review URL: https://codereview.chromium.org/205343013 git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@20276 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 --- src/x64/assembler-x64.cc | 16 +- src/x64/assembler-x64.h | 267 +++++++++++++++++--------------- src/x64/builtins-x64.cc | 4 +- src/x64/code-stubs-x64.cc | 26 ++-- src/x64/codegen-x64.cc | 4 +- src/x64/full-codegen-x64.cc | 8 +- src/x64/ic-x64.cc | 4 +- src/x64/lithium-codegen-x64.cc | 16 +- src/x64/macro-assembler-x64.cc | 102 ++++++------ src/x64/macro-assembler-x64.h | 2 +- src/x64/regexp-macro-assembler-x64.cc | 16 +- src/x64/stub-cache-x64.cc | 12 +- test/cctest/test-assembler-x64.cc | 2 +- test/cctest/test-disasm-x64.cc | 30 ++-- test/cctest/test-macro-assembler-x64.cc | 72 ++++----- 15 files changed, 296 insertions(+), 285 deletions(-) diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc index efdd472..60383da 100644 --- a/src/x64/assembler-x64.cc +++ b/src/x64/assembler-x64.cc @@ -1551,30 +1551,22 @@ void Assembler::nop() { } -void Assembler::not_(Register dst) { +void Assembler::emit_not(Register dst, int size) { EnsureSpace ensure_space(this); - emit_rex_64(dst); + emit_rex(dst, size); emit(0xF7); emit_modrm(0x2, dst); } -void Assembler::not_(const Operand& dst) { +void Assembler::emit_not(const Operand& dst, int size) { EnsureSpace ensure_space(this); - emit_rex_64(dst); + emit_rex(dst, size); emit(0xF7); emit_operand(2, dst); } -void Assembler::notl(Register dst) { - EnsureSpace ensure_space(this); - emit_optional_rex_32(dst); - emit(0xF7); - emit_modrm(0x2, dst); -} - - void Assembler::Nop(int n) { // The recommended muti-byte sequences of NOP instructions from the Intel 64 // and IA-32 Architectures Software Developer's Manual. diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h index e23b72b..d47ca32 100644 --- a/src/x64/assembler-x64.h +++ b/src/x64/assembler-x64.h @@ -511,6 +511,7 @@ class CpuFeatures : public AllStatic { #define ASSEMBLER_INSTRUCTION_LIST(V) \ V(add) \ + V(and) \ V(cmp) \ V(dec) \ V(idiv) \ @@ -521,11 +522,14 @@ class CpuFeatures : public AllStatic { V(movzxb) \ V(movzxw) \ V(neg) \ + V(not) \ + V(or) \ V(repmovs) \ V(sbb) \ V(sub) \ V(test) \ - V(xchg) + V(xchg) \ + V(xor) class Assembler : public AssemblerBase { @@ -674,9 +678,7 @@ class Assembler : public AssemblerBase { // - Instructions on 16-bit (word) operands/registers have a trailing 'w'. // - Instructions on 32-bit (doubleword) operands/registers use 'l'. // - Instructions on 64-bit (quadword) operands/registers use 'q'. - // - // Some mnemonics, such as "and", are the same as C++ keywords. - // Naming conflicts with C++ keywords are resolved by adding a trailing '_'. + // - Instructions on operands/registers with pointer size use 'p'. 
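The comment block above defines the suffix scheme this patch extends: one size-parameterized emit_* helper per operation, fanned out into 'l' (32-bit), 'q' (64-bit) and 'p' (pointer-size) mnemonics by the DECLARE_INSTRUCTION macro whose context appears just below. Because the operation name only ever occurs token-pasted (instruction##p yields andp, emit_##instruction yields emit_and), C++ keywords like "and", "or", "not" and "xor" no longer need the trailing-underscore workaround the deleted comment described. A minimal standalone sketch of the pattern, for illustration only (the Emitter class and its printf body are hypothetical; kInt32Size/kInt64Size/kPointerSize mirror the constants the patch uses):

#include <cstdio>

const int kInt32Size = 4;
const int kInt64Size = 8;
const int kPointerSize = sizeof(void*);  // 8 on x64, so 'p' behaves like 'q'

class Emitter {
 public:
  // Stand-in for V8's real encoder: one body handles both operand widths.
  void emit_and(int dst, int src, int size) {
    std::printf("and%c r%d, r%d\n", size == kInt64Size ? 'q' : 'l', dst, src);
  }

#define DECLARE_INSTRUCTION(instruction)       \
  template <class P1, class P2>                \
  void instruction##p(P1 p1, P2 p2) {          \
    emit_##instruction(p1, p2, kPointerSize);  \
  }                                            \
  template <class P1, class P2>                \
  void instruction##l(P1 p1, P2 p2) {          \
    emit_##instruction(p1, p2, kInt32Size);    \
  }                                            \
  template <class P1, class P2>                \
  void instruction##q(P1 p1, P2 p2) {          \
    emit_##instruction(p1, p2, kInt64Size);    \
  }

  DECLARE_INSTRUCTION(and)  // pastes into andp/andl/andq
#undef DECLARE_INSTRUCTION
};

int main() {
  Emitter masm;
  masm.andp(1, 2);  // pointer-size: prints "andq r1, r2" on a 64-bit build
  masm.andl(1, 2);  // explicit 32-bit: prints "andl r1, r2"
  return 0;
}

Routing call sites through the 'p' forms keeps the pointer-width decision in one place, so code that operates on tagged pointers does not hard-code 'q'.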
#define DECLARE_INSTRUCTION(instruction) \ template<class P1> \ @@ -839,38 +841,6 @@ class Assembler : public AssemblerBase { arithmetic_op_16(0x39, src, dst); } - void and_(Register dst, Register src) { - arithmetic_op(0x23, dst, src); - } - - void and_(Register dst, const Operand& src) { - arithmetic_op(0x23, dst, src); - } - - void and_(const Operand& dst, Register src) { - arithmetic_op(0x21, src, dst); - } - - void and_(Register dst, Immediate src) { - immediate_arithmetic_op(0x4, dst, src); - } - - void and_(const Operand& dst, Immediate src) { - immediate_arithmetic_op(0x4, dst, src); - } - - void andl(Register dst, Immediate src) { - immediate_arithmetic_op_32(0x4, dst, src); - } - - void andl(Register dst, Register src) { - arithmetic_op_32(0x23, dst, src); - } - - void andl(Register dst, const Operand& src) { - arithmetic_op_32(0x23, dst, src); - } - void andb(Register dst, Immediate src) { immediate_arithmetic_op_8(0x4, dst, src); } @@ -886,50 +856,6 @@ // Multiply rax by src, put the result in rdx:rax. void mul(Register src); - void not_(Register dst); - void not_(const Operand& dst); - void notl(Register dst); - - void or_(Register dst, Register src) { - arithmetic_op(0x0B, dst, src); - } - - void orl(Register dst, Register src) { - arithmetic_op_32(0x0B, dst, src); - } - - void or_(Register dst, const Operand& src) { - arithmetic_op(0x0B, dst, src); - } - - void orl(Register dst, const Operand& src) { - arithmetic_op_32(0x0B, dst, src); - } - - void or_(const Operand& dst, Register src) { - arithmetic_op(0x09, src, dst); - } - - void orl(const Operand& dst, Register src) { - arithmetic_op_32(0x09, src, dst); - } - - void or_(Register dst, Immediate src) { - immediate_arithmetic_op(0x1, dst, src); - } - - void orl(Register dst, Immediate src) { - immediate_arithmetic_op_32(0x1, dst, src); - } - - void or_(const Operand& dst, Immediate src) { - immediate_arithmetic_op(0x1, dst, src); - } - - void orl(const Operand& dst, Immediate src) { - immediate_arithmetic_op_32(0x1, dst, src); - } - void rcl(Register dst, Immediate imm8) { shift(dst, imm8, 0x2); } @@ -1030,50 +956,6 @@ void testb(const Operand& op, Immediate mask); void testb(const Operand& op, Register reg); - void xor_(Register dst, Register src) { - if (dst.code() == src.code()) { - arithmetic_op_32(0x33, dst, src); - } else { - arithmetic_op(0x33, dst, src); - } - } - - void xorl(Register dst, Register src) { - arithmetic_op_32(0x33, dst, src); - } - - void xorl(Register dst, const Operand& src) { - arithmetic_op_32(0x33, dst, src); - } - - void xorl(Register dst, Immediate src) { - immediate_arithmetic_op_32(0x6, dst, src); - } - - void xorl(const Operand& dst, Register src) { - arithmetic_op_32(0x31, src, dst); - } - - void xorl(const Operand& dst, Immediate src) { - immediate_arithmetic_op_32(0x6, dst, src); - } - - void xor_(Register dst, const Operand& src) { - arithmetic_op(0x33, dst, src); - } - - void xor_(const Operand& dst, Register src) { - arithmetic_op(0x31, src, dst); - } - - void xor_(Register dst, Immediate src) { - immediate_arithmetic_op(0x6, dst, src); - } - - void xor_(const Operand& dst, Immediate src) { - immediate_arithmetic_op(0x6, dst, src); - } - // Bit operations.
void bt(const Operand& dst, Register src); void bts(const Operand& dst, Register src); @@ -1635,6 +1517,51 @@ class Assembler : public AssemblerBase { } } + void emit_and(Register dst, Register src, int size) { + if (size == kInt64Size) { + arithmetic_op(0x23, dst, src); + } else { + ASSERT(size == kInt32Size); + arithmetic_op_32(0x23, dst, src); + } + } + + void emit_and(Register dst, const Operand& src, int size) { + if (size == kInt64Size) { + arithmetic_op(0x23, dst, src); + } else { + ASSERT(size == kInt32Size); + arithmetic_op_32(0x23, dst, src); + } + } + + void emit_and(const Operand& dst, Register src, int size) { + if (size == kInt64Size) { + arithmetic_op(0x21, src, dst); + } else { + ASSERT(size == kInt32Size); + arithmetic_op_32(0x21, src, dst); + } + } + + void emit_and(Register dst, Immediate src, int size) { + if (size == kInt64Size) { + immediate_arithmetic_op(0x4, dst, src); + } else { + ASSERT(size == kInt32Size); + immediate_arithmetic_op_32(0x4, dst, src); + } + } + + void emit_and(const Operand& dst, Immediate src, int size) { + if (size == kInt64Size) { + immediate_arithmetic_op(0x4, dst, src); + } else { + ASSERT(size == kInt32Size); + immediate_arithmetic_op_32(0x4, dst, src); + } + } + void emit_cmp(Register dst, Register src, int size) { if (size == kInt64Size) { arithmetic_op(0x3B, dst, src); @@ -1713,6 +1640,49 @@ class Assembler : public AssemblerBase { void emit_neg(Register dst, int size); void emit_neg(const Operand& dst, int size); + void emit_not(Register dst, int size); + void emit_not(const Operand& dst, int size); + + void emit_or(Register dst, Register src, int size) { + if (size == kInt64Size) { + arithmetic_op(0x0B, dst, src); + } else { + arithmetic_op_32(0x0B, dst, src); + } + } + + void emit_or(Register dst, const Operand& src, int size) { + if (size == kInt64Size) { + arithmetic_op(0x0B, dst, src); + } else { + arithmetic_op_32(0x0B, dst, src); + } + } + + void emit_or(const Operand& dst, Register src, int size) { + if (size == kInt64Size) { + arithmetic_op(0x9, src, dst); + } else { + arithmetic_op_32(0x9, src, dst); + } + } + + void emit_or(Register dst, Immediate src, int size) { + if (size == kInt64Size) { + immediate_arithmetic_op(0x1, dst, src); + } else { + immediate_arithmetic_op_32(0x1, dst, src); + } + } + + void emit_or(const Operand& dst, Immediate src, int size) { + if (size == kInt64Size) { + immediate_arithmetic_op(0x1, dst, src); + } else { + immediate_arithmetic_op_32(0x1, dst, src); + } + } + void emit_repmovs(int size); void emit_sbb(Register dst, Register src, int size) { @@ -1777,6 +1747,55 @@ class Assembler : public AssemblerBase { // Exchange two registers void emit_xchg(Register dst, Register src, int size); + void emit_xor(Register dst, Register src, int size) { + if (size == kInt64Size) { + if (dst.code() == src.code()) { + arithmetic_op_32(0x33, dst, src); + } else { + arithmetic_op(0x33, dst, src); + } + } else { + ASSERT(size == kInt32Size); + arithmetic_op_32(0x33, dst, src); + } + } + + void emit_xor(Register dst, const Operand& src, int size) { + if (size == kInt64Size) { + arithmetic_op(0x33, dst, src); + } else { + ASSERT(size == kInt32Size); + arithmetic_op_32(0x33, dst, src); + } + } + + void emit_xor(Register dst, Immediate src, int size) { + if (size == kInt64Size) { + immediate_arithmetic_op(0x6, dst, src); + } else { + ASSERT(size == kInt32Size); + immediate_arithmetic_op_32(0x6, dst, src); + } + } + + void emit_xor(const Operand& dst, Immediate src, int size) { + if (size == kInt64Size) { + 
immediate_arithmetic_op(0x6, dst, src); + } else { + ASSERT(size == kInt32Size); + immediate_arithmetic_op_32(0x6, dst, src); + } + } + + void emit_xor(const Operand& dst, Register src, int size) { + if (size == kInt64Size) { + arithmetic_op(0x31, src, dst); + } else { + ASSERT(size == kInt32Size); + arithmetic_op_32(0x31, src, dst); + } + } + friend class CodePatcher; friend class EnsureSpace; friend class RegExpMacroAssemblerX64; diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc index d5577f3..45df985 100644 --- a/src/x64/builtins-x64.cc +++ b/src/x64/builtins-x64.cc @@ -278,7 +278,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // rax: initial map // rbx: JSObject // rdi: start of next object - __ or_(rbx, Immediate(kHeapObjectTag)); + __ orp(rbx, Immediate(kHeapObjectTag)); // Check if a non-empty properties array is needed. // Allocate and initialize a FixedArray if it is. @@ -342,7 +342,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // the JSObject // rbx: JSObject // rdi: FixedArray - __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag + __ orp(rdi, Immediate(kHeapObjectTag)); // add the heap tag __ movp(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi); diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc index fe333e9..b070d05 100644 --- a/src/x64/code-stubs-x64.cc +++ b/src/x64/code-stubs-x64.cc @@ -1040,7 +1040,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { const int kParameterMapHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize; Label no_parameter_map; - __ xor_(r8, r8); + __ xorp(r8, r8); __ testp(rbx, rbx); __ j(zero, &no_parameter_map, Label::kNear); __ leap(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize)); @@ -1839,7 +1839,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { __ JumpIfNotBothSmi(rax, rdx, &non_smi); __ subp(rdx, rax); __ j(no_overflow, &smi_done); - __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here. + __ notp(rdx); // Correct sign in case of overflow. rdx cannot be 0 here. __ bind(&smi_done); __ movp(rax, rdx); __ ret(0); @@ -3119,7 +3119,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm, // Find number of bytes left. __ movl(count, kScratchRegister); - __ and_(count, Immediate(kPointerSize - 1)); + __ andp(count, Immediate(kPointerSize - 1)); // Check if there are more bytes to copy. __ bind(&last_bytes); @@ -3848,7 +3848,7 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) { __ subp(rdx, rax); __ j(no_overflow, &done, Label::kNear); // Correct sign of result in case of overflow. 
- __ not_(rdx); + __ notp(rdx); __ bind(&done); __ movp(rax, rdx); } @@ -3957,7 +3957,7 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset)); __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); - __ or_(tmp1, tmp2); + __ orp(tmp1, tmp2); __ testb(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask)); __ j(not_zero, &miss, Label::kNear); @@ -4047,7 +4047,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); __ movp(tmp3, tmp1); STATIC_ASSERT(kNotStringTag != 0); - __ or_(tmp3, tmp2); + __ orp(tmp3, tmp2); __ testb(tmp3, Immediate(kIsNotStringMask)); __ j(not_zero, &miss); @@ -4069,7 +4069,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { if (equality) { Label do_compare; STATIC_ASSERT(kInternalizedTag == 0); - __ or_(tmp1, tmp2); + __ orp(tmp1, tmp2); __ testb(tmp1, Immediate(kIsNotInternalizedMask)); __ j(not_zero, &do_compare, Label::kNear); // Make sure rax is non-zero. At this point input operands are @@ -4193,7 +4193,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, // Capacity is smi 2^n. __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset)); __ decl(index); - __ and_(index, + __ andp(index, Immediate(name->Hash() + NameDictionary::GetProbeOffset(i))); // Scale the index by multiplying by the entry size. @@ -4264,7 +4264,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, if (i > 0) { __ addl(r1, Immediate(NameDictionary::GetProbeOffset(i))); } - __ and_(r1, r0); + __ andp(r1, r0); // Scale the index by multiplying by the entry size. ASSERT(NameDictionary::kEntrySize == 3); @@ -4325,7 +4325,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { if (i > 0) { __ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i))); } - __ and_(scratch, Operand(rsp, 0)); + __ andp(scratch, Operand(rsp, 0)); // Scale the index by multiplying by the entry size. ASSERT(NameDictionary::kEntrySize == 3); @@ -4504,7 +4504,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker( Label need_incremental_pop_object; __ movp(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask)); - __ and_(regs_.scratch0(), regs_.object()); + __ andp(regs_.scratch0(), regs_.object()); __ movp(regs_.scratch1(), Operand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset)); @@ -4942,7 +4942,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { __ movp(rdx, FieldOperand(rbx, AllocationSite::kTransitionInfoOffset)); __ SmiToInteger32(rdx, rdx); STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); - __ and_(rdx, Immediate(AllocationSite::ElementsKindBits::kMask)); + __ andp(rdx, Immediate(AllocationSite::ElementsKindBits::kMask)); GenerateDispatchToArrayStub(masm, DONT_OVERRIDE); __ bind(&no_info); @@ -5016,7 +5016,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { // but the following masking takes care of that anyway. __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset)); // Retrieve elements_kind from bit field 2. 
- __ and_(rcx, Immediate(Map::kElementsKindMask)); + __ andp(rcx, Immediate(Map::kElementsKindMask)); __ shr(rcx, Immediate(Map::kElementsKindShift)); if (FLAG_debug_code) { diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc index 333889d..9b92dc8 100644 --- a/src/x64/codegen-x64.cc +++ b/src/x64/codegen-x64.cc @@ -607,12 +607,12 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm, __ subsd(double_scratch, result); __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize)); __ leaq(temp1, Operand(temp2, 0x1ff800)); - __ and_(temp2, Immediate(0x7ff)); + __ andq(temp2, Immediate(0x7ff)); __ shr(temp1, Immediate(11)); __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize)); __ Move(kScratchRegister, ExternalReference::math_exp_log_table()); __ shl(temp1, Immediate(52)); - __ or_(temp1, Operand(kScratchRegister, temp2, times_8, 0)); + __ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0)); __ Move(kScratchRegister, ExternalReference::math_exp_constants(0)); __ subsd(double_scratch, input); __ movsd(input, double_scratch); diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc index e779c6d..3d3fd06 100644 --- a/src/x64/full-codegen-x64.cc +++ b/src/x64/full-codegen-x64.cc @@ -1013,7 +1013,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { if (inline_smi_code) { Label slow_case; __ movp(rcx, rdx); - __ or_(rcx, rax); + __ orp(rcx, rax); patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear); __ cmpp(rdx, rax); @@ -2311,7 +2311,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, Label done, stub_call, smi_case; __ Pop(rdx); __ movp(rcx, rax); - __ or_(rax, rdx); + __ orp(rax, rdx); JumpPatchSite patch_site(masm_); patch_site.EmitJumpIfSmi(rax, &smi_case, Label::kNear); @@ -3056,7 +3056,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( __ bind(&done); // Set the bit in the map to indicate that there is no local valueOf field. - __ or_(FieldOperand(rbx, Map::kBitField2Offset), + __ orp(FieldOperand(rbx, Map::kBitField2Offset), Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf)); __ bind(&skip_lookup); @@ -4658,7 +4658,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { if (inline_smi_code) { Label slow_case; __ movp(rcx, rdx); - __ or_(rcx, rax); + __ orp(rcx, rax); patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear); __ cmpp(rdx, rax); Split(cc, if_true, if_false, NULL); diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc index 5449095..ea118d0 100644 --- a/src/x64/ic-x64.cc +++ b/src/x64/ic-x64.cc @@ -424,9 +424,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift)); __ movl(rdi, FieldOperand(rax, String::kHashFieldOffset)); __ shr(rdi, Immediate(String::kHashShift)); - __ xor_(rcx, rdi); + __ xorp(rcx, rdi); int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask); - __ and_(rcx, Immediate(mask)); + __ andp(rcx, Immediate(mask)); // Load the key (consisting of map and internalized string) from the cache and // check for match. 
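A note on the emit_not bodies in the assembler-x64.cc hunk at the top: the old not_/notl pair differed only in which REX prefix helper they called, so the merged version can defer that choice to emit_rex(dst, size). For background, here is an illustrative sketch of the x64 rule involved (the encoding constants follow the AMD64/Intel manuals; the function itself is not V8's emit_rex):

#include <cassert>
#include <cstdint>

// A 64-bit operand requires REX.W (0x48 plus extension bits). A 32-bit
// operand needs a REX byte only to reach r8..r15, via the REX.B bit.
void emit_rex_sketch(int reg_code, int size, std::uint8_t* buf, int* len) {
  assert(size == 8 || size == 4);
  std::uint8_t rex_b = (reg_code >> 3) & 1;  // set for r8..r15
  if (size == 8) {
    buf[(*len)++] = 0x48 | rex_b;            // REX.W: 64-bit operand size
  } else if (rex_b) {
    buf[(*len)++] = 0x40 | rex_b;            // optional REX for 32-bit ops
  }                                          // else no prefix at all
}

With low register codes, notq rax therefore encodes as 48 F7 D0 while notl eax is just F7 D0: the same 0xF7 opcode and /2 ModRM field that emit_not writes, differing only in the prefix.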
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc index 89c7e4c..c4bc955 100644 --- a/src/x64/lithium-codegen-x64.cc +++ b/src/x64/lithium-codegen-x64.cc @@ -1403,7 +1403,7 @@ void LCodeGen::DoMulI(LMulI* instr) { } } else if (right->IsStackSlot()) { if (instr->hydrogen_value()->representation().IsSmi()) { - __ or_(kScratchRegister, ToOperand(right)); + __ orp(kScratchRegister, ToOperand(right)); } else { __ orl(kScratchRegister, ToOperand(right)); } @@ -1411,7 +1411,7 @@ } else { // Test the non-zero operand for negative sign. if (instr->hydrogen_value()->representation().IsSmi()) { - __ or_(kScratchRegister, ToRegister(right)); + __ orp(kScratchRegister, ToRegister(right)); } else { __ orl(kScratchRegister, ToRegister(right)); } @@ -1451,13 +1451,13 @@ void LCodeGen::DoBitI(LBitI* instr) { } else if (right->IsStackSlot()) { switch (instr->op()) { case Token::BIT_AND: - __ and_(ToRegister(left), ToOperand(right)); + __ andp(ToRegister(left), ToOperand(right)); break; case Token::BIT_OR: - __ or_(ToRegister(left), ToOperand(right)); + __ orp(ToRegister(left), ToOperand(right)); break; case Token::BIT_XOR: - __ xor_(ToRegister(left), ToOperand(right)); + __ xorp(ToRegister(left), ToOperand(right)); break; default: UNREACHABLE(); @@ -1467,13 +1467,13 @@ ASSERT(right->IsRegister()); switch (instr->op()) { case Token::BIT_AND: - __ and_(ToRegister(left), ToRegister(right)); + __ andp(ToRegister(left), ToRegister(right)); break; case Token::BIT_OR: - __ or_(ToRegister(left), ToRegister(right)); + __ orp(ToRegister(left), ToRegister(right)); break; case Token::BIT_XOR: - __ xor_(ToRegister(left), ToRegister(right)); + __ xorp(ToRegister(left), ToRegister(right)); break; default: UNREACHABLE(); diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc index caf9b20..7ea97d4 100644 --- a/src/x64/macro-assembler-x64.cc +++ b/src/x64/macro-assembler-x64.cc @@ -276,10 +276,10 @@ void MacroAssembler::InNewSpace(Register object, // and the running system. if (scratch.is(object)) { Move(kScratchRegister, ExternalReference::new_space_mask(isolate())); - and_(scratch, kScratchRegister); + andp(scratch, kScratchRegister); } else { Move(scratch, ExternalReference::new_space_mask(isolate())); - and_(scratch, object); + andp(scratch, object); } Move(kScratchRegister, ExternalReference::new_space_start(isolate())); cmpp(scratch, kScratchRegister); @@ -295,7 +295,7 @@ } else { leap(scratch, Operand(object, kScratchRegister, times_1, 0)); } - and_(scratch, + andp(scratch, Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask()))); j(cc, branch, distance); } @@ -576,7 +576,7 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) { // the slow case, converting the key to a smi is always valid. // key: string key // hash: key's hash field, including its array index value. - and_(hash, Immediate(String::kArrayIndexValueMask)); + andp(hash, Immediate(String::kArrayIndexValueMask)); shr(hash, Immediate(String::kHashShift)); // Here we actually clobber the key which will be used if calling into // runtime later.
However as the new key is the numeric value of a string key @@ -1002,7 +1002,7 @@ void MacroAssembler::SafeMove(Register dst, Smi* src) { if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { Move(dst, Smi::FromInt(src->value() ^ jit_cookie())); Move(kScratchRegister, Smi::FromInt(jit_cookie())); - xor_(dst, kScratchRegister); + xorq(dst, kScratchRegister); } else { Move(dst, src); } @@ -1014,7 +1014,7 @@ void MacroAssembler::SafePush(Smi* src) { if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { Push(Smi::FromInt(src->value() ^ jit_cookie())); Move(kScratchRegister, Smi::FromInt(jit_cookie())); - xor_(Operand(rsp, 0), kScratchRegister); + xorq(Operand(rsp, 0), kScratchRegister); } else { Push(src); } @@ -1255,12 +1255,12 @@ void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2, ASSERT(!src1.is(kScratchRegister)); ASSERT(!src2.is(kScratchRegister)); movp(kScratchRegister, src1); - or_(kScratchRegister, src2); + orp(kScratchRegister, src2); JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump); movp(dst, kScratchRegister); } else { movp(dst, src1); - or_(dst, src2); + orp(dst, src2); JumpIfNotSmi(dst, on_not_smis, near_jump); } } @@ -1307,7 +1307,7 @@ Condition MacroAssembler::CheckBothNonNegativeSmi(Register first, return CheckNonNegativeSmi(first); } movp(kScratchRegister, first); - or_(kScratchRegister, second); + orp(kScratchRegister, second); rol(kScratchRegister, Immediate(1)); testl(kScratchRegister, Immediate(3)); return zero; @@ -1799,7 +1799,7 @@ void MacroAssembler::SmiMul(Register dst, j(not_zero, &correct_result, Label::kNear); movp(dst, kScratchRegister); - xor_(dst, src2); + xorp(dst, src2); // Result was positive zero. j(positive, &zero_correct_result, Label::kNear); @@ -1823,7 +1823,7 @@ void MacroAssembler::SmiMul(Register dst, // One of src1 and src2 is zero, the check whether the other is // negative. movp(kScratchRegister, src1); - xor_(kScratchRegister, src2); + xorp(kScratchRegister, src2); j(negative, on_not_smi_result, near_jump); bind(&correct_result); } @@ -1955,11 +1955,11 @@ void MacroAssembler::SmiNot(Register dst, Register src) { // Set tag and padding bits before negating, so that they are zero afterwards. 
movl(kScratchRegister, Immediate(~0)); if (dst.is(src)) { - xor_(dst, kScratchRegister); + xorp(dst, kScratchRegister); } else { leap(dst, Operand(src, kScratchRegister, times_1, 0)); } - not_(dst); + notp(dst); } @@ -1968,7 +1968,7 @@ void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) { if (!dst.is(src1)) { movp(dst, src1); } - and_(dst, src2); + andp(dst, src2); } @@ -1978,10 +1978,10 @@ void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) { } else if (dst.is(src)) { ASSERT(!dst.is(kScratchRegister)); Register constant_reg = GetSmiConstant(constant); - and_(dst, constant_reg); + andp(dst, constant_reg); } else { LoadSmiConstant(dst, constant); - and_(dst, src); + andp(dst, src); } } @@ -1991,7 +1991,7 @@ void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) { ASSERT(!src1.is(src2)); movp(dst, src1); } - or_(dst, src2); + orp(dst, src2); } @@ -1999,10 +1999,10 @@ void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) { if (dst.is(src)) { ASSERT(!dst.is(kScratchRegister)); Register constant_reg = GetSmiConstant(constant); - or_(dst, constant_reg); + orp(dst, constant_reg); } else { LoadSmiConstant(dst, constant); - or_(dst, src); + orp(dst, src); } } @@ -2012,7 +2012,7 @@ void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) { ASSERT(!src1.is(src2)); movp(dst, src1); } - xor_(dst, src2); + xorp(dst, src2); } @@ -2020,10 +2020,10 @@ void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) { if (dst.is(src)) { ASSERT(!dst.is(kScratchRegister)); Register constant_reg = GetSmiConstant(constant); - xor_(dst, constant_reg); + xorp(dst, constant_reg); } else { LoadSmiConstant(dst, constant); - xor_(dst, src); + xorp(dst, src); } } @@ -2083,7 +2083,7 @@ void MacroAssembler::SmiShiftLeft(Register dst, } SmiToInteger32(rcx, src2); // Shift amount specified by lower 5 bits, not six as the shl opcode. - and_(rcx, Immediate(0x1f)); + andq(rcx, Immediate(0x1f)); shl_cl(dst); } @@ -2172,7 +2172,7 @@ void MacroAssembler::SelectNonSmi(Register dst, STATIC_ASSERT(kSmiTag == 0); ASSERT_EQ(0, Smi::FromInt(0)); movl(kScratchRegister, Immediate(kSmiTagMask)); - and_(kScratchRegister, src1); + andp(kScratchRegister, src1); testl(kScratchRegister, src2); // If non-zero then both are smis. j(not_zero, on_not_smis, near_jump); @@ -2183,10 +2183,10 @@ void MacroAssembler::SelectNonSmi(Register dst, subp(kScratchRegister, Immediate(1)); // If src1 is a smi, then scratch register all 1s, else it is all 0s. movp(dst, src1); - xor_(dst, src2); - and_(dst, kScratchRegister); + xorp(dst, src2); + andp(dst, kScratchRegister); // If src1 is a smi, dst holds src1 ^ src2, else it is zero. - xor_(dst, src1); + xorp(dst, src1); // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi. } @@ -2263,7 +2263,7 @@ void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) { shr(dst, Immediate(kSmiShift)); // High bits. 
shl(dst, Immediate(64 - kSmiShift)); - or_(dst, scratch); + orp(dst, scratch); } @@ -2309,8 +2309,8 @@ void MacroAssembler::LookupNumberStringCache(Register object, STATIC_ASSERT(8 == kDoubleSize); movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4)); - xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset)); - and_(scratch, mask); + xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset)); + andp(scratch, mask); // Each entry in string cache consists of two pointer sized fields, // but times_twice_pointer_size (multiplication by 16) scale factor // is not supported by addrmode on x64 platform. @@ -2333,7 +2333,7 @@ void MacroAssembler::LookupNumberStringCache(Register object, bind(&is_smi); SmiToInteger32(scratch, object); - and_(scratch, mask); + andp(scratch, mask); // Each entry in string cache consists of two pointer sized fields, // but times_twice_pointer_size (multiplication by 16) scale factor // is not supported by addrmode on x64 platform. @@ -3341,7 +3341,7 @@ void MacroAssembler::EnumLength(Register dst, Register map) { STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); movp(dst, FieldOperand(map, Map::kBitField3Offset)); Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask)); - and_(dst, kScratchRegister); + andp(dst, kScratchRegister); } @@ -3842,7 +3842,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space, if (kFrameAlignment > 0) { ASSERT(IsPowerOf2(kFrameAlignment)); ASSERT(is_int8(kFrameAlignment)); - and_(rsp, Immediate(-kFrameAlignment)); + andp(rsp, Immediate(-kFrameAlignment)); } // Patch the saved entry sp. @@ -4068,7 +4068,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, if (i > 0) { addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i))); } - and_(r2, r1); + andp(r2, r1); // Scale the index by multiplying by the entry size. ASSERT(SeededNumberDictionary::kEntrySize == 3); @@ -4293,7 +4293,7 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) { ExternalReference::new_space_allocation_top_address(isolate()); // Make sure the object has no tag before resetting top. - and_(object, Immediate(~kHeapObjectTagMask)); + andp(object, Immediate(~kHeapObjectTagMask)); Operand top_operand = ExternalOperand(new_space_allocation_top); #ifdef DEBUG cmpp(object, top_operand); @@ -4329,7 +4329,7 @@ void MacroAssembler::AllocateTwoByteString(Register result, // scratch1 = length * 2 + kObjectAlignmentMask. 
leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask + kHeaderAlignment)); - and_(scratch1, Immediate(~kObjectAlignmentMask)); + andp(scratch1, Immediate(~kObjectAlignmentMask)); if (kHeaderAlignment > 0) { subp(scratch1, Immediate(kHeaderAlignment)); } @@ -4367,7 +4367,7 @@ void MacroAssembler::AllocateAsciiString(Register result, movl(scratch1, length); ASSERT(kCharSize == 1); addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment)); - and_(scratch1, Immediate(~kObjectAlignmentMask)); + andp(scratch1, Immediate(~kObjectAlignmentMask)); if (kHeaderAlignment > 0) { subp(scratch1, Immediate(kHeaderAlignment)); } @@ -4720,7 +4720,7 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments) { int argument_slots_on_stack = ArgumentStackSlotsForCFunctionCall(num_arguments); subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize)); - and_(rsp, Immediate(-frame_alignment)); + andp(rsp, Immediate(-frame_alignment)); movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister); } @@ -4789,10 +4789,10 @@ void MacroAssembler::CheckPageFlag( Label::Distance condition_met_distance) { ASSERT(cc == zero || cc == not_zero); if (scratch.is(object)) { - and_(scratch, Immediate(~Page::kPageAlignmentMask)); + andp(scratch, Immediate(~Page::kPageAlignmentMask)); } else { movp(scratch, Immediate(~Page::kPageAlignmentMask)); - and_(scratch, object); + andp(scratch, object); } if (mask < (1 << kBitsPerByte)) { testb(Operand(scratch, MemoryChunk::kFlagsOffset), @@ -4811,7 +4811,7 @@ void MacroAssembler::CheckMapDeprecated(Handle<Map> map, Move(scratch, map); movp(scratch, FieldOperand(scratch, Map::kBitField3Offset)); SmiToInteger32(scratch, scratch); - and_(scratch, Immediate(Map::Deprecated::kMask)); + andp(scratch, Immediate(Map::Deprecated::kMask)); j(not_zero, if_deprecated); } } @@ -4833,7 +4833,7 @@ void MacroAssembler::JumpIfBlack(Register object, // rcx = mask | (mask << 1). leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0)); // Note that we are using a 4-byte aligned 8-byte load. - and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); + andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); cmpp(mask_scratch, rcx); j(equal, on_black, on_black_distance); } @@ -4868,19 +4868,19 @@ void MacroAssembler::GetMarkBits(Register addr_reg, ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx)); movp(bitmap_reg, addr_reg); // Sign extended 32 bit immediate. - and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask)); + andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask)); movp(rcx, addr_reg); int shift = Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2; shrl(rcx, Immediate(shift)); - and_(rcx, + andp(rcx, Immediate((Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1))); addp(bitmap_reg, rcx); movp(rcx, addr_reg); shrl(rcx, Immediate(kPointerSizeLog2)); - and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1)); + andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1)); movl(mask_reg, Immediate(1)); shl_cl(mask_reg); } @@ -4961,21 +4961,21 @@ void MacroAssembler::EnsureNotWhite( bind(&not_external); // Sequential string, either ASCII or UC16. ASSERT(kOneByteStringTag == 0x04); - and_(length, Immediate(kStringEncodingMask)); - xor_(length, Immediate(kStringEncodingMask)); + andp(length, Immediate(kStringEncodingMask)); + xorp(length, Immediate(kStringEncodingMask)); addp(length, Immediate(0x04)); // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
imulp(length, FieldOperand(value, String::kLengthOffset)); shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize)); addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask)); - and_(length, Immediate(~kObjectAlignmentMask)); + andp(length, Immediate(~kObjectAlignmentMask)); bind(&is_data_object); // Value is a data object, and it is white. Mark it black. Since we know // that the object is white we can make it black by flipping one bit. - or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); + orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); - and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask)); + andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask)); addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length); bind(&done); @@ -5064,7 +5064,7 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain( bind(&loop_again); movp(current, FieldOperand(current, HeapObject::kMapOffset)); movp(scratch1, FieldOperand(current, Map::kBitField2Offset)); - and_(scratch1, Immediate(Map::kElementsKindMask)); + andp(scratch1, Immediate(Map::kElementsKindMask)); shr(scratch1, Immediate(Map::kElementsKindShift)); cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS)); j(equal, found); diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h index 6c9c19f..af65a65 100644 --- a/src/x64/macro-assembler-x64.h +++ b/src/x64/macro-assembler-x64.h @@ -1027,7 +1027,7 @@ class MacroAssembler: public Assembler { static const int shift = Field::kShift + kSmiShift; static const int mask = Field::kMask >> Field::kShift; shr(reg, Immediate(shift)); - and_(reg, Immediate(mask)); + andp(reg, Immediate(mask)); shl(reg, Immediate(kSmiShift)); } diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc index 586c61a..c819c71 100644 --- a/src/x64/regexp-macro-assembler-x64.cc +++ b/src/x64/regexp-macro-assembler-x64.cc @@ -293,8 +293,8 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase( // Mismatch, try case-insensitive match (converting letters to lower-case). // I.e., if or-ing with 0x20 makes values equal and in range 'a'-'z', it's // a match. - __ or_(rax, Immediate(0x20)); // Convert match character to lower-case. - __ or_(rdx, Immediate(0x20)); // Convert capture character to lower-case. + __ orp(rax, Immediate(0x20)); // Convert match character to lower-case. + __ orp(rdx, Immediate(0x20)); // Convert capture character to lower-case. __ cmpb(rax, rdx); __ j(not_equal, on_no_match); // Definitely not equal. 
__ subb(rax, Immediate('a')); @@ -462,7 +462,7 @@ void RegExpMacroAssemblerX64::CheckCharacterAfterAnd(uint32_t c, __ testl(current_character(), Immediate(mask)); } else { __ movl(rax, Immediate(mask)); - __ and_(rax, current_character()); + __ andp(rax, current_character()); __ cmpl(rax, Immediate(c)); } BranchOrBacktrack(equal, on_equal); @@ -476,7 +476,7 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterAnd(uint32_t c, __ testl(current_character(), Immediate(mask)); } else { __ movl(rax, Immediate(mask)); - __ and_(rax, current_character()); + __ andp(rax, current_character()); __ cmpl(rax, Immediate(c)); } BranchOrBacktrack(not_equal, on_not_equal); @@ -490,7 +490,7 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterMinusAnd( Label* on_not_equal) { ASSERT(minus < String::kMaxUtf16CodeUnit); __ leap(rax, Operand(current_character(), -minus)); - __ and_(rax, Immediate(mask)); + __ andp(rax, Immediate(mask)); __ cmpl(rax, Immediate(c)); BranchOrBacktrack(not_equal, on_not_equal); } @@ -523,7 +523,7 @@ void RegExpMacroAssemblerX64::CheckBitInTable( Register index = current_character(); if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) { __ movp(rbx, current_character()); - __ and_(rbx, Immediate(kTableMask)); + __ andp(rbx, Immediate(kTableMask)); index = rbx; } __ cmpb(FieldOperand(rax, index, times_1, ByteArray::kHeaderSize), @@ -575,7 +575,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type, case '.': { // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029) __ movl(rax, current_character()); - __ xor_(rax, Immediate(0x01)); + __ xorp(rax, Immediate(0x01)); // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c __ subl(rax, Immediate(0x0b)); __ cmpl(rax, Immediate(0x0c - 0x0b)); @@ -593,7 +593,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type, case 'n': { // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029) __ movl(rax, current_character()); - __ xor_(rax, Immediate(0x01)); + __ xorp(rax, Immediate(0x01)); // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c __ subl(rax, Immediate(0x0b)); __ cmpl(rax, Immediate(0x0c - 0x0b)); diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc index 0c5ee62..13e822d 100644 --- a/src/x64/stub-cache-x64.cc +++ b/src/x64/stub-cache-x64.cc @@ -89,7 +89,7 @@ static void ProbeTable(Isolate* isolate, // Check that the flags match what we're looking for. __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset)); - __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup)); + __ andp(offset, Immediate(~Code::kFlagsNotUsedInLookup)); __ cmpl(offset, Immediate(flags)); __ j(not_equal, &miss); @@ -195,10 +195,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm, __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset)); // Use only the low 32 bits of the map pointer. __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); - __ xor_(scratch, Immediate(flags)); + __ xorp(scratch, Immediate(flags)); // We mask out the last two bits because they are not part of the hash and // they are always 01 for maps. Also in the two 'and' instructions below. - __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize)); + __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize)); // Probe the primary table. 
ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch); @@ -206,11 +206,11 @@ // Primary miss: Compute hash for secondary probe. __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset)); __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); - __ xor_(scratch, Immediate(flags)); - __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize)); + __ xorp(scratch, Immediate(flags)); + __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize)); __ subl(scratch, name); __ addl(scratch, Immediate(flags)); - __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize)); + __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize)); // Probe the secondary table. ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch); diff --git a/test/cctest/test-assembler-x64.cc b/test/cctest/test-assembler-x64.cc index 9c65b58..446cec6 100644 --- a/test/cctest/test-assembler-x64.cc +++ b/test/cctest/test-assembler-x64.cc @@ -577,7 +577,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) { for (int i = 0; i < ELEMENT_COUNT; i++) { __ movl(rax, Immediate(vec->Get(i)->Int32Value())); __ shl(rax, Immediate(0x20)); - __ or_(rax, Immediate(vec->Get(++i)->Int32Value())); + __ orq(rax, Immediate(vec->Get(++i)->Int32Value())); __ pushq(rax); } diff --git a/test/cctest/test-disasm-x64.cc b/test/cctest/test-disasm-x64.cc index 0a16d22..5ca12b9 100644 --- a/test/cctest/test-disasm-x64.cc +++ b/test/cctest/test-disasm-x64.cc @@ -57,10 +57,10 @@ TEST(DisasmX64) { // Short immediate instructions __ addq(rax, Immediate(12345678)); - __ or_(rax, Immediate(12345678)); + __ orq(rax, Immediate(12345678)); __ subq(rax, Immediate(12345678)); - __ xor_(rax, Immediate(12345678)); - __ and_(rax, Immediate(12345678)); + __ xorq(rax, Immediate(12345678)); + __ andq(rax, Immediate(12345678)); // ---- This one caused crash __ movq(rbx, Operand(rsp, rcx, times_2, 0)); // [rsp+rcx*4] @@ -93,15 +93,15 @@ __ addq(rbx, Immediate(12)); __ nop(); __ nop(); - __ and_(rdx, Immediate(3)); - __ and_(rdx, Operand(rsp, 4)); + __ andq(rdx, Immediate(3)); + __ andq(rdx, Operand(rsp, 4)); __ cmpq(rdx, Immediate(3)); __ cmpq(rdx, Operand(rsp, 4)); __ cmpq(Operand(rbp, rcx, times_4, 0), Immediate(1000)); __ cmpb(rbx, Operand(rbp, rcx, times_2, 0)); __ cmpb(Operand(rbp, rcx, times_2, 0), rbx); - __ or_(rdx, Immediate(3)); - __ xor_(rdx, Immediate(3)); + __ orq(rdx, Immediate(3)); + __ xorq(rdx, Immediate(3)); __ nop(); __ cpuid(); __ movsxbq(rdx, Operand(rcx, 0)); @@ -159,7 +159,7 @@ __ idivq(rdx); __ mul(rdx); __ negq(rdx); - __ not_(rdx); + __ notq(rdx); __ testq(Operand(rbx, rcx, times_4, 10000), rdx); __ imulq(rdx, Operand(rbx, rcx, times_4, 10000)); @@ -174,8 +174,8 @@ // __ jmp(Operand(rbx, rcx, times_4, 10000)); __ leaq(rdx, Operand(rbx, rcx, times_4, 10000)); - __ or_(rdx, Immediate(12345)); - __ or_(rdx, Operand(rbx, rcx, times_4, 10000)); + __ orq(rdx, Immediate(12345)); + __ orq(rdx, Operand(rbx, rcx, times_4, 10000)); __ nop(); @@ -202,19 +202,19 @@ __ addq(rbx, Immediate(12)); __ addq(Operand(rdx, rcx, times_4, 10000), Immediate(12)); - __ and_(rbx, Immediate(12345)); + __ andq(rbx, Immediate(12345)); __ cmpq(rbx, Immediate(12345)); __ cmpq(rbx, Immediate(12)); __ cmpq(Operand(rdx, rcx, times_4, 10000), Immediate(12)); __ cmpb(rax, Immediate(100)); - __ or_(rbx, Immediate(12345)); + __ orq(rbx, Immediate(12345)); __ 
subq(rbx, Immediate(12)); __ subq(Operand(rdx, rcx, times_4, 10000), Immediate(12)); - __ xor_(rbx, Immediate(12345)); + __ xorq(rbx, Immediate(12345)); __ imulq(rdx, rcx, Immediate(12)); __ imulq(rdx, rcx, Immediate(1000)); @@ -230,8 +230,8 @@ TEST(DisasmX64) { __ testb(Operand(rax, -20), Immediate(0x9A)); __ nop(); - __ xor_(rdx, Immediate(12345)); - __ xor_(rdx, Operand(rbx, rcx, times_8, 10000)); + __ xorq(rdx, Immediate(12345)); + __ xorq(rdx, Operand(rbx, rcx, times_8, 10000)); __ bts(Operand(rbx, rcx, times_8, 10000), rdx); __ hlt(); __ int3(); diff --git a/test/cctest/test-macro-assembler-x64.cc b/test/cctest/test-macro-assembler-x64.cc index ee23a0b..f29dacc 100644 --- a/test/cctest/test-macro-assembler-x64.cc +++ b/test/cctest/test-macro-assembler-x64.cc @@ -181,7 +181,7 @@ TEST(SmiMove) { TestMoveSmi(masm, &exit, 11, Smi::FromInt(-257)); TestMoveSmi(masm, &exit, 12, Smi::FromInt(Smi::kMinValue)); - __ xor_(rax, rax); // Success. + __ xorq(rax, rax); // Success. __ bind(&exit); ExitCode(masm); __ ret(0); @@ -277,7 +277,7 @@ TEST(SmiCompare) { TestSmiCompare(masm, &exit, 0x120, Smi::kMaxValue, Smi::kMinValue); TestSmiCompare(masm, &exit, 0x130, Smi::kMaxValue, Smi::kMaxValue); - __ xor_(rax, rax); // Success. + __ xorq(rax, rax); // Success. __ bind(&exit); ExitCode(masm); __ ret(0); @@ -380,7 +380,7 @@ TEST(Integer32ToSmi) { __ j(not_equal, &exit); - __ xor_(rax, rax); // Success. + __ xorq(rax, rax); // Success. __ bind(&exit); ExitCode(masm); __ ret(0); @@ -450,7 +450,7 @@ TEST(Integer64PlusConstantToSmi) { TestI64PlusConstantToSmi(masm, &exit, 0xB0, Smi::kMaxValue, 0); TestI64PlusConstantToSmi(masm, &exit, 0xC0, twice_max, Smi::kMinValue); - __ xor_(rax, rax); // Success. + __ xorq(rax, rax); // Success. __ bind(&exit); ExitCode(masm); __ ret(0); @@ -490,7 +490,7 @@ TEST(SmiCheck) { __ j(NegateCondition(cond), &exit); __ incq(rax); - __ xor_(rcx, Immediate(kSmiTagMask)); + __ xorq(rcx, Immediate(kSmiTagMask)); cond = masm->CheckSmi(rcx); __ j(cond, &exit); @@ -501,7 +501,7 @@ TEST(SmiCheck) { __ j(NegateCondition(cond), &exit); __ incq(rax); - __ xor_(rcx, Immediate(kSmiTagMask)); + __ xorq(rcx, Immediate(kSmiTagMask)); cond = masm->CheckSmi(rcx); __ j(cond, &exit); @@ -512,7 +512,7 @@ TEST(SmiCheck) { __ j(NegateCondition(cond), &exit); __ incq(rax); - __ xor_(rcx, Immediate(kSmiTagMask)); + __ xorq(rcx, Immediate(kSmiTagMask)); cond = masm->CheckSmi(rcx); __ j(cond, &exit); @@ -523,7 +523,7 @@ TEST(SmiCheck) { __ j(NegateCondition(cond), &exit); __ incq(rax); - __ xor_(rcx, Immediate(kSmiTagMask)); + __ xorq(rcx, Immediate(kSmiTagMask)); cond = masm->CheckSmi(rcx); __ j(cond, &exit); @@ -536,7 +536,7 @@ TEST(SmiCheck) { __ j(NegateCondition(cond), &exit); __ incq(rax); - __ xor_(rcx, Immediate(kSmiTagMask)); + __ xorq(rcx, Immediate(kSmiTagMask)); cond = masm->CheckNonNegativeSmi(rcx); // "zero" non-smi. __ j(cond, &exit); @@ -553,7 +553,7 @@ TEST(SmiCheck) { __ j(cond, &exit); __ incq(rax); - __ xor_(rcx, Immediate(kSmiTagMask)); + __ xorq(rcx, Immediate(kSmiTagMask)); cond = masm->CheckNonNegativeSmi(rcx); // "Negative" non-smi. __ j(cond, &exit); @@ -564,7 +564,7 @@ TEST(SmiCheck) { __ j(NegateCondition(cond), &exit); __ incq(rax); - __ xor_(rcx, Immediate(kSmiTagMask)); + __ xorq(rcx, Immediate(kSmiTagMask)); cond = masm->CheckNonNegativeSmi(rcx); // "Positive" non-smi. 
__ j(cond, &exit); @@ -605,17 +605,17 @@ TEST(SmiCheck) { __ j(NegateCondition(cond), &exit); __ incq(rax); - __ xor_(rcx, Immediate(kSmiTagMask)); + __ xorq(rcx, Immediate(kSmiTagMask)); cond = masm->CheckBothSmi(rcx, rdx); __ j(cond, &exit); __ incq(rax); - __ xor_(rdx, Immediate(kSmiTagMask)); + __ xorq(rdx, Immediate(kSmiTagMask)); cond = masm->CheckBothSmi(rcx, rdx); __ j(cond, &exit); __ incq(rax); - __ xor_(rcx, Immediate(kSmiTagMask)); + __ xorq(rcx, Immediate(kSmiTagMask)); cond = masm->CheckBothSmi(rcx, rdx); __ j(cond, &exit); @@ -649,7 +649,7 @@ TEST(SmiCheck) { __ j(NegateCondition(cond), &exit); // Success - __ xor_(rax, rax); + __ xorq(rax, rax); __ bind(&exit); ExitCode(masm); @@ -736,7 +736,7 @@ TEST(SmiNeg) { TestSmiNeg(masm, &exit, 0x70, Smi::kMaxValue); TestSmiNeg(masm, &exit, 0x80, -Smi::kMaxValue); - __ xor_(rax, rax); // Success. + __ xorq(rax, rax); // Success. __ bind(&exit); ExitCode(masm); __ ret(0); @@ -961,7 +961,7 @@ TEST(SmiAdd) { SmiAddOverflowTest(masm, &exit, 0xE0, -42000); SmiAddOverflowTest(masm, &exit, 0xF0, Smi::kMinValue); - __ xor_(rax, rax); // Success. + __ xorq(rax, rax); // Success. __ bind(&exit); ExitCode(masm); __ ret(0); @@ -1182,7 +1182,7 @@ TEST(SmiSub) { SmiSubOverflowTest(masm, &exit, 0xF0, Smi::kMinValue); SmiSubOverflowTest(masm, &exit, 0x100, 0); - __ xor_(rax, rax); // Success. + __ xorq(rax, rax); // Success. __ bind(&exit); ExitCode(masm); __ ret(0); @@ -1269,7 +1269,7 @@ TEST(SmiMul) { TestSmiMul(masm, &exit, 0xd0, (Smi::kMinValue / 2), 2); TestSmiMul(masm, &exit, 0xe0, (Smi::kMinValue / 2) - 1, 2); - __ xor_(rax, rax); // Success. + __ xorq(rax, rax); // Success. __ bind(&exit); ExitCode(masm); __ ret(0); @@ -1383,7 +1383,7 @@ TEST(SmiDiv) { TestSmiDiv(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue); TestSmiDiv(masm, &exit, 0x140, Smi::kMinValue, -1); - __ xor_(r15, r15); // Success. + __ xorq(r15, r15); // Success. __ bind(&exit); __ movq(rax, r15); __ popq(r15); @@ -1493,7 +1493,7 @@ TEST(SmiMod) { TestSmiMod(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue); TestSmiMod(masm, &exit, 0x140, Smi::kMinValue, -1); - __ xor_(r15, r15); // Success. + __ xorq(r15, r15); // Success. __ bind(&exit); __ movq(rax, r15); __ popq(r15); @@ -1573,7 +1573,7 @@ TEST(SmiIndex) { TestSmiIndex(masm, &exit, 0x40, 1000); TestSmiIndex(masm, &exit, 0x50, Smi::kMaxValue); - __ xor_(rax, rax); // Success. + __ xorq(rax, rax); // Success. 
__ bind(&exit); ExitCode(masm); __ ret(0); @@ -1590,7 +1590,7 @@ void TestSelectNonSmi(MacroAssembler* masm, Label* exit, int id, int x, int y) { __ movl(rax, Immediate(id)); __ Move(rcx, Smi::FromInt(x)); __ Move(rdx, Smi::FromInt(y)); - __ xor_(rdx, Immediate(kSmiTagMask)); + __ xorq(rdx, Immediate(kSmiTagMask)); __ SelectNonSmi(r9, rcx, rdx, exit); __ incq(rax); @@ -1600,7 +1600,7 @@ void TestSelectNonSmi(MacroAssembler* masm, Label* exit, int id, int x, int y) { __ incq(rax); __ Move(rcx, Smi::FromInt(x)); __ Move(rdx, Smi::FromInt(y)); - __ xor_(rcx, Immediate(kSmiTagMask)); + __ xorq(rcx, Immediate(kSmiTagMask)); __ SelectNonSmi(r9, rcx, rdx, exit); __ incq(rax); @@ -1611,8 +1611,8 @@ void TestSelectNonSmi(MacroAssembler* masm, Label* exit, int id, int x, int y) { Label fail_ok; __ Move(rcx, Smi::FromInt(x)); __ Move(rdx, Smi::FromInt(y)); - __ xor_(rcx, Immediate(kSmiTagMask)); - __ xor_(rdx, Immediate(kSmiTagMask)); + __ xorq(rcx, Immediate(kSmiTagMask)); + __ xorq(rdx, Immediate(kSmiTagMask)); __ SelectNonSmi(r9, rcx, rdx, &fail_ok); __ jmp(exit); __ bind(&fail_ok); @@ -1646,7 +1646,7 @@ TEST(SmiSelectNonSmi) { TestSelectNonSmi(masm, &exit, 0x80, Smi::kMinValue, Smi::kMaxValue); TestSelectNonSmi(masm, &exit, 0x90, Smi::kMinValue, Smi::kMinValue); - __ xor_(rax, rax); // Success. + __ xorq(rax, rax); // Success. __ bind(&exit); ExitCode(masm); __ ret(0); @@ -1727,7 +1727,7 @@ TEST(SmiAnd) { TestSmiAnd(masm, &exit, 0xA0, Smi::kMinValue, -1); TestSmiAnd(masm, &exit, 0xB0, Smi::kMinValue, -1); - __ xor_(rax, rax); // Success. + __ xorq(rax, rax); // Success. __ bind(&exit); ExitCode(masm); __ ret(0); @@ -1810,7 +1810,7 @@ TEST(SmiOr) { TestSmiOr(masm, &exit, 0xC0, 0x05555555, 0x0fedcba9); TestSmiOr(masm, &exit, 0xD0, Smi::kMinValue, -1); - __ xor_(rax, rax); // Success. + __ xorq(rax, rax); // Success. __ bind(&exit); ExitCode(masm); __ ret(0); @@ -1893,7 +1893,7 @@ TEST(SmiXor) { TestSmiXor(masm, &exit, 0xC0, 0x5555555, 0x0fedcba9); TestSmiXor(masm, &exit, 0xD0, Smi::kMinValue, -1); - __ xor_(rax, rax); // Success. + __ xorq(rax, rax); // Success. __ bind(&exit); ExitCode(masm); __ ret(0); @@ -1955,7 +1955,7 @@ TEST(SmiNot) { TestSmiNot(masm, &exit, 0x70, Smi::kMaxValue); TestSmiNot(masm, &exit, 0x80, 0x05555555); - __ xor_(rax, rax); // Success. + __ xorq(rax, rax); // Success. __ bind(&exit); ExitCode(masm); __ ret(0); @@ -2050,7 +2050,7 @@ TEST(SmiShiftLeft) { TestSmiShiftLeft(masm, &exit, 0x150, Smi::kMinValue); TestSmiShiftLeft(masm, &exit, 0x190, -1); - __ xor_(rax, rax); // Success. + __ xorq(rax, rax); // Success. __ bind(&exit); ExitCode(masm); __ ret(0); @@ -2156,7 +2156,7 @@ TEST(SmiShiftLogicalRight) { TestSmiShiftLogicalRight(masm, &exit, 0xB0, Smi::kMinValue); TestSmiShiftLogicalRight(masm, &exit, 0xD0, -1); - __ xor_(rax, rax); // Success. + __ xorq(rax, rax); // Success. __ bind(&exit); ExitCode(masm); __ ret(0); @@ -2225,7 +2225,7 @@ TEST(SmiShiftArithmeticRight) { TestSmiShiftArithmeticRight(masm, &exit, 0x60, Smi::kMinValue); TestSmiShiftArithmeticRight(masm, &exit, 0x70, -1); - __ xor_(rax, rax); // Success. + __ xorq(rax, rax); // Success. __ bind(&exit); ExitCode(masm); __ ret(0); @@ -2291,7 +2291,7 @@ TEST(PositiveSmiTimesPowerOfTwoToInteger64) { TestPositiveSmiPowerUp(masm, &exit, 0x120, 65536); TestPositiveSmiPowerUp(masm, &exit, 0x140, Smi::kMaxValue); - __ xor_(rax, rax); // Success. + __ xorq(rax, rax); // Success. 
__ bind(&exit); ExitCode(masm); __ ret(0); @@ -2796,7 +2796,7 @@ TEST(LoadAndStoreWithRepresentation) { __ cmpq(rcx, rdx); __ j(not_equal, &exit); - __ xor_(rax, rax); // Success. + __ xorq(rax, rax); // Success. __ bind(&exit); __ addq(rsp, Immediate(1 * kPointerSize)); ExitCode(masm); -- 2.7.4
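One behavioral detail carried over from the removed xor_ overloads into the new emit_xor (assembler-x64.h hunk above): a 64-bit xor of a register with itself is deliberately demoted to the 32-bit encoding. Any 32-bit result is zero-extended to 64 bits on x64, so the short form clears the full register and the REX.W prefix would be wasted. A small illustrative sketch of the savings (byte values per the x86-64 manuals; the arrays are hand-encoded for rax, register code 0):

#include <cstdio>

int main() {
  const unsigned char xorq_rax_rax[] = {0x48, 0x33, 0xC0};  // REX.W 33 /r
  const unsigned char xorl_eax_eax[] = {0x33, 0xC0};        // same effect
  std::printf("xorq rax,rax: %zu bytes; xorl eax,eax: %zu bytes\n",
              sizeof(xorq_rax_rax), sizeof(xorl_eax_eax));
  return 0;
}

This is why the frequent xorq(rax, rax) "Success" markers in the tests above still assemble to the two-byte form.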