/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef MacroAssemblerX86_h
#define MacroAssemblerX86_h

#if ENABLE(ASSEMBLER) && CPU(X86)

#include "MacroAssemblerX86Common.h"

// 32-bit x86 specialization of the macro assembler. It adds the operations
// that differ from the shared x86/x86-64 implementation in
// MacroAssemblerX86Common -- chiefly overloads taking absolute addresses,
// which on x86-32 can be encoded directly as 32-bit memory operands.
class MacroAssemblerX86 : public MacroAssemblerX86Common {
public:
    // Pointers are 4 bytes on x86-32, so scaled-index addressing of
    // pointer-sized elements uses a scale of four.
    static const Scale ScalePtr = TimesFour;

    // Re-export the base-class overloads so the absolute-address overloads
    // declared below do not hide them (C++ name hiding across scopes).
    using MacroAssemblerX86Common::add32;
    using MacroAssemblerX86Common::and32;
    using MacroAssemblerX86Common::branchAdd32;
    using MacroAssemblerX86Common::branchSub32;
    using MacroAssemblerX86Common::sub32;
    using MacroAssemblerX86Common::or32;
    using MacroAssemblerX86Common::load32;
    using MacroAssemblerX86Common::store32;
    using MacroAssemblerX86Common::branch32;
    using MacroAssemblerX86Common::call;
    using MacroAssemblerX86Common::jump;
    using MacroAssemblerX86Common::addDouble;
    using MacroAssemblerX86Common::loadDouble;
    using MacroAssemblerX86Common::storeDouble;
    using MacroAssemblerX86Common::convertInt32ToDouble;
    // dest = src + imm. Implemented with LEA, giving a non-destructive
    // three-operand add (src is left untouched).
    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leal_mr(imm.m_value, src, dest);
    }
    // *address += imm, where address is a fixed 32-bit absolute location.
    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.addl_im(imm.m_value, address.m_ptr);
    }
    // 64-bit add of a sign-extended 32-bit immediate to memory, done as an
    // add / add-with-carry pair (little-endian: low word at address, high
    // word at address + 4). "imm.m_value >> 31" replicates the immediate's
    // sign bit to form its high 32 bits.
    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.addl_im(imm.m_value, address.m_ptr);
        m_assembler.adcl_im(imm.m_value >> 31, reinterpret_cast<const char*>(address.m_ptr) + sizeof(int32_t));
    }
    // *address &= imm (32-bit AND at an absolute address).
    void and32(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.andl_im(imm.m_value, address.m_ptr);
    }
    // *address |= imm (32-bit OR at an absolute address).
    void or32(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.orl_im(imm.m_value, address.m_ptr);
    }
    // *address -= imm (32-bit subtract at an absolute address).
    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.subl_im(imm.m_value, address.m_ptr);
    }
    // dest = *address (32-bit load from an absolute address).
    void load32(const void* address, RegisterID dest)
    {
        m_assembler.movl_mr(address, dest);
    }
91 void addDouble(AbsoluteAddress address, FPRegisterID dest)
93 m_assembler.addsd_mr(address.m_ptr, dest);
    // dest = *address (double-precision load via SSE2 MOVSD).
    void loadDouble(const void* address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address, dest);
    }
    // *address = src (double-precision store via SSE2 MOVSD).
    void storeDouble(FPRegisterID src, const void* address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address);
    }
108 void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
110 m_assembler.cvtsi2sd_mr(src.m_ptr, dest);
113 void absDouble(FPRegisterID src, FPRegisterID dst)
116 static const double negativeZeroConstant = -0.0;
117 loadDouble(&negativeZeroConstant, dst);
118 m_assembler.andnpd_rr(src, dst);
    // *address = imm (store a 32-bit immediate to an absolute address).
    void store32(TrustedImm32 imm, void* address)
    {
        m_assembler.movl_i32m(imm.m_value, address);
    }
    // *address = src (store a register to an absolute address).
    void store32(RegisterID src, void* address)
    {
        m_assembler.movl_rm(src, address);
    }
    // *dest.m_ptr += imm, then branch on the flags set by the add as
    // selected by cond (e.g. Zero, Signed, Overflow).
    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
    {
        m_assembler.addl_im(imm.m_value, dest.m_ptr);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
    // *dest.m_ptr -= imm, then branch on the flags set by the subtract as
    // selected by cond.
    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
    {
        m_assembler.subl_im(imm.m_value, dest.m_ptr);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
    // Compare the 32-bit value at an absolute address ('left') against a
    // register ('right') and branch if the relation holds.
    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.m_ptr);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
    // Compare the 32-bit value at an absolute address ('left') against an
    // immediate ('right') and branch if the relation holds.
    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.m_ptr);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
    // Emit a near call and return it flagged Linkable so LinkBuffer can
    // bind its target later.
    Call call()
    {
        return Call(m_assembler.call(), Call::Linkable);
    }
    // Address is a memory location containing the address to jump to;
    // emits an indirect JMP through that absolute memory operand.
    void jump(AbsoluteAddress address)
    {
        m_assembler.jmp_m(address.m_ptr);
    }
    // A tail call is emitted as an unconditional jump, but wrapped as a
    // Call so the link/repatch machinery can retarget it like any call.
    Call tailRecursiveCall()
    {
        return Call::fromTailJump(jump());
    }
    // Convert a previously emitted jump into a linkable tail call.
    Call makeTailRecursiveCall(Jump oldJump)
    {
        return Call::fromTailJump(oldJump);
    }
    // Load a pointer-sized immediate into dest; the returned DataLabelPtr
    // marks the 32-bit immediate so it can be repatched later.
    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
    {
        m_assembler.movl_i32r(initialValue.asIntptr(), dest);
        return DataLabelPtr(this);
    }
    // Compare 'left' against a patchable pointer immediate and branch.
    // The _force32 encoding always emits a full 32-bit immediate so the
    // repatcher can overwrite it regardless of the initial value.
    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left);
        dataLabel = DataLabelPtr(this); // label the immediate just emitted
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
    // Compare the value at [base + offset] against a patchable pointer
    // immediate and branch; _force32 guarantees a rewritable 32-bit field.
    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base);
        dataLabel = DataLabelPtr(this); // label the immediate just emitted
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
    // Store a patchable pointer immediate to [base + offset]; the returned
    // label addresses the immediate for later repatching.
    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        m_assembler.movl_i32m(initialValue.asIntptr(), address.offset, address.base);
        return DataLabelPtr(this);
    }
    // Capability queries: the floating-point fast paths all require SSE2,
    // so each feature is reported as available exactly when SSE2 is.
    static bool supportsFloatingPoint() { return isSSE2Present(); }
    // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
    static bool supportsFloatingPointTruncate() { return isSSE2Present(); }
    static bool supportsFloatingPointSqrt() { return isSSE2Present(); }
    static bool supportsFloatingPointAbs() { return isSSE2Present(); }
    // Recover the absolute target of a previously linked near call. The
    // 32-bit word immediately preceding dataLocation() ([-1]) is the
    // call's relative displacement; adding it to dataLocation() (the
    // address of the instruction after the call) yields the destination.
    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        intptr_t offset = reinterpret_cast<int32_t*>(call.dataLocation())[-1];
        return FunctionPtr(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(call.dataLocation()) + offset));
    }
216 friend class LinkBuffer;
217 friend class RepatchBuffer;
219 static void linkCall(void* code, Call call, FunctionPtr function)
221 X86Assembler::linkCall(code, call.m_label, function.value());
224 static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
226 X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
229 static void repatchCall(CodeLocationCall call, FunctionPtr destination)
231 X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
237 #endif // ENABLE(ASSEMBLER)
239 #endif // MacroAssemblerX86_h