Fix TIVI-504 (backport of trac.webkit.org/changeset/144137)
Source/JavaScriptCore/jit/JITArithmetic.cpp (profile/ivi/webkit-efl.git)
/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"

#include "CodeBlock.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JITStubs.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "ResultType.h"
#include "SamplingTool.h"

#ifndef NDEBUG
#include <stdio.h>
#endif

using namespace std;

namespace JSC {

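// Each relational jump opcode maps directly onto a branch condition; the
// negated forms (op_jnless, op_jnlesseq, op_jngreater, op_jngreatereq) branch
// when the comparison fails, so they use the inverted condition.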
void JIT::emit_op_jless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jless, op1, op2, target, LessThan);
}

void JIT::emit_op_jlesseq(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jlesseq, op1, op2, target, LessThanOrEqual);
}

void JIT::emit_op_jgreater(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jgreater, op1, op2, target, GreaterThan);
}

void JIT::emit_op_jgreatereq(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jgreatereq, op1, op2, target, GreaterThanOrEqual);
}

void JIT::emit_op_jnless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jnless, op1, op2, target, GreaterThanOrEqual);
}

void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jnlesseq, op1, op2, target, GreaterThan);
}

void JIT::emit_op_jngreater(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jngreater, op1, op2, target, LessThanOrEqual);
}

void JIT::emit_op_jngreatereq(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jngreatereq, op1, op2, target, LessThan);
}

void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleLessThan, cti_op_jless, false, iter);
}

void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqual, cti_op_jlesseq, false, iter);
}

void JIT::emitSlow_op_jgreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThan, cti_op_jgreater, false, iter);
}

void JIT::emitSlow_op_jgreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqual, cti_op_jgreatereq, false, iter);
}

void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqualOrUnordered, cti_op_jless, true, iter);
}

void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrUnordered, cti_op_jlesseq, true, iter);
}

void JIT::emitSlow_op_jngreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqualOrUnordered, cti_op_jgreater, true, iter);
}

void JIT::emitSlow_op_jngreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrUnordered, cti_op_jgreatereq, true, iter);
}

#if USE(JSVALUE64)

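// Fast path: negate an immediate int32 in place. Values whose low 31 bits are
// all zero (0 and INT_MIN) take the slow case, since -0 is a double and
// -INT_MIN overflows int32. For doubles, negation flips the sign bit of the
// encoded value.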
void JIT::emit_op_negate(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src, regT0);

    Jump srcNotInt = emitJumpIfNotImmediateInteger(regT0);
    addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
    neg32(regT0);
    emitFastArithReTagImmediate(regT0, regT0);

    Jump end = jump();

    srcNotInt.link(this);
    emitJumpSlowCaseIfNotImmediateNumber(regT0);

    move(TrustedImmPtr(reinterpret_cast<void*>(0x8000000000000000ull)), regT1);
    xorPtr(regT1, regT0);

    end.link(this);
    emitPutVirtualRegister(dst);
}

void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // 0x7fffffff check
    linkSlowCase(iter); // double check

    JITStubCall stubCall(this, cti_op_negate);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}

void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT2);
    lshift32(regT2, regT0);
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    UNUSED_PARAM(op1);
    UNUSED_PARAM(op2);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_lshift);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT2);
    stubCall.call(result);
}

void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        // isOperandConstantImmediateInt(op2) => 1 SlowCase
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
        rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT2);
        if (supportsFloatingPointTruncate()) {
            Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
            // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
            addSlowCase(emitJumpIfNotImmediateNumber(regT0));
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
            lhsIsInt.link(this);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        } else {
            // !supportsFloatingPoint() => 2 SlowCases
            emitJumpSlowCaseIfNotImmediateInteger(regT0);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        }
        emitFastArithImmToInt(regT2);
        rshift32(regT2, regT0);
    }
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_rshift);

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
    } else {
        if (supportsFloatingPointTruncate()) {
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
            // We're reloading op1 to regT0 as we can no longer guarantee that
            // we have not munged the operand.  It may have already been shifted
            // correctly, but it still will not have been tagged.
            stubCall.addArgument(op1, regT0);
            stubCall.addArgument(regT2);
        } else {
            linkSlowCase(iter);
            linkSlowCase(iter);
            stubCall.addArgument(regT0);
            stubCall.addArgument(regT2);
        }
    }

    stubCall.call(result);
}

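// op_urshift: an unsigned right shift produces a uint32. When the effective
// shift may be zero, the result can be a value >= 2^31 that cannot be boxed as
// an immediate int, so the fast path bails to the slow case if the result
// looks negative as an int32.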
void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Slow case of urshift makes assumptions about what registers hold the
    // shift arguments, so any changes must be updated there as well.
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitFastArithImmToInt(regT0);
        int shift = getConstantOperand(op2).asInt32();
        if (shift)
            urshift32(Imm32(shift & 0x1f), regT0);
        // unsigned shift < 0 or shift = k*2^32 may result in (essentially)
        // a toUint conversion, which can result in a value we cannot represent
        // as an immediate int.
        if (shift < 0 || !(shift & 31))
            addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(dst, regT0);
        return;
    }
    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    if (!isOperandConstantImmediateInt(op1))
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT1);
    urshift32(regT1, regT0);
    addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(dst, regT0);
}

void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        int shift = getConstantOperand(op2).asInt32();
        // op1 = regT0
        linkSlowCase(iter); // int32 check
        if (supportsFloatingPointTruncate()) {
            JumpList failures;
            failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            if (shift)
                urshift32(Imm32(shift & 0x1f), regT0);
            if (shift < 0 || !(shift & 31))
                failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
            emitFastArithReTagImmediate(regT0, regT0);
            emitPutVirtualRegister(dst, regT0);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
            failures.link(this);
        }
        if (shift < 0 || !(shift & 31))
            linkSlowCase(iter); // failed to box in hot path
    } else {
        // op1 = regT0
        // op2 = regT1
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int
            if (supportsFloatingPointTruncate()) {
                JumpList failures;
                failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
                addPtr(tagTypeNumberRegister, regT0);
                movePtrToDouble(regT0, fpRegT0);
                failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
                failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
                emitFastArithImmToInt(regT1);
                urshift32(regT1, regT0);
                failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
                emitFastArithReTagImmediate(regT0, regT0);
                emitPutVirtualRegister(dst, regT0);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
                failures.link(this);
            }
        }

        linkSlowCase(iter); // int32 check - op2 is not an int
        linkSlowCase(iter); // Can't represent unsigned result as an immediate
    }

    JITStubCall stubCall(this, cti_op_urshift);
    stubCall.addArgument(op1, regT0);
    stubCall.addArgument(op2, regT1);
    stubCall.call(dst);
}

void JIT::emit_compareAndJump(OpcodeID, unsigned op1, unsigned op2, unsigned target, RelationalCondition condition)
{
    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateChar(op1)) {
        emitGetVirtualRegister(op2, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitGetVirtualRegister(op1, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t op2imm = getConstantOperandImmediateInt(op2);
        addJump(branch32(condition, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
        int32_t op1imm = getConstantOperandImmediateInt(op1);
        addJump(branch32(commute(condition), regT1, Imm32(op1imm)), target);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(condition, regT0, regT1), target);
    }
}

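// The slow path must link exactly as many slow cases as emit_compareAndJump
// registered for the same operand pattern, in the same order, before falling
// back to the stub call.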
void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, DoubleCondition condition, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jlesseq), OPCODE_LENGTH_op_jlesseq_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jnless), OPCODE_LENGTH_op_jnless_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jnlesseq), OPCODE_LENGTH_op_jnlesseq_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jgreater), OPCODE_LENGTH_op_jgreater_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jgreatereq), OPCODE_LENGTH_op_jgreatereq_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jngreater), OPCODE_LENGTH_op_jngreater_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jngreatereq), OPCODE_LENGTH_op_jngreatereq_equals_op_jless);

    // We generate inline code for the following cases in the slow path:
    // - floating-point number to constant int immediate
    // - constant int immediate to floating-point number
    // - floating-point number to floating-point number.
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        JITStubCall stubCall(this, stub);
        stubCall.addArgument(op1, regT0);
        stubCall.addArgument(op2, regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
        return;
    }

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);

            int32_t op2imm = getConstantOperand(op2).asInt32();

            move(Imm32(op2imm), regT1);
            convertInt32ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));

            fail1.link(this);
        }

        JITStubCall stubCall(this, stub);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);

    } else if (isOperandConstantImmediateInt(op1)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT1, fpRegT1);

            int32_t op1imm = getConstantOperand(op1).asInt32();

            move(Imm32(op1imm), regT0);
            convertInt32ToDouble(regT0, fpRegT0);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));

            fail1.link(this);
        }

        JITStubCall stubCall(this, stub);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
    } else {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
            Jump fail3 = emitJumpIfImmediateInteger(regT1);
            addPtr(tagTypeNumberRegister, regT0);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT0, fpRegT0);
            movePtrToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));

            fail1.link(this);
            fail2.link(this);
            fail3.link(this);
        }

        linkSlowCase(iter);
        JITStubCall stubCall(this, stub);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
    }
}

void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t imm = getConstantOperandImmediateInt(op1);
        andPtr(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t imm = getConstantOperandImmediateInt(op2);
        andPtr(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        andPtr(regT1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    }
    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    if (isOperandConstantImmediateInt(op1)) {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT0);
        stubCall.call(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call(result);
    }
}

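// Post-increment: the original boxed value stays in regT0 and becomes the
// result, while regT1 receives the incremented value written back to srcDst.
// Non-int operands and overflow fall through to the slow case.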
void JIT::emit_op_post_inc(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
    if (canBeOptimizedOrInlined())
        killLastResultRegister();
}

void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_post_inc);
    stubCall.addArgument(regT0);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(result);
}

void JIT::emit_op_post_dec(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
    if (canBeOptimizedOrInlined())
        killLastResultRegister();
}

void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_post_dec);
    stubCall.addArgument(regT0);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(result);
}

void JIT::emit_op_pre_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(srcDst);
}

void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    JITStubCall stubCall(this, cti_op_pre_inc);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst);
}

void JIT::emit_op_pre_dec(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(srcDst);
}

void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    JITStubCall stubCall(this, cti_op_pre_dec);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst);
}

/* ------------------------------ BEGIN: OP_MOD ------------------------------ */

#if CPU(X86) || CPU(X86_64)

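// On x86/x86-64 the fast path uses IDIV directly. Slow cases cover non-int
// operands, a zero divisor, INT_MIN / -1 (which overflows), and a negative
// dividend with a zero remainder (the result must be -0, a double).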
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    ASSERT(regT1 == X86Registers::edx);
    ASSERT(regT2 == X86Registers::ecx);

    emitGetVirtualRegisters(op1, regT3, op2, regT2);
    emitJumpSlowCaseIfNotImmediateInteger(regT3);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);

    move(regT3, regT0);
    addSlowCase(branchTest32(Zero, regT2));
    Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
    addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
    denominatorNotNeg1.link(this);
    m_assembler.cdq();
    m_assembler.idivl_r(regT2);
    Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
    addSlowCase(branchTest32(Zero, regT1));
    numeratorPositive.link(this);
    emitFastArithReTagImmediate(regT1, regT0);
    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(regT3);
    stubCall.addArgument(regT2);
    stubCall.call(result);
}

#else // CPU(X86) || CPU(X86_64)

void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1, regT2);
    stubCall.addArgument(op2, regT2);
    stubCall.call(result);
}

void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    ASSERT_NOT_REACHED();
}

#endif // CPU(X86) || CPU(X86_64)

/* ------------------------------ END: OP_MOD ------------------------------ */

/* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */

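// Generic int fast path for op_add, op_sub and op_mul: both operands must be
// immediate ints, and overflow bails to the slow case. For op_mul a zero
// result may really be -0; without profiling any zero result takes the slow
// case, while with profiling only a genuine negative zero does, after bumping
// the special fast case counter so the DFG can tell it apart from an overflow.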
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
{
    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if ENABLE(VALUE_PROFILER)
    RareCaseProfile* profile = m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif
    if (opcodeID == op_add)
        addSlowCase(branchAdd32(Overflow, regT1, regT0));
    else if (opcodeID == op_sub)
        addSlowCase(branchSub32(Overflow, regT1, regT0));
    else {
        ASSERT(opcodeID == op_mul);
#if ENABLE(VALUE_PROFILER)
        if (shouldEmitProfiling()) {
            // We want to be able to measure if this is taking the slow case just
            // because of negative zero. If this produces positive zero, then we
            // don't want the slow case to be taken because that will throw off
            // speculative compilation.
            move(regT0, regT2);
            addSlowCase(branchMul32(Overflow, regT1, regT2));
            JumpList done;
            done.append(branchTest32(NonZero, regT2));
            Jump negativeZero = branch32(LessThan, regT0, TrustedImm32(0));
            done.append(branch32(GreaterThanOrEqual, regT1, TrustedImm32(0)));
            negativeZero.link(this);
            // We only get here if we have a genuine negative zero. Record this,
            // so that the speculative JIT knows that we failed speculation
            // because of a negative zero.
            add32(TrustedImm32(1), AbsoluteAddress(&profile->m_counter));
            addSlowCase(jump());
            done.link(this);
            move(regT2, regT0);
        } else {
            addSlowCase(branchMul32(Overflow, regT1, regT0));
            addSlowCase(branchTest32(Zero, regT0));
        }
#else
        addSlowCase(branchMul32(Overflow, regT1, regT0));
        addSlowCase(branchTest32(Zero, regT0));
#endif
    }
    emitFastArithIntToImmNoCheck(regT0, regT0);
}

void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned op2, OperandTypes types, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase)
{
    // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
    COMPILE_ASSERT(((TagTypeNumber + DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);

    Jump notImm1;
    Jump notImm2;
    if (op1HasImmediateIntFastCase) {
        notImm2 = getSlowCase(iter);
    } else if (op2HasImmediateIntFastCase) {
        notImm1 = getSlowCase(iter);
    } else {
        notImm1 = getSlowCase(iter);
        notImm2 = getSlowCase(iter);
    }

    linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
    if (opcodeID == op_mul && !op1HasImmediateIntFastCase && !op2HasImmediateIntFastCase) // op_mul has an extra slow case to handle 0 * negative number.
        linkSlowCase(iter);
    emitGetVirtualRegister(op1, regT0);

    Label stubFunctionCall(this);
    JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
    if (op1HasImmediateIntFastCase || op2HasImmediateIntFastCase) {
        emitGetVirtualRegister(op1, regT0);
        emitGetVirtualRegister(op2, regT1);
    }
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(result);
    Jump end = jump();

    if (op1HasImmediateIntFastCase) {
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        emitGetVirtualRegister(op1, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT2);
    } else if (op2HasImmediateIntFastCase) {
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        emitGetVirtualRegister(op2, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT2);
    } else {
        // if we get here, eax is not an int32, edx not yet checked.
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT1);
        Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT2);
        Jump op2wasInteger = jump();

        // if we get here, eax IS an int32, edx is not.
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        convertInt32ToDouble(regT0, fpRegT1);
        op2isDouble.link(this);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT2);
        op2wasInteger.link(this);
    }

    if (opcodeID == op_add)
        addDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_sub)
        subDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_mul)
        mulDouble(fpRegT2, fpRegT1);
    else {
        ASSERT(opcodeID == op_div);
        divDouble(fpRegT2, fpRegT1);
    }
    moveDoubleToPtr(fpRegT1, regT0);
    subPtr(tagTypeNumberRegister, regT0);
    emitPutVirtualRegister(result, regT0);

    end.link(this);
}

void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        addSlowCase();
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
        return;
    }

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, regT0, Imm32(getConstantOperandImmediateInt(op1)), regT1));
        emitFastArithIntToImmNoCheck(regT1, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, regT0, Imm32(getConstantOperandImmediateInt(op2)), regT1));
        emitFastArithIntToImmNoCheck(regT1, regT0);
    } else
        compileBinaryArithOp(op_add, result, op1, op2, types);

    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        linkDummySlowCase(iter);
        return;
    }

    bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1);
    bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2);
    compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
}

void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
#if ENABLE(VALUE_PROFILER)
        // Add a special fast case profile because the DFG JIT will expect one.
        m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
        emitFastArithReTagImmediate(regT1, regT0);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
#if ENABLE(VALUE_PROFILER)
        // Add a special fast case profile because the DFG JIT will expect one.
        m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
        emitFastArithReTagImmediate(regT1, regT0);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, types);

    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1) && getConstantOperandImmediateInt(op1) > 0;
    bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2) && getConstantOperandImmediateInt(op2) > 0;
    compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
}

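// op_div is always performed in double arithmetic: both operands are loaded or
// converted to doubles and divided with divDouble. With value profiling
// enabled, an integral result is re-boxed as an immediate int so the DFG can
// later speculate integer division.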
void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateDouble(op1)) {
        emitGetVirtualRegister(op1, regT0);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT0);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitLoadInt32ToDouble(op1, fpRegT0);
    } else {
        emitGetVirtualRegister(op1, regT0);
        if (!types.first().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT0);
        Jump notInt = emitJumpIfNotImmediateInteger(regT0);
        convertInt32ToDouble(regT0, fpRegT0);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT0);
        skipDoubleLoad.link(this);
    }

    if (isOperandConstantImmediateDouble(op2)) {
        emitGetVirtualRegister(op2, regT1);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT1);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoadInt32ToDouble(op2, fpRegT1);
    } else {
        emitGetVirtualRegister(op2, regT1);
        if (!types.second().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT1);
        Jump notInt = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT1);
        skipDoubleLoad.link(this);
    }
    divDouble(fpRegT1, fpRegT0);

#if ENABLE(VALUE_PROFILER)
    // Is the result actually an integer? The DFG JIT would really like to know. If it's
    // not an integer, we increment a count. If this together with the slow case counter
    // are below threshold then the DFG JIT will compile this division with a speculation
    // that the remainder is zero.

    // As well, there are cases where a double result here would cause an important field
    // in the heap to sometimes have doubles in it, resulting in double predictions getting
    // propagated to a use site where it might cause damage (such as the index to an array
    // access). So if we are DFG compiling anything in the program, we want this code to
    // ensure that it produces integers whenever possible.

    // FIXME: This will fail to convert to integer if the result is zero. We should
    // distinguish between positive zero and negative zero here.

    JumpList notInteger;
    branchConvertDoubleToInt32(fpRegT0, regT0, notInteger, fpRegT1);
    // If we've got an integer, we might as well make that the result of the division.
    emitFastArithReTagImmediate(regT0, regT0);
    Jump isInteger = jump();
    notInteger.link(this);
    add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset)->m_counter));
    moveDoubleToPtr(fpRegT0, regT0);
    subPtr(tagTypeNumberRegister, regT0);
    isInteger.link(this);
#else
    // Double result.
    moveDoubleToPtr(fpRegT0, regT0);
    subPtr(tagTypeNumberRegister, regT0);
#endif

    emitPutVirtualRegister(dst, regT0);
}

void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
    if (types.first().definitelyIsNumber() && types.second().definitelyIsNumber()) {
#ifndef NDEBUG
        breakpoint();
#endif
        return;
    }
    if (!isOperandConstantImmediateDouble(op1) && !isOperandConstantImmediateInt(op1)) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter);
    }
    if (!isOperandConstantImmediateDouble(op2) && !isOperandConstantImmediateInt(op2)) {
        if (!types.second().definitelyIsNumber())
            linkSlowCase(iter);
    }
    // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
    JITStubCall stubCall(this, cti_op_div);
    stubCall.addArgument(op1, regT2);
    stubCall.addArgument(op2, regT2);
    stubCall.call(result);
}

void JIT::emit_op_sub(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    compileBinaryArithOp(op_sub, result, op1, op2, types);
    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types, false, false);
}

/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */

#endif // USE(JSVALUE64)

} // namespace JSC

#endif // ENABLE(JIT)