// Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
1 /*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #include "config.h"
27
28 #if ENABLE(JIT)
29 #if USE(JSVALUE32_64)
30 #include "JIT.h"
31
32 #include "CodeBlock.h"
33 #include "JITInlineMethods.h"
34 #include "JITStubCall.h"
35 #include "JITStubs.h"
36 #include "JSArray.h"
37 #include "JSFunction.h"
38 #include "Interpreter.h"
39 #include "ResultType.h"
40 #include "SamplingTool.h"
41
42 #ifndef NDEBUG
43 #include <stdio.h>
44 #endif
45
46 using namespace std;
47
48 namespace JSC {
49
// Negate (-x) fast path. Handles int32 negation inline, flips the sign bit
// for doubles, and bails to the stub for everything else.
void JIT::emit_op_negate(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump srcNotInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    // 0 and INT_MIN cannot be negated as int32s (-0 is a double, -INT_MIN
    // overflows), and both are exactly the payloads with no bits set in the
    // low 31 bits — send those to the slow path.
    addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
    neg32(regT0);
    emitStoreInt32(dst, regT0, (dst == src));

    Jump end = jump();

    srcNotInt.link(this);
    // Not an int32: must be a double, otherwise take the slow path (the
    // matching linkSlowCase comment calls this the "double check").
    addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));

    // Negate the double by flipping the sign bit, which lives in the high
    // word (the tag half of the tag:payload pair).
    xor32(TrustedImm32(1 << 31), regT1);
    store32(regT1, tagFor(dst));
    if (dst != src)
        store32(regT0, payloadFor(dst));

    end.link(this);
}
74
// Slow path for op_negate: links the two slow cases recorded by the fast
// path (in emission order) and falls back to the cti_op_negate stub.
void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // 0x7fffffff check
    linkSlowCase(iter); // double check

    // regT1:regT0 still hold the operand's tag:payload from the fast path.
    JITStubCall stubCall(this, cti_op_negate);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}
86
// Fast path for the fused relational compare-and-branch opcodes. Three tiers:
// a direct character compare when one operand is a constant single-character
// string, an int32 compare, and (when the CPU has an FPU) a double compare
// via emitBinaryDoubleOp. commute() flips the condition when the constant
// operand ends up on the left-hand side of the emitted branch.
// The addSlowCase order here must be mirrored by emit_compareAndJumpSlow.
void JIT::emit_compareAndJump(OpcodeID opcode, unsigned op1, unsigned op2, unsigned target, RelationalCondition condition)
{
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Character less.
    if (isOperandConstantImmediateChar(op1)) {
        emitLoad(op2, regT1, regT0);
        // The non-constant operand must be a cell (a string); otherwise bail.
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    // Int32 compare; non-int32 tags divert to the double path below.
    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32())), target);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(condition, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(condition, regT0, regT2), target);
    }

    // Without floating-point support, any non-int32 operand goes to the stub.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double less.
    emitBinaryDoubleOp(opcode, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
    end.link(this);
}
137
// Slow path for the compare-and-branch opcodes. The number and order of
// linkSlowCase() calls must exactly mirror the addSlowCase() calls made by
// emit_compareAndJump (and emitBinaryDoubleOp) for the same operand
// configuration, then the generic comparison stub is called and its boolean
// result drives the branch.
void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, DoubleCondition, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        // The character fast path records four slow cases: the cell tag
        // check plus the failure jumps appended by emitLoadCharacterString
        // (presumably three — confirm against emitLoadCharacterString).
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
    } else {
        if (!supportsFloatingPoint()) {
            // No FPU: every non-int32 tag check became a slow case.
            if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            // FPU present: the slow cases come from emitBinaryDoubleOp's
            // double/int32 verification, one set per non-constant operand.
            if (!isOperandConstantImmediateInt(op1)) {
                linkSlowCase(iter); // double check
                linkSlowCase(iter); // int32 check
            }
            if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // double check
        }
    }
    JITStubCall stubCall(this, stub);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    // The stub returns a boolean in regT0; 'invert' selects branch polarity.
    emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
}
165
166 // LeftShift (<<)
167
// LeftShift (<<) fast path. Both operands must be int32s; a 32-bit left
// shift always produces an int32, so no overflow check is needed.
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Constant shift amount: only op1 needs an int32 tag check.
    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_lshift));
        return;
    }

    // General case: check both tags (op1's check is skipped if op1 is a
    // constant int, since its tag is then statically known).
    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    if (!isOperandConstantImmediateInt(op1))
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    lshift32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, dst == op1 || dst == op2, OPCODE_LENGTH(op_lshift));
}
189
190 void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
191 {
192     unsigned dst = currentInstruction[1].u.operand;
193     unsigned op1 = currentInstruction[2].u.operand;
194     unsigned op2 = currentInstruction[3].u.operand;
195
196     if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
197         linkSlowCase(iter); // int32 check
198     linkSlowCase(iter); // int32 check
199
200     JITStubCall stubCall(this, cti_op_lshift);
201     stubCall.addArgument(op1);
202     stubCall.addArgument(op2);
203     stubCall.call(dst);
204 }
205
206 // RightShift (>>) and UnsignedRightShift (>>>) helper
207
// Shared fast path for RightShift (>>) and UnsignedRightShift (>>>);
// 'isUnsigned' selects the opcode. Operands must be int32s. An unsigned
// shift of a negative value can produce a result above INT_MAX, which
// cannot be represented as an int32, so those cases bail out.
// NOTE(review): OPCODE_LENGTH(op_rshift) is used for both variants;
// assumes op_urshift has the same opcode length — confirm.
void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Slow case of rshift makes assumptions about what registers hold the
    // shift arguments, so any changes must be updated there as well.
    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        // Only the low five bits of the shift count are used.
        int shift = getConstantOperand(op2).asInt32() & 0x1f;
        if (shift) {
            if (isUnsigned)
                urshift32(Imm32(shift), regT0);
            else
                rshift32(Imm32(shift), regT0);
        } else if (isUnsigned) // >>> by zero still needs the negative-value bailout; >> by zero is a no-op.
            addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_rshift));
    } else {
        // Register shift amount: check both tags (op1's check skipped if it
        // is a constant int).
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        if (!isOperandConstantImmediateInt(op1))
            addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        if (isUnsigned) {
            urshift32(regT2, regT0);
            // Result with the sign bit set is > INT_MAX: not boxable as int32.
            addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
        } else
            rshift32(regT2, regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_rshift));
    }
}
241
// Shared slow path for op_rshift / op_urshift. Links the slow cases in the
// exact order emitRightShift recorded them, tries an inline double-truncate
// fast path when the CPU supports it, and otherwise calls the stub.
void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool isUnsigned)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        // Recompute the masked shift count exactly as the fast path did.
        int shift = getConstantOperand(op2).asInt32() & 0x1f;
        // op1 = regT1:regT0
        linkSlowCase(iter); // int32 check
        if (supportsFloatingPointTruncate()) {
            // op1 was not an int32; try truncating a double operand inline.
            JumpList failures;
            failures.append(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
            emitLoadDouble(op1, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            if (shift) {
                if (isUnsigned)
                    urshift32(Imm32(shift), regT0);
                else
                    rshift32(Imm32(shift), regT0);
            } else if (isUnsigned) // >>> by zero: negative results are not boxable as int32.
                failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
            move(TrustedImm32(JSValue::Int32Tag), regT1);
            emitStoreInt32(dst, regT0, false);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
            failures.link(this);
        }
        // The fast path only recorded this extra slow case for >>> by zero.
        if (isUnsigned && !shift)
            linkSlowCase(iter); // failed to box in hot path
    } else {
        // op1 = regT1:regT0
        // op2 = regT3:regT2
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int
            if (supportsFloatingPointTruncate()) {
                JumpList failures;
                failures.append(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag))); // op1 is not a double
                emitLoadDouble(op1, fpRegT0);
                failures.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag))); // op2 is not an int
                failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
                if (isUnsigned) {
                    urshift32(regT2, regT0);
                    failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
                } else
                    rshift32(regT2, regT0);
                move(TrustedImm32(JSValue::Int32Tag), regT1);
                emitStoreInt32(dst, regT0, false);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
                failures.link(this);
            }
        }

        linkSlowCase(iter); // int32 check - op2 is not an int
        if (isUnsigned)
            linkSlowCase(iter); // Can't represent unsigned result as an immediate
    }

    // Everything else falls back to the generic stub.
    JITStubCall stubCall(this, isUnsigned ? cti_op_urshift : cti_op_rshift);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
303
304 // RightShift (>>)
305
// RightShift (>>): signed variant of the shared shift emitter.
void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, false);
}
310
// Slow path for op_rshift: signed variant of the shared slow-case emitter.
void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, false);
}
315
316 // UnsignedRightShift (>>>)
317
// UnsignedRightShift (>>>): unsigned variant of the shared shift emitter.
void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, true);
}
322
// Slow path for op_urshift: unsigned variant of the shared slow-case emitter.
void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, true);
}
327
328 // BitAnd (&)
329
// BitAnd (&) fast path: both operands must be int32s; the result is always
// an int32, so no overflow check is needed.
void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // If either operand is a constant int32, fold it into an immediate AND
    // and only tag-check the non-constant operand.
    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        and32(Imm32(constant), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op, OPCODE_LENGTH(op_bitand));
        return;
    }

    // General case: tag-check both operands.
    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    and32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitand));
}
352
353 void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
354 {
355     unsigned dst = currentInstruction[1].u.operand;
356     unsigned op1 = currentInstruction[2].u.operand;
357     unsigned op2 = currentInstruction[3].u.operand;
358
359     if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
360         linkSlowCase(iter); // int32 check
361     linkSlowCase(iter); // int32 check
362
363     JITStubCall stubCall(this, cti_op_bitand);
364     stubCall.addArgument(op1);
365     stubCall.addArgument(op2);
366     stubCall.call(dst);
367 }
368
369 // BitOr (|)
370
// BitOr (|) fast path: both operands must be int32s; the result is always
// an int32, so no overflow check is needed.
void JIT::emit_op_bitor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // If either operand is a constant int32, fold it into an immediate OR
    // and only tag-check the non-constant operand.
    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        or32(Imm32(constant), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, op == dst, OPCODE_LENGTH(op_bitor));
        return;
    }

    // General case: tag-check both operands.
    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    or32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitor));
}
393
394 void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
395 {
396     unsigned dst = currentInstruction[1].u.operand;
397     unsigned op1 = currentInstruction[2].u.operand;
398     unsigned op2 = currentInstruction[3].u.operand;
399
400     if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
401         linkSlowCase(iter); // int32 check
402     linkSlowCase(iter); // int32 check
403
404     JITStubCall stubCall(this, cti_op_bitor);
405     stubCall.addArgument(op1);
406     stubCall.addArgument(op2);
407     stubCall.call(dst);
408 }
409
410 // BitXor (^)
411
// BitXor (^) fast path: both operands must be int32s; the result is always
// an int32, so no overflow check is needed.
void JIT::emit_op_bitxor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // If either operand is a constant int32, fold it into an immediate XOR
    // and only tag-check the non-constant operand.
    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        xor32(Imm32(constant), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, op == dst, OPCODE_LENGTH(op_bitxor));
        return;
    }

    // General case: tag-check both operands.
    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    xor32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitxor));
}
434
435 void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
436 {
437     unsigned dst = currentInstruction[1].u.operand;
438     unsigned op1 = currentInstruction[2].u.operand;
439     unsigned op2 = currentInstruction[3].u.operand;
440
441     if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
442         linkSlowCase(iter); // int32 check
443     linkSlowCase(iter); // int32 check
444
445     JITStubCall stubCall(this, cti_op_bitxor);
446     stubCall.addArgument(op1);
447     stubCall.addArgument(op2);
448     stubCall.call(dst);
449 }
450
451 // PostInc (i++)
452
// PostInc (i++) fast path: dst gets the original int32 value, srcDst gets
// the incremented value. Non-int32 values and overflow go to the stub.
void JIT::emit_op_post_inc(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitLoad(srcDst, regT1, regT0);
    // The int32 check is recorded even in the noop case below; the slow
    // path only conditions its second link (overflow) on dst != srcDst.
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));

    if (dst == srcDst) // x = x++ is a noop for ints.
        return;

    // Store the incremented value back to srcDst...
    move(regT0, regT2);
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT2));
    emitStoreInt32(srcDst, regT2, true);

    // ...and the original value to dst.
    emitStoreAndMapInt32(dst, regT1, regT0, false, OPCODE_LENGTH(op_post_inc));
    // NOTE(review): presumably the optimizing-tier interaction requires
    // discarding the register mapping here — confirm against unmap().
    if (canBeOptimizedOrInlined())
        unmap();
}
472
// Slow path for op_post_inc: link order mirrors the fast path, which only
// records the overflow check when dst != srcDst.
void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter); // int32 check
    if (dst != srcDst)
        linkSlowCase(iter); // overflow check

    // The stub gets both the value and its register index — presumably so
    // it can write the incremented value back; confirm against
    // cti_op_post_inc.
    JITStubCall stubCall(this, cti_op_post_inc);
    stubCall.addArgument(srcDst);
    stubCall.addArgument(TrustedImm32(srcDst));
    stubCall.call(dst);
}
487
488 // PostDec (i--)
489
// PostDec (i--) fast path: dst gets the original int32 value, srcDst gets
// the decremented value. Non-int32 values and overflow go to the stub.
void JIT::emit_op_post_dec(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitLoad(srcDst, regT1, regT0);
    // The int32 check is recorded even in the noop case below; the slow
    // path only conditions its second link (overflow) on dst != srcDst.
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));

    if (dst == srcDst) // x = x-- is a noop for ints.
        return;

    // Store the decremented value back to srcDst...
    move(regT0, regT2);
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT2));
    emitStoreInt32(srcDst, regT2, true);

    // ...and the original value to dst.
    emitStoreAndMapInt32(dst, regT1, regT0, false, OPCODE_LENGTH(op_post_dec));
    // NOTE(review): presumably the optimizing-tier interaction requires
    // discarding the register mapping here — confirm against unmap().
    if (canBeOptimizedOrInlined())
        unmap();
}
509
// Slow path for op_post_dec: link order mirrors the fast path, which only
// records the overflow check when dst != srcDst.
void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter); // int32 check
    if (dst != srcDst)
        linkSlowCase(iter); // overflow check

    // The stub gets both the value and its register index — presumably so
    // it can write the decremented value back; confirm against
    // cti_op_post_dec.
    JITStubCall stubCall(this, cti_op_post_dec);
    stubCall.addArgument(srcDst);
    stubCall.addArgument(TrustedImm32(srcDst));
    stubCall.call(dst);
}
524
525 // PreInc (++i)
526
// PreInc (++i) fast path: int32 increment in place with an overflow check;
// non-int32 values and overflow go to the stub.
void JIT::emit_op_pre_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
    emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_pre_inc));
}
537
// Slow path for op_pre_inc: links the two fast-path slow cases in order,
// then calls the stub, which writes the result back into srcDst.
void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_pre_inc);
    stubCall.addArgument(srcDst);
    stubCall.call(srcDst);
}
549
550 // PreDec (--i)
551
// PreDec (--i) fast path: int32 decrement in place with an overflow check;
// non-int32 values and overflow go to the stub.
void JIT::emit_op_pre_dec(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
    emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_pre_dec));
}
562
// Slow path for op_pre_dec: links the two fast-path slow cases in order,
// then calls the stub, which writes the result back into srcDst.
void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_pre_dec);
    stubCall.addArgument(srcDst);
    stubCall.call(srcDst);
}
574
575 // Addition (+)
576
// Addition (+) fast path. currentInstruction[4] carries the statically
// profiled operand types; when either operand might not be a number the
// generic stub is called unconditionally and a dummy slow case is recorded
// so that slow-path linking (linkDummySlowCase) stays in sync.
void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        addSlowCase();
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1);
        stubCall.addArgument(op2);
        stubCall.call(dst);
        return;
    }

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // One constant int32 operand: use the immediate-add helper.
    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchAdd32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    // Without floating-point support, any non-int32 operand goes to the stub.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}
622
// Emits op_add where one operand is the constant int32 'constant' and the
// other is virtual register 'op'; opType is the profiled type of 'op'.
void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT2);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    addSlowCase(branchAdd32(Overflow, regT2, Imm32(constant), regT0));
    emitStoreInt32(dst, regT0, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    // If profiling can't guarantee a number, verify the tag is a double.
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    // Compute op + constant in double arithmetic.
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    addDouble(fpRegT1, fpRegT0);
    emitStoreDouble(dst, fpRegT0);

    end.link(this);
}
649
// Slow path for op_add. The linkSlowCase sequence in each branch must match
// the addSlowCase sequence emitted by emit_op_add / emitAdd32Constant /
// emitBinaryDoubleOp for the same operand configuration.
void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // The fast path already emitted an unconditional stub call for this
    // case; only consume the dummy slow case and return.
    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        linkDummySlowCase(iter);
        return;
    }

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        // Mirrors emitAdd32Constant.
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint())
            linkSlowCase(iter); // non-sse case
        else {
            ResultType opType = op == op1 ? types.first() : types.second();
            if (!opType.definitelyIsNumber())
                linkSlowCase(iter); // double check
        }
    } else {
        // Mirrors emit_op_add's general case plus emitBinaryDoubleOp.
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_add);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
696
697 // Subtraction (-)
698
// Subtraction (-) fast path: int32 subtraction with an overflow check, a
// constant-operand shortcut, and a double path via emitBinaryDoubleOp.
void JIT::emit_op_sub(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Constant right-hand side: use the immediate-subtract helper.
    // (Subtraction is not commutative, so only op2 can be folded.)
    if (isOperandConstantImmediateInt(op2)) {
        emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchSub32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    // Without floating-point support, any non-int32 operand goes to the stub.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}
733
// Emits op_sub where the right-hand side is the constant int32 'constant'
// and the left-hand side is virtual register 'op'; opType is the profiled
// type of 'op'.
void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT0);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
#if ENABLE(JIT_CONSTANT_BLINDING)
    // The blinded-constant variant needs an extra scratch register (regT3).
    addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2, regT3));
#else
    addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2));
#endif
    
    emitStoreInt32(dst, regT2, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    // If profiling can't guarantee a number, verify the tag is a double.
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    // Compute op - constant in double arithmetic; fpRegT1 holds the result.
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    subDouble(fpRegT0, fpRegT1);
    emitStoreDouble(dst, fpRegT1);

    end.link(this);
}
765
// Slow path for op_sub. The linkSlowCase sequence in each branch must match
// the addSlowCase sequence emitted by emit_op_sub / emitSub32Constant /
// emitBinaryDoubleOp for the same operand configuration.
void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateInt(op2)) {
        // Mirrors emitSub32Constant.
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
            linkSlowCase(iter); // int32 or double check
    } else {
        // Mirrors emit_op_sub's general case plus emitBinaryDoubleOp.
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_sub);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
800
// Emits the double (non-int32) fallback paths for a binary arithmetic or
// comparison opcode. Callers first emit the int32 fast path; the jumps taken
// when an operand's tag was not Int32Tag are collected in notInt32Op1 /
// notInt32Op2 and are linked here to one of two cases:
//   Case 1: op1 is not int32, op2 is unknown (int32 or double).
//   Case 2: op1 is int32, op2 is not int32.
// Register contract (when an operand "is in registers"): op1's tag is in
// regT1; op2's tag is in regT3 and its payload in regT2 (case 2 additionally
// expects op1's payload in regT0 when op1IsInRegisters).
// Any addSlowCase() emitted here must be linked, in the same order, by the
// corresponding emitSlow_op_* function.
// For arithmetic opcodes the result is stored to dst; for jump opcodes
// (op_jless etc.) a branch to bytecode target `dst` is emitted instead.
void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
{
    JumpList end;

    if (!notInt32Op1.empty()) {
        // Double case 1: Op1 is not int32; Op2 is unknown.
        notInt32Op1.link(this);

        ASSERT(op1IsInRegisters);

        // Verify Op1 is double: tags above LowestTag are non-numeric cells,
        // so bail to the slow case unless op1 is statically known numeric.
        if (!types.first().definitelyIsNumber())
            addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));

        if (!op2IsInRegisters)
            emitLoad(op2, regT3, regT2);

        // Tag strictly below LowestTag means op2 is already a double.
        Jump doubleOp2 = branch32(Below, regT3, TrustedImm32(JSValue::LowestTag));

        // Otherwise op2 must be int32 (or we take the slow case).
        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

        // Int32 op2: promote its payload to double in fpRegT0.
        convertInt32ToDouble(regT2, fpRegT0);
        Jump doTheMath = jump();

        // Load Op2 as double into double register.
        doubleOp2.link(this);
        emitLoadDouble(op2, fpRegT0);

        // Do the math. Throughout: fpRegT0 holds op2 as a double; op1 is
        // reloaded as a double from its stack slot.
        doTheMath.link(this);
        switch (opcodeID) {
            case op_mul:
                emitLoadDouble(op1, fpRegT2);
                mulDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_add:
                emitLoadDouble(op1, fpRegT2);
                addDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_sub:
                // Subtraction is not commutative: compute op1 - op2 into fpRegT1.
                emitLoadDouble(op1, fpRegT1);
                subDouble(fpRegT0, fpRegT1);
                emitStoreDouble(dst, fpRegT1);
                break;
            case op_div: {
                emitLoadDouble(op1, fpRegT1);
                divDouble(fpRegT0, fpRegT1);

#if ENABLE(VALUE_PROFILER)
                // Is the result actually an integer? The DFG JIT would really like to know. If it's
                // not an integer, we increment a count. If this together with the slow case counter
                // are below threshold then the DFG JIT will compile this division with a speculation
                // that the remainder is zero.
                
                // As well, there are cases where a double result here would cause an important field
                // in the heap to sometimes have doubles in it, resulting in double predictions getting
                // propagated to a use site where it might cause damage (such as the index to an array
                // access). So if we are DFG compiling anything in the program, we want this code to
                // ensure that it produces integers whenever possible.
                
                // FIXME: This will fail to convert to integer if the result is zero. We should
                // distinguish between positive zero and negative zero here.
                
                JumpList notInteger;
                branchConvertDoubleToInt32(fpRegT1, regT2, notInteger, fpRegT0);
                // If we've got an integer, we might as well make that the result of the division.
                emitStoreInt32(dst, regT2);
                Jump isInteger = jump();
                notInteger.link(this);
                add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
                emitStoreDouble(dst, fpRegT1);
                isInteger.link(this);
#else
                emitStoreDouble(dst, fpRegT1);
#endif
                break;
            }
            // Comparison opcodes: branch to bytecode target `dst` when the
            // condition holds. The "n" (negated) forms use the *-OrUnordered
            // conditions with swapped operands so that NaN comparisons branch,
            // matching JS semantics where e.g. !(a < NaN) is true.
            case op_jless:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
                break;
            case op_jlesseq:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThanOrEqual, fpRegT2, fpRegT0), dst);
                break;
            case op_jgreater:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleGreaterThan, fpRegT2, fpRegT0), dst);
                break;
            case op_jgreatereq:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT2, fpRegT0), dst);
                break;
            case op_jnless:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
                break;
            case op_jnlesseq:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
                break;
            case op_jngreater:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
                break;
            case op_jngreatereq:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT0, fpRegT2), dst);
                break;
            default:
                ASSERT_NOT_REACHED();
        }

        // Skip case 2's code if it is going to be emitted below.
        if (!notInt32Op2.empty())
            end.append(jump());
    }

    if (!notInt32Op2.empty()) {
        // Double case 2: Op1 is int32; Op2 is not int32.
        notInt32Op2.link(this);

        ASSERT(op2IsInRegisters);

        if (!op1IsInRegisters)
            emitLoadPayload(op1, regT0);

        // op1 is known int32 here; promote its payload to double in fpRegT0.
        convertInt32ToDouble(regT0, fpRegT0);

        // Verify op2 is double.
        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(Above, regT3, TrustedImm32(JSValue::LowestTag)));

        // Do the math. Throughout: fpRegT0 holds op1 as a double; op2 is
        // reloaded as a double from its stack slot.
        switch (opcodeID) {
            case op_mul:
                emitLoadDouble(op2, fpRegT2);
                mulDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_add:
                emitLoadDouble(op2, fpRegT2);
                addDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_sub:
                emitLoadDouble(op2, fpRegT2);
                subDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_div: {
                emitLoadDouble(op2, fpRegT2);
                divDouble(fpRegT2, fpRegT0);
#if ENABLE(VALUE_PROFILER)
                // Is the result actually an integer? The DFG JIT would really like to know. If it's
                // not an integer, we increment a count. If this together with the slow case counter
                // are below threshold then the DFG JIT will compile this division with a speculation
                // that the remainder is zero.
                
                // As well, there are cases where a double result here would cause an important field
                // in the heap to sometimes have doubles in it, resulting in double predictions getting
                // propagated to a use site where it might cause damage (such as the index to an array
                // access). So if we are DFG compiling anything in the program, we want this code to
                // ensure that it produces integers whenever possible.
                
                // FIXME: This will fail to convert to integer if the result is zero. We should
                // distinguish between positive zero and negative zero here.
                
                JumpList notInteger;
                branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
                // If we've got an integer, we might as well make that the result of the division.
                emitStoreInt32(dst, regT2);
                Jump isInteger = jump();
                notInteger.link(this);
                add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
                emitStoreDouble(dst, fpRegT0);
                isInteger.link(this);
#else
                emitStoreDouble(dst, fpRegT0);
#endif
                break;
            }
            case op_jless:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
                break;
            case op_jlesseq:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT1), dst);
                break;
            case op_jgreater:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleGreaterThan, fpRegT0, fpRegT1), dst);
                break;
            case op_jgreatereq:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT0, fpRegT1), dst);
                break;
            case op_jnless:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
                break;
            case op_jnlesseq:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
                break;
            case op_jngreater:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
                break;
            case op_jngreatereq:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT1, fpRegT0), dst);
                break;
            default:
                ASSERT_NOT_REACHED();
        }
    }

    end.link(this);
}
1024
1025 // Multiplication (*)
1026
// Fast path for op_mul: int32 * int32 with overflow and -0 checks, falling
// through to emitBinaryDoubleOp when either operand is not int32. Every
// addSlowCase here (including those inside emitBinaryDoubleOp) is linked,
// in the same order, by emitSlow_op_mul.
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

#if ENABLE(VALUE_PROFILER)
    m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // op1 tag/payload -> regT1/regT0, op2 tag/payload -> regT3/regT2.
    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case. Preserve op1's payload in regT3 (clobbering op2's tag,
    // no longer needed) so the slow path can inspect both operands' signs.
    move(regT0, regT3);
    addSlowCase(branchMul32(Overflow, regT2, regT0));
    // A zero product goes to the slow path, which distinguishes +0 from -0.
    addSlowCase(branchTest32(Zero, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    // Without FPU support the not-int32 cases go straight to the stub call.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}
1062
// Slow path for op_mul. Links every slow case registered by emit_op_mul /
// emitBinaryDoubleOp, in the identical order. The "zero result" case first
// checks whether the result is really -0 (either operand negative); a true
// +0 is stored inline without calling the stub.
void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    Jump overflow = getSlowCase(iter); // overflow check
    linkSlowCase(iter); // zero result check

    // emit_op_mul left op2's payload in regT2 and op1's payload in regT3.
    // If either sign bit is set, the JS result is -0, which cannot be
    // represented as an int32 -- fall through to the stub call.
    Jump negZero = branchOr32(Signed, regT2, regT3);
    emitStoreInt32(dst, TrustedImm32(0), (op1 == dst || op2 == dst));

    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));

    negZero.link(this);
#if ENABLE(VALUE_PROFILER)
    // We only get here if we have a genuine negative zero. Record this,
    // so that the speculative JIT knows that we failed speculation
    // because of a negative zero.
    add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
#endif
    overflow.link(this);

    // Remaining slow cases mirror emit_op_mul: without FPU both not-int32
    // jumps; with FPU, the type checks emitted by emitBinaryDoubleOp.
    if (!supportsFloatingPoint()) {
        linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // int32 check
    }

    if (supportsFloatingPoint()) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    // NOTE(review): `jitStubCall` appears unused within this function --
    // TODO confirm nothing else references it before removing.
    Label jitStubCall(this);
    JITStubCall stubCall(this, cti_op_mul);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
1108
1109 // Division (/)
1110
// Fast path for op_div. There is no int32 division fast path per se: when
// both operands are int32 they are converted to doubles and divided; the
// result is stored as an int32 only when it converts back exactly (under
// VALUE_PROFILER). Without FPU support everything goes to the stub.
void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

#if ENABLE(VALUE_PROFILER)
    m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif

    // No FPU: a single unconditional slow case (linked by emitSlow_op_div).
    if (!supportsFloatingPoint()) {
        addSlowCase(jump());
        return;
    }

    // Int32 divide.
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    JumpList end;

    // op1 tag/payload -> regT1/regT0, op2 tag/payload -> regT3/regT2.
    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);

    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Both int32: divide as doubles (fpRegT0 = op1 / op2).
    convertInt32ToDouble(regT0, fpRegT0);
    convertInt32ToDouble(regT2, fpRegT1);
    divDouble(fpRegT1, fpRegT0);
#if ENABLE(VALUE_PROFILER)
    // Is the result actually an integer? The DFG JIT would really like to know. If it's
    // not an integer, we increment a count. If this together with the slow case counter
    // are below threshold then the DFG JIT will compile this division with a speculation
    // that the remainder is zero.
    
    // As well, there are cases where a double result here would cause an important field
    // in the heap to sometimes have doubles in it, resulting in double predictions getting
    // propagated to a use site where it might cause damage (such as the index to an array
    // access). So if we are DFG compiling anything in the program, we want this code to
    // ensure that it produces integers whenever possible.
    
    // FIXME: This will fail to convert to integer if the result is zero. We should
    // distinguish between positive zero and negative zero here.
    
    JumpList notInteger;
    branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
    // If we've got an integer, we might as well make that the result of the division.
    emitStoreInt32(dst, regT2);
    end.append(jump());
    notInteger.link(this);
    add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
    emitStoreDouble(dst, fpRegT0);
#else
    emitStoreDouble(dst, fpRegT0);
#endif
    end.append(jump());

    // Double divide.
    emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}
1173
1174 void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1175 {
1176     unsigned dst = currentInstruction[1].u.operand;
1177     unsigned op1 = currentInstruction[2].u.operand;
1178     unsigned op2 = currentInstruction[3].u.operand;
1179     OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1180
1181     if (!supportsFloatingPoint())
1182         linkSlowCase(iter);
1183     else {
1184         if (!types.first().definitelyIsNumber())
1185             linkSlowCase(iter); // double check
1186
1187         if (!types.second().definitelyIsNumber()) {
1188             linkSlowCase(iter); // int32 check
1189             linkSlowCase(iter); // double check
1190         }
1191     }
1192
1193     JITStubCall stubCall(this, cti_op_div);
1194     stubCall.addArgument(op1);
1195     stubCall.addArgument(op2);
1196     stubCall.call(dst);
1197 }
1198
1199 // Mod (%)
1200
1201 /* ------------------------------ BEGIN: OP_MOD ------------------------------ */
1202
// Fast path for op_mod. On x86/x86_64 this emits an inline IDIV: the
// remainder lands in edx (regT1). Five slow cases are registered (two tag
// checks, zero denominator, INT_MIN numerator with -1 denominator, and a
// zero remainder with a negative numerator) -- all linked, in order, by
// emitSlow_op_mod. Other CPUs call the cti_op_mod stub directly.
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if CPU(X86) || CPU(X86_64)
    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    ASSERT(regT1 == X86Registers::edx);
    ASSERT(regT2 == X86Registers::ecx);
    ASSERT(regT3 == X86Registers::ebx);

    // op1 tag/payload -> regT0/regT3, op2 tag/payload -> regT1/regT2.
    emitLoad2(op1, regT0, regT3, op2, regT1, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT0, TrustedImm32(JSValue::Int32Tag)));

    // Numerator payload into eax for IDIV; regT3 keeps a copy for the sign
    // check below.
    move(regT3, regT0);
    // Division by zero: slow case.
    addSlowCase(branchTest32(Zero, regT2));
    // INT_MIN / -1 overflows IDIV (raises #DE on x86): slow case.
    Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
    addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
    denominatorNotNeg1.link(this);
    // cdq sign-extends eax into edx:eax; idiv leaves the remainder in edx.
    m_assembler.cdq();
    m_assembler.idivl_r(regT2);
    // A zero remainder with a negative numerator is -0 in JS, which cannot
    // be represented as an int32: slow case.
    Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
    addSlowCase(branchTest32(Zero, regT1));
    numeratorPositive.link(this);
    emitStoreInt32(dst, regT1, (op1 == dst || op2 == dst));
#else
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
#endif
}
1238
// Slow path for op_mod (x86/x86_64 only). Links the five slow cases
// registered by emit_op_mod -- op2 tag check, op1 tag check, zero
// denominator, INT_MIN/-1, and negative-zero remainder -- then calls the
// cti_op_mod stub. On other CPUs the fast path already called the stub,
// so there is nothing to link.
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
#if CPU(X86) || CPU(X86_64)
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    linkSlowCase(iter); // op2 int32 (tag) check
    linkSlowCase(iter); // op1 int32 (tag) check
    linkSlowCase(iter); // zero denominator check
    linkSlowCase(iter); // INT_MIN numerator with -1 denominator check
    linkSlowCase(iter); // negative-zero remainder check
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(result);
#else
    UNUSED_PARAM(currentInstruction);
    UNUSED_PARAM(iter);
    // We would have really useful assertions here if it wasn't for the compiler's
    // insistence on attribute noreturn.
    // ASSERT_NOT_REACHED();
#endif
}
1262
1263 /* ------------------------------ END: OP_MOD ------------------------------ */
1264
1265 } // namespace JSC
1266
1267 #endif // USE(JSVALUE32_64)
1268 #endif // ENABLE(JIT)