/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE32_64)
#include "JIT.h"

#include "CodeBlock.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JITStubs.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "ResultType.h"
#include "SamplingTool.h"

#ifndef NDEBUG
#include <stdio.h>
#endif

using namespace std;

namespace JSC {

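// A quick orientation for readers of this file, inferred from the code
// below rather than from an authoritative spec: under USE(JSVALUE32_64)
// every JSValue is a 32-bit tag word paired with a 32-bit payload word.
// An int32 carries JSValue::Int32Tag, a cell pointer JSValue::CellTag,
// and any tag numerically below JSValue::LowestTag means the two words
// are the halves of an IEEE double. The binary ops below load op1 into
// regT1 (tag) : regT0 (payload) and op2 into regT3 (tag) : regT2
// (payload), try an int32 fast path, then fall back to double math or a
// cti_* stub call. One invariant to keep in mind throughout: each
// emitSlow_* function must link slow cases in exactly the order the
// fast path called addSlowCase().
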
void JIT::emit_op_negate(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump srcNotInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    // 0 and 0x80000000 have no bits set in 0x7fffffff: negating 0 yields -0.0
    // (a double), and negating INT_MIN overflows, so both take the slow case.
    addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
    neg32(regT0);
    emitStoreInt32(dst, regT0, (dst == src));

    Jump end = jump();

    srcNotInt.link(this);
    addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));

    // Negate a double by flipping the sign bit in the high (tag) word.
    xor32(TrustedImm32(1 << 31), regT1);
    store32(regT1, tagFor(dst));
    if (dst != src)
        store32(regT0, payloadFor(dst));

    end.link(this);
}

void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // 0x7fffffff check
    linkSlowCase(iter); // double check

    JITStubCall stubCall(this, cti_op_negate);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}

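// Shared fast-path emitter for the fused compare-and-branch opcodes
// (op_jless and friends). If one operand is a constant single-character
// string, the other operand's first character is compared directly
// (commuting the condition when the constant sits on the left); if one
// operand is a constant int32, it is compared as an immediate; otherwise
// both int32 payloads are compared in registers, and the non-int32
// cases are handed off to emitBinaryDoubleOp.
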
void JIT::emit_compareAndJump(OpcodeID opcode, unsigned op1, unsigned op2, unsigned target, RelationalCondition condition)
{
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Character less.
    if (isOperandConstantImmediateChar(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32())), target);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(condition, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(condition, regT0, regT2), target);
    }

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double comparisons.
    emitBinaryDoubleOp(opcode, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
    end.link(this);
}

void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, DoubleCondition, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
    } else {
        if (!supportsFloatingPoint()) {
            if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!isOperandConstantImmediateInt(op1)) {
                linkSlowCase(iter); // double check
                linkSlowCase(iter); // int32 check
            }
            if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // double check
        }
    }

    JITStubCall stubCall(this, stub);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
}

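// All three shift opcodes use only the low five bits of the shift count,
// per ECMA-262 (e.g. (1 << 33) === 2). Constant counts are masked with
// & 0x1f where this file does it explicitly; for variable counts this
// relies on the MacroAssembler's shift32 operations masking the count
// (which matches what the x86 and ARM hardware shifters do) -- an
// assumption about the backends rather than something visible here.
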
// LeftShift (<<)

void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_lshift));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    if (!isOperandConstantImmediateInt(op1))
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    lshift32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, dst == op1 || dst == op2, OPCODE_LENGTH(op_lshift));
}

void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_lshift);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

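// Note on unsigned right shift: x >>> n produces a uint32, so a result
// with the sign bit set (e.g. -1 >>> 0 === 4294967295) has no int32
// representation. That is why the urshift paths below branch to the
// slow case on a negative result: the value must be boxed as a double.
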
// RightShift (>>) and UnsignedRightShift (>>>) helper

void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Slow case of rshift makes assumptions about what registers hold the
    // shift arguments, so any changes must be updated there as well.
    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        int shift = getConstantOperand(op2).asInt32() & 0x1f;
        if (shift) {
            if (isUnsigned)
                urshift32(Imm32(shift), regT0);
            else
                rshift32(Imm32(shift), regT0);
        } else if (isUnsigned) // signed right shift by zero is simply toInt conversion
            addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_rshift));
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        if (!isOperandConstantImmediateInt(op1))
            addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        if (isUnsigned) {
            urshift32(regT2, regT0);
            addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
        } else
            rshift32(regT2, regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_rshift));
    }
}

void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool isUnsigned)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        int shift = getConstantOperand(op2).asInt32() & 0x1f;
        // op1 = regT1:regT0
        linkSlowCase(iter); // int32 check
        if (supportsFloatingPointTruncate()) {
            JumpList failures;
            failures.append(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
            emitLoadDouble(op1, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            if (shift) {
                if (isUnsigned)
                    urshift32(Imm32(shift), regT0);
                else
                    rshift32(Imm32(shift), regT0);
            } else if (isUnsigned) // signed right shift by zero is simply toInt conversion
                failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
            move(TrustedImm32(JSValue::Int32Tag), regT1);
            emitStoreInt32(dst, regT0, false);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
            failures.link(this);
        }
        if (isUnsigned && !shift)
            linkSlowCase(iter); // failed to box in hot path
    } else {
        // op1 = regT1:regT0
        // op2 = regT3:regT2
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int
            if (supportsFloatingPointTruncate()) {
                JumpList failures;
                failures.append(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag))); // op1 is not a double
                emitLoadDouble(op1, fpRegT0);
                failures.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag))); // op2 is not an int
                failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
                if (isUnsigned) {
                    urshift32(regT2, regT0);
                    failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
                } else
                    rshift32(regT2, regT0);
                move(TrustedImm32(JSValue::Int32Tag), regT1);
                emitStoreInt32(dst, regT0, false);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
                failures.link(this);
            }
        }

        linkSlowCase(iter); // int32 check - op2 is not an int
        if (isUnsigned)
            linkSlowCase(iter); // Can't represent unsigned result as an immediate
    }

    JITStubCall stubCall(this, isUnsigned ? cti_op_urshift : cti_op_rshift);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// RightShift (>>)

void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, false);
}

void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, false);
}

// UnsignedRightShift (>>>)

void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, true);
}

void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, true);
}

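// The three bitwise ops share one shape: when one operand is a constant
// int32, getOperandConstantImmediateInt returns the other operand in
// 'op' and the fast path folds the constant into a single immediate
// and32/or32/xor32; otherwise both tags are checked and the payloads
// are combined in registers. Either way regT1 still holds Int32Tag,
// which emitStoreAndMapInt32 reuses when writing the result.
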
// BitAnd (&)

void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        and32(Imm32(constant), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op, OPCODE_LENGTH(op_bitand));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    and32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitand));
}

void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitand);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// BitOr (|)

void JIT::emit_op_bitor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        or32(Imm32(constant), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, op == dst, OPCODE_LENGTH(op_bitor));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    or32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitor));
}

void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitor);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// BitXor (^)

void JIT::emit_op_bitxor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        xor32(Imm32(constant), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, op == dst, OPCODE_LENGTH(op_bitxor));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    xor32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitxor));
}

void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitxor);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

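// Post-increment and post-decrement write two locations: dst receives
// the original value and srcDst the adjusted one. When dst == srcDst the
// bytecode is something like x = x++, which leaves an int32 x unchanged,
// so the emitters return right after the tag check in that case (and
// the slow paths only link the overflow case when dst != srcDst).
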
// PostInc (i++)

void JIT::emit_op_post_inc(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitLoad(srcDst, regT1, regT0);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));

    if (dst == srcDst) // x = x++ is a noop for ints.
        return;

    move(regT0, regT2);
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT2));
    emitStoreInt32(srcDst, regT2, true);
    emitStoreAndMapInt32(dst, regT1, regT0, false, OPCODE_LENGTH(op_post_inc));
    if (canBeOptimizedOrInlined())
        unmap();
}

void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter); // int32 check
    if (dst != srcDst)
        linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_post_inc);
    stubCall.addArgument(srcDst);
    stubCall.addArgument(TrustedImm32(srcDst));
    stubCall.call(dst);
}

// PostDec (i--)

void JIT::emit_op_post_dec(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitLoad(srcDst, regT1, regT0);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));

    if (dst == srcDst) // x = x-- is a noop for ints.
        return;

    move(regT0, regT2);
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT2));
    emitStoreInt32(srcDst, regT2, true);
    emitStoreAndMapInt32(dst, regT1, regT0, false, OPCODE_LENGTH(op_post_dec));
    if (canBeOptimizedOrInlined())
        unmap();
}

void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter); // int32 check
    if (dst != srcDst)
        linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_post_dec);
    stubCall.addArgument(srcDst);
    stubCall.addArgument(TrustedImm32(srcDst));
    stubCall.call(dst);
}

// PreInc (++i)

void JIT::emit_op_pre_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
    emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_pre_inc));
}

void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_pre_inc);
    stubCall.addArgument(srcDst);
    stubCall.call(srcDst);
}

// PreDec (--i)

void JIT::emit_op_pre_dec(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
    emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_pre_dec));
}

void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_pre_dec);
    stubCall.addArgument(srcDst);
    stubCall.call(srcDst);
}

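// The arithmetic emitters below consult OperandTypes, the bytecode's
// record of what each operand has been seen to hold. An operand that is
// definitelyIsNumber() lets the fast path omit a double tag check (and
// the slow path must then omit the matching linkSlowCase); an operand
// that fails mightBeNumber() sends op_add straight to the stub call.
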
// Addition (+)

void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        addSlowCase();
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1);
        stubCall.addArgument(op2);
        stubCall.call(dst);
        return;
    }

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchAdd32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT2);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    addSlowCase(branchAdd32(Overflow, regT2, Imm32(constant), regT0));
    emitStoreInt32(dst, regT0, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    addDouble(fpRegT1, fpRegT0);
    emitStoreDouble(dst, fpRegT0);

    end.link(this);
}

void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        linkDummySlowCase(iter);
        return;
    }

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint())
            linkSlowCase(iter); // non-sse case
        else {
            ResultType opType = op == op1 ? types.first() : types.second();
            if (!opType.definitelyIsNumber())
                linkSlowCase(iter); // double check
        }
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_add);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// Subtraction (-)

void JIT::emit_op_sub(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    if (isOperandConstantImmediateInt(op2)) {
        emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchSub32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

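// A note on the #if ENABLE(JIT_CONSTANT_BLINDING) split in
// emitSub32Constant below: the blinded build uses the branchSub32
// overload that takes an extra scratch register (regT3), presumably so
// the masked immediate can be rematerialized without disturbing the
// other operands. That rationale is an inference; only the extra-scratch
// signature is visible in this file.
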
void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT0);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
#if ENABLE(JIT_CONSTANT_BLINDING)
    addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2, regT3));
#else
    addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2));
#endif

    emitStoreInt32(dst, regT2, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    subDouble(fpRegT0, fpRegT1);
    emitStoreDouble(dst, fpRegT1);

    end.link(this);
}

void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
            linkSlowCase(iter); // int32 or double check
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_sub);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

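// emitBinaryDoubleOp is the shared double-path tail for the arithmetic
// and fused compare-and-jump opcodes. The two JumpLists are the "operand
// is not int32" exits from the caller's fast path, and each non-empty
// list gets its own block: case 1 below handles op1 not int32 (op2 still
// unknown), case 2 handles op1 int32 but op2 not. For arithmetic opcodes
// 'dst' names the result register index; for the op_j* comparisons it is
// the jump target. op1IsInRegisters/op2IsInRegisters say whether a
// tag:payload pair is already loaded or must be fetched here.
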
void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
{
    JumpList end;

    if (!notInt32Op1.empty()) {
        // Double case 1: Op1 is not int32; Op2 is unknown.
        notInt32Op1.link(this);

        ASSERT(op1IsInRegisters);

        // Verify Op1 is double.
        if (!types.first().definitelyIsNumber())
            addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));

        if (!op2IsInRegisters)
            emitLoad(op2, regT3, regT2);

        Jump doubleOp2 = branch32(Below, regT3, TrustedImm32(JSValue::LowestTag));

        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

        convertInt32ToDouble(regT2, fpRegT0);
        Jump doTheMath = jump();

        // Load Op2 as double into double register.
        doubleOp2.link(this);
        emitLoadDouble(op2, fpRegT0);

        // Do the math.
        doTheMath.link(this);
        switch (opcodeID) {
            case op_mul:
                emitLoadDouble(op1, fpRegT2);
                mulDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_add:
                emitLoadDouble(op1, fpRegT2);
                addDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_sub:
                emitLoadDouble(op1, fpRegT1);
                subDouble(fpRegT0, fpRegT1);
                emitStoreDouble(dst, fpRegT1);
                break;
            case op_div: {
                emitLoadDouble(op1, fpRegT1);
                divDouble(fpRegT0, fpRegT1);
#if ENABLE(VALUE_PROFILER)
                // Is the result actually an integer? The DFG JIT would really like to know. If it's
                // not an integer, we increment a count. If this together with the slow case counter
                // are below threshold then the DFG JIT will compile this division with a speculation
                // that the remainder is zero.

                // As well, there are cases where a double result here would cause an important field
                // in the heap to sometimes have doubles in it, resulting in double predictions getting
                // propagated to a use site where it might cause damage (such as the index to an array
                // access). So if we are DFG compiling anything in the program, we want this code to
                // ensure that it produces integers whenever possible.

                // FIXME: This will fail to convert to integer if the result is zero. We should
                // distinguish between positive zero and negative zero here.

                JumpList notInteger;
                branchConvertDoubleToInt32(fpRegT1, regT2, notInteger, fpRegT0);
                // If we've got an integer, we might as well make that the result of the division.
                emitStoreInt32(dst, regT2);
                Jump isInteger = jump();
                notInteger.link(this);
                add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
                emitStoreDouble(dst, fpRegT1);
                isInteger.link(this);
#else
                emitStoreDouble(dst, fpRegT1);
#endif
                break;
            }
            case op_jless:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
                break;
            case op_jlesseq:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThanOrEqual, fpRegT2, fpRegT0), dst);
                break;
            case op_jgreater:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleGreaterThan, fpRegT2, fpRegT0), dst);
                break;
            case op_jgreatereq:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT2, fpRegT0), dst);
                break;
            case op_jnless:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
                break;
            case op_jnlesseq:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
                break;
            case op_jngreater:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
                break;
            case op_jngreatereq:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT0, fpRegT2), dst);
                break;
            default:
                ASSERT_NOT_REACHED();
        }

        if (!notInt32Op2.empty())
            end.append(jump());
    }

    if (!notInt32Op2.empty()) {
        // Double case 2: Op1 is int32; Op2 is not int32.
        notInt32Op2.link(this);

        ASSERT(op2IsInRegisters);

        if (!op1IsInRegisters)
            emitLoadPayload(op1, regT0);

        convertInt32ToDouble(regT0, fpRegT0);

        // Verify op2 is double.
        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(Above, regT3, TrustedImm32(JSValue::LowestTag)));

        // Do the math.
        switch (opcodeID) {
            case op_mul:
                emitLoadDouble(op2, fpRegT2);
                mulDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_add:
                emitLoadDouble(op2, fpRegT2);
                addDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_sub:
                emitLoadDouble(op2, fpRegT2);
                subDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_div: {
                emitLoadDouble(op2, fpRegT2);
                divDouble(fpRegT2, fpRegT0);
#if ENABLE(VALUE_PROFILER)
                // Is the result actually an integer? The DFG JIT would really like to know. If it's
                // not an integer, we increment a count. If this together with the slow case counter
                // are below threshold then the DFG JIT will compile this division with a speculation
                // that the remainder is zero.

                // As well, there are cases where a double result here would cause an important field
                // in the heap to sometimes have doubles in it, resulting in double predictions getting
                // propagated to a use site where it might cause damage (such as the index to an array
                // access). So if we are DFG compiling anything in the program, we want this code to
                // ensure that it produces integers whenever possible.

                // FIXME: This will fail to convert to integer if the result is zero. We should
                // distinguish between positive zero and negative zero here.

                JumpList notInteger;
                branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
                // If we've got an integer, we might as well make that the result of the division.
                emitStoreInt32(dst, regT2);
                Jump isInteger = jump();
                notInteger.link(this);
                add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
                emitStoreDouble(dst, fpRegT0);
                isInteger.link(this);
#else
                emitStoreDouble(dst, fpRegT0);
#endif
                break;
            }
            case op_jless:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
                break;
            case op_jlesseq:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT1), dst);
                break;
            case op_jgreater:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleGreaterThan, fpRegT0, fpRegT1), dst);
                break;
            case op_jgreatereq:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT0, fpRegT1), dst);
                break;
            case op_jnless:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
                break;
            case op_jnlesseq:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
                break;
            case op_jngreater:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
                break;
            case op_jngreatereq:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT1, fpRegT0), dst);
                break;
            default:
                ASSERT_NOT_REACHED();
        }
    }

    end.link(this);
}

// Multiplication (*)

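// An int32 multiply that produces zero needs care: if either operand was
// negative (e.g. -1 * 0), the JS result is -0, which has no int32
// representation. The fast path therefore treats any zero product as a
// slow case; emitSlow_op_mul then inspects the operands' signs and only
// falls through to the stub for a genuine negative zero.
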
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

#if ENABLE(VALUE_PROFILER)
    m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case. Keep op1's payload in regT3 so the slow case can check signs.
    move(regT0, regT3);
    addSlowCase(branchMul32(Overflow, regT2, regT0));
    addSlowCase(branchTest32(Zero, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    Jump overflow = getSlowCase(iter); // overflow check
    linkSlowCase(iter); // zero result check

    Jump negZero = branchOr32(Signed, regT2, regT3);
    emitStoreInt32(dst, TrustedImm32(0), (op1 == dst || op2 == dst));

    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));

    negZero.link(this);
#if ENABLE(VALUE_PROFILER)
    // We only get here if we have a genuine negative zero. Record this,
    // so that the speculative JIT knows that we failed speculation
    // because of a negative zero.
    add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
#endif
    overflow.link(this);

    if (!supportsFloatingPoint()) {
        linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // int32 check
    }

    if (supportsFloatingPoint()) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    Label jitStubCall(this);
    JITStubCall stubCall(this, cti_op_mul);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// Division (/)

void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

#if ENABLE(VALUE_PROFILER)
    m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif

    if (!supportsFloatingPoint()) {
        addSlowCase(jump());
        return;
    }

    // Int32 divide.
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    JumpList end;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);

    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    convertInt32ToDouble(regT0, fpRegT0);
    convertInt32ToDouble(regT2, fpRegT1);
    divDouble(fpRegT1, fpRegT0);
#if ENABLE(VALUE_PROFILER)
    // Is the result actually an integer? The DFG JIT would really like to know. If it's
    // not an integer, we increment a count. If this together with the slow case counter
    // are below threshold then the DFG JIT will compile this division with a speculation
    // that the remainder is zero.

    // As well, there are cases where a double result here would cause an important field
    // in the heap to sometimes have doubles in it, resulting in double predictions getting
    // propagated to a use site where it might cause damage (such as the index to an array
    // access). So if we are DFG compiling anything in the program, we want this code to
    // ensure that it produces integers whenever possible.

    // FIXME: This will fail to convert to integer if the result is zero. We should
    // distinguish between positive zero and negative zero here.

    JumpList notInteger;
    branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
    // If we've got an integer, we might as well make that the result of the division.
    emitStoreInt32(dst, regT2);
    end.append(jump());
    notInteger.link(this);
    add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
    emitStoreDouble(dst, fpRegT0);
#else
    emitStoreDouble(dst, fpRegT0);
#endif
    end.append(jump());

    // Double divide.
    emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!supportsFloatingPoint())
        linkSlowCase(iter);
    else {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    JITStubCall stubCall(this, cti_op_div);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

/* ------------------------------ BEGIN: OP_MOD ------------------------------ */

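// Integer % is implemented with x86's idiv, which dictates the register
// assignment: the dividend is sign-extended into edx:eax (cdq), idiv
// leaves the remainder in edx, and both registers are clobbered. The
// ASSERTs below pin the JIT's register aliases to those roles. Division
// by zero and INT_MIN % -1 (which would fault in idiv) go to the stub,
// as does a zero remainder from a negative dividend, since that result
// is -0.
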
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if CPU(X86) || CPU(X86_64)
    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    ASSERT(regT1 == X86Registers::edx);
    ASSERT(regT2 == X86Registers::ecx);
    ASSERT(regT3 == X86Registers::ebx);

    emitLoad2(op1, regT0, regT3, op2, regT1, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT0, TrustedImm32(JSValue::Int32Tag)));

    move(regT3, regT0); // Put the dividend payload into eax.
    addSlowCase(branchTest32(Zero, regT2)); // Division by zero.
    Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
    addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1))); // INT_MIN / -1 would fault.
    denominatorNotNeg1.link(this);
    m_assembler.cdq(); // Sign-extend eax into edx:eax for idiv.
    m_assembler.idivl_r(regT2);
    Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
    addSlowCase(branchTest32(Zero, regT1)); // Negative dividend, zero remainder: result is -0.
    numeratorPositive.link(this);
    emitStoreInt32(dst, regT1, (op1 == dst || op2 == dst));
#else
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
#endif
}

void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
#if CPU(X86) || CPU(X86_64)
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(result);
#else
    UNUSED_PARAM(currentInstruction);
    UNUSED_PARAM(iter);
    // We would have really useful assertions here if it wasn't for the compiler's
    // insistence on attribute noreturn.
    // ASSERT_NOT_REACHED();
#endif
}

/* ------------------------------ END: OP_MOD ------------------------------ */

} // namespace JSC

#endif // USE(JSVALUE32_64)
#endif // ENABLE(JIT)