--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ ... @@
} else {
Load(node->expression());
+ bool overwrite =
+ (node->expression()->AsBinaryOperation() != NULL &&
+ node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
switch (op) {
case Token::SUB: {
- bool overwrite =
- (node->expression()->AsBinaryOperation() != NULL &&
- node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
- UnarySubStub stub(overwrite);
+ GenericUnaryOpStub stub(Token::SUB, overwrite);
// TODO(1222589): remove dependency of TOS being cached inside stub
Result operand = frame_->Pop();
Result answer = frame_->CallStub(&stub, &operand);
frame_->Push(&answer);
break;
}

case Token::BIT_NOT: {
// Smi check.
JumpTarget smi_label;
JumpTarget continue_label;
Result operand = frame_->Pop();
operand.ToRegister();
__ test(operand.reg(), Immediate(kSmiTagMask));
smi_label.Branch(zero, &operand, taken);
- frame_->Push(&operand); // undo popping of TOS
- Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
- CALL_FUNCTION, 1);
-
+ GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ Result answer = frame_->CallStub(&stub, &operand);
continue_label.Jump(&answer);
+
smi_label.Bind(&answer);
answer.ToRegister();
frame_->Spill(answer.reg());
__ not_(answer.reg());
__ and_(answer.reg(), ~kSmiTagMask); // Remove inverted smi-tag.
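+ // A smi is the value shifted left by one with a zero tag bit, so
+ // ~(x << 1) == ((~x) << 1) | 1; clearing the low bit leaves the
+ // correctly tagged smi for ~x.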
+
continue_label.Bind(&answer);
frame_->Push(&answer);
break;
}
default: UNREACHABLE();
}
@@ ... @@
// Store the result in the HeapNumber and return.
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
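+ // With SSE2, cvtsi2sd converts the untagged int32 to a double directly
+ // in an XMM register; otherwise the value takes a round trip through
+ // memory and the x87 FPU stack.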
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(ebx));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ mov(Operand(esp, 1 * kPointerSize), ebx);
+ __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
GenerateReturn(masm);
}
}
@@ ... @@
-void UnarySubStub::Generate(MacroAssembler* masm) {
- Label undo;
- Label slow;
- Label done;
- Label try_float;
-
- // Check whether the value is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &try_float, not_taken);
+void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
+ Label slow, done;
- // Enter runtime system if the value of the expression is zero
- // to make sure that we switch between 0 and -0.
- __ test(eax, Operand(eax));
- __ j(zero, &slow, not_taken);
+ if (op_ == Token::SUB) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &try_float, not_taken);
- // The value of the expression is a smi that is not zero. Try
- // optimistic subtraction '0 - value'.
- __ mov(edx, Operand(eax));
- __ Set(eax, Immediate(0));
- __ sub(eax, Operand(edx));
- __ j(overflow, &undo, not_taken);
+ // Go to the slow case if the value of the expression is zero, to make
+ // sure that we switch between 0 and -0.
+ __ test(eax, Operand(eax));
+ __ j(zero, &slow, not_taken);
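+ // (-0 cannot be represented as a smi; negating a zero smi therefore
+ // goes to the builtin, which returns the heap number -0.)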
- // If result is a smi we are done.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done, taken);
-
- // Restore eax and enter runtime system.
- __ bind(&undo);
- __ mov(eax, Operand(edx));
+ // The value of the expression is a smi that is not zero. Try
+ // optimistic subtraction '0 - value'.
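+ // The subtraction operates directly on the tagged value, since
+ // 0 - (x << 1) == (-x) << 1; it overflows only for the most negative
+ // smi, whose negation does not fit in a smi.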
+ Label undo;
+ __ mov(edx, Operand(eax));
+ __ Set(eax, Immediate(0));
+ __ sub(eax, Operand(edx));
+ __ j(overflow, &undo, not_taken);
- // Enter runtime system.
- __ bind(&slow);
- __ pop(ecx); // pop return address
- __ push(eax);
- __ push(ecx); // push return address
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ // If the result is a smi, we are done.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &done, taken);
+
+ // Restore eax and go to the slow case.
+ __ bind(&undo);
+ __ mov(eax, Operand(edx));
+ __ jmp(&slow);
+
+ // Try floating point case.
+ __ bind(&try_float);
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(edx, Factory::heap_number_map());
+ __ j(not_equal, &slow);
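+ // A double is negated by flipping its IEEE 754 sign bit, the top bit
+ // of the high (exponent) word, so no FPU arithmetic is needed.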
+ if (overwrite_) {
+ __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
+ __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
+ __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
+ } else {
+ __ mov(edx, Operand(eax));
+ // edx: operand
+ __ AllocateHeapNumber(eax, ebx, ecx, &undo);
+ // eax: allocated 'empty' number
+ __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
+ __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
+ __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
+ __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
+ }
+ } else if (op_ == Token::BIT_NOT) {
+ // Check if the operand is a heap number.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(edx, Factory::heap_number_map());
+ __ j(not_equal, &slow, not_taken);
+
+ // Convert the heap number in eax to an untagged integer in ecx.
+ IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), &slow);
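+ // (This is the ToInt32 truncation used by the bitwise operators;
+ // inputs that the helper cannot convert quickly go to the slow case.)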
+
+ // Do the bitwise operation and check if the result fits in a smi.
+ Label try_float;
+ __ not_(ecx);
+ __ cmp(ecx, 0xc0000000);
+ __ j(sign, &try_float, not_taken);
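+ // (ecx - 0xc0000000 has its sign bit set exactly when ecx is outside
+ // the 31-bit smi range, i.e. in [0x40000000, 0xc0000000).)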
+
+ // Tag the result as a smi and we're done.
+ ASSERT(kSmiTagSize == 1);
+ __ lea(eax, Operand(ecx, times_2, kSmiTag));
+ __ jmp(&done);
- // Try floating point case.
- __ bind(&try_float);
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, Factory::heap_number_map());
- __ j(not_equal, &slow);
- if (overwrite_) {
- __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
- __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
+ // Try to store the result in a heap number.
+ __ bind(&try_float);
+ if (!overwrite_) {
+ // Allocate a fresh heap number, but don't overwrite eax until
+ // we're sure we can do it without going through the slow case
+ // that needs the value in eax.
+ __ AllocateHeapNumber(ebx, edx, edi, &slow);
+ __ mov(eax, Operand(ebx));
+ }
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(ecx));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ push(ecx);
+ __ fild_s(Operand(esp, 0));
+ __ pop(ecx);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
} else {
- __ mov(edx, Operand(eax));
- // edx: operand
- __ AllocateHeapNumber(eax, ebx, ecx, &undo);
- // eax: allocated 'empty' number
- __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
- __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
- __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
+ UNIMPLEMENTED();
}
+ // Return from the stub.
__ bind(&done);
-
__ StubReturn(1);
+
+ // Handle the slow case by jumping to the JavaScript builtin.
+ __ bind(&slow);
+ __ pop(ecx);  // Pop the return address.
+ __ push(eax);
+ __ push(ecx);  // Push the return address.
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
--- /dev/null
+++ b/test/mjsunit/bit-not.js
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function testBitNot(x) {
+ // The VM constant-folds the eval'd expression, so we use that to
+ // check the result.
+ var expected = eval("~(" + x + ")");
+ var actual = ~x;
+ assertEquals(expected, actual, "x: " + x);
+
+ // Test the path where we can overwrite the result. Use - rather than +
+ // to avoid string concatenation when x is a string.
+ expected = eval("~(" + x + " - 0.01)");
+ actual = ~(x - 0.01);
+ assertEquals(expected, actual, "x - 0.01: " + x);
+}
+
+
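+// For reference: ~ applies ToInt32 to its operand, so fractions are
+// truncated (~2.2 == ~2 == -3), NaN and Infinity convert to 0
+// (~NaN == -1), and numeric strings are converted (~"2.3" == -3).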
+testBitNot(0);
+testBitNot(1);
+testBitNot(-1);
+testBitNot(100);
+testBitNot(0x40000000);
+testBitNot(0x7fffffff);
+testBitNot(0x80000000);
+
+testBitNot(2.2);
+testBitNot(-2.3);
+testBitNot(Infinity);
+testBitNot(NaN);
+testBitNot(-Infinity);
+testBitNot(0x40000000 + 0.12345);
+testBitNot(0x40000000 - 0.12345);
+testBitNot(0x7fffffff + 0.12345);
+testBitNot(0x7fffffff - 0.12345);
+testBitNot(0x80000000 + 0.12345);
+testBitNot(0x80000000 - 0.12345);
+
+testBitNot("0");
+testBitNot("2.3");
+testBitNot("-9.4");
+
+
+// Try to provoke allocation failures in the fast path and check that
+// we fall back to the slow path.
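+// Both 0x40000000 and ~0x40000000 == -0x40000001 lie outside the 31-bit
+// smi range, so each iteration allocates a heap number for the result,
+// eventually making the inline allocation fail.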
+function TryToGC() {
+ var x = 0x40000000;
+ for (var i = 0; i < 1000000; i++) {
+ assertEquals(~0x40000000, ~x);
+ }
+}
+TryToGC();