void FullCodeGenerator::DoTest(Label* if_true,
Label* if_false,
Label* fall_through) {
- // Call the runtime to find the boolean value of the source and then
- // translate it into control flow to the pair of labels.
- __ push(result_register());
- __ CallRuntime(Runtime::kToBool, 1);
+ // Emit the inlined tests assumed by the ToBoolean stub.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(result_register(), ip);
+ __ b(eq, if_false);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- Split(eq, if_true, if_false, fall_through);
+ __ cmp(result_register(), ip);
+ __ b(eq, if_true);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(result_register(), ip);
+ __ b(eq, if_false);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ tst(result_register(), result_register());
+ __ b(eq, if_false);
+ __ JumpIfSmi(result_register(), if_true);
+
+ // Call the ToBoolean stub for all other cases.
+ ToBooleanStub stub(result_register());
+ __ CallStub(&stub);
+ __ tst(result_register(), result_register());
+
+ // The stub returns nonzero for true.
+ Split(ne, if_true, if_false, fall_through);
}
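
The inlined sequence above settles the common cases (undefined, true, false, and smis) without leaving generated code; only other heap objects reach the ToBooleanStub. A minimal sketch of that decision order in plain C++, using stand-in types rather than V8's real tagging:

#include <cstdint>

enum class Truth { kTrue, kFalse, kAskStub };

// Hypothetical tagged value; only the predicates matter for the sketch.
struct TaggedValue {
  bool is_undefined, is_true, is_false, is_smi;
  int32_t smi_value;
};

Truth InlinedToBoolean(const TaggedValue& v) {
  if (v.is_undefined) return Truth::kFalse;  // undefined -> false
  if (v.is_true) return Truth::kTrue;        // literal true
  if (v.is_false) return Truth::kFalse;      // literal false
  if (v.is_smi) {                            // smi: zero is the only falsy one
    return v.smi_value == 0 ? Truth::kFalse : Truth::kTrue;
  }
  return Truth::kAskStub;                    // strings, heap numbers, objects...
}
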
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
SetStatementPosition(stmt);
+
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
-
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
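
The tag is evaluated once and kept on the stack while each clause label is compared against it with a strict-equality IC. A sketch of the selection logic the emitted compare-and-branch chain implements, with stand-in value and clause types:

#include <vector>

struct Value {
  int bits;
  bool StrictEquals(const Value& other) const { return bits == other.bits; }
};

struct Clause {
  Value label;
  int body_id;
  bool is_default;
};

int SelectSwitchTarget(const Value& tag, const std::vector<Clause>& clauses) {
  for (const Clause& c : clauses) {          // compare each labeled clause
    if (!c.is_default && tag.StrictEquals(c.label)) return c.body_id;
  }
  for (const Clause& c : clauses) {          // then the default clause, if any
    if (c.is_default) return c.body_id;
  }
  return -1;                                 // no match: fall out of the switch
}
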
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- if (scope()->is_function_scope() &&
+ // space for nested functions that don't need literals cloning. If
+ // we're running with the --always-opt or the --prepare-always-opt
+ // flag, we need to use the runtime function so that the new function
+ // we are creating here gets a chance to have its code optimized and
+ // doesn't just get a copy of the existing unoptimized code.
+ if (!FLAG_always_opt &&
+ !FLAG_prepare_always_opt &&
+ scope()->is_function_scope() &&
info->num_literals() == 0 &&
!pretenure) {
FastNewClosureStub stub;
Comment cmnt(masm_, "[ RegExpLiteral");
Label materialized;
// Registers will be used as follows:
+ // r5 = materialized value (RegExp literal)
// r4 = JS function, literals array
// r3 = literal index
// r2 = RegExp pattern
// r1 = RegExp flags
- // r0 = temp + materialized value (RegExp literal)
+ // r0 = RegExp literal clone
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
int literal_offset =
FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ ldr(r0, FieldMemOperand(r4, literal_offset));
+ __ ldr(r5, FieldMemOperand(r4, literal_offset));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
+ __ cmp(r5, ip);
__ b(ne, &materialized);
// Create regexp literal using runtime function.
__ mov(r1, Operand(expr->flags()));
__ Push(r4, r3, r2, r1);
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ mov(r5, r0);
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- __ push(r0);
+ Label allocated, runtime_allocate;
+ __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ push(r5);
__ mov(r0, Operand(Smi::FromInt(size)));
__ push(r0);
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ pop(r5);
+ __ bind(&allocated);
// After this, registers are used as follows:
// r0: Newly allocated regexp.
- // r1: Materialized regexp.
+ // r5: Materialized regexp.
// r2: temp.
- __ pop(r1);
- __ CopyFields(r0, r1, r2.bit(), size / kPointerSize);
+ __ CopyFields(r0, r5, r2.bit(), size / kPointerSize);
context()->Plug(r0);
}
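
The shape of the fast path above: the literal slot in the literals array acts as a materialize-once cache, and every evaluation hands out a fresh shallow copy, so writes to one literal object don't leak into later evaluations. A simplified sketch with stand-in types (not V8's API):

#include <cstring>

struct RegExpObject { char fields[64]; };

RegExpObject* MaterializeViaRuntime() {            // slow-path stand-in
  return new RegExpObject();
}

RegExpObject* literal_slot = nullptr;              // entry in the literals array

RegExpObject* EvaluateRegExpLiteral() {
  if (literal_slot == nullptr) {                   // "undefined" sentinel check
    literal_slot = MaterializeViaRuntime();        // Runtime::kMaterializeRegExpLiteral
  }
  RegExpObject* clone = new RegExpObject;          // AllocateInNewSpace fast path
  std::memcpy(clone, literal_slot, sizeof(*clone)); // CopyFields
  return clone;
}
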
void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
- // Load the argument on the stack and call the runtime.
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::SIN);
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sin, 1);
+ __ CallStub(&stub);
context()->Plug(r0);
}
void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
- // Load the argument on the stack and call the runtime.
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::COS);
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_cos, 1);
+ __ CallStub(&stub);
context()->Plug(r0);
}
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
- // Load the argument on the stack and call the runtime function.
+void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::LOG);
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
+ __ CallStub(&stub);
context()->Plug(r0);
}
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the runtime function.
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_log, 1);
+ __ CallRuntime(Runtime::kMath_sqrt, 1);
context()->Plug(r0);
}
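
TranscendentalCacheStub memoizes the libm call, keyed on the bit pattern of the double argument, so repeated calls with the same input skip the expensive computation. A rough sketch of the idea; the cache geometry and hashing here are assumptions, not V8's actual layout:

#include <cmath>
#include <cstdint>
#include <cstring>

struct CacheEntry { uint64_t key; double value; };
static CacheEntry sin_cache[512];          // direct-mapped, zero-initialized

double CachedSin(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));    // key on the exact bit pattern
  CacheEntry& e = sin_cache[bits % 512];
  if (e.key != bits) {                     // miss: compute and remember
    e.key = bits;
    e.value = std::sin(x);
  }
  // Note: the all-zero initial entry happens to be correct for x == +0.0.
  return e.value;
}
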
void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(r0);
+ }
+
__ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
__ IndexFromHash(r0, r0);
+
context()->Plug(r0);
}
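
Strings whose contents form a valid array index cache that index inside their hash field, so the conversion above is only a shift and mask rather than a reparse of the characters. An illustrative sketch; the bit assignments below are assumed, not V8's exact layout:

#include <cstdint>

static const int kHashShift = 2;                        // assumed low-order flag bits
static const uint32_t kIndexValueMask = (1u << 24) - 1; // assumed index width

uint32_t IndexFromHashField(uint32_t hash_field) {
  return (hash_field >> kHashShift) & kIndexValueMask;
}
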
bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::SUB,
- overwrite,
- NO_UNARY_FLAGS);
+ GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
// GenericUnaryOpStub expects the argument to be in the
// accumulator register r0.
VisitForAccumulatorValue(expr->expression());
void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
ASSERT(mode == RelocInfo::CODE_TARGET ||
mode == RelocInfo::CODE_TARGET_CONTEXT);
+ switch (ic->kind()) {
+ case Code::LOAD_IC:
+ __ IncrementCounter(&Counters::named_load_full, 1, r1, r2);
+ break;
+ case Code::KEYED_LOAD_IC:
+ __ IncrementCounter(&Counters::keyed_load_full, 1, r1, r2);
+ break;
+ case Code::STORE_IC:
+ __ IncrementCounter(&Counters::named_store_full, 1, r1, r2);
+ break;
+ case Code::KEYED_STORE_IC:
+ __ IncrementCounter(&Counters::keyed_store_full, 1, r1, r2);
+ break;
+ default:
+ break;
+ }
+
__ Call(ic, mode);
}
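
The counters bumped above are plain in-memory cells; IncrementCounter emits a load/add/store against the counter's backing cell and emits nothing when the counter is disabled. A minimal model (simplified; the real StatsCounter plumbing is richer):

#include <cstdint>

struct StatsCounter {
  int32_t* cell;                         // null when counters are disabled
  void Increment(int32_t by = 1) {
    if (cell != nullptr) *cell += by;    // what the emitted code does inline
  }
};
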
void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+ switch (ic->kind()) {
+ case Code::LOAD_IC:
+ __ IncrementCounter(&Counters::named_load_full, 1, r1, r2);
+ break;
+ case Code::KEYED_LOAD_IC:
+ __ IncrementCounter(&Counters::keyed_load_full, 1, r1, r2);
+ break;
+ case Code::STORE_IC:
+ __ IncrementCounter(&Counters::named_store_full, 1, r1, r2);
+ break;
+ case Code::KEYED_STORE_IC:
+ __ IncrementCounter(&Counters::keyed_store_full, 1, r1, r2);
+ break;
+ default:
+ break;
+ }
+
__ Call(ic, RelocInfo::CODE_TARGET);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
__ j(equal, if_true);
__ cmp(result_register(), Factory::false_value());
__ j(equal, if_false);
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
__ test(result_register(), Operand(result_register()));
__ j(zero, if_false);
__ test(result_register(), Immediate(kSmiTagMask));
ASSERT(variable != NULL); // Must have been resolved.
Slot* slot = variable->AsSlot();
Property* prop = variable->AsProperty();
+
if (slot != NULL) {
switch (slot->type()) {
case Slot::PARAMETER:
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
EmitCallIC(ic, &patch_site);
-
__ test(eax, Operand(eax));
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
Literal* key = prop->key()->AsLiteral();
if (key != NULL && key->handle()->IsSymbol()) {
// Call to a named property, use call IC.
- VisitForStackValue(prop->obj());
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(prop->obj());
+ }
EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property.
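
Wrapping the receiver evaluation in a PreservePositionScope keeps positions recorded for the subexpression from clobbering the call's own source position. A rough RAII sketch of that idea with a stand-in recorder type (the real API differs):

struct PositionsRecorder { int pending_position; };

class PreservePositionScopeSketch {
 public:
  explicit PreservePositionScopeSketch(PositionsRecorder* r)
      : recorder_(r), saved_(r->pending_position) {}
  ~PreservePositionScopeSketch() { recorder_->pending_position = saved_; }
 private:
  PositionsRecorder* recorder_;
  const int saved_;
};
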
void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
-
VisitForAccumulatorValue(args->at(0));
if (FLAG_debug_code) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
+
// Notice that the labels are swapped.
context()->PrepareTest(&materialize_true, &materialize_false,
&if_false, &if_true, &fall_through);
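
Swapping if_true and if_false compiles the negation for free: every path that would have jumped to the true label now jumps to the false one, and vice versa, with no extra branch and no materialized boolean. A tiny stand-in illustration:

// Stand-in "labels" as callbacks, to make the control-flow idea concrete.
using Label = void (*)();

// Hypothetical: evaluate a test and dispatch to one of two labels.
void EmitTest(bool value, Label if_true, Label if_false) {
  value ? if_true() : if_false();
}

// Negation by exchanging the targets.
void EmitNegatedTest(bool value, Label if_true, Label if_false) {
  EmitTest(value, /*if_true=*/if_false, /*if_false=*/if_true);
}
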
void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+ switch (ic->kind()) {
+ case Code::LOAD_IC:
+ __ IncrementCounter(&Counters::named_load_full, 1);
+ break;
+ case Code::KEYED_LOAD_IC:
+ __ IncrementCounter(&Counters::keyed_load_full, 1);
+ break;
+ case Code::STORE_IC:
+ __ IncrementCounter(&Counters::named_store_full, 1);
+ break;
+ case Code::KEYED_STORE_IC:
+ __ IncrementCounter(&Counters::keyed_store_full, 1);
+ break;
+ default:
+ break;
+ }
+
__ call(ic, RelocInfo::CODE_TARGET);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
__ j(equal, if_true);
__ CompareRoot(result_register(), Heap::kFalseValueRootIndex);
__ j(equal, if_false);
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
__ SmiCompare(result_register(), Smi::FromInt(0));
__ j(equal, if_false);
Condition is_smi = masm_->CheckSmi(result_register());
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- if (scope()->is_function_scope() &&
+ // space for nested functions that don't need literals cloning. If
+ // we're running with the --always-opt or the --prepare-always-opt
+ // flag, we need to use the runtime function so that the new function
+ // we are creating here gets a chance to have its code optimized and
+ // doesn't just get a copy of the existing unoptimized code.
+ if (!FLAG_always_opt &&
+ !FLAG_prepare_always_opt &&
+ scope()->is_function_scope() &&
info->num_literals() == 0 &&
!pretenure) {
FastNewClosureStub stub;
void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
-
VisitForAccumulatorValue(args->at(0));
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(rax);
+ }
+
__ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
ASSERT(String::kHashShift >= kSmiTagSize);
__ IndexFromHash(rax, rax);
void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+ switch (ic->kind()) {
+ case Code::LOAD_IC:
+ __ IncrementCounter(&Counters::named_load_full, 1);
+ break;
+ case Code::KEYED_LOAD_IC:
+ __ IncrementCounter(&Counters::keyed_load_full, 1);
+ break;
+ case Code::STORE_IC:
+ __ IncrementCounter(&Counters::named_store_full, 1);
+ break;
+ case Code::KEYED_STORE_IC:
+ __ IncrementCounter(&Counters::keyed_store_full, 1);
+ break;
+ default:
+ break;
+ }
+
__ call(ic, RelocInfo::CODE_TARGET);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();