Label install_optimized;
HValue* first_context_slot = Add<HLoadNamedField>(optimized_map,
HObjectAccess::ForFirstContextSlot());
+ HValue* first_osr_ast_slot = Add<HLoadNamedField>(optimized_map,
+ HObjectAccess::ForFirstOsrAstIdSlot());
+ HValue* osr_ast_id_none = Add<HConstant>(BailoutId::None().ToInt());
IfBuilder already_in(this);
already_in.If<HCompareObjectEqAndBranch>(native_context,
first_context_slot);
+ already_in.AndIf<HCompareObjectEqAndBranch>(first_osr_ast_slot,
+ osr_ast_id_none);
already_in.Then();
{
HValue* code_object = Add<HLoadNamedField>(optimized_map,
HObjectAccess::ForFirstCodeSlot());
HValue* array_length = Add<HLoadNamedField>(optimized_map,
HObjectAccess::ForFixedArrayLength());
- HValue* key = loop_builder.BeginBody(array_length,
+ HValue* slot_iterator = loop_builder.BeginBody(array_length,
graph()->GetConstant0(),
Token::GT);
{
HValue* second_entry_index =
Add<HConstant>(SharedFunctionInfo::kSecondEntryIndex);
IfBuilder restore_check(this);
- restore_check.If<HCompareNumericAndBranch>(key, second_entry_index,
- Token::EQ);
+ restore_check.If<HCompareNumericAndBranch>(
+ slot_iterator, second_entry_index, Token::EQ);
restore_check.Then();
{
// Store the unoptimized code
}
restore_check.Else();
{
- HValue* keyed_minus = AddUncasted<HSub>(
- key, shared_function_entry_length);
- HInstruction* keyed_lookup = Add<HLoadKeyed>(optimized_map,
- keyed_minus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+ STATIC_ASSERT(SharedFunctionInfo::kContextOffset == 0);
+ STATIC_ASSERT(SharedFunctionInfo::kEntryLength -
+ SharedFunctionInfo::kOsrAstIdOffset == 1);
+ HValue* native_context_slot = AddUncasted<HSub>(
+ slot_iterator, shared_function_entry_length);
+ HValue* osr_ast_id_slot = AddUncasted<HSub>(
+ slot_iterator, graph()->GetConstant1());
+ HInstruction* native_context_entry = Add<HLoadKeyed>(optimized_map,
+ native_context_slot, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+ HInstruction* osr_ast_id_entry = Add<HLoadKeyed>(optimized_map,
+ osr_ast_id_slot, static_cast<HValue*>(NULL), FAST_ELEMENTS);
IfBuilder done_check(this);
done_check.If<HCompareObjectEqAndBranch>(native_context,
- keyed_lookup);
+ native_context_entry);
+ done_check.AndIf<HCompareObjectEqAndBranch>(osr_ast_id_entry,
+ osr_ast_id_none);
done_check.Then();
{
// Hit: fetch the optimized code.
- HValue* keyed_plus = AddUncasted<HAdd>(
- keyed_minus, graph()->GetConstant1());
+ HValue* code_slot = AddUncasted<HAdd>(
+ native_context_slot, graph()->GetConstant1());
HValue* code_object = Add<HLoadKeyed>(optimized_map,
- keyed_plus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+ code_slot, static_cast<HValue*>(NULL), FAST_ELEMENTS);
BuildInstallOptimizedCode(js_function, native_context, code_object);
// Fall out of the loop
}
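
For readers not fluent in the IfBuilder/LoopBuilder graph idiom, the stub above encodes a plain backward scan over a flat array of four-slot entries: the first entry is checked directly, then the rest of the map is walked backwards, and an entry hits only when both the native context and the OSR AST id match (always BailoutId::None() here, since the closure stub installs only non-OSR code). A minimal standalone sketch of that scan, folded into a single loop, with illustrative types rather than V8's:

    #include <vector>

    using Object = const void*;  // stand-in for V8's tagged pointers

    constexpr int kEntriesStart     = 1;  // slot 0 links code-map holders
    constexpr int kContextOffset    = 0;
    constexpr int kCachedCodeOffset = 1;
    constexpr int kOsrAstIdOffset   = 3;
    constexpr int kEntryLength      = 4;

    Object FindCachedCode(const std::vector<Object>& map,
                          Object native_context, Object osr_ast_id_none) {
      for (int i = static_cast<int>(map.size()) - kEntryLength;
           i >= kEntriesStart; i -= kEntryLength) {
        if (map[i + kContextOffset] == native_context &&
            map[i + kOsrAstIdOffset] == osr_ast_id_none) {
          return map[i + kCachedCodeOffset];  // hit: the optimized code
        }
      }
      return nullptr;  // miss: the caller installs unoptimized code
    }
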
-static Handle<Code> GetCodeFromOptimizedCodeMap(Handle<JSFunction> function) {
+static Handle<Code> GetCodeFromOptimizedCodeMap(Handle<JSFunction> function,
+ BailoutId osr_ast_id) {
if (FLAG_cache_optimized_code) {
Handle<SharedFunctionInfo> shared(function->shared());
DisallowHeapAllocation no_gc;
int index = shared->SearchOptimizedCodeMap(
- function->context()->native_context());
+ function->context()->native_context(), osr_ast_id);
if (index > 0) {
if (FLAG_trace_opt) {
PrintF("[found optimized code for ");
function->ShortPrint();
+ if (!osr_ast_id.IsNone()) {
+ PrintF(" at OSR AST id %d", osr_ast_id.ToInt());
+ }
PrintF("]\n");
}
FixedArray* literals = shared->GetLiteralsFromOptimizedCodeMap(index);
Handle<Code> code = info->code();
if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do.
- // Cache non-OSR optimized code.
- if (FLAG_cache_optimized_code && !info->is_osr()) {
+ // Cache optimized code.
+ if (FLAG_cache_optimized_code) {
Handle<JSFunction> function = info->closure();
Handle<SharedFunctionInfo> shared(function->shared());
Handle<FixedArray> literals(function->literals());
Handle<Context> native_context(function->context()->native_context());
SharedFunctionInfo::AddToOptimizedCodeMap(
- shared, native_context, code, literals);
+ shared, native_context, code, literals, info->osr_ast_id());
}
}
Handle<Code> current_code,
ConcurrencyMode mode,
BailoutId osr_ast_id) {
- if (osr_ast_id.IsNone()) { // No cache for OSR.
- Handle<Code> cached_code = GetCodeFromOptimizedCodeMap(function);
- if (!cached_code.is_null()) return cached_code;
- }
+ Handle<Code> cached_code = GetCodeFromOptimizedCodeMap(function, osr_ast_id);
+ if (!cached_code.is_null()) return cached_code;
SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(function));
Isolate* isolate = info->isolate();
Compiler::RecordFunctionCompilation(
Logger::LAZY_COMPILE_TAG, info.get(), shared);
if (info->shared_info()->SearchOptimizedCodeMap(
- info->context()->native_context()) == -1) {
+ info->context()->native_context(), info->osr_ast_id()) == -1) {
InsertCodeIntoOptimizedCodeMap(info.get());
}
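
Taken together, the compiler.cc hunks switch the protocol from "cache non-OSR code only" to "cache everything, keyed by (native context, OSR AST id)": the map lookup now runs for OSR compiles too, and insertion records the id next to the code. A hedged sketch of that protocol, using a std::map keyed on the pair instead of V8's flat FixedArray (the names and the -1 encoding of BailoutId::None() are assumptions for illustration):

    #include <map>
    #include <utility>

    struct Code {};             // stand-in for a compiled code object
    using NativeContext = int;  // stand-in for Context*
    using OsrAstId = int;       // BailoutId::None() modeled as -1
    constexpr OsrAstId kNoOsrAstId = -1;

    std::map<std::pair<NativeContext, OsrAstId>, Code*> code_cache;

    Code* GetOptimizedCode(NativeContext ctx, OsrAstId osr_ast_id) {
      auto it = code_cache.find(std::make_pair(ctx, osr_ast_id));
      if (it != code_cache.end()) return it->second;  // cached, OSR or not
      Code* code = new Code();  // stand-in for the optimizing compile
      code_cache.emplace(std::make_pair(ctx, osr_ast_id), code);
      return code;
    }
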
result->set_context(*context);
- int index = function_info->SearchOptimizedCodeMap(context->native_context());
+ int index = function_info->SearchOptimizedCodeMap(context->native_context(),
+ BailoutId::None());
if (!function_info->bound() && index < 0) {
int number_of_literals = function_info->num_literals();
Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
return HObjectAccess(kInobject, SharedFunctionInfo::kFirstContextSlot);
}
+ static HObjectAccess ForFirstOsrAstIdSlot() {
+ return HObjectAccess(kInobject, SharedFunctionInfo::kFirstOsrAstIdSlot);
+ }
+
static HObjectAccess ForOptimizedCodeMap() {
return HObjectAccess(kInobject,
SharedFunctionInfo::kOptimizedCodeMapOffset);
void CodeFlusher::ProcessOptimizedCodeMaps() {
- static const int kEntriesStart = SharedFunctionInfo::kEntriesStart;
- static const int kEntryLength = SharedFunctionInfo::kEntryLength;
- static const int kContextOffset = 0;
- static const int kCodeOffset = 1;
- static const int kLiteralsOffset = 2;
- STATIC_ASSERT(kEntryLength == 3);
+ STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
SharedFunctionInfo* holder = optimized_code_map_holder_head_;
SharedFunctionInfo* next_holder;
+
while (holder != NULL) {
next_holder = GetNextCodeMap(holder);
ClearNextCodeMap(holder);
FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
- int new_length = kEntriesStart;
+ int new_length = SharedFunctionInfo::kEntriesStart;
int old_length = code_map->length();
- for (int i = kEntriesStart; i < old_length; i += kEntryLength) {
- Code* code = Code::cast(code_map->get(i + kCodeOffset));
- MarkBit code_mark = Marking::MarkBitFrom(code);
- if (!code_mark.Get()) {
- continue;
+ for (int i = SharedFunctionInfo::kEntriesStart;
+ i < old_length;
+ i += SharedFunctionInfo::kEntryLength) {
+ Code* code =
+ Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
+ if (!Marking::MarkBitFrom(code).Get()) continue;
+
+ // Move every slot in the entry.
+ for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
+ int dst_index = new_length++;
+ Object** slot = code_map->RawFieldOfElementAt(dst_index);
+ Object* object = code_map->get(i + j);
+ code_map->set(dst_index, object);
+ if (j == SharedFunctionInfo::kOsrAstIdOffset) {
+ ASSERT(object->IsSmi());
+ } else {
+ ASSERT(Marking::IsBlack(
+ Marking::MarkBitFrom(HeapObject::cast(*slot))));
+ isolate_->heap()->mark_compact_collector()->
+ RecordSlot(slot, slot, *slot);
+ }
}
-
- // Update and record the context slot in the optimized code map.
- Object** context_slot = HeapObject::RawField(code_map,
- FixedArray::OffsetOfElementAt(new_length));
- code_map->set(new_length++, code_map->get(i + kContextOffset));
- ASSERT(Marking::IsBlack(
- Marking::MarkBitFrom(HeapObject::cast(*context_slot))));
- isolate_->heap()->mark_compact_collector()->
- RecordSlot(context_slot, context_slot, *context_slot);
-
- // Update and record the code slot in the optimized code map.
- Object** code_slot = HeapObject::RawField(code_map,
- FixedArray::OffsetOfElementAt(new_length));
- code_map->set(new_length++, code_map->get(i + kCodeOffset));
- ASSERT(Marking::IsBlack(
- Marking::MarkBitFrom(HeapObject::cast(*code_slot))));
- isolate_->heap()->mark_compact_collector()->
- RecordSlot(code_slot, code_slot, *code_slot);
-
- // Update and record the literals slot in the optimized code map.
- Object** literals_slot = HeapObject::RawField(code_map,
- FixedArray::OffsetOfElementAt(new_length));
- code_map->set(new_length++, code_map->get(i + kLiteralsOffset));
- ASSERT(Marking::IsBlack(
- Marking::MarkBitFrom(HeapObject::cast(*literals_slot))));
- isolate_->heap()->mark_compact_collector()->
- RecordSlot(literals_slot, literals_slot, *literals_slot);
}
// Trim the optimized code map if entries have been removed.
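
The rewritten loop compacts surviving entries in place: a whole four-slot entry lives or dies with the mark bit on its code slot, and every retained slot except the Smi-encoded OSR AST id is re-recorded with the collector. The same compaction in isolation, with a boolean standing in for the GC's mark bits:

    #include <vector>

    constexpr int kEntriesStart     = 1;
    constexpr int kCachedCodeOffset = 1;
    constexpr int kOsrAstIdOffset   = 3;
    constexpr int kEntryLength      = 4;

    struct Slot { const void* value; bool marked; };

    int CompactCodeMap(std::vector<Slot>& map) {
      int new_length = kEntriesStart;
      for (int i = kEntriesStart;
           i + kEntryLength <= static_cast<int>(map.size());
           i += kEntryLength) {
        if (!map[i + kCachedCodeOffset].marked) continue;  // drop dead entry
        for (int j = 0; j < kEntryLength; j++) {
          map[new_length++] = map[i + j];
          // The real collector calls RecordSlot() on every moved slot
          // except j == kOsrAstIdOffset, which holds a Smi, not a pointer.
        }
      }
      map.resize(new_length);  // the analogue of trimming the FixedArray
      return new_length;
    }
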
cached_map,
SKIP_WRITE_BARRIER);
}
- Object** slot =
- HeapObject::RawField(prototype_transitions,
- FixedArray::OffsetOfElementAt(proto_index));
+ Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
RecordSlot(slot, slot, prototype);
new_number_of_transitions++;
}
for (int i = 0; i < table->Capacity(); i++) {
if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
Object** key_slot =
- HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
- ObjectHashTable::EntryToIndex(i)));
+ table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
RecordSlot(anchor, key_slot, *key_slot);
Object** value_slot =
- HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
- ObjectHashTable::EntryToValueIndex(i)));
+ table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
MarkCompactMarkingVisitor::MarkObjectByPointer(
this, anchor, value_slot);
}
Object** DescriptorArray::GetKeySlot(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
- return HeapObject::RawField(
- reinterpret_cast<HeapObject*>(this),
- OffsetOfElementAt(ToKeyIndex(descriptor_number)));
+ return RawFieldOfElementAt(ToKeyIndex(descriptor_number));
}
Object** DescriptorArray::GetValueSlot(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
- return HeapObject::RawField(
- reinterpret_cast<HeapObject*>(this),
- OffsetOfElementAt(ToValueIndex(descriptor_number)));
+ return RawFieldOfElementAt(ToValueIndex(descriptor_number));
}
void JSFunctionResultCache::Clear() {
int cache_size = size();
- Object** entries_start = RawField(this, OffsetOfElementAt(kEntriesIndex));
+ Object** entries_start = RawFieldOfElementAt(kEntriesIndex);
MemsetPointer(entries_start,
GetHeap()->the_hole_value(),
cache_size - kEntriesIndex);
Object** DependentCode::slot_at(int i) {
- return HeapObject::RawField(
- this, FixedArray::OffsetOfElementAt(kCodesStartIndex + i));
+ return RawFieldOfElementAt(kCodesStartIndex + i);
}
for (int idx = Context::FIRST_WEAK_SLOT;
idx < Context::NATIVE_CONTEXT_SLOTS;
++idx) {
- Object** slot =
- HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
+ Object** slot = Context::cast(object)->RawFieldOfElementAt(idx);
collector->RecordSlot(slot, slot, *slot);
}
}
Handle<SharedFunctionInfo> shared,
Handle<Context> native_context,
Handle<Code> code,
- Handle<FixedArray> literals) {
+ Handle<FixedArray> literals,
+ BailoutId osr_ast_id) {
CALL_HEAP_FUNCTION_VOID(
shared->GetIsolate(),
- shared->AddToOptimizedCodeMap(*native_context, *code, *literals));
+ shared->AddToOptimizedCodeMap(
+ *native_context, *code, *literals, osr_ast_id));
}
MaybeObject* SharedFunctionInfo::AddToOptimizedCodeMap(Context* native_context,
Code* code,
- FixedArray* literals) {
+ FixedArray* literals,
+ BailoutId osr_ast_id) {
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
ASSERT(native_context->IsNativeContext());
- STATIC_ASSERT(kEntryLength == 3);
+ STATIC_ASSERT(kEntryLength == 4);
Heap* heap = GetHeap();
FixedArray* new_code_map;
Object* value = optimized_code_map();
+ Smi* osr_ast_id_smi = Smi::FromInt(osr_ast_id.ToInt());
if (value->IsSmi()) {
// No optimized code map.
ASSERT_EQ(0, Smi::cast(value)->value());
- // Create 3 entries per context {context, code, literals}.
+ // Create 4 entries per context {context, code, literals, ast-id}.
MaybeObject* maybe = heap->AllocateFixedArray(kInitialLength);
if (!maybe->To(&new_code_map)) return maybe;
- new_code_map->set(kEntriesStart + 0, native_context);
- new_code_map->set(kEntriesStart + 1, code);
- new_code_map->set(kEntriesStart + 2, literals);
+ new_code_map->set(kEntriesStart + kContextOffset, native_context);
+ new_code_map->set(kEntriesStart + kCachedCodeOffset, code);
+ new_code_map->set(kEntriesStart + kLiteralsOffset, literals);
+ new_code_map->set(kEntriesStart + kOsrAstIdOffset, osr_ast_id_smi);
} else {
// Copy old map and append one new entry.
FixedArray* old_code_map = FixedArray::cast(value);
- ASSERT_EQ(-1, SearchOptimizedCodeMap(native_context));
+ ASSERT_EQ(-1, SearchOptimizedCodeMap(native_context, osr_ast_id));
int old_length = old_code_map->length();
int new_length = old_length + kEntryLength;
MaybeObject* maybe = old_code_map->CopySize(new_length);
if (!maybe->To(&new_code_map)) return maybe;
- new_code_map->set(old_length + 0, native_context);
- new_code_map->set(old_length + 1, code);
- new_code_map->set(old_length + 2, literals);
+ new_code_map->set(old_length + kContextOffset, native_context);
+ new_code_map->set(old_length + kCachedCodeOffset, code);
+ new_code_map->set(old_length + kLiteralsOffset, literals);
+ new_code_map->set(old_length + kOsrAstIdOffset, osr_ast_id_smi);
// Zap the old map for the sake of the heap verifier.
if (Heap::ShouldZapGarbage()) {
Object** data = old_code_map->data_start();
}
#ifdef DEBUG
for (int i = kEntriesStart; i < new_code_map->length(); i += kEntryLength) {
- ASSERT(new_code_map->get(i)->IsNativeContext());
- ASSERT(new_code_map->get(i + 1)->IsCode());
- ASSERT(Code::cast(new_code_map->get(i + 1))->kind() ==
+ ASSERT(new_code_map->get(i + kContextOffset)->IsNativeContext());
+ ASSERT(new_code_map->get(i + kCachedCodeOffset)->IsCode());
+ ASSERT(Code::cast(new_code_map->get(i + kCachedCodeOffset))->kind() ==
Code::OPTIMIZED_FUNCTION);
- ASSERT(new_code_map->get(i + 2)->IsFixedArray());
+ ASSERT(new_code_map->get(i + kLiteralsOffset)->IsFixedArray());
+ ASSERT(new_code_map->get(i + kOsrAstIdOffset)->IsSmi());
}
#endif
set_optimized_code_map(new_code_map);
}
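
Stripped of the MaybeObject plumbing, AddToOptimizedCodeMap either allocates a minimal map or appends one four-slot entry, and the OSR AST id is the entry's only non-pointer slot. A sketch of the append over std::vector, with illustrative types:

    #include <vector>

    using Object = const void*;

    constexpr int kContextOffset    = 0;
    constexpr int kCachedCodeOffset = 1;
    constexpr int kLiteralsOffset   = 2;
    constexpr int kOsrAstIdOffset   = 3;
    constexpr int kEntryLength      = 4;

    void AddEntry(std::vector<Object>& map, Object context, Object code,
                  Object literals, Object osr_ast_id_smi) {
      size_t base = map.size();  // append past the existing entries
      map.resize(base + kEntryLength);
      map[base + kContextOffset]    = context;
      map[base + kCachedCodeOffset] = code;
      map[base + kLiteralsOffset]   = literals;
      map[base + kOsrAstIdOffset]   = osr_ast_id_smi;  // Smi, never a pointer
    }
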
-
Code* SharedFunctionInfo::GetCodeFromOptimizedCodeMap(int index) {
ASSERT(index > kEntriesStart);
FixedArray* code_map = FixedArray::cast(optimized_code_map());
}
}
while (i < (code_map->length() - kEntryLength)) {
- code_map->set(i, code_map->get(i + kEntryLength));
- code_map->set(i + 1, code_map->get(i + 1 + kEntryLength));
- code_map->set(i + 2, code_map->get(i + 2 + kEntryLength));
+ code_map->set(i + kContextOffset,
+ code_map->get(i + kContextOffset + kEntryLength));
+ code_map->set(i + kCachedCodeOffset,
+ code_map->get(i + kCachedCodeOffset + kEntryLength));
+ code_map->set(i + kLiteralsOffset,
+ code_map->get(i + kLiteralsOffset + kEntryLength));
+ code_map->set(i + kOsrAstIdOffset,
+ code_map->get(i + kOsrAstIdOffset + kEntryLength));
i += kEntryLength;
}
if (removed_entry) {
}
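
Eviction closes the gap left by a removed entry by copying each later entry down one full entry length; with four slots per entry, all four offsets move, which is what the per-offset copies above spell out. The equivalent shift in isolation (per element rather than per offset):

    #include <vector>

    constexpr int kEntryLength = 4;

    // `index` must be the first slot of the entry being evicted.
    template <typename T>
    void EvictEntry(std::vector<T>& map, int index) {
      for (int i = index;
           i + kEntryLength < static_cast<int>(map.size()); ++i) {
        map[i] = map[i + kEntryLength];  // slide later entries down
      }
      map.resize(map.size() - kEntryLength);
    }
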
-int SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context) {
+int SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context,
+ BailoutId osr_ast_id) {
ASSERT(native_context->IsNativeContext());
if (!FLAG_cache_optimized_code) return -1;
Object* value = optimized_code_map();
if (!value->IsSmi()) {
FixedArray* optimized_code_map = FixedArray::cast(value);
int length = optimized_code_map->length();
+ Smi* osr_ast_id_smi = Smi::FromInt(osr_ast_id.ToInt());
for (int i = kEntriesStart; i < length; i += kEntryLength) {
- if (optimized_code_map->get(i) == native_context) {
- return i + 1;
+ if (optimized_code_map->get(i + kContextOffset) == native_context &&
+ optimized_code_map->get(i + kOsrAstIdOffset) == osr_ast_id_smi) {
+ return i + kCachedCodeOffset;
}
}
if (FLAG_trace_opt) {
// Code Generation support.
static int OffsetOfElementAt(int index) { return SizeFor(index); }
+ // Garbage collection support.
+ Object** RawFieldOfElementAt(int index) {
+ return HeapObject::RawField(this, OffsetOfElementAt(index));
+ }
+
// Casting.
static inline FixedArray* cast(Object* obj);
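
The new helper just names the recurring HeapObject::RawField(array, OffsetOfElementAt(index)) pattern: the address of an element slot, usable both for direct reads and writes and for GC slot recording. Its effect, sketched over a simplified layout (real FixedArrays start with a map pointer and a Smi length, and the offset goes through byte arithmetic rather than an array index):

    struct FakeFixedArray {
      void* map_word;      // stand-in for the heap object's map pointer
      void* length_field;  // stand-in for the Smi-encoded length
      void* elements[8];   // element storage follows the header

      // What RawFieldOfElementAt boils down to: the address of slot `index`.
      void** RawFieldOfElementAt(int index) { return &elements[index]; }
    };
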
// and a shared literals array or Smi(0) if none.
DECL_ACCESSORS(optimized_code_map, Object)
- // Returns index i of the entry with the specified context. At position
- // i - 1 is the context, position i the code, and i + 1 the literals array.
- // Returns -1 when no matching entry is found.
- int SearchOptimizedCodeMap(Context* native_context);
+ // Returns index i of the entry with the specified context and OSR AST id.
+ // At position i - 1 is the context, position i the code, and i + 1 the
+ // literals array; i + 2 holds the Smi-encoded OSR AST id. Returns -1 when
+ // no matching entry is found.
+ int SearchOptimizedCodeMap(Context* native_context, BailoutId osr_ast_id);
// Installs optimized code from the code map on the given closure. The
// index has to be consistent with a search result as defined above.
// Add a new entry to the optimized code map.
MUST_USE_RESULT MaybeObject* AddToOptimizedCodeMap(Context* native_context,
Code* code,
- FixedArray* literals);
+ FixedArray* literals,
+ BailoutId osr_ast_id);
static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
Handle<Context> native_context,
Handle<Code> code,
- Handle<FixedArray> literals);
+ Handle<FixedArray> literals,
+ BailoutId osr_ast_id);
// Layout description of the optimized code map.
static const int kNextMapIndex = 0;
static const int kEntriesStart = 1;
- static const int kEntryLength = 3;
- static const int kFirstContextSlot = FixedArray::kHeaderSize + kPointerSize;
- static const int kFirstCodeSlot = FixedArray::kHeaderSize + 2 * kPointerSize;
+ static const int kContextOffset = 0;
+ static const int kCachedCodeOffset = 1;
+ static const int kLiteralsOffset = 2;
+ static const int kOsrAstIdOffset = 3;
+ static const int kEntryLength = 4;
+ static const int kFirstContextSlot = FixedArray::kHeaderSize +
+ (kEntriesStart + kContextOffset) * kPointerSize;
+ static const int kFirstCodeSlot = FixedArray::kHeaderSize +
+ (kEntriesStart + kCachedCodeOffset) * kPointerSize;
+ static const int kFirstOsrAstIdSlot = FixedArray::kHeaderSize +
+ (kEntriesStart + kOsrAstIdOffset) * kPointerSize;
static const int kSecondEntryIndex = kEntryLength + kEntriesStart;
static const int kInitialLength = kEntriesStart + kEntryLength;
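
The kFirst*Slot byte offsets are now derived from the entry layout instead of being written as literal multiples of kPointerSize. A standalone compile-time check of that arithmetic, assuming an 8-byte pointer and a two-word FixedArray header (both illustrative; the real values are build-dependent):

    constexpr int kPointerSize = 8;                 // assumed for the sketch
    constexpr int kHeaderSize  = 2 * kPointerSize;  // map word + length word

    constexpr int kEntriesStart     = 1;
    constexpr int kContextOffset    = 0;
    constexpr int kCachedCodeOffset = 1;
    constexpr int kOsrAstIdOffset   = 3;

    constexpr int kFirstContextSlot =
        kHeaderSize + (kEntriesStart + kContextOffset) * kPointerSize;
    constexpr int kFirstCodeSlot =
        kHeaderSize + (kEntriesStart + kCachedCodeOffset) * kPointerSize;
    constexpr int kFirstOsrAstIdSlot =
        kHeaderSize + (kEntriesStart + kOsrAstIdOffset) * kPointerSize;

    // The first two reproduce the old hand-written constants exactly;
    // the third is the new slot the Hydrogen stub reads.
    static_assert(kFirstContextSlot  == kHeaderSize + 1 * kPointerSize, "");
    static_assert(kFirstCodeSlot     == kHeaderSize + 2 * kPointerSize, "");
    static_assert(kFirstOsrAstIdSlot == kHeaderSize + 4 * kPointerSize, "");
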
Object** TransitionArray::GetKeySlot(int transition_number) {
ASSERT(!IsSimpleTransition());
ASSERT(transition_number < number_of_transitions());
- return HeapObject::RawField(
- reinterpret_cast<HeapObject*>(this),
- OffsetOfElementAt(ToKeyIndex(transition_number)));
+ return RawFieldOfElementAt(ToKeyIndex(transition_number));
}