// Update the code and feedback vector for the shared function info.
shared->ReplaceCode(*info->code());
- if (shared->optimization_disabled()) info->code()->set_optimizable(false);
shared->set_feedback_vector(*info->feedback_vector());
return info->code();
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
- code->set_optimizable(info->IsOptimizable() &&
- !info->function()->dont_optimize() &&
- info->function()->scope()->AllowsLazyCompilation());
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
} \
} while (false);
-// ComputeMarker must only be used when SharedFunctionInfo is known.
-static const char* ComputeMarker(Code* code) {
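+// Computes the marker attached to code log events: "*" for optimized code,
+// "~" for unoptimized code that may still be optimized, and "" otherwise.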
+static const char* ComputeMarker(SharedFunctionInfo* shared, Code* code) {
switch (code->kind()) {
- case Code::FUNCTION: return code->optimizable() ? "~" : "";
- case Code::OPTIMIZED_FUNCTION: return "*";
- default: return "";
+ case Code::FUNCTION:
+ return shared->optimization_disabled() ? "" : "~";
+ case Code::OPTIMIZED_FUNCTION:
+ return "*";
+ default:
+ return "";
}
}
CompilationInfo* info,
Name* name) {
name_buffer_->Init(tag);
- name_buffer_->AppendBytes(ComputeMarker(code));
+ name_buffer_->AppendBytes(ComputeMarker(shared, code));
name_buffer_->AppendName(name);
LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size());
}
CompilationInfo* info,
Name* source, int line, int column) {
name_buffer_->Init(tag);
- name_buffer_->AppendBytes(ComputeMarker(code));
+ name_buffer_->AppendBytes(ComputeMarker(shared, code));
name_buffer_->AppendString(shared->DebugName());
name_buffer_->AppendByte(' ');
if (source->IsString()) {
}
msg.Append(',');
msg.AppendAddress(shared->address());
- msg.Append(",%s", ComputeMarker(code));
+ msg.Append(",%s", ComputeMarker(shared, code));
msg.WriteToLogFile();
}
}
msg.Append(":%d:%d\",", line, column);
msg.AppendAddress(shared->address());
- msg.Append(",%s", ComputeMarker(code));
+ msg.Append(",%s", ComputeMarker(shared, code));
msg.WriteToLogFile();
}
}
-bool Code::optimizable() {
- DCHECK_EQ(FUNCTION, kind());
- return READ_BYTE_FIELD(this, kOptimizableOffset) == 1;
-}
-
-
-void Code::set_optimizable(bool value) {
- DCHECK_EQ(FUNCTION, kind());
- WRITE_BYTE_FIELD(this, kOptimizableOffset, value ? 1 : 0);
-}
-
-
bool Code::has_deoptimization_support() {
DCHECK_EQ(FUNCTION, kind());
byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
set_compiler_hints(BooleanBit::set(compiler_hints(),
kOptimizationDisabled,
disable));
- // If disabling optimizations we reflect that in the code object so
- // it will not be counted as optimizable code.
- if ((code()->kind() == Code::FUNCTION) && disable) {
- code()->set_optimizable(false);
- }
}
set_optimization_disabled(false);
set_opt_count(0);
set_deopt_count(0);
- code()->set_optimizable(true);
}
}
bool JSFunction::IsOptimizable() {
- return code()->kind() == Code::FUNCTION && code()->optimizable();
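+ // A function is optimizable if it still runs unoptimized code and
+ // optimization has not been disabled on its shared function info.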
+ return code()->kind() == Code::FUNCTION && !shared()->optimization_disabled();
}
void JSFunction::MarkForOptimization() {
Isolate* isolate = GetIsolate();
DCHECK(!IsOptimized());
- DCHECK(shared()->allows_lazy_compilation() || code()->optimizable());
+ DCHECK(shared()->allows_lazy_compilation() || IsOptimizable());
set_code_no_write_barrier(
isolate->builtins()->builtin(Builtins::kCompileOptimized));
// No write barrier required, since the builtin is part of the root set.
}
DCHECK(!IsInOptimizationQueue());
DCHECK(!IsOptimized());
- DCHECK(shared()->allows_lazy_compilation() || code()->optimizable());
+ DCHECK(shared()->allows_lazy_compilation() || IsOptimizable());
DCHECK(isolate->concurrent_recompilation_enabled());
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Marking ");
bool SharedFunctionInfo::IsInlineable() {
// Check that the function has a script associated with it.
if (!script()->IsScript()) return false;
- if (optimization_disabled()) return false;
- // If we never ran this (unlikely) then lets try to optimize it.
- if (code()->kind() != Code::FUNCTION) return true;
- return code()->optimizable();
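+ // Otherwise, the function is inlineable unless optimization has been disabled.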
+ return !optimization_disabled();
}
DCHECK(reason != kNoReason);
set_optimization_disabled(true);
set_disable_optimization_reason(reason);
- // Code should be the lazy compilation stub or else unoptimized. If the
- // latter, disable optimization for the code too.
+ // Code should be the lazy compilation stub or else unoptimized.
DCHECK(code()->kind() == Code::FUNCTION || code()->kind() == Code::BUILTIN);
- if (code()->kind() == Code::FUNCTION) {
- code()->set_optimizable(false);
- }
PROFILE(GetIsolate(), CodeDisableOptEvent(code(), this));
if (FLAG_trace_opt) {
PrintF("[disabled optimization for ");
opt_count() >= FLAG_max_opt_count) {
// Re-enable optimizations if they were disabled due to opt_count limit.
set_optimization_disabled(false);
- code()->set_optimizable(true);
}
set_opt_count(0);
set_deopt_count(0);
inline bool can_have_weak_objects();
inline void set_can_have_weak_objects(bool value);
- // [optimizable]: For FUNCTION kind, tells if it is optimizable.
- inline bool optimizable();
- inline void set_optimizable(bool value);
-
// [has_deoptimization_support]: For FUNCTION kind, tells if it has
// deoptimization support.
inline bool has_deoptimization_support();
STATIC_ASSERT((kConstantPoolOffset & kPointerAlignmentMask) == 0);
// Byte offsets within kKindSpecificFlags1Offset.
- static const int kOptimizableOffset = kKindSpecificFlags1Offset;
-
- static const int kFullCodeFlags = kOptimizableOffset + 1;
+ static const int kFullCodeFlags = kKindSpecificFlags1Offset;
class FullCodeFlagsHasDeoptimizationSupportField:
public BitField<bool, 0, 1> {}; // NOLINT
class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
}
- // If the code is not optimizable, don't try OSR.
- if (!shared->code()->optimizable()) return;
+ // If optimization is disabled for the function, don't try OSR.
+ if (shared->optimization_disabled()) return;
// We are not prepared to do OSR for a function that already has an
// allocated arguments object. The optimized code would bypass it for
static bool IsSuitableForOnStackReplacement(Isolate* isolate,
- Handle<JSFunction> function,
- Handle<Code> current_code) {
+ Handle<JSFunction> function) {
- // Keep track of whether we've succeeded in optimizing.
- if (!current_code->optimizable()) return false;
+ // If optimization is disabled for the function, OSR is pointless.
+ if (function->shared()->optimization_disabled()) return false;
// If we are trying to do OSR when there are already optimized
// activations of the function, it means (a) the function is directly or
// indirectly recursive and (b) an optimized invocation has been
PrintF(" at AST id %d]\n", ast_id.ToInt());
}
result = Compiler::GetConcurrentlyOptimizedCode(job);
- } else if (IsSuitableForOnStackReplacement(isolate, function, caller_code)) {
+ } else if (IsSuitableForOnStackReplacement(isolate, function)) {
if (FLAG_trace_osr) {
PrintF("[OSR - Compiling: ");
function->PrintName();
// The following assertion was lifted from the DCHECK inside
// JSFunction::MarkForOptimization().
RUNTIME_ASSERT(function->shared()->allows_lazy_compilation() ||
- (function->code()->kind() == Code::FUNCTION &&
- function->code()->optimizable()));
+ function->IsOptimizable());
// If the function is already optimized, just return.
if (function->IsOptimized()) return isolate->heap()->undefined_value();
// The following assertion was lifted from the DCHECK inside
// JSFunction::MarkForOptimization().
RUNTIME_ASSERT(function->shared()->allows_lazy_compilation() ||
- (function->code()->kind() == Code::FUNCTION &&
- function->code()->optimizable()));
+ function->IsOptimizable());
// If the function is already optimized, just return.
if (function->IsOptimized()) return isolate->heap()->undefined_value();