void set_max_young_space_size(int value) { max_young_space_size_ = value; }
int max_old_space_size() const { return max_old_space_size_; }
void set_max_old_space_size(int value) { max_old_space_size_ = value; }
- int max_executable_size() { return max_executable_size_; }
+ int max_executable_size() const { return max_executable_size_; }
void set_max_executable_size(int value) { max_executable_size_ = value; }
uint32_t* stack_limit() const { return stack_limit_; }
// Sets an address beyond which the VM's stack may not grow.
void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
+ int max_available_threads() const { return max_available_threads_; }
+ // Set the number of threads available to V8, assuming at least 1.
+ void set_max_available_threads(int value) {
+ max_available_threads_ = value;
+ }
private:
int max_young_space_size_;
int max_old_space_size_;
int max_executable_size_;
uint32_t* stack_limit_;
+ int max_available_threads_;
};
: max_young_space_size_(0),
max_old_space_size_(0),
max_executable_size_(0),
- stack_limit_(NULL) { }
+ stack_limit_(NULL),
+ max_available_threads_(0) { }
void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory) {
set_max_old_space_size(700 * lump_of_memory);
set_max_executable_size(256 * lump_of_memory);
}
+
+ set_max_available_threads(0);
}
uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit());
isolate->stack_guard()->SetStackLimit(limit);
}
+
+ isolate->set_max_available_threads(constraints->max_available_threads());
return true;
}
}
void AbortDueToDependencyChange() {
- ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
+ ASSERT(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
abort_due_to_dependency_ = true;
}
bool HasAbortedDueToDependencyChange() {
- ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
+ ASSERT(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
return abort_due_to_dependency_;
}
Deoptimizer::DeoptimizeAll(isolate);
}
if (stack_guard->IsInstallCodeRequest()) {
- ASSERT(FLAG_concurrent_recompilation);
+ ASSERT(isolate->concurrent_recompilation_enabled());
stack_guard->Continue(INSTALL_CODE);
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
}
Isolate* isolate = heap()->isolate();
HandleScope scope(isolate);
- if (FLAG_concurrent_recompilation) {
+ if (isolate->concurrent_recompilation_enabled()) {
isolate->optimizing_compiler_thread()->Flush();
}
store_buffer()->GCPrologue();
- if (FLAG_concurrent_osr) {
+ if (isolate()->concurrent_osr_enabled()) {
isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
}
}
// Note: as weak callbacks can execute arbitrary code, we cannot
// hope that eventually there will be no weak callbacks invocations.
// Therefore stop recollecting after several attempts.
- if (FLAG_concurrent_recompilation) {
+ if (isolate()->concurrent_recompilation_enabled()) {
// The optimizing compiler may be unnecessarily holding on to memory.
DisallowHeapAllocation no_recursive_gc;
isolate()->optimizing_compiler_thread()->Flush();
int Heap::NotifyContextDisposed() {
- if (FLAG_concurrent_recompilation) {
+ if (isolate()->concurrent_recompilation_enabled()) {
// Flush the queued recompilation tasks.
isolate()->optimizing_compiler_thread()->Flush();
}
}
+bool Heap::AdvanceSweepers(int step_size) {
+ ASSERT(isolate()->num_sweeper_threads() == 0);
+ bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
+ sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
+ return sweeping_complete;
+}
+
+
intptr_t Heap::PromotedExternalMemorySize() {
if (amount_of_external_allocated_memory_
<= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
store_buffer()->SetUp();
if (FLAG_concurrent_recompilation) relocation_mutex_ = new Mutex;
-#ifdef DEBUG
- relocation_mutex_locked_by_optimizer_thread_ = false;
-#endif // DEBUG
return true;
}
isolate_->memory_allocator()->TearDown();
delete relocation_mutex_;
+ relocation_mutex_ = NULL;
}
ClearObjectStats();
}
-
-Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
- if (FLAG_concurrent_recompilation) {
- heap_->relocation_mutex_->Lock();
-#ifdef DEBUG
- heap_->relocation_mutex_locked_by_optimizer_thread_ =
- heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
-#endif // DEBUG
- }
-}
-
} } // namespace v8::internal
old_pointer_space()->IsLazySweepingComplete();
}
- bool AdvanceSweepers(int step_size) {
- ASSERT(!FLAG_parallel_sweeping && !FLAG_concurrent_sweeping);
- bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
- sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
- return sweeping_complete;
- }
+ bool AdvanceSweepers(int step_size);
bool EnsureSweepersProgressed(int step_size) {
bool sweeping_complete = old_data_space()->EnsureSweeperProgress(step_size);
// only when FLAG_concurrent_recompilation is true.
class RelocationLock {
public:
- explicit RelocationLock(Heap* heap);
+ explicit RelocationLock(Heap* heap) : heap_(heap) {
+ if (FLAG_concurrent_recompilation) {
+ heap_->relocation_mutex_->Lock();
+ }
+ }
+
~RelocationLock() {
if (FLAG_concurrent_recompilation) {
-#ifdef DEBUG
- heap_->relocation_mutex_locked_by_optimizer_thread_ = false;
-#endif // DEBUG
heap_->relocation_mutex_->Unlock();
}
}
-#ifdef DEBUG
- static bool IsLockedByOptimizerThread(Heap* heap) {
- return heap->relocation_mutex_locked_by_optimizer_thread_;
- }
-#endif // DEBUG
-
private:
Heap* heap_;
};
void HGraph::FinalizeUniqueness() {
DisallowHeapAllocation no_gc;
- ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
+ ASSERT(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
for (int i = 0; i < blocks()->length(); ++i) {
for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) {
it.Current()->FinalizeUniqueness();
void HTracer::TraceLithium(const char* name, LChunk* chunk) {
- ASSERT(!FLAG_concurrent_recompilation);
+ ASSERT(!chunk->isolate()->concurrent_recompilation_enabled());
AllowHandleDereference allow_deref;
AllowDeferredHandleDereference allow_deferred_deref;
Trace(name, chunk->graph(), chunk);
void HTracer::TraceHydrogen(const char* name, HGraph* graph) {
- ASSERT(!FLAG_concurrent_recompilation);
+ ASSERT(!graph->isolate()->concurrent_recompilation_enabled());
AllowHandleDereference allow_deref;
AllowDeferredHandleDereference allow_deferred_deref;
Trace(name, graph, NULL);
}
-int SystemThreadManager::NumberOfParallelSystemThreads(
- ParallelSystemComponent type) {
- int number_of_threads = Min(CPU::NumberOfProcessorsOnline(), kMaxThreads);
- ASSERT(number_of_threads > 0);
- if (number_of_threads == 1) {
- return 0;
- }
- if (type == PARALLEL_SWEEPING) {
- return number_of_threads;
- } else if (type == CONCURRENT_SWEEPING) {
- return number_of_threads - 1;
- }
- return 1;
-}
-
-
// Create a dummy thread that will wait forever on a semaphore. The only
// purpose for this thread is to have some stack area to save essential data
// into for use by a stacks only core dump (aka minidump).
deferred_handles_head_(NULL),
optimizing_compiler_thread_(NULL),
sweeper_thread_(NULL),
+ num_sweeper_threads_(0),
+ max_available_threads_(0),
stress_deopt_count_(0) {
id_ = NoBarrier_AtomicIncrement(&isolate_counter_, 1);
TRACE_ISOLATE(constructor);
debugger()->UnloadDebugger();
#endif
- if (FLAG_concurrent_recompilation) {
+ if (concurrent_recompilation_enabled()) {
optimizing_compiler_thread_->Stop();
delete optimizing_compiler_thread_;
+ optimizing_compiler_thread_ = NULL;
}
- if (FLAG_sweeper_threads > 0) {
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
- sweeper_thread_[i]->Stop();
- delete sweeper_thread_[i];
- }
- delete[] sweeper_thread_;
+ for (int i = 0; i < num_sweeper_threads_; i++) {
+ sweeper_thread_[i]->Stop();
+ delete sweeper_thread_[i];
+ sweeper_thread_[i] = NULL;
}
+ delete[] sweeper_thread_;
+ sweeper_thread_ = NULL;
+
if (FLAG_hydrogen_stats) GetHStatistics()->Print();
deoptimizer_data_ = new DeoptimizerData(memory_allocator_);
- if (FLAG_concurrent_recompilation) {
- optimizing_compiler_thread_ = new OptimizingCompilerThread(this);
- optimizing_compiler_thread_->Start();
- }
-
const bool create_heap_objects = (des == NULL);
if (create_heap_objects && !heap_.CreateHeapObjects()) {
V8::FatalProcessOutOfMemory("heap object creation");
if (create_heap_objects) heap_.CreateStubsRequiringBuiltins();
+ // Set default value if not yet set.
+ // TODO(yangguo): move this to ResourceConstraints::ConfigureDefaults
+ // once ResourceConstraints becomes an argument to the Isolate constructor.
+ if (max_available_threads_ < 1) {
+ // Choose the default between 1 and 4.
+ max_available_threads_ = Max(Min(CPU::NumberOfProcessorsOnline(), 4), 1);
+ }
+
+ num_sweeper_threads_ = SweeperThread::NumberOfThreads(max_available_threads_);
+
+ if (FLAG_trace_hydrogen || FLAG_trace_hydrogen_stubs) {
+ PrintF("Concurrent recompilation has been disabled for tracing.\n");
+ } else if (OptimizingCompilerThread::Enabled(max_available_threads_)) {
+ optimizing_compiler_thread_ = new OptimizingCompilerThread(this);
+ optimizing_compiler_thread_->Start();
+ }
+
+ if (num_sweeper_threads_ > 0) {
+ sweeper_thread_ = new SweeperThread*[num_sweeper_threads_];
+ for (int i = 0; i < num_sweeper_threads_; i++) {
+ sweeper_thread_[i] = new SweeperThread(this);
+ sweeper_thread_[i]->Start();
+ }
+ }
+
// Only preallocate on the first initialization.
if (FLAG_preallocate_message_memory && preallocated_message_space_ == NULL) {
// Start the thread which will set aside some memory.
NewStringAddStub::InstallDescriptors(this);
}
- if (FLAG_sweeper_threads > 0) {
- sweeper_thread_ = new SweeperThread*[FLAG_sweeper_threads];
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
- sweeper_thread_[i] = new SweeperThread(this);
- sweeper_thread_[i]->Start();
- }
- }
-
initialized_from_snapshot_ = (des != NULL);
return true;
};
-class SystemThreadManager {
- public:
- enum ParallelSystemComponent {
- PARALLEL_SWEEPING,
- CONCURRENT_SWEEPING,
- CONCURRENT_RECOMPILATION
- };
-
- static int NumberOfParallelSystemThreads(ParallelSystemComponent type);
-
- static const int kMaxThreads = 4;
-};
-
-
#ifdef ENABLE_DEBUGGER_SUPPORT
#define ISOLATE_DEBUGGER_INIT_LIST(V) \
bool IsDeferredHandle(Object** location);
#endif // DEBUG
+ void set_max_available_threads(int value) {
+ max_available_threads_ = value;
+ }
+
+ bool concurrent_recompilation_enabled() {
+ // Thread is only available with flag enabled.
+ ASSERT(optimizing_compiler_thread_ == NULL ||
+ FLAG_concurrent_recompilation);
+ return optimizing_compiler_thread_ != NULL;
+ }
+
+ bool concurrent_osr_enabled() {
+ // Thread is only available with flag enabled.
+ ASSERT(optimizing_compiler_thread_ == NULL ||
+ FLAG_concurrent_recompilation);
+ return optimizing_compiler_thread_ != NULL && FLAG_concurrent_osr;
+ }
+
OptimizingCompilerThread* optimizing_compiler_thread() {
return optimizing_compiler_thread_;
}
- // PreInits and returns a default isolate. Needed when a new thread tries
- // to create a Locker for the first time (the lock itself is in the isolate).
- // TODO(svenpanne) This method is on death row...
- static v8::Isolate* GetDefaultIsolateForLocking();
+  int num_sweeper_threads() {
+ return num_sweeper_threads_;
+ }
SweeperThread** sweeper_threads() {
return sweeper_thread_;
}
+ // PreInits and returns a default isolate. Needed when a new thread tries
+ // to create a Locker for the first time (the lock itself is in the isolate).
+ // TODO(svenpanne) This method is on death row...
+ static v8::Isolate* GetDefaultIsolateForLocking();
+
int id() const { return static_cast<int>(id_); }
HStatistics* GetHStatistics();
DeferredHandles* deferred_handles_head_;
OptimizingCompilerThread* optimizing_compiler_thread_;
SweeperThread** sweeper_thread_;
+ int num_sweeper_threads_;
+
+ // TODO(yangguo): This will become obsolete once ResourceConstraints
+ // becomes an argument to Isolate constructor.
+ int max_available_threads_;
// Counts deopt points if deopt_every_n_times is enabled.
unsigned int stress_deopt_count_;
void MarkCompactCollector::StartSweeperThreads() {
sweeping_pending_ = true;
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->StartSweeping();
}
}
void MarkCompactCollector::WaitUntilSweepingCompleted() {
ASSERT(sweeping_pending_ == true);
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->WaitForSweeperThread();
}
sweeping_pending_ = false;
intptr_t MarkCompactCollector::
StealMemoryFromSweeperThreads(PagedSpace* space) {
intptr_t freed_bytes = 0;
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
freed_bytes += isolate()->sweeper_threads()[i]->StealMemory(space);
}
space->AddToAccountingStats(freed_bytes);
#endif
SweeperType how_to_sweep =
FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
- if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
- if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
+ if (isolate()->num_sweeper_threads() > 0) {
+ if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
+ if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
+ }
if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
if (sweep_precisely_) how_to_sweep = PRECISE;
ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
ASSERT(!shared()->is_generator());
- ASSERT(FLAG_concurrent_recompilation);
+ ASSERT(GetIsolate()->concurrent_recompilation_enabled());
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Marking ");
PrintName();
ASSERT(!GetIsolate()->DebuggerHasBreakPoints());
ASSERT(IsMarkedForConcurrentRecompilation() && !IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
- ASSERT(FLAG_concurrent_recompilation);
+ ASSERT(GetIsolate()->concurrent_recompilation_enabled());
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Queueing ");
PrintName();
#ifdef DEBUG
+bool OptimizingCompilerThread::IsOptimizerThread(Isolate* isolate) {
+ return isolate->concurrent_recompilation_enabled() &&
+ isolate->optimizing_compiler_thread()->IsOptimizerThread();
+}
+
+
bool OptimizingCompilerThread::IsOptimizerThread() {
- if (!FLAG_concurrent_recompilation) return false;
LockGuard<Mutex> lock_guard(&thread_id_mutex_);
return ThreadId::Current().ToInteger() == thread_id_;
}
AddToOsrBuffer(NULL);
}
+ static bool Enabled(int max_available) {
+ return (FLAG_concurrent_recompilation && max_available > 1);
+ }
+
#ifdef DEBUG
+ static bool IsOptimizerThread(Isolate* isolate);
bool IsOptimizerThread();
#endif
}
- if (FLAG_concurrent_recompilation && !isolate_->bootstrapper()->IsActive()) {
- if (FLAG_concurrent_osr &&
+ if (isolate_->concurrent_recompilation_enabled() &&
+ !isolate_->bootstrapper()->IsActive()) {
+ if (isolate_->concurrent_osr_enabled() &&
isolate_->optimizing_compiler_thread()->IsQueuedForOSR(function)) {
// Do not attempt regular recompilation if we already queued this for OSR.
// TODO(yangguo): This is necessary so that we don't install optimized
return isolate->heap()->undefined_value();
}
function->shared()->code()->set_profiler_ticks(0);
- ASSERT(FLAG_concurrent_recompilation);
+ ASSERT(isolate->concurrent_recompilation_enabled());
if (!Compiler::RecompileConcurrent(function)) {
function->ReplaceCode(function->shared()->code());
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsConcurrentRecompilationSupported) {
HandleScope scope(isolate);
- return FLAG_concurrent_recompilation
+ return isolate->concurrent_recompilation_enabled()
? isolate->heap()->true_value() : isolate->heap()->false_value();
}
}
}
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (FLAG_concurrent_recompilation && sync_with_compiler_thread) {
+ if (isolate->concurrent_recompilation_enabled() &&
+ sync_with_compiler_thread) {
while (function->IsInRecompileQueue()) {
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
OS::Sleep(50);
Handle<Code> result = Handle<Code>::null();
BailoutId ast_id = BailoutId::None();
- if (FLAG_concurrent_osr) {
+ if (isolate->concurrent_osr_enabled()) {
if (isolate->optimizing_compiler_thread()->
IsQueuedForOSR(function, pc_offset)) {
// Still waiting for the optimizing compiler thread to finish. Carry on.
void SweeperThread::WaitForSweeperThread() {
end_sweeping_semaphore_.Wait();
}
+
+
+int SweeperThread::NumberOfThreads(int max_available) {
+ if (!FLAG_concurrent_sweeping && !FLAG_parallel_sweeping) return 0;
+ if (FLAG_sweeper_threads > 0) return FLAG_sweeper_threads;
+ if (FLAG_concurrent_sweeping) return max_available - 1;
+ ASSERT(FLAG_parallel_sweeping);
+ return max_available;
+}
+
} } // namespace v8::internal
void WaitForSweeperThread();
intptr_t StealMemory(PagedSpace* space);
+ static int NumberOfThreads(int max_available);
+
private:
Isolate* isolate_;
Heap* heap_;
FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
}
- if (FLAG_concurrent_recompilation &&
- (FLAG_trace_hydrogen || FLAG_trace_hydrogen_stubs)) {
- FLAG_concurrent_recompilation = false;
- FLAG_concurrent_osr = false;
- PrintF("Concurrent recompilation has been disabled for tracing.\n");
- }
-
- if (FLAG_sweeper_threads <= 0) {
- if (FLAG_concurrent_sweeping) {
- FLAG_sweeper_threads = SystemThreadManager::
- NumberOfParallelSystemThreads(
- SystemThreadManager::CONCURRENT_SWEEPING);
- } else if (FLAG_parallel_sweeping) {
- FLAG_sweeper_threads = SystemThreadManager::
- NumberOfParallelSystemThreads(
- SystemThreadManager::PARALLEL_SWEEPING);
- }
- if (FLAG_sweeper_threads == 0) {
- FLAG_concurrent_sweeping = false;
- FLAG_parallel_sweeping = false;
- }
- } else if (!FLAG_concurrent_sweeping && !FLAG_parallel_sweeping) {
- FLAG_sweeper_threads = 0;
- }
-
- if (FLAG_concurrent_recompilation &&
- SystemThreadManager::NumberOfParallelSystemThreads(
- SystemThreadManager::CONCURRENT_RECOMPILATION) == 0) {
- FLAG_concurrent_recompilation = false;
- FLAG_concurrent_osr = false;
- }
-
Sampler::SetUp();
CPU::SetUp();
OS::PostSetUp();
// Utility class to set --allow-natives-syntax and --nouse-inlining when
// constructed and return to their default state when destroyed.
-class AllowNativesSyntaxNoInliningNoConcurrent {
+class AllowNativesSyntaxNoInlining {
public:
- AllowNativesSyntaxNoInliningNoConcurrent()
+ AllowNativesSyntaxNoInlining()
: allow_natives_syntax_(i::FLAG_allow_natives_syntax),
- use_inlining_(i::FLAG_use_inlining),
- concurrent_recompilation_(i::FLAG_concurrent_recompilation) {
+ use_inlining_(i::FLAG_use_inlining) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_use_inlining = false;
- i::FLAG_concurrent_recompilation = false;
}
- ~AllowNativesSyntaxNoInliningNoConcurrent() {
+ ~AllowNativesSyntaxNoInlining() {
i::FLAG_allow_natives_syntax = allow_natives_syntax_;
i::FLAG_use_inlining = use_inlining_;
- i::FLAG_concurrent_recompilation = concurrent_recompilation_;
}
private:
bool allow_natives_syntax_;
bool use_inlining_;
- bool concurrent_recompilation_;
};
TEST(DeoptimizeBinaryOperationADDString) {
+ i::FLAG_concurrent_recompilation = false;
+ AllowNativesSyntaxNoInlining options;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
const char* f_source = "function f(x, y) { return x + y; };";
{
- AllowNativesSyntaxNoInliningNoConcurrent options;
// Compile function f and collect to type feedback to insert binary op stub
// call in the optimized code.
i::FLAG_prepare_always_opt = true;
binary_op);
char* f_source = f_source_buffer.start();
- AllowNativesSyntaxNoInliningNoConcurrent options;
+ AllowNativesSyntaxNoInlining options;
// Compile function f and collect to type feedback to insert binary op stub
// call in the optimized code.
i::FLAG_prepare_always_opt = true;
TEST(DeoptimizeBinaryOperationADD) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
TEST(DeoptimizeBinaryOperationSUB) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
TEST(DeoptimizeBinaryOperationMUL) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
TEST(DeoptimizeBinaryOperationDIV) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
TEST(DeoptimizeBinaryOperationMOD) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
TEST(DeoptimizeCompare) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
const char* f_source = "function f(x, y) { return x < y; };";
{
- AllowNativesSyntaxNoInliningNoConcurrent options;
+ AllowNativesSyntaxNoInlining options;
// Compile function f and collect to type feedback to insert compare ic
// call in the optimized code.
i::FLAG_prepare_always_opt = true;
TEST(DeoptimizeLoadICStoreIC) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
const char* g2_source = "function g2(x, y) { x[y] = 1; };";
{
- AllowNativesSyntaxNoInliningNoConcurrent options;
+ AllowNativesSyntaxNoInlining options;
// Compile functions and collect to type feedback to insert ic
// calls in the optimized code.
i::FLAG_prepare_always_opt = true;
TEST(DeoptimizeLoadICStoreICNested) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
const char* g2_source = "function g2(x, y) { x[y] = 1; };";
{
- AllowNativesSyntaxNoInliningNoConcurrent options;
+ AllowNativesSyntaxNoInlining options;
// Compile functions and collect to type feedback to insert ic
// calls in the optimized code.
i::FLAG_prepare_always_opt = true;
// after the first time the accessor is fired. We use external string
// to check whether the data is being released since the external string
// resource's callback is fired when the external string is GC'ed.
- FLAG_use_ic = false; // ICs retain objects.
- FLAG_concurrent_recompilation = false;
v8::HandleScope scope(CcTest::isolate());
SourceResource* resource = new SourceResource(i::StrDup(source));
{
TEST(ReleaseStackTraceData) {
+ FLAG_use_ic = false; // ICs retain objects.
+ FLAG_concurrent_recompilation = false;
CcTest::InitializeVM();
static const char* source1 = "var error = null; "
/* Normal Error */ "try { "