double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
double_constants.negative_infinity = -V8_INFINITY;
- math_exp_data_mutex = OS::CreateMutex();
+ math_exp_data_mutex = new Mutex();
}
// Early return?
if (math_exp_data_initialized) return;
- math_exp_data_mutex->Lock();
+ LockGuard<Mutex> lock_guard(math_exp_data_mutex);
if (!math_exp_data_initialized) {
// If this is changed, generated code must be adapted too.
const int kTableSizeBits = 11;
math_exp_data_initialized = true;
}
- math_exp_data_mutex->Unlock();
}
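The hunk above keeps the existing double-checked initialization shape and only swaps the manual Lock()/Unlock() pair for a scoped guard. A minimal sketch of that pattern with the new LockGuard (init_mutex, data_initialized and InitializeData are illustrative names, not part of this change):

void InitializeData();                   // hypothetical one-time setup
static Mutex* init_mutex = new Mutex();  // guards the lazy initialization
static bool data_initialized = false;    // written only while holding init_mutex

void EnsureDataInitialized() {
  if (data_initialized) return;          // fast path, no locking
  LockGuard<Mutex> lock_guard(init_mutex);
  if (!data_initialized) {               // re-check now that the lock is held
    InitializeData();
    data_initialized = true;
  }
}                                        // guard releases the mutex here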
void RemoteDebugger::AddEvent(RemoteDebuggerEvent* event) {
- i::ScopedLock lock(event_access_);
+ i::LockGuard<i::Mutex> lock_guard(&event_access_);
if (head_ == NULL) {
ASSERT(tail_ == NULL);
head_ = event;
RemoteDebuggerEvent* RemoteDebugger::GetEvent() {
- i::ScopedLock lock(event_access_);
+ i::LockGuard<i::Mutex> lock_guard(&event_access_);
ASSERT(head_ != NULL);
RemoteDebuggerEvent* result = head_;
head_ = head_->next();
explicit RemoteDebugger(Isolate* isolate, int port)
: isolate_(isolate),
port_(port),
- event_access_(i::OS::CreateMutex()),
event_available_(i::OS::CreateSemaphore(0)),
head_(NULL), tail_(NULL) {}
void Run();
// Linked list of events from debugged V8 and from keyboard input. Access to
// the list is guarded by a mutex and a semaphore signals new items in the
// list.
- i::Mutex* event_access_;
+ i::Mutex event_access_;
i::Semaphore* event_available_;
RemoteDebuggerEvent* head_;
RemoteDebuggerEvent* tail_;
i::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
CounterCollection Shell::local_counters_;
CounterCollection* Shell::counters_ = &local_counters_;
-i::Mutex* Shell::context_mutex_(i::OS::CreateMutex());
+i::Mutex Shell::context_mutex_;
Persistent<Context> Shell::utility_context_;
#endif // V8_SHARED
Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
#ifndef V8_SHARED
// This needs to be a critical section since this is not thread-safe
- i::ScopedLock lock(context_mutex_);
+ i::LockGuard<i::Mutex> lock_guard(&context_mutex_);
#endif // V8_SHARED
// Initialize the global objects
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
"-------------+\n");
delete [] counters;
}
- delete context_mutex_;
delete counters_file_;
delete counter_map_;
#endif // V8_SHARED
static CounterCollection local_counters_;
static CounterCollection* counters_;
static i::OS::MemoryMappedFile* counters_file_;
- static i::Mutex* context_mutex_;
+ static i::Mutex context_mutex_;
static Counter* GetCounter(const char* name, bool is_histogram);
static void InstallUtilityScript(Isolate* isolate);
"Remote debugging session already active\r\n";
void DebuggerAgent::CreateSession(Socket* client) {
- ScopedLock with(session_access_);
+ LockGuard<RecursiveMutex> session_access_guard(&session_access_);
// If another session is already established terminate this one.
if (session_ != NULL) {
void DebuggerAgent::CloseSession() {
- ScopedLock with(session_access_);
+ LockGuard<RecursiveMutex> session_access_guard(&session_access_);
// Terminate the session.
if (session_ != NULL) {
void DebuggerAgent::DebuggerMessage(const v8::Debug::Message& message) {
- ScopedLock with(session_access_);
+ LockGuard<RecursiveMutex> session_access_guard(&session_access_);
// Forward the message handling to the session.
if (session_ != NULL) {
}
// Terminate the session.
- ScopedLock with(session_access_);
+ LockGuard<RecursiveMutex> session_access_guard(&session_access_);
ASSERT(session == session_);
if (session == session_) {
session_->Shutdown();
isolate_(Isolate::Current()),
name_(StrDup(name)), port_(port),
server_(OS::CreateSocket()), terminate_(false),
- session_access_(OS::CreateMutex()), session_(NULL),
+ session_(NULL),
terminate_now_(OS::CreateSemaphore(0)),
listening_(OS::CreateSemaphore(0)) {
ASSERT(isolate_->debugger_agent_instance() == NULL);
int port_; // Port to use for the agent.
Socket* server_; // Server socket for listen/accept.
bool terminate_; // Termination flag.
- Mutex* session_access_; // Mutex guarging access to session_.
+ RecursiveMutex session_access_; // Mutex guarding access to session_.
DebuggerAgentSession* session_; // Current active session if any.
Semaphore* terminate_now_; // Semaphore to signal termination.
Semaphore* listening_;
message_handler_(NULL),
debugger_unload_pending_(false),
host_dispatch_handler_(NULL),
- dispatch_handler_access_(OS::CreateMutex()),
debug_message_dispatch_handler_(NULL),
message_dispatch_helper_thread_(NULL),
host_dispatch_micros_(100 * 1000),
Debugger::~Debugger() {
- delete dispatch_handler_access_;
- dispatch_handler_access_ = 0;
delete command_received_;
command_received_ = 0;
}
void Debugger::SetMessageHandler(v8::Debug::MessageHandler2 handler) {
- ScopedLock with(debugger_access_);
+ LockGuard<RecursiveMutex> with(debugger_access_);
message_handler_ = handler;
ListenersChanged();
void Debugger::SetDebugMessageDispatchHandler(
v8::Debug::DebugMessageDispatchHandler handler, bool provide_locker) {
- ScopedLock with(dispatch_handler_access_);
+ LockGuard<Mutex> lock_guard(&dispatch_handler_access_);
debug_message_dispatch_handler_ = handler;
if (provide_locker && message_dispatch_helper_thread_ == NULL) {
// Calls the registered debug message handler. This callback is part of the
// public API.
void Debugger::InvokeMessageHandler(MessageImpl message) {
- ScopedLock with(debugger_access_);
+ LockGuard<RecursiveMutex> with(debugger_access_);
if (message_handler_ != NULL) {
message_handler_(message);
MessageDispatchHelperThread* dispatch_thread;
{
- ScopedLock with(dispatch_handler_access_);
+ LockGuard<Mutex> lock_guard(&dispatch_handler_access_);
dispatch_thread = message_dispatch_helper_thread_;
}
bool Debugger::IsDebuggerActive() {
- ScopedLock with(debugger_access_);
+ LockGuard<RecursiveMutex> with(debugger_access_);
return message_handler_ != NULL ||
!event_listener_.is_null() ||
void Debugger::CallMessageDispatchHandler() {
v8::Debug::DebugMessageDispatchHandler handler;
{
- ScopedLock with(dispatch_handler_access_);
+ LockGuard<Mutex> lock_guard(&dispatch_handler_access_);
handler = Debugger::debug_message_dispatch_handler_;
}
if (handler != NULL) {
LockingCommandMessageQueue::LockingCommandMessageQueue(Logger* logger, int size)
- : logger_(logger), queue_(size) {
- lock_ = OS::CreateMutex();
-}
-
-
-LockingCommandMessageQueue::~LockingCommandMessageQueue() {
- delete lock_;
-}
+ : logger_(logger), queue_(size) {}
bool LockingCommandMessageQueue::IsEmpty() const {
- ScopedLock sl(lock_);
+ LockGuard<Mutex> lock_guard(&mutex_);
return queue_.IsEmpty();
}
CommandMessage LockingCommandMessageQueue::Get() {
- ScopedLock sl(lock_);
+ LockGuard<Mutex> lock_guard(&mutex_);
CommandMessage result = queue_.Get();
logger_->DebugEvent("Get", result.text());
return result;
void LockingCommandMessageQueue::Put(const CommandMessage& message) {
- ScopedLock sl(lock_);
+ LockGuard<Mutex> lock_guard(&mutex_);
queue_.Put(message);
logger_->DebugEvent("Put", message.text());
}
void LockingCommandMessageQueue::Clear() {
- ScopedLock sl(lock_);
+ LockGuard<Mutex> lock_guard(&mutex_);
queue_.Clear();
}
MessageDispatchHelperThread::MessageDispatchHelperThread(Isolate* isolate)
: Thread("v8:MsgDispHelpr"),
isolate_(isolate), sem_(OS::CreateSemaphore(0)),
- mutex_(OS::CreateMutex()), already_signalled_(false) {
+ already_signalled_(false) {
}
MessageDispatchHelperThread::~MessageDispatchHelperThread() {
- delete mutex_;
delete sem_;
}
void MessageDispatchHelperThread::Schedule() {
{
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(&mutex_);
if (already_signalled_) {
return;
}
while (true) {
sem_->Wait();
{
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(&mutex_);
already_signalled_ = false;
}
{
class LockingCommandMessageQueue BASE_EMBEDDED {
public:
LockingCommandMessageQueue(Logger* logger, int size);
- ~LockingCommandMessageQueue();
bool IsEmpty() const;
CommandMessage Get();
void Put(const CommandMessage& message);
private:
Logger* logger_;
CommandMessageQueue queue_;
- Mutex* lock_;
+ mutable Mutex mutex_;
DISALLOW_COPY_AND_ASSIGN(LockingCommandMessageQueue);
};
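The mutable qualifier above is needed because IsEmpty() is a const member function but still has to lock the now-embedded mutex; with the old Mutex* member this was not an issue. A small sketch of the same idiom (Counter and value_ are illustrative names):

class Counter {
 public:
  Counter() : value_(0) {}
  void Increment() {
    LockGuard<Mutex> lock_guard(&mutex_);
    value_++;
  }
  int Get() const {
    // Locking modifies the mutex, so the member must be mutable to be
    // usable from const member functions like this one.
    LockGuard<Mutex> lock_guard(&mutex_);
    return value_;
  }
 private:
  mutable Mutex mutex_;
  int value_;
};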
friend void ForceUnloadDebugger(); // In test-debug.cc
inline bool EventActive(v8::DebugEvent event) {
- ScopedLock with(debugger_access_);
+ LockGuard<RecursiveMutex> lock_guard(debugger_access_);
// Check whether the message handler has been cleared.
if (debugger_unload_pending_) {
Handle<Object> event_data);
void ListenersChanged();
- Mutex* debugger_access_; // Mutex guarding debugger variables.
+ RecursiveMutex* debugger_access_; // Mutex guarding debugger variables.
Handle<Object> event_listener_; // Global handle to listener.
Handle<Object> event_listener_data_;
bool compiling_natives_; // Are we compiling natives?
v8::Debug::MessageHandler2 message_handler_;
bool debugger_unload_pending_; // Was message handler cleared?
v8::Debug::HostDispatchHandler host_dispatch_handler_;
- Mutex* dispatch_handler_access_; // Mutex guarding dispatch handler.
+ Mutex dispatch_handler_access_; // Mutex guarding dispatch handler.
v8::Debug::DebugMessageDispatchHandler debug_message_dispatch_handler_;
MessageDispatchHelperThread* message_dispatch_helper_thread_;
int host_dispatch_micros_;
Isolate* isolate_;
Semaphore* const sem_;
- Mutex* const mutex_;
+ Mutex mutex_;
bool already_signalled_;
DISALLOW_COPY_AND_ASSIGN(MessageDispatchHelperThread);
CompilationInfo* info) {
if (!FLAG_gdbjit) return;
- ScopedLock lock(mutex.Pointer());
+ LockGuard<Mutex> lock_guard(mutex.Pointer());
DisallowHeapAllocation no_gc;
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
void GDBJITInterface::RemoveCode(Code* code) {
if (!FLAG_gdbjit) return;
- ScopedLock lock(mutex.Pointer());
+ LockGuard<Mutex> lock_guard(mutex.Pointer());
HashMap::Entry* e = GetEntries()->Lookup(code,
HashForCodeObject(code),
false);
void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
GDBJITLineInfo* line_info) {
- ScopedLock lock(mutex.Pointer());
+ LockGuard<Mutex> lock_guard(mutex.Pointer());
ASSERT(!IsLineInfoTagged(line_info));
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
ASSERT(e->value == NULL);
store_buffer()->SetUp();
- if (FLAG_concurrent_recompilation) relocation_mutex_ = OS::CreateMutex();
+ if (FLAG_concurrent_recompilation) relocation_mutex_ = new Mutex;
#ifdef DEBUG
relocation_mutex_locked_by_optimizer_thread_ = false;
#endif // DEBUG
void Heap::CheckpointObjectStats() {
- ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
+ LockGuard<Mutex> lock_guard(checkpoint_object_stats_mutex.Pointer());
Counters* counters = isolate()->counters();
#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
counters->count_of_##name()->Increment( \
void CheckpointObjectStats();
- // We don't use a ScopedLock here since we want to lock the heap
+ // We don't use a LockGuard here since we want to lock the heap
// only when FLAG_concurrent_recompilation is true.
class RelocationLock {
public:
#ifdef DEBUG
Thread::LocalStorageKey PerThreadAssertScopeBase::thread_local_key;
#endif // DEBUG
-Mutex* Isolate::process_wide_mutex_ = OS::CreateMutex();
+RecursiveMutex Isolate::process_wide_mutex_;
Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
Atomic32 Isolate::isolate_counter_ = 0;
ASSERT(!thread_id.Equals(ThreadId::Invalid()));
PerIsolateThreadData* per_thread = new PerIsolateThreadData(this, thread_id);
{
- ScopedLock lock(process_wide_mutex_);
+ LockGuard<RecursiveMutex> lock_guard(&process_wide_mutex_);
ASSERT(thread_data_table_->Lookup(this, thread_id) == NULL);
thread_data_table_->Insert(per_thread);
ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread);
ThreadId thread_id = ThreadId::Current();
PerIsolateThreadData* per_thread = NULL;
{
- ScopedLock lock(process_wide_mutex_);
+ LockGuard<RecursiveMutex> lock_guard(&process_wide_mutex_);
per_thread = thread_data_table_->Lookup(this, thread_id);
if (per_thread == NULL) {
per_thread = AllocatePerIsolateThreadData(thread_id);
ThreadId thread_id) {
PerIsolateThreadData* per_thread = NULL;
{
- ScopedLock lock(process_wide_mutex_);
+ LockGuard<RecursiveMutex> lock_guard(&process_wide_mutex_);
per_thread = thread_data_table_->Lookup(this, thread_id);
}
return per_thread;
void Isolate::EnsureDefaultIsolate() {
- ScopedLock lock(process_wide_mutex_);
+ LockGuard<RecursiveMutex> lock_guard(&process_wide_mutex_);
if (default_isolate_ == NULL) {
isolate_key_ = Thread::CreateThreadLocalKey();
thread_id_key_ = Thread::CreateThreadLocalKey();
compilation_cache_(NULL),
counters_(NULL),
code_range_(NULL),
- // Must be initialized early to allow v8::SetResourceConstraints calls.
- break_access_(OS::CreateMutex()),
debugger_initialized_(false),
- // Must be initialized early to allow v8::Debug calls.
- debugger_access_(OS::CreateMutex()),
logger_(NULL),
stats_table_(NULL),
stub_cache_(NULL),
Deinit();
- { ScopedLock lock(process_wide_mutex_);
+ { LockGuard<RecursiveMutex> lock_guard(&process_wide_mutex_);
thread_data_table_->RemoveAllThreads(this);
}
delete handle_scope_implementer_;
handle_scope_implementer_ = NULL;
- delete break_access_;
- break_access_ = NULL;
- delete debugger_access_;
- debugger_access_ = NULL;
delete compilation_cache_;
compilation_cache_ = NULL;
void Isolate::InitializeDebugger() {
#ifdef ENABLE_DEBUGGER_SUPPORT
- ScopedLock lock(debugger_access_);
+ LockGuard<RecursiveMutex> lock_guard(debugger_access());
if (NoBarrier_Load(&debugger_initialized_)) return;
InitializeLoggingAndCounters();
debug_ = new Debug(this);
static void EnterDefaultIsolate();
// Mutex for serializing access to break control structures.
- Mutex* break_access() { return break_access_; }
+ RecursiveMutex* break_access() { return &break_access_; }
// Mutex for serializing access to debugger.
- Mutex* debugger_access() { return debugger_access_; }
+ RecursiveMutex* debugger_access() { return &debugger_access_; }
Address get_address_from_id(AddressId id);
// This mutex protects highest_thread_id_, thread_data_table_ and
// default_isolate_.
- static Mutex* process_wide_mutex_;
+ static RecursiveMutex process_wide_mutex_;
static Thread::LocalStorageKey per_isolate_thread_data_key_;
static Thread::LocalStorageKey isolate_key_;
CompilationCache* compilation_cache_;
Counters* counters_;
CodeRange* code_range_;
- Mutex* break_access_;
+ RecursiveMutex break_access_;
Atomic32 debugger_initialized_;
- Mutex* debugger_access_;
+ RecursiveMutex debugger_access_;
Logger* logger_;
StackGuard stack_guard_;
StatsTable* stats_table_;
}
~ExecutionAccess() { Unlock(isolate_); }
- static void Lock(Isolate* isolate) { isolate->break_access_->Lock(); }
- static void Unlock(Isolate* isolate) { isolate->break_access_->Unlock(); }
+ static void Lock(Isolate* isolate) { isolate->break_access()->Lock(); }
+ static void Unlock(Isolate* isolate) { isolate->break_access()->Unlock(); }
static bool TryLock(Isolate* isolate) {
- return isolate->break_access_->TryLock();
+ return isolate->break_access()->TryLock();
}
private:
Log::Log(Logger* logger)
: is_stopped_(false),
output_handle_(NULL),
- mutex_(NULL),
message_buffer_(NULL),
logger_(logger) {
}
void Log::Initialize(const char* log_file_name) {
- mutex_ = OS::CreateMutex();
message_buffer_ = NewArray<char>(kMessageBufferSize);
// --log-all enables all the log flags.
DeleteArray(message_buffer_);
message_buffer_ = NULL;
- delete mutex_;
- mutex_ = NULL;
-
is_stopped_ = false;
return result;
}
Log::MessageBuilder::MessageBuilder(Log* log)
: log_(log),
- sl(log_->mutex_),
+ lock_guard_(&log_->mutex_),
pos_(0) {
ASSERT(log_->message_buffer_ != NULL);
}
private:
Log* log_;
- ScopedLock sl;
+ LockGuard<Mutex> lock_guard_;
int pos_;
};
// mutex_ is a Mutex used for enforcing exclusive
// access to the formatting buffer and the log file or log memory buffer.
- Mutex* mutex_;
+ Mutex mutex_;
// Buffer used for formatting log messages. This is a singleton buffer and
// mutex_ should be acquired before using it.
void OptimizingCompilerThread::Run() {
#ifdef DEBUG
- { ScopedLock lock(thread_id_mutex_);
+ { LockGuard<Mutex> lock_guard(&thread_id_mutex_);
thread_id_ = ThreadId::Current().ToInteger();
}
#endif
// The function may have already been optimized by OSR. Simply continue.
// Use a mutex to make sure that functions marked for install
// are always also queued.
- ScopedLock mark_and_queue(install_mutex_);
+ LockGuard<Mutex> mark_and_queue(&install_mutex_);
{ Heap::RelocationLock relocation_lock(isolate_->heap());
AllowHandleDereference ahd;
optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
OptimizingCompiler* compiler;
while (true) {
{ // Memory barrier to ensure marked functions are queued.
- ScopedLock marked_and_queued(install_mutex_);
+ LockGuard<Mutex> marked_and_queued(&install_mutex_);
if (!output_queue_.Dequeue(&compiler)) return;
}
Compiler::InstallOptimizedCode(compiler);
#ifdef DEBUG
bool OptimizingCompilerThread::IsOptimizerThread() {
if (!FLAG_concurrent_recompilation) return false;
- ScopedLock lock(thread_id_mutex_);
+ LockGuard<Mutex> lock_guard(&thread_id_mutex_);
return ThreadId::Current().ToInteger() == thread_id_;
}
#endif
#include "atomicops.h"
#include "flags.h"
#include "platform.h"
+#include "platform/mutex.h"
#include "platform/time.h"
#include "unbound-queue-inl.h"
Thread("OptimizingCompilerThread"),
#ifdef DEBUG
thread_id_(0),
- thread_id_mutex_(OS::CreateMutex()),
#endif
isolate_(isolate),
stop_semaphore_(OS::CreateSemaphore(0)),
- input_queue_semaphore_(OS::CreateSemaphore(0)),
- install_mutex_(OS::CreateMutex()) {
+ input_queue_semaphore_(OS::CreateSemaphore(0)) {
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
}
#endif
~OptimizingCompilerThread() {
- delete install_mutex_;
delete input_queue_semaphore_;
delete stop_semaphore_;
#ifdef DEBUG
- delete thread_id_mutex_;
#endif
}
#ifdef DEBUG
int thread_id_;
- Mutex* thread_id_mutex_;
+ Mutex thread_id_mutex_;
#endif
Isolate* isolate_;
Semaphore* input_queue_semaphore_;
UnboundQueue<OptimizingCompiler*> input_queue_;
UnboundQueue<OptimizingCompiler*> output_queue_;
- Mutex* install_mutex_;
+ Mutex install_mutex_;
volatile AtomicWord stop_thread_;
volatile Atomic32 queue_length_;
TimeDelta time_spent_compiling_;
static void UpdateAllocatedSpaceLimits(void* address, int size) {
ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
+ LockGuard<Mutex> lock_guard(limit_mutex);
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
+ limit_mutex = new Mutex();
}
static void UpdateAllocatedSpaceLimits(void* address, int size) {
ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
+ LockGuard<Mutex> lock_guard(limit_mutex);
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
+ limit_mutex = new Mutex();
}
static void UpdateAllocatedSpaceLimits(void* address, int size) {
ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
+ LockGuard<Mutex> lock_guard(limit_mutex);
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
// Seed the random number generator. We preserve microsecond resolution.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()) ^ (getpid() << 16);
srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
+ limit_mutex = new Mutex();
}
static void UpdateAllocatedSpaceLimits(void* address, int size) {
ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
+ LockGuard<Mutex> lock(limit_mutex);
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
// Seed the random number generator. We preserve microsecond resolution.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()) ^ (getpid() << 16);
srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
+ limit_mutex = new Mutex();
}
static void UpdateAllocatedSpaceLimits(void* address, int size) {
ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
+ LockGuard<Mutex> lock(limit_mutex);
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
// Seed the random number generator. We preserve microsecond resolution.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()) ^ (getpid() << 16);
srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
+ limit_mutex = new Mutex();
}
}
-class POSIXMutex : public Mutex {
- public:
- POSIXMutex() {
- pthread_mutexattr_t attr;
- memset(&attr, 0, sizeof(attr));
- int result = pthread_mutexattr_init(&attr);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attr);
- ASSERT(result == 0);
- result = pthread_mutexattr_destroy(&attr);
- ASSERT(result == 0);
- USE(result);
- }
-
- virtual ~POSIXMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() { return pthread_mutex_lock(&mutex_); }
-
- virtual int Unlock() { return pthread_mutex_unlock(&mutex_); }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new POSIXMutex();
-}
-
-
// ----------------------------------------------------------------------------
// POSIX socket support.
//
static void UpdateAllocatedSpaceLimits(void* address, int size) {
ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
+ LockGuard<Mutex> lock_guard(limit_mutex);
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
+ limit_mutex = new Mutex();
}
static void UpdateAllocatedSpaceLimits(void* address, int size) {
ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
+ LockGuard<Mutex> lock_guard(limit_mutex);
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
// ----------------------------------------------------------------------------
-// Win32 mutex support.
-//
-// On Win32 mutexes are implemented using CRITICAL_SECTION objects. These are
-// faster than Win32 Mutex objects because they are implemented using user mode
-// atomic instructions. Therefore we only do ring transitions if there is lock
-// contention.
-
-class Win32Mutex : public Mutex {
- public:
- Win32Mutex() { InitializeCriticalSection(&cs_); }
-
- virtual ~Win32Mutex() { DeleteCriticalSection(&cs_); }
-
- virtual int Lock() {
- EnterCriticalSection(&cs_);
- return 0;
- }
-
- virtual int Unlock() {
- LeaveCriticalSection(&cs_);
- return 0;
- }
-
-
- virtual bool TryLock() {
- // Returns non-zero if critical section is entered successfully entered.
- return TryEnterCriticalSection(&cs_);
- }
-
- private:
- CRITICAL_SECTION cs_; // Critical section used for mutex
-};
-
-
-Mutex* OS::CreateMutex() {
- return new Win32Mutex();
-}
-
-
-// ----------------------------------------------------------------------------
// Win32 semaphore support.
//
// On Win32 semaphores are implemented using Win32 Semaphore objects. The
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srand(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
+ limit_mutex = new Mutex();
}
#include <cstdarg>
-#include "lazy-instance.h"
+#include "platform/mutex.h"
#include "utils.h"
#include "v8globals.h"
namespace internal {
class Semaphore;
-class Mutex;
double ceiling(double x);
double modulo(double x, double y);
static int StackWalk(Vector<StackFrame> frames);
- // Factory method for creating platform dependent Mutex.
- // Please use delete to reclaim the storage for the returned Mutex.
- static Mutex* CreateMutex();
-
// Factory method for creating platform dependent Semaphore.
// Please use delete to reclaim the storage for the returned Semaphore.
static Semaphore* CreateSemaphore(int count);
// ----------------------------------------------------------------------------
-// Mutex
-//
-// Mutexes are used for serializing access to non-reentrant sections of code.
-// The implementations of mutex should allow for nested/recursive locking.
-
-class Mutex {
- public:
- virtual ~Mutex() {}
-
- // Locks the given mutex. If the mutex is currently unlocked, it becomes
- // locked and owned by the calling thread, and immediately. If the mutex
- // is already locked by another thread, suspends the calling thread until
- // the mutex is unlocked.
- virtual int Lock() = 0;
-
- // Unlocks the given mutex. The mutex is assumed to be locked and owned by
- // the calling thread on entrance.
- virtual int Unlock() = 0;
-
- // Tries to lock the given mutex. Returns whether the mutex was
- // successfully locked.
- virtual bool TryLock() = 0;
-};
-
-struct CreateMutexTrait {
- static Mutex* Create() {
- return OS::CreateMutex();
- }
-};
-
-// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
-// Usage:
-// static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
-//
-// void my_function() {
-// ScopedLock my_lock(my_mutex.Pointer());
-// // Do something.
-// }
-//
-typedef LazyDynamicInstance<
- Mutex, CreateMutexTrait, ThreadSafeInitOnceTrait>::type LazyMutex;
-
-#define LAZY_MUTEX_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
-
-// ----------------------------------------------------------------------------
-// ScopedLock
-//
-// Stack-allocated ScopedLocks provide block-scoped locking and
-// unlocking of a mutex.
-class ScopedLock {
- public:
- explicit ScopedLock(Mutex* mutex): mutex_(mutex) {
- ASSERT(mutex_ != NULL);
- mutex_->Lock();
- }
- ~ScopedLock() {
- mutex_->Unlock();
- }
-
- private:
- Mutex* mutex_;
- DISALLOW_COPY_AND_ASSIGN(ScopedLock);
-};
-
-
-// ----------------------------------------------------------------------------
// Socket
//
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "platform/mutex.h"
+
+#include <cerrno>
+
+namespace v8 {
+namespace internal {
+
+#if V8_OS_POSIX
+
+static V8_INLINE(void InitializeNativeHandle(pthread_mutex_t* mutex)) {
+ int result;
+#if defined(DEBUG)
+ // Use an error checking mutex in debug mode.
+ pthread_mutexattr_t attr;
+ result = pthread_mutexattr_init(&attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
+ ASSERT_EQ(0, result);
+ result = pthread_mutex_init(mutex, &attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_destroy(&attr);
+#else
+ // Use a fast mutex (default attributes).
+ result = pthread_mutex_init(mutex, NULL);
+#endif // defined(DEBUG)
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE(void InitializeRecursiveNativeHandle(pthread_mutex_t* mutex)) {
+ pthread_mutexattr_t attr;
+ int result = pthread_mutexattr_init(&attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+ ASSERT_EQ(0, result);
+ result = pthread_mutex_init(mutex, &attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_destroy(&attr);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE(void DestroyNativeHandle(pthread_mutex_t* mutex)) {
+ int result = pthread_mutex_destroy(mutex);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE(void LockNativeHandle(pthread_mutex_t* mutex)) {
+ int result = pthread_mutex_lock(mutex);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE(void UnlockNativeHandle(pthread_mutex_t* mutex)) {
+ int result = pthread_mutex_unlock(mutex);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE(bool TryLockNativeHandle(pthread_mutex_t* mutex)) {
+ int result = pthread_mutex_trylock(mutex);
+ if (result == EBUSY) {
+ return false;
+ }
+ ASSERT_EQ(0, result);
+ return true;
+}
+
+#elif V8_OS_WIN
+
+static V8_INLINE(void InitializeNativeHandle(CRITICAL_SECTION* cs)) {
+ InitializeCriticalSection(cs);
+}
+
+
+static V8_INLINE(void InitializeRecursiveNativeHandle(CRITICAL_SECTION* cs)) {
+ InitializeCriticalSection(cs);
+}
+
+
+static V8_INLINE(void DestroyNativeHandle(CRITICAL_SECTION* cs)) {
+ DeleteCriticalSection(cs);
+}
+
+
+static V8_INLINE(void LockNativeHandle(CRITICAL_SECTION* cs)) {
+ EnterCriticalSection(cs);
+}
+
+
+static V8_INLINE(void UnlockNativeHandle(CRITICAL_SECTION* cs)) {
+ LeaveCriticalSection(cs);
+}
+
+
+static V8_INLINE(bool TryLockNativeHandle(CRITICAL_SECTION* cs)) {
+ return TryEnterCriticalSection(cs);
+}
+
+#endif // V8_OS_POSIX
+
+
+Mutex::Mutex() {
+ InitializeNativeHandle(&native_handle_);
+#ifdef DEBUG
+ level_ = 0;
+#endif
+}
+
+
+Mutex::~Mutex() {
+ DestroyNativeHandle(&native_handle_);
+ ASSERT_EQ(0, level_);
+}
+
+
+void Mutex::Lock() {
+ LockNativeHandle(&native_handle_);
+#ifdef DEBUG
+ ASSERT_EQ(0, level_);
+ level_++;
+#endif
+}
+
+
+void Mutex::Unlock() {
+#ifdef DEBUG
+ ASSERT_EQ(1, level_);
+ level_--;
+#endif
+ UnlockNativeHandle(&native_handle_);
+}
+
+
+bool Mutex::TryLock() {
+ if (!TryLockNativeHandle(&native_handle_)) {
+ return false;
+ }
+#ifdef DEBUG
+ ASSERT_EQ(0, level_);
+ level_++;
+#endif
+ return true;
+}
+
+
+RecursiveMutex::RecursiveMutex() {
+ InitializeRecursiveNativeHandle(&native_handle_);
+#ifdef DEBUG
+ level_ = 0;
+#endif
+}
+
+
+RecursiveMutex::~RecursiveMutex() {
+ DestroyNativeHandle(&native_handle_);
+ ASSERT_EQ(0, level_);
+}
+
+
+void RecursiveMutex::Lock() {
+ LockNativeHandle(&native_handle_);
+#ifdef DEBUG
+ ASSERT_LE(0, level_);
+ level_++;
+#endif
+}
+
+
+void RecursiveMutex::Unlock() {
+#ifdef DEBUG
+ ASSERT_LT(0, level_);
+ level_--;
+#endif
+ UnlockNativeHandle(&native_handle_);
+}
+
+
+bool RecursiveMutex::TryLock() {
+ if (!TryLockNativeHandle(&native_handle_)) {
+ return false;
+ }
+#ifdef DEBUG
+ ASSERT_LE(0, level_);
+ level_++;
+#endif
+ return true;
+}
+
+} } // namespace v8::internal
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_MUTEX_H_
+#define V8_PLATFORM_MUTEX_H_
+
+#include "lazy-instance.h"
+#if V8_OS_WIN
+#include "win32-headers.h"
+#endif
+
+#if V8_OS_POSIX
+#include <pthread.h> // NOLINT
+#endif
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Mutex
+//
+// This class is a synchronization primitive that can be used to protect shared
+// data from being simultaneously accessed by multiple threads. A mutex offers
+// exclusive, non-recursive ownership semantics:
+// - A calling thread owns a mutex from the time that it successfully calls
+// either |Lock()| or |TryLock()| until it calls |Unlock()|.
+// - When a thread owns a mutex, all other threads will block (for calls to
+// |Lock()|) or receive a |false| return value (for |TryLock()|) if they
+// attempt to claim ownership of the mutex.
+// A calling thread must not own the mutex prior to calling |Lock()| or
+// |TryLock()|. The behavior of a program is undefined if a mutex is destroyed
+// while still owned by some thread. The Mutex class is non-copyable.
+
+class Mutex V8_FINAL {
+ public:
+ Mutex();
+ ~Mutex();
+
+ // Locks the given mutex. If the mutex is currently unlocked, it becomes
+ // locked and owned by the calling thread, and the call returns
+ // immediately. If the mutex is already locked by another thread, the
+ // calling thread is suspended until the mutex is unlocked.
+ void Lock();
+
+ // Unlocks the given mutex. The mutex is assumed to be locked and owned by
+ // the calling thread on entrance.
+ void Unlock();
+
+ // Tries to lock the given mutex. Returns whether the mutex was
+ // successfully locked.
+ bool TryLock() V8_WARN_UNUSED_RESULT;
+
+ // The implementation-defined native handle type.
+#if V8_OS_POSIX
+ typedef pthread_mutex_t NativeHandle;
+#elif V8_OS_WIN
+ typedef CRITICAL_SECTION NativeHandle;
+#endif
+
+ NativeHandle& native_handle() V8_WARN_UNUSED_RESULT {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const V8_WARN_UNUSED_RESULT {
+ return native_handle_;
+ }
+
+ private:
+ NativeHandle native_handle_;
+#ifdef DEBUG
+ int level_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(Mutex);
+};
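A quick illustration of the exclusive, non-recursive semantics documented above (a sketch, not part of the header):

void MutexUsageSketch() {
  Mutex mutex;
  mutex.Lock();           // acquired; locking again from this thread would be
                          // an error, since the mutex is non-recursive
  mutex.Unlock();
  if (mutex.TryLock()) {  // non-blocking attempt, succeeds here
    mutex.Unlock();       // must balance the successful TryLock()
  }
}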
+
+
+// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
+// Usage:
+// static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
+//
+// void my_function() {
+// LockGuard<Mutex> guard(my_mutex.Pointer());
+// // Do something.
+// }
+//
+typedef LazyStaticInstance<Mutex,
+ DefaultConstructTrait<Mutex>,
+ ThreadSafeInitOnceTrait>::type LazyMutex;
+
+#define LAZY_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+
+// -----------------------------------------------------------------------------
+// RecursiveMutex
+//
+// This class is a synchronization primitive that can be used to protect shared
+// data from being simultaneously accessed by multiple threads. A recursive
+// mutex offers exclusive, recursive ownership semantics:
+// - A calling thread owns a recursive mutex for a period of time that starts
+// when it successfully calls either |Lock()| or |TryLock()|. During this
+// period, the thread may make additional calls to |Lock()| or |TryLock()|.
+// The period of ownership ends when the thread makes a matching number of
+// calls to |Unlock()|.
+// - When a thread owns a recursive mutex, all other threads will block (for
+// calls to |Lock()|) or receive a |false| return value (for |TryLock()|) if
+// they attempt to claim ownership of the recursive mutex.
+// - The maximum number of times that a recursive mutex may be locked is
+// unspecified, but after that number is reached, calls to |Lock()| will
+// probably abort the process and calls to |TryLock()| return false.
+// The behavior of a program is undefined if a recursive mutex is destroyed
+// while still owned by some thread. The RecursiveMutex class is non-copyable.
+
+class RecursiveMutex V8_FINAL {
+ public:
+ RecursiveMutex();
+ ~RecursiveMutex();
+
+ // Locks the mutex. If another thread has already locked the mutex, a call to
+ // |Lock()| will block execution until the lock is acquired. A thread may call
+ // |Lock()| on a recursive mutex repeatedly. Ownership will only be released
+ // after the thread makes a matching number of calls to |Unlock()|.
+ // The behavior is undefined if the mutex is not unlocked before being
+ // destroyed, i.e. some thread still owns it.
+ void Lock();
+
+ // Unlocks the mutex if its level of ownership is 1 (there was exactly one
+ // more call to |Lock()| than there were calls to |Unlock()| made by this
+ // thread), reduces the level of ownership by 1 otherwise. The mutex must be
+ // locked by the current thread of execution, otherwise, the behavior is
+ // undefined.
+ void Unlock();
+
+ // Tries to lock the given mutex. Returns whether the mutex was
+ // successfully locked.
+ bool TryLock() V8_WARN_UNUSED_RESULT;
+
+ // The implementation-defined native handle type.
+ typedef Mutex::NativeHandle NativeHandle;
+
+ NativeHandle& native_handle() V8_WARN_UNUSED_RESULT {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const V8_WARN_UNUSED_RESULT {
+ return native_handle_;
+ }
+
+ private:
+ NativeHandle native_handle_;
+#ifdef DEBUG
+ int level_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(RecursiveMutex);
+};
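A short sketch of the recursive ownership semantics described above (illustrative only; the cctest added later in this change exercises the same behavior):

void RecursiveMutexUsageSketch() {
  RecursiveMutex mutex;
  mutex.Lock();             // level 1
  mutex.Lock();             // level 2: the owning thread may re-acquire
  if (mutex.TryLock()) {    // level 3, still the same thread
    mutex.Unlock();
  }
  mutex.Unlock();
  mutex.Unlock();           // ownership released after the matching unlock
}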
+
+
+// POD RecursiveMutex initialized lazily (i.e. the first time Pointer() is
+// called).
+// Usage:
+// static LazyRecursiveMutex my_mutex = LAZY_RECURSIVE_MUTEX_INITIALIZER;
+//
+// void my_function() {
+// LockGuard<RecursiveMutex> guard(my_mutex.Pointer());
+// // Do something.
+// }
+//
+typedef LazyStaticInstance<RecursiveMutex,
+ DefaultConstructTrait<RecursiveMutex>,
+ ThreadSafeInitOnceTrait>::type LazyRecursiveMutex;
+
+#define LAZY_RECURSIVE_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+
+// -----------------------------------------------------------------------------
+// LockGuard
+//
+// This class is a mutex wrapper that provides a convenient RAII-style mechanism
+// for owning a mutex for the duration of a scoped block.
+// When a LockGuard object is created, it attempts to take ownership of the
+// mutex it is given. When control leaves the scope in which the LockGuard
+// object was created, the LockGuard is destructed and the mutex is released.
+// The LockGuard class is non-copyable.
+
+template <typename Mutex>
+class LockGuard V8_FINAL {
+ public:
+ explicit LockGuard(Mutex* mutex) : mutex_(mutex) { mutex_->Lock(); }
+ ~LockGuard() { mutex_->Unlock(); }
+
+ private:
+ Mutex* mutex_;
+
+ LockGuard(const LockGuard<Mutex>& other) V8_DELETE;
+ LockGuard<Mutex>& operator=(const LockGuard<Mutex>& other) V8_DELETE;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_MUTEX_H_
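The call-site pattern this change applies throughout the tree is roughly the following before/after sketch (ExampleClass and DoWork are illustrative names only):

// Before: a heap-allocated platform mutex with a manual scope guard.
//   Mutex* mutex_;             // member, created via OS::CreateMutex()
//   ScopedLock lock(mutex_);   // in each critical section
//   delete mutex_;             // in the destructor
//
// After: the mutex is embedded by value and guarded with LockGuard.
class ExampleClass {
 public:
  void DoWork() {
    LockGuard<Mutex> lock_guard(&mutex_);  // released when the scope exits
    // ... touch shared state ...
  }
 private:
  Mutex mutex_;  // no explicit creation or deletion required
};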
class Clock V8_FINAL {
public:
Clock() : initial_time_(CurrentWallclockTime()),
- initial_ticks_(TimeTicks::Now()),
- mutex_(OS::CreateMutex()) {}
-
- ~Clock() { delete mutex_; }
+ initial_ticks_(TimeTicks::Now()) {}
Time Now() {
// This must be executed under lock.
- ScopedLock sl(mutex_);
+ LockGuard<Mutex> lock_guard(&mutex_);
// Calculate the time elapsed since we started our timer.
TimeDelta elapsed = TimeTicks::Now() - initial_ticks_;
}
Time NowFromSystemTime() {
- ScopedLock sl(mutex_);
+ // This must be executed under lock.
+ LockGuard<Mutex> lock_guard(&mutex_);
+
+ // Resynchronize with the wallclock.
initial_ticks_ = TimeTicks::Now();
initial_time_ = CurrentWallclockTime();
return initial_time_;
TimeTicks initial_ticks_;
Time initial_time_;
- Mutex* mutex_;
+ Mutex mutex_;
};
class RolloverProtectedTickClock V8_FINAL : public TickClock {
public:
- RolloverProtectedTickClock()
- : mutex_(OS::CreateMutex()), last_seen_now_(0), rollover_ms_(1) {
- // We initialize rollover_ms_ to 1 to ensure that we will never
- // return 0 from TimeTicks::HighResNow() and TimeTicks::Now() below.
- }
- virtual ~RolloverProtectedTickClock() { delete mutex_; }
+ // We initialize rollover_ms_ to 1 to ensure that we will never
+ // return 0 from TimeTicks::HighResNow() and TimeTicks::Now() below.
+ RolloverProtectedTickClock() : last_seen_now_(0), rollover_ms_(1) {}
+ virtual ~RolloverProtectedTickClock() {}
virtual int64_t Now() V8_OVERRIDE {
- ScopedLock sl(mutex_);
+ LockGuard<Mutex> lock_guard(&mutex_);
// We use timeGetTime() to implement TimeTicks::Now(), which rolls over
// every ~49.7 days. We try to track rollover ourselves, which works if
// TimeTicks::Now() is called at least every 49 days.
}
private:
- Mutex* mutex_;
+ Mutex mutex_;
DWORD last_seen_now_;
int64_t rollover_ms_;
};
: Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
interval_(interval) {}
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
+ static void SetUp() { if (!mutex_) mutex_ = new Mutex(); }
+ static void TearDown() { delete mutex_; mutex_ = NULL; }
static void AddActiveSampler(Sampler* sampler) {
bool need_to_start = false;
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(mutex_);
if (instance_ == NULL) {
// Start a thread that will send SIGPROF signal to VM threads,
// when CPU profiling will be enabled.
static void RemoveActiveSampler(Sampler* sampler) {
SamplerThread* instance_to_remove = NULL;
{
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(mutex_);
ASSERT(sampler->IsActive());
bool removed = instance_->active_samplers_.RemoveElement(sampler);
virtual void Run() {
while (true) {
{
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(mutex_);
if (active_samplers_.is_empty()) break;
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
// This is safe (not going to deadlock) since Concatenate operations
// are never performed on the same free lists at the same time in
// reverse order.
- ScopedLock lock_target(mutex_);
- ScopedLock lock_source(category->mutex());
+ LockGuard<Mutex> target_lock_guard(mutex());
+ LockGuard<Mutex> source_lock_guard(category->mutex());
free_bytes = category->available();
if (end_ == NULL) {
end_ = category->end();
#include "hashmap.h"
#include "list.h"
#include "log.h"
+#include "platform/mutex.h"
#include "v8utils.h"
namespace v8 {
FreeListCategory() :
top_(NULL),
end_(NULL),
- mutex_(OS::CreateMutex()),
available_(0) {}
- ~FreeListCategory() {
- delete mutex_;
- }
-
intptr_t Concatenate(FreeListCategory* category);
void Reset();
int available() const { return available_; }
void set_available(int available) { available_ = available; }
- Mutex* mutex() { return mutex_; }
+ Mutex* mutex() { return &mutex_; }
#ifdef DEBUG
intptr_t SumFreeList();
private:
FreeListNode* top_;
FreeListNode* end_;
- Mutex* mutex_;
+ Mutex mutex_;
// Total available bytes in all blocks of this free list category.
int available_;
state[i] = FLAG_random_seed;
} else if (entropy_source != NULL) {
uint32_t val;
- ScopedLock lock(entropy_mutex.Pointer());
+ LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
entropy_source(reinterpret_cast<unsigned char*>(&val), sizeof(uint32_t));
state[i] = val;
} else {
class MessageLocation;
class VirtualMemory;
class Mutex;
+class RecursiveMutex;
typedef bool (*WeakSlotCallback)(Object** pointer);
void ThreadManager::Lock() {
- mutex_->Lock();
+ mutex_.Lock();
mutex_owner_ = ThreadId::Current();
ASSERT(IsLockedByCurrentThread());
}
void ThreadManager::Unlock() {
mutex_owner_ = ThreadId::Invalid();
- mutex_->Unlock();
+ mutex_.Unlock();
}
// be distinguished from not having a thread id at all (since NULL is
// defined as 0.)
ThreadManager::ThreadManager()
- : mutex_(OS::CreateMutex()),
- mutex_owner_(ThreadId::Invalid()),
+ : mutex_owner_(ThreadId::Invalid()),
lazily_archived_thread_(ThreadId::Invalid()),
lazily_archived_thread_state_(NULL),
free_anchor_(NULL),
ThreadManager::~ThreadManager() {
- delete mutex_;
DeleteThreadStateList(free_anchor_);
DeleteThreadStateList(in_use_anchor_);
}
void EagerlyArchiveThread();
- Mutex* mutex_;
+ Mutex mutex_;
ThreadId mutex_owner_;
ThreadId lazily_archived_thread_;
ThreadState* lazily_archived_thread_state_;
#undef ANY
#undef IGNORE
#undef GetObject
-#undef CreateMutex
#undef CreateSemaphore
#undef Yield
'test-lockers.cc',
'test-log.cc',
'test-mark-compact.cc',
+ 'test-mutex.cc',
'test-object-observe.cc',
'test-parsing.cc',
'test-platform.cc',
private:
int num_threads_;
int num_blocked_;
- v8::internal::Mutex* lock_;
+ v8::internal::Mutex lock_;
v8::internal::Semaphore* sem_;
bool invalid_;
};
ThreadBarrier::ThreadBarrier(int num_threads)
: num_threads_(num_threads), num_blocked_(0) {
- lock_ = OS::CreateMutex();
sem_ = OS::CreateSemaphore(0);
invalid_ = false; // A barrier may only be used once. Then it is invalid.
}
// Do not call, due to race condition with Wait().
// Could be resolved with Pthread condition variables.
ThreadBarrier::~ThreadBarrier() {
- lock_->Lock();
- delete lock_;
delete sem_;
}
void ThreadBarrier::Wait() {
- lock_->Lock();
+ lock_.Lock();
CHECK(!invalid_);
if (num_blocked_ == num_threads_ - 1) {
// Signal and unblock all waiting threads.
invalid_ = true;
printf("BARRIER\n\n");
fflush(stdout);
- lock_->Unlock();
+ lock_.Unlock();
} else { // Wait for the semaphore.
++num_blocked_;
- lock_->Unlock(); // Potential race condition with destructor because
+ lock_.Unlock(); // Potential race condition with destructor because
sem_->Wait(); // these two lines are not atomic.
}
}
using namespace ::v8::internal;
-// Simple test of locking logic
-TEST(Simple) {
- Mutex* mutex = OS::CreateMutex();
- CHECK_EQ(0, mutex->Lock()); // acquire the lock with the right token
- CHECK_EQ(0, mutex->Unlock()); // can unlock with the right token
- delete mutex;
-}
-
-
-TEST(MultiLock) {
- Mutex* mutex = OS::CreateMutex();
- CHECK_EQ(0, mutex->Lock());
- CHECK_EQ(0, mutex->Unlock());
- delete mutex;
-}
-
-
-TEST(ShallowLock) {
- Mutex* mutex = OS::CreateMutex();
- CHECK_EQ(0, mutex->Lock());
- CHECK_EQ(0, mutex->Unlock());
- CHECK_EQ(0, mutex->Lock());
- CHECK_EQ(0, mutex->Unlock());
- delete mutex;
-}
-
-
TEST(SemaphoreTimeout) {
bool ok;
Semaphore* sem = OS::CreateSemaphore(0);
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <cstdlib>
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "platform/mutex.h"
+
+using namespace ::v8::internal;
+
+
+TEST(LockGuardMutex) {
+ Mutex mutex;
+ { LockGuard<Mutex> lock_guard(&mutex);
+ }
+ { LockGuard<Mutex> lock_guard(&mutex);
+ }
+}
+
+
+TEST(LockGuardRecursiveMutex) {
+ RecursiveMutex recursive_mutex;
+ { LockGuard<RecursiveMutex> lock_guard(&recursive_mutex);
+ }
+ { LockGuard<RecursiveMutex> lock_guard1(&recursive_mutex);
+ LockGuard<RecursiveMutex> lock_guard2(&recursive_mutex);
+ }
+}
+
+
+TEST(LockGuardLazyMutex) {
+ LazyMutex lazy_mutex = LAZY_MUTEX_INITIALIZER;
+ { LockGuard<Mutex> lock_guard(lazy_mutex.Pointer());
+ }
+ { LockGuard<Mutex> lock_guard(lazy_mutex.Pointer());
+ }
+}
+
+
+TEST(LockGuardLazyRecursiveMutex) {
+ LazyRecursiveMutex lazy_recursive_mutex = LAZY_RECURSIVE_MUTEX_INITIALIZER;
+ { LockGuard<RecursiveMutex> lock_guard(lazy_recursive_mutex.Pointer());
+ }
+ { LockGuard<RecursiveMutex> lock_guard1(lazy_recursive_mutex.Pointer());
+ LockGuard<RecursiveMutex> lock_guard2(lazy_recursive_mutex.Pointer());
+ }
+}
+
+
+TEST(MultipleMutexes) {
+ Mutex mutex1;
+ Mutex mutex2;
+ Mutex mutex3;
+ // Order 1
+ mutex1.Lock();
+ mutex2.Lock();
+ mutex3.Lock();
+ mutex1.Unlock();
+ mutex2.Unlock();
+ mutex3.Unlock();
+ // Order 2
+ mutex1.Lock();
+ mutex2.Lock();
+ mutex3.Lock();
+ mutex3.Unlock();
+ mutex2.Unlock();
+ mutex1.Unlock();
+}
+
+
+TEST(MultipleRecursiveMutexes) {
+ RecursiveMutex recursive_mutex1;
+ RecursiveMutex recursive_mutex2;
+ // Order 1
+ recursive_mutex1.Lock();
+ recursive_mutex2.Lock();
+ CHECK(recursive_mutex1.TryLock());
+ CHECK(recursive_mutex2.TryLock());
+ recursive_mutex1.Unlock();
+ recursive_mutex1.Unlock();
+ recursive_mutex2.Unlock();
+ recursive_mutex2.Unlock();
+ // Order 2
+ recursive_mutex1.Lock();
+ CHECK(recursive_mutex1.TryLock());
+ recursive_mutex2.Lock();
+ CHECK(recursive_mutex2.TryLock());
+ recursive_mutex2.Unlock();
+ recursive_mutex1.Unlock();
+ recursive_mutex2.Unlock();
+ recursive_mutex1.Unlock();
+}
int count = 0;
int last_count = -1;
do {
- CHECK_EQ(0, mutex->Lock());
+ LockGuard<Mutex> lock_guard(mutex);
count = busy_lock_counter;
- CHECK_EQ(0, mutex->Unlock());
yield();
} while (count % 2 == rem && count < kLockCounterLimit);
if (count >= kLockCounterLimit) break;
- CHECK_EQ(0, mutex->Lock());
+ LockGuard<Mutex> lock_guard(mutex);
CHECK_EQ(count, busy_lock_counter);
CHECK(last_count == -1 || count == last_count + 1);
busy_lock_counter++;
last_count = count;
- CHECK_EQ(0, mutex->Unlock());
yield();
}
}
// increment a variable.
TEST(BusyLock) {
pthread_t other;
- Mutex* mutex = OS::CreateMutex();
+ Mutex mutex;
int thread_created = pthread_create(&other,
NULL,
&RunTestBusyLock,
- mutex);
+ &mutex);
CHECK_EQ(0, thread_created);
- LoopIncrement(mutex, 1);
+ LoopIncrement(&mutex, 1);
pthread_join(other, NULL);
- delete mutex;
}
'../../src/platform/time.h',
'../../src/platform-posix.h',
'../../src/platform.h',
+ '../../src/platform/mutex.cc',
+ '../../src/platform/mutex.h',
'../../src/preparse-data-format.h',
'../../src/preparse-data.cc',
'../../src/preparse-data.h',