"src/cpu-profiler-inl.h",
"src/cpu-profiler.cc",
"src/cpu-profiler.h",
- "src/cpu.cc",
- "src/cpu.h",
"src/data-flow.cc",
"src/data-flow.h",
"src/date.cc",
"src/ostreams.h",
"src/parser.cc",
"src/parser.h",
- "src/platform/elapsed-timer.h",
- "src/platform/time.cc",
- "src/platform/time.h",
- "src/platform.h",
- "src/platform/condition-variable.cc",
- "src/platform/condition-variable.h",
- "src/platform/mutex.cc",
- "src/platform/mutex.h",
- "src/platform/semaphore.cc",
- "src/platform/semaphore.h",
"src/preparse-data-format.h",
"src/preparse-data.cc",
"src/preparse-data.h",
"src/utils-inl.h",
"src/utils.cc",
"src/utils.h",
- "src/utils/random-number-generator.cc",
- "src/utils/random-number-generator.h",
"src/v8.cc",
"src/v8.h",
- "src/v8checks.h",
"src/v8memory.h",
"src/v8threads.cc",
"src/v8threads.h",
defines = []
deps = [ ":v8_libbase" ]
- if (is_posix) {
- sources += [
- "src/platform-posix.cc"
- ]
- }
-
if (is_linux) {
- sources += [
- "src/platform-linux.cc"
- ]
-
- libs = [ "rt" ]
if (v8_compress_startup_data == "bz2") {
libs += [ "bz2" ]
}
- } else if (is_android) {
- defines += [ "CAN_USE_VFP_INSTRUCTIONS" ]
-
- if (build_os == "mac") {
- if (current_toolchain == host_toolchain) {
- sources += [ "src/platform-macos.cc" ]
- } else {
- sources += [ "src/platform-linux.cc" ]
- }
- } else {
- sources += [ "src/platform-linux.cc" ]
- if (current_toolchain == host_toolchain) {
- defines += [ "V8_LIBRT_NOT_AVAILABLE" ]
- }
- }
- } else if (is_mac) {
- sources += [ "src/platform-macos.cc" ]
- } else if (is_win) {
- # TODO(jochen): Add support for cygwin.
- sources += [
- "src/platform-win32.cc",
- "src/win32-math.cc",
- "src/win32-math.h",
- ]
-
- defines += [ "_CRT_RAND_S" ] # for rand_s()
-
- libs = [ "winmm.lib", "ws2_32.lib" ]
}
- # TODO(jochen): Add support for qnx, freebsd, openbsd, netbsd, and solaris.
-
-
if (v8_enable_i18n_support) {
deps += [ "//third_party/icu" ]
if (is_win) {
"src/base/atomicops_internals_x86_gcc.h",
"src/base/atomicops_internals_x86_msvc.h",
"src/base/build_config.h",
+ "src/base/cpu.cc",
+ "src/base/cpu.h",
"src/base/lazy-instance.h",
+ "src/base/logging.cc",
+ "src/base/logging.h",
"src/base/macros.h",
"src/base/once.cc",
"src/base/once.h",
+ "src/base/platform/elapsed-timer.h",
+ "src/base/platform/time.cc",
+ "src/base/platform/time.h",
+ "src/base/platform/condition-variable.cc",
+ "src/base/platform/condition-variable.h",
+ "src/base/platform/mutex.cc",
+ "src/base/platform/mutex.h",
+ "src/base/platform/platform.h",
+ "src/base/platform/semaphore.cc",
+ "src/base/platform/semaphore.h",
"src/base/safe_conversions.h",
"src/base/safe_conversions_impl.h",
"src/base/safe_math.h",
"src/base/safe_math_impl.h",
- "src/base/win32-headers.h",
+ "src/base/utils/random-number-generator.cc",
+ "src/base/utils/random-number-generator.h",
]
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [ ":internal_config", ":features", ":toolchain" ]
+
+ defines = []
+
+ if (is_posix) {
+ sources += [
+ "src/base/platform/platform-posix.cc"
+ ]
+ }
+
+ if (is_linux) {
+ sources += [
+ "src/base/platform/platform-linux.cc"
+ ]
+
+ libs = [ "rt" ]
+ } else if (is_android) {
+ defines += [ "CAN_USE_VFP_INSTRUCTIONS" ]
+
+ if (build_os == "mac") {
+ if (current_toolchain == host_toolchain) {
+ sources += [ "src/base/platform/platform-macos.cc" ]
+ } else {
+ sources += [ "src/base/platform/platform-linux.cc" ]
+ }
+ } else {
+ sources += [ "src/base/platform/platform-linux.cc" ]
+ if (current_toolchain == host_toolchain) {
+ defines += [ "V8_LIBRT_NOT_AVAILABLE" ]
+ }
+ }
+ } else if (is_mac) {
+ sources += [ "src/base/platform/platform-macos.cc" ]
+ } else if (is_win) {
+ # TODO(jochen): Add support for cygwin.
+ sources += [
+ "src/base/platform/platform-win32.cc",
+ "src/base/platform/win32-headers.h",
+ "src/base/platform/win32-math.cc",
+ "src/base/platform/win32-math.h",
+ ]
+
+ defines += [ "_CRT_RAND_S" ] # for rand_s()
+
+ libs = [ "winmm.lib", "ws2_32.lib" ]
+ }
+
+ # TODO(jochen): Add support for qnx, freebsd, openbsd, netbsd, and solaris.
}
###############################################################################
void AllocationTraceNode::Print(int indent, AllocationTracker* tracker) {
- OS::Print("%10u %10u %*c", total_size_, allocation_count_, indent, ' ');
+ base::OS::Print("%10u %10u %*c", total_size_, allocation_count_, indent, ' ');
if (tracker != NULL) {
AllocationTracker::FunctionInfo* info =
tracker->function_info_list()[function_info_index_];
- OS::Print("%s #%u", info->name, id_);
+ base::OS::Print("%s #%u", info->name, id_);
} else {
- OS::Print("%u #%u", function_info_index_, id_);
+ base::OS::Print("%u #%u", function_info_index_, id_);
}
- OS::Print("\n");
+ base::OS::Print("\n");
indent += 2;
for (int i = 0; i < children_.length(); i++) {
children_[i]->Print(indent, tracker);
void AllocationTraceTree::Print(AllocationTracker* tracker) {
- OS::Print("[AllocationTraceTree:]\n");
- OS::Print("Total size | Allocation count | Function id | id\n");
+ base::OS::Print("[AllocationTraceTree:]\n");
+ base::OS::Print("Total size | Allocation count | Function id | id\n");
root()->Print(0, tracker);
}
#include "src/allocation.h"
#include <stdlib.h> // For free, malloc.
-#include "src/checks.h"
-#include "src/platform.h"
+#include "src/base/logging.h"
+#include "src/base/platform/platform.h"
#include "src/utils.h"
#if V8_LIBC_BIONIC
#include "include/v8-profiler.h"
#include "include/v8-testing.h"
#include "src/assert-scope.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
+#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/compiler.h"
#include "src/messages.h"
#include "src/natives.h"
#include "src/parser.h"
-#include "src/platform.h"
-#include "src/platform/time.h"
#include "src/profile-generator-inl.h"
#include "src/property.h"
#include "src/property-details.h"
#include "src/simulator.h"
#include "src/snapshot.h"
#include "src/unicode-inl.h"
-#include "src/utils/random-number-generator.h"
#include "src/v8threads.h"
#include "src/version.h"
#include "src/vm-state-inl.h"
i::Isolate* isolate = i::Isolate::Current();
FatalErrorCallback callback = isolate->exception_behavior();
if (callback == NULL) {
- i::OS::PrintError("\n#\n# Fatal error in %s\n# %s\n#\n\n",
- location, message);
- i::OS::Abort();
+ base::OS::PrintError("\n#\n# Fatal error in %s\n# %s\n#\n\n", location,
+ message);
+ base::OS::Abort();
} else {
callback(location, message);
}
EXCEPTION_PREAMBLE(isolate);
has_pending_exception = !i::Execution::ToNumber(
isolate, obj).ToHandle(&num);
- EXCEPTION_BAILOUT_CHECK(isolate, i::OS::nan_value());
+ EXCEPTION_BAILOUT_CHECK(isolate, base::OS::nan_value());
}
return num->Number();
}
void v8::V8::SetEntropySource(EntropySource entropy_source) {
- i::RandomNumberGenerator::SetEntropySource(entropy_source);
+ base::RandomNumberGenerator::SetEntropySource(entropy_source);
}
LOG_API(i_isolate, "Date::New");
if (std::isnan(time)) {
// Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
- time = i::OS::nan_value();
+ time = base::OS::nan_value();
}
ENTER_V8(i_isolate);
EXCEPTION_PREAMBLE(i_isolate);
ASSERT(internal_isolate->IsInitialized());
if (std::isnan(value)) {
// Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
- value = i::OS::nan_value();
+ value = base::OS::nan_value();
}
ENTER_V8(internal_isolate);
i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
int64_t CpuProfile::GetSampleTimestamp(int index) const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return (profile->sample_timestamp(index) - i::TimeTicks()).InMicroseconds();
+ return (profile->sample_timestamp(index) - base::TimeTicks())
+ .InMicroseconds();
}
int64_t CpuProfile::GetStartTime() const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return (profile->start_time() - i::TimeTicks()).InMicroseconds();
+ return (profile->start_time() - base::TimeTicks()).InMicroseconds();
}
int64_t CpuProfile::GetEndTime() const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return (profile->end_time() - i::TimeTicks()).InMicroseconds();
+ return (profile->end_time() - base::TimeTicks()).InMicroseconds();
}
void CpuProfiler::SetSamplingInterval(int us) {
ASSERT(us >= 0);
return reinterpret_cast<i::CpuProfiler*>(this)->set_sampling_interval(
- i::TimeDelta::FromMicroseconds(us));
+ base::TimeDelta::FromMicroseconds(us));
}
#include "src/arm/assembler-arm.h"
-#include "src/cpu.h"
+#include "src/assembler.h"
#include "src/debug.h"
Memory::Address_at(constant_pool_entry_address(pc, constant_pool)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
- // CPU::FlushICache(pc, sizeof(target));
+ // CpuFeatures::FlushICache(pc, sizeof(target));
// However, on ARM, no instruction is actually patched in the case
// of embedded constants of the form:
// ldr ip, [pp, #...]
ASSERT(IsMovW(Memory::int32_at(pc)));
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CPU::FlushICache(pc, 2 * kInstrSize);
+ CpuFeatures::FlushICache(pc, 2 * kInstrSize);
}
}
}
#else // __arm__
// Probe for additional features at runtime.
- CPU cpu;
+ base::CPU cpu;
if (FLAG_enable_vfp3 && cpu.has_vfp3()) {
// This implementation also sets the VFP flags if runtime
// detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
if (FLAG_enable_armv7) supported_ |= 1u << ARMv7;
if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
// Use movw/movt for QUALCOMM ARMv7 cores.
- if (FLAG_enable_movw_movt && cpu.implementer() == CPU::QUALCOMM) {
+ if (FLAG_enable_movw_movt && cpu.implementer() == base::CPU::QUALCOMM) {
supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
}
}
// ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
- if (cpu.implementer() == CPU::ARM && (cpu.part() == CPU::ARM_CORTEX_A5 ||
- cpu.part() == CPU::ARM_CORTEX_A9)) {
+ if (cpu.implementer() == base::CPU::ARM &&
+ (cpu.part() == base::CPU::ARM_CORTEX_A5 ||
+ cpu.part() == base::CPU::ARM_CORTEX_A9)) {
cache_line_size_ = 32;
}
#endif
#ifdef __arm__
- arm_float_abi = OS::ArmUsingHardFloat() ? "hard" : "softfp";
+ arm_float_abi = base::OS::ArmUsingHardFloat() ? "hard" : "softfp";
#elif USE_EABI_HARDFLOAT
arm_float_abi = "hard";
#else
CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
#ifdef __arm__
- bool eabi_hardfloat = OS::ArmUsingHardFloat();
+ bool eabi_hardfloat = base::OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
bool eabi_hardfloat = true;
#else
}
// Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
+ CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}
break;
}
ASSERT(GetMode(stub) == mode);
- CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
+ CpuFeatures::FlushICache(stub->instruction_start(),
+ 2 * Assembler::kInstrSize);
}
private:
UnaryMathFunction CreateExpFunction() {
if (!FLAG_fast_math) return &std::exp;
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return stub;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
#else
if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return stub;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
CodeDesc desc;
masm.GetCode(&desc);
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
return &std::sqrt;
#else
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &std::sqrt;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
- CPU::FlushICache(sequence, young_length);
+ CpuFeatures::FlushICache(sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
#if V8_TARGET_ARCH_ARM
-#include "src/cpu.h"
+#include "src/assembler.h"
#include "src/macro-assembler.h"
#include "src/simulator.h" // for cache flushing.
namespace internal {
-void CPU::FlushICache(void* start, size_t size) {
+void CpuFeatures::FlushICache(void* start, size_t size) {
if (size == 0) return;
#if defined(USE_SIMULATOR)
#if V8_TARGET_ARCH_ARM
#include "src/arm/constants-arm.h"
+#include "src/base/platform/platform.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
-#include "src/platform.h"
namespace v8 {
// environment.
// Note: This will break if we ever start generating snapshots on one ARM
// platform for another ARM platform with a different alignment.
- return OS::ActivationFrameAlignment();
+ return base::OS::ActivationFrameAlignment();
#else // V8_HOST_ARCH_ARM
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
// provides more information.
#if V8_HOST_ARCH_ARM
if (emit_debug_code()) {
- int frame_alignment = OS::ActivationFrameAlignment();
+ int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
ASSERT(IsPowerOf2(frame_alignment));
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
if (flush_cache_ == FLUSH) {
- CPU::FlushICache(address_, size_);
+ CpuFeatures::FlushICache(address_, size_);
}
// Check that the code was patched as expected.
// EABI variant for double arguments in use.
bool use_eabi_hardfloat() {
#ifdef __arm__
- return OS::ArmUsingHardFloat();
+ return base::OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
return true;
#else
__ mov(r1, Operand(masm_->CodeObject()));
// We need to make room for the return address on the stack.
- int stack_alignment = OS::ActivationFrameAlignment();
+ int stack_alignment = base::OS::ActivationFrameAlignment();
ASSERT(IsAligned(stack_alignment, kPointerSize));
__ sub(sp, sp, Operand(stack_alignment));
}
} else if (strcmp(cmd, "gdb") == 0) {
PrintF("relinquishing control to gdb\n");
- v8::internal::OS::DebugBreak();
+ v8::base::OS::DebugBreak();
PrintF("regaining control from gdb\n");
} else if (strcmp(cmd, "break") == 0) {
if (argc == 2) {
int original_stack = get_register(sp);
// Compute position of stack on entry to generated code.
int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
- if (OS::ActivationFrameAlignment() != 0) {
- entry_stack &= -OS::ActivationFrameAlignment();
+ if (base::OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -base::OS::ActivationFrameAlignment();
}
// Store remaining arguments on stack, from low to high memory.
intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
#define V8_ARM64_ASSEMBLER_ARM64_INL_H_
#include "src/arm64/assembler-arm64.h"
-#include "src/cpu.h"
+#include "src/assembler.h"
#include "src/debug.h"
Memory::Address_at(target_pointer_address_at(pc)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
- // CPU::FlushICache(pc, sizeof(target));
+ // CpuFeatures::FlushICache(pc, sizeof(target));
// However, on ARM, an instruction is actually patched in the case of
// embedded constants of the form:
// ldr ip, [pc, #...]
#define ARM64_DEFINE_REG_STATICS
#include "src/arm64/assembler-arm64-inl.h"
+#include "src/base/cpu.h"
namespace v8 {
namespace internal {
// csp will always be aligned if it is enabled by probing at runtime.
if (FLAG_enable_always_align_csp) supported_ |= 1u << ALWAYS_ALIGN_CSP;
} else {
- CPU cpu;
- if (FLAG_enable_always_align_csp && (cpu.implementer() == CPU::NVIDIA ||
- FLAG_debug_code)) {
+ base::CPU cpu;
+ if (FLAG_enable_always_align_csp &&
+ (cpu.implementer() == base::CPU::NVIDIA || FLAG_debug_code)) {
supported_ |= 1u << ALWAYS_ALIGN_CSP;
}
}
}
// Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count * kInstructionSize);
+ CpuFeatures::FlushICache(pc_, instruction_count * kInstructionSize);
}
#include "src/arm64/instructions-arm64.h"
#include "src/assembler.h"
-#include "src/cpu.h"
#include "src/globals.h"
#include "src/serialize.h"
#include "src/utils.h"
ASSERT(IsConstPoolEmpty());
// Flush the Instruction cache.
size_t length = buffer_size_ - kGap;
- CPU::FlushICache(buffer_, length);
+ CpuFeatures::FlushICache(buffer_, length);
}
static const int kMovInt64NInstrs = 4;
// an AAPCS64-compliant exp() function. This will be faster than the C
// library's exp() function, but probably less accurate.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunction>(buffer);
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/utils-arm64.h"
-#include "src/cpu.h"
+#include "src/assembler.h"
namespace v8 {
namespace internal {
};
-void CPU::FlushICache(void* address, size_t length) {
+void CpuFeatures::FlushICache(void* address, size_t length) {
if (length == 0) return;
#ifdef USE_SIMULATOR
#include "src/arm64/decoder-arm64-inl.h"
#include "src/arm64/disasm-arm64.h"
+#include "src/base/platform/platform.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
-#include "src/platform.h"
namespace v8 {
namespace internal {
// environment.
// Note: This will break if we ever start generating snapshots on one ARM
// platform for another ARM platform with a different alignment.
- return OS::ActivationFrameAlignment();
+ return base::OS::ActivationFrameAlignment();
#else // V8_HOST_ARCH_ARM64
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
if (FLAG_trace_sim) {
va_list arguments;
va_start(arguments, format);
- OS::VFPrint(stream_, format, arguments);
+ base::OS::VFPrint(stream_, format, arguments);
va_end(arguments);
}
}
uintptr_t original_stack = sp();
uintptr_t entry_stack = original_stack -
stack_args.size() * sizeof(stack_args[0]);
- if (OS::ActivationFrameAlignment() != 0) {
- entry_stack &= -OS::ActivationFrameAlignment();
+ if (base::OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -base::OS::ActivationFrameAlignment();
}
char * stack = reinterpret_cast<char*>(entry_stack);
std::vector<int64_t>::const_iterator it;
// gdb -------------------------------------------------------------------
} else if (strcmp(cmd, "gdb") == 0) {
PrintF("Relinquishing control to gdb.\n");
- OS::DebugBreak();
+ base::OS::DebugBreak();
PrintF("Regaining control from gdb.\n");
// sysregs ---------------------------------------------------------------
abort();
} else {
- OS::DebugBreak();
+ base::OS::DebugBreak();
}
break;
}
#include <cmath>
#include "src/api.h"
+#include "src/base/cpu.h"
#include "src/base/lazy-instance.h"
+#include "src/base/platform/platform.h"
#include "src/builtins.h"
#include "src/counters.h"
-#include "src/cpu.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/ic.h"
#include "src/isolate-inl.h"
#include "src/jsregexp.h"
-#include "src/platform.h"
#include "src/regexp-macro-assembler.h"
#include "src/regexp-stack.h"
#include "src/runtime.h"
// Common double constants.
struct DoubleConstant BASE_EMBEDDED {
  double min_int;
  double one_half;
  double minus_one_half;
  double minus_zero;
  double zero;
  double uint8_max_value;
  double negative_infinity;
  double canonical_non_hole_nan;
  double the_hole_nan;
  double uint32_bias;
};
static DoubleConstant double_constants;
const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
static bool math_exp_data_initialized = false;
-static Mutex* math_exp_data_mutex = NULL;
+static base::Mutex* math_exp_data_mutex = NULL;
static double* math_exp_constants_array = NULL;
static double* math_exp_log_table_array = NULL;
double_constants.minus_zero = -0.0;
double_constants.uint8_max_value = 255;
double_constants.zero = 0.0;
- double_constants.canonical_non_hole_nan = OS::nan_value();
+ double_constants.canonical_non_hole_nan = base::OS::nan_value();
double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
double_constants.negative_infinity = -V8_INFINITY;
double_constants.uint32_bias =
static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
- math_exp_data_mutex = new Mutex();
+ math_exp_data_mutex = new base::Mutex();
}
// Early return?
if (math_exp_data_initialized) return;
- LockGuard<Mutex> lock_guard(math_exp_data_mutex);
+ base::LockGuard<base::Mutex> lock_guard(math_exp_data_mutex);
if (!math_exp_data_initialized) {
// If this is changed, generated code must be adapted too.
const int kTableSizeBits = 11;
ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CPU::FlushICache)));
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(CpuFeatures::FlushICache)));
}
// The checks for special cases can be dropped in ia32 because it has already
// been done in generated code before bailing out here.
if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
- return OS::nan_value();
+ return base::OS::nan_value();
}
return std::pow(x, y);
}
static void PrintTarget();
static void PrintFeatures();
+ // Flush instruction cache.
+ static void FlushICache(void* start, size_t size);
+
private:
// Platform-dependent implementation.
static void ProbeImpl(bool cross_compile);
#define V8_ASSERT_SCOPE_H_
#include "src/allocation.h"
-#include "src/platform.h"
+#include "src/base/platform/platform.h"
#include "src/utils.h"
namespace v8 {
static PerThreadAssertData* GetAssertData() {
return reinterpret_cast<PerThreadAssertData*>(
- Thread::GetThreadLocal(thread_local_key));
+ base::Thread::GetThreadLocal(thread_local_key));
}
- static Thread::LocalStorageKey thread_local_key;
+ static base::Thread::LocalStorageKey thread_local_key;
PerThreadAssertData* data_;
friend class Isolate;
private:
static void SetThreadLocalData(PerThreadAssertData* data) {
- Thread::SetThreadLocal(thread_local_key, data);
+ base::Thread::SetThreadLocal(thread_local_key, data);
}
};
#define USING_BSD_ABI
#endif
+// Number of bits to represent the page size for paged spaces. The value of
+// 20 gives 1 MB per page.
+const int kPageSizeBits = 20;
+
#endif // V8_BASE_BUILD_CONFIG_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/cpu.h"
+
+#if V8_LIBC_MSVCRT
+#include <intrin.h> // __cpuid()
+#endif
+#if V8_OS_POSIX
+#include <unistd.h> // sysconf()
+#endif
+#if V8_OS_QNX
+#include <sys/syspage.h> // cpuinfo
+#endif
+
+#include <ctype.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <algorithm>
+
+#include "src/base/logging.h"
+#if V8_OS_WIN
+#include "src/base/win32-headers.h" // NOLINT
+#endif
+
+namespace v8 {
+namespace base {
+
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+
+// Define __cpuid() for non-MSVC libraries.
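+// The output order matches the MSVC intrinsic: cpu_info[0..3] receive EAX,
+// EBX, ECX and EDX for the requested leaf (info_type).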
+#if !V8_LIBC_MSVCRT
+
+static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
+#if defined(__i386__) && defined(__pic__)
+ // Make sure to preserve ebx, which contains the pointer
+ // to the GOT in case we're generating PIC.
+ __asm__ volatile (
+ "mov %%ebx, %%edi\n\t"
+ "cpuid\n\t"
+ "xchg %%edi, %%ebx\n\t"
+ : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+ : "a"(info_type)
+ );
+#else
+ __asm__ volatile (
+ "cpuid \n\t"
+ : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+ : "a"(info_type)
+ );
+#endif // defined(__i386__) && defined(__pic__)
+}
+
+#endif // !V8_LIBC_MSVCRT
+
+#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || V8_HOST_ARCH_MIPS
+
+#if V8_OS_LINUX
+
+#if V8_HOST_ARCH_ARM
+
+// See <uapi/asm/hwcap.h> kernel header.
+/*
+ * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP
+ */
+#define HWCAP_SWP (1 << 0)
+#define HWCAP_HALF (1 << 1)
+#define HWCAP_THUMB (1 << 2)
+#define HWCAP_26BIT (1 << 3) /* Play it safe */
+#define HWCAP_FAST_MULT (1 << 4)
+#define HWCAP_FPA (1 << 5)
+#define HWCAP_VFP (1 << 6)
+#define HWCAP_EDSP (1 << 7)
+#define HWCAP_JAVA (1 << 8)
+#define HWCAP_IWMMXT (1 << 9)
+#define HWCAP_CRUNCH (1 << 10)
+#define HWCAP_THUMBEE (1 << 11)
+#define HWCAP_NEON (1 << 12)
+#define HWCAP_VFPv3 (1 << 13)
+#define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */
+#define HWCAP_TLS (1 << 15)
+#define HWCAP_VFPv4 (1 << 16)
+#define HWCAP_IDIVA (1 << 17)
+#define HWCAP_IDIVT (1 << 18)
+#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
+#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
+#define HWCAP_LPAE (1 << 20)
+
+#define AT_HWCAP 16
+
+// Read the ELF HWCAP flags by parsing /proc/self/auxv.
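+// The returned bitmask is tested against the HWCAP_* constants above, e.g.
+// (ReadELFHWCaps() & HWCAP_NEON) != 0 when the kernel advertises NEON.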
+static uint32_t ReadELFHWCaps() {
+ uint32_t result = 0;
+ FILE* fp = fopen("/proc/self/auxv", "r");
+ if (fp != NULL) {
+ struct { uint32_t tag; uint32_t value; } entry;
+ for (;;) {
+ size_t n = fread(&entry, sizeof(entry), 1, fp);
+ if (n == 0 || (entry.tag == 0 && entry.value == 0)) {
+ break;
+ }
+ if (entry.tag == AT_HWCAP) {
+ result = entry.value;
+ break;
+ }
+ }
+ fclose(fp);
+ }
+ return result;
+}
+
+#endif // V8_HOST_ARCH_ARM
+
+// Extract the information exposed by the kernel via /proc/cpuinfo.
+class CPUInfo V8_FINAL BASE_EMBEDDED {
+ public:
+ CPUInfo() : datalen_(0) {
+ // Get the size of the cpuinfo file by reading it until the end. This is
+ // required because files under /proc do not always return a valid size
+    // when using fseek(0, SEEK_END) + ftell(). Nor can they be mmap()-ed.
+ static const char PATHNAME[] = "/proc/cpuinfo";
+ FILE* fp = fopen(PATHNAME, "r");
+ if (fp != NULL) {
+ for (;;) {
+ char buffer[256];
+ size_t n = fread(buffer, 1, sizeof(buffer), fp);
+ if (n == 0) {
+ break;
+ }
+ datalen_ += n;
+ }
+ fclose(fp);
+ }
+
+ // Read the contents of the cpuinfo file.
+ data_ = new char[datalen_ + 1];
+ fp = fopen(PATHNAME, "r");
+ if (fp != NULL) {
+ for (size_t offset = 0; offset < datalen_; ) {
+ size_t n = fread(data_ + offset, 1, datalen_ - offset, fp);
+ if (n == 0) {
+ break;
+ }
+ offset += n;
+ }
+ fclose(fp);
+ }
+
+ // Zero-terminate the data.
+ data_[datalen_] = '\0';
+ }
+
+ ~CPUInfo() {
+ delete[] data_;
+ }
+
+  // Extract the content of the first occurrence of a given field in
+ // the content of the cpuinfo file and return it as a heap-allocated
+ // string that must be freed by the caller using delete[].
+ // Return NULL if not found.
+ char* ExtractField(const char* field) const {
+ ASSERT(field != NULL);
+
+    // Look for the first field occurrence, and ensure it starts the line.
+ size_t fieldlen = strlen(field);
+ char* p = data_;
+ for (;;) {
+ p = strstr(p, field);
+ if (p == NULL) {
+ return NULL;
+ }
+ if (p == data_ || p[-1] == '\n') {
+ break;
+ }
+ p += fieldlen;
+ }
+
+ // Skip to the first colon followed by a space.
+ p = strchr(p + fieldlen, ':');
+ if (p == NULL || !isspace(p[1])) {
+ return NULL;
+ }
+ p += 2;
+
+ // Find the end of the line.
+ char* q = strchr(p, '\n');
+ if (q == NULL) {
+ q = data_ + datalen_;
+ }
+
+ // Copy the line into a heap-allocated buffer.
+ size_t len = q - p;
+ char* result = new char[len + 1];
+ if (result != NULL) {
+ memcpy(result, p, len);
+ result[len] = '\0';
+ }
+ return result;
+ }
+
+ private:
+ char* data_;
+ size_t datalen_;
+};
+
+#if V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
+
+// Checks that a space-separated list of items contains one given 'item'.
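+// For example, HasListItem("swp half thumb vfp neon", "neon") returns true,
+// while HasListItem("swp half thumb vfp neon", "vfpv3") returns false.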
+static bool HasListItem(const char* list, const char* item) {
+ ssize_t item_len = strlen(item);
+ const char* p = list;
+ if (p != NULL) {
+ while (*p != '\0') {
+ // Skip whitespace.
+ while (isspace(*p)) ++p;
+
+ // Find end of current list item.
+ const char* q = p;
+ while (*q != '\0' && !isspace(*q)) ++q;
+
+ if (item_len == q - p && memcmp(p, item, item_len) == 0) {
+ return true;
+ }
+
+ // Skip to next item.
+ p = q;
+ }
+ }
+ return false;
+}
+
+#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
+
+#endif // V8_OS_LINUX
+
+#endif // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+
+CPU::CPU() : stepping_(0),
+ model_(0),
+ ext_model_(0),
+ family_(0),
+ ext_family_(0),
+ type_(0),
+ implementer_(0),
+ architecture_(0),
+ part_(0),
+ has_fpu_(false),
+ has_cmov_(false),
+ has_sahf_(false),
+ has_mmx_(false),
+ has_sse_(false),
+ has_sse2_(false),
+ has_sse3_(false),
+ has_ssse3_(false),
+ has_sse41_(false),
+ has_sse42_(false),
+ has_idiva_(false),
+ has_neon_(false),
+ has_thumb2_(false),
+ has_vfp_(false),
+ has_vfp3_(false),
+ has_vfp3_d32_(false) {
+ memcpy(vendor_, "Unknown", 8);
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+ int cpu_info[4];
+
+ // __cpuid with an InfoType argument of 0 returns the number of
+ // valid Ids in CPUInfo[0] and the CPU identification string in
+ // the other three array elements. The CPU identification string is
+ // not in linear order. The code below arranges the information
+ // in a human readable form. The human readable order is CPUInfo[1] |
+ // CPUInfo[3] | CPUInfo[2]. CPUInfo[2] and CPUInfo[3] are swapped
+ // before using memcpy to copy these three array elements to cpu_string.
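+  // For example, a vendor string of "GenuineIntel" arrives as EBX = "Genu",
+  // EDX = "ineI" and ECX = "ntel".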
+ __cpuid(cpu_info, 0);
+ unsigned num_ids = cpu_info[0];
+ std::swap(cpu_info[2], cpu_info[3]);
+ memcpy(vendor_, cpu_info + 1, 12);
+ vendor_[12] = '\0';
+
+ // Interpret CPU feature information.
+ if (num_ids > 0) {
+ __cpuid(cpu_info, 1);
+ stepping_ = cpu_info[0] & 0xf;
+ model_ = ((cpu_info[0] >> 4) & 0xf) + ((cpu_info[0] >> 12) & 0xf0);
+ family_ = (cpu_info[0] >> 8) & 0xf;
+ type_ = (cpu_info[0] >> 12) & 0x3;
+ ext_model_ = (cpu_info[0] >> 16) & 0xf;
+ ext_family_ = (cpu_info[0] >> 20) & 0xff;
+ has_fpu_ = (cpu_info[3] & 0x00000001) != 0;
+ has_cmov_ = (cpu_info[3] & 0x00008000) != 0;
+ has_mmx_ = (cpu_info[3] & 0x00800000) != 0;
+ has_sse_ = (cpu_info[3] & 0x02000000) != 0;
+ has_sse2_ = (cpu_info[3] & 0x04000000) != 0;
+ has_sse3_ = (cpu_info[2] & 0x00000001) != 0;
+ has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
+ has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
+ has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
+ }
+
+#if V8_HOST_ARCH_IA32
+  // SAHF is always available in compat/legacy mode.
+ has_sahf_ = true;
+#else
+ // Query extended IDs.
+ __cpuid(cpu_info, 0x80000000);
+ unsigned num_ext_ids = cpu_info[0];
+
+ // Interpret extended CPU feature information.
+ if (num_ext_ids > 0x80000000) {
+ __cpuid(cpu_info, 0x80000001);
+ // SAHF must be probed in long mode.
+ has_sahf_ = (cpu_info[2] & 0x00000001) != 0;
+ }
+#endif
+
+#elif V8_HOST_ARCH_ARM
+
+#if V8_OS_LINUX
+
+ CPUInfo cpu_info;
+
+ // Extract implementor from the "CPU implementer" field.
+ char* implementer = cpu_info.ExtractField("CPU implementer");
+ if (implementer != NULL) {
+    char* end;
+ implementer_ = strtol(implementer, &end, 0);
+ if (end == implementer) {
+ implementer_ = 0;
+ }
+ delete[] implementer;
+ }
+
+ // Extract part number from the "CPU part" field.
+ char* part = cpu_info.ExtractField("CPU part");
+ if (part != NULL) {
+    char* end;
+ part_ = strtol(part, &end, 0);
+ if (end == part) {
+ part_ = 0;
+ }
+ delete[] part;
+ }
+
+ // Extract architecture from the "CPU Architecture" field.
+  // The list is well-known, unlike the output of
+ // the 'Processor' field which can vary greatly.
+ // See the definition of the 'proc_arch' array in
+ // $KERNEL/arch/arm/kernel/setup.c and the 'c_show' function in
+ // same file.
+ char* architecture = cpu_info.ExtractField("CPU architecture");
+ if (architecture != NULL) {
+ char* end;
+ architecture_ = strtol(architecture, &end, 10);
+ if (end == architecture) {
+ architecture_ = 0;
+ }
+ delete[] architecture;
+
+ // Unfortunately, it seems that certain ARMv6-based CPUs
+ // report an incorrect architecture number of 7!
+ //
+ // See http://code.google.com/p/android/issues/detail?id=10812
+ //
+ // We try to correct this by looking at the 'elf_format'
+    // field reported by the 'Processor' field, which has the form
+    // "(v7l)" for an ARMv7-based CPU and "(v6l)" for an ARMv6-based
+    // one. For example, the Raspberry Pi is one popular
+ // ARMv6 device that reports architecture 7.
+ if (architecture_ == 7) {
+ char* processor = cpu_info.ExtractField("Processor");
+ if (HasListItem(processor, "(v6l)")) {
+ architecture_ = 6;
+ }
+ delete[] processor;
+ }
+ }
+
+ // Try to extract the list of CPU features from ELF hwcaps.
+ uint32_t hwcaps = ReadELFHWCaps();
+ if (hwcaps != 0) {
+ has_idiva_ = (hwcaps & HWCAP_IDIVA) != 0;
+ has_neon_ = (hwcaps & HWCAP_NEON) != 0;
+ has_vfp_ = (hwcaps & HWCAP_VFP) != 0;
+ has_vfp3_ = (hwcaps & (HWCAP_VFPv3 | HWCAP_VFPv3D16 | HWCAP_VFPv4)) != 0;
+ has_vfp3_d32_ = (has_vfp3_ && ((hwcaps & HWCAP_VFPv3D16) == 0 ||
+ (hwcaps & HWCAP_VFPD32) != 0));
+ } else {
+    // Try to fall back to the "Features" cpuinfo field.
+ char* features = cpu_info.ExtractField("Features");
+ has_idiva_ = HasListItem(features, "idiva");
+ has_neon_ = HasListItem(features, "neon");
+ has_thumb2_ = HasListItem(features, "thumb2");
+ has_vfp_ = HasListItem(features, "vfp");
+ if (HasListItem(features, "vfpv3d16")) {
+ has_vfp3_ = true;
+ } else if (HasListItem(features, "vfpv3")) {
+ has_vfp3_ = true;
+ has_vfp3_d32_ = true;
+ }
+ delete[] features;
+ }
+
+ // Some old kernels will report vfp not vfpv3. Here we make an attempt
+ // to detect vfpv3 by checking for vfp *and* neon, since neon is only
+ // available on architectures with vfpv3. Checking neon on its own is
+ // not enough as it is possible to have neon without vfp.
+ if (has_vfp_ && has_neon_) {
+ has_vfp3_ = true;
+ }
+
+ // VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
+ if (architecture_ < 7 && has_vfp3_) {
+ architecture_ = 7;
+ }
+
+ // ARMv7 implies Thumb2.
+ if (architecture_ >= 7) {
+ has_thumb2_ = true;
+ }
+
+ // The earliest architecture with Thumb2 is ARMv6T2.
+ if (has_thumb2_ && architecture_ < 6) {
+ architecture_ = 6;
+ }
+
+ // We don't support any FPUs other than VFP.
+ has_fpu_ = has_vfp_;
+
+#elif V8_OS_QNX
+
+ uint32_t cpu_flags = SYSPAGE_ENTRY(cpuinfo)->flags;
+ if (cpu_flags & ARM_CPU_FLAG_V7) {
+ architecture_ = 7;
+ has_thumb2_ = true;
+ } else if (cpu_flags & ARM_CPU_FLAG_V6) {
+ architecture_ = 6;
+ // QNX doesn't say if Thumb2 is available.
+ // Assume false for the architectures older than ARMv7.
+ }
+ ASSERT(architecture_ >= 6);
+ has_fpu_ = (cpu_flags & CPU_FLAG_FPU) != 0;
+ has_vfp_ = has_fpu_;
+ if (cpu_flags & ARM_CPU_FLAG_NEON) {
+ has_neon_ = true;
+ has_vfp3_ = has_vfp_;
+#ifdef ARM_CPU_FLAG_VFP_D32
+ has_vfp3_d32_ = (cpu_flags & ARM_CPU_FLAG_VFP_D32) != 0;
+#endif
+ }
+ has_idiva_ = (cpu_flags & ARM_CPU_FLAG_IDIV) != 0;
+
+#endif // V8_OS_LINUX
+
+#elif V8_HOST_ARCH_MIPS
+
+ // Simple detection of FPU at runtime for Linux.
+ // It is based on /proc/cpuinfo, which reveals hardware configuration
+ // to user-space applications. According to MIPS (early 2010), no similar
+ // facility is universally available on the MIPS architectures,
+ // so it's up to individual OSes to provide such.
+ CPUInfo cpu_info;
+ char* cpu_model = cpu_info.ExtractField("cpu model");
+ has_fpu_ = HasListItem(cpu_model, "FPU");
+ delete[] cpu_model;
+
+#elif V8_HOST_ARCH_ARM64
+
+ CPUInfo cpu_info;
+
+ // Extract implementor from the "CPU implementer" field.
+ char* implementer = cpu_info.ExtractField("CPU implementer");
+ if (implementer != NULL) {
+    char* end;
+ implementer_ = strtol(implementer, &end, 0);
+ if (end == implementer) {
+ implementer_ = 0;
+ }
+ delete[] implementer;
+ }
+
+ // Extract part number from the "CPU part" field.
+ char* part = cpu_info.ExtractField("CPU part");
+ if (part != NULL) {
+    char* end;
+ part_ = strtol(part, &end, 0);
+ if (end == part) {
+ part_ = 0;
+ }
+ delete[] part;
+ }
+
+#endif
+}
+
+} } // namespace v8::base
--- /dev/null
+// Copyright 2006-2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This module contains the architecture-specific code. This makes the rest
+// of the code less dependent on differences between processor
+// architectures.
+// The classes have the same definition for all architectures. The
+// implementation for a particular architecture is put in cpu_<arch>.cc.
+// The build system then uses the implementation for the target architecture.
+//
+
+#ifndef V8_BASE_CPU_H_
+#define V8_BASE_CPU_H_
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+// ----------------------------------------------------------------------------
+// CPU
+//
+// Query information about the processor.
+//
+// This class also has static methods for the architecture specific functions.
+// Add methods here to cope with differences between the supported
+// architectures. For each architecture the file cpu_<arch>.cc contains the
+// implementation of these static functions.
+
+class CPU V8_FINAL {
+ public:
+ CPU();
+
+ // x86 CPUID information
+ const char* vendor() const { return vendor_; }
+ int stepping() const { return stepping_; }
+ int model() const { return model_; }
+ int ext_model() const { return ext_model_; }
+ int family() const { return family_; }
+ int ext_family() const { return ext_family_; }
+ int type() const { return type_; }
+
+ // arm implementer/part information
+ int implementer() const { return implementer_; }
+ static const int ARM = 0x41;
+ static const int NVIDIA = 0x4e;
+ static const int QUALCOMM = 0x51;
+ int architecture() const { return architecture_; }
+ int part() const { return part_; }
+ static const int ARM_CORTEX_A5 = 0xc05;
+ static const int ARM_CORTEX_A7 = 0xc07;
+ static const int ARM_CORTEX_A8 = 0xc08;
+ static const int ARM_CORTEX_A9 = 0xc09;
+ static const int ARM_CORTEX_A12 = 0xc0c;
+ static const int ARM_CORTEX_A15 = 0xc0f;
+
+ // General features
+ bool has_fpu() const { return has_fpu_; }
+
+ // x86 features
+ bool has_cmov() const { return has_cmov_; }
+ bool has_sahf() const { return has_sahf_; }
+ bool has_mmx() const { return has_mmx_; }
+ bool has_sse() const { return has_sse_; }
+ bool has_sse2() const { return has_sse2_; }
+ bool has_sse3() const { return has_sse3_; }
+ bool has_ssse3() const { return has_ssse3_; }
+ bool has_sse41() const { return has_sse41_; }
+ bool has_sse42() const { return has_sse42_; }
+
+ // arm features
+ bool has_idiva() const { return has_idiva_; }
+ bool has_neon() const { return has_neon_; }
+ bool has_thumb2() const { return has_thumb2_; }
+ bool has_vfp() const { return has_vfp_; }
+ bool has_vfp3() const { return has_vfp3_; }
+ bool has_vfp3_d32() const { return has_vfp3_d32_; }
+
+ private:
+ char vendor_[13];
+ int stepping_;
+ int model_;
+ int ext_model_;
+ int family_;
+ int ext_family_;
+ int type_;
+ int implementer_;
+ int architecture_;
+ int part_;
+ bool has_fpu_;
+ bool has_cmov_;
+ bool has_sahf_;
+ bool has_mmx_;
+ bool has_sse_;
+ bool has_sse2_;
+ bool has_sse3_;
+ bool has_ssse3_;
+ bool has_sse41_;
+ bool has_sse42_;
+ bool has_idiva_;
+ bool has_neon_;
+ bool has_thumb2_;
+ bool has_vfp_;
+ bool has_vfp3_;
+ bool has_vfp3_d32_;
+};
+
+} } // namespace v8::base
+
+#endif // V8_BASE_CPU_H_
--- /dev/null
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/logging.h"
+
+#if V8_LIBC_GLIBC || V8_OS_BSD
+# include <cxxabi.h>
+# include <execinfo.h>
+#elif V8_OS_QNX
+# include <backtrace.h>
+#endif // V8_LIBC_GLIBC || V8_OS_BSD
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace base {
+
+// Attempts to dump a backtrace (if supported).
+void DumpBacktrace() {
+#if V8_LIBC_GLIBC || V8_OS_BSD
+ void* trace[100];
+ int size = backtrace(trace, ARRAY_SIZE(trace));
+ char** symbols = backtrace_symbols(trace, size);
+ OS::PrintError("\n==== C stack trace ===============================\n\n");
+ if (size == 0) {
+ OS::PrintError("(empty)\n");
+ } else if (symbols == NULL) {
+ OS::PrintError("(no symbols)\n");
+ } else {
+ for (int i = 1; i < size; ++i) {
+ OS::PrintError("%2d: ", i);
+ char mangled[201];
+ if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT
+ int status;
+ size_t length;
+ char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
+ OS::PrintError("%s\n", demangled != NULL ? demangled : mangled);
+ free(demangled);
+ } else {
+ OS::PrintError("??\n");
+ }
+ }
+ }
+ free(symbols);
+#elif V8_OS_QNX
+ char out[1024];
+ bt_accessor_t acc;
+ bt_memmap_t memmap;
+ bt_init_accessor(&acc, BT_SELF);
+ bt_load_memmap(&acc, &memmap);
+ bt_sprn_memmap(&memmap, out, sizeof(out));
+ OS::PrintError(out);
+ bt_addr_t trace[100];
+ int size = bt_get_backtrace(&acc, trace, ARRAY_SIZE(trace));
+ OS::PrintError("\n==== C stack trace ===============================\n\n");
+ if (size == 0) {
+ OS::PrintError("(empty)\n");
+ } else {
+ bt_sprnf_addrs(&memmap, trace, size, const_cast<char*>("%a\n"),
+ out, sizeof(out), NULL);
+ OS::PrintError(out);
+ }
+ bt_unload_memmap(&memmap);
+ bt_release_accessor(&acc);
+#endif // V8_LIBC_GLIBC || V8_OS_BSD
+}
+
+} } // namespace v8::base
+
+
+// Contains protection against recursive calls (faults while handling faults).
+extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
+ fflush(stdout);
+ fflush(stderr);
+ v8::base::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file,
+ line);
+ va_list arguments;
+ va_start(arguments, format);
+ v8::base::OS::VPrintError(format, arguments);
+ va_end(arguments);
+ v8::base::OS::PrintError("\n#\n");
+ v8::base::DumpBacktrace();
+ fflush(stderr);
+ v8::base::OS::Abort();
+}
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_LOGGING_H_
+#define V8_BASE_LOGGING_H_
+
+#include <string.h>
+
+#include "include/v8stdint.h"
+#include "src/base/build_config.h"
+
+extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
+
+
+// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
+// development, but they should not be relied on in the final product.
+#ifdef DEBUG
+#define FATAL(msg) \
+ V8_Fatal(__FILE__, __LINE__, "%s", (msg))
+#define UNIMPLEMENTED() \
+ V8_Fatal(__FILE__, __LINE__, "unimplemented code")
+#define UNREACHABLE() \
+ V8_Fatal(__FILE__, __LINE__, "unreachable code")
+#else
+#define FATAL(msg) \
+ V8_Fatal("", 0, "%s", (msg))
+#define UNIMPLEMENTED() \
+ V8_Fatal("", 0, "unimplemented code")
+#define UNREACHABLE() ((void) 0)
+#endif
+
+
+// The CHECK macro checks that the given condition is true; if not, it
+// prints a message to stderr and aborts.
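+// For example, CHECK(ptr != NULL) calls V8_Fatal() with the stringified
+// condition and aborts when ptr is NULL.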
+#define CHECK(condition) do { \
+ if (!(condition)) { \
+ V8_Fatal(__FILE__, __LINE__, "CHECK(%s) failed", #condition); \
+ } \
+ } while (0)
+
+
+// Helper function used by the CHECK_EQ function when given int
+// arguments. Should not be called directly.
+inline void CheckEqualsHelper(const char* file, int line,
+ const char* expected_source, int expected,
+ const char* value_source, int value) {
+ if (expected != value) {
+ V8_Fatal(file, line,
+ "CHECK_EQ(%s, %s) failed\n# Expected: %i\n# Found: %i",
+ expected_source, value_source, expected, value);
+ }
+}
+
+
+// Helper function used by the CHECK_EQ function when given int64_t
+// arguments. Should not be called directly.
+inline void CheckEqualsHelper(const char* file, int line,
+ const char* expected_source,
+ int64_t expected,
+ const char* value_source,
+ int64_t value) {
+ if (expected != value) {
+ // Print int64_t values in hex, as two int32s,
+ // to avoid platform-dependencies.
+ V8_Fatal(file, line,
+ "CHECK_EQ(%s, %s) failed\n#"
+ " Expected: 0x%08x%08x\n# Found: 0x%08x%08x",
+ expected_source, value_source,
+ static_cast<uint32_t>(expected >> 32),
+ static_cast<uint32_t>(expected),
+ static_cast<uint32_t>(value >> 32),
+ static_cast<uint32_t>(value));
+ }
+}
+
+
+// Helper function used by the CHECK_NE function when given int
+// arguments. Should not be called directly.
+inline void CheckNonEqualsHelper(const char* file,
+ int line,
+ const char* unexpected_source,
+ int unexpected,
+ const char* value_source,
+ int value) {
+ if (unexpected == value) {
+ V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %i",
+ unexpected_source, value_source, value);
+ }
+}
+
+
+// Helper function used by the CHECK function when given string
+// arguments. Should not be called directly.
+inline void CheckEqualsHelper(const char* file,
+ int line,
+ const char* expected_source,
+ const char* expected,
+ const char* value_source,
+ const char* value) {
+ if ((expected == NULL && value != NULL) ||
+ (expected != NULL && value == NULL) ||
+ (expected != NULL && value != NULL && strcmp(expected, value) != 0)) {
+ V8_Fatal(file, line,
+ "CHECK_EQ(%s, %s) failed\n# Expected: %s\n# Found: %s",
+ expected_source, value_source, expected, value);
+ }
+}
+
+
+inline void CheckNonEqualsHelper(const char* file,
+ int line,
+ const char* expected_source,
+ const char* expected,
+ const char* value_source,
+ const char* value) {
+ if (expected == value ||
+ (expected != NULL && value != NULL && strcmp(expected, value) == 0)) {
+ V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %s",
+ expected_source, value_source, value);
+ }
+}
+
+
+// Helper function used by the CHECK function when given pointer
+// arguments. Should not be called directly.
+inline void CheckEqualsHelper(const char* file,
+ int line,
+ const char* expected_source,
+ const void* expected,
+ const char* value_source,
+ const void* value) {
+ if (expected != value) {
+ V8_Fatal(file, line,
+ "CHECK_EQ(%s, %s) failed\n# Expected: %p\n# Found: %p",
+ expected_source, value_source,
+ expected, value);
+ }
+}
+
+
+inline void CheckNonEqualsHelper(const char* file,
+ int line,
+ const char* expected_source,
+ const void* expected,
+ const char* value_source,
+ const void* value) {
+ if (expected == value) {
+ V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %p",
+ expected_source, value_source, value);
+ }
+}
+
+
+// Helper function used by the CHECK function when given floating
+// point arguments. Should not be called directly.
+inline void CheckEqualsHelper(const char* file,
+ int line,
+ const char* expected_source,
+ double expected,
+ const char* value_source,
+ double value) {
+ // Force values to 64 bit memory to truncate 80 bit precision on IA32.
+ volatile double* exp = new double[1];
+ *exp = expected;
+ volatile double* val = new double[1];
+ *val = value;
+ if (*exp != *val) {
+ V8_Fatal(file, line,
+ "CHECK_EQ(%s, %s) failed\n# Expected: %f\n# Found: %f",
+ expected_source, value_source, *exp, *val);
+ }
+ delete[] exp;
+ delete[] val;
+}
+
+
+inline void CheckNonEqualsHelper(const char* file,
+ int line,
+ const char* expected_source,
+ int64_t expected,
+ const char* value_source,
+ int64_t value) {
+ if (expected == value) {
+    // Print the value in hex, as two int32s, to avoid platform-dependencies.
+    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: 0x%08x%08x",
+             expected_source, value_source,
+             static_cast<uint32_t>(value >> 32), static_cast<uint32_t>(value));
+ }
+}
+
+
+inline void CheckNonEqualsHelper(const char* file,
+ int line,
+ const char* expected_source,
+ double expected,
+ const char* value_source,
+ double value) {
+ // Force values to 64 bit memory to truncate 80 bit precision on IA32.
+ volatile double* exp = new double[1];
+ *exp = expected;
+ volatile double* val = new double[1];
+ *val = value;
+ if (*exp == *val) {
+ V8_Fatal(file, line,
+ "CHECK_NE(%s, %s) failed\n# Value: %f",
+ expected_source, value_source, *val);
+ }
+ delete[] exp;
+ delete[] val;
+}
+
+
+#define CHECK_EQ(expected, value) CheckEqualsHelper(__FILE__, __LINE__, \
+ #expected, expected, #value, value)
+
+
+#define CHECK_NE(unexpected, value) CheckNonEqualsHelper(__FILE__, __LINE__, \
+ #unexpected, unexpected, #value, value)
+
+
+#define CHECK_GT(a, b) CHECK((a) > (b))
+#define CHECK_GE(a, b) CHECK((a) >= (b))
+#define CHECK_LT(a, b) CHECK((a) < (b))
+#define CHECK_LE(a, b) CHECK((a) <= (b))
+
+
+namespace v8 {
+namespace base {
+
+// Exposed for making debugging easier (to see where your function is being
+// called, just add a call to DumpBacktrace).
+void DumpBacktrace();
+
+} } // namespace v8::base
+
+
+// The ASSERT macro is equivalent to CHECK except that it only
+// generates code in debug builds.
+#ifdef DEBUG
+#define ASSERT_RESULT(expr) CHECK(expr)
+#define ASSERT(condition) CHECK(condition)
+#define ASSERT_EQ(v1, v2) CHECK_EQ(v1, v2)
+#define ASSERT_NE(v1, v2) CHECK_NE(v1, v2)
+#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
+#define ASSERT_LT(v1, v2) CHECK_LT(v1, v2)
+#define ASSERT_LE(v1, v2) CHECK_LE(v1, v2)
+#else
+#define ASSERT_RESULT(expr) (expr)
+#define ASSERT(condition) ((void) 0)
+#define ASSERT_EQ(v1, v2) ((void) 0)
+#define ASSERT_NE(v1, v2) ((void) 0)
+#define ASSERT_GE(v1, v2) ((void) 0)
+#define ASSERT_LT(v1, v2) ((void) 0)
+#define ASSERT_LE(v1, v2) ((void) 0)
+#endif
+
+#define ASSERT_NOT_NULL(p) ASSERT_NE(NULL, p)
+
+// "Extra checks" are lightweight checks that are enabled in some release
+// builds.
+#ifdef ENABLE_EXTRA_CHECKS
+#define EXTRA_CHECK(condition) CHECK(condition)
+#else
+#define EXTRA_CHECK(condition) ((void) 0)
+#endif
+
+#endif // V8_BASE_LOGGING_H_
#include "include/v8stdint.h"
#include "src/base/build_config.h"
+#include "src/base/logging.h"
// The expression OFFSET_OF(type, field) computes the byte-offset
#define IS_POWER_OF_TWO(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
+// Returns true iff x is a power of 2. Cannot be used with the maximally
+// negative value of the type T (the -1 overflows).
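+// For example, IsPowerOf2(64) is true, while IsPowerOf2(0) and IsPowerOf2(48)
+// are false.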
+template <typename T>
+inline bool IsPowerOf2(T x) {
+ return IS_POWER_OF_TWO(x);
+}
+
+
// Define our own macros for writing 64-bit constants. This is less fragile
// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
// works on compilers that don't have it (like MSVC).
// write V8_2PART_UINT64_C(0x12345678,90123456);
#define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
+
+// Compute the 0-relative offset of some absolute value x of type T.
+// This allows conversion of Addresses and integral types into
+// 0-relative int offsets.
+template <typename T>
+inline intptr_t OffsetFrom(T x) {
+ return x - static_cast<T>(0);
+}
+
+
+// Compute the absolute value of type T for some 0-relative offset x.
+// This allows conversion of 0-relative int offsets into Addresses and
+// integral types.
+template <typename T>
+inline T AddressFrom(intptr_t x) {
+ return static_cast<T>(static_cast<T>(0) + x);
+}
+
+
+// Return the largest multiple of m which is <= x.
+template <typename T>
+inline T RoundDown(T x, intptr_t m) {
+ ASSERT(IsPowerOf2(m));
+ return AddressFrom<T>(OffsetFrom(x) & -m);
+}
+
+
+// Return the smallest multiple of m which is >= x.
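+// For example, RoundDown(23, 8) yields 16 and RoundUp(23, 8) yields 24;
+// m must be a power of two.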
+template <typename T>
+inline T RoundUp(T x, intptr_t m) {
+ return RoundDown<T>(static_cast<T>(x + m - 1), m);
+}
+
+
+// Increment a pointer until it has the specified alignment.
+// This works like RoundUp, but it works correctly on pointer types where
+// sizeof(*pointer) might not be 1.
+template<class T>
+T AlignUp(T pointer, size_t alignment) {
+ ASSERT(sizeof(pointer) == sizeof(uintptr_t));
+ uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
+ return reinterpret_cast<T>(RoundUp(pointer_raw, alignment));
+}
+
+
+template <typename T, typename U>
+inline bool IsAligned(T value, U alignment) {
+ return (value & (alignment - 1)) == 0;
+}
+
+
+// Returns the smallest power of two which is >= x. If you pass in a
+// number that is already a power of two, it is returned as is.
+// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
+// figure 3-3, page 48, where the function is called clp2.
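+// For example, RoundUpToPowerOf2(37) smears the set bits down (37 -> 63) and
+// adds one, yielding 64.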
+inline uint32_t RoundUpToPowerOf2(uint32_t x) {
+ ASSERT(x <= 0x80000000u);
+ x = x - 1;
+ x = x | (x >> 1);
+ x = x | (x >> 2);
+ x = x | (x >> 4);
+ x = x | (x >> 8);
+ x = x | (x >> 16);
+ return x + 1;
+}
+
+
+inline uint32_t RoundDownToPowerOf2(uint32_t x) {
+ uint32_t rounded_up = RoundUpToPowerOf2(x);
+ if (rounded_up > x) return rounded_up >> 1;
+ return rounded_up;
+}
+
#endif // V8_BASE_MACROS_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/condition-variable.h"
+
+#include <errno.h>
+#include <time.h>
+
+#include "src/base/platform/time.h"
+
+namespace v8 {
+namespace base {
+
+#if V8_OS_POSIX
+
+ConditionVariable::ConditionVariable() {
+ // TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary
+ // hack to support cross-compiling Chrome for Android in AOSP. Remove
+ // this once AOSP is fixed.
+#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \
+ (V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE
+ // On Free/Net/OpenBSD and Linux with glibc we can change the time
+ // source for pthread_cond_timedwait() to use the monotonic clock.
+ pthread_condattr_t attr;
+ int result = pthread_condattr_init(&attr);
+ ASSERT_EQ(0, result);
+ result = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
+ ASSERT_EQ(0, result);
+ result = pthread_cond_init(&native_handle_, &attr);
+ ASSERT_EQ(0, result);
+ result = pthread_condattr_destroy(&attr);
+#else
+ int result = pthread_cond_init(&native_handle_, NULL);
+#endif
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+ConditionVariable::~ConditionVariable() {
+ int result = pthread_cond_destroy(&native_handle_);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void ConditionVariable::NotifyOne() {
+ int result = pthread_cond_signal(&native_handle_);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void ConditionVariable::NotifyAll() {
+ int result = pthread_cond_broadcast(&native_handle_);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void ConditionVariable::Wait(Mutex* mutex) {
+ mutex->AssertHeldAndUnmark();
+ int result = pthread_cond_wait(&native_handle_, &mutex->native_handle());
+ ASSERT_EQ(0, result);
+ USE(result);
+ mutex->AssertUnheldAndMark();
+}
+
+
+bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
+ struct timespec ts;
+ int result;
+ mutex->AssertHeldAndUnmark();
+#if V8_OS_MACOSX
+ // Mac OS X provides pthread_cond_timedwait_relative_np(), which does
+ // not depend on the real time clock, which is what you really WANT here!
+ ts = rel_time.ToTimespec();
+ ASSERT_GE(ts.tv_sec, 0);
+ ASSERT_GE(ts.tv_nsec, 0);
+ result = pthread_cond_timedwait_relative_np(
+ &native_handle_, &mutex->native_handle(), &ts);
+#else
+ // TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary
+ // hack to support cross-compiling Chrome for Android in AOSP. Remove
+ // this once AOSP is fixed.
+#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \
+ (V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE
+ // On Free/Net/OpenBSD and Linux with glibc we can change the time
+ // source for pthread_cond_timedwait() to use the monotonic clock.
+ result = clock_gettime(CLOCK_MONOTONIC, &ts);
+ ASSERT_EQ(0, result);
+ Time now = Time::FromTimespec(ts);
+#else
+ // The timeout argument to pthread_cond_timedwait() is in absolute time.
+ Time now = Time::NowFromSystemTime();
+#endif
+ Time end_time = now + rel_time;
+ ASSERT_GE(end_time, now);
+ ts = end_time.ToTimespec();
+ result = pthread_cond_timedwait(
+ &native_handle_, &mutex->native_handle(), &ts);
+#endif // V8_OS_MACOSX
+ mutex->AssertUnheldAndMark();
+ if (result == ETIMEDOUT) {
+ return false;
+ }
+ ASSERT_EQ(0, result);
+ return true;
+}
+
+#elif V8_OS_WIN
+
+struct ConditionVariable::Event {
+ Event() : handle_(::CreateEventA(NULL, true, false, NULL)) {
+ ASSERT(handle_ != NULL);
+ }
+
+ ~Event() {
+ BOOL ok = ::CloseHandle(handle_);
+ ASSERT(ok);
+ USE(ok);
+ }
+
+ bool WaitFor(DWORD timeout_ms) {
+ DWORD result = ::WaitForSingleObject(handle_, timeout_ms);
+ if (result == WAIT_OBJECT_0) {
+ return true;
+ }
+ ASSERT(result == WAIT_TIMEOUT);
+ return false;
+ }
+
+ HANDLE handle_;
+ Event* next_;
+ HANDLE thread_;
+ volatile bool notified_;
+};
+
+
+ConditionVariable::NativeHandle::~NativeHandle() {
+ ASSERT(waitlist_ == NULL);
+
+ while (freelist_ != NULL) {
+ Event* event = freelist_;
+ freelist_ = event->next_;
+ delete event;
+ }
+}
+
+
+ConditionVariable::Event* ConditionVariable::NativeHandle::Pre() {
+ LockGuard<Mutex> lock_guard(&mutex_);
+
+ // Grab an event from the free list or create a new one.
+ Event* event = freelist_;
+ if (event != NULL) {
+ freelist_ = event->next_;
+ } else {
+ event = new Event;
+ }
+ event->thread_ = GetCurrentThread();
+ event->notified_ = false;
+
+#ifdef DEBUG
+ // The event must not be on the wait list.
+ for (Event* we = waitlist_; we != NULL; we = we->next_) {
+ ASSERT_NE(event, we);
+ }
+#endif
+
+ // Prepend the event to the wait list.
+ event->next_ = waitlist_;
+ waitlist_ = event;
+
+ return event;
+}
+
+
+void ConditionVariable::NativeHandle::Post(Event* event, bool result) {
+ LockGuard<Mutex> lock_guard(&mutex_);
+
+ // Remove the event from the wait list.
+ for (Event** wep = &waitlist_;; wep = &(*wep)->next_) {
+ ASSERT_NE(NULL, *wep);
+ if (*wep == event) {
+ *wep = event->next_;
+ break;
+ }
+ }
+
+#ifdef DEBUG
+ // The event must not be on the free list.
+ for (Event* fe = freelist_; fe != NULL; fe = fe->next_) {
+ ASSERT_NE(event, fe);
+ }
+#endif
+
+ // Reset the event.
+ BOOL ok = ::ResetEvent(event->handle_);
+ ASSERT(ok);
+ USE(ok);
+
+ // Insert the event into the free list.
+ event->next_ = freelist_;
+ freelist_ = event;
+
+ // Forward signals delivered after the timeout to the next waiting event.
+ if (!result && event->notified_ && waitlist_ != NULL) {
+ ok = ::SetEvent(waitlist_->handle_);
+ ASSERT(ok);
+ USE(ok);
+ waitlist_->notified_ = true;
+ }
+}
+
+
+ConditionVariable::ConditionVariable() {}
+
+
+ConditionVariable::~ConditionVariable() {}
+
+
+void ConditionVariable::NotifyOne() {
+ // Notify the thread with the highest priority in the waitlist
+ // that was not already signalled.
+ LockGuard<Mutex> lock_guard(native_handle_.mutex());
+ Event* highest_event = NULL;
+ int highest_priority = std::numeric_limits<int>::min();
+ for (Event* event = native_handle().waitlist();
+ event != NULL;
+ event = event->next_) {
+ if (event->notified_) {
+ continue;
+ }
+ int priority = GetThreadPriority(event->thread_);
+ ASSERT_NE(THREAD_PRIORITY_ERROR_RETURN, priority);
+ if (priority >= highest_priority) {
+ highest_priority = priority;
+ highest_event = event;
+ }
+ }
+ if (highest_event != NULL) {
+ ASSERT(!highest_event->notified_);
+ ::SetEvent(highest_event->handle_);
+ highest_event->notified_ = true;
+ }
+}
+
+
+void ConditionVariable::NotifyAll() {
+ // Notify all threads on the waitlist.
+ LockGuard<Mutex> lock_guard(native_handle_.mutex());
+ for (Event* event = native_handle().waitlist();
+ event != NULL;
+ event = event->next_) {
+ if (!event->notified_) {
+ ::SetEvent(event->handle_);
+ event->notified_ = true;
+ }
+ }
+}
+
+
+void ConditionVariable::Wait(Mutex* mutex) {
+ // Create and setup the wait event.
+ Event* event = native_handle_.Pre();
+
+ // Release the user mutex.
+ mutex->Unlock();
+
+ // Wait on the wait event.
+ while (!event->WaitFor(INFINITE))
+ ;
+
+  // Reacquire the user mutex.
+ mutex->Lock();
+
+ // Release the wait event (we must have been notified).
+ ASSERT(event->notified_);
+ native_handle_.Post(event, true);
+}
+
+
+bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
+ // Create and setup the wait event.
+ Event* event = native_handle_.Pre();
+
+ // Release the user mutex.
+ mutex->Unlock();
+
+ // Wait on the wait event.
+ TimeTicks now = TimeTicks::Now();
+ TimeTicks end = now + rel_time;
+ bool result = false;
+ while (true) {
+ int64_t msec = (end - now).InMilliseconds();
+ if (msec >= static_cast<int64_t>(INFINITE)) {
+ result = event->WaitFor(INFINITE - 1);
+ if (result) {
+ break;
+ }
+ now = TimeTicks::Now();
+ } else {
+ result = event->WaitFor((msec < 0) ? 0 : static_cast<DWORD>(msec));
+ break;
+ }
+ }
+
+  // Reacquire the user mutex.
+ mutex->Lock();
+
+ // Release the wait event.
+ ASSERT(!result || event->notified_);
+ native_handle_.Post(event, result);
+
+ return result;
+}
+
+#endif // V8_OS_POSIX
+
+} } // namespace v8::base
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_PLATFORM_CONDITION_VARIABLE_H_
+#define V8_BASE_PLATFORM_CONDITION_VARIABLE_H_
+
+#include "src/base/lazy-instance.h"
+#include "src/base/platform/mutex.h"
+
+namespace v8 {
+namespace base {
+
+// Forward declarations.
+class ConditionVariableEvent;
+class TimeDelta;
+
+// -----------------------------------------------------------------------------
+// ConditionVariable
+//
+// This class is a synchronization primitive that can be used to block a thread,
+// or multiple threads at the same time, until:
+// - a notification is received from another thread,
+// - a timeout expires, or
+// - a spurious wakeup occurs.
+// Any thread that intends to wait on a ConditionVariable has to acquire a lock
+// on a Mutex first. The |Wait()| and |WaitFor()| operations atomically release
+// the mutex and suspend the execution of the calling thread. When the condition
+// variable is notified, the thread is awakened, and the mutex is reacquired.
+
+class ConditionVariable V8_FINAL {
+ public:
+ ConditionVariable();
+ ~ConditionVariable();
+
+ // If any threads are waiting on this condition variable, calling
+ // |NotifyOne()| unblocks one of the waiting threads.
+ void NotifyOne();
+
+ // Unblocks all threads currently waiting for this condition variable.
+ void NotifyAll();
+
+ // |Wait()| causes the calling thread to block until the condition variable is
+ // notified or a spurious wakeup occurs. Atomically releases the mutex, blocks
+  // the calling thread, and adds it to the list of threads waiting on
+ // this condition variable. The thread will be unblocked when |NotifyAll()| or
+ // |NotifyOne()| is executed. It may also be unblocked spuriously. When
+ // unblocked, regardless of the reason, the lock on the mutex is reacquired
+ // and |Wait()| exits.
+ void Wait(Mutex* mutex);
+
+  // Atomically releases the mutex, blocks the calling thread, and
+ // adds it to the list of threads waiting on this condition variable. The
+ // thread will be unblocked when |NotifyAll()| or |NotifyOne()| is executed,
+ // or when the relative timeout |rel_time| expires. It may also be unblocked
+ // spuriously. When unblocked, regardless of the reason, the lock on the mutex
+ // is reacquired and |WaitFor()| exits. Returns true if the condition variable
+ // was notified prior to the timeout.
+ bool WaitFor(Mutex* mutex, const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT;
+
+ // The implementation-defined native handle type.
+#if V8_OS_POSIX
+ typedef pthread_cond_t NativeHandle;
+#elif V8_OS_WIN
+ struct Event;
+ class NativeHandle V8_FINAL {
+ public:
+ NativeHandle() : waitlist_(NULL), freelist_(NULL) {}
+ ~NativeHandle();
+
+ Event* Pre() V8_WARN_UNUSED_RESULT;
+ void Post(Event* event, bool result);
+
+ Mutex* mutex() { return &mutex_; }
+ Event* waitlist() { return waitlist_; }
+
+ private:
+ Event* waitlist_;
+ Event* freelist_;
+ Mutex mutex_;
+
+ DISALLOW_COPY_AND_ASSIGN(NativeHandle);
+ };
+#endif
+
+ NativeHandle& native_handle() {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const {
+ return native_handle_;
+ }
+
+ private:
+ NativeHandle native_handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
+};
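+
+
+// Example usage (illustrative sketch; |mutex|, |cv| and |ready| are
+// hypothetical names):
+//
+//   Mutex mutex;
+//   ConditionVariable cv;
+//   bool ready = false;
+//
+//   // Waiting thread (guard against spurious wakeups with a loop):
+//   LockGuard<Mutex> lock_guard(&mutex);
+//   while (!ready) {
+//     cv.Wait(&mutex);  // Atomically releases and reacquires |mutex|.
+//   }
+//
+//   // Notifying thread, while holding |mutex|: set |ready| to true, then
+//   cv.NotifyOne();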
+
+
+// POD ConditionVariable initialized lazily (i.e. the first time Pointer() is
+// called).
+// Usage:
+// static LazyConditionVariable my_condvar =
+// LAZY_CONDITION_VARIABLE_INITIALIZER;
+//
+// void my_function() {
+// LockGuard<Mutex> lock_guard(&my_mutex);
+// my_condvar.Pointer()->Wait(&my_mutex);
+// }
+typedef LazyStaticInstance<
+ ConditionVariable, DefaultConstructTrait<ConditionVariable>,
+ ThreadSafeInitOnceTrait>::type LazyConditionVariable;
+
+#define LAZY_CONDITION_VARIABLE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+} } // namespace v8::base
+
+#endif // V8_BASE_PLATFORM_CONDITION_VARIABLE_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_PLATFORM_ELAPSED_TIMER_H_
+#define V8_BASE_PLATFORM_ELAPSED_TIMER_H_
+
+#include "src/base/logging.h"
+#include "src/base/platform/time.h"
+
+namespace v8 {
+namespace base {
+
+class ElapsedTimer V8_FINAL {
+ public:
+#ifdef DEBUG
+ ElapsedTimer() : started_(false) {}
+#endif
+
+ // Starts this timer. Once started a timer can be checked with
+ // |Elapsed()| or |HasExpired()|, and may be restarted using |Restart()|.
+ // This method must not be called on an already started timer.
+ void Start() {
+ ASSERT(!IsStarted());
+ start_ticks_ = Now();
+#ifdef DEBUG
+ started_ = true;
+#endif
+ ASSERT(IsStarted());
+ }
+
+ // Stops this timer. Must not be called on a timer that was not
+ // started before.
+ void Stop() {
+ ASSERT(IsStarted());
+ start_ticks_ = TimeTicks();
+#ifdef DEBUG
+ started_ = false;
+#endif
+ ASSERT(!IsStarted());
+ }
+
+ // Returns |true| if this timer was started previously.
+ bool IsStarted() const {
+ ASSERT(started_ || start_ticks_.IsNull());
+ ASSERT(!started_ || !start_ticks_.IsNull());
+ return !start_ticks_.IsNull();
+ }
+
+ // Restarts the timer and returns the time elapsed since the previous start.
+ // This method is equivalent to obtaining the elapsed time with |Elapsed()|
+ // and then starting the timer again, but does so in one single operation,
+ // avoiding the need to obtain the clock value twice. It may only be called
+ // on a previously started timer.
+ TimeDelta Restart() {
+ ASSERT(IsStarted());
+ TimeTicks ticks = Now();
+ TimeDelta elapsed = ticks - start_ticks_;
+ ASSERT(elapsed.InMicroseconds() >= 0);
+ start_ticks_ = ticks;
+ ASSERT(IsStarted());
+ return elapsed;
+ }
+
+ // Returns the time elapsed since the previous start. This method may only
+ // be called on a previously started timer.
+ TimeDelta Elapsed() const {
+ ASSERT(IsStarted());
+ TimeDelta elapsed = Now() - start_ticks_;
+ ASSERT(elapsed.InMicroseconds() >= 0);
+ return elapsed;
+ }
+
+ // Returns |true| if the specified |time_delta| has elapsed since the
+ // previous start, or |false| if not. This method may only be called on
+ // a previously started timer.
+ bool HasExpired(TimeDelta time_delta) const {
+ ASSERT(IsStarted());
+ return Elapsed() >= time_delta;
+ }
+
+ private:
+ static V8_INLINE TimeTicks Now() {
+ TimeTicks now = TimeTicks::HighResolutionNow();
+ ASSERT(!now.IsNull());
+ return now;
+ }
+
+ TimeTicks start_ticks_;
+#ifdef DEBUG
+ bool started_;
+#endif
+};
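+
+
+// Example usage (illustrative sketch; DoSomething() is a placeholder):
+//
+//   ElapsedTimer timer;
+//   timer.Start();
+//   DoSomething();
+//   TimeDelta elapsed = timer.Elapsed();
+//   if (timer.HasExpired(TimeDelta::FromMilliseconds(100))) {
+//     // DoSomething() took at least 100 milliseconds.
+//   }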
+
+} } // namespace v8::base
+
+#endif // V8_BASE_PLATFORM_ELAPSED_TIMER_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/mutex.h"
+
+#include <errno.h>
+
+namespace v8 {
+namespace base {
+
+#if V8_OS_POSIX
+
+static V8_INLINE void InitializeNativeHandle(pthread_mutex_t* mutex) {
+ int result;
+#if defined(DEBUG)
+ // Use an error checking mutex in debug mode.
+ pthread_mutexattr_t attr;
+ result = pthread_mutexattr_init(&attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
+ ASSERT_EQ(0, result);
+ result = pthread_mutex_init(mutex, &attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_destroy(&attr);
+#else
+ // Use a fast mutex (default attributes).
+ result = pthread_mutex_init(mutex, NULL);
+#endif // defined(DEBUG)
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE void InitializeRecursiveNativeHandle(pthread_mutex_t* mutex) {
+ pthread_mutexattr_t attr;
+ int result = pthread_mutexattr_init(&attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+ ASSERT_EQ(0, result);
+ result = pthread_mutex_init(mutex, &attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_destroy(&attr);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE void DestroyNativeHandle(pthread_mutex_t* mutex) {
+ int result = pthread_mutex_destroy(mutex);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE void LockNativeHandle(pthread_mutex_t* mutex) {
+ int result = pthread_mutex_lock(mutex);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE void UnlockNativeHandle(pthread_mutex_t* mutex) {
+ int result = pthread_mutex_unlock(mutex);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE bool TryLockNativeHandle(pthread_mutex_t* mutex) {
+ int result = pthread_mutex_trylock(mutex);
+ if (result == EBUSY) {
+ return false;
+ }
+ ASSERT_EQ(0, result);
+ return true;
+}
+
+#elif V8_OS_WIN
+
+static V8_INLINE void InitializeNativeHandle(PCRITICAL_SECTION cs) {
+ InitializeCriticalSection(cs);
+}
+
+
+static V8_INLINE void InitializeRecursiveNativeHandle(PCRITICAL_SECTION cs) {
+ InitializeCriticalSection(cs);
+}
+
+
+static V8_INLINE void DestroyNativeHandle(PCRITICAL_SECTION cs) {
+ DeleteCriticalSection(cs);
+}
+
+
+static V8_INLINE void LockNativeHandle(PCRITICAL_SECTION cs) {
+ EnterCriticalSection(cs);
+}
+
+
+static V8_INLINE void UnlockNativeHandle(PCRITICAL_SECTION cs) {
+ LeaveCriticalSection(cs);
+}
+
+
+static V8_INLINE bool TryLockNativeHandle(PCRITICAL_SECTION cs) {
+ return TryEnterCriticalSection(cs);
+}
+
+#endif // V8_OS_POSIX
+
+
+Mutex::Mutex() {
+ InitializeNativeHandle(&native_handle_);
+#ifdef DEBUG
+ level_ = 0;
+#endif
+}
+
+
+Mutex::~Mutex() {
+ DestroyNativeHandle(&native_handle_);
+ ASSERT_EQ(0, level_);
+}
+
+
+void Mutex::Lock() {
+ LockNativeHandle(&native_handle_);
+ AssertUnheldAndMark();
+}
+
+
+void Mutex::Unlock() {
+ AssertHeldAndUnmark();
+ UnlockNativeHandle(&native_handle_);
+}
+
+
+bool Mutex::TryLock() {
+ if (!TryLockNativeHandle(&native_handle_)) {
+ return false;
+ }
+ AssertUnheldAndMark();
+ return true;
+}
+
+
+RecursiveMutex::RecursiveMutex() {
+ InitializeRecursiveNativeHandle(&native_handle_);
+#ifdef DEBUG
+ level_ = 0;
+#endif
+}
+
+
+RecursiveMutex::~RecursiveMutex() {
+ DestroyNativeHandle(&native_handle_);
+ ASSERT_EQ(0, level_);
+}
+
+
+void RecursiveMutex::Lock() {
+ LockNativeHandle(&native_handle_);
+#ifdef DEBUG
+ ASSERT_LE(0, level_);
+ level_++;
+#endif
+}
+
+
+void RecursiveMutex::Unlock() {
+#ifdef DEBUG
+ ASSERT_LT(0, level_);
+ level_--;
+#endif
+ UnlockNativeHandle(&native_handle_);
+}
+
+
+bool RecursiveMutex::TryLock() {
+ if (!TryLockNativeHandle(&native_handle_)) {
+ return false;
+ }
+#ifdef DEBUG
+ ASSERT_LE(0, level_);
+ level_++;
+#endif
+ return true;
+}
+
+} } // namespace v8::base
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_PLATFORM_MUTEX_H_
+#define V8_BASE_PLATFORM_MUTEX_H_
+
+#include "src/base/lazy-instance.h"
+#if V8_OS_WIN
+#include "src/base/win32-headers.h"
+#endif
+#include "src/base/logging.h"
+
+#if V8_OS_POSIX
+#include <pthread.h> // NOLINT
+#endif
+
+namespace v8 {
+namespace base {
+
+// ----------------------------------------------------------------------------
+// Mutex
+//
+// This class is a synchronization primitive that can be used to protect shared
+// data from being simultaneously accessed by multiple threads. A mutex offers
+// exclusive, non-recursive ownership semantics:
+// - A calling thread owns a mutex from the time that it successfully calls
+// either |Lock()| or |TryLock()| until it calls |Unlock()|.
+// - When a thread owns a mutex, all other threads will block (for calls to
+// |Lock()|) or receive a |false| return value (for |TryLock()|) if they
+// attempt to claim ownership of the mutex.
+// A calling thread must not own the mutex prior to calling |Lock()| or
+// |TryLock()|. The behavior of a program is undefined if a mutex is destroyed
+// while still owned by some thread. The Mutex class is non-copyable.
+
+class Mutex V8_FINAL {
+ public:
+ Mutex();
+ ~Mutex();
+
+  // Locks the given mutex. If the mutex is currently unlocked, it becomes
+  // locked and owned by the calling thread immediately. If the mutex is
+  // already locked by another thread, suspends the calling thread until the
+  // mutex is unlocked.
+ void Lock();
+
+ // Unlocks the given mutex. The mutex is assumed to be locked and owned by
+ // the calling thread on entrance.
+ void Unlock();
+
+ // Tries to lock the given mutex. Returns whether the mutex was
+ // successfully locked.
+ bool TryLock() V8_WARN_UNUSED_RESULT;
+
+ // The implementation-defined native handle type.
+#if V8_OS_POSIX
+ typedef pthread_mutex_t NativeHandle;
+#elif V8_OS_WIN
+ typedef CRITICAL_SECTION NativeHandle;
+#endif
+
+ NativeHandle& native_handle() {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const {
+ return native_handle_;
+ }
+
+ private:
+ NativeHandle native_handle_;
+#ifdef DEBUG
+ int level_;
+#endif
+
+ V8_INLINE void AssertHeldAndUnmark() {
+#ifdef DEBUG
+ ASSERT_EQ(1, level_);
+ level_--;
+#endif
+ }
+
+ V8_INLINE void AssertUnheldAndMark() {
+#ifdef DEBUG
+ ASSERT_EQ(0, level_);
+ level_++;
+#endif
+ }
+
+ friend class ConditionVariable;
+
+ DISALLOW_COPY_AND_ASSIGN(Mutex);
+};
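+
+
+// Example of non-blocking acquisition (illustrative sketch):
+//
+//   Mutex mutex;
+//   if (mutex.TryLock()) {
+//     // ... critical section ...
+//     mutex.Unlock();
+//   } else {
+//     // The mutex is held by another thread; do something else instead.
+//   }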
+
+
+// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
+// Usage:
+// static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
+//
+// void my_function() {
+// LockGuard<Mutex> guard(my_mutex.Pointer());
+// // Do something.
+// }
+//
+typedef LazyStaticInstance<Mutex, DefaultConstructTrait<Mutex>,
+ ThreadSafeInitOnceTrait>::type LazyMutex;
+
+#define LAZY_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+
+// -----------------------------------------------------------------------------
+// RecursiveMutex
+//
+// This class is a synchronization primitive that can be used to protect shared
+// data from being simultaneously accessed by multiple threads. A recursive
+// mutex offers exclusive, recursive ownership semantics:
+// - A calling thread owns a recursive mutex for a period of time that starts
+// when it successfully calls either |Lock()| or |TryLock()|. During this
+// period, the thread may make additional calls to |Lock()| or |TryLock()|.
+// The period of ownership ends when the thread makes a matching number of
+// calls to |Unlock()|.
+// - When a thread owns a recursive mutex, all other threads will block (for
+// calls to |Lock()|) or receive a |false| return value (for |TryLock()|) if
+// they attempt to claim ownership of the recursive mutex.
+// - The maximum number of times that a recursive mutex may be locked is
+// unspecified, but after that number is reached, calls to |Lock()| will
+//   probably abort the process and calls to |TryLock()| will return false.
+// The behavior of a program is undefined if a recursive mutex is destroyed
+// while still owned by some thread. The RecursiveMutex class is non-copyable.
+
+class RecursiveMutex V8_FINAL {
+ public:
+ RecursiveMutex();
+ ~RecursiveMutex();
+
+ // Locks the mutex. If another thread has already locked the mutex, a call to
+ // |Lock()| will block execution until the lock is acquired. A thread may call
+ // |Lock()| on a recursive mutex repeatedly. Ownership will only be released
+ // after the thread makes a matching number of calls to |Unlock()|.
+ // The behavior is undefined if the mutex is not unlocked before being
+ // destroyed, i.e. some thread still owns it.
+ void Lock();
+
+ // Unlocks the mutex if its level of ownership is 1 (there was exactly one
+  // more call to |Lock()| than there were calls to |Unlock()| made by this
+  // thread); otherwise, reduces the level of ownership by 1. The mutex must
+  // be locked by the current thread of execution, otherwise the behavior is
+ // undefined.
+ void Unlock();
+
+ // Tries to lock the given mutex. Returns whether the mutex was
+ // successfully locked.
+ bool TryLock() V8_WARN_UNUSED_RESULT;
+
+ // The implementation-defined native handle type.
+ typedef Mutex::NativeHandle NativeHandle;
+
+ NativeHandle& native_handle() {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const {
+ return native_handle_;
+ }
+
+ private:
+ NativeHandle native_handle_;
+#ifdef DEBUG
+ int level_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(RecursiveMutex);
+};
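+
+
+// Example of recursive acquisition (illustrative sketch):
+//
+//   RecursiveMutex mutex;
+//   mutex.Lock();    // Ownership level 1.
+//   mutex.Lock();    // Ownership level 2; the same thread may lock again.
+//   mutex.Unlock();  // Back to level 1.
+//   mutex.Unlock();  // Ownership released.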
+
+
+// POD RecursiveMutex initialized lazily (i.e. the first time Pointer() is
+// called).
+// Usage:
+// static LazyRecursiveMutex my_mutex = LAZY_RECURSIVE_MUTEX_INITIALIZER;
+//
+// void my_function() {
+// LockGuard<RecursiveMutex> guard(my_mutex.Pointer());
+// // Do something.
+// }
+//
+typedef LazyStaticInstance<RecursiveMutex,
+ DefaultConstructTrait<RecursiveMutex>,
+ ThreadSafeInitOnceTrait>::type LazyRecursiveMutex;
+
+#define LAZY_RECURSIVE_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+
+// -----------------------------------------------------------------------------
+// LockGuard
+//
+// This class is a mutex wrapper that provides a convenient RAII-style mechanism
+// for owning a mutex for the duration of a scoped block.
+// When a LockGuard object is created, it attempts to take ownership of the
+// mutex it is given. When control leaves the scope in which the LockGuard
+// object was created, the LockGuard is destructed and the mutex is released.
+// The LockGuard class is non-copyable.
+
+template <typename Mutex>
+class LockGuard V8_FINAL {
+ public:
+ explicit LockGuard(Mutex* mutex) : mutex_(mutex) { mutex_->Lock(); }
+ ~LockGuard() { mutex_->Unlock(); }
+
+ private:
+ Mutex* mutex_;
+
+ DISALLOW_COPY_AND_ASSIGN(LockGuard);
+};
+
+} } // namespace v8::base
+
+#endif // V8_BASE_PLATFORM_MUTEX_H_
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for Cygwin goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
+
+#include <errno.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <stdarg.h>
+#include <strings.h> // index
+#include <sys/mman.h> // mmap & munmap
+#include <sys/time.h>
+#include <unistd.h> // sysconf
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+#include "src/base/win32-headers.h"
+
+namespace v8 {
+namespace base {
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+ if (std::isnan(time)) return "";
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+ struct tm* t = localtime(&tv);
+ if (NULL == t) return "";
+ return tzname[0]; // The location of the timezone string on Cygwin.
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+ // On Cygwin, struct tm does not contain a tm_gmtoff field.
+ time_t utc = time(NULL);
+ ASSERT(utc != -1);
+ struct tm* loc = localtime(&utc);
+ ASSERT(loc != NULL);
+  // The difference (mktime(loc) - utc) includes any daylight savings offset,
+  // so subtract it.
+ return static_cast<double>((mktime(loc) - utc) * msPerSecond -
+ (loc->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool is_executable) {
+ const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (mbase == MAP_FAILED) return NULL;
+ *allocated = msize;
+ return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+ PosixMemoryMappedFile(FILE* file, void* memory, int size)
+ : file_(file), memory_(memory), size_(size) { }
+ virtual ~PosixMemoryMappedFile();
+ virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
+ private:
+ FILE* file_;
+ void* memory_;
+ int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ FILE* file = fopen(name, "r+");
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+ void* initial) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+ int result = fwrite(initial, size, 1, file);
+ if (result < 1) {
+ fclose(file);
+ return NULL;
+ }
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+ if (memory_) munmap(memory_, size_);
+ fclose(file_);
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  std::vector<SharedLibraryAddress> result;
+ // This function assumes that the layout of the file is as follows:
+ // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
+ // If we encounter an unexpected situation we abort scanning further entries.
+ FILE* fp = fopen("/proc/self/maps", "r");
+ if (fp == NULL) return result;
+
+ // Allocate enough room to be able to store a full file name.
+ const int kLibNameLen = FILENAME_MAX + 1;
+ char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
+
+ // This loop will terminate once the scanning hits an EOF.
+ while (true) {
+ uintptr_t start, end;
+ char attr_r, attr_w, attr_x, attr_p;
+ // Parse the addresses and permission bits at the beginning of the line.
+ if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
+ if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
+
+ int c;
+ if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
+ // Found a read-only executable entry. Skip characters until we reach
+ // the beginning of the filename or the end of the line.
+ do {
+ c = getc(fp);
+ } while ((c != EOF) && (c != '\n') && (c != '/'));
+ if (c == EOF) break; // EOF: Was unexpected, just exit.
+
+ // Process the filename if found.
+ if (c == '/') {
+ ungetc(c, fp); // Push the '/' back into the stream to be read below.
+
+ // Read to the end of the line. Exit if the read fails.
+ if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+
+ // Drop the newline character read by fgets. We do not need to check
+ // for a zero-length string because we know that we at least read the
+ // '/' character.
+ lib_name[strlen(lib_name) - 1] = '\0';
+ } else {
+ // No library name found, just record the raw address range.
+ snprintf(lib_name, kLibNameLen,
+ "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+ }
+ result.push_back(SharedLibraryAddress(lib_name, start, end));
+ } else {
+ // Entry not describing executable data. Skip to end of line to set up
+ // reading the next entry.
+ do {
+ c = getc(fp);
+ } while ((c != EOF) && (c != '\n'));
+ if (c == EOF) break;
+ }
+ }
+ free(lib_name);
+ fclose(fp);
+ return result;
+}
+
+
+void OS::SignalCodeMovingGC() {
+ // Nothing to do on Cygwin.
+}
+
+
+// The VirtualMemory implementation is taken from platform-win32.cc.
+// The mmap-based virtual memory implementation as it is used on most posix
+// platforms does not work well because Cygwin does not support MAP_FIXED.
+// This causes VirtualMemory::Commit to not always commit the memory region
+// specified.
+
+static void* GetRandomAddr() {
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ // Note that the current isolate isn't set up in a call path via
+ // CpuFeatures::Probe. We don't care about randomization in this case because
+ // the code page is immediately freed.
+ if (isolate != NULL) {
+    // The address range used to randomize RWX allocations in OS::Allocate.
+    // Try not to map pages into the default range that Windows loads DLLs
+    // into. Use a multiple of 64k to prevent committing unused memory.
+ // Note: This does not guarantee RWX regions will be within the
+ // range kAllocationRandomAddressMin to kAllocationRandomAddressMax
+#ifdef V8_HOST_ARCH_64_BIT
+ static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
+ static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
+#else
+ static const intptr_t kAllocationRandomAddressMin = 0x04000000;
+ static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
+#endif
+ uintptr_t address =
+ (isolate->random_number_generator()->NextInt() << kPageSizeBits) |
+ kAllocationRandomAddressMin;
+ address &= kAllocationRandomAddressMax;
+ return reinterpret_cast<void *>(address);
+ }
+ return NULL;
+}
+
+
+static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
+ LPVOID base = NULL;
+
+ if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
+    // For executable pages, try to randomize the allocation address.
+ for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
+ base = VirtualAlloc(GetRandomAddr(), size, action, protection);
+ }
+ }
+
+ // After three attempts give up and let the OS find an address to use.
+ if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
+
+ return base;
+}
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* address = ReserveRegion(request_size);
+ if (address == NULL) return;
+ uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
+ // Try reducing the size by freeing and then reallocating a specific area.
+ bool result = ReleaseRegion(address, request_size);
+ USE(result);
+ ASSERT(result);
+ address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
+ if (address != NULL) {
+ request_size = size;
+ ASSERT(base == static_cast<uint8_t*>(address));
+ } else {
+ // Resizing failed, just go with a bigger area.
+ address = ReserveRegion(request_size);
+ if (address == NULL) return;
+ }
+ address_ = address;
+ size_ = request_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ bool result = ReleaseRegion(address_, size_);
+ ASSERT(result);
+ USE(result);
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ ASSERT(IsReserved());
+ return UncommitRegion(address, size);
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+ int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+ if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
+ return false;
+ }
+ return true;
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+ if (NULL == VirtualAlloc(address,
+ OS::CommitPageSize(),
+ MEM_COMMIT,
+ PAGE_NOACCESS)) {
+ return false;
+ }
+ return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return VirtualFree(base, size, MEM_DECOMMIT) != 0;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return VirtualFree(base, 0, MEM_RELEASE) != 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
+} } // namespace v8::base
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for FreeBSD goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/ucontext.h>
+
+#include <sys/fcntl.h> // open
+#include <sys/mman.h> // mmap & munmap
+#include <sys/stat.h> // open
+#include <sys/types.h> // mmap & munmap
+#include <unistd.h> // getpagesize
+// If you don't have execinfo.h then you need devel/libexecinfo from ports.
+#include <errno.h>
+#include <limits.h>
+#include <stdarg.h>
+#include <strings.h> // index
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
+
+namespace v8 {
+namespace base {
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+ if (std::isnan(time)) return "";
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+ struct tm* t = localtime(&tv);
+ if (NULL == t) return "";
+ return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+ time_t tv = time(NULL);
+ struct tm* t = localtime(&tv);
+ // tm_gmtoff includes any daylight savings offset, so subtract it.
+ return static_cast<double>(t->tm_gmtoff * msPerSecond -
+ (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool executable) {
+ const size_t msize = RoundUp(requested, getpagesize());
+ int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+ void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+
+ if (mbase == MAP_FAILED) return NULL;
+ *allocated = msize;
+ return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+ PosixMemoryMappedFile(FILE* file, void* memory, int size)
+ : file_(file), memory_(memory), size_(size) { }
+ virtual ~PosixMemoryMappedFile();
+ virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
+ private:
+ FILE* file_;
+ void* memory_;
+ int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ FILE* file = fopen(name, "r+");
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+ void* initial) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+ int result = fwrite(initial, size, 1, file);
+ if (result < 1) {
+ fclose(file);
+ return NULL;
+ }
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+ if (memory_) munmap(memory_, size_);
+ fclose(file_);
+}
+
+
+static unsigned StringToLong(char* buffer) {
+ return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ std::vector<SharedLibraryAddress> result;
+ static const int MAP_LENGTH = 1024;
+ int fd = open("/proc/self/maps", O_RDONLY);
+ if (fd < 0) return result;
+ while (true) {
+ char addr_buffer[11];
+ addr_buffer[0] = '0';
+ addr_buffer[1] = 'x';
+ addr_buffer[10] = 0;
+ int result = read(fd, addr_buffer + 2, 8);
+ if (result < 8) break;
+ unsigned start = StringToLong(addr_buffer);
+ result = read(fd, addr_buffer + 2, 1);
+ if (result < 1) break;
+ if (addr_buffer[2] != '-') break;
+ result = read(fd, addr_buffer + 2, 8);
+ if (result < 8) break;
+ unsigned end = StringToLong(addr_buffer);
+ char buffer[MAP_LENGTH];
+ int bytes_read = -1;
+ do {
+ bytes_read++;
+ if (bytes_read >= MAP_LENGTH - 1)
+ break;
+ result = read(fd, buffer + bytes_read, 1);
+ if (result < 1) break;
+ } while (buffer[bytes_read] != '\n');
+ buffer[bytes_read] = 0;
+ // Ignore mappings that are not executable.
+ if (buffer[3] != 'x') continue;
+ char* start_of_path = index(buffer, '/');
+ // There may be no filename in this line. Skip to next.
+ if (start_of_path == NULL) continue;
+ buffer[bytes_read] = 0;
+ result.push_back(SharedLibraryAddress(start_of_path, start, end));
+ }
+ close(fd);
+ return result;
+}
+
+
+void OS::SignalCodeMovingGC() {
+}
+
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* reservation = mmap(OS::GetRandomMmapAddr(),
+ request_size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+ if (reservation == MAP_FAILED) return;
+
+ uint8_t* base = static_cast<uint8_t*>(reservation);
+ uint8_t* aligned_base = RoundUp(base, alignment);
+ ASSERT_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ ASSERT_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ ASSERT(aligned_size == request_size);
+
+ address_ = static_cast<void*>(aligned_base);
+ size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ bool result = ReleaseRegion(address(), size());
+ ASSERT(result);
+ USE(result);
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+ OS::Guard(address, OS::CommitPageSize());
+ return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ void* result = mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+
+ if (result == MAP_FAILED) return NULL;
+
+ return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(base,
+ size,
+ prot,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset)) {
+ return false;
+ }
+ return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return mmap(base,
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
+} } // namespace v8::base
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for Linux goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+
+// Ubuntu Dapper requires memory pages to be marked as
+// executable. Otherwise, OS raises an exception when executing code
+// in that page.
+#include <errno.h>
+#include <fcntl.h> // open
+#include <stdarg.h>
+#include <strings.h> // index
+#include <sys/mman.h> // mmap & munmap
+#include <sys/stat.h> // open
+#include <sys/types.h> // mmap & munmap
+#include <unistd.h> // sysconf
+
+// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
+// Old versions of the C library <signal.h> didn't define the type.
+#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
+ (defined(__arm__) || defined(__aarch64__)) && \
+ !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+#include <asm/sigcontext.h> // NOLINT
+#endif
+
+#if defined(LEAK_SANITIZER)
+#include <sanitizer/lsan_interface.h>
+#endif
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
+
+namespace v8 {
+namespace base {
+
+
+#ifdef __arm__
+
+bool OS::ArmUsingHardFloat() {
+ // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
+ // the Floating Point ABI used (PCS stands for Procedure Call Standard).
+ // We use these as well as a couple of other defines to statically determine
+  // what FP ABI is used.
+ // GCC versions 4.4 and below don't support hard-fp.
+ // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
+ // __ARM_PCS_VFP.
+
+#define GCC_VERSION (__GNUC__ * 10000 \
+ + __GNUC_MINOR__ * 100 \
+ + __GNUC_PATCHLEVEL__)
+#if GCC_VERSION >= 40600
+#if defined(__ARM_PCS_VFP)
+ return true;
+#else
+ return false;
+#endif
+
+#elif GCC_VERSION < 40500
+ return false;
+
+#else
+#if defined(__ARM_PCS_VFP)
+ return true;
+#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
+ !defined(__VFP_FP__)
+ return false;
+#else
+#error "Your version of GCC does not report the FP ABI compiled for." \
+ "Please report it on this issue" \
+ "http://code.google.com/p/v8/issues/detail?id=2140"
+
+#endif
+#endif
+#undef GCC_VERSION
+}
+
+#endif // def __arm__
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+ if (std::isnan(time)) return "";
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+ struct tm* t = localtime(&tv);
+ if (NULL == t) return "";
+ return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+ time_t tv = time(NULL);
+ struct tm* t = localtime(&tv);
+ // tm_gmtoff includes any daylight savings offset, so subtract it.
+ return static_cast<double>(t->tm_gmtoff * msPerSecond -
+ (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool is_executable) {
+ const size_t msize = RoundUp(requested, AllocateAlignment());
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ void* addr = OS::GetRandomMmapAddr();
+ void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (mbase == MAP_FAILED) return NULL;
+ *allocated = msize;
+ return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+ PosixMemoryMappedFile(FILE* file, void* memory, int size)
+ : file_(file), memory_(memory), size_(size) { }
+ virtual ~PosixMemoryMappedFile();
+ virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
+ private:
+ FILE* file_;
+ void* memory_;
+ int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ FILE* file = fopen(name, "r+");
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+
+ void* memory =
+ mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ fileno(file),
+ 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+ void* initial) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+ int result = fwrite(initial, size, 1, file);
+ if (result < 1) {
+ fclose(file);
+ return NULL;
+ }
+ void* memory =
+ mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ fileno(file),
+ 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+ if (memory_) OS::Free(memory_, size_);
+ fclose(file_);
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ std::vector<SharedLibraryAddress> result;
+ // This function assumes that the layout of the file is as follows:
+ // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
+ // If we encounter an unexpected situation we abort scanning further entries.
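+  // A typical line looks like (the fields after the permissions are skipped):
+  //   00400000-00452000 r-xp 00000000 08:02 173521  /usr/bin/dbus-daemon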
+ FILE* fp = fopen("/proc/self/maps", "r");
+ if (fp == NULL) return result;
+
+ // Allocate enough room to be able to store a full file name.
+ const int kLibNameLen = FILENAME_MAX + 1;
+ char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
+
+ // This loop will terminate once the scanning hits an EOF.
+ while (true) {
+ uintptr_t start, end;
+ char attr_r, attr_w, attr_x, attr_p;
+ // Parse the addresses and permission bits at the beginning of the line.
+ if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
+ if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
+
+ int c;
+ if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
+ // Found a read-only executable entry. Skip characters until we reach
+ // the beginning of the filename or the end of the line.
+ do {
+ c = getc(fp);
+ } while ((c != EOF) && (c != '\n') && (c != '/') && (c != '['));
+ if (c == EOF) break; // EOF: Was unexpected, just exit.
+
+ // Process the filename if found.
+ if ((c == '/') || (c == '[')) {
+ // Push the '/' or '[' back into the stream to be read below.
+ ungetc(c, fp);
+
+ // Read to the end of the line. Exit if the read fails.
+ if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+
+ // Drop the newline character read by fgets. We do not need to check
+ // for a zero-length string because we know that we at least read the
+ // '/' or '[' character.
+ lib_name[strlen(lib_name) - 1] = '\0';
+ } else {
+ // No library name found, just record the raw address range.
+ snprintf(lib_name, kLibNameLen,
+ "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+ }
+ result.push_back(SharedLibraryAddress(lib_name, start, end));
+ } else {
+ // Entry not describing executable data. Skip to end of line to set up
+ // reading the next entry.
+ do {
+ c = getc(fp);
+ } while ((c != EOF) && (c != '\n'));
+ if (c == EOF) break;
+ }
+ }
+ free(lib_name);
+ fclose(fp);
+ return result;
+}
+
+
+void OS::SignalCodeMovingGC() {
+ // Support for ll_prof.py.
+ //
+ // The Linux profiler built into the kernel logs all mmap's with
+ // PROT_EXEC so that analysis tools can properly attribute ticks. We
+ // do a mmap with a name known by ll_prof.py and immediately munmap
+ // it. This injects a GC marker into the stream of events generated
+ // by the kernel and allows us to synchronize V8 code log and the
+ // kernel log.
+ int size = sysconf(_SC_PAGESIZE);
+ FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+");
+ if (f == NULL) {
+ OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
+ OS::Abort();
+ }
+ void* addr = mmap(OS::GetRandomMmapAddr(),
+ size,
+#if defined(__native_client__)
+ // The Native Client port of V8 uses an interpreter,
+ // so code pages don't need PROT_EXEC.
+ PROT_READ,
+#else
+ PROT_READ | PROT_EXEC,
+#endif
+ MAP_PRIVATE,
+ fileno(f),
+ 0);
+ ASSERT(addr != MAP_FAILED);
+ OS::Free(addr, size);
+ fclose(f);
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* reservation = mmap(OS::GetRandomMmapAddr(),
+ request_size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+ if (reservation == MAP_FAILED) return;
+
+ uint8_t* base = static_cast<uint8_t*>(reservation);
+ uint8_t* aligned_base = RoundUp(base, alignment);
+ ASSERT_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ ASSERT_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ ASSERT(aligned_size == request_size);
+
+ address_ = static_cast<void*>(aligned_base);
+ size_ = aligned_size;
+#if defined(LEAK_SANITIZER)
+ __lsan_register_root_region(address_, size_);
+#endif
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ bool result = ReleaseRegion(address(), size());
+ ASSERT(result);
+ USE(result);
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+ OS::Guard(address, OS::CommitPageSize());
+ return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ void* result = mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+
+ if (result == MAP_FAILED) return NULL;
+
+#if defined(LEAK_SANITIZER)
+ __lsan_register_root_region(result, size);
+#endif
+ return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+#if defined(__native_client__)
+ // The Native Client port of V8 uses an interpreter,
+ // so code pages don't need PROT_EXEC.
+ int prot = PROT_READ | PROT_WRITE;
+#else
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+#endif
+ if (MAP_FAILED == mmap(base,
+ size,
+ prot,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset)) {
+ return false;
+ }
+
+ return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return mmap(base,
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+#if defined(LEAK_SANITIZER)
+ __lsan_unregister_root_region(base, size);
+#endif
+ return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+ return true;
+}
+
+} } // namespace v8::base
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for MacOS goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
+
+#include <dlfcn.h>
+#include <mach/mach_init.h>
+#include <mach-o/dyld.h>
+#include <mach-o/getsect.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <AvailabilityMacros.h>
+
+#include <errno.h>
+#include <libkern/OSAtomic.h>
+#include <mach/mach.h>
+#include <mach/semaphore.h>
+#include <mach/task.h>
+#include <mach/vm_statistics.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/resource.h>
+#include <sys/sysctl.h>
+#include <sys/time.h>
+#include <sys/types.h>
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
+
+namespace v8 {
+namespace base {
+
+
+// Constants used for mmap.
+// kMmapFd is used to pass vm_alloc flags to tag the region with the
+// user-defined tag 255. This helps identify V8-allocated regions in memory
+// analysis tools like vmmap(1).
+static const int kMmapFd = VM_MAKE_TAG(255);
+static const off_t kMmapFdOffset = 0;
+
+
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool is_executable) {
+ const size_t msize = RoundUp(requested, getpagesize());
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ void* mbase = mmap(OS::GetRandomMmapAddr(),
+ msize,
+ prot,
+ MAP_PRIVATE | MAP_ANON,
+ kMmapFd,
+ kMmapFdOffset);
+ if (mbase == MAP_FAILED) return NULL;
+ *allocated = msize;
+ return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+ PosixMemoryMappedFile(FILE* file, void* memory, int size)
+ : file_(file), memory_(memory), size_(size) { }
+ virtual ~PosixMemoryMappedFile();
+ virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
+ private:
+ FILE* file_;
+ void* memory_;
+ int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ FILE* file = fopen(name, "r+");
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+
+ void* memory =
+ mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ fileno(file),
+ 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+ void* initial) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+ int result = fwrite(initial, size, 1, file);
+ if (result < 1) {
+ fclose(file);
+ return NULL;
+ }
+ void* memory =
+ mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ fileno(file),
+ 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+ if (memory_) OS::Free(memory_, size_);
+ fclose(file_);
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ std::vector<SharedLibraryAddress> result;
+ unsigned int images_count = _dyld_image_count();
+ for (unsigned int i = 0; i < images_count; ++i) {
+ const mach_header* header = _dyld_get_image_header(i);
+ if (header == NULL) continue;
+#if V8_HOST_ARCH_X64
+ uint64_t size;
+ char* code_ptr = getsectdatafromheader_64(
+ reinterpret_cast<const mach_header_64*>(header),
+ SEG_TEXT,
+ SECT_TEXT,
+ &size);
+#else
+ unsigned int size;
+ char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
+#endif
+ if (code_ptr == NULL) continue;
+ const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
+ const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
+ result.push_back(
+ SharedLibraryAddress(_dyld_get_image_name(i), start, start + size));
+ }
+ return result;
+}
+
+
+void OS::SignalCodeMovingGC() {
+}
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+ if (std::isnan(time)) return "";
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+ struct tm* t = localtime(&tv);
+ if (NULL == t) return "";
+ return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+ time_t tv = time(NULL);
+ struct tm* t = localtime(&tv);
+ // tm_gmtoff includes any daylight savings offset, so subtract it.
+ return static_cast<double>(t->tm_gmtoff * msPerSecond -
+ (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* reservation = mmap(OS::GetRandomMmapAddr(),
+ request_size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+ if (reservation == MAP_FAILED) return;
+
+ uint8_t* base = static_cast<uint8_t*>(reservation);
+ uint8_t* aligned_base = RoundUp(base, alignment);
+ ASSERT_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ ASSERT_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ ASSERT(aligned_size == request_size);
+
+ address_ = static_cast<void*>(aligned_base);
+ size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ bool result = ReleaseRegion(address(), size());
+ ASSERT(result);
+ USE(result);
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+ OS::Guard(address, OS::CommitPageSize());
+ return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ void* result = mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+
+ if (result == MAP_FAILED) return NULL;
+
+ return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* address,
+ size_t size,
+ bool is_executable) {
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(address,
+ size,
+ prot,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset)) {
+ return false;
+ }
+ return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* address, size_t size) {
+ return mmap(address,
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
+ return munmap(address, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+ return false;
+}
+
+} } // namespace v8::base
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for OpenBSD and NetBSD goes here. For the
+// POSIX-compatible parts, the implementation is in platform-posix.cc.
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/resource.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+
+#include <errno.h>
+#include <fcntl.h> // open
+#include <stdarg.h>
+#include <strings.h> // index
+#include <sys/mman.h> // mmap & munmap
+#include <sys/stat.h> // open
+#include <sys/types.h> // mmap & munmap
+#include <unistd.h> // sysconf
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
+
+namespace v8 {
+namespace base {
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+ if (std::isnan(time)) return "";
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+ struct tm* t = localtime(&tv);
+ if (NULL == t) return "";
+ return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+ time_t tv = time(NULL);
+ struct tm* t = localtime(&tv);
+ // tm_gmtoff includes any daylight savings offset, so subtract it.
+ return static_cast<double>(t->tm_gmtoff * msPerSecond -
+ (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool is_executable) {
+ const size_t msize = RoundUp(requested, AllocateAlignment());
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ void* addr = OS::GetRandomMmapAddr();
+ void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (mbase == MAP_FAILED) return NULL;
+ *allocated = msize;
+ return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+ PosixMemoryMappedFile(FILE* file, void* memory, int size)
+ : file_(file), memory_(memory), size_(size) { }
+ virtual ~PosixMemoryMappedFile();
+ virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
+ private:
+ FILE* file_;
+ void* memory_;
+ int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ FILE* file = fopen(name, "r+");
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+ void* initial) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+ int result = fwrite(initial, size, 1, file);
+ if (result < 1) {
+ fclose(file);
+ return NULL;
+ }
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+ if (memory_) OS::Free(memory_, size_);
+ fclose(file_);
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ std::vector<SharedLibraryAddress> result;
+ // This function assumes that the layout of the file is as follows:
+ // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
+ // If we encounter an unexpected situation we abort scanning further entries.
+ FILE* fp = fopen("/proc/self/maps", "r");
+ if (fp == NULL) return result;
+
+ // Allocate enough room to be able to store a full file name.
+ const int kLibNameLen = FILENAME_MAX + 1;
+ char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
+
+ // This loop will terminate once the scanning hits an EOF.
+ while (true) {
+ uintptr_t start, end;
+ char attr_r, attr_w, attr_x, attr_p;
+ // Parse the addresses and permission bits at the beginning of the line.
+ if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
+ if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
+
+ int c;
+ if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
+ // Found a read-only executable entry. Skip characters until we reach
+ // the beginning of the filename or the end of the line.
+ do {
+ c = getc(fp);
+ } while ((c != EOF) && (c != '\n') && (c != '/'));
+ if (c == EOF) break; // EOF: Was unexpected, just exit.
+
+ // Process the filename if found.
+ if (c == '/') {
+ ungetc(c, fp); // Push the '/' back into the stream to be read below.
+
+ // Read to the end of the line. Exit if the read fails.
+ if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+
+ // Drop the newline character read by fgets. We do not need to check
+ // for a zero-length string because we know that we at least read the
+ // '/' character.
+ lib_name[strlen(lib_name) - 1] = '\0';
+ } else {
+ // No library name found, just record the raw address range.
+ snprintf(lib_name, kLibNameLen,
+ "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+ }
+ result.push_back(SharedLibraryAddress(lib_name, start, end));
+ } else {
+ // Entry not describing executable data. Skip to end of line to set up
+ // reading the next entry.
+ do {
+ c = getc(fp);
+ } while ((c != EOF) && (c != '\n'));
+ if (c == EOF) break;
+ }
+ }
+ free(lib_name);
+ fclose(fp);
+ return result;
+}
+
+
+void OS::SignalCodeMovingGC() {
+ // Support for ll_prof.py.
+ //
+ // The Linux profiler built into the kernel logs all mmap's with
+ // PROT_EXEC so that analysis tools can properly attribute ticks. We
+ // do a mmap with a name known by ll_prof.py and immediately munmap
+ // it. This injects a GC marker into the stream of events generated
+ // by the kernel and allows us to synchronize V8 code log and the
+ // kernel log.
+ int size = sysconf(_SC_PAGESIZE);
+ FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+");
+ if (f == NULL) {
+ OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
+ OS::Abort();
+ }
+ void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
+ fileno(f), 0);
+ ASSERT(addr != MAP_FAILED);
+ OS::Free(addr, size);
+ fclose(f);
+}
+
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* reservation = mmap(OS::GetRandomMmapAddr(),
+ request_size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+ if (reservation == MAP_FAILED) return;
+
+ uint8_t* base = static_cast<uint8_t*>(reservation);
+ uint8_t* aligned_base = RoundUp(base, alignment);
+ ASSERT_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ ASSERT_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ ASSERT(aligned_size == request_size);
+
+ address_ = static_cast<void*>(aligned_base);
+ size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ bool result = ReleaseRegion(address(), size());
+ ASSERT(result);
+ USE(result);
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+ OS::Guard(address, OS::CommitPageSize());
+ return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ void* result = mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+
+ if (result == MAP_FAILED) return NULL;
+
+ return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(base,
+ size,
+ prot,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset)) {
+ return false;
+ }
+ return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return mmap(base,
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
+} } // namespace v8::base
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for POSIX goes here. This is not a platform on its
+// own, but contains the parts which are the same across the POSIX platforms
+// Linux, MacOS, FreeBSD, OpenBSD, NetBSD and QNX.
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <limits.h>
+#include <pthread.h>
+#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
+#include <pthread_np.h> // for pthread_set_name_np
+#endif
+#include <sched.h> // for sched_yield
+#include <time.h>
+#include <unistd.h>
+
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+
+#if defined(__linux__)
+#include <sys/prctl.h> // NOLINT, for prctl
+#endif
+#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
+ defined(__NetBSD__) || defined(__OpenBSD__)
+#include <sys/sysctl.h> // NOLINT, for sysctl
+#endif
+
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <netinet/in.h>
+
+#undef MAP_TYPE
+
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
+#define LOG_TAG "v8"
+#include <android/log.h> // NOLINT
+#endif
+
+#include <cmath>
+#include <cstdlib>
+
+#include "src/base/lazy-instance.h"
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
+#include "src/base/utils/random-number-generator.h"
+
+#ifdef V8_FAST_TLS_SUPPORTED
+#include "src/base/atomicops.h"
+#endif
+
+namespace v8 {
+namespace base {
+
+namespace {
+
+// 0 is never a valid thread id.
+const pthread_t kNoThread = (pthread_t) 0;
+
+bool g_hard_abort = false;
+
+const char* g_gc_fake_mmap = NULL;
+
+} // namespace
+
+
+int OS::NumberOfProcessorsOnline() {
+ return static_cast<int>(sysconf(_SC_NPROCESSORS_ONLN));
+}
+
+
+// Maximum size of the virtual memory. 0 means there is no artificial
+// limit.
+
+intptr_t OS::MaxVirtualMemory() {
+ struct rlimit limit;
+ int result = getrlimit(RLIMIT_DATA, &limit);
+ if (result != 0) return 0;
+#if V8_OS_NACL
+ // The NaCl compiler doesn't like resource.h constants.
+ if (static_cast<int>(limit.rlim_cur) == -1) return 0;
+#else
+ if (limit.rlim_cur == RLIM_INFINITY) return 0;
+#endif
+ return limit.rlim_cur;
+}
+
+
+uint64_t OS::TotalPhysicalMemory() {
+#if V8_OS_MACOSX
+ int mib[2];
+ mib[0] = CTL_HW;
+ mib[1] = HW_MEMSIZE;
+ int64_t size = 0;
+ size_t len = sizeof(size);
+ if (sysctl(mib, 2, &size, &len, NULL, 0) != 0) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<uint64_t>(size);
+#elif V8_OS_FREEBSD
+ int pages, page_size;
+ size_t size = sizeof(pages);
+ sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, NULL, 0);
+ sysctlbyname("vm.stats.vm.v_page_size", &page_size, &size, NULL, 0);
+ if (pages == -1 || page_size == -1) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<uint64_t>(pages) * page_size;
+#elif V8_OS_CYGWIN
+ MEMORYSTATUS memory_info;
+ memory_info.dwLength = sizeof(memory_info);
+ if (!GlobalMemoryStatus(&memory_info)) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<uint64_t>(memory_info.dwTotalPhys);
+#elif V8_OS_QNX
+ struct stat stat_buf;
+ if (stat("/proc", &stat_buf) != 0) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<uint64_t>(stat_buf.st_size);
+#else
+ intptr_t pages = sysconf(_SC_PHYS_PAGES);
+ intptr_t page_size = sysconf(_SC_PAGESIZE);
+ if (pages == -1 || page_size == -1) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<uint64_t>(pages) * page_size;
+#endif
+}
+
+
+int OS::ActivationFrameAlignment() {
+#if V8_TARGET_ARCH_ARM
+ // On EABI ARM targets this is required for fp correctness in the
+ // runtime system.
+ return 8;
+#elif V8_TARGET_ARCH_MIPS
+ return 8;
+#else
+ // Otherwise we just assume 16 byte alignment, i.e.:
+ // - With gcc 4.4 the tree vectorization optimizer can generate code
+ // that requires 16 byte alignment such as movdqa on x86.
+ // - Mac OS X and Solaris (64-bit) activation frames must be 16 byte-aligned;
+ // see "Mac OS X ABI Function Call Guide"
+ return 16;
+#endif
+}
+
+
+intptr_t OS::CommitPageSize() {
+ static intptr_t page_size = getpagesize();
+ return page_size;
+}
+
+
+void OS::Free(void* address, const size_t size) {
+ // TODO(1240712): munmap has a return value which is ignored here.
+ int result = munmap(address, size);
+ USE(result);
+ ASSERT(result == 0);
+}
+
+
+// Get rid of writable permission on code allocations.
+void OS::ProtectCode(void* address, const size_t size) {
+#if V8_OS_CYGWIN
+ DWORD old_protect;
+ VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
+#elif V8_OS_NACL
+ // The Native Client port of V8 uses an interpreter, so
+ // code pages don't need PROT_EXEC.
+ mprotect(address, size, PROT_READ);
+#else
+ mprotect(address, size, PROT_READ | PROT_EXEC);
+#endif
+}
+
+
+// Create guard pages.
+void OS::Guard(void* address, const size_t size) {
+#if V8_OS_CYGWIN
+ DWORD oldprotect;
+ VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
+#else
+ mprotect(address, size, PROT_NONE);
+#endif
+}
+
+
+static LazyInstance<RandomNumberGenerator>::type
+ platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
+
+
+void OS::Initialize(int64_t random_seed, bool hard_abort,
+ const char* const gc_fake_mmap) {
+ if (random_seed) {
+ platform_random_number_generator.Pointer()->SetSeed(random_seed);
+ }
+ g_hard_abort = hard_abort;
+ g_gc_fake_mmap = gc_fake_mmap;
+}
+
+
+const char* OS::GetGCFakeMMapFile() {
+ return g_gc_fake_mmap;
+}
+
+
+void* OS::GetRandomMmapAddr() {
+#if V8_OS_NACL
+ // TODO(bradchen): restore randomization once Native Client gets
+ // smarter about using mmap address hints.
+ // See http://code.google.com/p/nativeclient/issues/3341
+ return NULL;
+#endif
+#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
+ defined(THREAD_SANITIZER)
+ // Dynamic tools do not support custom mmap addresses.
+ return NULL;
+#endif
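+  // Generate random bits for the address hint; the masks below keep the hint
+  // inside a range the kernel is likely to be able to honor. mmap() treats it
+  // only as a hint, never as a fixed placement.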
+ uintptr_t raw_addr;
+ platform_random_number_generator.Pointer()->NextBytes(&raw_addr,
+ sizeof(raw_addr));
+#if V8_TARGET_ARCH_X64
+ // Currently available CPUs have 48 bits of virtual addressing. Truncate
+ // the hint address to 46 bits to give the kernel a fighting chance of
+ // fulfilling our placement request.
+ raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#else
+ raw_addr &= 0x3ffff000;
+
+# ifdef __sun
+ // For our Solaris/illumos mmap hint, we pick a random address in the bottom
+ // half of the top half of the address space (that is, the third quarter).
+ // Because we do not MAP_FIXED, this will be treated only as a hint -- the
+ // system will not fail to mmap() because something else happens to already
+ // be mapped at our random address. We deliberately set the hint high enough
+ // to get well above the system's break (that is, the heap); Solaris and
+ // illumos will try the hint and if that fails allocate as if there were
+ // no hint at all. The high hint prevents the break from getting hemmed in
+ // at low values, ceding half of the address space to the system heap.
+ raw_addr += 0x80000000;
+# else
+ // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+  // variety of ASLR modes (PAE kernel, NX compat mode, etc.) and on Mac OS X
+  // 10.6 and 10.7.
+ raw_addr += 0x20000000;
+# endif
+#endif
+ return reinterpret_cast<void*>(raw_addr);
+}
+
+
+size_t OS::AllocateAlignment() {
+ return static_cast<size_t>(sysconf(_SC_PAGESIZE));
+}
+
+
+void OS::Sleep(int milliseconds) {
+ useconds_t ms = static_cast<useconds_t>(milliseconds);
+ usleep(1000 * ms);
+}
+
+
+void OS::Abort() {
+ if (g_hard_abort) {
+ V8_IMMEDIATE_CRASH();
+ }
+ // Redirect to std abort to signal abnormal program termination.
+ abort();
+}
+
+
+void OS::DebugBreak() {
+#if V8_HOST_ARCH_ARM
+ asm("bkpt 0");
+#elif V8_HOST_ARCH_ARM64
+ asm("brk 0");
+#elif V8_HOST_ARCH_MIPS
+ asm("break");
+#elif V8_HOST_ARCH_IA32
+#if defined(__native_client__)
+ asm("hlt");
+#else
+ asm("int $3");
+#endif // __native_client__
+#elif V8_HOST_ARCH_X64
+ asm("int $3");
+#else
+#error Unsupported host architecture.
+#endif
+}
+
+
+// ----------------------------------------------------------------------------
+// Math functions
+
+double OS::nan_value() {
+ // NAN from math.h is defined in C99 and not in POSIX.
+ return NAN;
+}
+
+
+int OS::GetCurrentProcessId() {
+ return static_cast<int>(getpid());
+}
+
+
+// ----------------------------------------------------------------------------
+// POSIX date/time support.
+//
+
+int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
+ struct rusage usage;
+
+ if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
+ *secs = usage.ru_utime.tv_sec;
+ *usecs = usage.ru_utime.tv_usec;
+ return 0;
+}
+
+
+double OS::TimeCurrentMillis() {
+ return Time::Now().ToJsTime();
+}
+
+
+class TimezoneCache {};
+
+
+TimezoneCache* OS::CreateTimezoneCache() {
+ return NULL;
+}
+
+
+void OS::DisposeTimezoneCache(TimezoneCache* cache) {
+ ASSERT(cache == NULL);
+}
+
+
+void OS::ClearTimezoneCache(TimezoneCache* cache) {
+ ASSERT(cache == NULL);
+}
+
+
+double OS::DaylightSavingsOffset(double time, TimezoneCache*) {
+ if (std::isnan(time)) return nan_value();
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+ struct tm* t = localtime(&tv);
+ if (NULL == t) return nan_value();
+ return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
+}
+
+
+int OS::GetLastError() {
+ return errno;
+}
+
+
+// ----------------------------------------------------------------------------
+// POSIX stdio support.
+//
+
+FILE* OS::FOpen(const char* path, const char* mode) {
+ FILE* file = fopen(path, mode);
+ if (file == NULL) return NULL;
+ struct stat file_stat;
+ if (fstat(fileno(file), &file_stat) != 0) return NULL;
+ bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
+ if (is_regular_file) return file;
+ fclose(file);
+ return NULL;
+}
+
+
+bool OS::Remove(const char* path) {
+ return (remove(path) == 0);
+}
+
+
+FILE* OS::OpenTemporaryFile() {
+ return tmpfile();
+}
+
+
+const char* const OS::LogFileOpenMode = "w";
+
+
+void OS::Print(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ VPrint(format, args);
+ va_end(args);
+}
+
+
+void OS::VPrint(const char* format, va_list args) {
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
+ __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
+#else
+ vprintf(format, args);
+#endif
+}
+
+
+void OS::FPrint(FILE* out, const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ VFPrint(out, format, args);
+ va_end(args);
+}
+
+
+void OS::VFPrint(FILE* out, const char* format, va_list args) {
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
+ __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
+#else
+ vfprintf(out, format, args);
+#endif
+}
+
+
+void OS::PrintError(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ VPrintError(format, args);
+ va_end(args);
+}
+
+
+void OS::VPrintError(const char* format, va_list args) {
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
+ __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args);
+#else
+ vfprintf(stderr, format, args);
+#endif
+}
+
+
+int OS::SNPrintF(char* str, int length, const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ int result = VSNPrintF(str, length, format, args);
+ va_end(args);
+ return result;
+}
+
+
+int OS::VSNPrintF(char* str,
+ int length,
+ const char* format,
+ va_list args) {
+ int n = vsnprintf(str, length, format, args);
+ if (n < 0 || n >= length) {
+    // The output was truncated (or an error occurred); make sure the result
+    // is still zero-terminated. Skip the assignment when length is zero.
+ if (length > 0)
+ str[length - 1] = '\0';
+ return -1;
+ } else {
+ return n;
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// POSIX string support.
+//
+
+char* OS::StrChr(char* str, int c) {
+ return strchr(str, c);
+}
+
+
+void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
+ strncpy(dest, src, n);
+}
+
+
+// ----------------------------------------------------------------------------
+// POSIX thread support.
+//
+
+class Thread::PlatformData {
+ public:
+ PlatformData() : thread_(kNoThread) {}
+ pthread_t thread_; // Thread handle for pthread.
+ // Synchronizes thread creation
+ Mutex thread_creation_mutex_;
+};
+
+Thread::Thread(const Options& options)
+ : data_(new PlatformData),
+ stack_size_(options.stack_size()),
+ start_semaphore_(NULL) {
+ if (stack_size_ > 0 && stack_size_ < PTHREAD_STACK_MIN) {
+ stack_size_ = PTHREAD_STACK_MIN;
+ }
+ set_name(options.name());
+}
+
+
+Thread::~Thread() {
+ delete data_;
+}
+
+
+static void SetThreadName(const char* name) {
+#if V8_OS_DRAGONFLYBSD || V8_OS_FREEBSD || V8_OS_OPENBSD
+ pthread_set_name_np(pthread_self(), name);
+#elif V8_OS_NETBSD
+ STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP);
+ pthread_setname_np(pthread_self(), "%s", name);
+#elif V8_OS_MACOSX
+ // pthread_setname_np is only available in 10.6 or later, so test
+ // for it at runtime.
+ int (*dynamic_pthread_setname_np)(const char*);
+ *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
+ dlsym(RTLD_DEFAULT, "pthread_setname_np");
+ if (dynamic_pthread_setname_np == NULL)
+ return;
+
+ // Mac OS X does not expose the length limit of the name, so hardcode it.
+ static const int kMaxNameLength = 63;
+ STATIC_ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
+ dynamic_pthread_setname_np(name);
+#elif defined(PR_SET_NAME)
+ prctl(PR_SET_NAME,
+ reinterpret_cast<unsigned long>(name), // NOLINT
+ 0, 0, 0);
+#endif
+}
+
+
+static void* ThreadEntry(void* arg) {
+ Thread* thread = reinterpret_cast<Thread*>(arg);
+ // We take the lock here to make sure that pthread_create finished first since
+ // we don't know which thread will run first (the original thread or the new
+ // one).
+ { LockGuard<Mutex> lock_guard(&thread->data()->thread_creation_mutex_); }
+ SetThreadName(thread->name());
+ ASSERT(thread->data()->thread_ != kNoThread);
+ thread->NotifyStartedAndRun();
+ return NULL;
+}
+
+
+void Thread::set_name(const char* name) {
+ strncpy(name_, name, sizeof(name_));
+ name_[sizeof(name_) - 1] = '\0';
+}
+
+
+void Thread::Start() {
+ int result;
+ pthread_attr_t attr;
+ memset(&attr, 0, sizeof(attr));
+ result = pthread_attr_init(&attr);
+ ASSERT_EQ(0, result);
+ // Native client uses default stack size.
+#if !V8_OS_NACL
+ if (stack_size_ > 0) {
+ result = pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
+ ASSERT_EQ(0, result);
+ }
+#endif
+ {
+ LockGuard<Mutex> lock_guard(&data_->thread_creation_mutex_);
+ result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
+ }
+ ASSERT_EQ(0, result);
+ result = pthread_attr_destroy(&attr);
+ ASSERT_EQ(0, result);
+ ASSERT(data_->thread_ != kNoThread);
+ USE(result);
+}
+
+
+void Thread::Join() {
+ pthread_join(data_->thread_, NULL);
+}
+
+
+void Thread::YieldCPU() {
+ int result = sched_yield();
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
+#if V8_OS_CYGWIN
+ // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
+ // because pthread_key_t is a pointer type on Cygwin. This will probably not
+ // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
+ STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
+ intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
+ return static_cast<Thread::LocalStorageKey>(ptr_key);
+#else
+ return static_cast<Thread::LocalStorageKey>(pthread_key);
+#endif
+}
+
+
+static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) {
+#if V8_OS_CYGWIN
+ STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
+ intptr_t ptr_key = static_cast<intptr_t>(local_key);
+ return reinterpret_cast<pthread_key_t>(ptr_key);
+#else
+ return static_cast<pthread_key_t>(local_key);
+#endif
+}
+
+
+#ifdef V8_FAST_TLS_SUPPORTED
+
+static Atomic32 tls_base_offset_initialized = 0;
+intptr_t kMacTlsBaseOffset = 0;
+
+// It's safe to do the initialization more than once, but it has to be
+// done at least once.
+static void InitializeTlsBaseOffset() {
+ const size_t kBufferSize = 128;
+ char buffer[kBufferSize];
+ size_t buffer_size = kBufferSize;
+ int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
+ if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
+ V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
+ }
+ // The buffer now contains a string of the form XX.YY.ZZ, where
+ // XX is the major kernel version component.
+ // Make sure the buffer is 0-terminated.
+ buffer[kBufferSize - 1] = '\0';
+ char* period_pos = strchr(buffer, '.');
+ *period_pos = '\0';
+ int kernel_version_major =
+ static_cast<int>(strtol(buffer, NULL, 10)); // NOLINT
+ // The constants below are taken from pthreads.s from the XNU kernel
+ // sources archive at www.opensource.apple.com.
+ if (kernel_version_major < 11) {
+ // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
+ // same offsets.
+#if V8_HOST_ARCH_IA32
+ kMacTlsBaseOffset = 0x48;
+#else
+ kMacTlsBaseOffset = 0x60;
+#endif
+ } else {
+ // 11.x.x (Lion) changed the offset.
+ kMacTlsBaseOffset = 0;
+ }
+
+ Release_Store(&tls_base_offset_initialized, 1);
+}
+
+
+static void CheckFastTls(Thread::LocalStorageKey key) {
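+  // Store an arbitrary marker value through the fast TLS path and read it
+  // back; a mismatch means the hard-coded TLS base offset is wrong for this
+  // kernel.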
+ void* expected = reinterpret_cast<void*>(0x1234CAFE);
+ Thread::SetThreadLocal(key, expected);
+ void* actual = Thread::GetExistingThreadLocal(key);
+ if (expected != actual) {
+ V8_Fatal(__FILE__, __LINE__,
+ "V8 failed to initialize fast TLS on current kernel");
+ }
+ Thread::SetThreadLocal(key, NULL);
+}
+
+#endif // V8_FAST_TLS_SUPPORTED
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+#ifdef V8_FAST_TLS_SUPPORTED
+ bool check_fast_tls = false;
+ if (tls_base_offset_initialized == 0) {
+ check_fast_tls = true;
+ InitializeTlsBaseOffset();
+ }
+#endif
+ pthread_key_t key;
+ int result = pthread_key_create(&key, NULL);
+ ASSERT_EQ(0, result);
+ USE(result);
+ LocalStorageKey local_key = PthreadKeyToLocalKey(key);
+#ifdef V8_FAST_TLS_SUPPORTED
+ // If we just initialized fast TLS support, make sure it works.
+ if (check_fast_tls) CheckFastTls(local_key);
+#endif
+ return local_key;
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+ pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
+ int result = pthread_key_delete(pthread_key);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+ pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
+ return pthread_getspecific(pthread_key);
+}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+ pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
+ int result = pthread_setspecific(pthread_key, value);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+} } // namespace v8::base
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for QNX goes here. For the POSIX-compatible
+// parts the implementation is in platform-posix.cc.
+
+#include <backtrace.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <ucontext.h>
+
+// QNX requires memory pages to be marked as executable.
+// Otherwise, the OS raises an exception when executing code in that page.
+#include <errno.h>
+#include <fcntl.h> // open
+#include <stdarg.h>
+#include <strings.h> // index
+#include <sys/mman.h> // mmap & munmap
+#include <sys/procfs.h>
+#include <sys/stat.h> // open
+#include <sys/types.h> // mmap & munmap
+#include <unistd.h> // sysconf
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
+
+namespace v8 {
+namespace base {
+
+// 0 is never a valid thread id on QNX since tids and pids share a
+// name space and pid 0 is reserved (see man 2 kill).
+static const pthread_t kNoThread = (pthread_t) 0;
+
+
+#ifdef __arm__
+
+bool OS::ArmUsingHardFloat() {
+ // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
+ // the Floating Point ABI used (PCS stands for Procedure Call Standard).
+ // We use these as well as a couple of other defines to statically determine
+  // what FP ABI is used.
+ // GCC versions 4.4 and below don't support hard-fp.
+ // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
+ // __ARM_PCS_VFP.
+
+#define GCC_VERSION (__GNUC__ * 10000 \
+ + __GNUC_MINOR__ * 100 \
+ + __GNUC_PATCHLEVEL__)
+#if GCC_VERSION >= 40600
+#if defined(__ARM_PCS_VFP)
+ return true;
+#else
+ return false;
+#endif
+
+#elif GCC_VERSION < 40500
+ return false;
+
+#else
+#if defined(__ARM_PCS_VFP)
+ return true;
+#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
+ !defined(__VFP_FP__)
+ return false;
+#else
+#error "Your version of GCC does not report the FP ABI compiled for." \
+ "Please report it on this issue" \
+ "http://code.google.com/p/v8/issues/detail?id=2140"
+
+#endif
+#endif
+#undef GCC_VERSION
+}
+
+#endif // __arm__
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+ if (std::isnan(time)) return "";
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+ struct tm* t = localtime(&tv);
+ if (NULL == t) return "";
+ return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+ time_t tv = time(NULL);
+ struct tm* t = localtime(&tv);
+ // tm_gmtoff includes any daylight savings offset, so subtract it.
+ return static_cast<double>(t->tm_gmtoff * msPerSecond -
+ (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool is_executable) {
+ const size_t msize = RoundUp(requested, AllocateAlignment());
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ void* addr = OS::GetRandomMmapAddr();
+ void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (mbase == MAP_FAILED) return NULL;
+ *allocated = msize;
+ return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+ PosixMemoryMappedFile(FILE* file, void* memory, int size)
+ : file_(file), memory_(memory), size_(size) { }
+ virtual ~PosixMemoryMappedFile();
+ virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
+ private:
+ FILE* file_;
+ void* memory_;
+ int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ FILE* file = fopen(name, "r+");
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+
+ void* memory =
+ mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ fileno(file),
+ 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+ void* initial) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+ int result = fwrite(initial, size, 1, file);
+ if (result < 1) {
+ fclose(file);
+ return NULL;
+ }
+ void* memory =
+ mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ fileno(file),
+ 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+ if (memory_) OS::Free(memory_, size_);
+ fclose(file_);
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ std::vector<SharedLibraryAddress> result;
+ procfs_mapinfo *mapinfos = NULL, *mapinfo;
+ int proc_fd, num, i;
+
+ struct {
+ procfs_debuginfo info;
+ char buff[PATH_MAX];
+ } map;
+
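+  // On QNX, /proc/<pid>/as exposes the address space of the process; query it
+  // with devctl() below to enumerate the mapped ELF objects.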
+ char buf[PATH_MAX + 1];
+ snprintf(buf, PATH_MAX + 1, "/proc/%d/as", getpid());
+
+ if ((proc_fd = open(buf, O_RDONLY)) == -1) {
+ close(proc_fd);
+ return result;
+ }
+
+ /* Get the number of map entries. */
+ if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) {
+ close(proc_fd);
+ return result;
+ }
+
+ mapinfos = reinterpret_cast<procfs_mapinfo *>(
+ malloc(num * sizeof(procfs_mapinfo)));
+ if (mapinfos == NULL) {
+ close(proc_fd);
+ return result;
+ }
+
+ /* Fill the map entries. */
+ if (devctl(proc_fd, DCMD_PROC_PAGEDATA,
+ mapinfos, num * sizeof(procfs_mapinfo), &num) != EOK) {
+ free(mapinfos);
+ close(proc_fd);
+ return result;
+ }
+
+ for (i = 0; i < num; i++) {
+ mapinfo = mapinfos + i;
+ if (mapinfo->flags & MAP_ELF) {
+ map.info.vaddr = mapinfo->vaddr;
+ if (devctl(proc_fd, DCMD_PROC_MAPDEBUG, &map, sizeof(map), 0) != EOK) {
+ continue;
+ }
+ result.push_back(SharedLibraryAddress(
+ map.info.path, mapinfo->vaddr, mapinfo->vaddr + mapinfo->size));
+ }
+ }
+ free(mapinfos);
+ close(proc_fd);
+ return result;
+}
+
+
+void OS::SignalCodeMovingGC() {
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* reservation = mmap(OS::GetRandomMmapAddr(),
+ request_size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
+ kMmapFd,
+ kMmapFdOffset);
+ if (reservation == MAP_FAILED) return;
+
+ uint8_t* base = static_cast<uint8_t*>(reservation);
+ uint8_t* aligned_base = RoundUp(base, alignment);
+ ASSERT_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ ASSERT_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ ASSERT(aligned_size == request_size);
+
+ address_ = static_cast<void*>(aligned_base);
+ size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ bool result = ReleaseRegion(address(), size());
+ ASSERT(result);
+ USE(result);
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+ OS::Guard(address, OS::CommitPageSize());
+ return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ void* result = mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
+ kMmapFd,
+ kMmapFdOffset);
+
+ if (result == MAP_FAILED) return NULL;
+
+ return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(base,
+ size,
+ prot,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset)) {
+ return false;
+ }
+
+ return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return mmap(base,
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY,
+ kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+ return false;
+}
+
+} } // namespace v8::base
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for Solaris 10 goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
+
+#ifdef __sparc
+# error "V8 does not support the SPARC CPU architecture."
+#endif
+
+#include <dlfcn.h> // dladdr
+#include <errno.h>
+#include <ieeefp.h> // finite()
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h> // sigemptyset(), etc
+#include <sys/mman.h> // mmap()
+#include <sys/regset.h>
+#include <sys/stack.h> // for stack alignment
+#include <sys/time.h> // gettimeofday(), timeradd()
+#include <time.h>
+#include <ucontext.h> // walkstack(), getcontext()
+#include <unistd.h> // getpagesize(), usleep()
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
+
+// It seems there is a bug in some Solaris distributions (experienced in
+// SunOS 5.10 Generic_141445-09) which make it difficult or impossible to
+// access signbit() despite the availability of other C99 math functions.
+#ifndef signbit
+namespace std {
+// Test sign - usually defined in math.h
+int signbit(double x) {
+ // We need to take care of the special case of both positive and negative
+ // versions of zero.
+ if (x == 0) {
+ return fpclass(x) & FP_NZERO;
+ } else {
+ // This won't detect negative NaN but that should be okay since we don't
+ // assume that behavior.
+ return x < 0;
+ }
+}
+} // namespace std
+#endif // signbit
+
+namespace v8 {
+namespace base {
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+ if (std::isnan(time)) return "";
+ time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+ struct tm* t = localtime(&tv);
+ if (NULL == t) return "";
+ return tzname[0]; // The location of the timezone string on Solaris.
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+ tzset();
+ return -static_cast<double>(timezone * msPerSecond);
+}
+
+
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool is_executable) {
+ const size_t msize = RoundUp(requested, getpagesize());
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+
+ if (mbase == MAP_FAILED) return NULL;
+ *allocated = msize;
+ return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+ PosixMemoryMappedFile(FILE* file, void* memory, int size)
+ : file_(file), memory_(memory), size_(size) { }
+ virtual ~PosixMemoryMappedFile();
+ virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
+ private:
+ FILE* file_;
+ void* memory_;
+ int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ FILE* file = fopen(name, "r+");
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+ void* initial) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+ int result = fwrite(initial, size, 1, file);
+ if (result < 1) {
+ fclose(file);
+ return NULL;
+ }
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+ if (memory_) munmap(memory_, size_);
+ fclose(file_);
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ return std::vector<SharedLibraryAddress>();
+}
+
+
+void OS::SignalCodeMovingGC() {
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* reservation = mmap(OS::GetRandomMmapAddr(),
+ request_size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+ if (reservation == MAP_FAILED) return;
+
+ uint8_t* base = static_cast<uint8_t*>(reservation);
+ uint8_t* aligned_base = RoundUp(base, alignment);
+ ASSERT_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ ASSERT_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ ASSERT(aligned_size == request_size);
+
+ address_ = static_cast<void*>(aligned_base);
+ size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ bool result = ReleaseRegion(address(), size());
+ ASSERT(result);
+ USE(result);
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+ OS::Guard(address, OS::CommitPageSize());
+ return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ void* result = mmap(OS::GetRandomMmapAddr(),
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+
+ if (result == MAP_FAILED) return NULL;
+
+ return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(base,
+ size,
+ prot,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset)) {
+ return false;
+ }
+ return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return mmap(base,
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
+} } // namespace v8::base
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for Win32.
+
+// Secure API functions are not available using MinGW with msvcrt.dll
+// on Windows XP. Make sure MINGW_HAS_SECURE_API is not defined to
+// disable definition of secure API functions in standard headers that
+// would conflict with our own implementation.
+#ifdef __MINGW32__
+#include <_mingw.h>
+#ifdef MINGW_HAS_SECURE_API
+#undef MINGW_HAS_SECURE_API
+#endif // MINGW_HAS_SECURE_API
+#endif // __MINGW32__
+
+#ifdef _MSC_VER
+#include <limits>
+#endif
+
+#include "src/base/win32-headers.h"
+
+#include "src/base/lazy-instance.h"
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
+#include "src/base/utils/random-number-generator.h"
+
+#ifdef _MSC_VER
+
+// Case-insensitive bounded string comparisons. Use stricmp() on Win32. Usually
+// defined in strings.h.
+int strncasecmp(const char* s1, const char* s2, int n) {
+ return _strnicmp(s1, s2, n);
+}
+
+#endif // _MSC_VER
+
+
+// Extra functions for MinGW. Most of these are the _s functions which are in
+// the Microsoft Visual Studio C++ CRT.
+#ifdef __MINGW32__
+
+
+#ifndef __MINGW64_VERSION_MAJOR
+
+#define _TRUNCATE 0
+#define STRUNCATE 80
+
+inline void MemoryBarrier() {
+ int barrier = 0;
+ __asm__ __volatile__("xchgl %%eax,%0 ":"=r" (barrier));
+}
+
+#endif // __MINGW64_VERSION_MAJOR
+
+
+int localtime_s(tm* out_tm, const time_t* time) {
+ tm* posix_local_time_struct = localtime(time);
+ if (posix_local_time_struct == NULL) return 1;
+ *out_tm = *posix_local_time_struct;
+ return 0;
+}
+
+
+int fopen_s(FILE** pFile, const char* filename, const char* mode) {
+ *pFile = fopen(filename, mode);
+ return *pFile != NULL ? 0 : 1;
+}
+
+int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
+ const char* format, va_list argptr) {
+ ASSERT(count == _TRUNCATE);
+ return _vsnprintf(buffer, sizeOfBuffer, format, argptr);
+}
+
+
+int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) {
+ CHECK(source != NULL);
+ CHECK(dest != NULL);
+ CHECK_GT(dest_size, 0);
+
+ if (count == _TRUNCATE) {
+ while (dest_size > 0 && *source != 0) {
+ *(dest++) = *(source++);
+ --dest_size;
+ }
+ if (dest_size == 0) {
+ *(dest - 1) = 0;
+ return STRUNCATE;
+ }
+ } else {
+ while (dest_size > 0 && count > 0 && *source != 0) {
+ *(dest++) = *(source++);
+ --dest_size;
+ --count;
+ }
+ }
+ CHECK_GT(dest_size, 0);
+ *dest = 0;
+ return 0;
+}
+
+#endif // __MINGW32__
+
+namespace v8 {
+namespace base {
+
+namespace {
+
+bool g_hard_abort = false;
+
+} // namespace
+
+intptr_t OS::MaxVirtualMemory() {
+ return 0;
+}
+
+
+class TimezoneCache {
+ public:
+ TimezoneCache() : initialized_(false) { }
+
+ void Clear() {
+ initialized_ = false;
+ }
+
+ // Initialize timezone information. The timezone information is obtained from
+  // Windows. If we cannot get the timezone information, we fall back to CET.
+ void InitializeIfNeeded() {
+ // Just return if timezone information has already been initialized.
+ if (initialized_) return;
+
+ // Initialize POSIX time zone data.
+ _tzset();
+ // Obtain timezone information from operating system.
+ memset(&tzinfo_, 0, sizeof(tzinfo_));
+ if (GetTimeZoneInformation(&tzinfo_) == TIME_ZONE_ID_INVALID) {
+ // If we cannot get timezone information we fall back to CET.
+ tzinfo_.Bias = -60;
+ tzinfo_.StandardDate.wMonth = 10;
+ tzinfo_.StandardDate.wDay = 5;
+ tzinfo_.StandardDate.wHour = 3;
+ tzinfo_.StandardBias = 0;
+ tzinfo_.DaylightDate.wMonth = 3;
+ tzinfo_.DaylightDate.wDay = 5;
+ tzinfo_.DaylightDate.wHour = 2;
+ tzinfo_.DaylightBias = -60;
+ }
+
+ // Make standard and DST timezone names.
+ WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1,
+ std_tz_name_, kTzNameSize, NULL, NULL);
+ std_tz_name_[kTzNameSize - 1] = '\0';
+ WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1,
+ dst_tz_name_, kTzNameSize, NULL, NULL);
+ dst_tz_name_[kTzNameSize - 1] = '\0';
+
+    // If the OS returned an empty string or a resource id (like
+    // "@tzres.dll,-211"), simply guess the name from the UTC bias of the
+    // timezone.
+ // To properly resolve the resource identifier requires a library load,
+ // which is not possible in a sandbox.
+ if (std_tz_name_[0] == '\0' || std_tz_name_[0] == '@') {
+ OS::SNPrintF(std_tz_name_, kTzNameSize - 1,
+ "%s Standard Time",
+ GuessTimezoneNameFromBias(tzinfo_.Bias));
+ }
+ if (dst_tz_name_[0] == '\0' || dst_tz_name_[0] == '@') {
+ OS::SNPrintF(dst_tz_name_, kTzNameSize - 1,
+ "%s Daylight Time",
+ GuessTimezoneNameFromBias(tzinfo_.Bias));
+ }
+ // Timezone information initialized.
+ initialized_ = true;
+ }
+
+ // Guess the name of the timezone from the bias.
+ // The guess is very biased towards the northern hemisphere.
+ const char* GuessTimezoneNameFromBias(int bias) {
+ static const int kHour = 60;
+ switch (-bias) {
+ case -9*kHour: return "Alaska";
+ case -8*kHour: return "Pacific";
+ case -7*kHour: return "Mountain";
+ case -6*kHour: return "Central";
+ case -5*kHour: return "Eastern";
+ case -4*kHour: return "Atlantic";
+ case 0*kHour: return "GMT";
+ case +1*kHour: return "Central Europe";
+ case +2*kHour: return "Eastern Europe";
+ case +3*kHour: return "Russia";
+ case +5*kHour + 30: return "India";
+ case +8*kHour: return "China";
+ case +9*kHour: return "Japan";
+ case +12*kHour: return "New Zealand";
+ default: return "Local";
+ }
+ }
+
+
+ private:
+ static const int kTzNameSize = 128;
+ bool initialized_;
+ char std_tz_name_[kTzNameSize];
+ char dst_tz_name_[kTzNameSize];
+ TIME_ZONE_INFORMATION tzinfo_;
+ friend class Win32Time;
+};
+
+
+// ----------------------------------------------------------------------------
+// The Win32Time class represents time on Win32. A timestamp is represented as
+// a 64-bit integer in 100-nanosecond intervals since January 1, 1601 (UTC).
+// JavaScript timestamps are represented as doubles in milliseconds since
+// 00:00:00 UTC, January 1, 1970.
+
+class Win32Time {
+ public:
+ // Constructors.
+ Win32Time();
+ explicit Win32Time(double jstime);
+ Win32Time(int year, int mon, int day, int hour, int min, int sec);
+
+ // Convert timestamp to JavaScript representation.
+ double ToJSTime();
+
+ // Set timestamp to current time.
+ void SetToCurrentTime();
+
+ // Returns the local timezone offset in milliseconds east of UTC. This is
+ // the number of milliseconds you must add to UTC to get local time, i.e.
+ // LocalOffset(CET) = 3600000 and LocalOffset(PST) = -28800000. This
+  // routine also takes into account whether daylight saving is in effect
+ // at the time.
+ int64_t LocalOffset(TimezoneCache* cache);
+
+ // Returns the daylight savings time offset for the time in milliseconds.
+ int64_t DaylightSavingsOffset(TimezoneCache* cache);
+
+ // Returns a string identifying the current timezone for the
+ // timestamp taking into account daylight saving.
+ char* LocalTimezone(TimezoneCache* cache);
+
+ private:
+ // Constants for time conversion.
+ static const int64_t kTimeEpoc = 116444736000000000LL;
+ static const int64_t kTimeScaler = 10000;
+ static const int64_t kMsPerMinute = 60000;
+
+ // Constants for timezone information.
+ static const bool kShortTzNames = false;
+
+ // Return whether or not daylight savings time is in effect at this time.
+ bool InDST(TimezoneCache* cache);
+
+ // Accessor for FILETIME representation.
+ FILETIME& ft() { return time_.ft_; }
+
+ // Accessor for integer representation.
+ int64_t& t() { return time_.t_; }
+
+ // Although win32 uses 64-bit integers for representing timestamps,
+ // these are packed into a FILETIME structure. The FILETIME structure
+ // is just a struct representing a 64-bit integer. The TimeStamp union
+ // allows access to both a FILETIME and an integer representation of
+ // the timestamp.
+ union TimeStamp {
+ FILETIME ft_;
+ int64_t t_;
+ };
+
+ TimeStamp time_;
+};
+
+
+// Initialize timestamp to the start of the epoch.
+Win32Time::Win32Time() {
+ t() = 0;
+}
+
+
+// Initialize timestamp from a JavaScript timestamp.
+Win32Time::Win32Time(double jstime) {
+ t() = static_cast<int64_t>(jstime) * kTimeScaler + kTimeEpoc;
+}
+
+
+// Initialize timestamp from date/time components.
+Win32Time::Win32Time(int year, int mon, int day, int hour, int min, int sec) {
+ SYSTEMTIME st;
+ st.wYear = year;
+ st.wMonth = mon;
+ st.wDay = day;
+ st.wHour = hour;
+ st.wMinute = min;
+ st.wSecond = sec;
+ st.wMilliseconds = 0;
+ SystemTimeToFileTime(&st, &ft());
+}
+
+
+// Convert timestamp to JavaScript timestamp.
+double Win32Time::ToJSTime() {
+ return static_cast<double>((t() - kTimeEpoc) / kTimeScaler);
+}
+
+
+// Set timestamp to current time.
+void Win32Time::SetToCurrentTime() {
+ // The default GetSystemTimeAsFileTime has a ~15.5ms resolution.
+ // Because we're fast, we like fast timers which have at least a
+ // 1ms resolution.
+ //
+ // timeGetTime() provides 1ms granularity when combined with
+ // timeBeginPeriod(). If the host application for v8 wants fast
+ // timers, it can use timeBeginPeriod to increase the resolution.
+ //
+  // Using timeGetTime() has a drawback: it is a 32-bit value and hence
+  // rolls over every ~49 days.
+ //
+ // To use the clock, we use GetSystemTimeAsFileTime as our base;
+ // and then use timeGetTime to extrapolate current time from the
+  // start time. To deal with rollovers, we resync the clock whenever
+  // more than kMaxClockElapsedTime has passed or whenever timeGetTime
+  // rolls over.
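+  //
+  // (Illustrative arithmetic, for reference only: timeGetTime() wraps after
+  //  2^32 ms, i.e. 4294967296 / (1000 * 60 * 60 * 24) ~ 49.7 days.)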
+
+ static bool initialized = false;
+ static TimeStamp init_time;
+ static DWORD init_ticks;
+ static const int64_t kHundredNanosecondsPerSecond = 10000000;
+ static const int64_t kMaxClockElapsedTime =
+ 60*kHundredNanosecondsPerSecond; // 1 minute
+
+ // If we are uninitialized, we need to resync the clock.
+ bool needs_resync = !initialized;
+
+ // Get the current time.
+ TimeStamp time_now;
+ GetSystemTimeAsFileTime(&time_now.ft_);
+ DWORD ticks_now = timeGetTime();
+
+ // Check if we need to resync due to clock rollover.
+ needs_resync |= ticks_now < init_ticks;
+
+ // Check if we need to resync due to elapsed time.
+ needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime;
+
+ // Check if we need to resync due to backwards time change.
+ needs_resync |= time_now.t_ < init_time.t_;
+
+ // Resync the clock if necessary.
+ if (needs_resync) {
+ GetSystemTimeAsFileTime(&init_time.ft_);
+ init_ticks = ticks_now = timeGetTime();
+ initialized = true;
+ }
+
+ // Finally, compute the actual time. Why is this so hard.
+ DWORD elapsed = ticks_now - init_ticks;
+ this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000);
+}
+
+
+// Return the local timezone offset in milliseconds east of UTC. This
+// takes into account whether daylight saving is in effect at the time.
+// Only times in the 32-bit Unix range may be passed to this function.
+// Also, adding the time-zone offset to the input must not overflow.
+// The function EquivalentTime() in date.js guarantees this.
+int64_t Win32Time::LocalOffset(TimezoneCache* cache) {
+ cache->InitializeIfNeeded();
+
+ Win32Time rounded_to_second(*this);
+ rounded_to_second.t() = rounded_to_second.t() / 1000 / kTimeScaler *
+ 1000 * kTimeScaler;
+ // Convert to local time using POSIX localtime function.
+ // Windows XP Service Pack 3 made SystemTimeToTzSpecificLocalTime()
+ // very slow. Other browsers use localtime().
+
+ // Convert from JavaScript milliseconds past 1/1/1970 0:00:00 to
+ // POSIX seconds past 1/1/1970 0:00:00.
+ double unchecked_posix_time = rounded_to_second.ToJSTime() / 1000;
+ if (unchecked_posix_time > INT_MAX || unchecked_posix_time < 0) {
+ return 0;
+ }
+ // Because _USE_32BIT_TIME_T is defined, time_t is a 32-bit int.
+ time_t posix_time = static_cast<time_t>(unchecked_posix_time);
+
+ // Convert to local time, as struct with fields for day, hour, year, etc.
+ tm posix_local_time_struct;
+ if (localtime_s(&posix_local_time_struct, &posix_time)) return 0;
+
+ if (posix_local_time_struct.tm_isdst > 0) {
+ return (cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * -kMsPerMinute;
+ } else if (posix_local_time_struct.tm_isdst == 0) {
+ return (cache->tzinfo_.Bias + cache->tzinfo_.StandardBias) * -kMsPerMinute;
+ } else {
+ return cache->tzinfo_.Bias * -kMsPerMinute;
+ }
+}
+
+
+// Return whether or not daylight savings time is in effect at this time.
+bool Win32Time::InDST(TimezoneCache* cache) {
+ cache->InitializeIfNeeded();
+
+ // Determine if DST is in effect at the specified time.
+ bool in_dst = false;
+ if (cache->tzinfo_.StandardDate.wMonth != 0 ||
+ cache->tzinfo_.DaylightDate.wMonth != 0) {
+ // Get the local timezone offset for the timestamp in milliseconds.
+ int64_t offset = LocalOffset(cache);
+
+ // Compute the offset for DST. The bias parameters in the timezone info
+ // are specified in minutes. These must be converted to milliseconds.
+ int64_t dstofs =
+ -(cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * kMsPerMinute;
+
+ // If the local time offset equals the timezone bias plus the daylight
+ // bias then DST is in effect.
+ in_dst = offset == dstofs;
+ }
+
+ return in_dst;
+}
+
+
+// Return the daylight savings time offset for this time.
+int64_t Win32Time::DaylightSavingsOffset(TimezoneCache* cache) {
+ return InDST(cache) ? 60 * kMsPerMinute : 0;
+}
+
+
+// Returns a string identifying the current timezone for the
+// timestamp taking into account daylight saving.
+char* Win32Time::LocalTimezone(TimezoneCache* cache) {
+ // Return the standard or DST time zone name based on whether daylight
+ // saving is in effect at the given time.
+ return InDST(cache) ? cache->dst_tz_name_ : cache->std_tz_name_;
+}
+
+
+// Returns the accumulated user time for the thread.
+int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
+ FILETIME dummy;
+ uint64_t usertime;
+
+ // Get the amount of time that the thread has executed in user mode.
+ if (!GetThreadTimes(GetCurrentThread(), &dummy, &dummy, &dummy,
+ reinterpret_cast<FILETIME*>(&usertime))) return -1;
+
+ // Adjust the resolution to micro-seconds.
+ usertime /= 10;
+
+ // Convert to seconds and microseconds
+ *secs = static_cast<uint32_t>(usertime / 1000000);
+ *usecs = static_cast<uint32_t>(usertime % 1000000);
+ return 0;
+}
+
+
+// Returns current time as the number of milliseconds since
+// 00:00:00 UTC, January 1, 1970.
+double OS::TimeCurrentMillis() {
+ return Time::Now().ToJsTime();
+}
+
+
+TimezoneCache* OS::CreateTimezoneCache() {
+ return new TimezoneCache();
+}
+
+
+void OS::DisposeTimezoneCache(TimezoneCache* cache) {
+ delete cache;
+}
+
+
+void OS::ClearTimezoneCache(TimezoneCache* cache) {
+ cache->Clear();
+}
+
+
+// Returns a string identifying the current timezone taking into
+// account daylight saving.
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+ return Win32Time(time).LocalTimezone(cache);
+}
+
+
+// Returns the local time offset in milliseconds east of UTC without
+// taking daylight savings time into account.
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+ // Use current time, rounded to the millisecond.
+ Win32Time t(TimeCurrentMillis());
+  // Win32Time::LocalOffset includes any daylight savings offset, so
+  // subtract it.
+ return static_cast<double>(t.LocalOffset(cache) -
+ t.DaylightSavingsOffset(cache));
+}
+
+
+// Returns the daylight savings offset in milliseconds for the given
+// time.
+double OS::DaylightSavingsOffset(double time, TimezoneCache* cache) {
+ int64_t offset = Win32Time(time).DaylightSavingsOffset(cache);
+ return static_cast<double>(offset);
+}
+
+
+int OS::GetLastError() {
+ return ::GetLastError();
+}
+
+
+int OS::GetCurrentProcessId() {
+ return static_cast<int>(::GetCurrentProcessId());
+}
+
+
+// ----------------------------------------------------------------------------
+// Win32 console output.
+//
+// If a Win32 application is linked as a console application it has a normal
+// standard output and standard error. In this case normal printf works fine
+// for output. However, if the application is linked as a GUI application,
+// the process doesn't have a console, and therefore (debugging) output is lost.
+// This is the case if we are embedded in a windows program (like a browser).
+// In order to get debug output in this case, the debugging facility based on
+// OutputDebugString is used. This output goes to the active debugger for the
+// process (if any). Otherwise the output can be monitored using DBMON.EXE.
+
+enum OutputMode {
+ UNKNOWN, // Output method has not yet been determined.
+ CONSOLE, // Output is written to stdout.
+ ODS // Output is written to debug facility.
+};
+
+static OutputMode output_mode = UNKNOWN; // Current output mode.
+
+
+// Determine if the process has a console for output.
+static bool HasConsole() {
+  // Only check the first time. Possible race conditions are not a problem,
+ // because all threads will eventually determine the same mode.
+ if (output_mode == UNKNOWN) {
+ // We cannot just check that the standard output is attached to a console
+ // because this would fail if output is redirected to a file. Therefore we
+ // say that a process does not have an output console if either the
+ // standard output handle is invalid or its file type is unknown.
+ if (GetStdHandle(STD_OUTPUT_HANDLE) != INVALID_HANDLE_VALUE &&
+ GetFileType(GetStdHandle(STD_OUTPUT_HANDLE)) != FILE_TYPE_UNKNOWN)
+ output_mode = CONSOLE;
+ else
+ output_mode = ODS;
+ }
+ return output_mode == CONSOLE;
+}
+
+
+static void VPrintHelper(FILE* stream, const char* format, va_list args) {
+ if ((stream == stdout || stream == stderr) && !HasConsole()) {
+ // It is important to use safe print here in order to avoid
+ // overflowing the buffer. We might truncate the output, but this
+ // does not crash.
+ char buffer[4096];
+ OS::VSNPrintF(buffer, sizeof(buffer), format, args);
+ OutputDebugStringA(buffer);
+ } else {
+ vfprintf(stream, format, args);
+ }
+}
+
+
+FILE* OS::FOpen(const char* path, const char* mode) {
+ FILE* result;
+ if (fopen_s(&result, path, mode) == 0) {
+ return result;
+ } else {
+ return NULL;
+ }
+}
+
+
+bool OS::Remove(const char* path) {
+ return (DeleteFileA(path) != 0);
+}
+
+
+FILE* OS::OpenTemporaryFile() {
+ // tmpfile_s tries to use the root dir, don't use it.
+ char tempPathBuffer[MAX_PATH];
+ DWORD path_result = 0;
+ path_result = GetTempPathA(MAX_PATH, tempPathBuffer);
+ if (path_result > MAX_PATH || path_result == 0) return NULL;
+ UINT name_result = 0;
+ char tempNameBuffer[MAX_PATH];
+ name_result = GetTempFileNameA(tempPathBuffer, "", 0, tempNameBuffer);
+ if (name_result == 0) return NULL;
+ FILE* result = FOpen(tempNameBuffer, "w+"); // Same mode as tmpfile uses.
+ if (result != NULL) {
+ Remove(tempNameBuffer); // Delete on close.
+ }
+ return result;
+}
+
+
+// Open log file in binary mode to avoid \n -> \r\n conversion.
+const char* const OS::LogFileOpenMode = "wb";
+
+
+// Print (debug) message to console.
+void OS::Print(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ VPrint(format, args);
+ va_end(args);
+}
+
+
+void OS::VPrint(const char* format, va_list args) {
+ VPrintHelper(stdout, format, args);
+}
+
+
+void OS::FPrint(FILE* out, const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ VFPrint(out, format, args);
+ va_end(args);
+}
+
+
+void OS::VFPrint(FILE* out, const char* format, va_list args) {
+ VPrintHelper(out, format, args);
+}
+
+
+// Print error message to console.
+void OS::PrintError(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ VPrintError(format, args);
+ va_end(args);
+}
+
+
+void OS::VPrintError(const char* format, va_list args) {
+ VPrintHelper(stderr, format, args);
+}
+
+
+int OS::SNPrintF(char* str, int length, const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ int result = VSNPrintF(str, length, format, args);
+ va_end(args);
+ return result;
+}
+
+
+int OS::VSNPrintF(char* str, int length, const char* format, va_list args) {
+ int n = _vsnprintf_s(str, length, _TRUNCATE, format, args);
+ // Make sure to zero-terminate the string if the output was
+ // truncated or if there was an error.
+ if (n < 0 || n >= length) {
+ if (length > 0)
+ str[length - 1] = '\0';
+ return -1;
+ } else {
+ return n;
+ }
+}
+
+
+char* OS::StrChr(char* str, int c) {
+ return const_cast<char*>(strchr(str, c));
+}
+
+
+void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
+  // Use _TRUNCATE; otherwise strncpy_s crashes (by design) if the buffer is
+  // too small.
+ size_t buffer_size = static_cast<size_t>(length);
+ if (n + 1 > buffer_size) // count for trailing '\0'
+ n = _TRUNCATE;
+ int result = strncpy_s(dest, length, src, n);
+ USE(result);
+ ASSERT(result == 0 || (n == _TRUNCATE && result == STRUNCATE));
+}
+
+
+#undef _TRUNCATE
+#undef STRUNCATE
+
+
+// Get the system's page size used by VirtualAlloc(), rounded up to the next
+// power of two if necessary. The reason for always returning a power of two
+// is that the rounding up in OS::Allocate expects that.
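+// For example, a dwPageSize of 4096 is already a power of two and is returned
+// unchanged, while a hypothetical value of 5000 would be rounded up to 8192.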
+static size_t GetPageSize() {
+ static size_t page_size = 0;
+ if (page_size == 0) {
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+ page_size = RoundUpToPowerOf2(info.dwPageSize);
+ }
+ return page_size;
+}
+
+
+// The allocation alignment is the guaranteed alignment for
+// VirtualAlloc'ed blocks of memory.
+size_t OS::AllocateAlignment() {
+ static size_t allocate_alignment = 0;
+ if (allocate_alignment == 0) {
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+ allocate_alignment = info.dwAllocationGranularity;
+ }
+ return allocate_alignment;
+}
+
+
+static LazyInstance<RandomNumberGenerator>::type
+ platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
+
+
+void OS::Initialize(int64_t random_seed, bool hard_abort,
+ const char* const gc_fake_mmap) {
+ if (random_seed) {
+ platform_random_number_generator.Pointer()->SetSeed(random_seed);
+ }
+ g_hard_abort = hard_abort;
+}
+
+
+void* OS::GetRandomMmapAddr() {
+  // The address range used to randomize RWX allocations in OS::Allocate.
+  // Try not to map pages into the default range into which Windows loads
+  // DLLs. Use a multiple of 64k to prevent committing unused memory.
+ // Note: This does not guarantee RWX regions will be within the
+ // range kAllocationRandomAddressMin to kAllocationRandomAddressMax
+#ifdef V8_HOST_ARCH_64_BIT
+ static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
+ static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
+#else
+ static const intptr_t kAllocationRandomAddressMin = 0x04000000;
+ static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
+#endif
+ uintptr_t address =
+ (platform_random_number_generator.Pointer()->NextInt() << kPageSizeBits) |
+ kAllocationRandomAddressMin;
+ address &= kAllocationRandomAddressMax;
+ return reinterpret_cast<void *>(address);
+}
+
+
+static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
+ LPVOID base = NULL;
+
+ if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
+    // For executable pages, try to randomize the allocation address.
+ for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
+ base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection);
+ }
+ }
+
+ // After three attempts give up and let the OS find an address to use.
+ if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
+
+ return base;
+}
+
+
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool is_executable) {
+ // VirtualAlloc rounds allocated size to page size automatically.
+ size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
+
+  // Windows XP SP2 allows Data Execution Prevention (DEP).
+ int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+
+ LPVOID mbase = RandomizedVirtualAlloc(msize,
+ MEM_COMMIT | MEM_RESERVE,
+ prot);
+
+ if (mbase == NULL) return NULL;
+
+ ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));
+
+ *allocated = msize;
+ return mbase;
+}
+
+
+void OS::Free(void* address, const size_t size) {
+ // TODO(1240712): VirtualFree has a return value which is ignored here.
+ VirtualFree(address, 0, MEM_RELEASE);
+ USE(size);
+}
+
+
+intptr_t OS::CommitPageSize() {
+ return 4096;
+}
+
+
+void OS::ProtectCode(void* address, const size_t size) {
+ DWORD old_protect;
+ VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
+}
+
+
+void OS::Guard(void* address, const size_t size) {
+ DWORD oldprotect;
+ VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
+}
+
+
+void OS::Sleep(int milliseconds) {
+ ::Sleep(milliseconds);
+}
+
+
+void OS::Abort() {
+ if (g_hard_abort) {
+ V8_IMMEDIATE_CRASH();
+ }
+ // Make the MSVCRT do a silent abort.
+ raise(SIGABRT);
+}
+
+
+void OS::DebugBreak() {
+#ifdef _MSC_VER
+  // To avoid requiring Visual Studio runtime support, the following code can
+  // be used instead:
+ // __asm { int 3 }
+ __debugbreak();
+#else
+ ::DebugBreak();
+#endif
+}
+
+
+class Win32MemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+ Win32MemoryMappedFile(HANDLE file,
+ HANDLE file_mapping,
+ void* memory,
+ int size)
+ : file_(file),
+ file_mapping_(file_mapping),
+ memory_(memory),
+ size_(size) { }
+ virtual ~Win32MemoryMappedFile();
+ virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
+ private:
+ HANDLE file_;
+ HANDLE file_mapping_;
+ void* memory_;
+ int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ // Open a physical file
+ HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
+ if (file == INVALID_HANDLE_VALUE) return NULL;
+
+ int size = static_cast<int>(GetFileSize(file, NULL));
+
+ // Create a file mapping for the physical file
+ HANDLE file_mapping = CreateFileMapping(file, NULL,
+ PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
+ if (file_mapping == NULL) return NULL;
+
+ // Map a view of the file into memory
+ void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
+ return new Win32MemoryMappedFile(file, file_mapping, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+ void* initial) {
+ // Open a physical file
+ HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, 0, NULL);
+ if (file == NULL) return NULL;
+ // Create a file mapping for the physical file
+ HANDLE file_mapping = CreateFileMapping(file, NULL,
+ PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
+ if (file_mapping == NULL) return NULL;
+ // Map a view of the file into memory
+ void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
+ if (memory) memmove(memory, initial, size);
+ return new Win32MemoryMappedFile(file, file_mapping, memory, size);
+}
+
+
+Win32MemoryMappedFile::~Win32MemoryMappedFile() {
+ if (memory_ != NULL)
+ UnmapViewOfFile(memory_);
+ CloseHandle(file_mapping_);
+ CloseHandle(file_);
+}
+
+
+// The following code loads functions defined in DbgHelp.h and TlHelp32.h
+// dynamically. This is to avoid depending on dbghelp.dll and tlhelp32.dll
+// when running (the functions in tlhelp32.dll have been moved to kernel32.dll
+// at some point, so loading the functions defined in TlHelp32.h dynamically
+// might not be necessary any more - for some versions of Windows?).
+
+// Function pointers to functions dynamically loaded from dbghelp.dll.
+#define DBGHELP_FUNCTION_LIST(V) \
+ V(SymInitialize) \
+ V(SymGetOptions) \
+ V(SymSetOptions) \
+ V(SymGetSearchPath) \
+ V(SymLoadModule64) \
+ V(StackWalk64) \
+ V(SymGetSymFromAddr64) \
+ V(SymGetLineFromAddr64) \
+ V(SymFunctionTableAccess64) \
+ V(SymGetModuleBase64)
+
+// Function pointers to functions dynamically loaded from tlhelp32.dll (now
+// provided by kernel32.dll).
+#define TLHELP32_FUNCTION_LIST(V) \
+ V(CreateToolhelp32Snapshot) \
+ V(Module32FirstW) \
+ V(Module32NextW)
+
+// Define the decoration to use for the type and variable name used for a
+// dynamically loaded DLL function.
+#define DLL_FUNC_TYPE(name) _##name##_
+#define DLL_FUNC_VAR(name) _##name
+
+// Define the type for each dynamically loaded DLL function. The function
+// definitions are copied from DbgHelp.h and TlHelp32.h. The IN and VOID macros
+// from the Windows include files are redefined here to have the function
+// definitions to be as close to the ones in the original .h files as possible.
+#ifndef IN
+#define IN
+#endif
+#ifndef VOID
+#define VOID void
+#endif
+
+// DbgHelp isn't supported on MinGW yet
+#ifndef __MINGW32__
+// DbgHelp.h functions.
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymInitialize))(IN HANDLE hProcess,
+ IN PSTR UserSearchPath,
+ IN BOOL fInvadeProcess);
+typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymGetOptions))(VOID);
+typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymSetOptions))(IN DWORD SymOptions);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSearchPath))(
+ IN HANDLE hProcess,
+ OUT PSTR SearchPath,
+ IN DWORD SearchPathLength);
+typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymLoadModule64))(
+ IN HANDLE hProcess,
+ IN HANDLE hFile,
+ IN PSTR ImageName,
+ IN PSTR ModuleName,
+ IN DWORD64 BaseOfDll,
+ IN DWORD SizeOfDll);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(StackWalk64))(
+ DWORD MachineType,
+ HANDLE hProcess,
+ HANDLE hThread,
+ LPSTACKFRAME64 StackFrame,
+ PVOID ContextRecord,
+ PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+ PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+ PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+ PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSymFromAddr64))(
+ IN HANDLE hProcess,
+ IN DWORD64 qwAddr,
+ OUT PDWORD64 pdwDisplacement,
+ OUT PIMAGEHLP_SYMBOL64 Symbol);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetLineFromAddr64))(
+ IN HANDLE hProcess,
+ IN DWORD64 qwAddr,
+ OUT PDWORD pdwDisplacement,
+ OUT PIMAGEHLP_LINE64 Line64);
+// DbgHelp.h typedefs. Implementation found in dbghelp.dll.
+typedef PVOID (__stdcall *DLL_FUNC_TYPE(SymFunctionTableAccess64))(
+ HANDLE hProcess,
+ DWORD64 AddrBase); // DbgHelp.h typedef PFUNCTION_TABLE_ACCESS_ROUTINE64
+typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymGetModuleBase64))(
+ HANDLE hProcess,
+ DWORD64 AddrBase); // DbgHelp.h typedef PGET_MODULE_BASE_ROUTINE64
+
+// TlHelp32.h functions.
+typedef HANDLE (__stdcall *DLL_FUNC_TYPE(CreateToolhelp32Snapshot))(
+ DWORD dwFlags,
+ DWORD th32ProcessID);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32FirstW))(HANDLE hSnapshot,
+ LPMODULEENTRY32W lpme);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32NextW))(HANDLE hSnapshot,
+ LPMODULEENTRY32W lpme);
+
+#undef IN
+#undef VOID
+
+// Declare a variable for each dynamically loaded DLL function.
+#define DEF_DLL_FUNCTION(name) DLL_FUNC_TYPE(name) DLL_FUNC_VAR(name) = NULL;
+DBGHELP_FUNCTION_LIST(DEF_DLL_FUNCTION)
+TLHELP32_FUNCTION_LIST(DEF_DLL_FUNCTION)
+#undef DEF_DLL_FUNCTION
+
+// Load the functions. This function has a lot of "ugly" macros in order to
+// keep down code duplication.
+
+static bool LoadDbgHelpAndTlHelp32() {
+ static bool dbghelp_loaded = false;
+
+ if (dbghelp_loaded) return true;
+
+ HMODULE module;
+
+ // Load functions from the dbghelp.dll module.
+ module = LoadLibrary(TEXT("dbghelp.dll"));
+ if (module == NULL) {
+ return false;
+ }
+
+#define LOAD_DLL_FUNC(name) \
+ DLL_FUNC_VAR(name) = \
+ reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));
+
+DBGHELP_FUNCTION_LIST(LOAD_DLL_FUNC)
+
+#undef LOAD_DLL_FUNC
+
+  // Load functions from the kernel32.dll module (the TlHelp32.h functions
+  // used to be in tlhelp32.dll but have been moved to kernel32.dll).
+ module = LoadLibrary(TEXT("kernel32.dll"));
+ if (module == NULL) {
+ return false;
+ }
+
+#define LOAD_DLL_FUNC(name) \
+ DLL_FUNC_VAR(name) = \
+ reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));
+
+TLHELP32_FUNCTION_LIST(LOAD_DLL_FUNC)
+
+#undef LOAD_DLL_FUNC
+
+  // Check that all functions were loaded.
+ bool result =
+#define DLL_FUNC_LOADED(name) (DLL_FUNC_VAR(name) != NULL) &&
+
+DBGHELP_FUNCTION_LIST(DLL_FUNC_LOADED)
+TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED)
+
+#undef DLL_FUNC_LOADED
+ true;
+
+ dbghelp_loaded = result;
+ return result;
+ // NOTE: The modules are never unloaded and will stay around until the
+ // application is closed.
+}
+
+#undef DBGHELP_FUNCTION_LIST
+#undef TLHELP32_FUNCTION_LIST
+#undef DLL_FUNC_VAR
+#undef DLL_FUNC_TYPE
+
+
+// Load the symbols for generating stack traces.
+static std::vector<OS::SharedLibraryAddress> LoadSymbols(
+ HANDLE process_handle) {
+ static std::vector<OS::SharedLibraryAddress> result;
+
+ static bool symbols_loaded = false;
+
+ if (symbols_loaded) return result;
+
+ BOOL ok;
+
+ // Initialize the symbol engine.
+ ok = _SymInitialize(process_handle, // hProcess
+ NULL, // UserSearchPath
+ false); // fInvadeProcess
+ if (!ok) return result;
+
+ DWORD options = _SymGetOptions();
+ options |= SYMOPT_LOAD_LINES;
+ options |= SYMOPT_FAIL_CRITICAL_ERRORS;
+ options = _SymSetOptions(options);
+
+ char buf[OS::kStackWalkMaxNameLen] = {0};
+ ok = _SymGetSearchPath(process_handle, buf, OS::kStackWalkMaxNameLen);
+ if (!ok) {
+ int err = GetLastError();
+ OS::Print("%d\n", err);
+ return result;
+ }
+
+ HANDLE snapshot = _CreateToolhelp32Snapshot(
+ TH32CS_SNAPMODULE, // dwFlags
+ GetCurrentProcessId()); // th32ProcessId
+ if (snapshot == INVALID_HANDLE_VALUE) return result;
+ MODULEENTRY32W module_entry;
+ module_entry.dwSize = sizeof(module_entry); // Set the size of the structure.
+ BOOL cont = _Module32FirstW(snapshot, &module_entry);
+ while (cont) {
+ DWORD64 base;
+    // NOTE: the SymLoadModule64 function has the peculiarity of accepting
+    // both unicode and ASCII strings even though the parameter is PSTR.
+ base = _SymLoadModule64(
+ process_handle, // hProcess
+ 0, // hFile
+ reinterpret_cast<PSTR>(module_entry.szExePath), // ImageName
+ reinterpret_cast<PSTR>(module_entry.szModule), // ModuleName
+ reinterpret_cast<DWORD64>(module_entry.modBaseAddr), // BaseOfDll
+ module_entry.modBaseSize); // SizeOfDll
+ if (base == 0) {
+ int err = GetLastError();
+ if (err != ERROR_MOD_NOT_FOUND &&
+ err != ERROR_INVALID_HANDLE) {
+ result.clear();
+ return result;
+ }
+ }
+ int lib_name_length = WideCharToMultiByte(
+ CP_UTF8, 0, module_entry.szExePath, -1, NULL, 0, NULL, NULL);
+ std::string lib_name(lib_name_length, 0);
+ WideCharToMultiByte(CP_UTF8, 0, module_entry.szExePath, -1, &lib_name[0],
+ lib_name_length, NULL, NULL);
+ result.push_back(OS::SharedLibraryAddress(
+ lib_name, reinterpret_cast<unsigned int>(module_entry.modBaseAddr),
+ reinterpret_cast<unsigned int>(module_entry.modBaseAddr +
+ module_entry.modBaseSize)));
+ cont = _Module32NextW(snapshot, &module_entry);
+ }
+ CloseHandle(snapshot);
+
+ symbols_loaded = true;
+ return result;
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ // SharedLibraryEvents are logged when loading symbol information.
+ // Only the shared libraries loaded at the time of the call to
+ // GetSharedLibraryAddresses are logged. DLLs loaded after
+ // initialization are not accounted for.
+ if (!LoadDbgHelpAndTlHelp32()) return std::vector<OS::SharedLibraryAddress>();
+ HANDLE process_handle = GetCurrentProcess();
+ return LoadSymbols(process_handle);
+}
+
+
+void OS::SignalCodeMovingGC() {
+}
+
+
+uint64_t OS::TotalPhysicalMemory() {
+ MEMORYSTATUSEX memory_info;
+ memory_info.dwLength = sizeof(memory_info);
+ if (!GlobalMemoryStatusEx(&memory_info)) {
+ UNREACHABLE();
+ return 0;
+ }
+
+ return static_cast<uint64_t>(memory_info.ullTotalPhys);
+}
+
+
+#else // __MINGW32__
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ return std::vector<OS::SharedLibraryAddress>();
+}
+
+
+void OS::SignalCodeMovingGC() { }
+#endif // __MINGW32__
+
+
+int OS::NumberOfProcessorsOnline() {
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+ return info.dwNumberOfProcessors;
+}
+
+
+double OS::nan_value() {
+#ifdef _MSC_VER
+ return std::numeric_limits<double>::quiet_NaN();
+#else // _MSC_VER
+ return NAN;
+#endif // _MSC_VER
+}
+
+
+int OS::ActivationFrameAlignment() {
+#ifdef _WIN64
+ return 16; // Windows 64-bit ABI requires the stack to be 16-byte aligned.
+#elif defined(__MINGW32__)
+ // With gcc 4.4 the tree vectorization optimizer can generate code
+ // that requires 16 byte alignment such as movdqa on x86.
+ return 16;
+#else
+ return 8; // Floating-point math runs faster with 8-byte alignment.
+#endif
+}
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* address = ReserveRegion(request_size);
+ if (address == NULL) return;
+ uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
+ // Try reducing the size by freeing and then reallocating a specific area.
+ bool result = ReleaseRegion(address, request_size);
+ USE(result);
+ ASSERT(result);
+ address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
+ if (address != NULL) {
+ request_size = size;
+ ASSERT(base == static_cast<uint8_t*>(address));
+ } else {
+ // Resizing failed, just go with a bigger area.
+ address = ReserveRegion(request_size);
+ if (address == NULL) return;
+ }
+ address_ = address;
+ size_ = request_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ bool result = ReleaseRegion(address(), size());
+ ASSERT(result);
+ USE(result);
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ ASSERT(IsReserved());
+ return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+ if (NULL == VirtualAlloc(address,
+ OS::CommitPageSize(),
+ MEM_COMMIT,
+ PAGE_NOACCESS)) {
+ return false;
+ }
+ return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+ int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+ if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
+ return false;
+ }
+ return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return VirtualFree(base, size, MEM_DECOMMIT) != 0;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return VirtualFree(base, 0, MEM_RELEASE) != 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
+
+// ----------------------------------------------------------------------------
+// Win32 thread support.
+
+// Definition of invalid thread handle and id.
+static const HANDLE kNoThread = INVALID_HANDLE_VALUE;
+
+// Entry point for threads. The supplied argument is a pointer to the thread
+// object. The entry function dispatches to the run method in the thread
+// object. It is important that this function has the __stdcall calling
+// convention.
+static unsigned int __stdcall ThreadEntry(void* arg) {
+ Thread* thread = reinterpret_cast<Thread*>(arg);
+ thread->NotifyStartedAndRun();
+ return 0;
+}
+
+
+class Thread::PlatformData {
+ public:
+ explicit PlatformData(HANDLE thread) : thread_(thread) {}
+ HANDLE thread_;
+ unsigned thread_id_;
+};
+
+
+// Initialize a Win32 thread object. The thread has an invalid thread
+// handle until it is started.
+
+Thread::Thread(const Options& options)
+ : stack_size_(options.stack_size()),
+ start_semaphore_(NULL) {
+ data_ = new PlatformData(kNoThread);
+ set_name(options.name());
+}
+
+
+void Thread::set_name(const char* name) {
+ OS::StrNCpy(name_, sizeof(name_), name, strlen(name));
+ name_[sizeof(name_) - 1] = '\0';
+}
+
+
+// Close our own handle for the thread.
+Thread::~Thread() {
+ if (data_->thread_ != kNoThread) CloseHandle(data_->thread_);
+ delete data_;
+}
+
+
+// Create a new thread. It is important to use _beginthreadex() instead of
+// the Win32 function CreateThread(), because CreateThread() does not
+// initialize thread-specific structures in the C runtime library.
+void Thread::Start() {
+ data_->thread_ = reinterpret_cast<HANDLE>(
+ _beginthreadex(NULL,
+ static_cast<unsigned>(stack_size_),
+ ThreadEntry,
+ this,
+ 0,
+ &data_->thread_id_));
+}
+
+
+// Wait for thread to terminate.
+void Thread::Join() {
+ if (data_->thread_id_ != GetCurrentThreadId()) {
+ WaitForSingleObject(data_->thread_, INFINITE);
+ }
+}
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+ DWORD result = TlsAlloc();
+ ASSERT(result != TLS_OUT_OF_INDEXES);
+ return static_cast<LocalStorageKey>(result);
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+ BOOL result = TlsFree(static_cast<DWORD>(key));
+ USE(result);
+ ASSERT(result);
+}
+
+
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+ return TlsGetValue(static_cast<DWORD>(key));
+}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+ BOOL result = TlsSetValue(static_cast<DWORD>(key), value);
+ USE(result);
+ ASSERT(result);
+}
+
+
+
+void Thread::YieldCPU() {
+ Sleep(0);
+}
+
+} } // namespace v8::base
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This module contains the platform-specific code. This makes the rest of the
+// code less dependent on operating systems, compilers and runtime libraries.
+// This module specifically does not deal with differences between processor
+// architectures.
+// The platform classes have the same definition for all platforms. The
+// implementation for a particular platform is put in platform-<os>.cc.
+// The build system then uses the implementation for the target platform.
+//
+// This design has been chosen because it is simple and fast. Alternatively,
+// the platform-dependent classes could have been implemented using abstract
+// superclasses with virtual methods and specializations for each platform.
+// This design was rejected because it was more complicated and slower. It
+// would require factory methods for selecting the right implementation and
+// the overhead of virtual methods for performance-sensitive operations like
+// mutex locking/unlocking.
+
+#ifndef V8_BASE_PLATFORM_PLATFORM_H_
+#define V8_BASE_PLATFORM_PLATFORM_H_
+
+#include <stdarg.h>
+#include <string>
+#include <vector>
+
+#include "src/base/build_config.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/semaphore.h"
+
+#ifdef __sun
+# ifndef signbit
+namespace std {
+int signbit(double x);
+}
+# endif
+#endif
+
+#if V8_OS_QNX
+#include "src/base/qnx-math.h"
+#endif
+
+// Microsoft Visual C++ specific stuff.
+#if V8_LIBC_MSVCRT
+
+#include "src/base/win32-headers.h"
+#include "src/base/win32-math.h"
+
+int strncasecmp(const char* s1, const char* s2, int n);
+
+// Visual C++ 2013 and higher implement this function.
+#if (_MSC_VER < 1800)
+inline int lrint(double flt) {
+ int intgr;
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
+ __asm {
+ fld flt
+ fistp intgr
+ };
+#else
+ intgr = static_cast<int>(flt + 0.5);
+ if ((intgr & 1) != 0 && intgr - flt == 0.5) {
+ // If the number is halfway between two integers, round to the even one.
+ intgr--;
+ }
+#endif
+ return intgr;
+}
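+// For example, with the non-x87 fallback above, lrint(2.5) yields 2 and
+// lrint(3.5) yields 4 (ties are rounded to the nearest even integer).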
+#endif // _MSC_VER < 1800
+
+#endif // V8_LIBC_MSVCRT
+
+namespace v8 {
+namespace base {
+
+// ----------------------------------------------------------------------------
+// Fast TLS support
+
+#ifndef V8_NO_FAST_TLS
+
+#if defined(_MSC_VER) && (V8_HOST_ARCH_IA32)
+
+#define V8_FAST_TLS_SUPPORTED 1
+
+INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
+
+inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
+ const intptr_t kTibInlineTlsOffset = 0xE10;
+ const intptr_t kTibExtraTlsOffset = 0xF94;
+ const intptr_t kMaxInlineSlots = 64;
+ const intptr_t kMaxSlots = kMaxInlineSlots + 1024;
+ const intptr_t kPointerSize = sizeof(void*);
+ ASSERT(0 <= index && index < kMaxSlots);
+ if (index < kMaxInlineSlots) {
+ return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset +
+ kPointerSize * index));
+ }
+ intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
+ ASSERT(extra != 0);
+ return *reinterpret_cast<intptr_t*>(extra +
+ kPointerSize * (index - kMaxInlineSlots));
+}
+
+#elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+
+#define V8_FAST_TLS_SUPPORTED 1
+
+extern intptr_t kMacTlsBaseOffset;
+
+INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
+
+inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
+ intptr_t result;
+#if V8_HOST_ARCH_IA32
+ asm("movl %%gs:(%1,%2,4), %0;"
+ :"=r"(result) // Output must be a writable register.
+ :"r"(kMacTlsBaseOffset), "r"(index));
+#else
+ asm("movq %%gs:(%1,%2,8), %0;"
+ :"=r"(result)
+ :"r"(kMacTlsBaseOffset), "r"(index));
+#endif
+ return result;
+}
+
+#endif
+
+#endif // V8_NO_FAST_TLS
+
+
+class TimezoneCache;
+
+
+// ----------------------------------------------------------------------------
+// OS
+//
+// This class has static methods for the different platform specific
+// functions. Add methods here to cope with differences between the
+// supported platforms.
+
+class OS {
+ public:
+ // Initialize the OS class.
+  // - random_seed: Used for GetRandomMmapAddr() if non-zero.
+ // - hard_abort: If true, OS::Abort() will crash instead of aborting.
+ // - gc_fake_mmap: Name of the file for fake gc mmap used in ll_prof.
+ static void Initialize(int64_t random_seed,
+ bool hard_abort,
+ const char* const gc_fake_mmap);
+
+  // Returns the accumulated user time for the thread. This routine
+  // can be used for profiling. The implementation should
+  // strive for high-precision timer resolution, preferably
+  // micro-second resolution.
+ static int GetUserTime(uint32_t* secs, uint32_t* usecs);
+
+ // Returns current time as the number of milliseconds since
+ // 00:00:00 UTC, January 1, 1970.
+ static double TimeCurrentMillis();
+
+ static TimezoneCache* CreateTimezoneCache();
+ static void DisposeTimezoneCache(TimezoneCache* cache);
+ static void ClearTimezoneCache(TimezoneCache* cache);
+
+ // Returns a string identifying the current time zone. The
+ // timestamp is used for determining if DST is in effect.
+ static const char* LocalTimezone(double time, TimezoneCache* cache);
+
+ // Returns the local time offset in milliseconds east of UTC without
+ // taking daylight savings time into account.
+ static double LocalTimeOffset(TimezoneCache* cache);
+
+ // Returns the daylight savings offset for the given time.
+ static double DaylightSavingsOffset(double time, TimezoneCache* cache);
+
+ // Returns last OS error.
+ static int GetLastError();
+
+ static FILE* FOpen(const char* path, const char* mode);
+ static bool Remove(const char* path);
+
+ // Opens a temporary file, the file is auto removed on close.
+ static FILE* OpenTemporaryFile();
+
+ // Log file open mode is platform-dependent due to line ends issues.
+ static const char* const LogFileOpenMode;
+
+ // Print output to console. This is mostly used for debugging output.
+  // On platforms that have standard terminal output, the output
+ // should go to stdout.
+ static void Print(const char* format, ...);
+ static void VPrint(const char* format, va_list args);
+
+ // Print output to a file. This is mostly used for debugging output.
+ static void FPrint(FILE* out, const char* format, ...);
+ static void VFPrint(FILE* out, const char* format, va_list args);
+
+ // Print error output to console. This is mostly used for error message
+  // output. On platforms that have standard terminal output, the output
+ // should go to stderr.
+ static void PrintError(const char* format, ...);
+ static void VPrintError(const char* format, va_list args);
+
+ // Allocate/Free memory used by JS heap. Pages are readable/writable, but
+ // they are not guaranteed to be executable unless 'executable' is true.
+  // Returns the address of allocated memory, or NULL on failure.
+ static void* Allocate(const size_t requested,
+ size_t* allocated,
+ bool is_executable);
+ static void Free(void* address, const size_t size);
+
+ // This is the granularity at which the ProtectCode(...) call can set page
+ // permissions.
+ static intptr_t CommitPageSize();
+
+ // Mark code segments non-writable.
+ static void ProtectCode(void* address, const size_t size);
+
+ // Assign memory as a guard page so that access will cause an exception.
+ static void Guard(void* address, const size_t size);
+
+ // Generate a random address to be used for hinting mmap().
+ static void* GetRandomMmapAddr();
+
+  // Get the alignment guaranteed by Allocate().
+ static size_t AllocateAlignment();
+
+ // Sleep for a number of milliseconds.
+ static void Sleep(const int milliseconds);
+
+ // Abort the current process.
+ static void Abort();
+
+ // Debug break.
+ static void DebugBreak();
+
+ // Walk the stack.
+ static const int kStackWalkError = -1;
+ static const int kStackWalkMaxNameLen = 256;
+ static const int kStackWalkMaxTextLen = 256;
+ struct StackFrame {
+ void* address;
+ char text[kStackWalkMaxTextLen];
+ };
+
+ class MemoryMappedFile {
+ public:
+ static MemoryMappedFile* open(const char* name);
+ static MemoryMappedFile* create(const char* name, int size, void* initial);
+ virtual ~MemoryMappedFile() { }
+ virtual void* memory() = 0;
+ virtual int size() = 0;
+ };
+
+ // Safe formatting print. Ensures that str is always null-terminated.
+ // Returns the number of chars written, or -1 if output was truncated.
+ static int SNPrintF(char* str, int length, const char* format, ...);
+ static int VSNPrintF(char* str,
+ int length,
+ const char* format,
+ va_list args);
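+  //
+  // Illustrative usage sketch (an assumption, not part of this interface):
+  //   char buf[64];
+  //   int n = OS::SNPrintF(buf, sizeof(buf), "pid=%d",
+  //                        OS::GetCurrentProcessId());
+  //   if (n < 0) { /* output was truncated, but buf is null-terminated */ }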
+
+ static char* StrChr(char* str, int c);
+ static void StrNCpy(char* dest, int length, const char* src, size_t n);
+
+ // Support for the profiler. Can do nothing, in which case ticks
+  // occurring in shared libraries will not be properly accounted for.
+ struct SharedLibraryAddress {
+ SharedLibraryAddress(
+ const std::string& library_path, uintptr_t start, uintptr_t end)
+ : library_path(library_path), start(start), end(end) {}
+
+ std::string library_path;
+ uintptr_t start;
+ uintptr_t end;
+ };
+
+ static std::vector<SharedLibraryAddress> GetSharedLibraryAddresses();
+
+ // Support for the profiler. Notifies the external profiling
+ // process that a code moving garbage collection starts. Can do
+ // nothing, in which case the code objects must not move (e.g., by
+ // using --never-compact) if accurate profiling is desired.
+ static void SignalCodeMovingGC();
+
+ // Returns the number of processors online.
+ static int NumberOfProcessorsOnline();
+
+ // The total amount of physical memory available on the current system.
+ static uint64_t TotalPhysicalMemory();
+
+ // Maximum size of the virtual memory. 0 means there is no artificial
+ // limit.
+ static intptr_t MaxVirtualMemory();
+
+  // Returns the double constant NaN (not a number).
+ static double nan_value();
+
+ // Support runtime detection of whether the hard float option of the
+ // EABI is used.
+ static bool ArmUsingHardFloat();
+
+ // Returns the activation frame alignment constraint or zero if
+ // the platform doesn't care. Guaranteed to be a power of two.
+ static int ActivationFrameAlignment();
+
+ static int GetCurrentProcessId();
+
+ private:
+ static const int msPerSecond = 1000;
+
+#if V8_OS_POSIX
+ static const char* GetGCFakeMMapFile();
+#endif
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
+};
+
+// Represents and controls an area of reserved memory.
+// Control of the reserved memory can be assigned to another VirtualMemory
+// object by assignment or copy-constructing. This removes the reserved memory
+// from the original object.
+class VirtualMemory {
+ public:
+ // Empty VirtualMemory object, controlling no reserved memory.
+ VirtualMemory();
+
+ // Reserves virtual memory with size.
+ explicit VirtualMemory(size_t size);
+
+ // Reserves virtual memory containing an area of the given size that
+  // is aligned to the given alignment. This may not be at the position
+  // returned by address().
+ VirtualMemory(size_t size, size_t alignment);
+
+ // Releases the reserved memory, if any, controlled by this VirtualMemory
+ // object.
+ ~VirtualMemory();
+
+ // Returns whether the memory has been reserved.
+ bool IsReserved();
+
+  // Initializes or resets an embedded VirtualMemory object.
+ void Reset();
+
+ // Returns the start address of the reserved memory.
+ // If the memory was reserved with an alignment, this address is not
+ // necessarily aligned. The user might need to round it up to a multiple of
+ // the alignment to get the start of the aligned block.
+ void* address() {
+ ASSERT(IsReserved());
+ return address_;
+ }
+
+ // Returns the size of the reserved memory. The returned value is only
+ // meaningful when IsReserved() returns true.
+ // If the memory was reserved with an alignment, this size may be larger
+ // than the requested size.
+ size_t size() { return size_; }
+
+ // Commits real memory. Returns whether the operation succeeded.
+ bool Commit(void* address, size_t size, bool is_executable);
+
+ // Uncommit real memory. Returns whether the operation succeeded.
+ bool Uncommit(void* address, size_t size);
+
+ // Creates a single guard page at the given address.
+ bool Guard(void* address);
+
+ void Release() {
+ ASSERT(IsReserved());
+ // Notice: Order is important here. The VirtualMemory object might live
+ // inside the allocated region.
+ void* address = address_;
+ size_t size = size_;
+ Reset();
+ bool result = ReleaseRegion(address, size);
+ USE(result);
+ ASSERT(result);
+ }
+
+ // Assign control of the reserved region to a different VirtualMemory object.
+ // The old object is no longer functional (IsReserved() returns false).
+ void TakeControl(VirtualMemory* from) {
+ ASSERT(!IsReserved());
+ address_ = from->address_;
+ size_ = from->size_;
+ from->Reset();
+ }
+
+ static void* ReserveRegion(size_t size);
+
+ static bool CommitRegion(void* base, size_t size, bool is_executable);
+
+ static bool UncommitRegion(void* base, size_t size);
+
+ // Must be called with a base pointer that has been returned by ReserveRegion
+ // and the same size it was reserved with.
+ static bool ReleaseRegion(void* base, size_t size);
+
+ // Returns true if OS performs lazy commits, i.e. the memory allocation call
+ // defers actual physical memory allocation till the first memory access.
+ // Otherwise returns false.
+ static bool HasLazyCommits();
+
+ private:
+ void* address_; // Start address of the virtual memory.
+ size_t size_; // Size of the virtual memory.
+};
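+
+// Illustrative usage sketch (an assumption, not part of this header): reserve
+// a region, commit a page at its start, and let the destructor release the
+// reservation when the object goes out of scope.
+//
+//   VirtualMemory vm(64 * 1024);  // Reserve 64 KB.
+//   if (vm.IsReserved() && vm.Commit(vm.address(), 4096, false)) {
+//     // ... use the committed, readable/writable page at vm.address() ...
+//     vm.Uncommit(vm.address(), 4096);
+//   }
+//   // ~VirtualMemory releases the reserved region.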
+
+
+// ----------------------------------------------------------------------------
+// Thread
+//
+// Thread objects are used for creating and running threads. When the Start()
+// method is called, the new thread starts running the Run() method in the new
+// thread. The Thread object should not be deallocated before the thread has
+// terminated.
+
+class Thread {
+ public:
+ // Opaque data type for thread-local storage keys.
+ typedef int32_t LocalStorageKey;
+
+ class Options {
+ public:
+ Options() : name_("v8:<unknown>"), stack_size_(0) {}
+ Options(const char* name, int stack_size = 0)
+ : name_(name), stack_size_(stack_size) {}
+
+ const char* name() const { return name_; }
+ int stack_size() const { return stack_size_; }
+
+ private:
+ const char* name_;
+ int stack_size_;
+ };
+
+ // Create new thread.
+ explicit Thread(const Options& options);
+ virtual ~Thread();
+
+ // Start new thread by calling the Run() method on the new thread.
+ void Start();
+
+ // Start new thread and wait until Run() method is called on the new thread.
+ void StartSynchronously() {
+ start_semaphore_ = new Semaphore(0);
+ Start();
+ start_semaphore_->Wait();
+ delete start_semaphore_;
+ start_semaphore_ = NULL;
+ }
+
+ // Wait until thread terminates.
+ void Join();
+
+ inline const char* name() const {
+ return name_;
+ }
+
+ // Abstract method for run handler.
+ virtual void Run() = 0;
+
+ // Thread-local storage.
+ static LocalStorageKey CreateThreadLocalKey();
+ static void DeleteThreadLocalKey(LocalStorageKey key);
+ static void* GetThreadLocal(LocalStorageKey key);
+ static int GetThreadLocalInt(LocalStorageKey key) {
+ return static_cast<int>(reinterpret_cast<intptr_t>(GetThreadLocal(key)));
+ }
+ static void SetThreadLocal(LocalStorageKey key, void* value);
+ static void SetThreadLocalInt(LocalStorageKey key, int value) {
+ SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value)));
+ }
+ static bool HasThreadLocal(LocalStorageKey key) {
+ return GetThreadLocal(key) != NULL;
+ }
+
+#ifdef V8_FAST_TLS_SUPPORTED
+ static inline void* GetExistingThreadLocal(LocalStorageKey key) {
+ void* result = reinterpret_cast<void*>(
+ InternalGetExistingThreadLocal(static_cast<intptr_t>(key)));
+ ASSERT(result == GetThreadLocal(key));
+ return result;
+ }
+#else
+ static inline void* GetExistingThreadLocal(LocalStorageKey key) {
+ return GetThreadLocal(key);
+ }
+#endif
+
+ // A hint to the scheduler to let another thread run.
+ static void YieldCPU();
+
+
+ // The thread name length is limited to 16 based on Linux's implementation of
+ // prctl().
+ static const int kMaxThreadNameLength = 16;
+
+ class PlatformData;
+ PlatformData* data() { return data_; }
+
+ void NotifyStartedAndRun() {
+ if (start_semaphore_) start_semaphore_->Signal();
+ Run();
+ }
+
+ private:
+ void set_name(const char* name);
+
+ PlatformData* data_;
+
+ char name_[kMaxThreadNameLength];
+ int stack_size_;
+ Semaphore* start_semaphore_;
+
+ DISALLOW_COPY_AND_ASSIGN(Thread);
+};
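+
+// Illustrative usage sketch (an assumption, not part of this header): derive
+// from Thread, implement Run(), then Start() the thread and Join() it before
+// the object is destroyed.
+//
+//   class WorkerThread : public Thread {
+//    public:
+//     WorkerThread() : Thread(Options("v8:Worker")) {}
+//     virtual void Run() { /* work happens on the new thread */ }
+//   };
+//
+//   WorkerThread worker;
+//   worker.Start();
+//   // ... do other work on the calling thread ...
+//   worker.Join();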
+
+} } // namespace v8::base
+
+#endif // V8_BASE_PLATFORM_PLATFORM_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/semaphore.h"
+
+#if V8_OS_MACOSX
+#include <mach/mach_init.h>
+#include <mach/task.h>
+#endif
+
+#include <errno.h>
+
+#include "src/base/logging.h"
+#include "src/base/platform/time.h"
+
+namespace v8 {
+namespace base {
+
+#if V8_OS_MACOSX
+
+Semaphore::Semaphore(int count) {
+ kern_return_t result = semaphore_create(
+ mach_task_self(), &native_handle_, SYNC_POLICY_FIFO, count);
+ ASSERT_EQ(KERN_SUCCESS, result);
+ USE(result);
+}
+
+
+Semaphore::~Semaphore() {
+ kern_return_t result = semaphore_destroy(mach_task_self(), native_handle_);
+ ASSERT_EQ(KERN_SUCCESS, result);
+ USE(result);
+}
+
+
+void Semaphore::Signal() {
+ kern_return_t result = semaphore_signal(native_handle_);
+ ASSERT_EQ(KERN_SUCCESS, result);
+ USE(result);
+}
+
+
+void Semaphore::Wait() {
+ while (true) {
+ kern_return_t result = semaphore_wait(native_handle_);
+ if (result == KERN_SUCCESS) return; // Semaphore was signalled.
+ ASSERT_EQ(KERN_ABORTED, result);
+ }
+}
+
+
+bool Semaphore::WaitFor(const TimeDelta& rel_time) {
+ TimeTicks now = TimeTicks::Now();
+ TimeTicks end = now + rel_time;
+ while (true) {
+ mach_timespec_t ts;
+ if (now >= end) {
+ // Return immediately if semaphore was not signalled.
+ ts.tv_sec = 0;
+ ts.tv_nsec = 0;
+ } else {
+ ts = (end - now).ToMachTimespec();
+ }
+ kern_return_t result = semaphore_timedwait(native_handle_, ts);
+ if (result == KERN_SUCCESS) return true; // Semaphore was signalled.
+ if (result == KERN_OPERATION_TIMED_OUT) return false; // Timeout.
+ ASSERT_EQ(KERN_ABORTED, result);
+ now = TimeTicks::Now();
+ }
+}
+
+#elif V8_OS_POSIX
+
+Semaphore::Semaphore(int count) {
+ ASSERT(count >= 0);
+ int result = sem_init(&native_handle_, 0, count);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+Semaphore::~Semaphore() {
+ int result = sem_destroy(&native_handle_);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void Semaphore::Signal() {
+ int result = sem_post(&native_handle_);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void Semaphore::Wait() {
+ while (true) {
+ int result = sem_wait(&native_handle_);
+ if (result == 0) return; // Semaphore was signalled.
+ // Signal caused spurious wakeup.
+ ASSERT_EQ(-1, result);
+ ASSERT_EQ(EINTR, errno);
+ }
+}
+
+
+bool Semaphore::WaitFor(const TimeDelta& rel_time) {
+ // Compute the time for end of timeout.
+ const Time time = Time::NowFromSystemTime() + rel_time;
+ const struct timespec ts = time.ToTimespec();
+
+ // Wait for semaphore signalled or timeout.
+ while (true) {
+ int result = sem_timedwait(&native_handle_, &ts);
+ if (result == 0) return true; // Semaphore was signalled.
+#if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
+ if (result > 0) {
+ // sem_timedwait in glibc prior to 2.3.4 returns the errno instead of -1.
+ errno = result;
+ result = -1;
+ }
+#endif
+ if (result == -1 && errno == ETIMEDOUT) {
+ // Timed out while waiting for semaphore.
+ return false;
+ }
+ // Signal caused spurious wakeup.
+ ASSERT_EQ(-1, result);
+ ASSERT_EQ(EINTR, errno);
+ }
+}
+
+#elif V8_OS_WIN
+
+Semaphore::Semaphore(int count) {
+ ASSERT(count >= 0);
+ native_handle_ = ::CreateSemaphoreA(NULL, count, 0x7fffffff, NULL);
+ ASSERT(native_handle_ != NULL);
+}
+
+
+Semaphore::~Semaphore() {
+ BOOL result = CloseHandle(native_handle_);
+ ASSERT(result);
+ USE(result);
+}
+
+
+void Semaphore::Signal() {
+ LONG dummy;
+ BOOL result = ReleaseSemaphore(native_handle_, 1, &dummy);
+ ASSERT(result);
+ USE(result);
+}
+
+
+void Semaphore::Wait() {
+ DWORD result = WaitForSingleObject(native_handle_, INFINITE);
+ ASSERT(result == WAIT_OBJECT_0);
+ USE(result);
+}
+
+
+bool Semaphore::WaitFor(const TimeDelta& rel_time) {
+ TimeTicks now = TimeTicks::Now();
+ TimeTicks end = now + rel_time;
+ while (true) {
+ int64_t msec = (end - now).InMilliseconds();
+ if (msec >= static_cast<int64_t>(INFINITE)) {
+ DWORD result = WaitForSingleObject(native_handle_, INFINITE - 1);
+ if (result == WAIT_OBJECT_0) {
+ return true;
+ }
+ ASSERT(result == WAIT_TIMEOUT);
+ now = TimeTicks::Now();
+ } else {
+ DWORD result = WaitForSingleObject(
+ native_handle_, (msec < 0) ? 0 : static_cast<DWORD>(msec));
+ if (result == WAIT_TIMEOUT) {
+ return false;
+ }
+ ASSERT(result == WAIT_OBJECT_0);
+ return true;
+ }
+ }
+}
+
+#endif // V8_OS_MACOSX
+
+} } // namespace v8::base
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_PLATFORM_SEMAPHORE_H_
+#define V8_BASE_PLATFORM_SEMAPHORE_H_
+
+#include "src/base/lazy-instance.h"
+#if V8_OS_WIN
+#include "src/base/win32-headers.h"
+#endif
+
+#if V8_OS_MACOSX
+#include <mach/semaphore.h> // NOLINT
+#elif V8_OS_POSIX
+#include <semaphore.h> // NOLINT
+#endif
+
+namespace v8 {
+namespace base {
+
+// Forward declarations.
+class TimeDelta;
+
+// ----------------------------------------------------------------------------
+// Semaphore
+//
+// A semaphore object is a synchronization object that maintains a count. The
+// count is decremented each time a thread completes a wait for the semaphore
+// object and incremented each time a thread signals the semaphore. When the
+// count reaches zero, threads waiting for the semaphore block until the
+// count becomes non-zero.
+
+class Semaphore V8_FINAL {
+ public:
+ explicit Semaphore(int count);
+ ~Semaphore();
+
+ // Increments the semaphore counter.
+ void Signal();
+
+  // Suspends the calling thread until the semaphore counter is non-zero
+ // and then decrements the semaphore counter.
+ void Wait();
+
+  // Suspends the calling thread until the counter is non-zero or the timeout
+  // time has passed. If the timeout happens, the return value is false and
+  // the counter is unchanged. Otherwise the semaphore counter is decremented
+  // and true is returned.
+ bool WaitFor(const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT;
+
+#if V8_OS_MACOSX
+ typedef semaphore_t NativeHandle;
+#elif V8_OS_POSIX
+ typedef sem_t NativeHandle;
+#elif V8_OS_WIN
+ typedef HANDLE NativeHandle;
+#endif
+
+ NativeHandle& native_handle() {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const {
+ return native_handle_;
+ }
+
+ private:
+ NativeHandle native_handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(Semaphore);
+};
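+
+// Illustrative usage sketch (an assumption, not part of this header): a
+// semaphore created with a count of 0 can hand a unit of work from one
+// thread to another.
+//
+//   Semaphore work_ready(0);
+//   // Producer: publish the work item, then work_ready.Signal().
+//   // Consumer: work_ready.Wait(), then consume the work item.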
+
+
+// POD Semaphore initialized lazily (i.e. the first time Pointer() is called).
+// Usage:
+// // The following semaphore starts at 0.
+// static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;
+//
+// void my_function() {
+// // Do something with my_semaphore.Pointer().
+// }
+//
+
+template <int N>
+struct CreateSemaphoreTrait {
+ static Semaphore* Create() {
+ return new Semaphore(N);
+ }
+};
+
+template <int N>
+struct LazySemaphore {
+ typedef typename LazyDynamicInstance<Semaphore, CreateSemaphoreTrait<N>,
+ ThreadSafeInitOnceTrait>::type type;
+};
+
+#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
+
+} } // namespace v8::base
+
+#endif // V8_BASE_PLATFORM_SEMAPHORE_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/time.h"
+
+#if V8_OS_POSIX
+#include <sys/time.h>
+#endif
+#if V8_OS_MACOSX
+#include <mach/mach_time.h>
+#endif
+
+#include <string.h>
+
+#if V8_OS_WIN
+#include "src/base/lazy-instance.h"
+#include "src/base/win32-headers.h"
+#endif
+#include "src/base/cpu.h"
+#include "src/base/logging.h"
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace base {
+
+TimeDelta TimeDelta::FromDays(int days) {
+ return TimeDelta(days * Time::kMicrosecondsPerDay);
+}
+
+
+TimeDelta TimeDelta::FromHours(int hours) {
+ return TimeDelta(hours * Time::kMicrosecondsPerHour);
+}
+
+
+TimeDelta TimeDelta::FromMinutes(int minutes) {
+ return TimeDelta(minutes * Time::kMicrosecondsPerMinute);
+}
+
+
+TimeDelta TimeDelta::FromSeconds(int64_t seconds) {
+ return TimeDelta(seconds * Time::kMicrosecondsPerSecond);
+}
+
+
+TimeDelta TimeDelta::FromMilliseconds(int64_t milliseconds) {
+ return TimeDelta(milliseconds * Time::kMicrosecondsPerMillisecond);
+}
+
+
+TimeDelta TimeDelta::FromNanoseconds(int64_t nanoseconds) {
+ return TimeDelta(nanoseconds / Time::kNanosecondsPerMicrosecond);
+}
+
+
+int TimeDelta::InDays() const {
+ return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
+}
+
+
+int TimeDelta::InHours() const {
+ return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
+}
+
+
+int TimeDelta::InMinutes() const {
+ return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
+}
+
+
+double TimeDelta::InSecondsF() const {
+ return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
+}
+
+
+int64_t TimeDelta::InSeconds() const {
+ return delta_ / Time::kMicrosecondsPerSecond;
+}
+
+
+double TimeDelta::InMillisecondsF() const {
+ return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
+}
+
+
+int64_t TimeDelta::InMilliseconds() const {
+ return delta_ / Time::kMicrosecondsPerMillisecond;
+}
+
+
+int64_t TimeDelta::InNanoseconds() const {
+ return delta_ * Time::kNanosecondsPerMicrosecond;
+}
+
+
+#if V8_OS_MACOSX
+
+TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
+ ASSERT_GE(ts.tv_nsec, 0);
+ ASSERT_LT(ts.tv_nsec,
+ static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT
+ return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
+ ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+}
+
+
+struct mach_timespec TimeDelta::ToMachTimespec() const {
+ struct mach_timespec ts;
+ ASSERT(delta_ >= 0);
+ ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond;
+ ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
+ Time::kNanosecondsPerMicrosecond;
+ return ts;
+}
+
+#endif // V8_OS_MACOSX
+
+
+#if V8_OS_POSIX
+
+TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
+ ASSERT_GE(ts.tv_nsec, 0);
+ ASSERT_LT(ts.tv_nsec,
+ static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT
+ return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
+ ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+}
+
+
+struct timespec TimeDelta::ToTimespec() const {
+ struct timespec ts;
+ ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond;
+ ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
+ Time::kNanosecondsPerMicrosecond;
+ return ts;
+}
+
+#endif // V8_OS_POSIX
+
+
+#if V8_OS_WIN
+
+// We implement time using the high-resolution timers so that we can get
+// timeouts which are smaller than 10-15ms. To avoid any drift, we
+// periodically resync the internal clock to the system clock.
+class Clock V8_FINAL {
+ public:
+ Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}
+
+ Time Now() {
+ // Time between resampling the un-granular clock for this API (1 minute).
+ const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);
+
+ LockGuard<Mutex> lock_guard(&mutex_);
+
+ // Determine current time and ticks.
+ TimeTicks ticks = GetSystemTicks();
+ Time time = GetSystemTime();
+
+ // Check if we need to synchronize with the system clock due to a backwards
+ // time change or the amount of time elapsed.
+ TimeDelta elapsed = ticks - initial_ticks_;
+ if (time < initial_time_ || elapsed > kMaxElapsedTime) {
+ initial_ticks_ = ticks;
+ initial_time_ = time;
+ return time;
+ }
+
+ return initial_time_ + elapsed;
+ }
+
+ Time NowFromSystemTime() {
+ LockGuard<Mutex> lock_guard(&mutex_);
+ initial_ticks_ = GetSystemTicks();
+ initial_time_ = GetSystemTime();
+ return initial_time_;
+ }
+
+ private:
+ static TimeTicks GetSystemTicks() {
+ return TimeTicks::Now();
+ }
+
+ static Time GetSystemTime() {
+ FILETIME ft;
+ ::GetSystemTimeAsFileTime(&ft);
+ return Time::FromFiletime(ft);
+ }
+
+ TimeTicks initial_ticks_;
+ Time initial_time_;
+ Mutex mutex_;
+};
+
+
+static LazyStaticInstance<Clock, DefaultConstructTrait<Clock>,
+ ThreadSafeInitOnceTrait>::type clock =
+ LAZY_STATIC_INSTANCE_INITIALIZER;
+
+
+Time Time::Now() {
+ return clock.Pointer()->Now();
+}
+
+
+Time Time::NowFromSystemTime() {
+ return clock.Pointer()->NowFromSystemTime();
+}
+
+
+// Time between the Windows epoch (1601-01-01) and the Unix epoch
+// (1970-01-01), in microseconds.
+static const int64_t kTimeToEpochInMicroseconds = V8_INT64_C(11644473600000000);
+
+
+Time Time::FromFiletime(FILETIME ft) {
+ if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
+ return Time();
+ }
+ if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
+ ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
+ return Max();
+ }
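+  // FILETIME counts 100 ns intervals since 1601-01-01; dividing by 10 yields
+  // microseconds, which are then shifted to the Unix epoch.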
+ int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
+ (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
+ return Time(us - kTimeToEpochInMicroseconds);
+}
+
+
+FILETIME Time::ToFiletime() const {
+ ASSERT(us_ >= 0);
+ FILETIME ft;
+ if (IsNull()) {
+ ft.dwLowDateTime = 0;
+ ft.dwHighDateTime = 0;
+ return ft;
+ }
+ if (IsMax()) {
+ ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
+ ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
+ return ft;
+ }
+ uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
+ ft.dwLowDateTime = static_cast<DWORD>(us);
+ ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
+ return ft;
+}
+
+#elif V8_OS_POSIX
+
+Time Time::Now() {
+ struct timeval tv;
+ int result = gettimeofday(&tv, NULL);
+ ASSERT_EQ(0, result);
+ USE(result);
+ return FromTimeval(tv);
+}
+
+
+Time Time::NowFromSystemTime() {
+ return Now();
+}
+
+
+Time Time::FromTimespec(struct timespec ts) {
+ ASSERT(ts.tv_nsec >= 0);
+ ASSERT(ts.tv_nsec < static_cast<long>(kNanosecondsPerSecond)); // NOLINT
+ if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
+ return Time();
+ }
+ if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) && // NOLINT
+ ts.tv_sec == std::numeric_limits<time_t>::max()) {
+ return Max();
+ }
+ return Time(ts.tv_sec * kMicrosecondsPerSecond +
+ ts.tv_nsec / kNanosecondsPerMicrosecond);
+}
+
+
+struct timespec Time::ToTimespec() const {
+ struct timespec ts;
+ if (IsNull()) {
+ ts.tv_sec = 0;
+ ts.tv_nsec = 0;
+ return ts;
+ }
+ if (IsMax()) {
+ ts.tv_sec = std::numeric_limits<time_t>::max();
+ ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1); // NOLINT
+ return ts;
+ }
+ ts.tv_sec = us_ / kMicrosecondsPerSecond;
+ ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
+ return ts;
+}
+
+
+Time Time::FromTimeval(struct timeval tv) {
+ ASSERT(tv.tv_usec >= 0);
+ ASSERT(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
+ if (tv.tv_usec == 0 && tv.tv_sec == 0) {
+ return Time();
+ }
+ if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
+ tv.tv_sec == std::numeric_limits<time_t>::max()) {
+ return Max();
+ }
+ return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
+}
+
+
+struct timeval Time::ToTimeval() const {
+ struct timeval tv;
+ if (IsNull()) {
+ tv.tv_sec = 0;
+ tv.tv_usec = 0;
+ return tv;
+ }
+ if (IsMax()) {
+ tv.tv_sec = std::numeric_limits<time_t>::max();
+ tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
+ return tv;
+ }
+ tv.tv_sec = us_ / kMicrosecondsPerSecond;
+ tv.tv_usec = us_ % kMicrosecondsPerSecond;
+ return tv;
+}
+
+#endif // V8_OS_WIN
+
+
+Time Time::FromJsTime(double ms_since_epoch) {
+ // The epoch is a valid time, so this constructor doesn't interpret
+ // 0 as the null time.
+ if (ms_since_epoch == std::numeric_limits<double>::max()) {
+ return Max();
+ }
+ return Time(
+ static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
+}
+
+
+double Time::ToJsTime() const {
+ if (IsNull()) {
+ // Preserve 0 so the invalid result doesn't depend on the platform.
+ return 0;
+ }
+ if (IsMax()) {
+ // Preserve max without offset to prevent overflow.
+ return std::numeric_limits<double>::max();
+ }
+ return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
+}
+
+
+#if V8_OS_WIN
+
+class TickClock {
+ public:
+ virtual ~TickClock() {}
+ virtual int64_t Now() = 0;
+ virtual bool IsHighResolution() = 0;
+};
+
+
+// Overview of time counters:
+// (1) CPU cycle counter. (Retrieved via RDTSC)
+// The CPU counter provides the highest resolution time stamp and is the least
+// expensive to retrieve. However, the CPU counter is unreliable and should not
+// be used in production. Its biggest issue is that it is per processor and it
+// is not synchronized between processors. Also, on some computers, the counters
+// will change frequency due to thermal and power changes, and stop in some
+// states.
+//
+// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
+// resolution (100 nanoseconds) time stamp but is comparatively more expensive
+// to retrieve. What QueryPerformanceCounter actually does is up to the HAL
+// (with some help from ACPI).
+// According to http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx
+// in the worst case, it gets the counter from the rollover interrupt on the
+// programmable interrupt timer. In best cases, the HAL may conclude that the
+// RDTSC counter runs at a constant frequency, then it uses that instead. On
+// multiprocessor machines, it will try to verify the values returned from
+// RDTSC on each processor are consistent with each other, and apply a handful
+// of workarounds for known buggy hardware. In other words, QPC is supposed to
+// give consistent result on a multiprocessor computer, but it is unreliable in
+// reality due to bugs in BIOS or HAL on some, especially old computers.
+// With recent updates on HAL and newer BIOS, QPC is getting more reliable but
+// it should be used with caution.
+//
+// (3) System time. The system time provides a low-resolution (typically 10 ms
+// to 55 ms) time stamp but is comparatively less expensive to
+// retrieve and more reliable.
+class HighResolutionTickClock V8_FINAL : public TickClock {
+ public:
+ explicit HighResolutionTickClock(int64_t ticks_per_second)
+ : ticks_per_second_(ticks_per_second) {
+ ASSERT_LT(0, ticks_per_second);
+ }
+ virtual ~HighResolutionTickClock() {}
+
+ virtual int64_t Now() V8_OVERRIDE {
+ LARGE_INTEGER now;
+ BOOL result = QueryPerformanceCounter(&now);
+ ASSERT(result);
+ USE(result);
+
+    // Intentionally calculate microseconds in a roundabout manner to avoid
+ // overflow and precision issues. Think twice before simplifying!
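+    // For example, with a 10 MHz performance counter, multiplying the raw
+    // tick count by kMicrosecondsPerSecond would overflow int64 after roughly
+    // ten days of uptime; splitting into whole seconds plus leftover ticks
+    // keeps both intermediate products small.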
+ int64_t whole_seconds = now.QuadPart / ticks_per_second_;
+ int64_t leftover_ticks = now.QuadPart % ticks_per_second_;
+ int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) +
+ ((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_);
+
+ // Make sure we never return 0 here, so that TimeTicks::HighResolutionNow()
+ // will never return 0.
+ return ticks + 1;
+ }
+
+ virtual bool IsHighResolution() V8_OVERRIDE {
+ return true;
+ }
+
+ private:
+ int64_t ticks_per_second_;
+};
+
+
+class RolloverProtectedTickClock V8_FINAL : public TickClock {
+ public:
+ // We initialize rollover_ms_ to 1 to ensure that we will never
+ // return 0 from TimeTicks::HighResolutionNow() and TimeTicks::Now() below.
+ RolloverProtectedTickClock() : last_seen_now_(0), rollover_ms_(1) {}
+ virtual ~RolloverProtectedTickClock() {}
+
+ virtual int64_t Now() V8_OVERRIDE {
+ LockGuard<Mutex> lock_guard(&mutex_);
+ // We use timeGetTime() to implement TimeTicks::Now(), which rolls over
+ // every ~49.7 days. We try to track rollover ourselves, which works if
+ // TimeTicks::Now() is called at least every 49 days.
+ // Note that we do not use GetTickCount() here, since timeGetTime() gives
+ // more predictable delta values, as described here:
+ // http://blogs.msdn.com/b/larryosterman/archive/2009/09/02/what-s-the-difference-between-gettickcount-and-timegettime.aspx
+ // timeGetTime() provides 1ms granularity when combined with
+ // timeBeginPeriod(). If the host application for V8 wants fast timers, it
+ // can use timeBeginPeriod() to increase the resolution.
+ DWORD now = timeGetTime();
+ if (now < last_seen_now_) {
+ rollover_ms_ += V8_INT64_C(0x100000000); // ~49.7 days.
+ }
+ last_seen_now_ = now;
+ return (now + rollover_ms_) * Time::kMicrosecondsPerMillisecond;
+ }
+
+ virtual bool IsHighResolution() V8_OVERRIDE {
+ return false;
+ }
+
+ private:
+ Mutex mutex_;
+ DWORD last_seen_now_;
+ int64_t rollover_ms_;
+};
+
+
+static LazyStaticInstance<RolloverProtectedTickClock,
+ DefaultConstructTrait<RolloverProtectedTickClock>,
+ ThreadSafeInitOnceTrait>::type tick_clock =
+ LAZY_STATIC_INSTANCE_INITIALIZER;
+
+
+struct CreateHighResTickClockTrait {
+ static TickClock* Create() {
+ // Check if the installed hardware supports a high-resolution performance
+ // counter, and if not fallback to the low-resolution tick clock.
+ LARGE_INTEGER ticks_per_second;
+ if (!QueryPerformanceFrequency(&ticks_per_second)) {
+ return tick_clock.Pointer();
+ }
+
+ // On Athlon X2 CPUs (e.g. model 15) the QueryPerformanceCounter
+ // is unreliable, fallback to the low-resolution tick clock.
+ CPU cpu;
+ if (strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15) {
+ return tick_clock.Pointer();
+ }
+
+ return new HighResolutionTickClock(ticks_per_second.QuadPart);
+ }
+};
+
+
+static LazyDynamicInstance<TickClock, CreateHighResTickClockTrait,
+ ThreadSafeInitOnceTrait>::type high_res_tick_clock =
+ LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+
+TimeTicks TimeTicks::Now() {
+ // Make sure we never return 0 here.
+ TimeTicks ticks(tick_clock.Pointer()->Now());
+ ASSERT(!ticks.IsNull());
+ return ticks;
+}
+
+
+TimeTicks TimeTicks::HighResolutionNow() {
+ // Make sure we never return 0 here.
+ TimeTicks ticks(high_res_tick_clock.Pointer()->Now());
+ ASSERT(!ticks.IsNull());
+ return ticks;
+}
+
+
+// static
+bool TimeTicks::IsHighResolutionClockWorking() {
+ return high_res_tick_clock.Pointer()->IsHighResolution();
+}
+
+#else // V8_OS_WIN
+
+TimeTicks TimeTicks::Now() {
+ return HighResolutionNow();
+}
+
+
+TimeTicks TimeTicks::HighResolutionNow() {
+ int64_t ticks;
+#if V8_OS_MACOSX
+ static struct mach_timebase_info info;
+ if (info.denom == 0) {
+ kern_return_t result = mach_timebase_info(&info);
+ ASSERT_EQ(KERN_SUCCESS, result);
+ USE(result);
+ }
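+  // mach_absolute_time() returns ticks in an opaque time base; the
+  // numer/denom ratio from mach_timebase_info() converts ticks to
+  // nanoseconds. Dividing by kNanosecondsPerMicrosecond first yields
+  // microseconds and keeps the intermediate product from overflowing.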
+ ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
+ info.numer / info.denom);
+#elif V8_OS_SOLARIS
+ ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
+#elif V8_LIBRT_NOT_AVAILABLE
+ // TODO(bmeurer): This is a temporary hack to support cross-compiling
+ // Chrome for Android in AOSP. Remove this once AOSP is fixed, also
+ // cleanup the tools/gyp/v8.gyp file.
+ struct timeval tv;
+ int result = gettimeofday(&tv, NULL);
+ ASSERT_EQ(0, result);
+ USE(result);
+ ticks = (tv.tv_sec * Time::kMicrosecondsPerSecond + tv.tv_usec);
+#elif V8_OS_POSIX
+ struct timespec ts;
+ int result = clock_gettime(CLOCK_MONOTONIC, &ts);
+ ASSERT_EQ(0, result);
+ USE(result);
+ ticks = (ts.tv_sec * Time::kMicrosecondsPerSecond +
+ ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+#endif // V8_OS_MACOSX
+ // Make sure we never return 0 here.
+ return TimeTicks(ticks + 1);
+}
+
+
+// static
+bool TimeTicks::IsHighResolutionClockWorking() {
+ return true;
+}
+
+#endif // V8_OS_WIN
+
+} } // namespace v8::base
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_PLATFORM_TIME_H_
+#define V8_BASE_PLATFORM_TIME_H_
+
+#include <time.h>
+#include <limits>
+
+#include "src/base/macros.h"
+
+// Forward declarations.
+extern "C" {
+struct _FILETIME;
+struct mach_timespec;
+struct timespec;
+struct timeval;
+}
+
+namespace v8 {
+namespace base {
+
+class Time;
+class TimeTicks;
+
+// -----------------------------------------------------------------------------
+// TimeDelta
+//
+// This class represents a duration of time, internally represented in
+// microseconds.
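+//
+// Illustrative usage (a sketch, not part of this change):
+//
+//   TimeDelta timeout = TimeDelta::FromMilliseconds(250);
+//   int64_t us = timeout.InMicroseconds();  // 250000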
+
+class TimeDelta V8_FINAL {
+ public:
+ TimeDelta() : delta_(0) {}
+
+ // Converts units of time to TimeDeltas.
+ static TimeDelta FromDays(int days);
+ static TimeDelta FromHours(int hours);
+ static TimeDelta FromMinutes(int minutes);
+ static TimeDelta FromSeconds(int64_t seconds);
+ static TimeDelta FromMilliseconds(int64_t milliseconds);
+ static TimeDelta FromMicroseconds(int64_t microseconds) {
+ return TimeDelta(microseconds);
+ }
+ static TimeDelta FromNanoseconds(int64_t nanoseconds);
+
+ // Returns the time delta in some unit. The F versions return a floating
+ // point value, the "regular" versions return a rounded-down value.
+ //
+ // InMillisecondsRoundedUp() instead returns an integer that is rounded up
+ // to the next full millisecond.
+ int InDays() const;
+ int InHours() const;
+ int InMinutes() const;
+ double InSecondsF() const;
+ int64_t InSeconds() const;
+ double InMillisecondsF() const;
+ int64_t InMilliseconds() const;
+ int64_t InMillisecondsRoundedUp() const;
+ int64_t InMicroseconds() const { return delta_; }
+ int64_t InNanoseconds() const;
+
+ // Converts to/from Mach time specs.
+ static TimeDelta FromMachTimespec(struct mach_timespec ts);
+ struct mach_timespec ToMachTimespec() const;
+
+ // Converts to/from POSIX time specs.
+ static TimeDelta FromTimespec(struct timespec ts);
+ struct timespec ToTimespec() const;
+
+ TimeDelta& operator=(const TimeDelta& other) {
+ delta_ = other.delta_;
+ return *this;
+ }
+
+ // Computations with other deltas.
+ TimeDelta operator+(const TimeDelta& other) const {
+ return TimeDelta(delta_ + other.delta_);
+ }
+ TimeDelta operator-(const TimeDelta& other) const {
+ return TimeDelta(delta_ - other.delta_);
+ }
+
+ TimeDelta& operator+=(const TimeDelta& other) {
+ delta_ += other.delta_;
+ return *this;
+ }
+ TimeDelta& operator-=(const TimeDelta& other) {
+ delta_ -= other.delta_;
+ return *this;
+ }
+ TimeDelta operator-() const {
+ return TimeDelta(-delta_);
+ }
+
+ double TimesOf(const TimeDelta& other) const {
+ return static_cast<double>(delta_) / static_cast<double>(other.delta_);
+ }
+ double PercentOf(const TimeDelta& other) const {
+ return TimesOf(other) * 100.0;
+ }
+
+  // Computations with ints. Note that we only allow multiplicative operations
+  // with ints, and additive operations with other deltas.
+ TimeDelta operator*(int64_t a) const {
+ return TimeDelta(delta_ * a);
+ }
+ TimeDelta operator/(int64_t a) const {
+ return TimeDelta(delta_ / a);
+ }
+ TimeDelta& operator*=(int64_t a) {
+ delta_ *= a;
+ return *this;
+ }
+ TimeDelta& operator/=(int64_t a) {
+ delta_ /= a;
+ return *this;
+ }
+ int64_t operator/(const TimeDelta& other) const {
+ return delta_ / other.delta_;
+ }
+
+ // Comparison operators.
+ bool operator==(const TimeDelta& other) const {
+ return delta_ == other.delta_;
+ }
+ bool operator!=(const TimeDelta& other) const {
+ return delta_ != other.delta_;
+ }
+ bool operator<(const TimeDelta& other) const {
+ return delta_ < other.delta_;
+ }
+ bool operator<=(const TimeDelta& other) const {
+ return delta_ <= other.delta_;
+ }
+ bool operator>(const TimeDelta& other) const {
+ return delta_ > other.delta_;
+ }
+ bool operator>=(const TimeDelta& other) const {
+ return delta_ >= other.delta_;
+ }
+
+ private:
+ // Constructs a delta given the duration in microseconds. This is private
+ // to avoid confusion by callers with an integer constructor. Use
+ // FromSeconds, FromMilliseconds, etc. instead.
+ explicit TimeDelta(int64_t delta) : delta_(delta) {}
+
+ // Delta in microseconds.
+ int64_t delta_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Time
+//
+// This class represents an absolute point in time, internally represented as
+// microseconds (millionths of a second) since 00:00:00 UTC, January 1, 1970.
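+//
+// Illustrative usage (a sketch, not part of this change):
+//
+//   Time now = Time::Now();
+//   double js_ms = now.ToJsTime();  // Milliseconds since the epoch, as used
+//                                   // by JavaScript's Date.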
+
+class Time V8_FINAL {
+ public:
+ static const int64_t kMillisecondsPerSecond = 1000;
+ static const int64_t kMicrosecondsPerMillisecond = 1000;
+ static const int64_t kMicrosecondsPerSecond = kMicrosecondsPerMillisecond *
+ kMillisecondsPerSecond;
+ static const int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
+ static const int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
+ static const int64_t kMicrosecondsPerDay = kMicrosecondsPerHour * 24;
+ static const int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
+ static const int64_t kNanosecondsPerMicrosecond = 1000;
+ static const int64_t kNanosecondsPerSecond = kNanosecondsPerMicrosecond *
+ kMicrosecondsPerSecond;
+
+ // Contains the NULL time. Use Time::Now() to get the current time.
+ Time() : us_(0) {}
+
+ // Returns true if the time object has not been initialized.
+ bool IsNull() const { return us_ == 0; }
+
+ // Returns true if the time object is the maximum time.
+ bool IsMax() const { return us_ == std::numeric_limits<int64_t>::max(); }
+
+  // Returns the current time. Watch out: the system might adjust its clock,
+  // in which case time will actually go backwards. We don't guarantee that
+ // times are increasing, or that two calls to Now() won't be the same.
+ static Time Now();
+
+ // Returns the current time. Same as Now() except that this function always
+ // uses system time so that there are no discrepancies between the returned
+  // time and system time, even in virtual environments, including our test
+  // bots. Timing-sensitive unit tests should use this function.
+ static Time NowFromSystemTime();
+
+  // Returns the time of the Unix epoch (Jan 1, 1970).
+ static Time UnixEpoch() { return Time(0); }
+
+ // Returns the maximum time, which should be greater than any reasonable time
+ // with which we might compare it.
+ static Time Max() { return Time(std::numeric_limits<int64_t>::max()); }
+
+ // Converts to/from internal values. The meaning of the "internal value" is
+ // completely up to the implementation, so it should be treated as opaque.
+ static Time FromInternalValue(int64_t value) {
+ return Time(value);
+ }
+ int64_t ToInternalValue() const {
+ return us_;
+ }
+
+ // Converts to/from POSIX time specs.
+ static Time FromTimespec(struct timespec ts);
+ struct timespec ToTimespec() const;
+
+ // Converts to/from POSIX time values.
+ static Time FromTimeval(struct timeval tv);
+ struct timeval ToTimeval() const;
+
+ // Converts to/from Windows file times.
+ static Time FromFiletime(struct _FILETIME ft);
+ struct _FILETIME ToFiletime() const;
+
+  // Converts to/from the JavaScript convention for times: a number of
+  // milliseconds since the epoch.
+ static Time FromJsTime(double ms_since_epoch);
+ double ToJsTime() const;
+
+ Time& operator=(const Time& other) {
+ us_ = other.us_;
+ return *this;
+ }
+
+ // Compute the difference between two times.
+ TimeDelta operator-(const Time& other) const {
+ return TimeDelta::FromMicroseconds(us_ - other.us_);
+ }
+
+ // Modify by some time delta.
+ Time& operator+=(const TimeDelta& delta) {
+ us_ += delta.InMicroseconds();
+ return *this;
+ }
+ Time& operator-=(const TimeDelta& delta) {
+ us_ -= delta.InMicroseconds();
+ return *this;
+ }
+
+ // Return a new time modified by some delta.
+ Time operator+(const TimeDelta& delta) const {
+ return Time(us_ + delta.InMicroseconds());
+ }
+ Time operator-(const TimeDelta& delta) const {
+ return Time(us_ - delta.InMicroseconds());
+ }
+
+ // Comparison operators
+ bool operator==(const Time& other) const {
+ return us_ == other.us_;
+ }
+ bool operator!=(const Time& other) const {
+ return us_ != other.us_;
+ }
+ bool operator<(const Time& other) const {
+ return us_ < other.us_;
+ }
+ bool operator<=(const Time& other) const {
+ return us_ <= other.us_;
+ }
+ bool operator>(const Time& other) const {
+ return us_ > other.us_;
+ }
+ bool operator>=(const Time& other) const {
+ return us_ >= other.us_;
+ }
+
+ private:
+ explicit Time(int64_t us) : us_(us) {}
+
+ // Time in microseconds in UTC.
+ int64_t us_;
+};
+
+inline Time operator+(const TimeDelta& delta, const Time& time) {
+ return time + delta;
+}
+
+
+// -----------------------------------------------------------------------------
+// TimeTicks
+//
+// This class represents an abstract, (mostly) monotonically increasing time
+// used for measuring time durations. It is internally represented in
+// microseconds. It cannot be converted to a human-readable time, but is
+// guaranteed not to decrease (if the user changes the computer clock,
+// Time::Now() may actually decrease or jump). Note, however, that TimeTicks
+// may "stand still", for example while the computer is suspended.
+
+class TimeTicks V8_FINAL {
+ public:
+ TimeTicks() : ticks_(0) {}
+
+ // Platform-dependent tick count representing "right now."
+ // The resolution of this clock is ~1-15ms. Resolution varies depending
+ // on hardware/operating system configuration.
+ // This method never returns a null TimeTicks.
+ static TimeTicks Now();
+
+ // Returns a platform-dependent high-resolution tick count. Implementation
+ // is hardware dependent and may or may not return sub-millisecond
+ // resolution. THIS CALL IS GENERALLY MUCH MORE EXPENSIVE THAN Now() AND
+ // SHOULD ONLY BE USED WHEN IT IS REALLY NEEDED.
+ // This method never returns a null TimeTicks.
+ static TimeTicks HighResolutionNow();
+
+ // Returns true if the high-resolution clock is working on this system.
+ static bool IsHighResolutionClockWorking();
+
+ // Returns true if this object has not been initialized.
+ bool IsNull() const { return ticks_ == 0; }
+
+ // Converts to/from internal values. The meaning of the "internal value" is
+ // completely up to the implementation, so it should be treated as opaque.
+ static TimeTicks FromInternalValue(int64_t value) {
+ return TimeTicks(value);
+ }
+ int64_t ToInternalValue() const {
+ return ticks_;
+ }
+
+ TimeTicks& operator=(const TimeTicks other) {
+ ticks_ = other.ticks_;
+ return *this;
+ }
+
+ // Compute the difference between two times.
+ TimeDelta operator-(const TimeTicks other) const {
+ return TimeDelta::FromMicroseconds(ticks_ - other.ticks_);
+ }
+
+ // Modify by some time delta.
+ TimeTicks& operator+=(const TimeDelta& delta) {
+ ticks_ += delta.InMicroseconds();
+ return *this;
+ }
+ TimeTicks& operator-=(const TimeDelta& delta) {
+ ticks_ -= delta.InMicroseconds();
+ return *this;
+ }
+
+ // Return a new TimeTicks modified by some delta.
+ TimeTicks operator+(const TimeDelta& delta) const {
+ return TimeTicks(ticks_ + delta.InMicroseconds());
+ }
+ TimeTicks operator-(const TimeDelta& delta) const {
+ return TimeTicks(ticks_ - delta.InMicroseconds());
+ }
+
+ // Comparison operators
+ bool operator==(const TimeTicks& other) const {
+ return ticks_ == other.ticks_;
+ }
+ bool operator!=(const TimeTicks& other) const {
+ return ticks_ != other.ticks_;
+ }
+ bool operator<(const TimeTicks& other) const {
+ return ticks_ < other.ticks_;
+ }
+ bool operator<=(const TimeTicks& other) const {
+ return ticks_ <= other.ticks_;
+ }
+ bool operator>(const TimeTicks& other) const {
+ return ticks_ > other.ticks_;
+ }
+ bool operator>=(const TimeTicks& other) const {
+ return ticks_ >= other.ticks_;
+ }
+
+ private:
+ // Please use Now() to create a new object. This is for internal use
+  // and testing. Ticks are in microseconds.
+ explicit TimeTicks(int64_t ticks) : ticks_(ticks) {}
+
+ // Tick count in microseconds.
+ int64_t ticks_;
+};
+
+inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) {
+ return ticks + delta;
+}
+
+} } // namespace v8::base
+
+#endif // V8_BASE_PLATFORM_TIME_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_QNX_MATH_H_
+#define V8_BASE_QNX_MATH_H_
+
+#include <cmath>
+
+#undef fpclassify
+#undef isfinite
+#undef isinf
+#undef isnan
+#undef isnormal
+#undef signbit
+
+using std::lrint;
+
+#endif // V8_BASE_QNX_MATH_H_
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/utils/random-number-generator.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <new>
+
+#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/time.h"
+
+namespace v8 {
+namespace base {
+
+static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER;
+static RandomNumberGenerator::EntropySource entropy_source = NULL;
+
+
+// static
+void RandomNumberGenerator::SetEntropySource(EntropySource source) {
+ LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
+ entropy_source = source;
+}
+
+
+RandomNumberGenerator::RandomNumberGenerator() {
+ // Check if embedder supplied an entropy source.
+ { LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
+ if (entropy_source != NULL) {
+ int64_t seed;
+ if (entropy_source(reinterpret_cast<unsigned char*>(&seed),
+ sizeof(seed))) {
+ SetSeed(seed);
+ return;
+ }
+ }
+ }
+
+#if V8_OS_CYGWIN || V8_OS_WIN
+ // Use rand_s() to gather entropy on Windows. See:
+ // https://code.google.com/p/v8/issues/detail?id=2905
+ unsigned first_half, second_half;
+ errno_t result = rand_s(&first_half);
+ ASSERT_EQ(0, result);
+ result = rand_s(&second_half);
+ ASSERT_EQ(0, result);
+ SetSeed((static_cast<int64_t>(first_half) << 32) + second_half);
+#else
+ // Gather entropy from /dev/urandom if available.
+ FILE* fp = fopen("/dev/urandom", "rb");
+ if (fp != NULL) {
+ int64_t seed;
+ size_t n = fread(&seed, sizeof(seed), 1, fp);
+ fclose(fp);
+ if (n == 1) {
+ SetSeed(seed);
+ return;
+ }
+ }
+
+ // We cannot assume that random() or rand() were seeded
+ // properly, so instead of relying on random() or rand(),
+ // we just seed our PRNG using timing data as fallback.
+ // This is weak entropy, but it's sufficient, because
+ // it is the responsibility of the embedder to install
+ // an entropy source using v8::V8::SetEntropySource(),
+ // which provides reasonable entropy, see:
+ // https://code.google.com/p/v8/issues/detail?id=2905
+ int64_t seed = Time::NowFromSystemTime().ToInternalValue() << 24;
+ seed ^= TimeTicks::HighResolutionNow().ToInternalValue() << 16;
+ seed ^= TimeTicks::Now().ToInternalValue() << 8;
+ SetSeed(seed);
+#endif // V8_OS_CYGWIN || V8_OS_WIN
+}
+
+
+int RandomNumberGenerator::NextInt(int max) {
+ ASSERT_LE(0, max);
+
+ // Fast path if max is a power of 2.
+ if (IS_POWER_OF_TWO(max)) {
+ return static_cast<int>((max * static_cast<int64_t>(Next(31))) >> 31);
+ }
+
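+  // General case: rejection sampling. A plain Next(31) % max would be biased
+  // towards smaller values whenever max does not evenly divide 2^31; the
+  // overflow check below discards samples from the incomplete last bucket.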
+ while (true) {
+ int rnd = Next(31);
+ int val = rnd % max;
+ if (rnd - val + (max - 1) >= 0) {
+ return val;
+ }
+ }
+}
+
+
+double RandomNumberGenerator::NextDouble() {
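+  // Combine 26 high bits and 27 low bits into a 53-bit value and scale it by
+  // 2^-53, the same construction used by java.util.Random::nextDouble().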
+ return ((static_cast<int64_t>(Next(26)) << 27) + Next(27)) /
+ static_cast<double>(static_cast<int64_t>(1) << 53);
+}
+
+
+void RandomNumberGenerator::NextBytes(void* buffer, size_t buflen) {
+ for (size_t n = 0; n < buflen; ++n) {
+ static_cast<uint8_t*>(buffer)[n] = static_cast<uint8_t>(Next(8));
+ }
+}
+
+
+int RandomNumberGenerator::Next(int bits) {
+ ASSERT_LT(0, bits);
+ ASSERT_GE(32, bits);
+  // Do unsigned multiplication, which has the intended modulo semantics;
+  // signed multiplication would overflow and expose undefined behavior.
+ uint64_t product = static_cast<uint64_t>(seed_) * kMultiplier;
+ // Assigning a uint64_t to an int64_t is implementation defined, but this
+ // should be OK. Use a static_cast to explicitly state that we know what we're
+ // doing. (Famous last words...)
+ int64_t seed = static_cast<int64_t>((product + kAddend) & kMask);
+ seed_ = seed;
+ return static_cast<int>(seed >> (48 - bits));
+}
+
+
+void RandomNumberGenerator::SetSeed(int64_t seed) {
+ seed_ = (seed ^ kMultiplier) & kMask;
+}
+
+} } // namespace v8::base
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_
+#define V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+// -----------------------------------------------------------------------------
+// RandomNumberGenerator
+//
+// This class is used to generate a stream of pseudorandom numbers. The class
+// uses a 48-bit seed, which is modified using a linear congruential formula.
+// (See Donald Knuth, The Art of Computer Programming, Volume 2, Section 3.2.1.)
+// If two instances of RandomNumberGenerator are created with the same seed, and
+// the same sequence of method calls is made for each, they will generate and
+// return identical sequences of numbers.
+// This class uses (probably) weak entropy by default, but it's sufficient,
+// because it is the responsibility of the embedder to install an entropy source
+// using v8::V8::SetEntropySource(), which provides reasonable entropy, see:
+// https://code.google.com/p/v8/issues/detail?id=2905
+// This class is neither reentrant nor threadsafe.
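+//
+// The underlying recurrence (the same one used by java.util.Random) is
+//   seed' = (seed * 0x5DEECE66D + 0xB) mod 2^48,
+// and Next(bits) returns the topmost |bits| bits of the new seed.
+//
+// Illustrative usage (a sketch, not part of this change):
+//
+//   RandomNumberGenerator rng;
+//   int die_roll = 1 + rng.NextInt(6);  // Uniformly distributed in [1, 6].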
+
+class RandomNumberGenerator V8_FINAL {
+ public:
+ // EntropySource is used as a callback function when V8 needs a source of
+ // entropy.
+ typedef bool (*EntropySource)(unsigned char* buffer, size_t buflen);
+ static void SetEntropySource(EntropySource entropy_source);
+
+ RandomNumberGenerator();
+ explicit RandomNumberGenerator(int64_t seed) { SetSeed(seed); }
+
+ // Returns the next pseudorandom, uniformly distributed int value from this
+ // random number generator's sequence. The general contract of |NextInt()| is
+ // that one int value is pseudorandomly generated and returned.
+ // All 2^32 possible integer values are produced with (approximately) equal
+ // probability.
+ V8_INLINE int NextInt() V8_WARN_UNUSED_RESULT {
+ return Next(32);
+ }
+
+ // Returns a pseudorandom, uniformly distributed int value between 0
+ // (inclusive) and the specified max value (exclusive), drawn from this random
+ // number generator's sequence. The general contract of |NextInt(int)| is that
+ // one int value in the specified range is pseudorandomly generated and
+ // returned. All max possible int values are produced with (approximately)
+ // equal probability.
+ int NextInt(int max) V8_WARN_UNUSED_RESULT;
+
+ // Returns the next pseudorandom, uniformly distributed boolean value from
+ // this random number generator's sequence. The general contract of
+  // |NextBool()| is that one boolean value is pseudorandomly generated and
+ // returned. The values true and false are produced with (approximately) equal
+ // probability.
+ V8_INLINE bool NextBool() V8_WARN_UNUSED_RESULT {
+ return Next(1) != 0;
+ }
+
+ // Returns the next pseudorandom, uniformly distributed double value between
+ // 0.0 and 1.0 from this random number generator's sequence.
+ // The general contract of |NextDouble()| is that one double value, chosen
+ // (approximately) uniformly from the range 0.0 (inclusive) to 1.0
+ // (exclusive), is pseudorandomly generated and returned.
+ double NextDouble() V8_WARN_UNUSED_RESULT;
+
+ // Fills the elements of a specified array of bytes with random numbers.
+ void NextBytes(void* buffer, size_t buflen);
+
+  // Override the current seed.
+ void SetSeed(int64_t seed);
+
+ private:
+ static const int64_t kMultiplier = V8_2PART_UINT64_C(0x5, deece66d);
+ static const int64_t kAddend = 0xb;
+ static const int64_t kMask = V8_2PART_UINT64_C(0xffff, ffffffff);
+
+ int Next(int bits) V8_WARN_UNUSED_RESULT;
+
+ int64_t seed_;
+};
+
+} } // namespace v8::base
+
+#endif // V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please
+// refer to The Open Group Base Specification for specification of the correct
+// semantics for these functions.
+// (http://www.opengroup.org/onlinepubs/000095399/)
+#if defined(_MSC_VER) && (_MSC_VER < 1800)
+
+#include "src/base/win32-headers.h"
+#include <float.h> // Required for DBL_MAX and on Win32 for finite()
+#include <limits.h> // Required for INT_MAX etc.
+#include <cmath>
+#include "src/base/win32-math.h"
+
+#include "src/base/logging.h"
+
+
+namespace std {
+
+// Test for a NaN (not a number) value - usually defined in math.h
+int isnan(double x) {
+ return _isnan(x);
+}
+
+
+// Test for infinity - usually defined in math.h
+int isinf(double x) {
+ return (_fpclass(x) & (_FPCLASS_PINF | _FPCLASS_NINF)) != 0;
+}
+
+
+// Test for finite value - usually defined in math.h
+int isfinite(double x) {
+ return _finite(x);
+}
+
+
+// Test if x is less than y and neither is NaN - usually defined in math.h
+int isless(double x, double y) {
+ return isnan(x) || isnan(y) ? 0 : x < y;
+}
+
+
+// Test if x is greater than y and neither is NaN - usually defined in math.h
+int isgreater(double x, double y) {
+ return isnan(x) || isnan(y) ? 0 : x > y;
+}
+
+
+// Classify floating point number - usually defined in math.h
+int fpclassify(double x) {
+ // Use the MS-specific _fpclass() for classification.
+ int flags = _fpclass(x);
+
+ // Determine class. We cannot use a switch statement because
+ // the _FPCLASS_ constants are defined as flags.
+ if (flags & (_FPCLASS_PN | _FPCLASS_NN)) return FP_NORMAL;
+ if (flags & (_FPCLASS_PZ | _FPCLASS_NZ)) return FP_ZERO;
+ if (flags & (_FPCLASS_PD | _FPCLASS_ND)) return FP_SUBNORMAL;
+ if (flags & (_FPCLASS_PINF | _FPCLASS_NINF)) return FP_INFINITE;
+
+ // All cases should be covered by the code above.
+ ASSERT(flags & (_FPCLASS_SNAN | _FPCLASS_QNAN));
+ return FP_NAN;
+}
+
+
+// Test sign - usually defined in math.h
+int signbit(double x) {
+ // We need to take care of the special case of both positive
+ // and negative versions of zero.
+ if (x == 0)
+ return _fpclass(x) & _FPCLASS_NZ;
+ else
+ return x < 0;
+}
+
+} // namespace std
+
+#endif  // defined(_MSC_VER) && (_MSC_VER < 1800)
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please
+// refer to The Open Group Base Specification for specification of the correct
+// semantics for these functions.
+// (http://www.opengroup.org/onlinepubs/000095399/)
+
+#ifndef V8_BASE_WIN32_MATH_H_
+#define V8_BASE_WIN32_MATH_H_
+
+#ifndef _MSC_VER
+#error Wrong environment, expected MSVC.
+#endif // _MSC_VER
+
+// MSVC 2013+ provides implementations of all standard math functions.
+#if (_MSC_VER < 1800)
+enum {
+ FP_NAN,
+ FP_INFINITE,
+ FP_ZERO,
+ FP_SUBNORMAL,
+ FP_NORMAL
+};
+
+
+namespace std {
+
+int isfinite(double x);
+int isinf(double x);
+int isnan(double x);
+int isless(double x, double y);
+int isgreater(double x, double y);
+int fpclassify(double x);
+int signbit(double x);
+
+} // namespace std
+
+#endif // _MSC_VER < 1800
+
+#endif // V8_BASE_WIN32_MATH_H_
#include <cmath>
#include "include/v8stdint.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
#include "src/utils.h"
#include "src/bignum-dtoa.h"
// When an error is thrown during bootstrapping we automatically print
// the line number at which this happened to the console in the isolate
// error throwing functionality.
- OS::PrintError("Error installing extension '%s'.\n",
- current->extension()->name());
+ base::OS::PrintError("Error installing extension '%s'.\n",
+ current->extension()->name());
isolate->clear_pending_exception();
}
extension_states->set_state(current, INSTALLED);
#include <cmath>
#include "include/v8stdint.h"
+#include "src/base/logging.h"
#include "src/cached-powers.h"
-#include "src/checks.h"
#include "src/globals.h"
namespace v8 {
#ifndef V8_CACHED_POWERS_H_
#define V8_CACHED_POWERS_H_
-#include "src/checks.h"
+#include "src/base/logging.h"
#include "src/diy-fp.h"
namespace v8 {
#include "src/checks.h"
-#if V8_LIBC_GLIBC || V8_OS_BSD
-# include <cxxabi.h>
-# include <execinfo.h>
-#elif V8_OS_QNX
-# include <backtrace.h>
-#endif // V8_LIBC_GLIBC || V8_OS_BSD
-#include <stdio.h>
-
-#include "src/platform.h"
#include "src/v8.h"
namespace v8 {
intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
-// Attempts to dump a backtrace (if supported).
-void DumpBacktrace() {
-#if V8_LIBC_GLIBC || V8_OS_BSD
- void* trace[100];
- int size = backtrace(trace, ARRAY_SIZE(trace));
- char** symbols = backtrace_symbols(trace, size);
- OS::PrintError("\n==== C stack trace ===============================\n\n");
- if (size == 0) {
- OS::PrintError("(empty)\n");
- } else if (symbols == NULL) {
- OS::PrintError("(no symbols)\n");
- } else {
- for (int i = 1; i < size; ++i) {
- OS::PrintError("%2d: ", i);
- char mangled[201];
- if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT
- int status;
- size_t length;
- char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
- OS::PrintError("%s\n", demangled != NULL ? demangled : mangled);
- free(demangled);
- } else {
- OS::PrintError("??\n");
- }
- }
- }
- free(symbols);
-#elif V8_OS_QNX
- char out[1024];
- bt_accessor_t acc;
- bt_memmap_t memmap;
- bt_init_accessor(&acc, BT_SELF);
- bt_load_memmap(&acc, &memmap);
- bt_sprn_memmap(&memmap, out, sizeof(out));
- OS::PrintError(out);
- bt_addr_t trace[100];
- int size = bt_get_backtrace(&acc, trace, ARRAY_SIZE(trace));
- OS::PrintError("\n==== C stack trace ===============================\n\n");
- if (size == 0) {
- OS::PrintError("(empty)\n");
- } else {
- bt_sprnf_addrs(&memmap, trace, size, const_cast<char*>("%a\n"),
- out, sizeof(out), NULL);
- OS::PrintError(out);
- }
- bt_unload_memmap(&memmap);
- bt_release_accessor(&acc);
-#endif // V8_LIBC_GLIBC || V8_OS_BSD
-}
-
} } // namespace v8::internal
-// Contains protection against recursive calls (faults while handling faults).
-extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
- fflush(stdout);
- fflush(stderr);
- i::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file, line);
- va_list arguments;
- va_start(arguments, format);
- i::OS::VPrintError(format, arguments);
- va_end(arguments);
- i::OS::PrintError("\n#\n");
- v8::internal::DumpBacktrace();
- fflush(stderr);
- i::OS::Abort();
-}
-
-
void CheckEqualsHelper(const char* file,
int line,
const char* expected_source,
#ifndef V8_CHECKS_H_
#define V8_CHECKS_H_
-#include <string.h>
-
-#include "include/v8stdint.h"
-#include "src/base/build_config.h"
-
-extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
-
-
-// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
-// development, but they should not be relied on in the final product.
-#ifdef DEBUG
-#define FATAL(msg) \
- V8_Fatal(__FILE__, __LINE__, "%s", (msg))
-#define UNIMPLEMENTED() \
- V8_Fatal(__FILE__, __LINE__, "unimplemented code")
-#define UNREACHABLE() \
- V8_Fatal(__FILE__, __LINE__, "unreachable code")
-#else
-#define FATAL(msg) \
- V8_Fatal("", 0, "%s", (msg))
-#define UNIMPLEMENTED() \
- V8_Fatal("", 0, "unimplemented code")
-#define UNREACHABLE() ((void) 0)
-#endif
+#include "src/base/logging.h"
// Simulator specific helpers.
// We can't use USE_SIMULATOR here because it isn't defined yet.
#endif
-// The CHECK macro checks that the given condition is true; if not, it
-// prints a message to stderr and aborts.
-#define CHECK(condition) do { \
- if (!(condition)) { \
- V8_Fatal(__FILE__, __LINE__, "CHECK(%s) failed", #condition); \
- } \
- } while (0)
-
-
-// Helper function used by the CHECK_EQ function when given int
-// arguments. Should not be called directly.
-inline void CheckEqualsHelper(const char* file, int line,
- const char* expected_source, int expected,
- const char* value_source, int value) {
- if (expected != value) {
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %i\n# Found: %i",
- expected_source, value_source, expected, value);
- }
-}
-
-
-// Helper function used by the CHECK_EQ function when given int64_t
-// arguments. Should not be called directly.
-inline void CheckEqualsHelper(const char* file, int line,
- const char* expected_source,
- int64_t expected,
- const char* value_source,
- int64_t value) {
- if (expected != value) {
- // Print int64_t values in hex, as two int32s,
- // to avoid platform-dependencies.
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n#"
- " Expected: 0x%08x%08x\n# Found: 0x%08x%08x",
- expected_source, value_source,
- static_cast<uint32_t>(expected >> 32),
- static_cast<uint32_t>(expected),
- static_cast<uint32_t>(value >> 32),
- static_cast<uint32_t>(value));
- }
-}
-
-
-// Helper function used by the CHECK_NE function when given int
-// arguments. Should not be called directly.
-inline void CheckNonEqualsHelper(const char* file,
- int line,
- const char* unexpected_source,
- int unexpected,
- const char* value_source,
- int value) {
- if (unexpected == value) {
- V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %i",
- unexpected_source, value_source, value);
- }
-}
-
-
-// Helper function used by the CHECK function when given string
-// arguments. Should not be called directly.
-inline void CheckEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- const char* expected,
- const char* value_source,
- const char* value) {
- if ((expected == NULL && value != NULL) ||
- (expected != NULL && value == NULL) ||
- (expected != NULL && value != NULL && strcmp(expected, value) != 0)) {
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %s\n# Found: %s",
- expected_source, value_source, expected, value);
- }
-}
-
-
-inline void CheckNonEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- const char* expected,
- const char* value_source,
- const char* value) {
- if (expected == value ||
- (expected != NULL && value != NULL && strcmp(expected, value) == 0)) {
- V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %s",
- expected_source, value_source, value);
- }
-}
-
-
-// Helper function used by the CHECK function when given pointer
-// arguments. Should not be called directly.
-inline void CheckEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- const void* expected,
- const char* value_source,
- const void* value) {
- if (expected != value) {
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %p\n# Found: %p",
- expected_source, value_source,
- expected, value);
- }
-}
-
-
-inline void CheckNonEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- const void* expected,
- const char* value_source,
- const void* value) {
- if (expected == value) {
- V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %p",
- expected_source, value_source, value);
- }
-}
-
-
-// Helper function used by the CHECK function when given floating
-// point arguments. Should not be called directly.
-inline void CheckEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- double expected,
- const char* value_source,
- double value) {
- // Force values to 64 bit memory to truncate 80 bit precision on IA32.
- volatile double* exp = new double[1];
- *exp = expected;
- volatile double* val = new double[1];
- *val = value;
- if (*exp != *val) {
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %f\n# Found: %f",
- expected_source, value_source, *exp, *val);
- }
- delete[] exp;
- delete[] val;
-}
-
-
-inline void CheckNonEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- int64_t expected,
- const char* value_source,
- int64_t value) {
- if (expected == value) {
- V8_Fatal(file, line,
- "CHECK_EQ(%s, %s) failed\n# Expected: %f\n# Found: %f",
- expected_source, value_source, expected, value);
- }
-}
-
-
-inline void CheckNonEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- double expected,
- const char* value_source,
- double value) {
- // Force values to 64 bit memory to truncate 80 bit precision on IA32.
- volatile double* exp = new double[1];
- *exp = expected;
- volatile double* val = new double[1];
- *val = value;
- if (*exp == *val) {
- V8_Fatal(file, line,
- "CHECK_NE(%s, %s) failed\n# Value: %f",
- expected_source, value_source, *val);
- }
- delete[] exp;
- delete[] val;
-}
-
-
-#define CHECK_EQ(expected, value) CheckEqualsHelper(__FILE__, __LINE__, \
- #expected, expected, #value, value)
-
-
-#define CHECK_NE(unexpected, value) CheckNonEqualsHelper(__FILE__, __LINE__, \
- #unexpected, unexpected, #value, value)
-
-
-#define CHECK_GT(a, b) CHECK((a) > (b))
-#define CHECK_GE(a, b) CHECK((a) >= (b))
-#define CHECK_LT(a, b) CHECK((a) < (b))
-#define CHECK_LE(a, b) CHECK((a) <= (b))
-
-
#ifdef DEBUG
#ifndef OPTIMIZED_DEBUG
#define ENABLE_SLOW_ASSERTS 1
#endif
namespace v8 {
+
+class Value;
+template <class T> class Handle;
+
namespace internal {
+
+intptr_t HeapObjectTagMask();
+
#ifdef ENABLE_SLOW_ASSERTS
#define SLOW_ASSERT(condition) \
CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition))
const bool FLAG_enable_slow_asserts = false;
#endif
-// Exposed for making debugging easier (to see where your function is being
-// called, just add a call to DumpBacktrace).
-void DumpBacktrace();
-
} } // namespace v8::internal
-// The ASSERT macro is equivalent to CHECK except that it only
-// generates code in debug builds.
-#ifdef DEBUG
-#define ASSERT_RESULT(expr) CHECK(expr)
-#define ASSERT(condition) CHECK(condition)
-#define ASSERT_EQ(v1, v2) CHECK_EQ(v1, v2)
-#define ASSERT_NE(v1, v2) CHECK_NE(v1, v2)
-#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
-#define ASSERT_LT(v1, v2) CHECK_LT(v1, v2)
-#define ASSERT_LE(v1, v2) CHECK_LE(v1, v2)
-#else
-#define ASSERT_RESULT(expr) (expr)
-#define ASSERT(condition) ((void) 0)
-#define ASSERT_EQ(v1, v2) ((void) 0)
-#define ASSERT_NE(v1, v2) ((void) 0)
-#define ASSERT_GE(v1, v2) ((void) 0)
-#define ASSERT_LT(v1, v2) ((void) 0)
-#define ASSERT_LE(v1, v2) ((void) 0)
-#endif
+void CheckNonEqualsHelper(const char* file,
+ int line,
+ const char* unexpected_source,
+ v8::Handle<v8::Value> unexpected,
+ const char* value_source,
+ v8::Handle<v8::Value> value);
-#define ASSERT_NOT_NULL(p) ASSERT_NE(NULL, p)
+void CheckEqualsHelper(const char* file,
+ int line,
+ const char* expected_source,
+ v8::Handle<v8::Value> expected,
+ const char* value_source,
+ v8::Handle<v8::Value> value);
-// "Extra checks" are lightweight checks that are enabled in some release
-// builds.
-#ifdef ENABLE_EXTRA_CHECKS
-#define EXTRA_CHECK(condition) CHECK(condition)
-#else
-#define EXTRA_CHECK(condition) ((void) 0)
-#endif
+#define ASSERT_TAG_ALIGNED(address) \
+ ASSERT((reinterpret_cast<intptr_t>(address) & HeapObjectTagMask()) == 0)
+
+#define ASSERT_SIZE_TAG_ALIGNED(size) ASSERT((size & HeapObjectTagMask()) == 0)
#endif // V8_CHECKS_H_
ASSERT(!descriptor->stack_parameter_count().is_valid());
return stub->GenerateLightweightMissCode();
}
- ElapsedTimer timer;
+ base::ElapsedTimer timer;
if (FLAG_profile_hydrogen_code_stub_compilation) {
timer.Start();
}
// performance of the hydrogen-based compiler.
bool should_recompile = !info()->shared_info()->has_deoptimization_support();
if (should_recompile || FLAG_hydrogen_stats) {
- ElapsedTimer timer;
+ base::ElapsedTimer timer;
if (FLAG_hydrogen_stats) {
timer.Start();
}
: (FLAG_trace_hydrogen &&
info()->closure()->PassesFilter(FLAG_trace_hydrogen_filter));
return (tracing_on &&
- OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL);
+ base::OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL);
}
} } // namespace v8::internal
HOptimizedGraphBuilder* graph_builder_;
HGraph* graph_;
LChunk* chunk_;
- TimeDelta time_taken_to_create_graph_;
- TimeDelta time_taken_to_optimize_;
- TimeDelta time_taken_to_codegen_;
+ base::TimeDelta time_taken_to_create_graph_;
+ base::TimeDelta time_taken_to_optimize_;
+ base::TimeDelta time_taken_to_codegen_;
Status last_status_;
bool awaiting_install_;
void RecordOptimizationStats();
struct Timer {
- Timer(OptimizedCompileJob* job, TimeDelta* location)
+ Timer(OptimizedCompileJob* job, base::TimeDelta* location)
: job_(job), location_(location) {
ASSERT(location_ != NULL);
timer_.Start();
}
OptimizedCompileJob* job_;
- ElapsedTimer timer_;
- TimeDelta* location_;
+ base::ElapsedTimer timer_;
+ base::TimeDelta* location_;
};
};
CompilationInfo* info_;
Zone zone_;
unsigned info_zone_start_allocation_size_;
- ElapsedTimer timer_;
+ base::ElapsedTimer timer_;
DISALLOW_COPY_AND_ASSIGN(CompilationPhase);
};
// ----------------------------------------------------------------------------
// Extra POSIX/ANSI functions for Win32/MSVC.
+#include "src/base/platform/platform.h"
#include "src/conversions.h"
#include "src/double.h"
-#include "src/platform.h"
#include "src/scanner.h"
#include "src/strtod.h"
#include <limits>
-#include "src/checks.h"
+#include "src/base/logging.h"
#include "src/handles.h"
#include "src/objects.h"
#include "src/utils.h"
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/counters.h"
#include "src/isolate.h"
-#include "src/platform.h"
namespace v8 {
namespace internal {
#include "include/v8.h"
#include "src/allocation.h"
+#include "src/base/platform/elapsed-timer.h"
#include "src/globals.h"
#include "src/objects.h"
-#include "src/platform/elapsed-timer.h"
namespace v8 {
namespace internal {
// TODO(bmeurer): Remove this when HistogramTimerScope is fixed.
#ifdef DEBUG
- ElapsedTimer* timer() { return &timer_; }
+ base::ElapsedTimer* timer() { return &timer_; }
#endif
private:
- ElapsedTimer timer_;
+ base::ElapsedTimer timer_;
};
// Helper class for scoping a HistogramTimer.
ProfilerEventsProcessor::ProfilerEventsProcessor(
ProfileGenerator* generator,
Sampler* sampler,
- TimeDelta period)
+ base::TimeDelta period)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
sampler_(sampler),
void ProfilerEventsProcessor::Run() {
while (running_) {
- ElapsedTimer timer;
+ base::ElapsedTimer timer;
timer.Start();
// Keep processing existing events until we need to do next sample.
do {
CpuProfiler::CpuProfiler(Isolate* isolate)
: isolate_(isolate),
- sampling_interval_(TimeDelta::FromMicroseconds(
+ sampling_interval_(base::TimeDelta::FromMicroseconds(
FLAG_cpu_profiler_sampling_interval)),
profiles_(new CpuProfilesCollection(isolate->heap())),
generator_(NULL),
ProfileGenerator* test_generator,
ProfilerEventsProcessor* test_processor)
: isolate_(isolate),
- sampling_interval_(TimeDelta::FromMicroseconds(
+ sampling_interval_(base::TimeDelta::FromMicroseconds(
FLAG_cpu_profiler_sampling_interval)),
profiles_(test_profiles),
generator_(test_generator),
}
-void CpuProfiler::set_sampling_interval(TimeDelta value) {
+void CpuProfiler::set_sampling_interval(base::TimeDelta value) {
ASSERT(!is_profiling_);
sampling_interval_ = value;
}
#include "src/allocation.h"
#include "src/base/atomicops.h"
+#include "src/base/platform/time.h"
#include "src/circular-queue.h"
-#include "src/platform/time.h"
#include "src/sampler.h"
#include "src/unbound-queue.h"
// This class implements both the profile events processor thread and
// methods called by event producers: VM and stack sampler threads.
-class ProfilerEventsProcessor : public Thread {
+class ProfilerEventsProcessor : public base::Thread {
public:
ProfilerEventsProcessor(ProfileGenerator* generator,
Sampler* sampler,
- TimeDelta period);
+ base::TimeDelta period);
virtual ~ProfilerEventsProcessor() {}
// Thread control.
Sampler* sampler_;
bool running_;
// Sampling period in microseconds.
- const TimeDelta period_;
+ const base::TimeDelta period_;
UnboundQueue<CodeEventsContainer> events_buffer_;
static const size_t kTickSampleBufferSize = 1 * MB;
static const size_t kTickSampleQueueLength =
virtual ~CpuProfiler();
- void set_sampling_interval(TimeDelta value);
+ void set_sampling_interval(base::TimeDelta value);
void StartProfiling(const char* title, bool record_samples = false);
void StartProfiling(String* title, bool record_samples);
CpuProfile* StopProfiling(const char* title);
void LogBuiltins();
Isolate* isolate_;
- TimeDelta sampling_interval_;
+ base::TimeDelta sampling_interval_;
CpuProfilesCollection* profiles_;
ProfileGenerator* generator_;
ProfilerEventsProcessor* processor_;
+++ /dev/null
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/cpu.h"
-
-#if V8_LIBC_MSVCRT
-#include <intrin.h> // __cpuid()
-#endif
-#if V8_OS_POSIX
-#include <unistd.h> // sysconf()
-#endif
-#if V8_OS_QNX
-#include <sys/syspage.h> // cpuinfo
-#endif
-
-#include <ctype.h>
-#include <limits.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <algorithm>
-
-#include "src/checks.h"
-#if V8_OS_WIN
-#include "src/base/win32-headers.h" // NOLINT
-#endif
-
-namespace v8 {
-namespace internal {
-
-#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
-
-// Define __cpuid() for non-MSVC libraries.
-#if !V8_LIBC_MSVCRT
-
-static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
-#if defined(__i386__) && defined(__pic__)
- // Make sure to preserve ebx, which contains the pointer
- // to the GOT in case we're generating PIC.
- __asm__ volatile (
- "mov %%ebx, %%edi\n\t"
- "cpuid\n\t"
- "xchg %%edi, %%ebx\n\t"
- : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
- : "a"(info_type)
- );
-#else
- __asm__ volatile (
- "cpuid \n\t"
- : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
- : "a"(info_type)
- );
-#endif // defined(__i386__) && defined(__pic__)
-}
-
-#endif // !V8_LIBC_MSVCRT
-
-#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || V8_HOST_ARCH_MIPS
-
-#if V8_OS_LINUX
-
-#if V8_HOST_ARCH_ARM
-
-// See <uapi/asm/hwcap.h> kernel header.
-/*
- * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP
- */
-#define HWCAP_SWP (1 << 0)
-#define HWCAP_HALF (1 << 1)
-#define HWCAP_THUMB (1 << 2)
-#define HWCAP_26BIT (1 << 3) /* Play it safe */
-#define HWCAP_FAST_MULT (1 << 4)
-#define HWCAP_FPA (1 << 5)
-#define HWCAP_VFP (1 << 6)
-#define HWCAP_EDSP (1 << 7)
-#define HWCAP_JAVA (1 << 8)
-#define HWCAP_IWMMXT (1 << 9)
-#define HWCAP_CRUNCH (1 << 10)
-#define HWCAP_THUMBEE (1 << 11)
-#define HWCAP_NEON (1 << 12)
-#define HWCAP_VFPv3 (1 << 13)
-#define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */
-#define HWCAP_TLS (1 << 15)
-#define HWCAP_VFPv4 (1 << 16)
-#define HWCAP_IDIVA (1 << 17)
-#define HWCAP_IDIVT (1 << 18)
-#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
-#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
-#define HWCAP_LPAE (1 << 20)
-
-#define AT_HWCAP 16
-
-// Read the ELF HWCAP flags by parsing /proc/self/auxv.
-static uint32_t ReadELFHWCaps() {
- uint32_t result = 0;
- FILE* fp = fopen("/proc/self/auxv", "r");
- if (fp != NULL) {
- struct { uint32_t tag; uint32_t value; } entry;
- for (;;) {
- size_t n = fread(&entry, sizeof(entry), 1, fp);
- if (n == 0 || (entry.tag == 0 && entry.value == 0)) {
- break;
- }
- if (entry.tag == AT_HWCAP) {
- result = entry.value;
- break;
- }
- }
- fclose(fp);
- }
- return result;
-}
-
-#endif // V8_HOST_ARCH_ARM
-
-// Extract the information exposed by the kernel via /proc/cpuinfo.
-class CPUInfo V8_FINAL BASE_EMBEDDED {
- public:
- CPUInfo() : datalen_(0) {
- // Get the size of the cpuinfo file by reading it until the end. This is
- // required because files under /proc do not always return a valid size
- // when using fseek(0, SEEK_END) + ftell(). Nor can they be mmap()-ed.
- static const char PATHNAME[] = "/proc/cpuinfo";
- FILE* fp = fopen(PATHNAME, "r");
- if (fp != NULL) {
- for (;;) {
- char buffer[256];
- size_t n = fread(buffer, 1, sizeof(buffer), fp);
- if (n == 0) {
- break;
- }
- datalen_ += n;
- }
- fclose(fp);
- }
-
- // Read the contents of the cpuinfo file.
- data_ = new char[datalen_ + 1];
- fp = fopen(PATHNAME, "r");
- if (fp != NULL) {
- for (size_t offset = 0; offset < datalen_; ) {
- size_t n = fread(data_ + offset, 1, datalen_ - offset, fp);
- if (n == 0) {
- break;
- }
- offset += n;
- }
- fclose(fp);
- }
-
- // Zero-terminate the data.
- data_[datalen_] = '\0';
- }
-
- ~CPUInfo() {
- delete[] data_;
- }
-
- // Extract the content of the first occurrence of a given field in
- // the content of the cpuinfo file and return it as a heap-allocated
- // string that must be freed by the caller using delete[].
- // Return NULL if not found.
- char* ExtractField(const char* field) const {
- ASSERT(field != NULL);
-
- // Look for the first field occurrence, and ensure it starts the line.
- size_t fieldlen = strlen(field);
- char* p = data_;
- for (;;) {
- p = strstr(p, field);
- if (p == NULL) {
- return NULL;
- }
- if (p == data_ || p[-1] == '\n') {
- break;
- }
- p += fieldlen;
- }
-
- // Skip to the first colon followed by a space.
- p = strchr(p + fieldlen, ':');
- if (p == NULL || !isspace(p[1])) {
- return NULL;
- }
- p += 2;
-
- // Find the end of the line.
- char* q = strchr(p, '\n');
- if (q == NULL) {
- q = data_ + datalen_;
- }
-
- // Copy the line into a heap-allocated buffer.
- size_t len = q - p;
- char* result = new char[len + 1];
- if (result != NULL) {
- memcpy(result, p, len);
- result[len] = '\0';
- }
- return result;
- }
-
- private:
- char* data_;
- size_t datalen_;
-};
-
-#if V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
-
-// Checks that a space-separated list of items contains one given 'item'.
-static bool HasListItem(const char* list, const char* item) {
- ssize_t item_len = strlen(item);
- const char* p = list;
- if (p != NULL) {
- while (*p != '\0') {
- // Skip whitespace.
- while (isspace(*p)) ++p;
-
- // Find end of current list item.
- const char* q = p;
- while (*q != '\0' && !isspace(*q)) ++q;
-
- if (item_len == q - p && memcmp(p, item, item_len) == 0) {
- return true;
- }
-
- // Skip to next item.
- p = q;
- }
- }
- return false;
-}
-
-#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
-
-#endif // V8_OS_LINUX
-
-#endif // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
-
-CPU::CPU() : stepping_(0),
- model_(0),
- ext_model_(0),
- family_(0),
- ext_family_(0),
- type_(0),
- implementer_(0),
- architecture_(0),
- part_(0),
- has_fpu_(false),
- has_cmov_(false),
- has_sahf_(false),
- has_mmx_(false),
- has_sse_(false),
- has_sse2_(false),
- has_sse3_(false),
- has_ssse3_(false),
- has_sse41_(false),
- has_sse42_(false),
- has_idiva_(false),
- has_neon_(false),
- has_thumb2_(false),
- has_vfp_(false),
- has_vfp3_(false),
- has_vfp3_d32_(false) {
- memcpy(vendor_, "Unknown", 8);
-#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
- int cpu_info[4];
-
- // __cpuid with an InfoType argument of 0 returns the number of
- // valid Ids in CPUInfo[0] and the CPU identification string in
- // the other three array elements. The CPU identification string is
- // not in linear order. The code below arranges the information
- // in a human readable form. The human readable order is CPUInfo[1] |
- // CPUInfo[3] | CPUInfo[2]. CPUInfo[2] and CPUInfo[3] are swapped
- // before using memcpy to copy these three array elements to vendor_.
- __cpuid(cpu_info, 0);
- unsigned num_ids = cpu_info[0];
- std::swap(cpu_info[2], cpu_info[3]);
- memcpy(vendor_, cpu_info + 1, 12);
- vendor_[12] = '\0';
-
- // Interpret CPU feature information.
- if (num_ids > 0) {
- __cpuid(cpu_info, 1);
- stepping_ = cpu_info[0] & 0xf;
- model_ = ((cpu_info[0] >> 4) & 0xf) + ((cpu_info[0] >> 12) & 0xf0);
- family_ = (cpu_info[0] >> 8) & 0xf;
- type_ = (cpu_info[0] >> 12) & 0x3;
- ext_model_ = (cpu_info[0] >> 16) & 0xf;
- ext_family_ = (cpu_info[0] >> 20) & 0xff;
- has_fpu_ = (cpu_info[3] & 0x00000001) != 0;
- has_cmov_ = (cpu_info[3] & 0x00008000) != 0;
- has_mmx_ = (cpu_info[3] & 0x00800000) != 0;
- has_sse_ = (cpu_info[3] & 0x02000000) != 0;
- has_sse2_ = (cpu_info[3] & 0x04000000) != 0;
- has_sse3_ = (cpu_info[2] & 0x00000001) != 0;
- has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
- has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
- has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
- }
-
-#if V8_HOST_ARCH_IA32
- // SAHF is always available in compat/legacy mode,
- has_sahf_ = true;
-#else
- // Query extended IDs.
- __cpuid(cpu_info, 0x80000000);
- unsigned num_ext_ids = cpu_info[0];
-
- // Interpret extended CPU feature information.
- if (num_ext_ids > 0x80000000) {
- __cpuid(cpu_info, 0x80000001);
- // SAHF must be probed in long mode.
- has_sahf_ = (cpu_info[2] & 0x00000001) != 0;
- }
-#endif
-
-#elif V8_HOST_ARCH_ARM
-
-#if V8_OS_LINUX
-
- CPUInfo cpu_info;
-
- // Extract implementor from the "CPU implementer" field.
- char* implementer = cpu_info.ExtractField("CPU implementer");
- if (implementer != NULL) {
- char* end ;
- implementer_ = strtol(implementer, &end, 0);
- if (end == implementer) {
- implementer_ = 0;
- }
- delete[] implementer;
- }
-
- // Extract part number from the "CPU part" field.
- char* part = cpu_info.ExtractField("CPU part");
- if (part != NULL) {
- char* end ;
- part_ = strtol(part, &end, 0);
- if (end == part) {
- part_ = 0;
- }
- delete[] part;
- }
-
- // Extract architecture from the "CPU Architecture" field.
- // The list is well-known, unlike the output of
- // the 'Processor' field which can vary greatly.
- // See the definition of the 'proc_arch' array in
- // $KERNEL/arch/arm/kernel/setup.c and the 'c_show' function in
- // same file.
- char* architecture = cpu_info.ExtractField("CPU architecture");
- if (architecture != NULL) {
- char* end;
- architecture_ = strtol(architecture, &end, 10);
- if (end == architecture) {
- architecture_ = 0;
- }
- delete[] architecture;
-
- // Unfortunately, it seems that certain ARMv6-based CPUs
- // report an incorrect architecture number of 7!
- //
- // See http://code.google.com/p/android/issues/detail?id=10812
- //
- // We try to correct this by looking at the 'elf_format'
- // field reported by the 'Processor' field, which is of the
- // form of "(v7l)" for an ARMv7-based CPU, and "(v6l)" for
- // an ARMv6-one. For example, the Raspberry Pi is one popular
- // ARMv6 device that reports architecture 7.
- if (architecture_ == 7) {
- char* processor = cpu_info.ExtractField("Processor");
- if (HasListItem(processor, "(v6l)")) {
- architecture_ = 6;
- }
- delete[] processor;
- }
- }
-
- // Try to extract the list of CPU features from ELF hwcaps.
- uint32_t hwcaps = ReadELFHWCaps();
- if (hwcaps != 0) {
- has_idiva_ = (hwcaps & HWCAP_IDIVA) != 0;
- has_neon_ = (hwcaps & HWCAP_NEON) != 0;
- has_vfp_ = (hwcaps & HWCAP_VFP) != 0;
- has_vfp3_ = (hwcaps & (HWCAP_VFPv3 | HWCAP_VFPv3D16 | HWCAP_VFPv4)) != 0;
- has_vfp3_d32_ = (has_vfp3_ && ((hwcaps & HWCAP_VFPv3D16) == 0 ||
- (hwcaps & HWCAP_VFPD32) != 0));
- } else {
- // Try to fallback to "Features" CPUInfo field.
- char* features = cpu_info.ExtractField("Features");
- has_idiva_ = HasListItem(features, "idiva");
- has_neon_ = HasListItem(features, "neon");
- has_thumb2_ = HasListItem(features, "thumb2");
- has_vfp_ = HasListItem(features, "vfp");
- if (HasListItem(features, "vfpv3d16")) {
- has_vfp3_ = true;
- } else if (HasListItem(features, "vfpv3")) {
- has_vfp3_ = true;
- has_vfp3_d32_ = true;
- }
- delete[] features;
- }
-
- // Some old kernels will report vfp not vfpv3. Here we make an attempt
- // to detect vfpv3 by checking for vfp *and* neon, since neon is only
- // available on architectures with vfpv3. Checking neon on its own is
- // not enough as it is possible to have neon without vfp.
- if (has_vfp_ && has_neon_) {
- has_vfp3_ = true;
- }
-
- // VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
- if (architecture_ < 7 && has_vfp3_) {
- architecture_ = 7;
- }
-
- // ARMv7 implies Thumb2.
- if (architecture_ >= 7) {
- has_thumb2_ = true;
- }
-
- // The earliest architecture with Thumb2 is ARMv6T2.
- if (has_thumb2_ && architecture_ < 6) {
- architecture_ = 6;
- }
-
- // We don't support any FPUs other than VFP.
- has_fpu_ = has_vfp_;
-
-#elif V8_OS_QNX
-
- uint32_t cpu_flags = SYSPAGE_ENTRY(cpuinfo)->flags;
- if (cpu_flags & ARM_CPU_FLAG_V7) {
- architecture_ = 7;
- has_thumb2_ = true;
- } else if (cpu_flags & ARM_CPU_FLAG_V6) {
- architecture_ = 6;
- // QNX doesn't say if Thumb2 is available.
- // Assume false for the architectures older than ARMv7.
- }
- ASSERT(architecture_ >= 6);
- has_fpu_ = (cpu_flags & CPU_FLAG_FPU) != 0;
- has_vfp_ = has_fpu_;
- if (cpu_flags & ARM_CPU_FLAG_NEON) {
- has_neon_ = true;
- has_vfp3_ = has_vfp_;
-#ifdef ARM_CPU_FLAG_VFP_D32
- has_vfp3_d32_ = (cpu_flags & ARM_CPU_FLAG_VFP_D32) != 0;
-#endif
- }
- has_idiva_ = (cpu_flags & ARM_CPU_FLAG_IDIV) != 0;
-
-#endif // V8_OS_LINUX
-
-#elif V8_HOST_ARCH_MIPS
-
- // Simple detection of FPU at runtime for Linux.
- // It is based on /proc/cpuinfo, which reveals hardware configuration
- // to user-space applications. According to MIPS (early 2010), no similar
- // facility is universally available on the MIPS architectures,
- // so it's up to individual OSes to provide such.
- CPUInfo cpu_info;
- char* cpu_model = cpu_info.ExtractField("cpu model");
- has_fpu_ = HasListItem(cpu_model, "FPU");
- delete[] cpu_model;
-
-#elif V8_HOST_ARCH_ARM64
-
- CPUInfo cpu_info;
-
- // Extract implementor from the "CPU implementer" field.
- char* implementer = cpu_info.ExtractField("CPU implementer");
- if (implementer != NULL) {
- char* end ;
- implementer_ = strtol(implementer, &end, 0);
- if (end == implementer) {
- implementer_ = 0;
- }
- delete[] implementer;
- }
-
- // Extract part number from the "CPU part" field.
- char* part = cpu_info.ExtractField("CPU part");
- if (part != NULL) {
- char* end ;
- part_ = strtol(part, &end, 0);
- if (end == part) {
- part_ = 0;
- }
- delete[] part;
- }
-
-#endif
-}
-
-} } // namespace v8::internal
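The file above is deleted because the CPU-probing code moves into the base library. A short sketch (not part of the patch) of how callers pick the class up afterwards, following the include path and namespace used by the assembler hunks later in this change:

  #include "src/base/cpu.h"

  bool SupportsMandatoryIA32Features() {
    v8::base::CPU cpu;                 // the constructor probes the host CPU
    return cpu.has_sse2() && cpu.has_cmov();
  }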
+++ /dev/null
-// Copyright 2006-2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This module contains the architecture-specific code. This makes the rest of
-// the code less dependent on differences between different processor
-// architectures.
-// The classes have the same definition for all architectures. The
-// implementation for a particular architecture is put in cpu_<arch>.cc.
-// The build system then uses the implementation for the target architecture.
-//
-
-#ifndef V8_CPU_H_
-#define V8_CPU_H_
-
-#include "src/allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// CPU
-//
-// Query information about the processor.
-//
-// This class also has static methods for the architecture specific functions.
-// Add methods here to cope with differences between the supported
-// architectures. For each architecture the file cpu_<arch>.cc contains the
-// implementation of these static functions.
-
-class CPU V8_FINAL BASE_EMBEDDED {
- public:
- CPU();
-
- // x86 CPUID information
- const char* vendor() const { return vendor_; }
- int stepping() const { return stepping_; }
- int model() const { return model_; }
- int ext_model() const { return ext_model_; }
- int family() const { return family_; }
- int ext_family() const { return ext_family_; }
- int type() const { return type_; }
-
- // arm implementer/part information
- int implementer() const { return implementer_; }
- static const int ARM = 0x41;
- static const int NVIDIA = 0x4e;
- static const int QUALCOMM = 0x51;
- int architecture() const { return architecture_; }
- int part() const { return part_; }
- static const int ARM_CORTEX_A5 = 0xc05;
- static const int ARM_CORTEX_A7 = 0xc07;
- static const int ARM_CORTEX_A8 = 0xc08;
- static const int ARM_CORTEX_A9 = 0xc09;
- static const int ARM_CORTEX_A12 = 0xc0c;
- static const int ARM_CORTEX_A15 = 0xc0f;
-
- // General features
- bool has_fpu() const { return has_fpu_; }
-
- // x86 features
- bool has_cmov() const { return has_cmov_; }
- bool has_sahf() const { return has_sahf_; }
- bool has_mmx() const { return has_mmx_; }
- bool has_sse() const { return has_sse_; }
- bool has_sse2() const { return has_sse2_; }
- bool has_sse3() const { return has_sse3_; }
- bool has_ssse3() const { return has_ssse3_; }
- bool has_sse41() const { return has_sse41_; }
- bool has_sse42() const { return has_sse42_; }
-
- // arm features
- bool has_idiva() const { return has_idiva_; }
- bool has_neon() const { return has_neon_; }
- bool has_thumb2() const { return has_thumb2_; }
- bool has_vfp() const { return has_vfp_; }
- bool has_vfp3() const { return has_vfp3_; }
- bool has_vfp3_d32() const { return has_vfp3_d32_; }
-
- // Flush instruction cache.
- static void FlushICache(void* start, size_t size);
-
- private:
- char vendor_[13];
- int stepping_;
- int model_;
- int ext_model_;
- int family_;
- int ext_family_;
- int type_;
- int implementer_;
- int architecture_;
- int part_;
- bool has_fpu_;
- bool has_cmov_;
- bool has_sahf_;
- bool has_mmx_;
- bool has_sse_;
- bool has_sse2_;
- bool has_sse3_;
- bool has_ssse3_;
- bool has_sse41_;
- bool has_sse42_;
- bool has_idiva_;
- bool has_neon_;
- bool has_thumb2_;
- bool has_vfp_;
- bool has_vfp3_;
- bool has_vfp3_d32_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_CPU_H_
#ifndef V8_SHARED
#include "src/api.h"
-#include "src/checks.h"
-#include "src/cpu.h"
+#include "src/base/cpu.h"
+#include "src/base/logging.h"
+#include "src/base/platform/platform.h"
#include "src/d8-debug.h"
#include "src/debug.h"
#include "src/natives.h"
-#include "src/platform.h"
#include "src/v8.h"
#endif // !V8_SHARED
#ifndef V8_SHARED
CounterMap* Shell::counter_map_;
-i::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
+base::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
CounterCollection Shell::local_counters_;
CounterCollection* Shell::counters_ = &local_counters_;
-i::Mutex Shell::context_mutex_;
-const i::TimeTicks Shell::kInitialTicks = i::TimeTicks::HighResolutionNow();
+base::Mutex Shell::context_mutex_;
+const base::TimeTicks Shell::kInitialTicks =
+ base::TimeTicks::HighResolutionNow();
Persistent<Context> Shell::utility_context_;
#endif // !V8_SHARED
args.GetReturnValue().Set(heap->synthetic_time());
} else {
- i::TimeDelta delta = i::TimeTicks::HighResolutionNow() - kInitialTicks;
+ base::TimeDelta delta =
+ base::TimeTicks::HighResolutionNow() - kInitialTicks;
args.GetReturnValue().Set(delta.InMillisecondsF());
}
}
void Shell::MapCounters(const char* name) {
- counters_file_ = i::OS::MemoryMappedFile::create(
+ counters_file_ = base::OS::MemoryMappedFile::create(
name, sizeof(CounterCollection), &local_counters_);
void* memory = (counters_file_ == NULL) ?
NULL : counters_file_->memory();
Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
#ifndef V8_SHARED
// This needs to be a critical section since this is not thread-safe
- i::LockGuard<i::Mutex> lock_guard(&context_mutex_);
+ base::LockGuard<base::Mutex> lock_guard(&context_mutex_);
#endif // !V8_SHARED
// Initialize the global objects
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
#ifndef V8_SHARED
-i::Thread::Options SourceGroup::GetThreadOptions() {
+base::Thread::Options SourceGroup::GetThreadOptions() {
// On some systems (OSX 10.6) the stack size default is 0.5Mb or less
// which is not enough to parse the big literal expressions used in tests.
// The stack size should be at least StackGuard::kLimitSize + some
// OS-specific padding for thread startup code. 2Mbytes seems to be enough.
- return i::Thread::Options("IsolateThread", 2 * MB);
+ return base::Thread::Options("IsolateThread", 2 * MB);
}
Isolate* isolate = Isolate::New();
#ifndef V8_SHARED
v8::ResourceConstraints constraints;
- constraints.ConfigureDefaults(i::OS::TotalPhysicalMemory(),
- i::OS::MaxVirtualMemory(),
- i::OS::NumberOfProcessorsOnline());
+ constraints.ConfigureDefaults(base::OS::TotalPhysicalMemory(),
+ base::OS::MaxVirtualMemory(),
+ base::OS::NumberOfProcessorsOnline());
v8::SetResourceConstraints(isolate, &constraints);
#endif
DumbLineEditor dumb_line_editor(isolate);
void WaitForThread();
private:
- class IsolateThread : public i::Thread {
+ class IsolateThread : public base::Thread {
public:
explicit IsolateThread(SourceGroup* group)
- : i::Thread(GetThreadOptions()), group_(group) {}
+ : base::Thread(GetThreadOptions()), group_(group) {}
virtual void Run() {
group_->ExecuteInThread();
SourceGroup* group_;
};
- static i::Thread::Options GetThreadOptions();
+ static base::Thread::Options GetThreadOptions();
void ExecuteInThread();
- i::Semaphore next_semaphore_;
- i::Semaphore done_semaphore_;
- i::Thread* thread_;
+ base::Semaphore next_semaphore_;
+ base::Semaphore done_semaphore_;
+ base::Thread* thread_;
#endif // !V8_SHARED
void ExitShell(int exit_code);
// don't want to store the stats in a memory-mapped file
static CounterCollection local_counters_;
static CounterCollection* counters_;
- static i::OS::MemoryMappedFile* counters_file_;
- static i::Mutex context_mutex_;
- static const i::TimeTicks kInitialTicks;
+ static base::OS::MemoryMappedFile* counters_file_;
+ static base::Mutex context_mutex_;
+ static const base::TimeTicks kInitialTicks;
static Counter* GetCounter(const char* name, bool is_histogram);
static void InstallUtilityScript(Isolate* isolate);
after_ = &dst_[1];
local_offset_ms_ = kInvalidLocalOffsetInMs;
ymd_valid_ = false;
- OS::ClearTimezoneCache(tz_cache_);
+ base::OS::ClearTimezoneCache(tz_cache_);
}
#define V8_DATE_H_
#include "src/allocation.h"
+#include "src/base/platform/platform.h"
#include "src/globals.h"
-#include "src/platform.h"
namespace v8 {
// It is an invariant of DateCache that cache stamp is non-negative.
static const int kInvalidStamp = -1;
- DateCache() : stamp_(0), tz_cache_(OS::CreateTimezoneCache()) {
+ DateCache() : stamp_(0), tz_cache_(base::OS::CreateTimezoneCache()) {
ResetDateCache();
}
virtual ~DateCache() {
- OS::DisposeTimezoneCache(tz_cache_);
+ base::OS::DisposeTimezoneCache(tz_cache_);
tz_cache_ = NULL;
}
if (time_ms < 0 || time_ms > kMaxEpochTimeInMs) {
time_ms = EquivalentTime(time_ms);
}
- return OS::LocalTimezone(static_cast<double>(time_ms), tz_cache_);
+ return base::OS::LocalTimezone(static_cast<double>(time_ms), tz_cache_);
}
// ECMA 262 - 15.9.5.26
// These functions are virtual so that we can override them when testing.
virtual int GetDaylightSavingsOffsetFromOS(int64_t time_sec) {
double time_ms = static_cast<double>(time_sec * 1000);
- return static_cast<int>(OS::DaylightSavingsOffset(time_ms, tz_cache_));
+ return static_cast<int>(
+ base::OS::DaylightSavingsOffset(time_ms, tz_cache_));
}
virtual int GetLocalOffsetFromOS() {
- double offset = OS::LocalTimeOffset(tz_cache_);
+ double offset = base::OS::LocalTimeOffset(tz_cache_);
ASSERT(offset < kInvalidLocalOffsetInMs);
return static_cast<int>(offset);
}
int ymd_month_;
int ymd_day_;
- TimezoneCache* tz_cache_;
+ base::TimezoneCache* tz_cache_;
};
} } // namespace v8::internal
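DateCache now reaches the OS timezone facilities exclusively through base::OS. A minimal sketch of the cache lifecycle as the hunks above use it (create once, query per request, dispose on teardown); only the calls visible in this change are assumed:

  base::TimezoneCache* cache = base::OS::CreateTimezoneCache();
  double time_ms = 0.0;  // any time within the supported epoch range
  const char* zone = base::OS::LocalTimezone(time_ms, cache);   // timezone name at time_ms
  double offset_ms = base::OS::LocalTimeOffset(cache);          // local offset in ms
  base::OS::DisposeTimezoneCache(cache);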
bool LockingCommandMessageQueue::IsEmpty() const {
- LockGuard<Mutex> lock_guard(&mutex_);
+ base::LockGuard<base::Mutex> lock_guard(&mutex_);
return queue_.IsEmpty();
}
CommandMessage LockingCommandMessageQueue::Get() {
- LockGuard<Mutex> lock_guard(&mutex_);
+ base::LockGuard<base::Mutex> lock_guard(&mutex_);
CommandMessage result = queue_.Get();
logger_->DebugEvent("Get", result.text());
return result;
void LockingCommandMessageQueue::Put(const CommandMessage& message) {
- LockGuard<Mutex> lock_guard(&mutex_);
+ base::LockGuard<base::Mutex> lock_guard(&mutex_);
queue_.Put(message);
logger_->DebugEvent("Put", message.text());
}
void LockingCommandMessageQueue::Clear() {
- LockGuard<Mutex> lock_guard(&mutex_);
+ base::LockGuard<base::Mutex> lock_guard(&mutex_);
queue_.Clear();
}
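Every method of LockingCommandMessageQueue above follows the same scoped-locking idiom, now spelled with the relocated synchronization types. A minimal, self-contained sketch of that idiom; only base::Mutex and base::LockGuard from the hunks are assumed:

  class LockedCounter {
   public:
    LockedCounter() : value_(0) {}
    void Increment() {
      base::LockGuard<base::Mutex> lock_guard(&mutex_);  // unlocked when the guard leaves scope
      ++value_;
    }
    int value() const {
      base::LockGuard<base::Mutex> lock_guard(&mutex_);
      return value_;
    }
   private:
    mutable base::Mutex mutex_;  // mutable so const readers can still lock
    int value_;
  };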
#include "src/allocation.h"
#include "src/arguments.h"
#include "src/assembler.h"
+#include "src/base/platform/platform.h"
#include "src/execution.h"
#include "src/factory.h"
#include "src/flags.h"
#include "src/frames-inl.h"
#include "src/hashmap.h"
#include "src/liveedit.h"
-#include "src/platform.h"
#include "src/string-stream.h"
#include "src/v8threads.h"
private:
Logger* logger_;
CommandMessageQueue queue_;
- mutable Mutex mutex_;
+ mutable base::Mutex mutex_;
DISALLOW_COPY_AND_ASSIGN(LockingCommandMessageQueue);
};
v8::Debug::MessageHandler message_handler_;
static const int kQueueInitialSize = 4;
- Semaphore command_received_; // Signaled for each command received.
+ base::Semaphore command_received_; // Signaled for each command received.
LockingCommandMessageQueue command_queue_;
LockingCommandMessageQueue event_command_queue_;
static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(),
- OS::CommitPageSize(),
+ base::OS::CommitPageSize(),
#if defined(__native_client__)
// The Native Client port of V8 uses an interpreter,
// so code pages don't need PROT_EXEC.
size_t Deoptimizer::GetMaxDeoptTableSize() {
int entries_size =
Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
- int commit_page_size = static_cast<int>(OS::CommitPageSize());
+ int commit_page_size = static_cast<int>(base::OS::CommitPageSize());
int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
commit_page_size) + 1;
return static_cast<size_t>(commit_page_size * page_count);
compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
LOG(isolate(), CodeDeoptEvent(compiled_code_));
}
- ElapsedTimer timer;
+ base::ElapsedTimer timer;
// Determine basic deoptimization information. The optimized frame is
// described by the input data.
chunk->CommitArea(desc.instr_size);
CopyBytes(chunk->area_start(), desc.buffer,
static_cast<size_t>(desc.instr_size));
- CPU::FlushICache(chunk->area_start(), desc.instr_size);
+ CpuFeatures::FlushICache(chunk->area_start(), desc.instr_size);
data->deopt_entry_code_entries_[type] = entry_count;
}
// found in the LICENSE file.
#include "include/v8stdint.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
#include "src/diy-fp.h"
#include "src/globals.h"
#include <cmath>
#include "include/v8stdint.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
#include "src/utils.h"
#include "src/dtoa.h"
#ifndef V8_ELEMENTS_KIND_H_
#define V8_ELEMENTS_KIND_H_
-#include "src/v8checks.h"
+#include "src/checks.h"
namespace v8 {
namespace internal {
// found in the LICENSE file.
#include "src/extensions/free-buffer-extension.h"
-#include "src/platform.h"
+
+#include "src/base/platform/platform.h"
#include "src/v8.h"
namespace v8 {
// found in the LICENSE file.
#include "src/extensions/gc-extension.h"
-#include "src/platform.h"
+
+#include "src/base/platform/platform.h"
namespace v8 {
namespace internal {
// found in the LICENSE file.
#include "include/v8stdint.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
#include "src/utils.h"
#include "src/fast-dtoa.h"
#include <cmath>
#include "include/v8stdint.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
#include "src/utils.h"
#include "src/double.h"
#include "src/v8.h"
#include "src/assembler.h"
-#include "src/platform.h"
+#include "src/base/platform/platform.h"
#include "src/smart-pointers.h"
#include "src/string-stream.h"
#ifdef ENABLE_GDB_JIT_INTERFACE
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/compiler.h"
#include "src/frames-inl.h"
#include "src/global-handles.h"
#include "src/messages.h"
#include "src/natives.h"
-#include "src/platform.h"
#include "src/scopes.h"
namespace v8 {
}
-static LazyMutex mutex = LAZY_MUTEX_INITIALIZER;
+static base::LazyMutex mutex = LAZY_MUTEX_INITIALIZER;
void GDBJITInterface::AddCode(const char* name,
CompilationInfo* info) {
if (!FLAG_gdbjit) return;
- LockGuard<Mutex> lock_guard(mutex.Pointer());
+ base::LockGuard<base::Mutex> lock_guard(mutex.Pointer());
DisallowHeapAllocation no_gc;
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
void GDBJITInterface::RemoveCode(Code* code) {
if (!FLAG_gdbjit) return;
- LockGuard<Mutex> lock_guard(mutex.Pointer());
+ base::LockGuard<base::Mutex> lock_guard(mutex.Pointer());
HashMap::Entry* e = GetEntries()->Lookup(code,
HashForCodeObject(code),
false);
void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
GDBJITLineInfo* line_info) {
- LockGuard<Mutex> lock_guard(mutex.Pointer());
+ base::LockGuard<base::Mutex> lock_guard(mutex.Pointer());
ASSERT(!IsLineInfoTagged(line_info));
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
ASSERT(e->value == NULL);
#include "include/v8stdint.h"
#include "src/base/build_config.h"
+#include "src/base/logging.h"
#include "src/base/macros.h"
-#include "src/checks.h"
// Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
// warning flag and certain versions of GCC due to a bug:
#endif
namespace v8 {
+
+namespace base {
+class Mutex;
+class RecursiveMutex;
+class VirtualMemory;
+}
+
namespace internal {
// Determine whether we are running in a simulated environment.
const int kCodeZapValue = 0xbadc0de;
-// Number of bits to represent the page size for paged spaces. The value of 20
-// gives 1 MB per page.
-const int kPageSizeBits = 20;
-
// On Intel architecture, cache line size is 64 bytes.
// On ARM it may be less (32 bytes), but as far as this constant is
// used for aligning data, it doesn't hurt to align on a greater value.
class RelocInfo;
class Deserializer;
class MessageLocation;
-class VirtualMemory;
-class Mutex;
-class RecursiveMutex;
typedef bool (*WeakSlotCallback)(Object** pointer);
#define V8_HASHMAP_H_
#include "src/allocation.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
#include "src/utils.h"
namespace v8 {
#include <cmath>
+#include "src/base/platform/platform.h"
#include "src/cpu-profiler.h"
#include "src/heap.h"
#include "src/heap-profiler.h"
#include "src/isolate.h"
#include "src/list-inl.h"
#include "src/objects.h"
-#include "src/platform.h"
#include "src/store-buffer.h"
#include "src/store-buffer-inl.h"
void HeapEntry::Print(
const char* prefix, const char* edge_name, int max_depth, int indent) {
STATIC_ASSERT(sizeof(unsigned) == sizeof(id()));
- OS::Print("%6" V8PRIuPTR " @%6u %*c %s%s: ",
- self_size(), id(), indent, ' ', prefix, edge_name);
+ base::OS::Print("%6" V8PRIuPTR " @%6u %*c %s%s: ", self_size(), id(), indent,
+ ' ', prefix, edge_name);
if (type() != kString) {
- OS::Print("%s %.40s\n", TypeAsString(), name_);
+ base::OS::Print("%s %.40s\n", TypeAsString(), name_);
} else {
- OS::Print("\"");
+ base::OS::Print("\"");
const char* c = name_;
while (*c && (c - name_) <= 40) {
if (*c != '\n')
- OS::Print("%c", *c);
+ base::OS::Print("%c", *c);
else
- OS::Print("\\n");
+ base::OS::Print("\\n");
++c;
}
- OS::Print("\"\n");
+ base::OS::Print("\"\n");
}
if (--max_depth == 0) return;
Vector<HeapGraphEdge*> ch = children();
#include "src/accessors.h"
#include "src/api.h"
#include "src/base/once.h"
+#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/snapshot.h"
#include "src/store-buffer.h"
#include "src/utils.h"
-#include "src/utils/random-number-generator.h"
#include "src/v8threads.h"
#include "src/vm-state-inl.h"
set_minus_zero_value(*factory->NewHeapNumber(-0.0, TENURED));
ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
- set_nan_value(*factory->NewHeapNumber(OS::nan_value(), TENURED));
+ set_nan_value(*factory->NewHeapNumber(base::OS::nan_value(), TENURED));
set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, TENURED));
// The hole has not been created yet, but we want to put something
*stats->memory_allocator_capacity =
isolate()->memory_allocator()->Size() +
isolate()->memory_allocator()->Available();
- *stats->os_error = OS::GetLastError();
+ *stats->os_error = base::OS::GetLastError();
isolate()->memory_allocator()->Available();
if (take_snapshot) {
HeapIterator iterator(this);
gc_reason_(gc_reason),
collector_reason_(collector_reason) {
if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
- start_time_ = OS::TimeCurrentMillis();
+ start_time_ = base::OS::TimeCurrentMillis();
start_object_size_ = heap_->SizeOfObjects();
start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
- heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
+ heap_->last_gc_end_timestamp_ = base::OS::TimeCurrentMillis();
double time = heap_->last_gc_end_timestamp_ - start_time_;
}
-static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
+static base::LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
void Heap::CheckpointObjectStats() {
- LockGuard<Mutex> lock_guard(checkpoint_object_stats_mutex.Pointer());
+ base::LockGuard<base::Mutex> lock_guard(
+ checkpoint_object_stats_mutex.Pointer());
Counters* counters = isolate()->counters();
#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
counters->count_of_##name()->Increment( \
MemoryChunk* chunks_queued_for_free_;
- Mutex relocation_mutex_;
+ base::Mutex relocation_mutex_;
int gc_callbacks_depth_;
Scope(GCTracer* tracer, ScopeId scope)
: tracer_(tracer),
scope_(scope) {
- start_time_ = OS::TimeCurrentMillis();
+ start_time_ = base::OS::TimeCurrentMillis();
}
~Scope() {
ASSERT(scope_ < kNumberOfScopes); // scope_ is unsigned.
- tracer_->scopes_[scope_] += OS::TimeCurrentMillis() - start_time_;
+ tracer_->scopes_[scope_] += base::OS::TimeCurrentMillis() - start_time_;
}
private:
if (!keep_new_check) {
if (FLAG_trace_bce) {
- OS::Print("Eliminating check #%d after tightening\n",
- new_check->id());
+ base::OS::Print("Eliminating check #%d after tightening\n",
+ new_check->id());
}
new_check->block()->graph()->isolate()->counters()->
bounds_checks_eliminated()->Increment();
HBoundsCheck* first_check = new_check == lower_check_ ? upper_check_
: lower_check_;
if (FLAG_trace_bce) {
- OS::Print("Moving second check #%d after first check #%d\n",
- new_check->id(), first_check->id());
+ base::OS::Print("Moving second check #%d after first check #%d\n",
+ new_check->id(), first_check->id());
}
// The length is guaranteed to be live at first_check.
ASSERT(new_check->length() == first_check->length());
original_check->ReplaceAllUsesWith(original_check->index());
original_check->SetOperandAt(0, tighter_check->index());
if (FLAG_trace_bce) {
- OS::Print("Tightened check #%d with offset %d from #%d\n",
- original_check->id(), new_offset, tighter_check->id());
+ base::OS::Print("Tightened check #%d with offset %d from #%d\n",
+ original_check->id(), new_offset, tighter_check->id());
}
}
NULL);
*data_p = bb_data_list;
if (FLAG_trace_bce) {
- OS::Print("Fresh bounds check data for block #%d: [%d]\n",
- bb->block_id(), offset);
+ base::OS::Print("Fresh bounds check data for block #%d: [%d]\n",
+ bb->block_id(), offset);
}
} else if (data->OffsetIsCovered(offset)) {
bb->graph()->isolate()->counters()->
bounds_checks_eliminated()->Increment();
if (FLAG_trace_bce) {
- OS::Print("Eliminating bounds check #%d, offset %d is covered\n",
- check->id(), offset);
+ base::OS::Print("Eliminating bounds check #%d, offset %d is covered\n",
+ check->id(), offset);
}
check->DeleteAndReplaceWith(check->ActualValue());
} else if (data->BasicBlock() == bb) {
bb_data_list,
data);
if (FLAG_trace_bce) {
- OS::Print("Updated bounds check data for block #%d: [%d - %d]\n",
- bb->block_id(), new_lower_offset, new_upper_offset);
+ base::OS::Print("Updated bounds check data for block #%d: [%d - %d]\n",
+ bb->block_id(), new_lower_offset, new_upper_offset);
}
table_.Insert(key, bb_data_list, zone());
}
void TraceGVN(const char* msg, ...) {
va_list arguments;
va_start(arguments, msg);
- OS::VPrint(msg, arguments);
+ base::OS::VPrint(msg, arguments);
va_end(arguments);
}
res = handle->BooleanValue() ?
new(zone) HConstant(1) : new(zone) HConstant(0);
} else if (handle->IsUndefined()) {
- res = new(zone) HConstant(OS::nan_value());
+ res = new(zone) HConstant(base::OS::nan_value());
} else if (handle->IsNull()) {
res = new(zone) HConstant(0);
}
if (!constant->HasNumberValue()) break;
double d = constant->DoubleValue();
if (std::isnan(d)) { // NaN poisons everything.
- return H_CONSTANT_DOUBLE(OS::nan_value());
+ return H_CONSTANT_DOUBLE(base::OS::nan_value());
}
if (std::isinf(d)) { // +Infinity and -Infinity.
switch (op) {
return H_CONSTANT_DOUBLE((d > 0.0) ? d : 0.0);
case kMathLog:
case kMathSqrt:
- return H_CONSTANT_DOUBLE((d > 0.0) ? d : OS::nan_value());
+ return H_CONSTANT_DOUBLE((d > 0.0) ? d : base::OS::nan_value());
case kMathPowHalf:
case kMathAbs:
return H_CONSTANT_DOUBLE((d > 0.0) ? d : -d);
if (c_left->HasNumberValue() && c_right->HasNumberValue()) {
double result = power_helper(c_left->DoubleValue(),
c_right->DoubleValue());
- return H_CONSTANT_DOUBLE(std::isnan(result) ? OS::nan_value() : result);
+ return H_CONSTANT_DOUBLE(std::isnan(result) ? base::OS::nan_value()
+ : result);
}
}
return new(zone) HPower(left, right);
}
}
// All comparisons failed, must be NaN.
- return H_CONSTANT_DOUBLE(OS::nan_value());
+ return H_CONSTANT_DOUBLE(base::OS::nan_value());
}
}
return new(zone) HMathMinMax(context, left, right, op);
if (FLAG_trace_range) {
va_list arguments;
va_start(arguments, msg);
- OS::VPrint(msg, arguments);
+ base::OS::VPrint(msg, arguments);
va_end(arguments);
}
}
int32_t i = c_index->NumberValueAsInteger32();
Handle<String> s = c_string->StringValue();
if (i < 0 || i >= s->length()) {
- return New<HConstant>(OS::nan_value());
+ return New<HConstant>(base::OS::nan_value());
}
return New<HConstant>(s->Get(i));
}
PrintStringProperty("name", CodeStub::MajorName(major_key, false));
PrintStringProperty("method", "stub");
}
- PrintLongProperty("date", static_cast<int64_t>(OS::TimeCurrentMillis()));
+ PrintLongProperty("date",
+ static_cast<int64_t>(base::OS::TimeCurrentMillis()));
}
void HStatistics::Print() {
PrintF("Timing results:\n");
- TimeDelta sum;
+ base::TimeDelta sum;
for (int i = 0; i < times_.length(); ++i) {
sum += times_[i];
}
PrintF("----------------------------------------"
"---------------------------------------\n");
- TimeDelta total = create_graph_ + optimize_graph_ + generate_code_;
+ base::TimeDelta total = create_graph_ + optimize_graph_ + generate_code_;
PrintF("%32s %8.3f ms / %4.1f %% \n",
"Create graph",
create_graph_.InMillisecondsF(),
}
-void HStatistics::SaveTiming(const char* name, TimeDelta time, unsigned size) {
+void HStatistics::SaveTiming(const char* name, base::TimeDelta time,
+ unsigned size) {
total_size_ += size;
for (int i = 0; i < names_.length(); ++i) {
if (strcmp(names_[i], name) == 0) {
void Initialize(CompilationInfo* info);
void Print();
- void SaveTiming(const char* name, TimeDelta time, unsigned size);
+ void SaveTiming(const char* name, base::TimeDelta time, unsigned size);
- void IncrementFullCodeGen(TimeDelta full_code_gen) {
+ void IncrementFullCodeGen(base::TimeDelta full_code_gen) {
full_code_gen_ += full_code_gen;
}
- void IncrementSubtotals(TimeDelta create_graph,
- TimeDelta optimize_graph,
- TimeDelta generate_code) {
+ void IncrementSubtotals(base::TimeDelta create_graph,
+ base::TimeDelta optimize_graph,
+ base::TimeDelta generate_code) {
create_graph_ += create_graph;
optimize_graph_ += optimize_graph;
generate_code_ += generate_code;
}
private:
- List<TimeDelta> times_;
+ List<base::TimeDelta> times_;
List<const char*> names_;
List<unsigned> sizes_;
- TimeDelta create_graph_;
- TimeDelta optimize_graph_;
- TimeDelta generate_code_;
+ base::TimeDelta create_graph_;
+ base::TimeDelta optimize_graph_;
+ base::TimeDelta generate_code_;
unsigned total_size_;
- TimeDelta full_code_gen_;
+ base::TimeDelta full_code_gen_;
double source_size_;
};
if (FLAG_trace_hydrogen_file == NULL) {
SNPrintF(filename_,
"hydrogen-%d-%d.cfg",
- OS::GetCurrentProcessId(),
+ base::OS::GetCurrentProcessId(),
isolate_id);
} else {
StrNCpy(filename_, FLAG_trace_hydrogen_file, filename_.length());
#include "src/ia32/assembler-ia32.h"
-#include "src/cpu.h"
+#include "src/assembler.h"
#include "src/debug.h"
namespace v8 {
if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_)) {
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p -= delta; // Relocate entry.
- if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+ if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
} else if (rmode_ == CODE_AGE_SEQUENCE) {
if (*pc_ == kCallOpcode) {
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= delta; // Relocate entry.
- if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+ if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
}
} else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
// Special handling of js_return when a break point is set (call
// instruction has been inserted).
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= delta; // Relocate entry.
- if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+ if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
} else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) {
// Special handling of a debug break slot when a break point is set (call
// instruction has been inserted).
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= delta; // Relocate entry.
- if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+ if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
} else if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p += delta; // Relocate entry.
- if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+ if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
}
}
ASSERT(!target->IsConsString());
Memory::Object_at(pc_) = target;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CPU::FlushICache(pc_, sizeof(Address));
+ CpuFeatures::FlushICache(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CPU::FlushICache(pc_, sizeof(Address));
+ CpuFeatures::FlushICache(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
- CPU::FlushICache(pc_, sizeof(Address));
+ CpuFeatures::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::CELL) {
visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
- CPU::FlushICache(pc_, sizeof(Address));
+ CpuFeatures::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
} else if (((RelocInfo::IsJSReturn(mode) &&
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
- CPU::FlushICache(pc_, sizeof(Address));
+ CpuFeatures::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::CELL) {
StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
- CPU::FlushICache(pc_, sizeof(Address));
+ CpuFeatures::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (heap->isolate()->debug()->has_break_points() &&
int32_t* p = reinterpret_cast<int32_t*>(pc);
*p = target - (pc + sizeof(int32_t));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CPU::FlushICache(p, sizeof(int32_t));
+ CpuFeatures::FlushICache(p, sizeof(int32_t));
}
}
#if V8_TARGET_ARCH_IA32
+#include "src/base/cpu.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
#include "src/serialize.h"
// Implementation of CpuFeatures
void CpuFeatures::ProbeImpl(bool cross_compile) {
- CPU cpu;
+ base::CPU cpu;
CHECK(cpu.has_sse2()); // SSE2 support is mandatory.
CHECK(cpu.has_cmov()); // CMOV support is mandatory.
}
// Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count);
+ CpuFeatures::FlushICache(pc_, instruction_count);
}
break;
}
ASSERT(GetMode(stub) == mode);
- CPU::FlushICache(stub->instruction_start(), 7);
+ CpuFeatures::FlushICache(stub->instruction_start(), 7);
}
private:
UnaryMathFunction CreateExpFunction() {
if (!FLAG_fast_math) return &std::exp;
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
UnaryMathFunction CreateSqrtFunction() {
size_t actual_size;
// Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
- &actual_size,
- true));
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &std::sqrt;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// esp[1 * kPointerSize]: raw double input
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
MemMoveFunction CreateMemMoveFunction() {
size_t actual_size;
// Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return NULL;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
LabelConverter conv(buffer);
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
return FUNCTION_CAST<MemMoveFunction>(buffer);
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
- CPU::FlushICache(sequence, young_length);
+ CpuFeatures::FlushICache(sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length);
#if V8_TARGET_ARCH_IA32
-#include "src/cpu.h"
+#include "src/assembler.h"
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
-void CPU::FlushICache(void* start, size_t size) {
+void CpuFeatures::FlushICache(void* start, size_t size) {
// No need to flush the instruction cache on Intel. On Intel instruction
// cache flushing is only necessary when multiple cores are running the same
// code simultaneously. V8 (and JavaScript) is single threaded and when code
#include "src/ia32/lithium-ia32.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
#include "src/deoptimizer.h"
#include "src/ia32/lithium-gap-resolver-ia32.h"
#include "src/lithium-codegen.h"
}
// Get the required frame alignment for the OS.
- const int kFrameAlignment = OS::ActivationFrameAlignment();
+ const int kFrameAlignment = base::OS::ActivationFrameAlignment();
if (kFrameAlignment > 0) {
ASSERT(IsPowerOf2(kFrameAlignment));
and_(esp, -kFrameAlignment);
void MacroAssembler::CheckStackAlignment() {
- int frame_alignment = OS::ActivationFrameAlignment();
+ int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
ASSERT(IsPowerOf2(frame_alignment));
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
- int frame_alignment = OS::ActivationFrameAlignment();
+ int frame_alignment = base::OS::ActivationFrameAlignment();
if (frame_alignment != 0) {
// Make stack end at alignment and make room for num_arguments words
// and the original value of esp.
}
call(function);
- if (OS::ActivationFrameAlignment() != 0) {
+ if (base::OS::ActivationFrameAlignment() != 0) {
mov(esp, Operand(esp, num_arguments * kPointerSize));
} else {
add(esp, Immediate(num_arguments * kPointerSize));
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- CPU::FlushICache(address_, size_);
+ CpuFeatures::FlushICache(address_, size_);
// Check that the code was patched as expected.
ASSERT(masm_.pc_ == address_ + size_);
it.rinfo()->set_target_object(undefined, SKIP_WRITE_BARRIER);
}
}
- CPU::FlushICache(stub->instruction_start(), stub->instruction_size());
+ CpuFeatures::FlushICache(stub->instruction_start(), stub->instruction_size());
}
void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
if (marking_deque_memory_ == NULL) {
- marking_deque_memory_ = new VirtualMemory(4 * MB);
+ marking_deque_memory_ = new base::VirtualMemory(4 * MB);
}
if (!marking_deque_memory_committed_) {
bool success = marking_deque_memory_->Commit(
if (state() == MARKING) {
double start = 0.0;
if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
- start = OS::TimeCurrentMillis();
+ start = base::OS::TimeCurrentMillis();
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Hurry\n");
}
ProcessMarkingDeque();
state_ = COMPLETE;
if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
- double end = OS::TimeCurrentMillis();
+ double end = base::OS::TimeCurrentMillis();
double delta = end - start;
heap_->AddMarkingTime(delta);
if (FLAG_trace_incremental_marking) {
if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
FLAG_print_cumulative_gc_stat) {
- start = OS::TimeCurrentMillis();
+ start = base::OS::TimeCurrentMillis();
}
if (state_ == SWEEPING) {
if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
FLAG_print_cumulative_gc_stat) {
- double end = OS::TimeCurrentMillis();
+ double end = base::OS::TimeCurrentMillis();
double delta = (end - start);
longest_step_ = Max(longest_step_, delta);
steps_took_ += delta;
State state_;
bool is_compacting_;
- VirtualMemory* marking_deque_memory_;
+ base::VirtualMemory* marking_deque_memory_;
bool marking_deque_memory_committed_;
MarkingDeque marking_deque_;
#ifndef V8_ISOLATE_INL_H_
#define V8_ISOLATE_INL_H_
+#include "src/base/utils/random-number-generator.h"
#include "src/debug.h"
#include "src/isolate.h"
-#include "src/utils/random-number-generator.h"
namespace v8 {
namespace internal {
}
-RandomNumberGenerator* Isolate::random_number_generator() {
+base::RandomNumberGenerator* Isolate::random_number_generator() {
if (random_number_generator_ == NULL) {
if (FLAG_random_seed != 0) {
- random_number_generator_ = new RandomNumberGenerator(FLAG_random_seed);
+ random_number_generator_ =
+ new base::RandomNumberGenerator(FLAG_random_seed);
} else {
- random_number_generator_ = new RandomNumberGenerator();
+ random_number_generator_ = new base::RandomNumberGenerator();
}
}
return random_number_generator_;
#include "src/v8.h"
#include "src/ast.h"
+#include "src/base/platform/platform.h"
+#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/lithium-allocator.h"
#include "src/log.h"
#include "src/messages.h"
-#include "src/platform.h"
#include "src/regexp-stack.h"
#include "src/runtime-profiler.h"
#include "src/sampler.h"
#include "src/spaces.h"
#include "src/stub-cache.h"
#include "src/sweeper-thread.h"
-#include "src/utils/random-number-generator.h"
#include "src/version.h"
#include "src/vm-state-inl.h"
int ThreadId::GetCurrentThreadId() {
- int thread_id = Thread::GetThreadLocalInt(Isolate::thread_id_key_);
+ int thread_id = base::Thread::GetThreadLocalInt(Isolate::thread_id_key_);
if (thread_id == 0) {
thread_id = AllocateThreadId();
- Thread::SetThreadLocalInt(Isolate::thread_id_key_, thread_id);
+ base::Thread::SetThreadLocalInt(Isolate::thread_id_key_, thread_id);
}
return thread_id;
}
}
-Thread::LocalStorageKey Isolate::isolate_key_;
-Thread::LocalStorageKey Isolate::thread_id_key_;
-Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
+base::Thread::LocalStorageKey Isolate::isolate_key_;
+base::Thread::LocalStorageKey Isolate::thread_id_key_;
+base::Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
#ifdef DEBUG
-Thread::LocalStorageKey PerThreadAssertScopeBase::thread_local_key;
+base::Thread::LocalStorageKey PerThreadAssertScopeBase::thread_local_key;
#endif // DEBUG
-Mutex Isolate::process_wide_mutex_;
+base::Mutex Isolate::process_wide_mutex_;
// TODO(dcarney): Remove with default isolate.
enum DefaultIsolateStatus {
kDefaultIsolateUninitialized,
ThreadId thread_id = ThreadId::Current();
PerIsolateThreadData* per_thread = NULL;
{
- LockGuard<Mutex> lock_guard(&process_wide_mutex_);
+ base::LockGuard<base::Mutex> lock_guard(&process_wide_mutex_);
per_thread = thread_data_table_->Lookup(this, thread_id);
if (per_thread == NULL) {
per_thread = new PerIsolateThreadData(this, thread_id);
ThreadId thread_id) {
PerIsolateThreadData* per_thread = NULL;
{
- LockGuard<Mutex> lock_guard(&process_wide_mutex_);
+ base::LockGuard<base::Mutex> lock_guard(&process_wide_mutex_);
per_thread = thread_data_table_->Lookup(this, thread_id);
}
return per_thread;
void Isolate::SetCrashIfDefaultIsolateInitialized() {
- LockGuard<Mutex> lock_guard(&process_wide_mutex_);
+ base::LockGuard<base::Mutex> lock_guard(&process_wide_mutex_);
CHECK(default_isolate_status_ != kDefaultIsolateInitialized);
default_isolate_status_ = kDefaultIsolateCrashIfInitialized;
}
void Isolate::EnsureDefaultIsolate() {
- LockGuard<Mutex> lock_guard(&process_wide_mutex_);
+ base::LockGuard<base::Mutex> lock_guard(&process_wide_mutex_);
CHECK(default_isolate_status_ != kDefaultIsolateCrashIfInitialized);
if (thread_data_table_ == NULL) {
- isolate_key_ = Thread::CreateThreadLocalKey();
- thread_id_key_ = Thread::CreateThreadLocalKey();
- per_isolate_thread_data_key_ = Thread::CreateThreadLocalKey();
+ isolate_key_ = base::Thread::CreateThreadLocalKey();
+ thread_id_key_ = base::Thread::CreateThreadLocalKey();
+ per_isolate_thread_data_key_ = base::Thread::CreateThreadLocalKey();
#ifdef DEBUG
- PerThreadAssertScopeBase::thread_local_key = Thread::CreateThreadLocalKey();
+ PerThreadAssertScopeBase::thread_local_key =
+ base::Thread::CreateThreadLocalKey();
#endif // DEBUG
thread_data_table_ = new Isolate::ThreadDataTable();
}
return stack_trace;
} else if (stack_trace_nesting_level_ == 1) {
stack_trace_nesting_level_++;
- OS::PrintError(
+ base::OS::PrintError(
"\n\nAttempt to print stack while printing stack (double fault)\n");
- OS::PrintError(
+ base::OS::PrintError(
"If you are lucky you may find a partial stack dump on stdout.\n\n");
incomplete_message_->OutputToStdOut();
return factory()->empty_string();
} else {
- OS::Abort();
+ base::OS::Abort();
// Unreachable
return factory()->empty_string();
}
String::WriteToFlat(*trace, buffer, 0, length);
buffer[length] = '\0';
// TODO(dcarney): convert buffer to utf8?
- OS::PrintError("Stacktrace (%x-%x) %p %p: %s\n",
- magic, magic2,
- static_cast<void*>(object), static_cast<void*>(map),
- reinterpret_cast<char*>(buffer));
- OS::Abort();
+ base::OS::PrintError("Stacktrace (%x-%x) %p %p: %s\n", magic, magic2,
+ static_cast<void*>(object), static_cast<void*>(map),
+ reinterpret_cast<char*>(buffer));
+ base::OS::Abort();
}
stack_trace_nesting_level_ = 0;
} else if (stack_trace_nesting_level_ == 1) {
stack_trace_nesting_level_++;
- OS::PrintError(
+ base::OS::PrintError(
"\n\nAttempt to print stack while printing stack (double fault)\n");
- OS::PrintError(
+ base::OS::PrintError(
"If you are lucky you may find a partial stack dump on stdout.\n\n");
incomplete_message_->OutputToFile(out);
}
"%s\n\nFROM\n",
MessageHandler::GetLocalizedMessage(this, message_obj).get());
PrintCurrentStackTrace(stderr);
- OS::Abort();
+ base::OS::Abort();
}
} else if (location != NULL && !location->script().is_null()) {
// We are bootstrapping and caught an error where the location is set
int line_number =
location->script()->GetLineNumber(location->start_pos()) + 1;
if (exception->IsString() && location->script()->name()->IsString()) {
- OS::PrintError(
+ base::OS::PrintError(
"Extension or internal compilation error: %s in %s at line %d.\n",
String::cast(exception)->ToCString().get(),
String::cast(location->script()->name())->ToCString().get(),
line_number);
} else if (location->script()->name()->IsString()) {
- OS::PrintError(
+ base::OS::PrintError(
"Extension or internal compilation error in %s at line %d.\n",
String::cast(location->script()->name())->ToCString().get(),
line_number);
} else {
- OS::PrintError("Extension or internal compilation error.\n");
+ base::OS::PrintError("Extension or internal compilation error.\n");
}
#ifdef OBJECT_PRINT
// Since comments and empty lines have been stripped from the source of
Deinit();
- { LockGuard<Mutex> lock_guard(&process_wide_mutex_);
+ { base::LockGuard<base::Mutex> lock_guard(&process_wide_mutex_);
thread_data_table_->RemoveAllThreads(this);
}
void Isolate::SetIsolateThreadLocals(Isolate* isolate,
PerIsolateThreadData* data) {
- Thread::SetThreadLocal(isolate_key_, isolate);
- Thread::SetThreadLocal(per_isolate_thread_data_key_, data);
+ base::Thread::SetThreadLocal(isolate_key_, isolate);
+ base::Thread::SetThreadLocal(per_isolate_thread_data_key_, data);
}
// once ResourceConstraints becomes an argument to the Isolate constructor.
if (max_available_threads_ < 1) {
// Choose the default between 1 and 4.
- max_available_threads_ = Max(Min(OS::NumberOfProcessorsOnline(), 4), 1);
+ max_available_threads_ =
+ Max(Min(base::OS::NumberOfProcessorsOnline(), 4), 1);
}
if (!FLAG_job_based_sweeping) {
Internals::kAmountOfExternalAllocatedMemoryAtLastGlobalGCOffset);
state_ = INITIALIZED;
- time_millis_at_init_ = OS::TimeCurrentMillis();
+ time_millis_at_init_ = base::OS::TimeCurrentMillis();
if (!create_heap_objects) {
// Now that the heap is consistent, it's OK to generate the code for the
#include "src/zone.h"
namespace v8 {
+
+namespace base {
+class RandomNumberGenerator;
+}
+
namespace internal {
class Bootstrapper;
class MaterializedObjectStore;
class NoAllocationStringAllocator;
class CodeAgingHelper;
-class RandomNumberGenerator;
class RegExpStack;
class SaveContext;
class StringTracker;
// not currently set).
static PerIsolateThreadData* CurrentPerIsolateThreadData() {
return reinterpret_cast<PerIsolateThreadData*>(
- Thread::GetThreadLocal(per_isolate_thread_data_key_));
+ base::Thread::GetThreadLocal(per_isolate_thread_data_key_));
}
// Returns the isolate inside which the current thread is running.
INLINE(static Isolate* Current()) {
Isolate* isolate = reinterpret_cast<Isolate*>(
- Thread::GetExistingThreadLocal(isolate_key_));
+ base::Thread::GetExistingThreadLocal(isolate_key_));
ASSERT(isolate != NULL);
return isolate;
}
INLINE(static Isolate* UncheckedCurrent()) {
- return reinterpret_cast<Isolate*>(Thread::GetThreadLocal(isolate_key_));
+ return reinterpret_cast<Isolate*>(
+ base::Thread::GetThreadLocal(isolate_key_));
}
// Usually called by Init(), but can be called early e.g. to allow
// Returns the key used to store the pointer to the current isolate.
// Used internally for V8 threads that do not execute JavaScript but still
// are part of the domain of an isolate (like the context switcher).
- static Thread::LocalStorageKey isolate_key() {
+ static base::Thread::LocalStorageKey isolate_key() {
return isolate_key_;
}
// Returns the key used to store process-wide thread IDs.
- static Thread::LocalStorageKey thread_id_key() {
+ static base::Thread::LocalStorageKey thread_id_key() {
return thread_id_key_;
}
- static Thread::LocalStorageKey per_isolate_thread_data_key();
+ static base::Thread::LocalStorageKey per_isolate_thread_data_key();
// Mutex for serializing access to break control structures.
- RecursiveMutex* break_access() { return &break_access_; }
+ base::RecursiveMutex* break_access() { return &break_access_; }
Address get_address_from_id(AddressId id);
bool initialized_from_snapshot() { return initialized_from_snapshot_; }
double time_millis_since_init() {
- return OS::TimeCurrentMillis() - time_millis_at_init_;
+ return base::OS::TimeCurrentMillis() - time_millis_at_init_;
}
DateCache* date_cache() {
void* stress_deopt_count_address() { return &stress_deopt_count_; }
- inline RandomNumberGenerator* random_number_generator();
+ inline base::RandomNumberGenerator* random_number_generator();
// Given an address occupied by a live code object, return that object.
Object* FindCodeObject(Address a);
};
// This mutex protects highest_thread_id_ and thread_data_table_.
- static Mutex process_wide_mutex_;
+ static base::Mutex process_wide_mutex_;
- static Thread::LocalStorageKey per_isolate_thread_data_key_;
- static Thread::LocalStorageKey isolate_key_;
- static Thread::LocalStorageKey thread_id_key_;
+ static base::Thread::LocalStorageKey per_isolate_thread_data_key_;
+ static base::Thread::LocalStorageKey isolate_key_;
+ static base::Thread::LocalStorageKey thread_id_key_;
static ThreadDataTable* thread_data_table_;
// A global counter for all generated Isolates, might overflow.
CompilationCache* compilation_cache_;
Counters* counters_;
CodeRange* code_range_;
- RecursiveMutex break_access_;
+ base::RecursiveMutex break_access_;
base::Atomic32 debugger_initialized_;
Logger* logger_;
StackGuard stack_guard_;
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
CodeStubInterfaceDescriptor* code_stub_interface_descriptors_;
CallInterfaceDescriptor* call_descriptors_;
- RandomNumberGenerator* random_number_generator_;
+ base::RandomNumberGenerator* random_number_generator_;
// Whether the isolate has been created for snapshotting.
bool serializer_enabled_;
if (FLAG_redirect_code_traces_to == NULL) {
SNPrintF(filename_,
"code-%d-%d.asm",
- OS::GetCurrentProcessId(),
+ base::OS::GetCurrentProcessId(),
isolate_id);
} else {
StrNCpy(filename_, FLAG_redirect_code_traces_to, filename_.length());
}
if (file_ == NULL) {
- file_ = OS::FOpen(filename_.start(), "a");
+ file_ = base::OS::FOpen(filename_.start(), "a");
}
scope_depth_++;
number = StringToDouble(isolate()->unicode_cache(),
chars,
NO_FLAGS, // Hex, octal or trailing junk.
- OS::nan_value());
+ base::OS::nan_value());
} else {
Vector<uint8_t> buffer = Vector<uint8_t>::New(length);
String::WriteToFlat(*source_, buffer.start(), beg_pos, position_);
#include "src/v8.h"
#include "src/ast.h"
+#include "src/base/platform/platform.h"
#include "src/compilation-cache.h"
#include "src/compiler.h"
#include "src/execution.h"
#include "src/jsregexp-inl.h"
#include "src/jsregexp.h"
#include "src/parser.h"
-#include "src/platform.h"
#include "src/regexp-macro-assembler.h"
#include "src/regexp-macro-assembler-irregexp.h"
#include "src/regexp-macro-assembler-tracer.h"
StringStream stream(&alloc);
DispatchTableDumper dumper(&stream);
tree()->ForEach(&dumper);
- OS::PrintError("%s", stream.ToCString().get());
+ base::OS::PrintError("%s", stream.ToCString().get());
}
include_rules = [
- # TODO(jochen): Enable this.
- #"-src",
+ "-src",
"+src/base",
"+src/libplatform",
]
#include <algorithm>
#include <queue>
-// TODO(jochen): We should have our own version of checks.h.
-#include "src/checks.h"
+#include "src/base/logging.h"
+#include "src/base/platform/platform.h"
#include "src/libplatform/worker-thread.h"
-#include "src/platform.h"
namespace v8 {
namespace internal {
DefaultPlatform::~DefaultPlatform() {
- LockGuard<Mutex> guard(&lock_);
+ base::LockGuard<base::Mutex> guard(&lock_);
queue_.Terminate();
if (initialized_) {
for (std::vector<WorkerThread*>::iterator i = thread_pool_.begin();
void DefaultPlatform::SetThreadPoolSize(int thread_pool_size) {
- LockGuard<Mutex> guard(&lock_);
+ base::LockGuard<base::Mutex> guard(&lock_);
ASSERT(thread_pool_size >= 0);
if (thread_pool_size < 1)
- thread_pool_size = OS::NumberOfProcessorsOnline();
+ thread_pool_size = base::OS::NumberOfProcessorsOnline();
thread_pool_size_ =
std::max(std::min(thread_pool_size, kMaxThreadPoolSize), 1);
}
void DefaultPlatform::EnsureInitialized() {
- LockGuard<Mutex> guard(&lock_);
+ base::LockGuard<base::Mutex> guard(&lock_);
if (initialized_) return;
initialized_ = true;
#include "include/v8-platform.h"
#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
#include "src/libplatform/task-queue.h"
-#include "src/platform/mutex.h"
namespace v8 {
namespace internal {
private:
static const int kMaxThreadPoolSize;
- Mutex lock_;
+ base::Mutex lock_;
bool initialized_;
int thread_pool_size_;
std::vector<WorkerThread*> thread_pool_;
#include "src/libplatform/task-queue.h"
-// TODO(jochen): We should have our own version of checks.h.
-#include "src/checks.h"
+#include "src/base/logging.h"
namespace v8 {
namespace internal {
TaskQueue::~TaskQueue() {
- LockGuard<Mutex> guard(&lock_);
+ base::LockGuard<base::Mutex> guard(&lock_);
ASSERT(terminated_);
ASSERT(task_queue_.empty());
}
void TaskQueue::Append(Task* task) {
- LockGuard<Mutex> guard(&lock_);
+ base::LockGuard<base::Mutex> guard(&lock_);
ASSERT(!terminated_);
task_queue_.push(task);
process_queue_semaphore_.Signal();
Task* TaskQueue::GetNext() {
for (;;) {
{
- LockGuard<Mutex> guard(&lock_);
+ base::LockGuard<base::Mutex> guard(&lock_);
if (!task_queue_.empty()) {
Task* result = task_queue_.front();
task_queue_.pop();
void TaskQueue::Terminate() {
- LockGuard<Mutex> guard(&lock_);
+ base::LockGuard<base::Mutex> guard(&lock_);
ASSERT(!terminated_);
terminated_ = true;
process_queue_semaphore_.Signal();
#include <queue>
#include "src/base/macros.h"
-#include "src/platform/mutex.h"
-#include "src/platform/semaphore.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/semaphore.h"
namespace v8 {
void Terminate();
private:
- Mutex lock_;
- Semaphore process_queue_semaphore_;
+ base::Mutex lock_;
+ base::Semaphore process_queue_semaphore_;
std::queue<Task*> task_queue_;
bool terminated_;
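
TaskQueue combines the renamed base::Mutex and base::Semaphore into a blocking producer/consumer queue: Append() pushes under the lock and signals, GetNext() waits on the semaphore and then pops under the lock. A self-contained sketch of the same shape using standard-library primitives, with a condition variable standing in for the counting semaphore (an illustration, not the v8::base API):

    // Sketch of the blocking-queue shape TaskQueue implements, using the
    // standard library instead of v8::base::Mutex/Semaphore.
    #include <condition_variable>
    #include <mutex>
    #include <queue>

    class BlockingQueue {
     public:
      void Push(int value) {
        {
          std::lock_guard<std::mutex> guard(lock_);
          queue_.push(value);
        }
        not_empty_.notify_one();  // Plays the role of process_queue_semaphore_.Signal().
      }

      int Pop() {
        std::unique_lock<std::mutex> guard(lock_);
        not_empty_.wait(guard, [this] { return !queue_.empty(); });
        int value = queue_.front();
        queue_.pop();
        return value;
      }

     private:
      std::mutex lock_;
      std::condition_variable not_empty_;
      std::queue<int> queue_;
    };
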
#include <queue>
#include "src/base/macros.h"
-#include "src/platform.h"
+#include "src/base/platform/platform.h"
namespace v8 {
class TaskQueue;
-class WorkerThread : public Thread {
+class WorkerThread : public base::Thread {
public:
explicit WorkerThread(TaskQueue* queue);
virtual ~WorkerThread();
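
WorkerThread now derives from base::Thread instead of the old internal Thread, but the subclassing pattern is unchanged: pass a name to the base constructor and override Run() with the thread body. A minimal sketch follows; Start() and Join() are assumed members of the base::Thread interface and are not shown in this patch:

    // Illustrative base::Thread subclass, mirroring the shape of WorkerThread
    // and Profiler. Start()/Join() are assumed from the base::Thread interface.
    #include "src/base/platform/platform.h"

    class CounterThread : public v8::base::Thread {
     public:
      CounterThread() : v8::base::Thread("v8:CounterThread"), count_(0) {}

      virtual void Run() {
        // Thread body: runs on the new thread once Start() has been called.
        for (int i = 0; i < 1000; i++) count_++;
      }

      int count() const { return count_; }

     private:
      int count_;
    };

    // Typical usage by the owning code:
    //   CounterThread thread;
    //   thread.Start();
    //   thread.Join();
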
#define V8_LIST_INL_H_
#include "src/list.h"
-#include "src/platform.h"
+
+#include "src/base/platform/platform.h"
namespace v8 {
namespace internal {
#ifndef V8_LIST_H_
#define V8_LIST_H_
+#include "src/checks.h"
#include "src/utils.h"
namespace v8 {
if (FLAG_trace_alloc) {
va_list arguments;
va_start(arguments, msg);
- OS::VPrint(msg, arguments);
+ base::OS::VPrint(msg, arguments);
va_end(arguments);
}
}
if (FLAG_hydrogen_stats) {
unsigned size = allocator_->zone()->allocation_size() -
allocator_zone_start_allocation_size_;
- isolate()->GetHStatistics()->SaveTiming(name(), TimeDelta(), size);
+ isolate()->GetHStatistics()->SaveTiming(name(), base::TimeDelta(), size);
}
if (ShouldProduceTraceOutput()) {
void Log::OpenTemporaryFile() {
ASSERT(!IsEnabled());
- output_handle_ = i::OS::OpenTemporaryFile();
+ output_handle_ = base::OS::OpenTemporaryFile();
}
void Log::OpenFile(const char* name) {
ASSERT(!IsEnabled());
- output_handle_ = OS::FOpen(name, OS::LogFileOpenMode);
+ output_handle_ = base::OS::FOpen(name, base::OS::LogFileOpenMode);
}
private:
Log* log_;
- LockGuard<Mutex> lock_guard_;
+ base::LockGuard<base::Mutex> lock_guard_;
int pos_;
};
// mutex_ is a Mutex used for enforcing exclusive
// access to the formatting buffer and the log file or log memory buffer.
- Mutex mutex_;
+ base::Mutex mutex_;
// Buffer used for formatting log messages. This is a singleton buffer and
// mutex_ should be acquired before using it.
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/cpu-profiler.h"
#include "src/log.h"
#include "src/log-utils.h"
#include "src/macro-assembler.h"
-#include "src/platform.h"
#include "src/runtime-profiler.h"
#include "src/serialize.h"
#include "src/string-stream.h"
int size = SNPrintF(
perf_dump_name,
kFilenameFormatString,
- OS::GetCurrentProcessId());
+ base::OS::GetCurrentProcessId());
CHECK_NE(size, -1);
- perf_output_handle_ = OS::FOpen(perf_dump_name.start(), OS::LogFileOpenMode);
+ perf_output_handle_ =
+ base::OS::FOpen(perf_dump_name.start(), base::OS::LogFileOpenMode);
CHECK_NE(perf_output_handle_, NULL);
setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize);
}
int length) {
ASSERT(code->instruction_start() == code->address() + Code::kHeaderSize);
- OS::FPrint(perf_output_handle_, "%llx %x %.*s\n",
- reinterpret_cast<uint64_t>(code->instruction_start()),
- code->instruction_size(),
- length, name);
+ base::OS::FPrint(perf_output_handle_, "%llx %x %.*s\n",
+ reinterpret_cast<uint64_t>(code->instruction_start()),
+ code->instruction_size(), length, name);
}
int size = SNPrintF(
perf_dump_name,
kFilenameFormatString,
- OS::GetCurrentProcessId());
+ base::OS::GetCurrentProcessId());
CHECK_NE(size, -1);
- perf_output_handle_ = OS::FOpen(perf_dump_name.start(), OS::LogFileOpenMode);
+ perf_output_handle_ =
+ base::OS::FOpen(perf_dump_name.start(), base::OS::LogFileOpenMode);
CHECK_NE(perf_output_handle_, NULL);
setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize);
code_load.id = JIT_CODE_LOAD;
code_load.total_size = sizeof(code_load) + length + 1 + code_size;
code_load.timestamp =
- static_cast<uint64_t>(OS::TimeCurrentMillis() * 1000.0);
+ static_cast<uint64_t>(base::OS::TimeCurrentMillis() * 1000.0);
code_load.vma = 0x0; // Our addresses are absolute.
code_load.code_addr = reinterpret_cast<uint64_t>(code->instruction_start());
code_load.code_size = code_size;
header.total_size = sizeof(jitheader);
header.pad1 = 0xdeadbeef;
header.elf_mach = GetElfMach();
- header.pid = OS::GetCurrentProcessId();
- header.timestamp = static_cast<uint64_t>(OS::TimeCurrentMillis() * 1000.0);
+ header.pid = base::OS::GetCurrentProcessId();
+ header.timestamp =
+ static_cast<uint64_t>(base::OS::TimeCurrentMillis() * 1000.0);
LogWriteBytes(reinterpret_cast<const char*>(&header), sizeof(header));
}
ScopedVector<char> ll_name(static_cast<int>(len + sizeof(kLogExt)));
MemCopy(ll_name.start(), name, len);
MemCopy(ll_name.start() + len, kLogExt, sizeof(kLogExt));
- ll_output_handle_ = OS::FOpen(ll_name.start(), OS::LogFileOpenMode);
+ ll_output_handle_ =
+ base::OS::FOpen(ll_name.start(), base::OS::LogFileOpenMode);
setvbuf(ll_output_handle_, NULL, _IOFBF, kLogBufferSize);
LogCodeInfo();
// An independent thread removes data and writes it to the log.
// This design minimizes the time spent in the sampler.
//
-class Profiler: public Thread {
+class Profiler: public base::Thread {
public:
explicit Profiler(Isolate* isolate);
void Engage();
int tail_; // Index to the buffer tail.
bool overflow_; // Tell whether a buffer overflow has occurred.
// Semaphore used for buffer synchronization.
- Semaphore buffer_semaphore_;
+ base::Semaphore buffer_semaphore_;
// Tells whether profiler is engaged, that is, the processing thread is started.
bool engaged_;
// Profiler implementation.
//
Profiler::Profiler(Isolate* isolate)
- : Thread("v8:Profiler"),
+ : base::Thread("v8:Profiler"),
isolate_(isolate),
head_(0),
tail_(0),
if (engaged_) return;
engaged_ = true;
- std::vector<OS::SharedLibraryAddress> addresses =
- OS::GetSharedLibraryAddresses();
+ std::vector<base::OS::SharedLibraryAddress> addresses =
+ base::OS::GetSharedLibraryAddresses();
for (size_t i = 0; i < addresses.size(); ++i) {
LOG(isolate_, SharedLibraryEvent(
addresses[i].library_path, addresses[i].start, addresses[i].end));
if (!is_logging_code_events()) return;
if (!log_->IsEnabled() || !FLAG_ll_prof) return;
CALL_LISTENERS(CodeMovingGCEvent());
- OS::SignalCodeMovingGC();
+ base::OS::SignalCodeMovingGC();
}
msg.Append("%s,%s,", name, tag);
uint32_t sec, usec;
- if (OS::GetUserTime(&sec, &usec) != -1) {
+ if (base::OS::GetUserTime(&sec, &usec) != -1) {
msg.Append("%d,%d,", sec, usec);
}
- msg.Append("%.0f", OS::TimeCurrentMillis());
+ msg.Append("%.0f", base::OS::TimeCurrentMillis());
msg.Append('\n');
msg.WriteToLogFile();
// Using non-relative system time in order to be able to synchronize with
// external memory profiling events (e.g. DOM memory size).
msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f\n",
- space, kind, OS::TimeCurrentMillis());
+ space, kind, base::OS::TimeCurrentMillis());
msg.WriteToLogFile();
}
Log::MessageBuilder msg(log_);
msg.Append("debug-queue-event,%s,%15.3f,%s\n",
event_type,
- OS::TimeCurrentMillis(),
+ base::OS::TimeCurrentMillis(),
parameter_string);
DeleteArray(parameter_string);
msg.WriteToLogFile();
p--;
break;
case 'p':
- stream.Add("%d", OS::GetCurrentProcessId());
+ stream.Add("%d", base::OS::GetCurrentProcessId());
break;
case 't': {
// %t expands to the current time in milliseconds.
- double time = OS::TimeCurrentMillis();
+ double time = base::OS::TimeCurrentMillis();
stream.Add("%.0f", FmtElm(time));
break;
}
#include <string>
#include "src/allocation.h"
+#include "src/base/platform/elapsed-timer.h"
+#include "src/base/platform/platform.h"
#include "src/objects.h"
-#include "src/platform.h"
-#include "src/platform/elapsed-timer.h"
namespace v8 {
+
+namespace base {
+class Semaphore;
+}
+
namespace internal {
// Logger is used for collecting logging information from V8 during
class Log;
class PositionsRecorder;
class Profiler;
-class Semaphore;
class Ticker;
struct TickSample;
// 'true' between SetUp() and TearDown().
bool is_initialized_;
- ElapsedTimer timer_;
+ base::ElapsedTimer timer_;
friend class CpuProfiler;
};
}
}
if (FLAG_job_based_sweeping) {
- if (!pending_sweeper_jobs_semaphore_.WaitFor(TimeDelta::FromSeconds(0))) {
+ if (!pending_sweeper_jobs_semaphore_.WaitFor(
+ base::TimeDelta::FromSeconds(0))) {
return false;
}
pending_sweeper_jobs_semaphore_.Signal();
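
A zero TimeDelta turns WaitFor() into a non-blocking probe: it fails immediately if the semaphore count is zero, and on success the count is handed straight back with Signal() so the check has no lasting effect. A hedged sketch of the idiom in isolation (the helper name is hypothetical; WaitFor, Signal and TimeDelta::FromSeconds are the calls already visible above):

    // Illustrative try-wait probe: returns true iff the semaphore could be
    // acquired without blocking; a successful probe leaves the count unchanged.
    #include "src/base/platform/semaphore.h"
    #include "src/base/platform/time.h"

    bool TryProbe(v8::base::Semaphore* semaphore) {
      if (!semaphore->WaitFor(v8::base::TimeDelta::FromSeconds(0))) {
        return false;  // Count was zero; nothing to acquire right now.
      }
      semaphore->Signal();  // Hand the acquired count straight back.
      return true;
    }
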
double start_time = 0.0;
if (FLAG_print_cumulative_gc_stat) {
- start_time = OS::TimeCurrentMillis();
+ start_time = base::OS::TimeCurrentMillis();
}
p->MarkSweptPrecisely();
}
p->ResetLiveBytes();
if (FLAG_print_cumulative_gc_stat) {
- space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time);
+ space->heap()->AddSweepingTime(base::OS::TimeCurrentMillis() - start_time);
}
}
// True if concurrent or parallel sweeping is currently in progress.
bool sweeping_pending_;
- Semaphore pending_sweeper_jobs_semaphore_;
+ base::Semaphore pending_sweeper_jobs_semaphore_;
bool sequential_sweeping_;
#include "src/mips/assembler-mips.h"
-#include "src/cpu.h"
+#include "src/assembler.h"
#include "src/debug.h"
// Absolute code pointer inside code object moves with the code object.
byte* p = reinterpret_cast<byte*>(pc_);
int count = Assembler::RelocateInternalReference(p, delta);
- CPU::FlushICache(p, count * sizeof(uint32_t));
+ CpuFeatures::FlushICache(p, count * sizeof(uint32_t));
}
}
supported_ |= 1u << FPU;
#else
// Probe for additional features at runtime.
- CPU cpu;
+ base::CPU cpu;
if (cpu.has_fpu()) supported_ |= 1u << FPU;
#endif
}
}
// Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
+ CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}
// snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
// OS::nan_value() returns a qNaN.
void Assembler::QuietNaN(HeapObject* object) {
- HeapNumber::cast(object)->set_value(OS::nan_value());
+ HeapNumber::cast(object)->set_value(base::OS::nan_value());
}
}
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
+ CpuFeatures::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
}
}
}
if (patched) {
- CPU::FlushICache(pc+2, sizeof(Address));
+ CpuFeatures::FlushICache(pc+2, sizeof(Address));
}
}
break;
}
ASSERT(GetMode(stub) == mode);
- CPU::FlushICache(stub->instruction_start(), 4 * Assembler::kInstrSize);
+ CpuFeatures::FlushICache(stub->instruction_start(),
+ 4 * Assembler::kInstrSize);
}
private:
UnaryMathFunction CreateExpFunction() {
if (!FLAG_fast_math) return &std::exp;
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunction>(buffer);
return stub;
#else
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(3 * KB, &actual_size, true));
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true));
if (buffer == NULL) return stub;
// This code assumes that cache lines are 32 bytes and if the cache line is
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
return &std::sqrt;
#else
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &std::sqrt;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
- CPU::FlushICache(sequence, young_length);
+ CpuFeatures::FlushICache(sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
#if V8_TARGET_ARCH_MIPS
-#include "src/cpu.h"
+#include "src/assembler.h"
#include "src/macro-assembler.h"
#include "src/simulator.h" // For cache flushing.
namespace internal {
-void CPU::FlushICache(void* start, size_t size) {
+void CpuFeatures::FlushICache(void* start, size_t size) {
// Nothing to do, flushing no instructions.
if (size == 0) {
return;
#if V8_TARGET_ARCH_MIPS
+#include "src/base/platform/platform.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
#include "src/mips/constants-mips.h"
-#include "src/platform.h"
namespace v8 {
namespace internal {
// environment.
// Note: This will break if we ever start generating snapshots on one Mips
// platform for another Mips platform with a different alignment.
- return OS::ActivationFrameAlignment();
+ return base::OS::ActivationFrameAlignment();
#else // V8_HOST_ARCH_MIPS
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
#if V8_HOST_ARCH_MIPS
if (emit_debug_code()) {
- int frame_alignment = OS::ActivationFrameAlignment();
+ int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
ASSERT(IsPowerOf2(frame_alignment));
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
- if (OS::ActivationFrameAlignment() > kPointerSize) {
+ if (base::OS::ActivationFrameAlignment() > kPointerSize) {
lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
if (flush_cache_ == FLUSH) {
- CPU::FlushICache(address_, size_);
+ CpuFeatures::FlushICache(address_, size_);
}
// Check that the code was patched as expected.
// into register dst.
void LoadFromSafepointRegisterSlot(Register dst, Register src);
- // Flush the I-cache from asm code. You should use CPU::FlushICache from C.
+ // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
+ // from C.
// Does not handle errors.
void FlushICache(Register address, unsigned instructions);
// Private methods:
void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
- int stack_alignment = OS::ActivationFrameAlignment();
+ int stack_alignment = base::OS::ActivationFrameAlignment();
// Align the stack pointer and save the original sp value on the stack.
__ mov(scratch, sp);
#if V8_TARGET_ARCH_MIPS
#include "src/assembler.h"
-#include "src/cpu.h"
#include "src/disasm.h"
#include "src/globals.h" // Need the BitCast.
#include "src/mips/constants-mips.h"
}
} else if (strcmp(cmd, "gdb") == 0) {
PrintF("relinquishing control to gdb\n");
- v8::internal::OS::DebugBreak();
+ v8::base::OS::DebugBreak();
PrintF("regaining control from gdb\n");
} else if (strcmp(cmd, "break") == 0) {
if (argc == 2) {
PrintF("Unaligned (double) read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
addr,
reinterpret_cast<intptr_t>(instr));
- OS::Abort();
+ base::OS::Abort();
return 0;
}
PrintF("Unaligned (double) write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
addr,
reinterpret_cast<intptr_t>(instr));
- OS::Abort();
+ base::OS::Abort();
}
PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
addr,
reinterpret_cast<intptr_t>(instr));
- OS::Abort();
+ base::OS::Abort();
return 0;
}
PrintF("Unaligned signed halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
addr,
reinterpret_cast<intptr_t>(instr));
- OS::Abort();
+ base::OS::Abort();
return 0;
}
PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
addr,
reinterpret_cast<intptr_t>(instr));
- OS::Abort();
+ base::OS::Abort();
}
PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
addr,
reinterpret_cast<intptr_t>(instr));
- OS::Abort();
+ base::OS::Abort();
}
// Compute position of stack on entry to generated code.
int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
- kCArgsSlotsSize);
- if (OS::ActivationFrameAlignment() != 0) {
- entry_stack &= -OS::ActivationFrameAlignment();
+ if (base::OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -base::OS::ActivationFrameAlignment();
}
// Store remaining arguments on stack, from low to high memory.
intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
#include "src/v8.h"
#include "src/assembler.h"
+#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/flags.h"
#include "src/list.h"
#include "src/natives.h"
-#include "src/platform.h"
#include "src/serialize.h"
void WriteFilePrefix() const {
fprintf(fp_, "// Autogenerated snapshot file. Do not edit.\n\n");
fprintf(fp_, "#include \"src/v8.h\"\n");
- fprintf(fp_, "#include \"src/platform.h\"\n\n");
+ fprintf(fp_, "#include \"src/base/platform/platform.h\"\n\n");
fprintf(fp_, "#include \"src/snapshot.h\"\n\n");
fprintf(fp_, "namespace v8 {\n");
fprintf(fp_, "namespace internal {\n\n");
}
FILE* GetFileDescriptorOrDie(const char* filename) {
- FILE* fp = i::OS::FOpen(filename, "wb");
+ FILE* fp = base::OS::FOpen(filename, "wb");
if (fp == NULL) {
i::PrintF("Unable to open file \"%s\" for writing.\n", filename);
exit(1);
HandleScope scope(isolate);
v8::Context::Scope cscope(v8::Local<v8::Context>::New(isolate, context));
const char* name = i::FLAG_extra_code;
- FILE* file = i::OS::FOpen(name, "rb");
+ FILE* file = base::OS::FOpen(name, "rb");
if (file == NULL) {
fprintf(stderr, "Failed to open '%s': errno %d\n", name, errno);
exit(1);
#include "src/natives.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
#include "src/list.h"
#include "src/list-inl.h"
#include "src/snapshot-source-sink.h"
inline double FixedDoubleArray::canonical_not_the_hole_nan_as_double() {
- ASSERT(BitCast<uint64_t>(OS::nan_value()) != kHoleNanInt64);
- ASSERT((BitCast<uint64_t>(OS::nan_value()) >> 32) != kHoleNanUpper32);
- return OS::nan_value();
+ ASSERT(BitCast<uint64_t>(base::OS::nan_value()) != kHoleNanInt64);
+ ASSERT((BitCast<uint64_t>(base::OS::nan_value()) >> 32) != kHoleNanUpper32);
+ return base::OS::nan_value();
}
float Float32ArrayTraits::defaultValue() {
- return static_cast<float>(OS::nan_value());
+ return static_cast<float>(base::OS::nan_value());
}
-double Float64ArrayTraits::defaultValue() { return OS::nan_value(); }
+double Float64ArrayTraits::defaultValue() { return base::OS::nan_value(); }
template <class Traits>
for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
it.rinfo()->apply(delta, SKIP_ICACHE_FLUSH);
}
- CPU::FlushICache(instruction_start(), instruction_size());
+ CpuFeatures::FlushICache(instruction_start(), instruction_size());
}
it.rinfo()->apply(delta, SKIP_ICACHE_FLUSH);
}
}
- CPU::FlushICache(instruction_start(), instruction_size());
+ CpuFeatures::FlushICache(instruction_start(), instruction_size());
}
Handle<ExternalFloat32Array> array,
uint32_t index,
Handle<Object> value) {
- float cast_value = static_cast<float>(OS::nan_value());
+ float cast_value = static_cast<float>(base::OS::nan_value());
if (index < static_cast<uint32_t>(array->length())) {
if (value->IsSmi()) {
int int_value = Handle<Smi>::cast(value)->value();
Handle<ExternalFloat64Array> array,
uint32_t index,
Handle<Object> value) {
- double double_value = OS::nan_value();
+ double double_value = base::OS::nan_value();
if (index < static_cast<uint32_t>(array->length())) {
if (value->IsNumber()) {
double_value = value->Number();
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/builtins.h"
+#include "src/checks.h"
#include "src/elements-kind.h"
#include "src/field-index.h"
#include "src/flags.h"
#include "src/property-details.h"
#include "src/smart-pointers.h"
#include "src/unicode-inl.h"
-#include "src/v8checks.h"
#include "src/zone.h"
#if V8_TARGET_ARCH_ARM
void OptimizingCompilerThread::Run() {
#ifdef DEBUG
- { LockGuard<Mutex> lock_guard(&thread_id_mutex_);
+ { base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_);
thread_id_ = ThreadId::Current().ToInteger();
}
#endif
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
- ElapsedTimer total_timer;
+ base::ElapsedTimer total_timer;
if (FLAG_trace_concurrent_recompilation) total_timer.Start();
while (true) {
isolate_, Logger::TimerEventScope::v8_recompile_concurrent);
if (FLAG_concurrent_recompilation_delay != 0) {
- OS::Sleep(FLAG_concurrent_recompilation_delay);
+ base::OS::Sleep(FLAG_concurrent_recompilation_delay);
}
switch (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_))) {
continue;
}
- ElapsedTimer compiling_timer;
+ base::ElapsedTimer compiling_timer;
if (FLAG_trace_concurrent_recompilation) compiling_timer.Start();
CompileNext();
OptimizedCompileJob* OptimizingCompilerThread::NextInput() {
- LockGuard<Mutex> access_input_queue_(&input_queue_mutex_);
+ base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
if (input_queue_length_ == 0) return NULL;
OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
ASSERT_NE(NULL, job);
osr_attempts_++;
AddToOsrBuffer(job);
// Add job to the front of the input queue.
- LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
+ base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
ASSERT_LT(input_queue_length_, input_queue_capacity_);
// Move shift_ back by one.
input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
input_queue_length_++;
} else {
// Add job to the back of the input queue.
- LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
+ base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
ASSERT_LT(input_queue_length_, input_queue_capacity_);
input_queue_[InputQueueIndex(input_queue_length_)] = job;
input_queue_length_++;
bool OptimizingCompilerThread::IsOptimizerThread() {
- LockGuard<Mutex> lock_guard(&thread_id_mutex_);
+ base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_);
return ThreadId::Current().ToInteger() == thread_id_;
}
#endif
#define V8_OPTIMIZING_COMPILER_THREAD_H_
#include "src/base/atomicops.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
#include "src/flags.h"
#include "src/list.h"
-#include "src/platform.h"
-#include "src/platform/mutex.h"
-#include "src/platform/time.h"
#include "src/unbound-queue-inl.h"
namespace v8 {
class OptimizedCompileJob;
class SharedFunctionInfo;
-class OptimizingCompilerThread : public Thread {
+class OptimizingCompilerThread : public base::Thread {
public:
explicit OptimizingCompilerThread(Isolate *isolate) :
Thread("OptimizingCompilerThread"),
bool IsQueuedForOSR(JSFunction* function);
inline bool IsQueueAvailable() {
- LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
+ base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
return input_queue_length_ < input_queue_capacity_;
}
#ifdef DEBUG
int thread_id_;
- Mutex thread_id_mutex_;
+ base::Mutex thread_id_mutex_;
#endif
Isolate* isolate_;
- Semaphore stop_semaphore_;
- Semaphore input_queue_semaphore_;
+ base::Semaphore stop_semaphore_;
+ base::Semaphore input_queue_semaphore_;
// Circular queue of incoming recompilation tasks (including OSR).
OptimizedCompileJob** input_queue_;
int input_queue_capacity_;
int input_queue_length_;
int input_queue_shift_;
- Mutex input_queue_mutex_;
+ base::Mutex input_queue_mutex_;
// Queue of recompilation tasks ready to be installed (excluding OSR).
UnboundQueue<OptimizedCompileJob*> output_queue_;
int osr_buffer_cursor_;
volatile base::AtomicWord stop_thread_;
- TimeDelta time_spent_compiling_;
- TimeDelta time_spent_total_;
+ base::TimeDelta time_spent_compiling_;
+ base::TimeDelta time_spent_total_;
int osr_hits_;
int osr_attempts_;
#include "src/api.h"
#include "src/ast.h"
+#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/char-predicates-inl.h"
#include "src/codegen.h"
#include "src/compiler.h"
#include "src/messages.h"
#include "src/parser.h"
-#include "src/platform.h"
#include "src/preparser.h"
#include "src/runtime.h"
#include "src/scanner-character-streams.h"
HistogramTimerScope timer_scope(isolate()->counters()->parse(), true);
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
- ElapsedTimer timer;
+ base::ElapsedTimer timer;
if (FLAG_trace_parse) {
timer.Start();
}
HistogramTimerScope timer_scope(isolate()->counters()->parse_lazy());
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
- ElapsedTimer timer;
+ base::ElapsedTimer timer;
if (FLAG_trace_parse) {
timer.Start();
}
+++ /dev/null
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Platform-specific code for Cygwin goes here. For the POSIX-compatible
-// parts, the implementation is in platform-posix.cc.
-
-#include <errno.h>
-#include <pthread.h>
-#include <semaphore.h>
-#include <stdarg.h>
-#include <strings.h> // index
-#include <sys/mman.h> // mmap & munmap
-#include <sys/time.h>
-#include <unistd.h> // sysconf
-
-#include <cmath>
-
-#undef MAP_TYPE
-
-#include "src/base/win32-headers.h"
-#include "src/platform.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-
-const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
- if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return tzname[0]; // The location of the timezone string on Cygwin.
-}
-
-
-double OS::LocalTimeOffset(TimezoneCache* cache) {
- // On Cygwin, struct tm does not contain a tm_gmtoff field.
- time_t utc = time(NULL);
- ASSERT(utc != -1);
- struct tm* loc = localtime(&utc);
- ASSERT(loc != NULL);
- // time - localtime includes any daylight savings offset, so subtract it.
- return static_cast<double>((mktime(loc) - utc) * msPerSecond -
- (loc->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
- const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (mbase == MAP_FAILED) return NULL;
- *allocated = msize;
- return mbase;
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "r+");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
-
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
- fclose(file_);
-}
-
-
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
- std::vector<SharedLibraryAddresses> result;
- // This function assumes that the layout of the file is as follows:
- // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
- // If we encounter an unexpected situation we abort scanning further entries.
- FILE* fp = fopen("/proc/self/maps", "r");
- if (fp == NULL) return result;
-
- // Allocate enough room to be able to store a full file name.
- const int kLibNameLen = FILENAME_MAX + 1;
- char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
-
- // This loop will terminate once the scanning hits an EOF.
- while (true) {
- uintptr_t start, end;
- char attr_r, attr_w, attr_x, attr_p;
- // Parse the addresses and permission bits at the beginning of the line.
- if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
- if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
-
- int c;
- if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
- // Found a read-only executable entry. Skip characters until we reach
- // the beginning of the filename or the end of the line.
- do {
- c = getc(fp);
- } while ((c != EOF) && (c != '\n') && (c != '/'));
- if (c == EOF) break; // EOF: Was unexpected, just exit.
-
- // Process the filename if found.
- if (c == '/') {
- ungetc(c, fp); // Push the '/' back into the stream to be read below.
-
- // Read to the end of the line. Exit if the read fails.
- if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
-
- // Drop the newline character read by fgets. We do not need to check
- // for a zero-length string because we know that we at least read the
- // '/' character.
- lib_name[strlen(lib_name) - 1] = '\0';
- } else {
- // No library name found, just record the raw address range.
- snprintf(lib_name, kLibNameLen,
- "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
- }
- result.push_back(SharedLibraryAddress(lib_name, start, end));
- } else {
- // Entry not describing executable data. Skip to end of line to set up
- // reading the next entry.
- do {
- c = getc(fp);
- } while ((c != EOF) && (c != '\n'));
- if (c == EOF) break;
- }
- }
- free(lib_name);
- fclose(fp);
- return result;
-}
-
-
-void OS::SignalCodeMovingGC() {
- // Nothing to do on Cygwin.
-}
-
-
-// The VirtualMemory implementation is taken from platform-win32.cc.
-// The mmap-based virtual memory implementation as it is used on most posix
-// platforms does not work well because Cygwin does not support MAP_FIXED.
-// This causes VirtualMemory::Commit to not always commit the memory region
-// specified.
-
-static void* GetRandomAddr() {
- Isolate* isolate = Isolate::UncheckedCurrent();
- // Note that the current isolate isn't set up in a call path via
- // CpuFeatures::Probe. We don't care about randomization in this case because
- // the code page is immediately freed.
- if (isolate != NULL) {
- // The address range used to randomize RWX allocations in OS::Allocate
- // Try not to map pages into the default range that windows loads DLLs
- // Use a multiple of 64k to prevent committing unused memory.
- // Note: This does not guarantee RWX regions will be within the
- // range kAllocationRandomAddressMin to kAllocationRandomAddressMax
-#ifdef V8_HOST_ARCH_64_BIT
- static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
- static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
-#else
- static const intptr_t kAllocationRandomAddressMin = 0x04000000;
- static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
-#endif
- uintptr_t address =
- (isolate->random_number_generator()->NextInt() << kPageSizeBits) |
- kAllocationRandomAddressMin;
- address &= kAllocationRandomAddressMax;
- return reinterpret_cast<void *>(address);
- }
- return NULL;
-}
-
-
-static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
- LPVOID base = NULL;
-
- if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
- // For exectutable pages try and randomize the allocation address
- for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
- base = VirtualAlloc(GetRandomAddr(), size, action, protection);
- }
- }
-
- // After three attempts give up and let the OS find an address to use.
- if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
-
- return base;
-}
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
- : address_(NULL), size_(0) {
- ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* address = ReserveRegion(request_size);
- if (address == NULL) return;
- uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
- // Try reducing the size by freeing and then reallocating a specific area.
- bool result = ReleaseRegion(address, request_size);
- USE(result);
- ASSERT(result);
- address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
- if (address != NULL) {
- request_size = size;
- ASSERT(base == static_cast<uint8_t*>(address));
- } else {
- // Resizing failed, just go with a bigger area.
- address = ReserveRegion(request_size);
- if (address == NULL) return;
- }
- address_ = address;
- size_ = request_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address_, size_);
- ASSERT(result);
- USE(result);
- }
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- ASSERT(IsReserved());
- return UncommitRegion(address, size);
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
- if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
- return false;
- }
- return true;
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- if (NULL == VirtualAlloc(address,
- OS::CommitPageSize(),
- MEM_COMMIT,
- PAGE_NOACCESS)) {
- return false;
- }
- return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return VirtualFree(base, size, MEM_DECOMMIT) != 0;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return VirtualFree(base, 0, MEM_RELEASE) != 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
-} } // namespace v8::internal
+++ /dev/null
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Platform-specific code for FreeBSD goes here. For the POSIX-compatible
-// parts, the implementation is in platform-posix.cc.
-
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <stdlib.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/ucontext.h>
-
-#include <sys/fcntl.h> // open
-#include <sys/mman.h> // mmap & munmap
-#include <sys/stat.h> // open
-#include <sys/types.h> // mmap & munmap
-#include <unistd.h> // getpagesize
-// If you don't have execinfo.h then you need devel/libexecinfo from ports.
-#include <errno.h>
-#include <limits.h>
-#include <stdarg.h>
-#include <strings.h> // index
-
-#include <cmath>
-
-#undef MAP_TYPE
-
-#include "src/platform.h"
-#include "src/utils.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
- if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset(TimezoneCache* cache) {
- time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
- // tm_gmtoff includes any daylight savings offset, so subtract it.
- return static_cast<double>(t->tm_gmtoff * msPerSecond -
- (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool executable) {
- const size_t msize = RoundUp(requested, getpagesize());
- int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
-
- if (mbase == MAP_FAILED) return NULL;
- *allocated = msize;
- return mbase;
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "r+");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
-
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
- fclose(file_);
-}
-
-
-static unsigned StringToLong(char* buffer) {
- return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
-}
-
-
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
- std::vector<SharedLibraryAddress> result;
- static const int MAP_LENGTH = 1024;
- int fd = open("/proc/self/maps", O_RDONLY);
- if (fd < 0) return result;
- while (true) {
- char addr_buffer[11];
- addr_buffer[0] = '0';
- addr_buffer[1] = 'x';
- addr_buffer[10] = 0;
- int result = read(fd, addr_buffer + 2, 8);
- if (result < 8) break;
- unsigned start = StringToLong(addr_buffer);
- result = read(fd, addr_buffer + 2, 1);
- if (result < 1) break;
- if (addr_buffer[2] != '-') break;
- result = read(fd, addr_buffer + 2, 8);
- if (result < 8) break;
- unsigned end = StringToLong(addr_buffer);
- char buffer[MAP_LENGTH];
- int bytes_read = -1;
- do {
- bytes_read++;
- if (bytes_read >= MAP_LENGTH - 1)
- break;
- result = read(fd, buffer + bytes_read, 1);
- if (result < 1) break;
- } while (buffer[bytes_read] != '\n');
- buffer[bytes_read] = 0;
- // Ignore mappings that are not executable.
- if (buffer[3] != 'x') continue;
- char* start_of_path = index(buffer, '/');
- // There may be no filename in this line. Skip to next.
- if (start_of_path == NULL) continue;
- buffer[bytes_read] = 0;
- result.push_back(SharedLibraryAddress(start_of_path, start, end));
- }
- close(fd);
- return result;
-}
-
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
- : address_(NULL), size_(0) {
- ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(OS::GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
-
- uint8_t* base = static_cast<uint8_t*>(reservation);
- uint8_t* aligned_base = RoundUp(base, alignment);
- ASSERT_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- ASSERT_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- ASSERT(aligned_size == request_size);
-
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- ASSERT(result);
- USE(result);
- }
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- OS::Guard(address, OS::CommitPageSize());
- return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(base,
- size,
- prot,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
- return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return mmap(base,
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
-} } // namespace v8::internal
+++ /dev/null
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Platform-specific code for Linux goes here. For the POSIX-compatible
-// parts, the implementation is in platform-posix.cc.
-
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <stdlib.h>
-#include <sys/prctl.h>
-#include <sys/resource.h>
-#include <sys/syscall.h>
-#include <sys/time.h>
-#include <sys/types.h>
-
-// Ubuntu Dapper requires memory pages to be marked as
-// executable. Otherwise, OS raises an exception when executing code
-// in that page.
-#include <errno.h>
-#include <fcntl.h> // open
-#include <stdarg.h>
-#include <strings.h> // index
-#include <sys/mman.h> // mmap & munmap
-#include <sys/stat.h> // open
-#include <sys/types.h> // mmap & munmap
-#include <unistd.h> // sysconf
-
-// GLibc on ARM defines mcontext_t has a typedef for 'struct sigcontext'.
-// Old versions of the C library <signal.h> didn't define the type.
-#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
- (defined(__arm__) || defined(__aarch64__)) && \
- !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
-#include <asm/sigcontext.h> // NOLINT
-#endif
-
-#if defined(LEAK_SANITIZER)
-#include <sanitizer/lsan_interface.h>
-#endif
-
-#include <cmath>
-
-#undef MAP_TYPE
-
-#include "src/platform.h"
-#include "src/utils.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-#ifdef __arm__
-
-bool OS::ArmUsingHardFloat() {
- // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
- // the Floating Point ABI used (PCS stands for Procedure Call Standard).
- // We use these as well as a couple of other defines to statically determine
- // what FP ABI used.
- // GCC versions 4.4 and below don't support hard-fp.
- // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
- // __ARM_PCS_VFP.
-
-#define GCC_VERSION (__GNUC__ * 10000 \
- + __GNUC_MINOR__ * 100 \
- + __GNUC_PATCHLEVEL__)
-#if GCC_VERSION >= 40600
-#if defined(__ARM_PCS_VFP)
- return true;
-#else
- return false;
-#endif
-
-#elif GCC_VERSION < 40500
- return false;
-
-#else
-#if defined(__ARM_PCS_VFP)
- return true;
-#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
- !defined(__VFP_FP__)
- return false;
-#else
-#error "Your version of GCC does not report the FP ABI compiled for." \
- "Please report it on this issue" \
- "http://code.google.com/p/v8/issues/detail?id=2140"
-
-#endif
-#endif
-#undef GCC_VERSION
-}
-
-#endif // def __arm__
-
-
-const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
- if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset(TimezoneCache* cache) {
- time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
- // tm_gmtoff includes any daylight savings offset, so subtract it.
- return static_cast<double>(t->tm_gmtoff * msPerSecond -
- (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
- const size_t msize = RoundUp(requested, AllocateAlignment());
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* addr = OS::GetRandomMmapAddr();
- void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (mbase == MAP_FAILED) return NULL;
- *allocated = msize;
- return mbase;
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "r+");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
-
- void* memory =
- mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- fileno(file),
- 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- fileno(file),
- 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) OS::Free(memory_, size_);
- fclose(file_);
-}
-
-
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
- std::vector<SharedLibraryAddress> result;
- // This function assumes that the layout of the file is as follows:
- // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
- // If we encounter an unexpected situation we abort scanning further entries.
- FILE* fp = fopen("/proc/self/maps", "r");
- if (fp == NULL) return result;
-
- // Allocate enough room to be able to store a full file name.
- const int kLibNameLen = FILENAME_MAX + 1;
- char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
-
- // This loop will terminate once the scanning hits an EOF.
- while (true) {
- uintptr_t start, end;
- char attr_r, attr_w, attr_x, attr_p;
- // Parse the addresses and permission bits at the beginning of the line.
- if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
- if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
-
- int c;
- if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
- // Found a read-only executable entry. Skip characters until we reach
- // the beginning of the filename or the end of the line.
- do {
- c = getc(fp);
- } while ((c != EOF) && (c != '\n') && (c != '/') && (c != '['));
- if (c == EOF) break; // EOF: Was unexpected, just exit.
-
- // Process the filename if found.
- if ((c == '/') || (c == '[')) {
- // Push the '/' or '[' back into the stream to be read below.
- ungetc(c, fp);
-
- // Read to the end of the line. Exit if the read fails.
- if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
-
- // Drop the newline character read by fgets. We do not need to check
- // for a zero-length string because we know that we at least read the
- // '/' or '[' character.
- lib_name[strlen(lib_name) - 1] = '\0';
- } else {
- // No library name found, just record the raw address range.
- snprintf(lib_name, kLibNameLen,
- "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
- }
- result.push_back(SharedLibraryAddress(lib_name, start, end));
- } else {
- // Entry not describing executable data. Skip to end of line to set up
- // reading the next entry.
- do {
- c = getc(fp);
- } while ((c != EOF) && (c != '\n'));
- if (c == EOF) break;
- }
- }
- free(lib_name);
- fclose(fp);
- return result;
-}
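// A stand-alone sketch of the parsing above, applied to one invented
// /proc/self/maps line; the format string mirrors the two fscanf calls
// (addresses first, then the rwxp permission characters), and only
// read-only executable entries would be recorded.
#include <cinttypes>
#include <cstdio>

int main() {
  const char* line =
      "7f2c4d200000-7f2c4d3b5000 r-xp 00000000 08:01 123456 /lib/libc.so.6";
  uintptr_t start, end;
  char r, w, x, p;
  if (std::sscanf(line, "%" SCNxPTR "-%" SCNxPTR " %c%c%c%c",
                  &start, &end, &r, &w, &x, &p) == 6 &&
      r == 'r' && w != 'w' && x == 'x') {
    std::printf("%" PRIxPTR "-%" PRIxPTR " /lib/libc.so.6\n", start, end);
  }
  return 0;
}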
-
-
-void OS::SignalCodeMovingGC() {
- // Support for ll_prof.py.
- //
- // The Linux profiler built into the kernel logs all mmap's with
- // PROT_EXEC so that analysis tools can properly attribute ticks. We
- // do a mmap with a name known by ll_prof.py and immediately munmap
- // it. This injects a GC marker into the stream of events generated
-// by the kernel and allows us to synchronize the V8 code log and the
- // kernel log.
- int size = sysconf(_SC_PAGESIZE);
- FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+");
- if (f == NULL) {
- OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
- OS::Abort();
- }
- void* addr = mmap(OS::GetRandomMmapAddr(),
- size,
-#if defined(__native_client__)
- // The Native Client port of V8 uses an interpreter,
- // so code pages don't need PROT_EXEC.
- PROT_READ,
-#else
- PROT_READ | PROT_EXEC,
-#endif
- MAP_PRIVATE,
- fileno(f),
- 0);
- ASSERT(addr != MAP_FAILED);
- OS::Free(addr, size);
- fclose(f);
-}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
- : address_(NULL), size_(0) {
- ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(OS::GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
-
- uint8_t* base = static_cast<uint8_t*>(reservation);
- uint8_t* aligned_base = RoundUp(base, alignment);
- ASSERT_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- ASSERT_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- ASSERT(aligned_size == request_size);
-
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
-#if defined(LEAK_SANITIZER)
- __lsan_register_root_region(address_, size_);
-#endif
-}
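// A worked example of the trimming above, assuming 4 KB pages and a request
// of size = 1 MB aligned to 64 KB (the base address is invented):
//   request_size = RoundUp(0x100000 + 0x10000, 0x1000) = 0x110000
//   base         = 0x7f0000012000   (whatever mmap returned)
//   aligned_base = RoundUp(base, 0x10000)              = 0x7f0000020000
//   prefix freed = aligned_base - base                 = 0xe000
//   aligned_size = RoundUp(0x100000, 0x1000)           = 0x100000
//   suffix freed = 0x110000 - 0xe000 - 0x100000        = 0x2000
// leaving exactly one 64 KB-aligned, 1 MB reservation.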
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- ASSERT(result);
- USE(result);
- }
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- OS::Guard(address, OS::CommitPageSize());
- return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
-#if defined(LEAK_SANITIZER)
- __lsan_register_root_region(result, size);
-#endif
- return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-#if defined(__native_client__)
- // The Native Client port of V8 uses an interpreter,
- // so code pages don't need PROT_EXEC.
- int prot = PROT_READ | PROT_WRITE;
-#else
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-#endif
- if (MAP_FAILED == mmap(base,
- size,
- prot,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
-
- return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return mmap(base,
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-#if defined(LEAK_SANITIZER)
- __lsan_unregister_root_region(base, size);
-#endif
- return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
- return true;
-}
-
-} } // namespace v8::internal
+++ /dev/null
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Platform-specific code for MacOS goes here. For the POSIX-compatible
-// parts, the implementation is in platform-posix.cc.
-
-#include <dlfcn.h>
-#include <mach/mach_init.h>
-#include <mach-o/dyld.h>
-#include <mach-o/getsect.h>
-#include <sys/mman.h>
-#include <unistd.h>
-
-#include <AvailabilityMacros.h>
-
-#include <errno.h>
-#include <libkern/OSAtomic.h>
-#include <mach/mach.h>
-#include <mach/semaphore.h>
-#include <mach/task.h>
-#include <mach/vm_statistics.h>
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <stdarg.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/resource.h>
-#include <sys/sysctl.h>
-#include <sys/time.h>
-#include <sys/types.h>
-
-#include <cmath>
-
-#undef MAP_TYPE
-
-#include "src/platform.h"
-#include "src/utils.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-// Constants used for mmap.
-// kMmapFd is used to pass vm_alloc flags to tag the region with the
-// user-defined tag 255. This helps identify V8-allocated regions in memory
-// analysis tools like vmmap(1).
-static const int kMmapFd = VM_MAKE_TAG(255);
-static const off_t kMmapFdOffset = 0;
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
- const size_t msize = RoundUp(requested, getpagesize());
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* mbase = mmap(OS::GetRandomMmapAddr(),
- msize,
- prot,
- MAP_PRIVATE | MAP_ANON,
- kMmapFd,
- kMmapFdOffset);
- if (mbase == MAP_FAILED) return NULL;
- *allocated = msize;
- return mbase;
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "r+");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
-
- void* memory =
- mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- fileno(file),
- 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- fileno(file),
- 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) OS::Free(memory_, size_);
- fclose(file_);
-}
-
-
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
- std::vector<SharedLibraryAddress> result;
- unsigned int images_count = _dyld_image_count();
- for (unsigned int i = 0; i < images_count; ++i) {
- const mach_header* header = _dyld_get_image_header(i);
- if (header == NULL) continue;
-#if V8_HOST_ARCH_X64
- uint64_t size;
- char* code_ptr = getsectdatafromheader_64(
- reinterpret_cast<const mach_header_64*>(header),
- SEG_TEXT,
- SECT_TEXT,
- &size);
-#else
- unsigned int size;
- char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
-#endif
- if (code_ptr == NULL) continue;
- const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
- const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
- result.push_back(
- SharedLibraryAddress(_dyld_get_image_name(i), start, start + size));
- }
- return result;
-}
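// Note on the dyld-based enumeration above: getsectdatafromheader{,_64}()
// reports the __TEXT,__text address as recorded in the Mach-O image, so the
// ASLR slide from _dyld_get_image_vmaddr_slide() is added to obtain the
// range the code actually occupies at run time.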
-
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
- if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset(TimezoneCache* cache) {
- time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
- // tm_gmtoff includes any daylight savings offset, so subtract it.
- return static_cast<double>(t->tm_gmtoff * msPerSecond -
- (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
- : address_(NULL), size_(0) {
- ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(OS::GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
-
- uint8_t* base = static_cast<uint8_t*>(reservation);
- uint8_t* aligned_base = RoundUp(base, alignment);
- ASSERT_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- ASSERT_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- ASSERT(aligned_size == request_size);
-
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- ASSERT(result);
- USE(result);
- }
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- OS::Guard(address, OS::CommitPageSize());
- return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* address,
- size_t size,
- bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address,
- size,
- prot,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
- return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* address, size_t size) {
- return mmap(address,
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
- return munmap(address, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
- return false;
-}
-
-} } // namespace v8::internal
+++ /dev/null
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Platform-specific code for OpenBSD and NetBSD goes here. For the
-// POSIX-compatible parts, the implementation is in platform-posix.cc.
-
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <stdlib.h>
-#include <sys/resource.h>
-#include <sys/syscall.h>
-#include <sys/time.h>
-#include <sys/types.h>
-
-#include <errno.h>
-#include <fcntl.h> // open
-#include <stdarg.h>
-#include <strings.h> // index
-#include <sys/mman.h> // mmap & munmap
-#include <sys/stat.h> // open
-#include <sys/types.h> // mmap & munmap
-#include <unistd.h> // sysconf
-
-#include <cmath>
-
-#undef MAP_TYPE
-
-#include "src/platform.h"
-#include "src/utils.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
- if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset(TimezoneCache* cache) {
- time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
- // tm_gmtoff includes any daylight savings offset, so subtract it.
- return static_cast<double>(t->tm_gmtoff * msPerSecond -
- (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
- const size_t msize = RoundUp(requested, AllocateAlignment());
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* addr = OS::GetRandomMmapAddr();
- void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
- if (mbase == MAP_FAILED) return NULL;
- *allocated = msize;
- return mbase;
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "r+");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
-
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) OS::Free(memory_, size_);
- fclose(file_);
-}
-
-
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
- std::vector<SharedLibraryAddress> result;
- // This function assumes that the layout of the file is as follows:
- // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
- // If we encounter an unexpected situation we abort scanning further entries.
- FILE* fp = fopen("/proc/self/maps", "r");
- if (fp == NULL) return result;
-
- // Allocate enough room to be able to store a full file name.
- const int kLibNameLen = FILENAME_MAX + 1;
- char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
-
- // This loop will terminate once the scanning hits an EOF.
- while (true) {
- uintptr_t start, end;
- char attr_r, attr_w, attr_x, attr_p;
- // Parse the addresses and permission bits at the beginning of the line.
- if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
- if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
-
- int c;
- if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
- // Found a read-only executable entry. Skip characters until we reach
- // the beginning of the filename or the end of the line.
- do {
- c = getc(fp);
- } while ((c != EOF) && (c != '\n') && (c != '/'));
- if (c == EOF) break; // EOF: Was unexpected, just exit.
-
- // Process the filename if found.
- if (c == '/') {
- ungetc(c, fp); // Push the '/' back into the stream to be read below.
-
- // Read to the end of the line. Exit if the read fails.
- if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
-
- // Drop the newline character read by fgets. We do not need to check
- // for a zero-length string because we know that we at least read the
- // '/' character.
- lib_name[strlen(lib_name) - 1] = '\0';
- } else {
- // No library name found, just record the raw address range.
- snprintf(lib_name, kLibNameLen,
- "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
- }
- result.push_back(SharedLibraryAddress(lib_name, start, end));
- } else {
- // Entry not describing executable data. Skip to end of line to set up
- // reading the next entry.
- do {
- c = getc(fp);
- } while ((c != EOF) && (c != '\n'));
- if (c == EOF) break;
- }
- }
- free(lib_name);
- fclose(fp);
- return result;
-}
-
-
-void OS::SignalCodeMovingGC() {
- // Support for ll_prof.py.
- //
- // The Linux profiler built into the kernel logs all mmap's with
- // PROT_EXEC so that analysis tools can properly attribute ticks. We
- // do a mmap with a name known by ll_prof.py and immediately munmap
- // it. This injects a GC marker into the stream of events generated
-// by the kernel and allows us to synchronize the V8 code log and the
- // kernel log.
- int size = sysconf(_SC_PAGESIZE);
- FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+");
- if (f == NULL) {
- OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
- OS::Abort();
- }
- void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
- fileno(f), 0);
- ASSERT(addr != MAP_FAILED);
- OS::Free(addr, size);
- fclose(f);
-}
-
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
- : address_(NULL), size_(0) {
- ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(OS::GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
-
- uint8_t* base = static_cast<uint8_t*>(reservation);
- uint8_t* aligned_base = RoundUp(base, alignment);
- ASSERT_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- ASSERT_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- ASSERT(aligned_size == request_size);
-
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- ASSERT(result);
- USE(result);
- }
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- OS::Guard(address, OS::CommitPageSize());
- return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(base,
- size,
- prot,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
- return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return mmap(base,
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
-} } // namespace v8::internal
+++ /dev/null
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Platform-specific code for POSIX goes here. This is not a platform on its
-// own, but contains the parts which are the same across the POSIX platforms
-// Linux, MacOS, FreeBSD, OpenBSD, NetBSD and QNX.
-
-#include <dlfcn.h>
-#include <errno.h>
-#include <limits.h>
-#include <pthread.h>
-#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
-#include <pthread_np.h> // for pthread_set_name_np
-#endif
-#include <sched.h> // for sched_yield
-#include <time.h>
-#include <unistd.h>
-
-#include <sys/mman.h>
-#include <sys/resource.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <sys/types.h>
-
-#if defined(__linux__)
-#include <sys/prctl.h> // NOLINT, for prctl
-#endif
-#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
- defined(__NetBSD__) || defined(__OpenBSD__)
-#include <sys/sysctl.h> // NOLINT, for sysctl
-#endif
-
-#include <arpa/inet.h>
-#include <netdb.h>
-#include <netinet/in.h>
-
-#undef MAP_TYPE
-
-#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
-#define LOG_TAG "v8"
-#include <android/log.h> // NOLINT
-#endif
-
-#include <cmath>
-#include <cstdlib>
-
-#include "src/base/lazy-instance.h"
-#include "src/base/macros.h"
-#include "src/platform.h"
-#include "src/platform/time.h"
-#include "src/utils/random-number-generator.h"
-
-#ifdef V8_FAST_TLS_SUPPORTED
-#include "src/base/atomicops.h"
-#endif
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-// 0 is never a valid thread id.
-const pthread_t kNoThread = (pthread_t) 0;
-
-bool g_hard_abort = false;
-
-const char* g_gc_fake_mmap = NULL;
-
-} // namespace
-
-
-int OS::NumberOfProcessorsOnline() {
- return static_cast<int>(sysconf(_SC_NPROCESSORS_ONLN));
-}
-
-
-// Maximum size of the virtual memory. 0 means there is no artificial
-// limit.
-
-intptr_t OS::MaxVirtualMemory() {
- struct rlimit limit;
- int result = getrlimit(RLIMIT_DATA, &limit);
- if (result != 0) return 0;
-#if V8_OS_NACL
- // The NaCl compiler doesn't like resource.h constants.
- if (static_cast<int>(limit.rlim_cur) == -1) return 0;
-#else
- if (limit.rlim_cur == RLIM_INFINITY) return 0;
-#endif
- return limit.rlim_cur;
-}
-
-
-uint64_t OS::TotalPhysicalMemory() {
-#if V8_OS_MACOSX
- int mib[2];
- mib[0] = CTL_HW;
- mib[1] = HW_MEMSIZE;
- int64_t size = 0;
- size_t len = sizeof(size);
- if (sysctl(mib, 2, &size, &len, NULL, 0) != 0) {
- UNREACHABLE();
- return 0;
- }
- return static_cast<uint64_t>(size);
-#elif V8_OS_FREEBSD
- int pages, page_size;
- size_t size = sizeof(pages);
- sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, NULL, 0);
- sysctlbyname("vm.stats.vm.v_page_size", &page_size, &size, NULL, 0);
- if (pages == -1 || page_size == -1) {
- UNREACHABLE();
- return 0;
- }
- return static_cast<uint64_t>(pages) * page_size;
-#elif V8_OS_CYGWIN
- MEMORYSTATUS memory_info;
- memory_info.dwLength = sizeof(memory_info);
- if (!GlobalMemoryStatus(&memory_info)) {
- UNREACHABLE();
- return 0;
- }
- return static_cast<uint64_t>(memory_info.dwTotalPhys);
-#elif V8_OS_QNX
- struct stat stat_buf;
- if (stat("/proc", &stat_buf) != 0) {
- UNREACHABLE();
- return 0;
- }
- return static_cast<uint64_t>(stat_buf.st_size);
-#else
- intptr_t pages = sysconf(_SC_PHYS_PAGES);
- intptr_t page_size = sysconf(_SC_PAGESIZE);
- if (pages == -1 || page_size == -1) {
- UNREACHABLE();
- return 0;
- }
- return static_cast<uint64_t>(pages) * page_size;
-#endif
-}
-
-
-int OS::ActivationFrameAlignment() {
-#if V8_TARGET_ARCH_ARM
- // On EABI ARM targets this is required for fp correctness in the
- // runtime system.
- return 8;
-#elif V8_TARGET_ARCH_MIPS
- return 8;
-#else
- // Otherwise we just assume 16 byte alignment, i.e.:
- // - With gcc 4.4 the tree vectorization optimizer can generate code
- // that requires 16 byte alignment such as movdqa on x86.
- // - Mac OS X and Solaris (64-bit) activation frames must be 16 byte-aligned;
- // see "Mac OS X ABI Function Call Guide"
- return 16;
-#endif
-}
-
-
-intptr_t OS::CommitPageSize() {
- static intptr_t page_size = getpagesize();
- return page_size;
-}
-
-
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(address, size);
- USE(result);
- ASSERT(result == 0);
-}
-
-
-// Get rid of writable permission on code allocations.
-void OS::ProtectCode(void* address, const size_t size) {
-#if V8_OS_CYGWIN
- DWORD old_protect;
- VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
-#elif V8_OS_NACL
- // The Native Client port of V8 uses an interpreter, so
- // code pages don't need PROT_EXEC.
- mprotect(address, size, PROT_READ);
-#else
- mprotect(address, size, PROT_READ | PROT_EXEC);
-#endif
-}
-
-
-// Create guard pages.
-void OS::Guard(void* address, const size_t size) {
-#if V8_OS_CYGWIN
- DWORD oldprotect;
- VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
-#else
- mprotect(address, size, PROT_NONE);
-#endif
-}
-
-
-static base::LazyInstance<RandomNumberGenerator>::type
- platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
-
-
-void OS::Initialize(int64_t random_seed, bool hard_abort,
- const char* const gc_fake_mmap) {
- if (random_seed) {
- platform_random_number_generator.Pointer()->SetSeed(random_seed);
- }
- g_hard_abort = hard_abort;
- g_gc_fake_mmap = gc_fake_mmap;
-}
-
-
-const char* OS::GetGCFakeMMapFile() {
- return g_gc_fake_mmap;
-}
-
-
-void* OS::GetRandomMmapAddr() {
-#if V8_OS_NACL
- // TODO(bradchen): restore randomization once Native Client gets
- // smarter about using mmap address hints.
- // See http://code.google.com/p/nativeclient/issues/3341
- return NULL;
-#endif
-#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
- defined(THREAD_SANITIZER)
- // Dynamic tools do not support custom mmap addresses.
- return NULL;
-#endif
- uintptr_t raw_addr;
- platform_random_number_generator.Pointer()->NextBytes(&raw_addr,
- sizeof(raw_addr));
-#if V8_TARGET_ARCH_X64
- // Currently available CPUs have 48 bits of virtual addressing. Truncate
- // the hint address to 46 bits to give the kernel a fighting chance of
- // fulfilling our placement request.
- raw_addr &= V8_UINT64_C(0x3ffffffff000);
-#else
- raw_addr &= 0x3ffff000;
-
-# ifdef __sun
- // For our Solaris/illumos mmap hint, we pick a random address in the bottom
- // half of the top half of the address space (that is, the third quarter).
- // Because we do not MAP_FIXED, this will be treated only as a hint -- the
- // system will not fail to mmap() because something else happens to already
- // be mapped at our random address. We deliberately set the hint high enough
- // to get well above the system's break (that is, the heap); Solaris and
- // illumos will try the hint and if that fails allocate as if there were
- // no hint at all. The high hint prevents the break from getting hemmed in
- // at low values, ceding half of the address space to the system heap.
- raw_addr += 0x80000000;
-# else
- // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
- // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
- // 10.6 and 10.7.
- raw_addr += 0x20000000;
-# endif
-#endif
- return reinterpret_cast<void*>(raw_addr);
-}
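// A small sketch of the x64 masking above: 0x3ffffffff000 equals
// 2^46 - 2^12, so the hint is always page-aligned and confined to a 46-bit
// range, well inside the 48-bit user-space addressing noted in the comment.
#include <cstdint>
static_assert((UINT64_C(0x3ffffffff000) & UINT64_C(0xfff)) == 0,
              "hint stays 4 KB page-aligned");
static_assert(UINT64_C(0x3ffffffff000) == (UINT64_C(1) << 46) - (1 << 12),
              "mask keeps only bits 12..45");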
-
-
-size_t OS::AllocateAlignment() {
- return static_cast<size_t>(sysconf(_SC_PAGESIZE));
-}
-
-
-void OS::Sleep(int milliseconds) {
- useconds_t ms = static_cast<useconds_t>(milliseconds);
- usleep(1000 * ms);
-}
-
-
-void OS::Abort() {
- if (g_hard_abort) {
- V8_IMMEDIATE_CRASH();
- }
- // Redirect to std abort to signal abnormal program termination.
- abort();
-}
-
-
-void OS::DebugBreak() {
-#if V8_HOST_ARCH_ARM
- asm("bkpt 0");
-#elif V8_HOST_ARCH_ARM64
- asm("brk 0");
-#elif V8_HOST_ARCH_MIPS
- asm("break");
-#elif V8_HOST_ARCH_IA32
-#if defined(__native_client__)
- asm("hlt");
-#else
- asm("int $3");
-#endif // __native_client__
-#elif V8_HOST_ARCH_X64
- asm("int $3");
-#else
-#error Unsupported host architecture.
-#endif
-}
-
-
-// ----------------------------------------------------------------------------
-// Math functions
-
-double OS::nan_value() {
- // NAN from math.h is defined in C99 and not in POSIX.
- return NAN;
-}
-
-
-int OS::GetCurrentProcessId() {
- return static_cast<int>(getpid());
-}
-
-
-// ----------------------------------------------------------------------------
-// POSIX date/time support.
-//
-
-int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
- struct rusage usage;
-
- if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
- *secs = usage.ru_utime.tv_sec;
- *usecs = usage.ru_utime.tv_usec;
- return 0;
-}
-
-
-double OS::TimeCurrentMillis() {
- return Time::Now().ToJsTime();
-}
-
-
-class TimezoneCache {};
-
-
-TimezoneCache* OS::CreateTimezoneCache() {
- return NULL;
-}
-
-
-void OS::DisposeTimezoneCache(TimezoneCache* cache) {
- ASSERT(cache == NULL);
-}
-
-
-void OS::ClearTimezoneCache(TimezoneCache* cache) {
- ASSERT(cache == NULL);
-}
-
-
-double OS::DaylightSavingsOffset(double time, TimezoneCache*) {
- if (std::isnan(time)) return nan_value();
- time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return nan_value();
- return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
-}
-
-
-int OS::GetLastError() {
- return errno;
-}
-
-
-// ----------------------------------------------------------------------------
-// POSIX stdio support.
-//
-
-FILE* OS::FOpen(const char* path, const char* mode) {
- FILE* file = fopen(path, mode);
- if (file == NULL) return NULL;
- struct stat file_stat;
- if (fstat(fileno(file), &file_stat) != 0) return NULL;
- bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
- if (is_regular_file) return file;
- fclose(file);
- return NULL;
-}
-
-
-bool OS::Remove(const char* path) {
- return (remove(path) == 0);
-}
-
-
-FILE* OS::OpenTemporaryFile() {
- return tmpfile();
-}
-
-
-const char* const OS::LogFileOpenMode = "w";
-
-
-void OS::Print(const char* format, ...) {
- va_list args;
- va_start(args, format);
- VPrint(format, args);
- va_end(args);
-}
-
-
-void OS::VPrint(const char* format, va_list args) {
-#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
- __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
-#else
- vprintf(format, args);
-#endif
-}
-
-
-void OS::FPrint(FILE* out, const char* format, ...) {
- va_list args;
- va_start(args, format);
- VFPrint(out, format, args);
- va_end(args);
-}
-
-
-void OS::VFPrint(FILE* out, const char* format, va_list args) {
-#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
- __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
-#else
- vfprintf(out, format, args);
-#endif
-}
-
-
-void OS::PrintError(const char* format, ...) {
- va_list args;
- va_start(args, format);
- VPrintError(format, args);
- va_end(args);
-}
-
-
-void OS::VPrintError(const char* format, va_list args) {
-#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
- __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args);
-#else
- vfprintf(stderr, format, args);
-#endif
-}
-
-
-int OS::SNPrintF(char* str, int length, const char* format, ...) {
- va_list args;
- va_start(args, format);
- int result = VSNPrintF(str, length, format, args);
- va_end(args);
- return result;
-}
-
-
-int OS::VSNPrintF(char* str,
- int length,
- const char* format,
- va_list args) {
- int n = vsnprintf(str, length, format, args);
- if (n < 0 || n >= length) {
-    // If length is zero there is no room for the terminating '\0'.
- if (length > 0)
- str[length - 1] = '\0';
- return -1;
- } else {
- return n;
- }
-}
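// Usage sketch of the truncation contract above (buffer contents invented):
// OS::SNPrintF(buf, 4, "%s", "abcdef") returns -1 and leaves buf == "abc",
// i.e. on overflow the output is cut short but always NUL-terminated.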
-
-
-// ----------------------------------------------------------------------------
-// POSIX string support.
-//
-
-char* OS::StrChr(char* str, int c) {
- return strchr(str, c);
-}
-
-
-void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
- strncpy(dest, src, n);
-}
-
-
-// ----------------------------------------------------------------------------
-// POSIX thread support.
-//
-
-class Thread::PlatformData {
- public:
- PlatformData() : thread_(kNoThread) {}
- pthread_t thread_; // Thread handle for pthread.
- // Synchronizes thread creation
- Mutex thread_creation_mutex_;
-};
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData),
- stack_size_(options.stack_size()),
- start_semaphore_(NULL) {
- if (stack_size_ > 0 && stack_size_ < PTHREAD_STACK_MIN) {
- stack_size_ = PTHREAD_STACK_MIN;
- }
- set_name(options.name());
-}
-
-
-Thread::~Thread() {
- delete data_;
-}
-
-
-static void SetThreadName(const char* name) {
-#if V8_OS_DRAGONFLYBSD || V8_OS_FREEBSD || V8_OS_OPENBSD
- pthread_set_name_np(pthread_self(), name);
-#elif V8_OS_NETBSD
- STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP);
- pthread_setname_np(pthread_self(), "%s", name);
-#elif V8_OS_MACOSX
- // pthread_setname_np is only available in 10.6 or later, so test
- // for it at runtime.
- int (*dynamic_pthread_setname_np)(const char*);
- *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
- dlsym(RTLD_DEFAULT, "pthread_setname_np");
- if (dynamic_pthread_setname_np == NULL)
- return;
-
- // Mac OS X does not expose the length limit of the name, so hardcode it.
- static const int kMaxNameLength = 63;
- STATIC_ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
- dynamic_pthread_setname_np(name);
-#elif defined(PR_SET_NAME)
- prctl(PR_SET_NAME,
- reinterpret_cast<unsigned long>(name), // NOLINT
- 0, 0, 0);
-#endif
-}
-
-
-static void* ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- // We take the lock here to make sure that pthread_create finished first since
- // we don't know which thread will run first (the original thread or the new
- // one).
- { LockGuard<Mutex> lock_guard(&thread->data()->thread_creation_mutex_); }
- SetThreadName(thread->name());
- ASSERT(thread->data()->thread_ != kNoThread);
- thread->NotifyStartedAndRun();
- return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- int result;
- pthread_attr_t attr;
- memset(&attr, 0, sizeof(attr));
- result = pthread_attr_init(&attr);
- ASSERT_EQ(0, result);
-  // Native Client uses the default stack size.
-#if !V8_OS_NACL
- if (stack_size_ > 0) {
- result = pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
- ASSERT_EQ(0, result);
- }
-#endif
- {
- LockGuard<Mutex> lock_guard(&data_->thread_creation_mutex_);
- result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
- }
- ASSERT_EQ(0, result);
- result = pthread_attr_destroy(&attr);
- ASSERT_EQ(0, result);
- ASSERT(data_->thread_ != kNoThread);
- USE(result);
-}
-
-
-void Thread::Join() {
- pthread_join(data_->thread_, NULL);
-}
-
-
-void Thread::YieldCPU() {
- int result = sched_yield();
- ASSERT_EQ(0, result);
- USE(result);
-}
-
-
-static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
-#if V8_OS_CYGWIN
- // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
- // because pthread_key_t is a pointer type on Cygwin. This will probably not
- // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
- STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
- intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
- return static_cast<Thread::LocalStorageKey>(ptr_key);
-#else
- return static_cast<Thread::LocalStorageKey>(pthread_key);
-#endif
-}
-
-
-static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) {
-#if V8_OS_CYGWIN
- STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
- intptr_t ptr_key = static_cast<intptr_t>(local_key);
- return reinterpret_cast<pthread_key_t>(ptr_key);
-#else
- return static_cast<pthread_key_t>(local_key);
-#endif
-}
-
-
-#ifdef V8_FAST_TLS_SUPPORTED
-
-static base::Atomic32 tls_base_offset_initialized = 0;
-intptr_t kMacTlsBaseOffset = 0;
-
-// It's safe to do the initialization more than once, but it has to be
-// done at least once.
-static void InitializeTlsBaseOffset() {
- const size_t kBufferSize = 128;
- char buffer[kBufferSize];
- size_t buffer_size = kBufferSize;
-  int ctl_name[] = { CTL_KERN, KERN_OSRELEASE };
- if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
- V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
- }
- // The buffer now contains a string of the form XX.YY.ZZ, where
- // XX is the major kernel version component.
- // Make sure the buffer is 0-terminated.
- buffer[kBufferSize - 1] = '\0';
- char* period_pos = strchr(buffer, '.');
- *period_pos = '\0';
- int kernel_version_major =
- static_cast<int>(strtol(buffer, NULL, 10)); // NOLINT
- // The constants below are taken from pthreads.s from the XNU kernel
- // sources archive at www.opensource.apple.com.
- if (kernel_version_major < 11) {
- // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
- // same offsets.
-#if V8_HOST_ARCH_IA32
- kMacTlsBaseOffset = 0x48;
-#else
- kMacTlsBaseOffset = 0x60;
-#endif
- } else {
- // 11.x.x (Lion) changed the offset.
- kMacTlsBaseOffset = 0;
- }
-
- base::Release_Store(&tls_base_offset_initialized, 1);
-}
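// A stand-alone sketch of the version parsing above; DarwinKernelMajor is a
// hypothetical helper used only for illustration. KERN_OSRELEASE yields a
// Darwin kernel version string such as "11.4.2"; only the major component
// matters: 10.x.x is Snow Leopard (pre-Lion offsets), 11.x.x and up is Lion
// or later (offset 0).
#include <cstdlib>
#include <cstring>

static int DarwinKernelMajor(const char* release) {
  char buf[16];
  std::strncpy(buf, release, sizeof(buf) - 1);
  buf[sizeof(buf) - 1] = '\0';
  if (char* dot = std::strchr(buf, '.')) *dot = '\0';
  return static_cast<int>(std::strtol(buf, NULL, 10));
}
// DarwinKernelMajor("10.8.0") == 10, DarwinKernelMajor("11.4.2") == 11.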
-
-
-static void CheckFastTls(Thread::LocalStorageKey key) {
- void* expected = reinterpret_cast<void*>(0x1234CAFE);
- Thread::SetThreadLocal(key, expected);
- void* actual = Thread::GetExistingThreadLocal(key);
- if (expected != actual) {
- V8_Fatal(__FILE__, __LINE__,
- "V8 failed to initialize fast TLS on current kernel");
- }
- Thread::SetThreadLocal(key, NULL);
-}
-
-#endif // V8_FAST_TLS_SUPPORTED
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
-#ifdef V8_FAST_TLS_SUPPORTED
- bool check_fast_tls = false;
- if (tls_base_offset_initialized == 0) {
- check_fast_tls = true;
- InitializeTlsBaseOffset();
- }
-#endif
- pthread_key_t key;
- int result = pthread_key_create(&key, NULL);
- ASSERT_EQ(0, result);
- USE(result);
- LocalStorageKey local_key = PthreadKeyToLocalKey(key);
-#ifdef V8_FAST_TLS_SUPPORTED
- // If we just initialized fast TLS support, make sure it works.
- if (check_fast_tls) CheckFastTls(local_key);
-#endif
- return local_key;
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
- int result = pthread_key_delete(pthread_key);
- ASSERT_EQ(0, result);
- USE(result);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
- return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
- int result = pthread_setspecific(pthread_key, value);
- ASSERT_EQ(0, result);
- USE(result);
-}
-
-
-} } // namespace v8::internal
+++ /dev/null
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Platform-specific code for QNX goes here. For the POSIX-compatible
-// parts the implementation is in platform-posix.cc.
-
-#include <backtrace.h>
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <stdlib.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <ucontext.h>
-
-// QNX requires memory pages to be marked as executable.
-// Otherwise, the OS raises an exception when executing code in that page.
-#include <errno.h>
-#include <fcntl.h> // open
-#include <stdarg.h>
-#include <strings.h> // index
-#include <sys/mman.h> // mmap & munmap
-#include <sys/procfs.h>
-#include <sys/stat.h> // open
-#include <sys/types.h> // mmap & munmap
-#include <unistd.h> // sysconf
-
-#include <cmath>
-
-#undef MAP_TYPE
-
-#include "src/platform.h"
-#include "src/utils.h"
-
-
-namespace v8 {
-namespace internal {
-
-// 0 is never a valid thread id on QNX since tids and pids share a
-// namespace and pid 0 is reserved (see man 2 kill).
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-#ifdef __arm__
-
-bool OS::ArmUsingHardFloat() {
- // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
- // the Floating Point ABI used (PCS stands for Procedure Call Standard).
- // We use these as well as a couple of other defines to statically determine
-// what FP ABI is used.
- // GCC versions 4.4 and below don't support hard-fp.
-// GCC version 4.5 may support hard-fp without defining __ARM_PCS or
- // __ARM_PCS_VFP.
-
-#define GCC_VERSION (__GNUC__ * 10000 \
- + __GNUC_MINOR__ * 100 \
- + __GNUC_PATCHLEVEL__)
-#if GCC_VERSION >= 40600
-#if defined(__ARM_PCS_VFP)
- return true;
-#else
- return false;
-#endif
-
-#elif GCC_VERSION < 40500
- return false;
-
-#else
-#if defined(__ARM_PCS_VFP)
- return true;
-#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
- !defined(__VFP_FP__)
- return false;
-#else
-#error "Your version of GCC does not report the FP ABI compiled for." \
- "Please report it on this issue" \
- "http://code.google.com/p/v8/issues/detail?id=2140"
-
-#endif
-#endif
-#undef GCC_VERSION
-}
-
-#endif // __arm__
-
-
-const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
- if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset(TimezoneCache* cache) {
- time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
- // tm_gmtoff includes any daylight savings offset, so subtract it.
- return static_cast<double>(t->tm_gmtoff * msPerSecond -
- (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
- const size_t msize = RoundUp(requested, AllocateAlignment());
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* addr = OS::GetRandomMmapAddr();
- void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (mbase == MAP_FAILED) return NULL;
- *allocated = msize;
- return mbase;
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "r+");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
-
- void* memory =
- mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- fileno(file),
- 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- fileno(file),
- 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) OS::Free(memory_, size_);
- fclose(file_);
-}
-
-
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
- std::vector<SharedLibraryAddress> result;
- procfs_mapinfo *mapinfos = NULL, *mapinfo;
- int proc_fd, num, i;
-
- struct {
- procfs_debuginfo info;
- char buff[PATH_MAX];
- } map;
-
- char buf[PATH_MAX + 1];
- snprintf(buf, PATH_MAX + 1, "/proc/%d/as", getpid());
-
- if ((proc_fd = open(buf, O_RDONLY)) == -1) {
- close(proc_fd);
- return result;
- }
-
- /* Get the number of map entries. */
- if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) {
- close(proc_fd);
- return result;
- }
-
- mapinfos = reinterpret_cast<procfs_mapinfo *>(
- malloc(num * sizeof(procfs_mapinfo)));
- if (mapinfos == NULL) {
- close(proc_fd);
- return result;
- }
-
- /* Fill the map entries. */
- if (devctl(proc_fd, DCMD_PROC_PAGEDATA,
- mapinfos, num * sizeof(procfs_mapinfo), &num) != EOK) {
- free(mapinfos);
- close(proc_fd);
- return result;
- }
-
- for (i = 0; i < num; i++) {
- mapinfo = mapinfos + i;
- if (mapinfo->flags & MAP_ELF) {
- map.info.vaddr = mapinfo->vaddr;
- if (devctl(proc_fd, DCMD_PROC_MAPDEBUG, &map, sizeof(map), 0) != EOK) {
- continue;
- }
- result.push_back(SharedLibraryAddress(
- map.info.path, mapinfo->vaddr, mapinfo->vaddr + mapinfo->size));
- }
- }
- free(mapinfos);
- close(proc_fd);
- return result;
-}
-
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
- : address_(NULL), size_(0) {
- ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(OS::GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
- kMmapFd,
- kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
-
- uint8_t* base = static_cast<uint8_t*>(reservation);
- uint8_t* aligned_base = RoundUp(base, alignment);
- ASSERT_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- ASSERT_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- ASSERT(aligned_size == request_size);
-
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- ASSERT(result);
- USE(result);
- }
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- OS::Guard(address, OS::CommitPageSize());
- return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
- kMmapFd,
- kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(base,
- size,
- prot,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
-
- return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return mmap(base,
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY,
- kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
- return false;
-}
-
-} } // namespace v8::internal
+++ /dev/null
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Platform-specific code for Solaris 10 goes here. For the POSIX-compatible
-// parts, the implementation is in platform-posix.cc.
-
-#ifdef __sparc
-# error "V8 does not support the SPARC CPU architecture."
-#endif
-
-#include <dlfcn.h> // dladdr
-#include <errno.h>
-#include <ieeefp.h> // finite()
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h> // sigemptyset(), etc
-#include <sys/mman.h> // mmap()
-#include <sys/regset.h>
-#include <sys/stack.h> // for stack alignment
-#include <sys/time.h> // gettimeofday(), timeradd()
-#include <time.h>
-#include <ucontext.h> // walkstack(), getcontext()
-#include <unistd.h> // getpagesize(), usleep()
-
-#include <cmath>
-
-#undef MAP_TYPE
-
-#include "src/platform.h"
-#include "src/utils.h"
-
-
-// It seems there is a bug in some Solaris distributions (experienced in
-// SunOS 5.10 Generic_141445-09) which makes it difficult or impossible to
-// access signbit() despite the availability of other C99 math functions.
-#ifndef signbit
-namespace std {
-// Test sign - usually defined in math.h
-int signbit(double x) {
- // We need to take care of the special case of both positive and negative
- // versions of zero.
- if (x == 0) {
- return fpclass(x) & FP_NZERO;
- } else {
- // This won't detect negative NaN but that should be okay since we don't
- // assume that behavior.
- return x < 0;
- }
-}
-} // namespace std
-#endif // signbit
-
-namespace v8 {
-namespace internal {
-
-
-const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
- if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return tzname[0]; // The location of the timezone string on Solaris.
-}
-
-
-double OS::LocalTimeOffset(TimezoneCache* cache) {
- tzset();
- return -static_cast<double>(timezone * msPerSecond);
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
- const size_t msize = RoundUp(requested, getpagesize());
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
-
- if (mbase == MAP_FAILED) return NULL;
- *allocated = msize;
- return mbase;
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
- PosixMemoryMappedFile(FILE* file, void* memory, int size)
- : file_(file), memory_(memory), size_(size) { }
- virtual ~PosixMemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- FILE* file_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "r+");
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
-
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- FILE* file = fopen(name, "w+");
- if (file == NULL) return NULL;
- int result = fwrite(initial, size, 1, file);
- if (result < 1) {
- fclose(file);
- return NULL;
- }
- void* memory =
- mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
- return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
- fclose(file_);
-}
-
-
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
- return std::vector<SharedLibraryAddress>();
-}
-
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
- : address_(NULL), size_(0) {
- ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(OS::GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
- if (reservation == MAP_FAILED) return;
-
- uint8_t* base = static_cast<uint8_t*>(reservation);
- uint8_t* aligned_base = RoundUp(base, alignment);
- ASSERT_LE(base, aligned_base);
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- OS::Free(base, prefix_size);
- request_size -= prefix_size;
- }
-
- size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
- ASSERT_LE(aligned_size, request_size);
-
- if (aligned_size != request_size) {
- size_t suffix_size = request_size - aligned_size;
- OS::Free(aligned_base + aligned_size, suffix_size);
- request_size -= suffix_size;
- }
-
- ASSERT(aligned_size == request_size);
-
- address_ = static_cast<void*>(aligned_base);
- size_ = aligned_size;
-}
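For orientation, a standalone sketch of the alignment arithmetic used by the constructor above (the base address and alignment below are invented; this is not V8 code):

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t alignment = 0x10000;   // hypothetical 64 KB alignment
  const uintptr_t base = 0x12345000;     // pretend mmap() returned this base
  // Round the base up to the alignment, as RoundUp(base, alignment) does.
  uintptr_t aligned_base = (base + alignment - 1) & ~(alignment - 1);
  uintptr_t prefix_size = aligned_base - base;  // bytes unmapped before block
  std::printf("base %#lx rounds up to %#lx; a %#lx byte prefix is freed\n",
              static_cast<unsigned long>(base),
              static_cast<unsigned long>(aligned_base),
              static_cast<unsigned long>(prefix_size));
  return 0;
}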
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- ASSERT(result);
- USE(result);
- }
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- OS::Guard(address, OS::CommitPageSize());
- return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
-
- if (result == MAP_FAILED) return NULL;
-
- return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(base,
- size,
- prot,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset)) {
- return false;
- }
- return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return mmap(base,
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
- kMmapFd,
- kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
-} } // namespace v8::internal
+++ /dev/null
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Platform-specific code for Win32.
-
-// Secure API functions are not available using MinGW with msvcrt.dll
-// on Windows XP. Make sure MINGW_HAS_SECURE_API is not defined to
-// disable definition of secure API functions in standard headers that
-// would conflict with our own implementation.
-#ifdef __MINGW32__
-#include <_mingw.h>
-#ifdef MINGW_HAS_SECURE_API
-#undef MINGW_HAS_SECURE_API
-#endif // MINGW_HAS_SECURE_API
-#endif // __MINGW32__
-
-#include "src/base/win32-headers.h"
-
-#include "src/base/lazy-instance.h"
-#include "src/platform.h"
-#include "src/platform/time.h"
-#include "src/utils.h"
-#include "src/utils/random-number-generator.h"
-
-#ifdef _MSC_VER
-
-// Case-insensitive bounded string comparisons. Use _strnicmp() on Win32. Usually
-// defined in strings.h.
-int strncasecmp(const char* s1, const char* s2, int n) {
- return _strnicmp(s1, s2, n);
-}
-
-#endif // _MSC_VER
-
-
-// Extra functions for MinGW. Most of these are the _s functions which are in
-// the Microsoft Visual Studio C++ CRT.
-#ifdef __MINGW32__
-
-
-#ifndef __MINGW64_VERSION_MAJOR
-
-#define _TRUNCATE 0
-#define STRUNCATE 80
-
-inline void MemoryBarrier() {
- int barrier = 0;
- __asm__ __volatile__("xchgl %%eax,%0 ":"=r" (barrier));
-}
-
-#endif // __MINGW64_VERSION_MAJOR
-
-
-int localtime_s(tm* out_tm, const time_t* time) {
- tm* posix_local_time_struct = localtime(time);
- if (posix_local_time_struct == NULL) return 1;
- *out_tm = *posix_local_time_struct;
- return 0;
-}
-
-
-int fopen_s(FILE** pFile, const char* filename, const char* mode) {
- *pFile = fopen(filename, mode);
- return *pFile != NULL ? 0 : 1;
-}
-
-int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
- const char* format, va_list argptr) {
- ASSERT(count == _TRUNCATE);
- return _vsnprintf(buffer, sizeOfBuffer, format, argptr);
-}
-
-
-int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) {
- CHECK(source != NULL);
- CHECK(dest != NULL);
- CHECK_GT(dest_size, 0);
-
- if (count == _TRUNCATE) {
- while (dest_size > 0 && *source != 0) {
- *(dest++) = *(source++);
- --dest_size;
- }
- if (dest_size == 0) {
- *(dest - 1) = 0;
- return STRUNCATE;
- }
- } else {
- while (dest_size > 0 && count > 0 && *source != 0) {
- *(dest++) = *(source++);
- --dest_size;
- --count;
- }
- }
- CHECK_GT(dest_size, 0);
- *dest = 0;
- return 0;
-}
-
-#endif // __MINGW32__
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-bool g_hard_abort = false;
-
-} // namespace
-
-intptr_t OS::MaxVirtualMemory() {
- return 0;
-}
-
-
-class TimezoneCache {
- public:
- TimezoneCache() : initialized_(false) { }
-
- void Clear() {
- initialized_ = false;
- }
-
- // Initialize timezone information. The timezone information is obtained from
-  // Windows. If we cannot get the timezone information we fall back to CET.
- void InitializeIfNeeded() {
- // Just return if timezone information has already been initialized.
- if (initialized_) return;
-
- // Initialize POSIX time zone data.
- _tzset();
- // Obtain timezone information from operating system.
- memset(&tzinfo_, 0, sizeof(tzinfo_));
- if (GetTimeZoneInformation(&tzinfo_) == TIME_ZONE_ID_INVALID) {
- // If we cannot get timezone information we fall back to CET.
- tzinfo_.Bias = -60;
- tzinfo_.StandardDate.wMonth = 10;
- tzinfo_.StandardDate.wDay = 5;
- tzinfo_.StandardDate.wHour = 3;
- tzinfo_.StandardBias = 0;
- tzinfo_.DaylightDate.wMonth = 3;
- tzinfo_.DaylightDate.wDay = 5;
- tzinfo_.DaylightDate.wHour = 2;
- tzinfo_.DaylightBias = -60;
- }
-
- // Make standard and DST timezone names.
- WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1,
- std_tz_name_, kTzNameSize, NULL, NULL);
- std_tz_name_[kTzNameSize - 1] = '\0';
- WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1,
- dst_tz_name_, kTzNameSize, NULL, NULL);
- dst_tz_name_[kTzNameSize - 1] = '\0';
-
-    // If the OS returned an empty string or a resource id (like
-    // "@tzres.dll,-211"), simply guess the name from the UTC bias of the
-    // timezone.
- // To properly resolve the resource identifier requires a library load,
- // which is not possible in a sandbox.
- if (std_tz_name_[0] == '\0' || std_tz_name_[0] == '@') {
- OS::SNPrintF(std_tz_name_, kTzNameSize - 1,
- "%s Standard Time",
- GuessTimezoneNameFromBias(tzinfo_.Bias));
- }
- if (dst_tz_name_[0] == '\0' || dst_tz_name_[0] == '@') {
- OS::SNPrintF(dst_tz_name_, kTzNameSize - 1,
- "%s Daylight Time",
- GuessTimezoneNameFromBias(tzinfo_.Bias));
- }
- // Timezone information initialized.
- initialized_ = true;
- }
-
- // Guess the name of the timezone from the bias.
- // The guess is very biased towards the northern hemisphere.
- const char* GuessTimezoneNameFromBias(int bias) {
- static const int kHour = 60;
- switch (-bias) {
- case -9*kHour: return "Alaska";
- case -8*kHour: return "Pacific";
- case -7*kHour: return "Mountain";
- case -6*kHour: return "Central";
- case -5*kHour: return "Eastern";
- case -4*kHour: return "Atlantic";
- case 0*kHour: return "GMT";
- case +1*kHour: return "Central Europe";
- case +2*kHour: return "Eastern Europe";
- case +3*kHour: return "Russia";
- case +5*kHour + 30: return "India";
- case +8*kHour: return "China";
- case +9*kHour: return "Japan";
- case +12*kHour: return "New Zealand";
- default: return "Local";
- }
- }
-
-
- private:
- static const int kTzNameSize = 128;
- bool initialized_;
- char std_tz_name_[kTzNameSize];
- char dst_tz_name_[kTzNameSize];
- TIME_ZONE_INFORMATION tzinfo_;
- friend class Win32Time;
-};
-
-
-// ----------------------------------------------------------------------------
-// The Win32Time class represents time on Win32. A timestamp is represented
-// as a 64-bit integer in units of 100 nanoseconds since January 1, 1601
-// (UTC). JavaScript timestamps are represented as doubles in milliseconds
-// since 00:00:00 UTC, January 1, 1970.
-
-class Win32Time {
- public:
- // Constructors.
- Win32Time();
- explicit Win32Time(double jstime);
- Win32Time(int year, int mon, int day, int hour, int min, int sec);
-
- // Convert timestamp to JavaScript representation.
- double ToJSTime();
-
- // Set timestamp to current time.
- void SetToCurrentTime();
-
- // Returns the local timezone offset in milliseconds east of UTC. This is
- // the number of milliseconds you must add to UTC to get local time, i.e.
- // LocalOffset(CET) = 3600000 and LocalOffset(PST) = -28800000. This
-  // routine also takes into account whether daylight saving is in effect
- // at the time.
- int64_t LocalOffset(TimezoneCache* cache);
-
- // Returns the daylight savings time offset for the time in milliseconds.
- int64_t DaylightSavingsOffset(TimezoneCache* cache);
-
- // Returns a string identifying the current timezone for the
- // timestamp taking into account daylight saving.
- char* LocalTimezone(TimezoneCache* cache);
-
- private:
- // Constants for time conversion.
- static const int64_t kTimeEpoc = 116444736000000000LL;
- static const int64_t kTimeScaler = 10000;
- static const int64_t kMsPerMinute = 60000;
-
- // Constants for timezone information.
- static const bool kShortTzNames = false;
-
- // Return whether or not daylight savings time is in effect at this time.
- bool InDST(TimezoneCache* cache);
-
- // Accessor for FILETIME representation.
- FILETIME& ft() { return time_.ft_; }
-
- // Accessor for integer representation.
- int64_t& t() { return time_.t_; }
-
- // Although win32 uses 64-bit integers for representing timestamps,
- // these are packed into a FILETIME structure. The FILETIME structure
- // is just a struct representing a 64-bit integer. The TimeStamp union
- // allows access to both a FILETIME and an integer representation of
- // the timestamp.
- union TimeStamp {
- FILETIME ft_;
- int64_t t_;
- };
-
- TimeStamp time_;
-};
-
-
-// Initialize timestamp to start of epoch.
-Win32Time::Win32Time() {
- t() = 0;
-}
-
-
-// Initialize timestamp from a JavaScript timestamp.
-Win32Time::Win32Time(double jstime) {
- t() = static_cast<int64_t>(jstime) * kTimeScaler + kTimeEpoc;
-}
-
-
-// Initialize timestamp from date/time components.
-Win32Time::Win32Time(int year, int mon, int day, int hour, int min, int sec) {
- SYSTEMTIME st;
- st.wYear = year;
- st.wMonth = mon;
- st.wDay = day;
- st.wHour = hour;
- st.wMinute = min;
- st.wSecond = sec;
- st.wMilliseconds = 0;
- SystemTimeToFileTime(&st, &ft());
-}
-
-
-// Convert timestamp to JavaScript timestamp.
-double Win32Time::ToJSTime() {
- return static_cast<double>((t() - kTimeEpoc) / kTimeScaler);
-}
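As a quick, standalone sanity check on the constants used above (not part of the original file): kTimeEpoc is the number of 100 ns ticks between 1601-01-01 and 1970-01-01, and kTimeScaler converts 100 ns ticks to milliseconds, so a FILETIME value of kTimeEpoc corresponds to a JavaScript time of 0.

#include <cstdint>
#include <cstdio>

int main() {
  const int64_t kTimeEpoc = 116444736000000000LL;  // 100 ns ticks, 1601..1970
  const int64_t kTimeScaler = 10000;               // 100 ns ticks per ms
  int64_t filetime_ticks = kTimeEpoc + 5 * kTimeScaler;  // 5 ms past the epoch
  double js_ms =
      static_cast<double>((filetime_ticks - kTimeEpoc) / kTimeScaler);
  std::printf("%.0f ms since 1970-01-01\n", js_ms);  // prints 5
  return 0;
}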
-
-
-// Set timestamp to current time.
-void Win32Time::SetToCurrentTime() {
- // The default GetSystemTimeAsFileTime has a ~15.5ms resolution.
- // Because we're fast, we like fast timers which have at least a
- // 1ms resolution.
- //
- // timeGetTime() provides 1ms granularity when combined with
- // timeBeginPeriod(). If the host application for v8 wants fast
- // timers, it can use timeBeginPeriod to increase the resolution.
- //
-  // Using timeGetTime() has a drawback because it is a 32-bit value
-  // and hence rolls over every ~49 days.
-  //
-  // To use the clock, we use GetSystemTimeAsFileTime as our base
-  // and then use timeGetTime to extrapolate the current time from the
-  // start time. To deal with rollovers, we resync the clock
-  // any time more than kMaxClockElapsedTime has passed or
-  // whenever timeGetTime rolls over.
-
- static bool initialized = false;
- static TimeStamp init_time;
- static DWORD init_ticks;
- static const int64_t kHundredNanosecondsPerSecond = 10000000;
- static const int64_t kMaxClockElapsedTime =
- 60*kHundredNanosecondsPerSecond; // 1 minute
-
- // If we are uninitialized, we need to resync the clock.
- bool needs_resync = !initialized;
-
- // Get the current time.
- TimeStamp time_now;
- GetSystemTimeAsFileTime(&time_now.ft_);
- DWORD ticks_now = timeGetTime();
-
- // Check if we need to resync due to clock rollover.
- needs_resync |= ticks_now < init_ticks;
-
- // Check if we need to resync due to elapsed time.
- needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime;
-
- // Check if we need to resync due to backwards time change.
- needs_resync |= time_now.t_ < init_time.t_;
-
- // Resync the clock if necessary.
- if (needs_resync) {
- GetSystemTimeAsFileTime(&init_time.ft_);
- init_ticks = ticks_now = timeGetTime();
- initialized = true;
- }
-
- // Finally, compute the actual time. Why is this so hard.
- DWORD elapsed = ticks_now - init_ticks;
- this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000);
-}
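A standalone illustration of the 32-bit tick arithmetic the resync logic above guards against (the readings are invented): when the DWORD counter from timeGetTime() wraps, the new reading compares lower than the old one, which is exactly the condition that forces a resync.

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t init_ticks = 0xFFFFFFF0u;  // hypothetical reading just before wrap
  uint32_t ticks_now = 0x00000010u;   // hypothetical reading just after wrap
  bool wrapped = ticks_now < init_ticks;      // the resync trigger used above
  uint32_t elapsed = ticks_now - init_ticks;  // modulo-2^32 difference
  std::printf("wrapped=%d, modular elapsed=%u ticks\n", wrapped ? 1 : 0,
              elapsed);
  return 0;
}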
-
-
-// Return the local timezone offset in milliseconds east of UTC. This
-// takes into account whether daylight saving is in effect at the time.
-// Only times in the 32-bit Unix range may be passed to this function.
-// Also, adding the time-zone offset to the input must not overflow.
-// The function EquivalentTime() in date.js guarantees this.
-int64_t Win32Time::LocalOffset(TimezoneCache* cache) {
- cache->InitializeIfNeeded();
-
- Win32Time rounded_to_second(*this);
- rounded_to_second.t() = rounded_to_second.t() / 1000 / kTimeScaler *
- 1000 * kTimeScaler;
- // Convert to local time using POSIX localtime function.
- // Windows XP Service Pack 3 made SystemTimeToTzSpecificLocalTime()
- // very slow. Other browsers use localtime().
-
- // Convert from JavaScript milliseconds past 1/1/1970 0:00:00 to
- // POSIX seconds past 1/1/1970 0:00:00.
- double unchecked_posix_time = rounded_to_second.ToJSTime() / 1000;
- if (unchecked_posix_time > INT_MAX || unchecked_posix_time < 0) {
- return 0;
- }
- // Because _USE_32BIT_TIME_T is defined, time_t is a 32-bit int.
- time_t posix_time = static_cast<time_t>(unchecked_posix_time);
-
- // Convert to local time, as struct with fields for day, hour, year, etc.
- tm posix_local_time_struct;
- if (localtime_s(&posix_local_time_struct, &posix_time)) return 0;
-
- if (posix_local_time_struct.tm_isdst > 0) {
- return (cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * -kMsPerMinute;
- } else if (posix_local_time_struct.tm_isdst == 0) {
- return (cache->tzinfo_.Bias + cache->tzinfo_.StandardBias) * -kMsPerMinute;
- } else {
- return cache->tzinfo_.Bias * -kMsPerMinute;
- }
-}
-
-
-// Return whether or not daylight savings time is in effect at this time.
-bool Win32Time::InDST(TimezoneCache* cache) {
- cache->InitializeIfNeeded();
-
- // Determine if DST is in effect at the specified time.
- bool in_dst = false;
- if (cache->tzinfo_.StandardDate.wMonth != 0 ||
- cache->tzinfo_.DaylightDate.wMonth != 0) {
- // Get the local timezone offset for the timestamp in milliseconds.
- int64_t offset = LocalOffset(cache);
-
- // Compute the offset for DST. The bias parameters in the timezone info
- // are specified in minutes. These must be converted to milliseconds.
- int64_t dstofs =
- -(cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * kMsPerMinute;
-
- // If the local time offset equals the timezone bias plus the daylight
- // bias then DST is in effect.
- in_dst = offset == dstofs;
- }
-
- return in_dst;
-}
-
-
-// Return the daylight savings time offset for this time.
-int64_t Win32Time::DaylightSavingsOffset(TimezoneCache* cache) {
- return InDST(cache) ? 60 * kMsPerMinute : 0;
-}
-
-
-// Returns a string identifying the current timezone for the
-// timestamp taking into account daylight saving.
-char* Win32Time::LocalTimezone(TimezoneCache* cache) {
- // Return the standard or DST time zone name based on whether daylight
- // saving is in effect at the given time.
- return InDST(cache) ? cache->dst_tz_name_ : cache->std_tz_name_;
-}
-
-
-// Returns the accumulated user time for the thread.
-int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
- FILETIME dummy;
- uint64_t usertime;
-
- // Get the amount of time that the thread has executed in user mode.
- if (!GetThreadTimes(GetCurrentThread(), &dummy, &dummy, &dummy,
- reinterpret_cast<FILETIME*>(&usertime))) return -1;
-
- // Adjust the resolution to micro-seconds.
- usertime /= 10;
-
- // Convert to seconds and microseconds
- *secs = static_cast<uint32_t>(usertime / 1000000);
- *usecs = static_cast<uint32_t>(usertime % 1000000);
- return 0;
-}
-
-
-// Returns current time as the number of milliseconds since
-// 00:00:00 UTC, January 1, 1970.
-double OS::TimeCurrentMillis() {
- return Time::Now().ToJsTime();
-}
-
-
-TimezoneCache* OS::CreateTimezoneCache() {
- return new TimezoneCache();
-}
-
-
-void OS::DisposeTimezoneCache(TimezoneCache* cache) {
- delete cache;
-}
-
-
-void OS::ClearTimezoneCache(TimezoneCache* cache) {
- cache->Clear();
-}
-
-
-// Returns a string identifying the current timezone taking into
-// account daylight saving.
-const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
- return Win32Time(time).LocalTimezone(cache);
-}
-
-
-// Returns the local time offset in milliseconds east of UTC without
-// taking daylight savings time into account.
-double OS::LocalTimeOffset(TimezoneCache* cache) {
- // Use current time, rounded to the millisecond.
- Win32Time t(TimeCurrentMillis());
-  // LocalOffset() includes any daylight savings offset, so subtract it.
- return static_cast<double>(t.LocalOffset(cache) -
- t.DaylightSavingsOffset(cache));
-}
-
-
-// Returns the daylight savings offset in milliseconds for the given
-// time.
-double OS::DaylightSavingsOffset(double time, TimezoneCache* cache) {
- int64_t offset = Win32Time(time).DaylightSavingsOffset(cache);
- return static_cast<double>(offset);
-}
-
-
-int OS::GetLastError() {
- return ::GetLastError();
-}
-
-
-int OS::GetCurrentProcessId() {
- return static_cast<int>(::GetCurrentProcessId());
-}
-
-
-// ----------------------------------------------------------------------------
-// Win32 console output.
-//
-// If a Win32 application is linked as a console application it has a normal
-// standard output and standard error. In this case normal printf works fine
-// for output. However, if the application is linked as a GUI application,
-// the process doesn't have a console, and therefore (debugging) output is lost.
-// This is the case if we are embedded in a Windows program (like a browser).
-// In order to be able to get debug output in this case we use the debugging
-// facility OutputDebugString. This output goes to the active debugger for the
-// process (if any). Otherwise the output can be monitored using DBMON.EXE.
-
-enum OutputMode {
- UNKNOWN, // Output method has not yet been determined.
- CONSOLE, // Output is written to stdout.
- ODS // Output is written to debug facility.
-};
-
-static OutputMode output_mode = UNKNOWN; // Current output mode.
-
-
-// Determine if the process has a console for output.
-static bool HasConsole() {
-  // Only check the first time. Potential race conditions are not a problem,
- // because all threads will eventually determine the same mode.
- if (output_mode == UNKNOWN) {
- // We cannot just check that the standard output is attached to a console
- // because this would fail if output is redirected to a file. Therefore we
- // say that a process does not have an output console if either the
- // standard output handle is invalid or its file type is unknown.
- if (GetStdHandle(STD_OUTPUT_HANDLE) != INVALID_HANDLE_VALUE &&
- GetFileType(GetStdHandle(STD_OUTPUT_HANDLE)) != FILE_TYPE_UNKNOWN)
- output_mode = CONSOLE;
- else
- output_mode = ODS;
- }
- return output_mode == CONSOLE;
-}
-
-
-static void VPrintHelper(FILE* stream, const char* format, va_list args) {
- if ((stream == stdout || stream == stderr) && !HasConsole()) {
- // It is important to use safe print here in order to avoid
- // overflowing the buffer. We might truncate the output, but this
- // does not crash.
- char buffer[4096];
- OS::VSNPrintF(buffer, sizeof(buffer), format, args);
- OutputDebugStringA(buffer);
- } else {
- vfprintf(stream, format, args);
- }
-}
-
-
-FILE* OS::FOpen(const char* path, const char* mode) {
- FILE* result;
- if (fopen_s(&result, path, mode) == 0) {
- return result;
- } else {
- return NULL;
- }
-}
-
-
-bool OS::Remove(const char* path) {
- return (DeleteFileA(path) != 0);
-}
-
-
-FILE* OS::OpenTemporaryFile() {
-  // tmpfile_s tries to use the root dir, so don't use it.
- char tempPathBuffer[MAX_PATH];
- DWORD path_result = 0;
- path_result = GetTempPathA(MAX_PATH, tempPathBuffer);
- if (path_result > MAX_PATH || path_result == 0) return NULL;
- UINT name_result = 0;
- char tempNameBuffer[MAX_PATH];
- name_result = GetTempFileNameA(tempPathBuffer, "", 0, tempNameBuffer);
- if (name_result == 0) return NULL;
- FILE* result = FOpen(tempNameBuffer, "w+"); // Same mode as tmpfile uses.
- if (result != NULL) {
- Remove(tempNameBuffer); // Delete on close.
- }
- return result;
-}
-
-
-// Open log file in binary mode to avoid \n -> \r\n conversion.
-const char* const OS::LogFileOpenMode = "wb";
-
-
-// Print (debug) message to console.
-void OS::Print(const char* format, ...) {
- va_list args;
- va_start(args, format);
- VPrint(format, args);
- va_end(args);
-}
-
-
-void OS::VPrint(const char* format, va_list args) {
- VPrintHelper(stdout, format, args);
-}
-
-
-void OS::FPrint(FILE* out, const char* format, ...) {
- va_list args;
- va_start(args, format);
- VFPrint(out, format, args);
- va_end(args);
-}
-
-
-void OS::VFPrint(FILE* out, const char* format, va_list args) {
- VPrintHelper(out, format, args);
-}
-
-
-// Print error message to console.
-void OS::PrintError(const char* format, ...) {
- va_list args;
- va_start(args, format);
- VPrintError(format, args);
- va_end(args);
-}
-
-
-void OS::VPrintError(const char* format, va_list args) {
- VPrintHelper(stderr, format, args);
-}
-
-
-int OS::SNPrintF(char* str, int length, const char* format, ...) {
- va_list args;
- va_start(args, format);
- int result = VSNPrintF(str, length, format, args);
- va_end(args);
- return result;
-}
-
-
-int OS::VSNPrintF(char* str, int length, const char* format, va_list args) {
- int n = _vsnprintf_s(str, length, _TRUNCATE, format, args);
- // Make sure to zero-terminate the string if the output was
- // truncated or if there was an error.
- if (n < 0 || n >= length) {
- if (length > 0)
- str[length - 1] = '\0';
- return -1;
- } else {
- return n;
- }
-}
-
-
-char* OS::StrChr(char* str, int c) {
- return const_cast<char*>(strchr(str, c));
-}
-
-
-void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
-  // Use _TRUNCATE, or strncpy_s crashes (by design) if the buffer is too small.
-  size_t buffer_size = static_cast<size_t>(length);
-  if (n + 1 > buffer_size) // account for trailing '\0'
- n = _TRUNCATE;
- int result = strncpy_s(dest, length, src, n);
- USE(result);
- ASSERT(result == 0 || (n == _TRUNCATE && result == STRUNCATE));
-}
-
-
-#undef _TRUNCATE
-#undef STRUNCATE
-
-
-// Get the system's page size used by VirtualAlloc() or the next power
-// of two. The reason for always returning a power of two is that the
-// rounding up in OS::Allocate expects that.
-static size_t GetPageSize() {
- static size_t page_size = 0;
- if (page_size == 0) {
- SYSTEM_INFO info;
- GetSystemInfo(&info);
- page_size = RoundUpToPowerOf2(info.dwPageSize);
- }
- return page_size;
-}
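RoundUpToPowerOf2 is defined elsewhere in V8; a minimal standalone equivalent of the rounding the comment above describes might look like this (illustrative only):

#include <cstddef>
#include <cstdio>

// Smallest power of two that is >= x, for x >= 1.
static size_t RoundUpToPowerOf2Sketch(size_t x) {
  size_t p = 1;
  while (p < x) p <<= 1;
  return p;
}

int main() {
  std::printf("%zu %zu\n", RoundUpToPowerOf2Sketch(4096),   // 4096
              RoundUpToPowerOf2Sketch(5000));               // 8192
  return 0;
}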
-
-
-// The allocation alignment is the guaranteed alignment for
-// VirtualAlloc'ed blocks of memory.
-size_t OS::AllocateAlignment() {
- static size_t allocate_alignment = 0;
- if (allocate_alignment == 0) {
- SYSTEM_INFO info;
- GetSystemInfo(&info);
- allocate_alignment = info.dwAllocationGranularity;
- }
- return allocate_alignment;
-}
-
-
-static base::LazyInstance<RandomNumberGenerator>::type
- platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
-
-
-void OS::Initialize(int64_t random_seed, bool hard_abort,
- const char* const gc_fake_mmap) {
- if (random_seed) {
- platform_random_number_generator.Pointer()->SetSeed(random_seed);
- }
- g_hard_abort = hard_abort;
-}
-
-
-void* OS::GetRandomMmapAddr() {
-  // The address range used to randomize RWX allocations in OS::Allocate.
-  // Try not to map pages into the default range where Windows loads DLLs.
-  // Use a multiple of 64k to prevent committing unused memory.
-  // Note: This does not guarantee RWX regions will be within the
-  // range kAllocationRandomAddressMin to kAllocationRandomAddressMax.
-#ifdef V8_HOST_ARCH_64_BIT
- static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
- static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
-#else
- static const intptr_t kAllocationRandomAddressMin = 0x04000000;
- static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
-#endif
- uintptr_t address =
- (platform_random_number_generator.Pointer()->NextInt() << kPageSizeBits) |
- kAllocationRandomAddressMin;
- address &= kAllocationRandomAddressMax;
- return reinterpret_cast<void *>(address);
-}
-
-
-static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
- LPVOID base = NULL;
-
- if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
-    // For executable pages, try to randomize the allocation address
- for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
- base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection);
- }
- }
-
- // After three attempts give up and let the OS find an address to use.
- if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
-
- return base;
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
- // VirtualAlloc rounds allocated size to page size automatically.
- size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
-
-  // Windows XP SP2 allows Data Execution Prevention (DEP).
- int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-
- LPVOID mbase = RandomizedVirtualAlloc(msize,
- MEM_COMMIT | MEM_RESERVE,
- prot);
-
- if (mbase == NULL) return NULL;
-
- ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));
-
- *allocated = msize;
- return mbase;
-}
-
-
-void OS::Free(void* address, const size_t size) {
- // TODO(1240712): VirtualFree has a return value which is ignored here.
- VirtualFree(address, 0, MEM_RELEASE);
- USE(size);
-}
-
-
-intptr_t OS::CommitPageSize() {
- return 4096;
-}
-
-
-void OS::ProtectCode(void* address, const size_t size) {
- DWORD old_protect;
- VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
-}
-
-
-void OS::Guard(void* address, const size_t size) {
- DWORD oldprotect;
- VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
-}
-
-
-void OS::Sleep(int milliseconds) {
- ::Sleep(milliseconds);
-}
-
-
-void OS::Abort() {
- if (g_hard_abort) {
- V8_IMMEDIATE_CRASH();
- }
- // Make the MSVCRT do a silent abort.
- raise(SIGABRT);
-}
-
-
-void OS::DebugBreak() {
-#ifdef _MSC_VER
-  // To avoid requiring Visual Studio runtime support, the following code can
-  // be used instead:
- // __asm { int 3 }
- __debugbreak();
-#else
- ::DebugBreak();
-#endif
-}
-
-
-class Win32MemoryMappedFile : public OS::MemoryMappedFile {
- public:
- Win32MemoryMappedFile(HANDLE file,
- HANDLE file_mapping,
- void* memory,
- int size)
- : file_(file),
- file_mapping_(file_mapping),
- memory_(memory),
- size_(size) { }
- virtual ~Win32MemoryMappedFile();
- virtual void* memory() { return memory_; }
- virtual int size() { return size_; }
- private:
- HANDLE file_;
- HANDLE file_mapping_;
- void* memory_;
- int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- // Open a physical file
- HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
- FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
- if (file == INVALID_HANDLE_VALUE) return NULL;
-
- int size = static_cast<int>(GetFileSize(file, NULL));
-
- // Create a file mapping for the physical file
- HANDLE file_mapping = CreateFileMapping(file, NULL,
- PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
- if (file_mapping == NULL) return NULL;
-
- // Map a view of the file into memory
- void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
- return new Win32MemoryMappedFile(file, file_mapping, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- // Open a physical file
- HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
- FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, 0, NULL);
- if (file == NULL) return NULL;
- // Create a file mapping for the physical file
- HANDLE file_mapping = CreateFileMapping(file, NULL,
- PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
- if (file_mapping == NULL) return NULL;
- // Map a view of the file into memory
- void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
- if (memory) MemMove(memory, initial, size);
- return new Win32MemoryMappedFile(file, file_mapping, memory, size);
-}
-
-
-Win32MemoryMappedFile::~Win32MemoryMappedFile() {
- if (memory_ != NULL)
- UnmapViewOfFile(memory_);
- CloseHandle(file_mapping_);
- CloseHandle(file_);
-}
-
-
-// The following code loads functions defined in DbgHelp.h and TlHelp32.h
-// dynamically. This is to avoid depending on dbghelp.dll and
-// tlhelp32.dll when running (the functions in tlhelp32.dll have been moved to
-// kernel32.dll at some point, so loading the functions declared in TlHelp32.h
-// dynamically might not be necessary any more - for some versions of Windows?).
-
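For readers unfamiliar with the pattern the macros below expand to, here is a minimal hedged sketch of the LoadLibrary/GetProcAddress dance for a single dbghelp.dll entry point (standalone, not the V8 code):

#include <windows.h>

typedef DWORD (__stdcall *SymGetOptionsFn)(void);

static SymGetOptionsFn LoadSymGetOptions() {
  // Load the DLL once; the module is intentionally never freed, mirroring
  // the note at the end of LoadDbgHelpAndTlHelp32() below.
  HMODULE module = LoadLibrary(TEXT("dbghelp.dll"));
  if (module == NULL) return NULL;
  return reinterpret_cast<SymGetOptionsFn>(
      GetProcAddress(module, "SymGetOptions"));
}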
-// Function pointers to functions dynamically loaded from dbghelp.dll.
-#define DBGHELP_FUNCTION_LIST(V) \
- V(SymInitialize) \
- V(SymGetOptions) \
- V(SymSetOptions) \
- V(SymGetSearchPath) \
- V(SymLoadModule64) \
- V(StackWalk64) \
- V(SymGetSymFromAddr64) \
- V(SymGetLineFromAddr64) \
- V(SymFunctionTableAccess64) \
- V(SymGetModuleBase64)
-
-// Function pointers to functions declared in TlHelp32.h (loaded from kernel32.dll).
-#define TLHELP32_FUNCTION_LIST(V) \
- V(CreateToolhelp32Snapshot) \
- V(Module32FirstW) \
- V(Module32NextW)
-
-// Define the decoration to use for the type and variable name used for
-// dynamically loaded DLL functions.
-#define DLL_FUNC_TYPE(name) _##name##_
-#define DLL_FUNC_VAR(name) _##name
-
-// Define the type for each dynamically loaded DLL function. The function
-// definitions are copied from DbgHelp.h and TlHelp32.h. The IN and VOID macros
-// from the Windows include files are redefined here to have the function
-// definitions to be as close to the ones in the original .h files as possible.
-#ifndef IN
-#define IN
-#endif
-#ifndef VOID
-#define VOID void
-#endif
-
-// DbgHelp isn't supported on MinGW yet
-#ifndef __MINGW32__
-// DbgHelp.h functions.
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymInitialize))(IN HANDLE hProcess,
- IN PSTR UserSearchPath,
- IN BOOL fInvadeProcess);
-typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymGetOptions))(VOID);
-typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymSetOptions))(IN DWORD SymOptions);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSearchPath))(
- IN HANDLE hProcess,
- OUT PSTR SearchPath,
- IN DWORD SearchPathLength);
-typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymLoadModule64))(
- IN HANDLE hProcess,
- IN HANDLE hFile,
- IN PSTR ImageName,
- IN PSTR ModuleName,
- IN DWORD64 BaseOfDll,
- IN DWORD SizeOfDll);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(StackWalk64))(
- DWORD MachineType,
- HANDLE hProcess,
- HANDLE hThread,
- LPSTACKFRAME64 StackFrame,
- PVOID ContextRecord,
- PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
- PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
- PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
- PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSymFromAddr64))(
- IN HANDLE hProcess,
- IN DWORD64 qwAddr,
- OUT PDWORD64 pdwDisplacement,
- OUT PIMAGEHLP_SYMBOL64 Symbol);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetLineFromAddr64))(
- IN HANDLE hProcess,
- IN DWORD64 qwAddr,
- OUT PDWORD pdwDisplacement,
- OUT PIMAGEHLP_LINE64 Line64);
-// DbgHelp.h typedefs. Implementation found in dbghelp.dll.
-typedef PVOID (__stdcall *DLL_FUNC_TYPE(SymFunctionTableAccess64))(
- HANDLE hProcess,
- DWORD64 AddrBase); // DbgHelp.h typedef PFUNCTION_TABLE_ACCESS_ROUTINE64
-typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymGetModuleBase64))(
- HANDLE hProcess,
- DWORD64 AddrBase); // DbgHelp.h typedef PGET_MODULE_BASE_ROUTINE64
-
-// TlHelp32.h functions.
-typedef HANDLE (__stdcall *DLL_FUNC_TYPE(CreateToolhelp32Snapshot))(
- DWORD dwFlags,
- DWORD th32ProcessID);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32FirstW))(HANDLE hSnapshot,
- LPMODULEENTRY32W lpme);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32NextW))(HANDLE hSnapshot,
- LPMODULEENTRY32W lpme);
-
-#undef IN
-#undef VOID
-
-// Declare a variable for each dynamically loaded DLL function.
-#define DEF_DLL_FUNCTION(name) DLL_FUNC_TYPE(name) DLL_FUNC_VAR(name) = NULL;
-DBGHELP_FUNCTION_LIST(DEF_DLL_FUNCTION)
-TLHELP32_FUNCTION_LIST(DEF_DLL_FUNCTION)
-#undef DEF_DLL_FUNCTION
-
-// Load the functions. This function has a lot of "ugly" macros in order to
-// keep down code duplication.
-
-static bool LoadDbgHelpAndTlHelp32() {
- static bool dbghelp_loaded = false;
-
- if (dbghelp_loaded) return true;
-
- HMODULE module;
-
- // Load functions from the dbghelp.dll module.
- module = LoadLibrary(TEXT("dbghelp.dll"));
- if (module == NULL) {
- return false;
- }
-
-#define LOAD_DLL_FUNC(name) \
- DLL_FUNC_VAR(name) = \
- reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));
-
-DBGHELP_FUNCTION_LIST(LOAD_DLL_FUNC)
-
-#undef LOAD_DLL_FUNC
-
-  // Load functions from the kernel32.dll module (the TlHelp32.h functions used
-  // to be in tlhelp32.dll but have now moved to kernel32.dll).
- module = LoadLibrary(TEXT("kernel32.dll"));
- if (module == NULL) {
- return false;
- }
-
-#define LOAD_DLL_FUNC(name) \
- DLL_FUNC_VAR(name) = \
- reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));
-
-TLHELP32_FUNCTION_LIST(LOAD_DLL_FUNC)
-
-#undef LOAD_DLL_FUNC
-
-  // Check that all functions were loaded.
- bool result =
-#define DLL_FUNC_LOADED(name) (DLL_FUNC_VAR(name) != NULL) &&
-
-DBGHELP_FUNCTION_LIST(DLL_FUNC_LOADED)
-TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED)
-
-#undef DLL_FUNC_LOADED
- true;
-
- dbghelp_loaded = result;
- return result;
- // NOTE: The modules are never unloaded and will stay around until the
- // application is closed.
-}
-
-#undef DBGHELP_FUNCTION_LIST
-#undef TLHELP32_FUNCTION_LIST
-#undef DLL_FUNC_VAR
-#undef DLL_FUNC_TYPE
-
-
-// Load the symbols for generating stack traces.
-static std::vector<OS::SharedLibraryAddress> LoadSymbols(
- HANDLE process_handle) {
- static std::vector<OS::SharedLibraryAddress> result;
-
- static bool symbols_loaded = false;
-
- if (symbols_loaded) return result;
-
- BOOL ok;
-
- // Initialize the symbol engine.
- ok = _SymInitialize(process_handle, // hProcess
- NULL, // UserSearchPath
- false); // fInvadeProcess
- if (!ok) return result;
-
- DWORD options = _SymGetOptions();
- options |= SYMOPT_LOAD_LINES;
- options |= SYMOPT_FAIL_CRITICAL_ERRORS;
- options = _SymSetOptions(options);
-
- char buf[OS::kStackWalkMaxNameLen] = {0};
- ok = _SymGetSearchPath(process_handle, buf, OS::kStackWalkMaxNameLen);
- if (!ok) {
- int err = GetLastError();
- PrintF("%d\n", err);
- return result;
- }
-
- HANDLE snapshot = _CreateToolhelp32Snapshot(
- TH32CS_SNAPMODULE, // dwFlags
- GetCurrentProcessId()); // th32ProcessId
- if (snapshot == INVALID_HANDLE_VALUE) return result;
- MODULEENTRY32W module_entry;
- module_entry.dwSize = sizeof(module_entry); // Set the size of the structure.
- BOOL cont = _Module32FirstW(snapshot, &module_entry);
- while (cont) {
- DWORD64 base;
-    // NOTE: the SymLoadModule64 function has the peculiarity of accepting
-    // both Unicode and ASCII strings even though the parameter is PSTR.
- base = _SymLoadModule64(
- process_handle, // hProcess
- 0, // hFile
- reinterpret_cast<PSTR>(module_entry.szExePath), // ImageName
- reinterpret_cast<PSTR>(module_entry.szModule), // ModuleName
- reinterpret_cast<DWORD64>(module_entry.modBaseAddr), // BaseOfDll
- module_entry.modBaseSize); // SizeOfDll
- if (base == 0) {
- int err = GetLastError();
- if (err != ERROR_MOD_NOT_FOUND &&
- err != ERROR_INVALID_HANDLE) {
- result.clear();
- return result;
- }
- }
- int lib_name_length = WideCharToMultiByte(
- CP_UTF8, 0, module_entry.szExePath, -1, NULL, 0, NULL, NULL);
- std::string lib_name(lib_name_length, 0);
- WideCharToMultiByte(CP_UTF8, 0, module_entry.szExePath, -1, &lib_name[0],
- lib_name_length, NULL, NULL);
- result.push_back(OS::SharedLibraryAddress(
- lib_name, reinterpret_cast<unsigned int>(module_entry.modBaseAddr),
- reinterpret_cast<unsigned int>(module_entry.modBaseAddr +
- module_entry.modBaseSize)));
- cont = _Module32NextW(snapshot, &module_entry);
- }
- CloseHandle(snapshot);
-
- symbols_loaded = true;
- return result;
-}
-
-
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
- // SharedLibraryEvents are logged when loading symbol information.
- // Only the shared libraries loaded at the time of the call to
- // GetSharedLibraryAddresses are logged. DLLs loaded after
- // initialization are not accounted for.
- if (!LoadDbgHelpAndTlHelp32()) return std::vector<OS::SharedLibraryAddress>();
- HANDLE process_handle = GetCurrentProcess();
- return LoadSymbols(process_handle);
-}
-
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-uint64_t OS::TotalPhysicalMemory() {
- MEMORYSTATUSEX memory_info;
- memory_info.dwLength = sizeof(memory_info);
- if (!GlobalMemoryStatusEx(&memory_info)) {
- UNREACHABLE();
- return 0;
- }
-
- return static_cast<uint64_t>(memory_info.ullTotalPhys);
-}
-
-
-#else // __MINGW32__
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
- return std::vector<OS::SharedLibraryAddress>();
-}
-
-
-void OS::SignalCodeMovingGC() { }
-#endif // __MINGW32__
-
-
-int OS::NumberOfProcessorsOnline() {
- SYSTEM_INFO info;
- GetSystemInfo(&info);
- return info.dwNumberOfProcessors;
-}
-
-
-double OS::nan_value() {
-#ifdef _MSC_VER
- // Positive Quiet NaN with no payload (aka. Indeterminate) has all bits
- // in mask set, so value equals mask.
- static const __int64 nanval = kQuietNaNMask;
- return *reinterpret_cast<const double*>(&nanval);
-#else // _MSC_VER
- return NAN;
-#endif // _MSC_VER
-}
-
-
-int OS::ActivationFrameAlignment() {
-#ifdef _WIN64
- return 16; // Windows 64-bit ABI requires the stack to be 16-byte aligned.
-#elif defined(__MINGW32__)
- // With gcc 4.4 the tree vectorization optimizer can generate code
- // that requires 16 byte alignment such as movdqa on x86.
- return 16;
-#else
- return 8; // Floating-point math runs faster with 8-byte alignment.
-#endif
-}
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
- : address_(NULL), size_(0) {
- ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* address = ReserveRegion(request_size);
- if (address == NULL) return;
- uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
- // Try reducing the size by freeing and then reallocating a specific area.
- bool result = ReleaseRegion(address, request_size);
- USE(result);
- ASSERT(result);
- address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
- if (address != NULL) {
- request_size = size;
- ASSERT(base == static_cast<uint8_t*>(address));
- } else {
- // Resizing failed, just go with a bigger area.
- address = ReserveRegion(request_size);
- if (address == NULL) return;
- }
- address_ = address;
- size_ = request_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
- if (IsReserved()) {
- bool result = ReleaseRegion(address(), size());
- ASSERT(result);
- USE(result);
- }
-}
-
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
- address_ = NULL;
- size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- ASSERT(IsReserved());
- return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- if (NULL == VirtualAlloc(address,
- OS::CommitPageSize(),
- MEM_COMMIT,
- PAGE_NOACCESS)) {
- return false;
- }
- return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
- if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
- return false;
- }
- return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return VirtualFree(base, size, MEM_DECOMMIT) != 0;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- return VirtualFree(base, 0, MEM_RELEASE) != 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
-
-// ----------------------------------------------------------------------------
-// Win32 thread support.
-
-// Definition of invalid thread handle and id.
-static const HANDLE kNoThread = INVALID_HANDLE_VALUE;
-
-// Entry point for threads. The supplied argument is a pointer to the thread
-// object. The entry function dispatches to the Run() method in the thread
-// object. It is important that this function has the __stdcall calling
-// convention.
-static unsigned int __stdcall ThreadEntry(void* arg) {
- Thread* thread = reinterpret_cast<Thread*>(arg);
- thread->NotifyStartedAndRun();
- return 0;
-}
-
-
-class Thread::PlatformData {
- public:
- explicit PlatformData(HANDLE thread) : thread_(thread) {}
- HANDLE thread_;
- unsigned thread_id_;
-};
-
-
-// Initialize a Win32 thread object. The thread has an invalid thread
-// handle until it is started.
-
-Thread::Thread(const Options& options)
- : stack_size_(options.stack_size()),
- start_semaphore_(NULL) {
- data_ = new PlatformData(kNoThread);
- set_name(options.name());
-}
-
-
-void Thread::set_name(const char* name) {
- OS::StrNCpy(name_, sizeof(name_), name, strlen(name));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-// Close our own handle for the thread.
-Thread::~Thread() {
- if (data_->thread_ != kNoThread) CloseHandle(data_->thread_);
- delete data_;
-}
-
-
-// Create a new thread. It is important to use _beginthreadex() instead of
-// the Win32 function CreateThread(), because CreateThread() does not
-// initialize thread-specific structures in the C runtime library.
-void Thread::Start() {
- data_->thread_ = reinterpret_cast<HANDLE>(
- _beginthreadex(NULL,
- static_cast<unsigned>(stack_size_),
- ThreadEntry,
- this,
- 0,
- &data_->thread_id_));
-}
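A minimal standalone sketch of the _beginthreadex() pattern used above, assuming only <process.h> and <windows.h> (as noted earlier, the entry routine must use the __stdcall convention and return unsigned):

#include <process.h>
#include <windows.h>

static unsigned int __stdcall SketchEntry(void* arg) {
  (void)arg;  // a real entry point would dispatch to its thread object here
  return 0;
}

static HANDLE StartSketchThread() {
  unsigned int thread_id = 0;
  return reinterpret_cast<HANDLE>(
      _beginthreadex(NULL, 0 /* default stack size */, SketchEntry, NULL,
                     0 /* run immediately */, &thread_id));
}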
-
-
-// Wait for thread to terminate.
-void Thread::Join() {
- if (data_->thread_id_ != GetCurrentThreadId()) {
- WaitForSingleObject(data_->thread_, INFINITE);
- }
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- DWORD result = TlsAlloc();
- ASSERT(result != TLS_OUT_OF_INDEXES);
- return static_cast<LocalStorageKey>(result);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- BOOL result = TlsFree(static_cast<DWORD>(key));
- USE(result);
- ASSERT(result);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- return TlsGetValue(static_cast<DWORD>(key));
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- BOOL result = TlsSetValue(static_cast<DWORD>(key), value);
- USE(result);
- ASSERT(result);
-}
-
-
-
-void Thread::YieldCPU() {
- Sleep(0);
-}
-
-} } // namespace v8::internal
+++ /dev/null
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This module contains the platform-specific code. This makes the rest of the
-// code less dependent on operating systems, compilers and runtime libraries.
-// This module specifically does not deal with differences between different
-// processor architectures.
-// The platform classes have the same definition for all platforms. The
-// implementation for a particular platform is put in platform_<os>.cc.
-// The build system then uses the implementation for the target platform.
-//
-// This design has been chosen because it is simple and fast. Alternatively,
-// the platform dependent classes could have been implemented using abstract
-// superclasses with virtual methods and specializations for each
-// platform. This design was rejected because it was more complicated and
-// slower. It would require factory methods for selecting the right
-// implementation and the overhead of virtual methods for performance-
-// sensitive operations like mutex locking/unlocking.
-
-#ifndef V8_PLATFORM_H_
-#define V8_PLATFORM_H_
-
-#include <stdarg.h>
-#include <string>
-#include <vector>
-
-#include "src/base/build_config.h"
-#include "src/platform/mutex.h"
-#include "src/platform/semaphore.h"
-
-#ifdef __sun
-# ifndef signbit
-namespace std {
-int signbit(double x);
-}
-# endif
-#endif
-
-#if V8_OS_QNX
-#include "src/qnx-math.h"
-#endif
-
-// Microsoft Visual C++ specific stuff.
-#if V8_LIBC_MSVCRT
-
-#include "src/base/win32-headers.h"
-#include "src/win32-math.h"
-
-int strncasecmp(const char* s1, const char* s2, int n);
-
-// Visual C++ 2013 and higher implement this function.
-#if (_MSC_VER < 1800)
-inline int lrint(double flt) {
- int intgr;
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
- __asm {
- fld flt
- fistp intgr
- };
-#else
- intgr = static_cast<int>(flt + 0.5);
- if ((intgr & 1) != 0 && intgr - flt == 0.5) {
- // If the number is halfway between two integers, round to the even one.
- intgr--;
- }
-#endif
- return intgr;
-}
-#endif // _MSC_VER < 1800
-
-#endif // V8_LIBC_MSVCRT
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Fast TLS support
-
-#ifndef V8_NO_FAST_TLS
-
-#if defined(_MSC_VER) && (V8_HOST_ARCH_IA32)
-
-#define V8_FAST_TLS_SUPPORTED 1
-
-INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
-
-inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
- const intptr_t kTibInlineTlsOffset = 0xE10;
- const intptr_t kTibExtraTlsOffset = 0xF94;
- const intptr_t kMaxInlineSlots = 64;
- const intptr_t kMaxSlots = kMaxInlineSlots + 1024;
- const intptr_t kPointerSize = sizeof(void*);
- ASSERT(0 <= index && index < kMaxSlots);
- if (index < kMaxInlineSlots) {
- return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset +
- kPointerSize * index));
- }
- intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
- ASSERT(extra != 0);
- return *reinterpret_cast<intptr_t*>(extra +
- kPointerSize * (index - kMaxInlineSlots));
-}
-
-#elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
-
-#define V8_FAST_TLS_SUPPORTED 1
-
-extern intptr_t kMacTlsBaseOffset;
-
-INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
-
-inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
- intptr_t result;
-#if V8_HOST_ARCH_IA32
- asm("movl %%gs:(%1,%2,4), %0;"
- :"=r"(result) // Output must be a writable register.
- :"r"(kMacTlsBaseOffset), "r"(index));
-#else
- asm("movq %%gs:(%1,%2,8), %0;"
- :"=r"(result)
- :"r"(kMacTlsBaseOffset), "r"(index));
-#endif
- return result;
-}
-
-#endif
-
-#endif // V8_NO_FAST_TLS
-
-
-class TimezoneCache;
-
-
-// ----------------------------------------------------------------------------
-// OS
-//
-// This class has static methods for the different platform specific
-// functions. Add methods here to cope with differences between the
-// supported platforms.
-
-class OS {
- public:
- // Initialize the OS class.
-  // - random_seed: Used for GetRandomMmapAddr() if non-zero.
- // - hard_abort: If true, OS::Abort() will crash instead of aborting.
- // - gc_fake_mmap: Name of the file for fake gc mmap used in ll_prof.
- static void Initialize(int64_t random_seed,
- bool hard_abort,
- const char* const gc_fake_mmap);
-
-  // Returns the accumulated user time for the thread. This routine
-  // can be used for profiling. The implementation should
-  // strive for high-precision timer resolution, preferably
-  // micro-second resolution.
- static int GetUserTime(uint32_t* secs, uint32_t* usecs);
-
- // Returns current time as the number of milliseconds since
- // 00:00:00 UTC, January 1, 1970.
- static double TimeCurrentMillis();
-
- static TimezoneCache* CreateTimezoneCache();
- static void DisposeTimezoneCache(TimezoneCache* cache);
- static void ClearTimezoneCache(TimezoneCache* cache);
-
- // Returns a string identifying the current time zone. The
- // timestamp is used for determining if DST is in effect.
- static const char* LocalTimezone(double time, TimezoneCache* cache);
-
- // Returns the local time offset in milliseconds east of UTC without
- // taking daylight savings time into account.
- static double LocalTimeOffset(TimezoneCache* cache);
-
- // Returns the daylight savings offset for the given time.
- static double DaylightSavingsOffset(double time, TimezoneCache* cache);
-
- // Returns last OS error.
- static int GetLastError();
-
- static FILE* FOpen(const char* path, const char* mode);
- static bool Remove(const char* path);
-
- // Opens a temporary file, the file is auto removed on close.
- static FILE* OpenTemporaryFile();
-
-  // Log file open mode is platform-dependent due to line-ending issues.
- static const char* const LogFileOpenMode;
-
- // Print output to console. This is mostly used for debugging output.
-  // On platforms that have standard terminal output, the output
- // should go to stdout.
- static void Print(const char* format, ...);
- static void VPrint(const char* format, va_list args);
-
- // Print output to a file. This is mostly used for debugging output.
- static void FPrint(FILE* out, const char* format, ...);
- static void VFPrint(FILE* out, const char* format, va_list args);
-
- // Print error output to console. This is mostly used for error message
-  // output. On platforms that have standard terminal output, the output
- // should go to stderr.
- static void PrintError(const char* format, ...);
- static void VPrintError(const char* format, va_list args);
-
-  // Allocate/Free memory used by the JS heap. Pages are readable/writable,
-  // but they are not guaranteed to be executable unless 'is_executable' is
-  // true. Returns the address of allocated memory, or NULL on failure.
- static void* Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable);
- static void Free(void* address, const size_t size);
-
- // This is the granularity at which the ProtectCode(...) call can set page
- // permissions.
- static intptr_t CommitPageSize();
-
- // Mark code segments non-writable.
- static void ProtectCode(void* address, const size_t size);
-
- // Assign memory as a guard page so that access will cause an exception.
- static void Guard(void* address, const size_t size);
-
- // Generate a random address to be used for hinting mmap().
- static void* GetRandomMmapAddr();
-
-  // Get the alignment guaranteed by Allocate().
- static size_t AllocateAlignment();
-
- // Sleep for a number of milliseconds.
- static void Sleep(const int milliseconds);
-
- // Abort the current process.
- static void Abort();
-
- // Debug break.
- static void DebugBreak();
-
- // Walk the stack.
- static const int kStackWalkError = -1;
- static const int kStackWalkMaxNameLen = 256;
- static const int kStackWalkMaxTextLen = 256;
- struct StackFrame {
- void* address;
- char text[kStackWalkMaxTextLen];
- };
-
- class MemoryMappedFile {
- public:
- static MemoryMappedFile* open(const char* name);
- static MemoryMappedFile* create(const char* name, int size, void* initial);
- virtual ~MemoryMappedFile() { }
- virtual void* memory() = 0;
- virtual int size() = 0;
- };
-
- // Safe formatting print. Ensures that str is always null-terminated.
- // Returns the number of chars written, or -1 if output was truncated.
- static int SNPrintF(char* str, int length, const char* format, ...);
- static int VSNPrintF(char* str,
- int length,
- const char* format,
- va_list args);
-
- static char* StrChr(char* str, int c);
- static void StrNCpy(char* dest, int length, const char* src, size_t n);
-
- // Support for the profiler. Can do nothing, in which case ticks
-  // occurring in shared libraries will not be properly accounted for.
- struct SharedLibraryAddress {
- SharedLibraryAddress(
- const std::string& library_path, uintptr_t start, uintptr_t end)
- : library_path(library_path), start(start), end(end) {}
-
- std::string library_path;
- uintptr_t start;
- uintptr_t end;
- };
-
- static std::vector<SharedLibraryAddress> GetSharedLibraryAddresses();
-
- // Support for the profiler. Notifies the external profiling
- // process that a code moving garbage collection starts. Can do
- // nothing, in which case the code objects must not move (e.g., by
- // using --never-compact) if accurate profiling is desired.
- static void SignalCodeMovingGC();
-
- // Returns the number of processors online.
- static int NumberOfProcessorsOnline();
-
- // The total amount of physical memory available on the current system.
- static uint64_t TotalPhysicalMemory();
-
- // Maximum size of the virtual memory. 0 means there is no artificial
- // limit.
- static intptr_t MaxVirtualMemory();
-
- // Returns the double constant NAN
- static double nan_value();
-
- // Support runtime detection of whether the hard float option of the
- // EABI is used.
- static bool ArmUsingHardFloat();
-
- // Returns the activation frame alignment constraint or zero if
- // the platform doesn't care. Guaranteed to be a power of two.
- static int ActivationFrameAlignment();
-
- static int GetCurrentProcessId();
-
- private:
- static const int msPerSecond = 1000;
-
-#if V8_OS_POSIX
- static const char* GetGCFakeMMapFile();
-#endif
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
-};
-
-// Represents and controls an area of reserved memory.
-// Control of the reserved memory can be assigned to another VirtualMemory
-// object by assignment or copy-constructing. This removes the reserved memory
-// from the original object.
-class VirtualMemory {
- public:
- // Empty VirtualMemory object, controlling no reserved memory.
- VirtualMemory();
-
- // Reserves virtual memory with size.
- explicit VirtualMemory(size_t size);
-
- // Reserves virtual memory containing an area of the given size that
- // is aligned per alignment. This may not be at the position returned
- // by address().
- VirtualMemory(size_t size, size_t alignment);
-
- // Releases the reserved memory, if any, controlled by this VirtualMemory
- // object.
- ~VirtualMemory();
-
- // Returns whether the memory has been reserved.
- bool IsReserved();
-
-  // Initializes or resets an embedded VirtualMemory object.
- void Reset();
-
- // Returns the start address of the reserved memory.
- // If the memory was reserved with an alignment, this address is not
- // necessarily aligned. The user might need to round it up to a multiple of
- // the alignment to get the start of the aligned block.
- void* address() {
- ASSERT(IsReserved());
- return address_;
- }
-
- // Returns the size of the reserved memory. The returned value is only
- // meaningful when IsReserved() returns true.
- // If the memory was reserved with an alignment, this size may be larger
- // than the requested size.
- size_t size() { return size_; }
-
- // Commits real memory. Returns whether the operation succeeded.
- bool Commit(void* address, size_t size, bool is_executable);
-
- // Uncommit real memory. Returns whether the operation succeeded.
- bool Uncommit(void* address, size_t size);
-
- // Creates a single guard page at the given address.
- bool Guard(void* address);
-
- void Release() {
- ASSERT(IsReserved());
- // Notice: Order is important here. The VirtualMemory object might live
- // inside the allocated region.
- void* address = address_;
- size_t size = size_;
- Reset();
- bool result = ReleaseRegion(address, size);
- USE(result);
- ASSERT(result);
- }
-
- // Assign control of the reserved region to a different VirtualMemory object.
- // The old object is no longer functional (IsReserved() returns false).
- void TakeControl(VirtualMemory* from) {
- ASSERT(!IsReserved());
- address_ = from->address_;
- size_ = from->size_;
- from->Reset();
- }
-
- static void* ReserveRegion(size_t size);
-
- static bool CommitRegion(void* base, size_t size, bool is_executable);
-
- static bool UncommitRegion(void* base, size_t size);
-
- // Must be called with a base pointer that has been returned by ReserveRegion
- // and the same size it was reserved with.
- static bool ReleaseRegion(void* base, size_t size);
-
- // Returns true if the OS performs lazy commits, i.e. the memory allocation
- // call defers actual physical memory allocation until the first memory
- // access. Otherwise returns false.
- static bool HasLazyCommits();
-
- private:
- void* address_; // Start address of the virtual memory.
- size_t size_; // Size of the virtual memory.
-};
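A minimal sketch of the reserve/commit/release cycle implied by the interface above; the 1 MB reservation and the 4 KB page assumption are illustrative, and the code is assumed to sit inside namespace v8::internal:

// Sketch only: reserve address space, commit one page of plain data,
// touch it, then hand everything back. Error handling is early returns.
static bool TouchOnePage() {
  VirtualMemory reservation(1024 * 1024);            // reserve 1 MB
  if (!reservation.IsReserved()) return false;
  void* base = reservation.address();
  if (!reservation.Commit(base, 4096, false)) return false;  // data, not code
  static_cast<char*>(base)[0] = 42;                  // memory is now usable
  reservation.Uncommit(base, 4096);
  reservation.Release();                             // gives the region back
  return true;
}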
-
-
-// ----------------------------------------------------------------------------
-// Thread
-//
-// Thread objects are used for creating and running threads. When the start()
-// method is called the new thread starts running the run() method in the new
-// thread. The Thread object should not be deallocated before the thread has
-// terminated.
-
-class Thread {
- public:
- // Opaque data type for thread-local storage keys.
- typedef int32_t LocalStorageKey;
-
- class Options {
- public:
- Options() : name_("v8:<unknown>"), stack_size_(0) {}
- Options(const char* name, int stack_size = 0)
- : name_(name), stack_size_(stack_size) {}
-
- const char* name() const { return name_; }
- int stack_size() const { return stack_size_; }
-
- private:
- const char* name_;
- int stack_size_;
- };
-
- // Create new thread.
- explicit Thread(const Options& options);
- virtual ~Thread();
-
- // Start new thread by calling the Run() method on the new thread.
- void Start();
-
- // Start new thread and wait until Run() method is called on the new thread.
- void StartSynchronously() {
- start_semaphore_ = new Semaphore(0);
- Start();
- start_semaphore_->Wait();
- delete start_semaphore_;
- start_semaphore_ = NULL;
- }
-
- // Wait until thread terminates.
- void Join();
-
- inline const char* name() const {
- return name_;
- }
-
- // Abstract method for run handler.
- virtual void Run() = 0;
-
- // Thread-local storage.
- static LocalStorageKey CreateThreadLocalKey();
- static void DeleteThreadLocalKey(LocalStorageKey key);
- static void* GetThreadLocal(LocalStorageKey key);
- static int GetThreadLocalInt(LocalStorageKey key) {
- return static_cast<int>(reinterpret_cast<intptr_t>(GetThreadLocal(key)));
- }
- static void SetThreadLocal(LocalStorageKey key, void* value);
- static void SetThreadLocalInt(LocalStorageKey key, int value) {
- SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value)));
- }
- static bool HasThreadLocal(LocalStorageKey key) {
- return GetThreadLocal(key) != NULL;
- }
-
-#ifdef V8_FAST_TLS_SUPPORTED
- static inline void* GetExistingThreadLocal(LocalStorageKey key) {
- void* result = reinterpret_cast<void*>(
- InternalGetExistingThreadLocal(static_cast<intptr_t>(key)));
- ASSERT(result == GetThreadLocal(key));
- return result;
- }
-#else
- static inline void* GetExistingThreadLocal(LocalStorageKey key) {
- return GetThreadLocal(key);
- }
-#endif
-
- // A hint to the scheduler to let another thread run.
- static void YieldCPU();
-
-
- // The thread name length is limited to 16 based on Linux's implementation of
- // prctl().
- static const int kMaxThreadNameLength = 16;
-
- class PlatformData;
- PlatformData* data() { return data_; }
-
- void NotifyStartedAndRun() {
- if (start_semaphore_) start_semaphore_->Signal();
- Run();
- }
-
- private:
- void set_name(const char* name);
-
- PlatformData* data_;
-
- char name_[kMaxThreadNameLength];
- int stack_size_;
- Semaphore* start_semaphore_;
-
- DISALLOW_COPY_AND_ASSIGN(Thread);
-};
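As a usage sketch of the interface above, a hypothetical Run() override and the usual Start()/Join() pairing; only the Thread API shown here is assumed:

// Hypothetical worker; counts on its own thread after Start().
class CounterThread : public Thread {
 public:
  CounterThread() : Thread(Options("v8:Counter")), count_(0) {}

  virtual void Run() {               // executes on the newly created thread
    for (int i = 0; i < 1000; ++i) count_++;
  }

  int count() const { return count_; }

 private:
  int count_;
};

// Typical call site:
//   CounterThread counter;
//   counter.Start();                // or StartSynchronously()
//   counter.Join();                 // block until Run() has returned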
-
-} } // namespace v8::internal
-
-#endif // V8_PLATFORM_H_
+++ /dev/null
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/platform/condition-variable.h"
-
-#include <errno.h>
-#include <time.h>
-
-#include "src/platform/time.h"
-
-namespace v8 {
-namespace internal {
-
-#if V8_OS_POSIX
-
-ConditionVariable::ConditionVariable() {
- // TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary
- // hack to support cross-compiling Chrome for Android in AOSP. Remove
- // this once AOSP is fixed.
-#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \
- (V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE
- // On Free/Net/OpenBSD and Linux with glibc we can change the time
- // source for pthread_cond_timedwait() to use the monotonic clock.
- pthread_condattr_t attr;
- int result = pthread_condattr_init(&attr);
- ASSERT_EQ(0, result);
- result = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
- ASSERT_EQ(0, result);
- result = pthread_cond_init(&native_handle_, &attr);
- ASSERT_EQ(0, result);
- result = pthread_condattr_destroy(&attr);
-#else
- int result = pthread_cond_init(&native_handle_, NULL);
-#endif
- ASSERT_EQ(0, result);
- USE(result);
-}
-
-
-ConditionVariable::~ConditionVariable() {
- int result = pthread_cond_destroy(&native_handle_);
- ASSERT_EQ(0, result);
- USE(result);
-}
-
-
-void ConditionVariable::NotifyOne() {
- int result = pthread_cond_signal(&native_handle_);
- ASSERT_EQ(0, result);
- USE(result);
-}
-
-
-void ConditionVariable::NotifyAll() {
- int result = pthread_cond_broadcast(&native_handle_);
- ASSERT_EQ(0, result);
- USE(result);
-}
-
-
-void ConditionVariable::Wait(Mutex* mutex) {
- mutex->AssertHeldAndUnmark();
- int result = pthread_cond_wait(&native_handle_, &mutex->native_handle());
- ASSERT_EQ(0, result);
- USE(result);
- mutex->AssertUnheldAndMark();
-}
-
-
-bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
- struct timespec ts;
- int result;
- mutex->AssertHeldAndUnmark();
-#if V8_OS_MACOSX
- // Mac OS X provides pthread_cond_timedwait_relative_np(), which does
- // not depend on the real time clock, which is what you really WANT here!
- ts = rel_time.ToTimespec();
- ASSERT_GE(ts.tv_sec, 0);
- ASSERT_GE(ts.tv_nsec, 0);
- result = pthread_cond_timedwait_relative_np(
- &native_handle_, &mutex->native_handle(), &ts);
-#else
- // TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary
- // hack to support cross-compiling Chrome for Android in AOSP. Remove
- // this once AOSP is fixed.
-#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \
- (V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE
- // On Free/Net/OpenBSD and Linux with glibc we can change the time
- // source for pthread_cond_timedwait() to use the monotonic clock.
- result = clock_gettime(CLOCK_MONOTONIC, &ts);
- ASSERT_EQ(0, result);
- Time now = Time::FromTimespec(ts);
-#else
- // The timeout argument to pthread_cond_timedwait() is in absolute time.
- Time now = Time::NowFromSystemTime();
-#endif
- Time end_time = now + rel_time;
- ASSERT_GE(end_time, now);
- ts = end_time.ToTimespec();
- result = pthread_cond_timedwait(
- &native_handle_, &mutex->native_handle(), &ts);
-#endif // V8_OS_MACOSX
- mutex->AssertUnheldAndMark();
- if (result == ETIMEDOUT) {
- return false;
- }
- ASSERT_EQ(0, result);
- return true;
-}
-
-#elif V8_OS_WIN
-
-struct ConditionVariable::Event {
- Event() : handle_(::CreateEventA(NULL, true, false, NULL)) {
- ASSERT(handle_ != NULL);
- }
-
- ~Event() {
- BOOL ok = ::CloseHandle(handle_);
- ASSERT(ok);
- USE(ok);
- }
-
- bool WaitFor(DWORD timeout_ms) {
- DWORD result = ::WaitForSingleObject(handle_, timeout_ms);
- if (result == WAIT_OBJECT_0) {
- return true;
- }
- ASSERT(result == WAIT_TIMEOUT);
- return false;
- }
-
- HANDLE handle_;
- Event* next_;
- HANDLE thread_;
- volatile bool notified_;
-};
-
-
-ConditionVariable::NativeHandle::~NativeHandle() {
- ASSERT(waitlist_ == NULL);
-
- while (freelist_ != NULL) {
- Event* event = freelist_;
- freelist_ = event->next_;
- delete event;
- }
-}
-
-
-ConditionVariable::Event* ConditionVariable::NativeHandle::Pre() {
- LockGuard<Mutex> lock_guard(&mutex_);
-
- // Grab an event from the free list or create a new one.
- Event* event = freelist_;
- if (event != NULL) {
- freelist_ = event->next_;
- } else {
- event = new Event;
- }
- event->thread_ = GetCurrentThread();
- event->notified_ = false;
-
-#ifdef DEBUG
- // The event must not be on the wait list.
- for (Event* we = waitlist_; we != NULL; we = we->next_) {
- ASSERT_NE(event, we);
- }
-#endif
-
- // Prepend the event to the wait list.
- event->next_ = waitlist_;
- waitlist_ = event;
-
- return event;
-}
-
-
-void ConditionVariable::NativeHandle::Post(Event* event, bool result) {
- LockGuard<Mutex> lock_guard(&mutex_);
-
- // Remove the event from the wait list.
- for (Event** wep = &waitlist_;; wep = &(*wep)->next_) {
- ASSERT_NE(NULL, *wep);
- if (*wep == event) {
- *wep = event->next_;
- break;
- }
- }
-
-#ifdef DEBUG
- // The event must not be on the free list.
- for (Event* fe = freelist_; fe != NULL; fe = fe->next_) {
- ASSERT_NE(event, fe);
- }
-#endif
-
- // Reset the event.
- BOOL ok = ::ResetEvent(event->handle_);
- ASSERT(ok);
- USE(ok);
-
- // Insert the event into the free list.
- event->next_ = freelist_;
- freelist_ = event;
-
- // Forward signals delivered after the timeout to the next waiting event.
- if (!result && event->notified_ && waitlist_ != NULL) {
- ok = ::SetEvent(waitlist_->handle_);
- ASSERT(ok);
- USE(ok);
- waitlist_->notified_ = true;
- }
-}
-
-
-ConditionVariable::ConditionVariable() {}
-
-
-ConditionVariable::~ConditionVariable() {}
-
-
-void ConditionVariable::NotifyOne() {
- // Notify the thread with the highest priority in the waitlist
- // that was not already signalled.
- LockGuard<Mutex> lock_guard(native_handle_.mutex());
- Event* highest_event = NULL;
- int highest_priority = std::numeric_limits<int>::min();
- for (Event* event = native_handle().waitlist();
- event != NULL;
- event = event->next_) {
- if (event->notified_) {
- continue;
- }
- int priority = GetThreadPriority(event->thread_);
- ASSERT_NE(THREAD_PRIORITY_ERROR_RETURN, priority);
- if (priority >= highest_priority) {
- highest_priority = priority;
- highest_event = event;
- }
- }
- if (highest_event != NULL) {
- ASSERT(!highest_event->notified_);
- ::SetEvent(highest_event->handle_);
- highest_event->notified_ = true;
- }
-}
-
-
-void ConditionVariable::NotifyAll() {
- // Notify all threads on the waitlist.
- LockGuard<Mutex> lock_guard(native_handle_.mutex());
- for (Event* event = native_handle().waitlist();
- event != NULL;
- event = event->next_) {
- if (!event->notified_) {
- ::SetEvent(event->handle_);
- event->notified_ = true;
- }
- }
-}
-
-
-void ConditionVariable::Wait(Mutex* mutex) {
- // Create and setup the wait event.
- Event* event = native_handle_.Pre();
-
- // Release the user mutex.
- mutex->Unlock();
-
- // Wait on the wait event.
- while (!event->WaitFor(INFINITE))
- ;
-
- // Reacquire the user mutex.
- mutex->Lock();
-
- // Release the wait event (we must have been notified).
- ASSERT(event->notified_);
- native_handle_.Post(event, true);
-}
-
-
-bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
- // Create and setup the wait event.
- Event* event = native_handle_.Pre();
-
- // Release the user mutex.
- mutex->Unlock();
-
- // Wait on the wait event.
- TimeTicks now = TimeTicks::Now();
- TimeTicks end = now + rel_time;
- bool result = false;
- while (true) {
- int64_t msec = (end - now).InMilliseconds();
- if (msec >= static_cast<int64_t>(INFINITE)) {
- result = event->WaitFor(INFINITE - 1);
- if (result) {
- break;
- }
- now = TimeTicks::Now();
- } else {
- result = event->WaitFor((msec < 0) ? 0 : static_cast<DWORD>(msec));
- break;
- }
- }
-
- // Reacquire the user mutex.
- mutex->Lock();
-
- // Release the wait event.
- ASSERT(!result || event->notified_);
- native_handle_.Post(event, result);
-
- return result;
-}
-
-#endif // V8_OS_POSIX
-
-} } // namespace v8::internal
+++ /dev/null
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PLATFORM_CONDITION_VARIABLE_H_
-#define V8_PLATFORM_CONDITION_VARIABLE_H_
-
-#include "src/base/lazy-instance.h"
-#include "src/platform/mutex.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class ConditionVariableEvent;
-class TimeDelta;
-
-// -----------------------------------------------------------------------------
-// ConditionVariable
-//
-// This class is a synchronization primitive that can be used to block a thread,
-// or multiple threads at the same time, until:
-// - a notification is received from another thread,
-// - a timeout expires, or
-// - a spurious wakeup occurs
-// Any thread that intends to wait on a ConditionVariable has to acquire a lock
-// on a Mutex first. The |Wait()| and |WaitFor()| operations atomically release
-// the mutex and suspend the execution of the calling thread. When the condition
-// variable is notified, the thread is awakened, and the mutex is reacquired.
-
-class ConditionVariable V8_FINAL {
- public:
- ConditionVariable();
- ~ConditionVariable();
-
- // If any threads are waiting on this condition variable, calling
- // |NotifyOne()| unblocks one of the waiting threads.
- void NotifyOne();
-
- // Unblocks all threads currently waiting for this condition variable.
- void NotifyAll();
-
- // |Wait()| causes the calling thread to block until the condition variable is
- // notified or a spurious wakeup occurs. Atomically releases the mutex, blocks
- // the currently executing thread, and adds it to the list of threads waiting on
- // this condition variable. The thread will be unblocked when |NotifyAll()| or
- // |NotifyOne()| is executed. It may also be unblocked spuriously. When
- // unblocked, regardless of the reason, the lock on the mutex is reacquired
- // and |Wait()| exits.
- void Wait(Mutex* mutex);
-
- // Atomically releases the mutex, blocks the currently executing thread, and
- // adds it to the list of threads waiting on this condition variable. The
- // thread will be unblocked when |NotifyAll()| or |NotifyOne()| is executed,
- // or when the relative timeout |rel_time| expires. It may also be unblocked
- // spuriously. When unblocked, regardless of the reason, the lock on the mutex
- // is reacquired and |WaitFor()| exits. Returns true if the condition variable
- // was notified prior to the timeout.
- bool WaitFor(Mutex* mutex, const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT;
-
- // The implementation-defined native handle type.
-#if V8_OS_POSIX
- typedef pthread_cond_t NativeHandle;
-#elif V8_OS_WIN
- struct Event;
- class NativeHandle V8_FINAL {
- public:
- NativeHandle() : waitlist_(NULL), freelist_(NULL) {}
- ~NativeHandle();
-
- Event* Pre() V8_WARN_UNUSED_RESULT;
- void Post(Event* event, bool result);
-
- Mutex* mutex() { return &mutex_; }
- Event* waitlist() { return waitlist_; }
-
- private:
- Event* waitlist_;
- Event* freelist_;
- Mutex mutex_;
-
- DISALLOW_COPY_AND_ASSIGN(NativeHandle);
- };
-#endif
-
- NativeHandle& native_handle() {
- return native_handle_;
- }
- const NativeHandle& native_handle() const {
- return native_handle_;
- }
-
- private:
- NativeHandle native_handle_;
-
- DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
-};
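Because both |Wait()| and |WaitFor()| may wake spuriously, callers re-check their predicate in a loop. A minimal sketch of that pattern; the queue class itself is hypothetical, only Mutex, LockGuard and ConditionVariable come from these headers:

#include <deque>

class TaskQueue {
 public:
  void Post(int task) {
    LockGuard<Mutex> lock_guard(&mutex_);
    tasks_.push_back(task);
    not_empty_.NotifyOne();              // wake one waiting consumer
  }

  int Take() {
    LockGuard<Mutex> lock_guard(&mutex_);
    // Wait() can return spuriously, so the predicate is always re-checked.
    while (tasks_.empty()) not_empty_.Wait(&mutex_);
    int task = tasks_.front();
    tasks_.pop_front();
    return task;
  }

 private:
  Mutex mutex_;
  ConditionVariable not_empty_;
  std::deque<int> tasks_;
};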
-
-
-// POD ConditionVariable initialized lazily (i.e. the first time Pointer() is
-// called).
-// Usage:
-// static LazyConditionVariable my_condvar =
-// LAZY_CONDITION_VARIABLE_INITIALIZER;
-//
-// void my_function() {
-// LockGuard<Mutex> lock_guard(&my_mutex);
-// my_condvar.Pointer()->Wait(&my_mutex);
-// }
-typedef base::LazyStaticInstance<
- ConditionVariable, base::DefaultConstructTrait<ConditionVariable>,
- base::ThreadSafeInitOnceTrait>::type LazyConditionVariable;
-
-#define LAZY_CONDITION_VARIABLE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
-
-} } // namespace v8::internal
-
-#endif // V8_PLATFORM_CONDITION_VARIABLE_H_
+++ /dev/null
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PLATFORM_ELAPSED_TIMER_H_
-#define V8_PLATFORM_ELAPSED_TIMER_H_
-
-#include "src/checks.h"
-#include "src/platform/time.h"
-
-namespace v8 {
-namespace internal {
-
-class ElapsedTimer V8_FINAL BASE_EMBEDDED {
- public:
-#ifdef DEBUG
- ElapsedTimer() : started_(false) {}
-#endif
-
- // Starts this timer. Once started, a timer can be checked with
- // |Elapsed()| or |HasExpired()|, and may be restarted using |Restart()|.
- // This method must not be called on an already started timer.
- void Start() {
- ASSERT(!IsStarted());
- start_ticks_ = Now();
-#ifdef DEBUG
- started_ = true;
-#endif
- ASSERT(IsStarted());
- }
-
- // Stops this timer. Must not be called on a timer that was not
- // started before.
- void Stop() {
- ASSERT(IsStarted());
- start_ticks_ = TimeTicks();
-#ifdef DEBUG
- started_ = false;
-#endif
- ASSERT(!IsStarted());
- }
-
- // Returns |true| if this timer was started previously.
- bool IsStarted() const {
- ASSERT(started_ || start_ticks_.IsNull());
- ASSERT(!started_ || !start_ticks_.IsNull());
- return !start_ticks_.IsNull();
- }
-
- // Restarts the timer and returns the time elapsed since the previous start.
- // This method is equivalent to obtaining the elapsed time with |Elapsed()|
- // and then starting the timer again, but does so in one single operation,
- // avoiding the need to obtain the clock value twice. It may only be called
- // on a previously started timer.
- TimeDelta Restart() {
- ASSERT(IsStarted());
- TimeTicks ticks = Now();
- TimeDelta elapsed = ticks - start_ticks_;
- ASSERT(elapsed.InMicroseconds() >= 0);
- start_ticks_ = ticks;
- ASSERT(IsStarted());
- return elapsed;
- }
-
- // Returns the time elapsed since the previous start. This method may only
- // be called on a previously started timer.
- TimeDelta Elapsed() const {
- ASSERT(IsStarted());
- TimeDelta elapsed = Now() - start_ticks_;
- ASSERT(elapsed.InMicroseconds() >= 0);
- return elapsed;
- }
-
- // Returns |true| if the specified |time_delta| has elapsed since the
- // previous start, or |false| if not. This method may only be called on
- // a previously started timer.
- bool HasExpired(TimeDelta time_delta) const {
- ASSERT(IsStarted());
- return Elapsed() >= time_delta;
- }
-
- private:
- static V8_INLINE TimeTicks Now() {
- TimeTicks now = TimeTicks::HighResolutionNow();
- ASSERT(!now.IsNull());
- return now;
- }
-
- TimeTicks start_ticks_;
-#ifdef DEBUG
- bool started_;
-#endif
-};
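A small usage sketch of the timer above; DoSomeWork() is a hypothetical unit of work and the 50 ms budget is arbitrary:

static void RunWithBudget() {
  ElapsedTimer timer;
  timer.Start();
  const TimeDelta budget = TimeDelta::FromMilliseconds(50);
  while (!timer.HasExpired(budget)) {
    DoSomeWork();  // hypothetical; the loop stops once the budget is spent
  }
  // Note: Elapsed() can exceed the budget by the length of the last item.
}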
-
-} } // namespace v8::internal
-
-#endif // V8_PLATFORM_ELAPSED_TIMER_H_
+++ /dev/null
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/platform/mutex.h"
-
-#include <errno.h>
-
-namespace v8 {
-namespace internal {
-
-#if V8_OS_POSIX
-
-static V8_INLINE void InitializeNativeHandle(pthread_mutex_t* mutex) {
- int result;
-#if defined(DEBUG)
- // Use an error checking mutex in debug mode.
- pthread_mutexattr_t attr;
- result = pthread_mutexattr_init(&attr);
- ASSERT_EQ(0, result);
- result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
- ASSERT_EQ(0, result);
- result = pthread_mutex_init(mutex, &attr);
- ASSERT_EQ(0, result);
- result = pthread_mutexattr_destroy(&attr);
-#else
- // Use a fast mutex (default attributes).
- result = pthread_mutex_init(mutex, NULL);
-#endif // defined(DEBUG)
- ASSERT_EQ(0, result);
- USE(result);
-}
-
-
-static V8_INLINE void InitializeRecursiveNativeHandle(pthread_mutex_t* mutex) {
- pthread_mutexattr_t attr;
- int result = pthread_mutexattr_init(&attr);
- ASSERT_EQ(0, result);
- result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
- ASSERT_EQ(0, result);
- result = pthread_mutex_init(mutex, &attr);
- ASSERT_EQ(0, result);
- result = pthread_mutexattr_destroy(&attr);
- ASSERT_EQ(0, result);
- USE(result);
-}
-
-
-static V8_INLINE void DestroyNativeHandle(pthread_mutex_t* mutex) {
- int result = pthread_mutex_destroy(mutex);
- ASSERT_EQ(0, result);
- USE(result);
-}
-
-
-static V8_INLINE void LockNativeHandle(pthread_mutex_t* mutex) {
- int result = pthread_mutex_lock(mutex);
- ASSERT_EQ(0, result);
- USE(result);
-}
-
-
-static V8_INLINE void UnlockNativeHandle(pthread_mutex_t* mutex) {
- int result = pthread_mutex_unlock(mutex);
- ASSERT_EQ(0, result);
- USE(result);
-}
-
-
-static V8_INLINE bool TryLockNativeHandle(pthread_mutex_t* mutex) {
- int result = pthread_mutex_trylock(mutex);
- if (result == EBUSY) {
- return false;
- }
- ASSERT_EQ(0, result);
- return true;
-}
-
-#elif V8_OS_WIN
-
-static V8_INLINE void InitializeNativeHandle(PCRITICAL_SECTION cs) {
- InitializeCriticalSection(cs);
-}
-
-
-static V8_INLINE void InitializeRecursiveNativeHandle(PCRITICAL_SECTION cs) {
- InitializeCriticalSection(cs);
-}
-
-
-static V8_INLINE void DestroyNativeHandle(PCRITICAL_SECTION cs) {
- DeleteCriticalSection(cs);
-}
-
-
-static V8_INLINE void LockNativeHandle(PCRITICAL_SECTION cs) {
- EnterCriticalSection(cs);
-}
-
-
-static V8_INLINE void UnlockNativeHandle(PCRITICAL_SECTION cs) {
- LeaveCriticalSection(cs);
-}
-
-
-static V8_INLINE bool TryLockNativeHandle(PCRITICAL_SECTION cs) {
- return TryEnterCriticalSection(cs);
-}
-
-#endif // V8_OS_POSIX
-
-
-Mutex::Mutex() {
- InitializeNativeHandle(&native_handle_);
-#ifdef DEBUG
- level_ = 0;
-#endif
-}
-
-
-Mutex::~Mutex() {
- DestroyNativeHandle(&native_handle_);
- ASSERT_EQ(0, level_);
-}
-
-
-void Mutex::Lock() {
- LockNativeHandle(&native_handle_);
- AssertUnheldAndMark();
-}
-
-
-void Mutex::Unlock() {
- AssertHeldAndUnmark();
- UnlockNativeHandle(&native_handle_);
-}
-
-
-bool Mutex::TryLock() {
- if (!TryLockNativeHandle(&native_handle_)) {
- return false;
- }
- AssertUnheldAndMark();
- return true;
-}
-
-
-RecursiveMutex::RecursiveMutex() {
- InitializeRecursiveNativeHandle(&native_handle_);
-#ifdef DEBUG
- level_ = 0;
-#endif
-}
-
-
-RecursiveMutex::~RecursiveMutex() {
- DestroyNativeHandle(&native_handle_);
- ASSERT_EQ(0, level_);
-}
-
-
-void RecursiveMutex::Lock() {
- LockNativeHandle(&native_handle_);
-#ifdef DEBUG
- ASSERT_LE(0, level_);
- level_++;
-#endif
-}
-
-
-void RecursiveMutex::Unlock() {
-#ifdef DEBUG
- ASSERT_LT(0, level_);
- level_--;
-#endif
- UnlockNativeHandle(&native_handle_);
-}
-
-
-bool RecursiveMutex::TryLock() {
- if (!TryLockNativeHandle(&native_handle_)) {
- return false;
- }
-#ifdef DEBUG
- ASSERT_LE(0, level_);
- level_++;
-#endif
- return true;
-}
-
-} } // namespace v8::internal
+++ /dev/null
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PLATFORM_MUTEX_H_
-#define V8_PLATFORM_MUTEX_H_
-
-#include "src/base/lazy-instance.h"
-#if V8_OS_WIN
-#include "src/base/win32-headers.h"
-#endif
-#include "src/checks.h"
-
-#if V8_OS_POSIX
-#include <pthread.h> // NOLINT
-#endif
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Mutex
-//
-// This class is a synchronization primitive that can be used to protect shared
-// data from being simultaneously accessed by multiple threads. A mutex offers
-// exclusive, non-recursive ownership semantics:
-// - A calling thread owns a mutex from the time that it successfully calls
-// either |Lock()| or |TryLock()| until it calls |Unlock()|.
-// - When a thread owns a mutex, all other threads will block (for calls to
-// |Lock()|) or receive a |false| return value (for |TryLock()|) if they
-// attempt to claim ownership of the mutex.
-// A calling thread must not own the mutex prior to calling |Lock()| or
-// |TryLock()|. The behavior of a program is undefined if a mutex is destroyed
-// while still owned by some thread. The Mutex class is non-copyable.
-
-class Mutex V8_FINAL {
- public:
- Mutex();
- ~Mutex();
-
- // Locks the given mutex. If the mutex is currently unlocked, it immediately
- // becomes locked and owned by the calling thread. If the mutex
- // is already locked by another thread, suspends the calling thread until
- // the mutex is unlocked.
- void Lock();
-
- // Unlocks the given mutex. The mutex is assumed to be locked and owned by
- // the calling thread on entrance.
- void Unlock();
-
- // Tries to lock the given mutex. Returns whether the mutex was
- // successfully locked.
- bool TryLock() V8_WARN_UNUSED_RESULT;
-
- // The implementation-defined native handle type.
-#if V8_OS_POSIX
- typedef pthread_mutex_t NativeHandle;
-#elif V8_OS_WIN
- typedef CRITICAL_SECTION NativeHandle;
-#endif
-
- NativeHandle& native_handle() {
- return native_handle_;
- }
- const NativeHandle& native_handle() const {
- return native_handle_;
- }
-
- private:
- NativeHandle native_handle_;
-#ifdef DEBUG
- int level_;
-#endif
-
- V8_INLINE void AssertHeldAndUnmark() {
-#ifdef DEBUG
- ASSERT_EQ(1, level_);
- level_--;
-#endif
- }
-
- V8_INLINE void AssertUnheldAndMark() {
-#ifdef DEBUG
- ASSERT_EQ(0, level_);
- level_++;
-#endif
- }
-
- friend class ConditionVariable;
-
- DISALLOW_COPY_AND_ASSIGN(Mutex);
-};
-
-
-// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
-// Usage:
-// static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
-//
-// void my_function() {
-// LockGuard<Mutex> guard(my_mutex.Pointer());
-// // Do something.
-// }
-//
-typedef v8::base::LazyStaticInstance<
- Mutex, v8::base::DefaultConstructTrait<Mutex>,
- v8::base::ThreadSafeInitOnceTrait>::type LazyMutex;
-
-#define LAZY_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
-
-
-// -----------------------------------------------------------------------------
-// RecursiveMutex
-//
-// This class is a synchronization primitive that can be used to protect shared
-// data from being simultaneously accessed by multiple threads. A recursive
-// mutex offers exclusive, recursive ownership semantics:
-// - A calling thread owns a recursive mutex for a period of time that starts
-// when it successfully calls either |Lock()| or |TryLock()|. During this
-// period, the thread may make additional calls to |Lock()| or |TryLock()|.
-// The period of ownership ends when the thread makes a matching number of
-// calls to |Unlock()|.
-// - When a thread owns a recursive mutex, all other threads will block (for
-// calls to |Lock()|) or receive a |false| return value (for |TryLock()|) if
-// they attempt to claim ownership of the recursive mutex.
-// - The maximum number of times that a recursive mutex may be locked is
-// unspecified, but after that number is reached, calls to |Lock()| will
- //   probably abort the process and calls to |TryLock()| will return false.
-// The behavior of a program is undefined if a recursive mutex is destroyed
-// while still owned by some thread. The RecursiveMutex class is non-copyable.
-
-class RecursiveMutex V8_FINAL {
- public:
- RecursiveMutex();
- ~RecursiveMutex();
-
- // Locks the mutex. If another thread has already locked the mutex, a call to
- // |Lock()| will block execution until the lock is acquired. A thread may call
- // |Lock()| on a recursive mutex repeatedly. Ownership will only be released
- // after the thread makes a matching number of calls to |Unlock()|.
- // The behavior is undefined if the mutex is not unlocked before being
- // destroyed, i.e. some thread still owns it.
- void Lock();
-
- // Unlocks the mutex if its level of ownership is 1 (there was exactly one
- // more call to |Lock()| than there were calls to |Unlock()| made by this
- // thread), reduces the level of ownership by 1 otherwise. The mutex must be
- // locked by the current thread of execution, otherwise, the behavior is
- // undefined.
- void Unlock();
-
- // Tries to lock the given mutex. Returns whether the mutex was
- // successfully locked.
- bool TryLock() V8_WARN_UNUSED_RESULT;
-
- // The implementation-defined native handle type.
- typedef Mutex::NativeHandle NativeHandle;
-
- NativeHandle& native_handle() {
- return native_handle_;
- }
- const NativeHandle& native_handle() const {
- return native_handle_;
- }
-
- private:
- NativeHandle native_handle_;
-#ifdef DEBUG
- int level_;
-#endif
-
- DISALLOW_COPY_AND_ASSIGN(RecursiveMutex);
-};
-
-
-// POD RecursiveMutex initialized lazily (i.e. the first time Pointer() is
-// called).
-// Usage:
-// static LazyRecursiveMutex my_mutex = LAZY_RECURSIVE_MUTEX_INITIALIZER;
-//
-// void my_function() {
-// LockGuard<RecursiveMutex> guard(my_mutex.Pointer());
-// // Do something.
-// }
-//
-typedef v8::base::LazyStaticInstance<
- RecursiveMutex, v8::base::DefaultConstructTrait<RecursiveMutex>,
- v8::base::ThreadSafeInitOnceTrait>::type LazyRecursiveMutex;
-
-#define LAZY_RECURSIVE_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
-
-
-// -----------------------------------------------------------------------------
-// LockGuard
-//
-// This class is a mutex wrapper that provides a convenient RAII-style mechanism
-// for owning a mutex for the duration of a scoped block.
-// When a LockGuard object is created, it attempts to take ownership of the
-// mutex it is given. When control leaves the scope in which the LockGuard
-// object was created, the LockGuard is destructed and the mutex is released.
-// The LockGuard class is non-copyable.
-
-template <typename Mutex>
-class LockGuard V8_FINAL {
- public:
- explicit LockGuard(Mutex* mutex) : mutex_(mutex) { mutex_->Lock(); }
- ~LockGuard() { mutex_->Unlock(); }
-
- private:
- Mutex* mutex_;
-
- DISALLOW_COPY_AND_ASSIGN(LockGuard);
-};
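To contrast the blocking and non-blocking acquisition paths, a small sketch; the Counter class is hypothetical and only Mutex and LockGuard are taken from this header:

class Counter {
 public:
  Counter() : value_(0) {}

  void Increment() {
    LockGuard<Mutex> guard(&mutex_);  // blocks until the mutex is acquired,
    value_++;                         // releases it when guard leaves scope
  }

  bool TryIncrement() {
    if (!mutex_.TryLock()) return false;  // held by another thread, give up
    value_++;
    mutex_.Unlock();                      // TryLock() is paired manually
    return true;
  }

 private:
  Mutex mutex_;
  int value_;
};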
-
-} } // namespace v8::internal
-
-#endif // V8_PLATFORM_MUTEX_H_
+++ /dev/null
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/platform/semaphore.h"
-
-#if V8_OS_MACOSX
-#include <mach/mach_init.h>
-#include <mach/task.h>
-#endif
-
-#include <errno.h>
-
-#include "src/checks.h"
-#include "src/platform/time.h"
-
-namespace v8 {
-namespace internal {
-
-#if V8_OS_MACOSX
-
-Semaphore::Semaphore(int count) {
- kern_return_t result = semaphore_create(
- mach_task_self(), &native_handle_, SYNC_POLICY_FIFO, count);
- ASSERT_EQ(KERN_SUCCESS, result);
- USE(result);
-}
-
-
-Semaphore::~Semaphore() {
- kern_return_t result = semaphore_destroy(mach_task_self(), native_handle_);
- ASSERT_EQ(KERN_SUCCESS, result);
- USE(result);
-}
-
-
-void Semaphore::Signal() {
- kern_return_t result = semaphore_signal(native_handle_);
- ASSERT_EQ(KERN_SUCCESS, result);
- USE(result);
-}
-
-
-void Semaphore::Wait() {
- while (true) {
- kern_return_t result = semaphore_wait(native_handle_);
- if (result == KERN_SUCCESS) return; // Semaphore was signalled.
- ASSERT_EQ(KERN_ABORTED, result);
- }
-}
-
-
-bool Semaphore::WaitFor(const TimeDelta& rel_time) {
- TimeTicks now = TimeTicks::Now();
- TimeTicks end = now + rel_time;
- while (true) {
- mach_timespec_t ts;
- if (now >= end) {
- // Return immediately if semaphore was not signalled.
- ts.tv_sec = 0;
- ts.tv_nsec = 0;
- } else {
- ts = (end - now).ToMachTimespec();
- }
- kern_return_t result = semaphore_timedwait(native_handle_, ts);
- if (result == KERN_SUCCESS) return true; // Semaphore was signalled.
- if (result == KERN_OPERATION_TIMED_OUT) return false; // Timeout.
- ASSERT_EQ(KERN_ABORTED, result);
- now = TimeTicks::Now();
- }
-}
-
-#elif V8_OS_POSIX
-
-Semaphore::Semaphore(int count) {
- ASSERT(count >= 0);
- int result = sem_init(&native_handle_, 0, count);
- ASSERT_EQ(0, result);
- USE(result);
-}
-
-
-Semaphore::~Semaphore() {
- int result = sem_destroy(&native_handle_);
- ASSERT_EQ(0, result);
- USE(result);
-}
-
-
-void Semaphore::Signal() {
- int result = sem_post(&native_handle_);
- ASSERT_EQ(0, result);
- USE(result);
-}
-
-
-void Semaphore::Wait() {
- while (true) {
- int result = sem_wait(&native_handle_);
- if (result == 0) return; // Semaphore was signalled.
- // Signal caused spurious wakeup.
- ASSERT_EQ(-1, result);
- ASSERT_EQ(EINTR, errno);
- }
-}
-
-
-bool Semaphore::WaitFor(const TimeDelta& rel_time) {
- // Compute the time for end of timeout.
- const Time time = Time::NowFromSystemTime() + rel_time;
- const struct timespec ts = time.ToTimespec();
-
- // Wait for semaphore signalled or timeout.
- while (true) {
- int result = sem_timedwait(&native_handle_, &ts);
- if (result == 0) return true; // Semaphore was signalled.
-#if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
- if (result > 0) {
- // sem_timedwait in glibc prior to 2.3.4 returns the errno instead of -1.
- errno = result;
- result = -1;
- }
-#endif
- if (result == -1 && errno == ETIMEDOUT) {
- // Timed out while waiting for semaphore.
- return false;
- }
- // Signal caused spurious wakeup.
- ASSERT_EQ(-1, result);
- ASSERT_EQ(EINTR, errno);
- }
-}
-
-#elif V8_OS_WIN
-
-Semaphore::Semaphore(int count) {
- ASSERT(count >= 0);
- native_handle_ = ::CreateSemaphoreA(NULL, count, 0x7fffffff, NULL);
- ASSERT(native_handle_ != NULL);
-}
-
-
-Semaphore::~Semaphore() {
- BOOL result = CloseHandle(native_handle_);
- ASSERT(result);
- USE(result);
-}
-
-
-void Semaphore::Signal() {
- LONG dummy;
- BOOL result = ReleaseSemaphore(native_handle_, 1, &dummy);
- ASSERT(result);
- USE(result);
-}
-
-
-void Semaphore::Wait() {
- DWORD result = WaitForSingleObject(native_handle_, INFINITE);
- ASSERT(result == WAIT_OBJECT_0);
- USE(result);
-}
-
-
-bool Semaphore::WaitFor(const TimeDelta& rel_time) {
- TimeTicks now = TimeTicks::Now();
- TimeTicks end = now + rel_time;
- while (true) {
- int64_t msec = (end - now).InMilliseconds();
- if (msec >= static_cast<int64_t>(INFINITE)) {
- DWORD result = WaitForSingleObject(native_handle_, INFINITE - 1);
- if (result == WAIT_OBJECT_0) {
- return true;
- }
- ASSERT(result == WAIT_TIMEOUT);
- now = TimeTicks::Now();
- } else {
- DWORD result = WaitForSingleObject(
- native_handle_, (msec < 0) ? 0 : static_cast<DWORD>(msec));
- if (result == WAIT_TIMEOUT) {
- return false;
- }
- ASSERT(result == WAIT_OBJECT_0);
- return true;
- }
- }
-}
-
-#endif // V8_OS_MACOSX
-
-} } // namespace v8::internal
+++ /dev/null
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PLATFORM_SEMAPHORE_H_
-#define V8_PLATFORM_SEMAPHORE_H_
-
-#include "src/base/lazy-instance.h"
-#if V8_OS_WIN
-#include "src/base/win32-headers.h"
-#endif
-
-#if V8_OS_MACOSX
-#include <mach/semaphore.h> // NOLINT
-#elif V8_OS_POSIX
-#include <semaphore.h> // NOLINT
-#endif
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class TimeDelta;
-
-// ----------------------------------------------------------------------------
-// Semaphore
-//
-// A semaphore object is a synchronization object that maintains a count. The
-// count is decremented each time a thread completes a wait for the semaphore
-// object and incremented each time a thread signals the semaphore. When the
- // count reaches zero, threads waiting for the semaphore block until the
-// count becomes non-zero.
-
-class Semaphore V8_FINAL {
- public:
- explicit Semaphore(int count);
- ~Semaphore();
-
- // Increments the semaphore counter.
- void Signal();
-
- // Suspends the calling thread until the semaphore counter is non-zero
- // and then decrements the semaphore counter.
- void Wait();
-
- // Suspends the calling thread until the counter is non-zero or the timeout
- // time has passed. If timeout happens the return value is false and the
- // counter is unchanged. Otherwise the semaphore counter is decremented and
- // true is returned.
- bool WaitFor(const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT;
-
-#if V8_OS_MACOSX
- typedef semaphore_t NativeHandle;
-#elif V8_OS_POSIX
- typedef sem_t NativeHandle;
-#elif V8_OS_WIN
- typedef HANDLE NativeHandle;
-#endif
-
- NativeHandle& native_handle() {
- return native_handle_;
- }
- const NativeHandle& native_handle() const {
- return native_handle_;
- }
-
- private:
- NativeHandle native_handle_;
-
- DISALLOW_COPY_AND_ASSIGN(Semaphore);
-};
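A minimal signalling sketch using the interface above; the producer/consumer functions are hypothetical, and code that needs a static semaphore without a static initializer would use the LazySemaphore helper defined below instead of a plain static:

static Semaphore items_available(0);    // starts at zero: nothing to consume

void ProduceOne() {
  // ... make one item available ...
  items_available.Signal();             // increments the count, wakes a waiter
}

void ConsumeOne() {
  items_available.Wait();               // blocks while the count is zero
  // ... consume the item ...
}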
-
-
-// POD Semaphore initialized lazily (i.e. the first time Pointer() is called).
-// Usage:
-// // The following semaphore starts at 0.
-// static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;
-//
-// void my_function() {
-// // Do something with my_semaphore.Pointer().
-// }
-//
-
-template <int N>
-struct CreateSemaphoreTrait {
- static Semaphore* Create() {
- return new Semaphore(N);
- }
-};
-
-template <int N>
-struct LazySemaphore {
- typedef typename v8::base::LazyDynamicInstance<
- Semaphore,
- CreateSemaphoreTrait<N>,
- v8::base::ThreadSafeInitOnceTrait>::type type;
-};
-
-#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
-
-} } // namespace v8::internal
-
-#endif // V8_PLATFORM_SEMAPHORE_H_
+++ /dev/null
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/platform/time.h"
-
-#if V8_OS_POSIX
-#include <sys/time.h>
-#endif
-#if V8_OS_MACOSX
-#include <mach/mach_time.h>
-#endif
-
-#include <string.h>
-
-#if V8_OS_WIN
-#include "src/base/lazy-instance.h"
-#include "src/base/win32-headers.h"
-#endif
-#include "src/checks.h"
-#include "src/cpu.h"
-#include "src/platform.h"
-
-namespace v8 {
-namespace internal {
-
-TimeDelta TimeDelta::FromDays(int days) {
- return TimeDelta(days * Time::kMicrosecondsPerDay);
-}
-
-
-TimeDelta TimeDelta::FromHours(int hours) {
- return TimeDelta(hours * Time::kMicrosecondsPerHour);
-}
-
-
-TimeDelta TimeDelta::FromMinutes(int minutes) {
- return TimeDelta(minutes * Time::kMicrosecondsPerMinute);
-}
-
-
-TimeDelta TimeDelta::FromSeconds(int64_t seconds) {
- return TimeDelta(seconds * Time::kMicrosecondsPerSecond);
-}
-
-
-TimeDelta TimeDelta::FromMilliseconds(int64_t milliseconds) {
- return TimeDelta(milliseconds * Time::kMicrosecondsPerMillisecond);
-}
-
-
-TimeDelta TimeDelta::FromNanoseconds(int64_t nanoseconds) {
- return TimeDelta(nanoseconds / Time::kNanosecondsPerMicrosecond);
-}
-
-
-int TimeDelta::InDays() const {
- return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
-}
-
-
-int TimeDelta::InHours() const {
- return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
-}
-
-
-int TimeDelta::InMinutes() const {
- return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
-}
-
-
-double TimeDelta::InSecondsF() const {
- return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
-}
-
-
-int64_t TimeDelta::InSeconds() const {
- return delta_ / Time::kMicrosecondsPerSecond;
-}
-
-
-double TimeDelta::InMillisecondsF() const {
- return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
-}
-
-
-int64_t TimeDelta::InMilliseconds() const {
- return delta_ / Time::kMicrosecondsPerMillisecond;
-}
-
-
-int64_t TimeDelta::InNanoseconds() const {
- return delta_ * Time::kNanosecondsPerMicrosecond;
-}
-
-
-#if V8_OS_MACOSX
-
-TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
- ASSERT_GE(ts.tv_nsec, 0);
- ASSERT_LT(ts.tv_nsec,
- static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT
- return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
- ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
-}
-
-
-struct mach_timespec TimeDelta::ToMachTimespec() const {
- struct mach_timespec ts;
- ASSERT(delta_ >= 0);
- ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond;
- ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
- Time::kNanosecondsPerMicrosecond;
- return ts;
-}
-
-#endif // V8_OS_MACOSX
-
-
-#if V8_OS_POSIX
-
-TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
- ASSERT_GE(ts.tv_nsec, 0);
- ASSERT_LT(ts.tv_nsec,
- static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT
- return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
- ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
-}
-
-
-struct timespec TimeDelta::ToTimespec() const {
- struct timespec ts;
- ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond;
- ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
- Time::kNanosecondsPerMicrosecond;
- return ts;
-}
-
-#endif // V8_OS_POSIX
-
-
-#if V8_OS_WIN
-
-// We implement time using the high-resolution timers so that we can get
-// timeouts which are smaller than 10-15ms. To avoid any drift, we
-// periodically resync the internal clock to the system clock.
-class Clock V8_FINAL {
- public:
- Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}
-
- Time Now() {
- // Time between resampling the un-granular clock for this API (1 minute).
- const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);
-
- LockGuard<Mutex> lock_guard(&mutex_);
-
- // Determine current time and ticks.
- TimeTicks ticks = GetSystemTicks();
- Time time = GetSystemTime();
-
- // Check if we need to synchronize with the system clock due to a backwards
- // time change or the amount of time elapsed.
- TimeDelta elapsed = ticks - initial_ticks_;
- if (time < initial_time_ || elapsed > kMaxElapsedTime) {
- initial_ticks_ = ticks;
- initial_time_ = time;
- return time;
- }
-
- return initial_time_ + elapsed;
- }
-
- Time NowFromSystemTime() {
- LockGuard<Mutex> lock_guard(&mutex_);
- initial_ticks_ = GetSystemTicks();
- initial_time_ = GetSystemTime();
- return initial_time_;
- }
-
- private:
- static TimeTicks GetSystemTicks() {
- return TimeTicks::Now();
- }
-
- static Time GetSystemTime() {
- FILETIME ft;
- ::GetSystemTimeAsFileTime(&ft);
- return Time::FromFiletime(ft);
- }
-
- TimeTicks initial_ticks_;
- Time initial_time_;
- Mutex mutex_;
-};
-
-
-static base::LazyStaticInstance<Clock, base::DefaultConstructTrait<Clock>,
- base::ThreadSafeInitOnceTrait>::type clock =
- LAZY_STATIC_INSTANCE_INITIALIZER;
-
-
-Time Time::Now() {
- return clock.Pointer()->Now();
-}
-
-
-Time Time::NowFromSystemTime() {
- return clock.Pointer()->NowFromSystemTime();
-}
-
-
-// Time between windows epoch and standard epoch.
-static const int64_t kTimeToEpochInMicroseconds = V8_INT64_C(11644473600000000);
-
-
-Time Time::FromFiletime(FILETIME ft) {
- if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
- return Time();
- }
- if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
- ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
- return Max();
- }
- int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
- (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
- return Time(us - kTimeToEpochInMicroseconds);
-}
-
-
-FILETIME Time::ToFiletime() const {
- ASSERT(us_ >= 0);
- FILETIME ft;
- if (IsNull()) {
- ft.dwLowDateTime = 0;
- ft.dwHighDateTime = 0;
- return ft;
- }
- if (IsMax()) {
- ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
- ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
- return ft;
- }
- uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
- ft.dwLowDateTime = static_cast<DWORD>(us);
- ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
- return ft;
-}
-
-#elif V8_OS_POSIX
-
-Time Time::Now() {
- struct timeval tv;
- int result = gettimeofday(&tv, NULL);
- ASSERT_EQ(0, result);
- USE(result);
- return FromTimeval(tv);
-}
-
-
-Time Time::NowFromSystemTime() {
- return Now();
-}
-
-
-Time Time::FromTimespec(struct timespec ts) {
- ASSERT(ts.tv_nsec >= 0);
- ASSERT(ts.tv_nsec < static_cast<long>(kNanosecondsPerSecond)); // NOLINT
- if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
- return Time();
- }
- if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) && // NOLINT
- ts.tv_sec == std::numeric_limits<time_t>::max()) {
- return Max();
- }
- return Time(ts.tv_sec * kMicrosecondsPerSecond +
- ts.tv_nsec / kNanosecondsPerMicrosecond);
-}
-
-
-struct timespec Time::ToTimespec() const {
- struct timespec ts;
- if (IsNull()) {
- ts.tv_sec = 0;
- ts.tv_nsec = 0;
- return ts;
- }
- if (IsMax()) {
- ts.tv_sec = std::numeric_limits<time_t>::max();
- ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1); // NOLINT
- return ts;
- }
- ts.tv_sec = us_ / kMicrosecondsPerSecond;
- ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
- return ts;
-}
-
-
-Time Time::FromTimeval(struct timeval tv) {
- ASSERT(tv.tv_usec >= 0);
- ASSERT(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
- if (tv.tv_usec == 0 && tv.tv_sec == 0) {
- return Time();
- }
- if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
- tv.tv_sec == std::numeric_limits<time_t>::max()) {
- return Max();
- }
- return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
-}
-
-
-struct timeval Time::ToTimeval() const {
- struct timeval tv;
- if (IsNull()) {
- tv.tv_sec = 0;
- tv.tv_usec = 0;
- return tv;
- }
- if (IsMax()) {
- tv.tv_sec = std::numeric_limits<time_t>::max();
- tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
- return tv;
- }
- tv.tv_sec = us_ / kMicrosecondsPerSecond;
- tv.tv_usec = us_ % kMicrosecondsPerSecond;
- return tv;
-}
-
-#endif // V8_OS_WIN
-
-
-Time Time::FromJsTime(double ms_since_epoch) {
- // The epoch is a valid time, so this constructor doesn't interpret
- // 0 as the null time.
- if (ms_since_epoch == std::numeric_limits<double>::max()) {
- return Max();
- }
- return Time(
- static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
-}
-
-
-double Time::ToJsTime() const {
- if (IsNull()) {
- // Preserve 0 so the invalid result doesn't depend on the platform.
- return 0;
- }
- if (IsMax()) {
- // Preserve max without offset to prevent overflow.
- return std::numeric_limits<double>::max();
- }
- return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
-}
-
-
-#if V8_OS_WIN
-
-class TickClock {
- public:
- virtual ~TickClock() {}
- virtual int64_t Now() = 0;
- virtual bool IsHighResolution() = 0;
-};
-
-
-// Overview of time counters:
-// (1) CPU cycle counter. (Retrieved via RDTSC)
-// The CPU counter provides the highest resolution time stamp and is the least
-// expensive to retrieve. However, the CPU counter is unreliable and should not
-// be used in production. Its biggest issue is that it is per processor and it
-// is not synchronized between processors. Also, on some computers, the counters
-// will change frequency due to thermal and power changes, and stop in some
-// states.
-//
-// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
-// resolution (100 nanoseconds) time stamp but is comparatively more expensive
-// to retrieve. What QueryPerformanceCounter actually does is up to the HAL.
-// (with some help from ACPI).
-// According to http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx
-// in the worst case, it gets the counter from the rollover interrupt on the
-// programmable interrupt timer. In best cases, the HAL may conclude that the
-// RDTSC counter runs at a constant frequency, then it uses that instead. On
-// multiprocessor machines, it will try to verify the values returned from
-// RDTSC on each processor are consistent with each other, and apply a handful
-// of workarounds for known buggy hardware. In other words, QPC is supposed to
-// give consistent result on a multiprocessor computer, but it is unreliable in
-// reality due to bugs in BIOS or HAL on some, especially old computers.
-// With recent updates on HAL and newer BIOS, QPC is getting more reliable but
-// it should be used with caution.
-//
-// (3) System time. The system time provides a low-resolution (typically 10 to
-// 55 ms) time stamp but is comparatively less expensive to retrieve and more
-// reliable.
-class HighResolutionTickClock V8_FINAL : public TickClock {
- public:
- explicit HighResolutionTickClock(int64_t ticks_per_second)
- : ticks_per_second_(ticks_per_second) {
- ASSERT_LT(0, ticks_per_second);
- }
- virtual ~HighResolutionTickClock() {}
-
- virtual int64_t Now() V8_OVERRIDE {
- LARGE_INTEGER now;
- BOOL result = QueryPerformanceCounter(&now);
- ASSERT(result);
- USE(result);
-
- // Intentionally calculate microseconds in a roundabout manner to avoid
- // overflow and precision issues. Think twice before simplifying!
- int64_t whole_seconds = now.QuadPart / ticks_per_second_;
- int64_t leftover_ticks = now.QuadPart % ticks_per_second_;
- int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) +
- ((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_);
-
- // Make sure we never return 0 here, so that TimeTicks::HighResolutionNow()
- // will never return 0.
- return ticks + 1;
- }
-
- virtual bool IsHighResolution() V8_OVERRIDE {
- return true;
- }
-
- private:
- int64_t ticks_per_second_;
-};
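To make the "roundabout" conversion in Now() concrete, a worked example with an illustrative QPC frequency; the numbers are only there to check the arithmetic and are not taken from the diff:

// Numeric check of the conversion above, with an illustrative frequency.
const int64_t ticks_per_second = 3579545;    // classic ACPI PM timer rate
const int64_t now_ticks = 10000000000LL;     // ~46.6 minutes of uptime
const int64_t whole_seconds  = now_ticks / ticks_per_second;   // 2793
const int64_t leftover_ticks = now_ticks % ticks_per_second;   // 2330815
const int64_t us = whole_seconds * 1000000 +
                   leftover_ticks * 1000000 / ticks_per_second;  // 2793651148
// The naive now_ticks * 1000000 / ticks_per_second form overflows int64_t
// once the counter passes ~9.2e12 ticks (about a month of uptime at this
// frequency), which is why the whole seconds are split out first.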
-
-
-class RolloverProtectedTickClock V8_FINAL : public TickClock {
- public:
- // We initialize rollover_ms_ to 1 to ensure that we will never
- // return 0 from TimeTicks::HighResolutionNow() and TimeTicks::Now() below.
- RolloverProtectedTickClock() : last_seen_now_(0), rollover_ms_(1) {}
- virtual ~RolloverProtectedTickClock() {}
-
- virtual int64_t Now() V8_OVERRIDE {
- LockGuard<Mutex> lock_guard(&mutex_);
- // We use timeGetTime() to implement TimeTicks::Now(), which rolls over
- // every ~49.7 days. We try to track rollover ourselves, which works if
- // TimeTicks::Now() is called at least every 49 days.
- // Note that we do not use GetTickCount() here, since timeGetTime() gives
- // more predictable delta values, as described here:
- // http://blogs.msdn.com/b/larryosterman/archive/2009/09/02/what-s-the-difference-between-gettickcount-and-timegettime.aspx
- // timeGetTime() provides 1ms granularity when combined with
- // timeBeginPeriod(). If the host application for V8 wants fast timers, it
- // can use timeBeginPeriod() to increase the resolution.
- DWORD now = timeGetTime();
- if (now < last_seen_now_) {
- rollover_ms_ += V8_INT64_C(0x100000000); // ~49.7 days.
- }
- last_seen_now_ = now;
- return (now + rollover_ms_) * Time::kMicrosecondsPerMillisecond;
- }
-
- virtual bool IsHighResolution() V8_OVERRIDE {
- return false;
- }
-
- private:
- Mutex mutex_;
- DWORD last_seen_now_;
- int64_t rollover_ms_;
-};
-
-
-static base::LazyStaticInstance<
- RolloverProtectedTickClock,
- base::DefaultConstructTrait<RolloverProtectedTickClock>,
- base::ThreadSafeInitOnceTrait>::type tick_clock =
- LAZY_STATIC_INSTANCE_INITIALIZER;
-
-
-struct CreateHighResTickClockTrait {
- static TickClock* Create() {
- // Check if the installed hardware supports a high-resolution performance
- // counter, and if not, fall back to the low-resolution tick clock.
- LARGE_INTEGER ticks_per_second;
- if (!QueryPerformanceFrequency(&ticks_per_second)) {
- return tick_clock.Pointer();
- }
-
- // On Athlon X2 CPUs (e.g. model 15) the QueryPerformanceCounter
- // is unreliable; fall back to the low-resolution tick clock.
- CPU cpu;
- if (strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15) {
- return tick_clock.Pointer();
- }
-
- return new HighResolutionTickClock(ticks_per_second.QuadPart);
- }
-};
-
-
-static base::LazyDynamicInstance<TickClock,
- CreateHighResTickClockTrait,
- base::ThreadSafeInitOnceTrait>::type high_res_tick_clock =
- LAZY_DYNAMIC_INSTANCE_INITIALIZER;
-
-
-TimeTicks TimeTicks::Now() {
- // Make sure we never return 0 here.
- TimeTicks ticks(tick_clock.Pointer()->Now());
- ASSERT(!ticks.IsNull());
- return ticks;
-}
-
-
-TimeTicks TimeTicks::HighResolutionNow() {
- // Make sure we never return 0 here.
- TimeTicks ticks(high_res_tick_clock.Pointer()->Now());
- ASSERT(!ticks.IsNull());
- return ticks;
-}
-
-
-// static
-bool TimeTicks::IsHighResolutionClockWorking() {
- return high_res_tick_clock.Pointer()->IsHighResolution();
-}
-
-#else // V8_OS_WIN
-
-TimeTicks TimeTicks::Now() {
- return HighResolutionNow();
-}
-
-
-TimeTicks TimeTicks::HighResolutionNow() {
- int64_t ticks;
-#if V8_OS_MACOSX
- static struct mach_timebase_info info;
- if (info.denom == 0) {
- kern_return_t result = mach_timebase_info(&info);
- ASSERT_EQ(KERN_SUCCESS, result);
- USE(result);
- }
- ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
- info.numer / info.denom);
-#elif V8_OS_SOLARIS
- ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
-#elif V8_LIBRT_NOT_AVAILABLE
- // TODO(bmeurer): This is a temporary hack to support cross-compiling
- // Chrome for Android in AOSP. Remove this once AOSP is fixed, also
- // cleanup the tools/gyp/v8.gyp file.
- struct timeval tv;
- int result = gettimeofday(&tv, NULL);
- ASSERT_EQ(0, result);
- USE(result);
- ticks = (tv.tv_sec * Time::kMicrosecondsPerSecond + tv.tv_usec);
-#elif V8_OS_POSIX
- struct timespec ts;
- int result = clock_gettime(CLOCK_MONOTONIC, &ts);
- ASSERT_EQ(0, result);
- USE(result);
- ticks = (ts.tv_sec * Time::kMicrosecondsPerSecond +
- ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
-#endif // V8_OS_MACOSX
- // Make sure we never return 0 here.
- return TimeTicks(ticks + 1);
-}
-
-
-// static
-bool TimeTicks::IsHighResolutionClockWorking() {
- return true;
-}
-
-#endif // V8_OS_WIN
-
-} } // namespace v8::internal
+++ /dev/null
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PLATFORM_TIME_H_
-#define V8_PLATFORM_TIME_H_
-
-#include <time.h>
-#include <limits>
-
-#include "src/base/macros.h"
-
-// Forward declarations.
-extern "C" {
-struct _FILETIME;
-struct mach_timespec;
-struct timespec;
-struct timeval;
-}
-
-namespace v8 {
-namespace internal {
-
-class Time;
-class TimeTicks;
-
-// -----------------------------------------------------------------------------
-// TimeDelta
-//
-// This class represents a duration of time, internally represented in
-// microseconds.
-
-class TimeDelta V8_FINAL {
- public:
- TimeDelta() : delta_(0) {}
-
- // Converts units of time to TimeDeltas.
- static TimeDelta FromDays(int days);
- static TimeDelta FromHours(int hours);
- static TimeDelta FromMinutes(int minutes);
- static TimeDelta FromSeconds(int64_t seconds);
- static TimeDelta FromMilliseconds(int64_t milliseconds);
- static TimeDelta FromMicroseconds(int64_t microseconds) {
- return TimeDelta(microseconds);
- }
- static TimeDelta FromNanoseconds(int64_t nanoseconds);
-
- // Returns the time delta in some unit. The F versions return a floating
- // point value, the "regular" versions return a rounded-down value.
- //
- // InMillisecondsRoundedUp() instead returns an integer that is rounded up
- // to the next full millisecond.
- int InDays() const;
- int InHours() const;
- int InMinutes() const;
- double InSecondsF() const;
- int64_t InSeconds() const;
- double InMillisecondsF() const;
- int64_t InMilliseconds() const;
- int64_t InMillisecondsRoundedUp() const;
- int64_t InMicroseconds() const { return delta_; }
- int64_t InNanoseconds() const;
-
- // Converts to/from Mach time specs.
- static TimeDelta FromMachTimespec(struct mach_timespec ts);
- struct mach_timespec ToMachTimespec() const;
-
- // Converts to/from POSIX time specs.
- static TimeDelta FromTimespec(struct timespec ts);
- struct timespec ToTimespec() const;
-
- TimeDelta& operator=(const TimeDelta& other) {
- delta_ = other.delta_;
- return *this;
- }
-
- // Computations with other deltas.
- TimeDelta operator+(const TimeDelta& other) const {
- return TimeDelta(delta_ + other.delta_);
- }
- TimeDelta operator-(const TimeDelta& other) const {
- return TimeDelta(delta_ - other.delta_);
- }
-
- TimeDelta& operator+=(const TimeDelta& other) {
- delta_ += other.delta_;
- return *this;
- }
- TimeDelta& operator-=(const TimeDelta& other) {
- delta_ -= other.delta_;
- return *this;
- }
- TimeDelta operator-() const {
- return TimeDelta(-delta_);
- }
-
- double TimesOf(const TimeDelta& other) const {
- return static_cast<double>(delta_) / static_cast<double>(other.delta_);
- }
- double PercentOf(const TimeDelta& other) const {
- return TimesOf(other) * 100.0;
- }
-
- // Computations with ints, note that we only allow multiplicative operations
- // with ints, and additive operations with other deltas.
- TimeDelta operator*(int64_t a) const {
- return TimeDelta(delta_ * a);
- }
- TimeDelta operator/(int64_t a) const {
- return TimeDelta(delta_ / a);
- }
- TimeDelta& operator*=(int64_t a) {
- delta_ *= a;
- return *this;
- }
- TimeDelta& operator/=(int64_t a) {
- delta_ /= a;
- return *this;
- }
- int64_t operator/(const TimeDelta& other) const {
- return delta_ / other.delta_;
- }
-
- // Comparison operators.
- bool operator==(const TimeDelta& other) const {
- return delta_ == other.delta_;
- }
- bool operator!=(const TimeDelta& other) const {
- return delta_ != other.delta_;
- }
- bool operator<(const TimeDelta& other) const {
- return delta_ < other.delta_;
- }
- bool operator<=(const TimeDelta& other) const {
- return delta_ <= other.delta_;
- }
- bool operator>(const TimeDelta& other) const {
- return delta_ > other.delta_;
- }
- bool operator>=(const TimeDelta& other) const {
- return delta_ >= other.delta_;
- }
-
- private:
- // Constructs a delta given the duration in microseconds. This is private
- // to avoid confusion by callers with an integer constructor. Use
- // FromSeconds, FromMilliseconds, etc. instead.
- explicit TimeDelta(int64_t delta) : delta_(delta) {}
-
- // Delta in microseconds.
- int64_t delta_;
-};
-
-
-// -----------------------------------------------------------------------------
-// Time
-//
-// This class represents an absolute point in time, internally represented as
-// microseconds (s/1,000,000) since 00:00:00 UTC, January 1, 1970.
-
-class Time V8_FINAL {
- public:
- static const int64_t kMillisecondsPerSecond = 1000;
- static const int64_t kMicrosecondsPerMillisecond = 1000;
- static const int64_t kMicrosecondsPerSecond = kMicrosecondsPerMillisecond *
- kMillisecondsPerSecond;
- static const int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
- static const int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
- static const int64_t kMicrosecondsPerDay = kMicrosecondsPerHour * 24;
- static const int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
- static const int64_t kNanosecondsPerMicrosecond = 1000;
- static const int64_t kNanosecondsPerSecond = kNanosecondsPerMicrosecond *
- kMicrosecondsPerSecond;
-
- // Contains the NULL time. Use Time::Now() to get the current time.
- Time() : us_(0) {}
-
- // Returns true if the time object has not been initialized.
- bool IsNull() const { return us_ == 0; }
-
- // Returns true if the time object is the maximum time.
- bool IsMax() const { return us_ == std::numeric_limits<int64_t>::max(); }
-
- // Returns the current time. Watch out, the system might adjust its clock
- // in which case time will actually go backwards. We don't guarantee that
- // times are increasing, or that two calls to Now() won't be the same.
- static Time Now();
-
- // Returns the current time. Same as Now() except that this function always
- // uses system time so that there are no discrepancies between the returned
- // time and system time even on virtual environments including our test bot.
- // For timing sensitive unittests, this function should be used.
- static Time NowFromSystemTime();
-
- // Returns the time of the Unix epoch (Jan 1, 1970).
- static Time UnixEpoch() { return Time(0); }
-
- // Returns the maximum time, which should be greater than any reasonable time
- // with which we might compare it.
- static Time Max() { return Time(std::numeric_limits<int64_t>::max()); }
-
- // Converts to/from internal values. The meaning of the "internal value" is
- // completely up to the implementation, so it should be treated as opaque.
- static Time FromInternalValue(int64_t value) {
- return Time(value);
- }
- int64_t ToInternalValue() const {
- return us_;
- }
-
- // Converts to/from POSIX time specs.
- static Time FromTimespec(struct timespec ts);
- struct timespec ToTimespec() const;
-
- // Converts to/from POSIX time values.
- static Time FromTimeval(struct timeval tv);
- struct timeval ToTimeval() const;
-
- // Converts to/from Windows file times.
- static Time FromFiletime(struct _FILETIME ft);
- struct _FILETIME ToFiletime() const;
-
- // Converts to/from the Javascript convention for times, a number of
- // milliseconds since the epoch:
- static Time FromJsTime(double ms_since_epoch);
- double ToJsTime() const;
-
- Time& operator=(const Time& other) {
- us_ = other.us_;
- return *this;
- }
-
- // Compute the difference between two times.
- TimeDelta operator-(const Time& other) const {
- return TimeDelta::FromMicroseconds(us_ - other.us_);
- }
-
- // Modify by some time delta.
- Time& operator+=(const TimeDelta& delta) {
- us_ += delta.InMicroseconds();
- return *this;
- }
- Time& operator-=(const TimeDelta& delta) {
- us_ -= delta.InMicroseconds();
- return *this;
- }
-
- // Return a new time modified by some delta.
- Time operator+(const TimeDelta& delta) const {
- return Time(us_ + delta.InMicroseconds());
- }
- Time operator-(const TimeDelta& delta) const {
- return Time(us_ - delta.InMicroseconds());
- }
-
- // Comparison operators
- bool operator==(const Time& other) const {
- return us_ == other.us_;
- }
- bool operator!=(const Time& other) const {
- return us_ != other.us_;
- }
- bool operator<(const Time& other) const {
- return us_ < other.us_;
- }
- bool operator<=(const Time& other) const {
- return us_ <= other.us_;
- }
- bool operator>(const Time& other) const {
- return us_ > other.us_;
- }
- bool operator>=(const Time& other) const {
- return us_ >= other.us_;
- }
-
- private:
- explicit Time(int64_t us) : us_(us) {}
-
- // Time in microseconds in UTC.
- int64_t us_;
-};
-
-inline Time operator+(const TimeDelta& delta, const Time& time) {
- return time + delta;
-}
-
-
-// -----------------------------------------------------------------------------
-// TimeTicks
-//
-// This class represents an abstract time that generally increases
-// monotonically and is used for measuring time durations. It is internally
-// represented in microseconds. It cannot be converted to a human-readable
-// time, but is guaranteed not to decrease (whereas Time::Now() may decrease
-// or jump if the user changes the computer clock). Note that TimeTicks may
-// "stand still", for example if the computer is suspended.
-
-class TimeTicks V8_FINAL {
- public:
- TimeTicks() : ticks_(0) {}
-
- // Platform-dependent tick count representing "right now."
- // The resolution of this clock is ~1-15ms. Resolution varies depending
- // on hardware/operating system configuration.
- // This method never returns a null TimeTicks.
- static TimeTicks Now();
-
- // Returns a platform-dependent high-resolution tick count. Implementation
- // is hardware dependent and may or may not return sub-millisecond
- // resolution. THIS CALL IS GENERALLY MUCH MORE EXPENSIVE THAN Now() AND
- // SHOULD ONLY BE USED WHEN IT IS REALLY NEEDED.
- // This method never returns a null TimeTicks.
- static TimeTicks HighResolutionNow();
-
- // Returns true if the high-resolution clock is working on this system.
- static bool IsHighResolutionClockWorking();
-
- // Returns true if this object has not been initialized.
- bool IsNull() const { return ticks_ == 0; }
-
- // Converts to/from internal values. The meaning of the "internal value" is
- // completely up to the implementation, so it should be treated as opaque.
- static TimeTicks FromInternalValue(int64_t value) {
- return TimeTicks(value);
- }
- int64_t ToInternalValue() const {
- return ticks_;
- }
-
- TimeTicks& operator=(const TimeTicks other) {
- ticks_ = other.ticks_;
- return *this;
- }
-
- // Compute the difference between two times.
- TimeDelta operator-(const TimeTicks other) const {
- return TimeDelta::FromMicroseconds(ticks_ - other.ticks_);
- }
-
- // Modify by some time delta.
- TimeTicks& operator+=(const TimeDelta& delta) {
- ticks_ += delta.InMicroseconds();
- return *this;
- }
- TimeTicks& operator-=(const TimeDelta& delta) {
- ticks_ -= delta.InMicroseconds();
- return *this;
- }
-
- // Return a new TimeTicks modified by some delta.
- TimeTicks operator+(const TimeDelta& delta) const {
- return TimeTicks(ticks_ + delta.InMicroseconds());
- }
- TimeTicks operator-(const TimeDelta& delta) const {
- return TimeTicks(ticks_ - delta.InMicroseconds());
- }
-
- // Comparison operators
- bool operator==(const TimeTicks& other) const {
- return ticks_ == other.ticks_;
- }
- bool operator!=(const TimeTicks& other) const {
- return ticks_ != other.ticks_;
- }
- bool operator<(const TimeTicks& other) const {
- return ticks_ < other.ticks_;
- }
- bool operator<=(const TimeTicks& other) const {
- return ticks_ <= other.ticks_;
- }
- bool operator>(const TimeTicks& other) const {
- return ticks_ > other.ticks_;
- }
- bool operator>=(const TimeTicks& other) const {
- return ticks_ >= other.ticks_;
- }
-
- private:
- // Please use Now() to create a new object. This is for internal use
- // and testing. The tick count is in microseconds.
- explicit TimeTicks(int64_t ticks) : ticks_(ticks) {}
-
- // Tick count in microseconds.
- int64_t ticks_;
-};
-
-inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) {
- return ticks + delta;
-}
-
-} } // namespace v8::internal
-
-#endif // V8_PLATFORM_TIME_H_
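For orientation, here is a minimal usage sketch of the TimeDelta/TimeTicks API declared in the header removed above, written against the v8::base namespace that the rest of this change migrates callers to; the include path and DoWork() are illustrative assumptions, not part of the patch.

#include "src/base/platform/time.h"  // assumed post-move location

namespace example {

void DoWork();  // Hypothetical workload standing in for any measured code.

// Measures the wall-clock duration of DoWork() using the monotonic
// high-resolution clock, much like CpuProfile records start_time_ below.
int64_t MeasureWorkInMicroseconds() {
  v8::base::TimeTicks start = v8::base::TimeTicks::HighResolutionNow();
  DoWork();
  v8::base::TimeDelta elapsed =
      v8::base::TimeTicks::HighResolutionNow() - start;
  return elapsed.InMicroseconds();
}

}  // namespace example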
// found in the LICENSE file.
#include "include/v8stdint.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
#include "src/globals.h"
#include "src/hashmap.h"
#include "src/preparse-data.h"
#include "include/v8stdint.h"
#include "src/allocation.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
#include "src/conversions-inl.h"
#include "src/conversions.h"
#include "src/globals.h"
#include "src/v8.h"
#include "src/ast-value-factory.h"
-#include "src/platform.h"
+#include "src/base/platform/platform.h"
#include "src/prettyprinter.h"
#include "src/scopes.h"
void ProfileNode::Print(int indent) {
- OS::Print("%5u %*s %s%s %d #%d %s",
- self_ticks_,
- indent, "",
- entry_->name_prefix(),
- entry_->name(),
- entry_->script_id(),
- id(),
- entry_->bailout_reason());
+ base::OS::Print("%5u %*s %s%s %d #%d %s", self_ticks_, indent, "",
+ entry_->name_prefix(), entry_->name(), entry_->script_id(),
+ id(), entry_->bailout_reason());
if (entry_->resource_name()[0] != '\0')
- OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
- OS::Print("\n");
+ base::OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
+ base::OS::Print("\n");
for (HashMap::Entry* p = children_.Start();
p != NULL;
p = children_.Next(p)) {
CpuProfile::CpuProfile(const char* title, bool record_samples)
: title_(title),
record_samples_(record_samples),
- start_time_(TimeTicks::HighResolutionNow()) {
+ start_time_(base::TimeTicks::HighResolutionNow()) {
}
-void CpuProfile::AddPath(TimeTicks timestamp, const Vector<CodeEntry*>& path) {
+void CpuProfile::AddPath(base::TimeTicks timestamp,
+ const Vector<CodeEntry*>& path) {
ProfileNode* top_frame_node = top_down_.AddPathFromEnd(path);
if (record_samples_) {
timestamps_.Add(timestamp);
void CpuProfile::CalculateTotalTicksAndSamplingRate() {
- end_time_ = TimeTicks::HighResolutionNow();
+ end_time_ = base::TimeTicks::HighResolutionNow();
}
void CpuProfile::Print() {
- OS::Print("[Top down]:\n");
+ base::OS::Print("[Top down]:\n");
top_down_.Print();
}
const Address& key, const CodeMap::CodeEntryInfo& value) {
// For shared function entries, 'size' field is used to store their IDs.
if (value.entry == kSharedFunctionCodeEntry) {
- OS::Print("%p SharedFunctionInfo %d\n", key, value.size);
+ base::OS::Print("%p SharedFunctionInfo %d\n", key, value.size);
} else {
- OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
+ base::OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
}
}
void CpuProfilesCollection::AddPathToCurrentProfiles(
- TimeTicks timestamp, const Vector<CodeEntry*>& path) {
+ base::TimeTicks timestamp, const Vector<CodeEntry*>& path) {
  // As starting / stopping profiles is rare relative to this
// method, we don't bother minimizing the duration of lock holding,
// e.g. copying contents of the list to a local vector.
CpuProfile(const char* title, bool record_samples);
// Add pc -> ... -> main() call path to the profile.
- void AddPath(TimeTicks timestamp, const Vector<CodeEntry*>& path);
+ void AddPath(base::TimeTicks timestamp, const Vector<CodeEntry*>& path);
void CalculateTotalTicksAndSamplingRate();
const char* title() const { return title_; }
int samples_count() const { return samples_.length(); }
ProfileNode* sample(int index) const { return samples_.at(index); }
- TimeTicks sample_timestamp(int index) const { return timestamps_.at(index); }
+ base::TimeTicks sample_timestamp(int index) const {
+ return timestamps_.at(index);
+ }
- TimeTicks start_time() const { return start_time_; }
- TimeTicks end_time() const { return end_time_; }
+ base::TimeTicks start_time() const { return start_time_; }
+ base::TimeTicks end_time() const { return end_time_; }
void UpdateTicksScale();
private:
const char* title_;
bool record_samples_;
- TimeTicks start_time_;
- TimeTicks end_time_;
+ base::TimeTicks start_time_;
+ base::TimeTicks end_time_;
List<ProfileNode*> samples_;
- List<TimeTicks> timestamps_;
+ List<base::TimeTicks> timestamps_;
ProfileTree top_down_;
DISALLOW_COPY_AND_ASSIGN(CpuProfile);
// Called from profile generator thread.
void AddPathToCurrentProfiles(
- TimeTicks timestamp, const Vector<CodeEntry*>& path);
+ base::TimeTicks timestamp, const Vector<CodeEntry*>& path);
// Limits the number of profiles that can be simultaneously collected.
static const int kMaxSimultaneousProfiles = 100;
// Accessed by VM thread and profile generator thread.
List<CpuProfile*> current_profiles_;
- Semaphore current_profiles_semaphore_;
+ base::Semaphore current_profiles_semaphore_;
DISALLOW_COPY_AND_ASSIGN(CpuProfilesCollection);
};
+++ /dev/null
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_QNX_MATH_H_
-#define V8_QNX_MATH_H_
-
-#include <cmath>
-
-#undef fpclassify
-#undef isfinite
-#undef isinf
-#undef isnan
-#undef isnormal
-#undef signbit
-
-using std::lrint;
-
-#endif // V8_QNX_MATH_H_
#include "src/runtime-profiler.h"
#include "src/assembler.h"
+#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/global-handles.h"
#include "src/isolate-inl.h"
#include "src/mark-compact.h"
-#include "src/platform.h"
#include "src/scopeinfo.h"
namespace v8 {
#include "src/allocation.h"
namespace v8 {
+
+namespace base {
+class Semaphore;
+}
+
namespace internal {
class Isolate;
class JSFunction;
class Object;
-class Semaphore;
class RuntimeProfiler {
public:
#include "src/allocation-site-scopes.h"
#include "src/api.h"
#include "src/arguments.h"
+#include "src/base/cpu.h"
+#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/compiler.h"
#include "src/conversions.h"
-#include "src/cpu.h"
#include "src/cpu-profiler.h"
#include "src/date.h"
#include "src/dateparser-inl.h"
#include "src/liveedit.h"
#include "src/misc-intrinsics.h"
#include "src/parser.h"
-#include "src/platform.h"
#include "src/runtime.h"
#include "src/runtime-profiler.h"
#include "src/scopeinfo.h"
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
subject = String::Flatten(subject);
- double value = StringToDouble(
- isolate->unicode_cache(), *subject, ALLOW_TRAILING_JUNK, OS::nan_value());
+ double value = StringToDouble(isolate->unicode_cache(), *subject,
+ ALLOW_TRAILING_JUNK, base::OS::nan_value());
return *isolate->factory()->NewNumber(value);
}
sync_with_compiler_thread) {
while (function->IsInOptimizationQueue()) {
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
- OS::Sleep(50);
+ base::OS::Sleep(50);
}
}
if (FLAG_always_opt) {
millis = 1388534400000.0; // Jan 1 2014 00:00:00 GMT+0000
millis += std::floor(isolate->heap()->synthetic_time());
} else {
- millis = std::floor(OS::TimeCurrentMillis());
+ millis = std::floor(base::OS::TimeCurrentMillis());
}
return *isolate->factory()->NewNumber(millis);
}
RUNTIME_FUNCTION(Runtime_SystemBreak) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
- OS::DebugBreak();
+ base::OS::DebugBreak();
return isolate->heap()->undefined_value();
}
CONVERT_SMI_ARG_CHECKED(message_id, 0);
const char* message = GetBailoutReason(
static_cast<BailoutReason>(message_id));
- OS::PrintError("abort: %s\n", message);
+ base::OS::PrintError("abort: %s\n", message);
isolate->PrintStack(stderr);
- OS::Abort();
+ base::OS::Abort();
UNREACHABLE();
return NULL;
}
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(String, message, 0);
- OS::PrintError("abort: %s\n", message->ToCString().get());
+ base::OS::PrintError("abort: %s\n", message->ToCString().get());
isolate->PrintStack(stderr);
- OS::Abort();
+ base::OS::Abort();
UNREACHABLE();
return NULL;
}
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/cpu-profiler-inl.h"
#include "src/flags.h"
#include "src/frames-inl.h"
#include "src/log.h"
-#include "src/platform.h"
#include "src/simulator.h"
#include "src/v8threads.h"
#include "src/vm-state-inl.h"
class SignalHandler : public AllStatic {
public:
- static void SetUp() { if (!mutex_) mutex_ = new Mutex(); }
+ static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
static void TearDown() { delete mutex_; }
static void IncreaseSamplerCount() {
- LockGuard<Mutex> lock_guard(mutex_);
+ base::LockGuard<base::Mutex> lock_guard(mutex_);
if (++client_count_ == 1) Install();
}
static void DecreaseSamplerCount() {
- LockGuard<Mutex> lock_guard(mutex_);
+ base::LockGuard<base::Mutex> lock_guard(mutex_);
if (--client_count_ == 0) Restore();
}
static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);
// Protects the process wide state below.
- static Mutex* mutex_;
+ static base::Mutex* mutex_;
static int client_count_;
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
};
-Mutex* SignalHandler::mutex_ = NULL;
+base::Mutex* SignalHandler::mutex_ = NULL;
int SignalHandler::client_count_ = 0;
struct sigaction SignalHandler::old_signal_handler_;
bool SignalHandler::signal_handler_installed_ = false;
#endif
-class SamplerThread : public Thread {
+class SamplerThread : public base::Thread {
public:
static const int kSamplerThreadStackSize = 64 * KB;
explicit SamplerThread(int interval)
- : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
+ : Thread(base::Thread::Options("SamplerThread", kSamplerThreadStackSize)),
interval_(interval) {}
- static void SetUp() { if (!mutex_) mutex_ = new Mutex(); }
+ static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
static void TearDown() { delete mutex_; mutex_ = NULL; }
static void AddActiveSampler(Sampler* sampler) {
bool need_to_start = false;
- LockGuard<Mutex> lock_guard(mutex_);
+ base::LockGuard<base::Mutex> lock_guard(mutex_);
if (instance_ == NULL) {
// Start a thread that will send SIGPROF signal to VM threads,
// when CPU profiling will be enabled.
static void RemoveActiveSampler(Sampler* sampler) {
SamplerThread* instance_to_remove = NULL;
{
- LockGuard<Mutex> lock_guard(mutex_);
+ base::LockGuard<base::Mutex> lock_guard(mutex_);
ASSERT(sampler->IsActive());
bool removed = instance_->active_samplers_.RemoveElement(sampler);
virtual void Run() {
while (true) {
{
- LockGuard<Mutex> lock_guard(mutex_);
+ base::LockGuard<base::Mutex> lock_guard(mutex_);
if (active_samplers_.is_empty()) break;
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
sampler->DoSample();
}
}
- OS::Sleep(interval_);
+ base::OS::Sleep(interval_);
}
}
private:
// Protects the process wide state below.
- static Mutex* mutex_;
+ static base::Mutex* mutex_;
static SamplerThread* instance_;
const int interval_;
};
-Mutex* SamplerThread::mutex_ = NULL;
+base::Mutex* SamplerThread::mutex_ = NULL;
SamplerThread* SamplerThread::instance_ = NULL;
DISABLE_ASAN void TickSample::Init(Isolate* isolate,
const RegisterState& regs) {
ASSERT(isolate->IsInitialized());
- timestamp = TimeTicks::HighResolutionNow();
+ timestamp = base::TimeTicks::HighResolutionNow();
pc = regs.pc;
state = isolate->current_vm_state();
static const unsigned kMaxFramesCountLog2 = 8;
static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
Address stack[kMaxFramesCount]; // Call stack.
- TimeTicks timestamp;
+ base::TimeTicks timestamp;
unsigned frames_count : kMaxFramesCountLog2; // Number of captured frames.
bool has_external_callback : 1;
StackFrame::Type top_frame_type : 4;
#define V8_SCANNER_H_
#include "src/allocation.h"
+#include "src/base/logging.h"
#include "src/char-predicates.h"
-#include "src/checks.h"
#include "src/globals.h"
#include "src/hashmap.h"
#include "src/list.h"
#include "src/accessors.h"
#include "src/api.h"
+#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/global-handles.h"
#include "src/ic-inl.h"
#include "src/natives.h"
-#include "src/platform.h"
#include "src/runtime.h"
#include "src/serialize.h"
#include "src/snapshot.h"
PageIterator it(isolate_->heap()->code_space());
while (it.has_next()) {
Page* p = it.next();
- CPU::FlushICache(p->area_start(), p->area_end() - p->area_start());
+ CpuFeatures::FlushICache(p->area_start(), p->area_end() - p->area_start());
}
}
#ifndef V8_SMALL_POINTER_LIST_H_
#define V8_SMALL_POINTER_LIST_H_
-#include "src/checks.h"
+#include "src/base/logging.h"
#include "src/globals.h"
#include "src/zone.h"
#include "src/v8.h"
#include "src/api.h"
-#include "src/platform.h"
+#include "src/base/platform/platform.h"
#include "src/serialize.h"
#include "src/snapshot.h"
bool Snapshot::Initialize() {
if (size_ > 0) {
- ElapsedTimer timer;
+ base::ElapsedTimer timer;
if (FLAG_profile_deserialization) {
timer.Start();
}
#include "src/snapshot-source-sink.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
#include "src/handles-inl.h"
#include "src/serialize.h" // for SerializerDeserializer::nop() in AtEOF()
#ifndef V8_SNAPSHOT_SOURCE_SINK_H_
#define V8_SNAPSHOT_SOURCE_SINK_H_
-#include "src/checks.h"
+#include "src/base/logging.h"
#include "src/utils.h"
namespace v8 {
#ifdef ENABLE_HEAP_PROTECTION
void MemoryAllocator::Protect(Address start, size_t size) {
- OS::Protect(start, size);
+ base::OS::Protect(start, size);
}
void MemoryAllocator::Unprotect(Address start,
size_t size,
Executability executable) {
- OS::Unprotect(start, size, executable);
+ base::OS::Unprotect(start, size, executable);
}
void MemoryAllocator::ProtectChunkFromPage(Page* page) {
int id = GetChunkId(page);
- OS::Protect(chunks_[id].address(), chunks_[id].size());
+ base::OS::Protect(chunks_[id].address(), chunks_[id].size());
}
void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
int id = GetChunkId(page);
- OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
- chunks_[id].owner()->executable() == EXECUTABLE);
+ base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
+ chunks_[id].owner()->executable() == EXECUTABLE);
}
#endif
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/full-codegen.h"
#include "src/macro-assembler.h"
#include "src/mark-compact.h"
#include "src/msan.h"
-#include "src/platform.h"
namespace v8 {
namespace internal {
}
ASSERT(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
- code_range_ = new VirtualMemory(requested);
+ code_range_ = new base::VirtualMemory(requested);
CHECK(code_range_ != NULL);
if (!code_range_->IsReserved()) {
delete code_range_;
bool MemoryAllocator::CommitMemory(Address base,
size_t size,
Executability executable) {
- if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) {
+ if (!base::VirtualMemory::CommitRegion(base, size,
+ executable == EXECUTABLE)) {
return false;
}
UpdateAllocatedSpaceLimits(base, base + size);
}
-void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
+void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
ASSERT(reservation->IsReserved());
ASSERT(executable == NOT_EXECUTABLE ||
isolate_->code_range() == NULL ||
!isolate_->code_range()->valid());
- bool result = VirtualMemory::ReleaseRegion(base, size);
+ bool result = base::VirtualMemory::ReleaseRegion(base, size);
USE(result);
ASSERT(result);
}
Address MemoryAllocator::ReserveAlignedMemory(size_t size,
size_t alignment,
- VirtualMemory* controller) {
- VirtualMemory reservation(size, alignment);
+ base::VirtualMemory* controller) {
+ base::VirtualMemory reservation(size, alignment);
if (!reservation.IsReserved()) return NULL;
size_ += reservation.size();
}
-Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
- size_t commit_size,
- size_t alignment,
- Executability executable,
- VirtualMemory* controller) {
+Address MemoryAllocator::AllocateAlignedMemory(
+ size_t reserve_size, size_t commit_size, size_t alignment,
+ Executability executable, base::VirtualMemory* controller) {
ASSERT(commit_size <= reserve_size);
- VirtualMemory reservation;
+ base::VirtualMemory reservation;
Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
if (base == NULL) return NULL;
size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
MemoryAllocator::CodePageGuardSize() : 0;
size_t header_size = area_start() - address() - guard_size;
- size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
+ size_t commit_size =
+ RoundUp(header_size + requested, base::OS::CommitPageSize());
size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
- OS::CommitPageSize());
+ base::OS::CommitPageSize());
if (commit_size > committed_size) {
// Commit size should be less or equal than the reserved size.
size_t chunk_size;
Heap* heap = isolate_->heap();
Address base = NULL;
- VirtualMemory reservation;
+ base::VirtualMemory reservation;
Address area_start = NULL;
Address area_end = NULL;
if (executable == EXECUTABLE) {
chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
- OS::CommitPageSize()) + CodePageGuardSize();
+ base::OS::CommitPageSize()) + CodePageGuardSize();
// Check executable memory limit.
if (size_executable_ + chunk_size > capacity_executable_) {
// Size of header (not executable) plus area (executable).
size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
- OS::CommitPageSize());
+ base::OS::CommitPageSize());
// Allocate executable memory either from code range or from the
// OS.
if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
area_end = area_start + commit_area_size;
} else {
chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
- OS::CommitPageSize());
+ base::OS::CommitPageSize());
size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
- commit_area_size, OS::CommitPageSize());
+ commit_area_size, base::OS::CommitPageSize());
base = AllocateAlignedMemory(chunk_size,
commit_size,
MemoryChunk::kAlignment,
delete chunk->slots_buffer();
delete chunk->skip_list();
- VirtualMemory* reservation = chunk->reserved_memory();
+ base::VirtualMemory* reservation = chunk->reserved_memory();
if (reservation->IsReserved()) {
FreeMemory(reservation, chunk->executable());
} else {
bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
- if (!VirtualMemory::UncommitRegion(start, size)) return false;
+ if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
return true;
}
int MemoryAllocator::CodePageGuardStartOffset() {
// We are guarding code pages: the first OS page after the header
// will be protected as non-writable.
- return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
+ return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
}
int MemoryAllocator::CodePageGuardSize() {
- return static_cast<int>(OS::CommitPageSize());
+ return static_cast<int>(base::OS::CommitPageSize());
}
int MemoryAllocator::CodePageAreaEndOffset() {
// We are guarding code pages: the last OS page will be protected as
// non-writable.
- return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
+ return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
}
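To make the guard layout above concrete, a tiny sketch of the resulting executable-page partitioning, with an assumed 4 KB commit page size, an assumed header size, and an assumed 1 MB V8 page; the real values come from base::OS::CommitPageSize(), Page::kObjectStartOffset and Page::kPageSize, and treating the area start as guard start plus guard size is likewise an assumption of this sketch.

#include <cstdio>

// Round x up to a multiple of m (m must be a power of two).
static size_t RoundUpTo(size_t x, size_t m) { return (x + m - 1) & ~(m - 1); }

int main() {
  const size_t kCommitPageSize = 4096;    // assumed OS commit page size
  const size_t kObjectStartOffset = 256;  // assumed chunk header size
  const size_t kPageSize = 1u << 20;      // assumed 1 MB V8 page

  // The first OS page after the header is a non-writable guard page...
  size_t guard_start = RoundUpTo(kObjectStartOffset, kCommitPageSize);  // 4096
  size_t guard_size = kCommitPageSize;                                  // 4096
  size_t area_start = guard_start + guard_size;                         // 8192
  // ...and the last OS page of the V8 page is a guard page as well.
  size_t area_end = kPageSize - kCommitPageSize;                     // 1044480
  std::printf("executable area: [%zu, %zu), %zu bytes\n",
              area_start, area_end, area_end - area_start);
  return 0;
}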
-bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
+bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
Address start,
size_t commit_size,
size_t reserved_size) {
size_t PagedSpace::CommittedPhysicalMemory() {
- if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
+ if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
size_t size = 0;
PageIterator it(this);
size_t delta = new_capacity - capacity_;
- ASSERT(IsAligned(delta, OS::AllocateAlignment()));
+ ASSERT(IsAligned(delta, base::OS::AllocateAlignment()));
if (!heap()->isolate()->memory_allocator()->CommitBlock(
start_ + capacity_, delta, executable())) {
return false;
ASSERT(new_capacity < capacity_);
if (is_committed()) {
size_t delta = capacity_ - new_capacity;
- ASSERT(IsAligned(delta, OS::AllocateAlignment()));
+ ASSERT(IsAligned(delta, base::OS::AllocateAlignment()));
MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
size_t NewSpace::CommittedPhysicalMemory() {
- if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
+ if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
size_t size = to_space_.CommittedPhysicalMemory();
if (from_space_.is_committed()) {
// This is safe (not going to deadlock) since Concatenate operations
// are never performed on the same free lists at the same time in
// reverse order.
- LockGuard<Mutex> target_lock_guard(mutex());
- LockGuard<Mutex> source_lock_guard(category->mutex());
+ base::LockGuard<base::Mutex> target_lock_guard(mutex());
+ base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
ASSERT(category->end_ != NULL);
free_bytes = category->available();
if (end_ == NULL) {
size_t LargeObjectSpace::CommittedPhysicalMemory() {
- if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
+ if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
size_t size = 0;
LargePage* current = first_page_;
while (current != NULL) {
#include "src/allocation.h"
#include "src/base/atomicops.h"
+#include "src/base/platform/mutex.h"
#include "src/hashmap.h"
#include "src/list.h"
#include "src/log.h"
-#include "src/platform/mutex.h"
#include "src/utils.h"
namespace v8 {
kFailureTag);
}
- VirtualMemory* reserved_memory() {
+ base::VirtualMemory* reserved_memory() {
return &reservation_;
}
reservation_.Reset();
}
- void set_reserved_memory(VirtualMemory* reservation) {
+ void set_reserved_memory(base::VirtualMemory* reservation) {
ASSERT_NOT_NULL(reservation);
reservation_.TakeControl(reservation);
}
Address area_end_;
// If the chunk needs to remember its memory reservation, it is stored here.
- VirtualMemory reservation_;
+ base::VirtualMemory reservation_;
// The identity of the owning space. This is tagged as a failure pointer, but
// no failure can be in an object, so this can be distinguished from any entry
// in a fixed array.
Isolate* isolate_;
// The reserved range of virtual memory that all code objects are put in.
- VirtualMemory* code_range_;
+ base::VirtualMemory* code_range_;
// Plain old data class, just a struct plus a constructor.
class FreeBlock {
public:
Address ReserveAlignedMemory(size_t requested,
size_t alignment,
- VirtualMemory* controller);
+ base::VirtualMemory* controller);
Address AllocateAlignedMemory(size_t reserve_size,
size_t commit_size,
size_t alignment,
Executability executable,
- VirtualMemory* controller);
+ base::VirtualMemory* controller);
bool CommitMemory(Address addr, size_t size, Executability executable);
- void FreeMemory(VirtualMemory* reservation, Executability executable);
+ void FreeMemory(base::VirtualMemory* reservation, Executability executable);
void FreeMemory(Address addr, size_t size, Executability executable);
// Commit a contiguous block of memory from the initial chunk. Assumes that
return CodePageAreaEndOffset() - CodePageAreaStartOffset();
}
- MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
+ MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
Address start,
size_t commit_size,
size_t reserved_size);
int available() const { return available_; }
void set_available(int available) { available_ = available; }
- Mutex* mutex() { return &mutex_; }
+ base::Mutex* mutex() { return &mutex_; }
bool IsEmpty() {
return top() == 0;
// top_ points to the top FreeListNode* in the free list category.
base::AtomicWord top_;
FreeListNode* end_;
- Mutex mutex_;
+ base::Mutex mutex_;
// Total available bytes in all blocks of this free list category.
int available_;
// The semispaces.
SemiSpace to_space_;
SemiSpace from_space_;
- VirtualMemory reservation_;
+ base::VirtualMemory reservation_;
int pages_used_;
// Start address and bit mask for containment testing.
void StoreBuffer::SetUp() {
- virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
+ virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
uintptr_t start_as_int =
reinterpret_cast<uintptr_t>(virtual_memory_->address());
start_ =
limit_ = start_ + (kStoreBufferSize / kPointerSize);
old_virtual_memory_ =
- new VirtualMemory(kOldStoreBufferLength * kPointerSize);
+ new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
old_top_ = old_start_ =
reinterpret_cast<Address*>(old_virtual_memory_->address());
// Don't know the alignment requirements of the OS, but it is certainly not
// less than 0xfff.
ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
- int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize);
+ int initial_length =
+ static_cast<int>(base::OS::CommitPageSize() / kPointerSize);
ASSERT(initial_length > 0);
ASSERT(initial_length <= kOldStoreBufferLength);
old_limit_ = old_start_ + initial_length;
#define V8_STORE_BUFFER_H_
#include "src/allocation.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
+#include "src/base/platform/platform.h"
#include "src/globals.h"
-#include "src/platform.h"
namespace v8 {
namespace internal {
Address* old_limit_;
Address* old_top_;
Address* old_reserved_limit_;
- VirtualMemory* old_virtual_memory_;
+ base::VirtualMemory* old_virtual_memory_;
bool old_buffer_is_sorted_;
bool old_buffer_is_filtered_;
StoreBufferCallback callback_;
bool may_move_store_buffer_entries_;
- VirtualMemory* virtual_memory_;
+ base::VirtualMemory* virtual_memory_;
// Two hash sets used for filtering.
// If address is in the hash set then it is guaranteed to be in the
bool SweeperThread::SweepingCompleted() {
- bool value = end_sweeping_semaphore_.WaitFor(TimeDelta::FromSeconds(0));
+ bool value = end_sweeping_semaphore_.WaitFor(base::TimeDelta::FromSeconds(0));
if (value) {
end_sweeping_semaphore_.Signal();
}
#define V8_SWEEPER_THREAD_H_
#include "src/base/atomicops.h"
+#include "src/base/platform/platform.h"
#include "src/flags.h"
-#include "src/platform.h"
#include "src/utils.h"
#include "src/spaces.h"
namespace v8 {
namespace internal {
-class SweeperThread : public Thread {
+class SweeperThread : public base::Thread {
public:
explicit SweeperThread(Isolate* isolate);
~SweeperThread() {}
Isolate* isolate_;
Heap* heap_;
MarkCompactCollector* collector_;
- Semaphore start_sweeping_semaphore_;
- Semaphore end_sweeping_semaphore_;
- Semaphore stop_semaphore_;
+ base::Semaphore start_sweeping_semaphore_;
+ base::Semaphore end_sweeping_semaphore_;
+ base::Semaphore stop_semaphore_;
volatile base::AtomicWord stop_thread_;
};
#ifndef V8_TOKEN_H_
#define V8_TOKEN_H_
-#include "src/checks.h"
+#include "src/base/logging.h"
namespace v8 {
namespace internal {
#ifndef V8_TRANSITIONS_H_
#define V8_TRANSITIONS_H_
+#include "src/checks.h"
#include "src/elements-kind.h"
#include "src/heap.h"
#include "src/isolate.h"
#include "src/objects.h"
-#include "src/v8checks.h"
namespace v8 {
namespace internal {
#define V8_UNICODE_INL_H_
#include "src/unicode.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
#include "src/utils.h"
namespace unibrow {
#include "src/v8.h"
-#include "src/checks.h"
-#include "src/platform.h"
+#include "src/base/logging.h"
+#include "src/base/platform/platform.h"
#include "src/utils.h"
namespace v8 {
void PrintF(const char* format, ...) {
va_list arguments;
va_start(arguments, format);
- OS::VPrint(format, arguments);
+ base::OS::VPrint(format, arguments);
va_end(arguments);
}
void PrintF(FILE* out, const char* format, ...) {
va_list arguments;
va_start(arguments, format);
- OS::VFPrint(out, format, arguments);
+ base::OS::VFPrint(out, format, arguments);
va_end(arguments);
}
void PrintPID(const char* format, ...) {
- OS::Print("[%d] ", OS::GetCurrentProcessId());
+ base::OS::Print("[%d] ", base::OS::GetCurrentProcessId());
va_list arguments;
va_start(arguments, format);
- OS::VPrint(format, arguments);
+ base::OS::VPrint(format, arguments);
va_end(arguments);
}
int VSNPrintF(Vector<char> str, const char* format, va_list args) {
- return OS::VSNPrintF(str.start(), str.length(), format, args);
+ return base::OS::VSNPrintF(str.start(), str.length(), format, args);
}
void StrNCpy(Vector<char> dest, const char* src, size_t n) {
- OS::StrNCpy(dest.start(), dest.length(), src, n);
+ base::OS::StrNCpy(dest.start(), dest.length(), src, n);
}
const char* filename) {
if (file == NULL || fseek(file, 0, SEEK_END) != 0) {
if (verbose) {
- OS::PrintError("Cannot read from file %s.\n", filename);
+ base::OS::PrintError("Cannot read from file %s.\n", filename);
}
return NULL;
}
int* size,
int extra_space,
bool verbose) {
- FILE* file = OS::FOpen(filename, "rb");
+ FILE* file = base::OS::FOpen(filename, "rb");
char* result = ReadCharsFromFile(file, size, extra_space, verbose, filename);
if (file != NULL) fclose(file);
return result;
const char* str,
int size,
bool verbose) {
- FILE* f = OS::FOpen(filename, "ab");
+ FILE* f = base::OS::FOpen(filename, "ab");
if (f == NULL) {
if (verbose) {
- OS::PrintError("Cannot open file %s for writing.\n", filename);
+ base::OS::PrintError("Cannot open file %s for writing.\n", filename);
}
return 0;
}
const char* str,
int size,
bool verbose) {
- FILE* f = OS::FOpen(filename, "wb");
+ FILE* f = base::OS::FOpen(filename, "wb");
if (f == NULL) {
if (verbose) {
- OS::PrintError("Cannot open file %s for writing.\n", filename);
+ base::OS::PrintError("Cannot open file %s for writing.\n", filename);
}
return 0;
}
#include <string.h>
#include "src/allocation.h"
+#include "src/base/logging.h"
#include "src/base/macros.h"
-#include "src/checks.h"
+#include "src/base/platform/platform.h"
#include "src/globals.h"
#include "src/list.h"
-#include "src/platform.h"
#include "src/vector.h"
namespace v8 {
// ----------------------------------------------------------------------------
// General helper functions
-// Returns true iff x is a power of 2. Cannot be used with the maximally
-// negative value of the type T (the -1 overflows).
-template <typename T>
-inline bool IsPowerOf2(T x) {
- return IS_POWER_OF_TWO(x);
-}
-
-
// X must be a power of 2. Returns the number of trailing zeros.
inline int WhichPowerOf2(uint32_t x) {
ASSERT(IsPowerOf2(x));
}
-// Compute the 0-relative offset of some absolute value x of type T.
-// This allows conversion of Addresses and integral types into
-// 0-relative int offsets.
-template <typename T>
-inline intptr_t OffsetFrom(T x) {
- return x - static_cast<T>(0);
-}
-
-
-// Compute the absolute value of type T for some 0-relative offset x.
-// This allows conversion of 0-relative int offsets into Addresses and
-// integral types.
-template <typename T>
-inline T AddressFrom(intptr_t x) {
- return static_cast<T>(static_cast<T>(0) + x);
-}
-
-
-// Return the largest multiple of m which is <= x.
-template <typename T>
-inline T RoundDown(T x, intptr_t m) {
- ASSERT(IsPowerOf2(m));
- return AddressFrom<T>(OffsetFrom(x) & -m);
-}
-
-
-// Return the smallest multiple of m which is >= x.
-template <typename T>
-inline T RoundUp(T x, intptr_t m) {
- return RoundDown<T>(static_cast<T>(x + m - 1), m);
-}
-
-
-// Increment a pointer until it has the specified alignment.
-// This works like RoundUp, but it works correctly on pointer types where
-// sizeof(*pointer) might not be 1.
-template<class T>
-T AlignUp(T pointer, size_t alignment) {
- ASSERT(sizeof(pointer) == sizeof(uintptr_t));
- uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
- return reinterpret_cast<T>(RoundUp(pointer_raw, alignment));
-}
-
-
template <typename T>
int Compare(const T& a, const T& b) {
if (a == b)
}
-// Returns the smallest power of two which is >= x. If you pass in a
-// number that is already a power of two, it is returned as is.
-// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
-// figure 3-3, page 48, where the function is called clp2.
-inline uint32_t RoundUpToPowerOf2(uint32_t x) {
- ASSERT(x <= 0x80000000u);
- x = x - 1;
- x = x | (x >> 1);
- x = x | (x >> 2);
- x = x | (x >> 4);
- x = x | (x >> 8);
- x = x | (x >> 16);
- return x + 1;
-}
-
-
-inline uint32_t RoundDownToPowerOf2(uint32_t x) {
- uint32_t rounded_up = RoundUpToPowerOf2(x);
- if (rounded_up > x) return rounded_up >> 1;
- return rounded_up;
-}
-
-
-template <typename T, typename U>
-inline bool IsAligned(T value, U alignment) {
- return (value & (alignment - 1)) == 0;
-}
-
-
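Since the power-of-two and rounding helpers above are being dropped from utils.h, a small self-contained sketch of their arithmetic may help; this is an illustrative reimplementation with assumed names, not the relocated code.

#include <cassert>
#include <cstdint>

// Largest multiple of m that is <= x; m must be a power of two.
template <typename T>
T RoundDownExample(T x, intptr_t m) {
  assert((m & (m - 1)) == 0);
  return x & ~static_cast<T>(m - 1);
}

// Smallest multiple of m that is >= x.
template <typename T>
T RoundUpExample(T x, intptr_t m) {
  return RoundDownExample<T>(static_cast<T>(x + m - 1), m);
}

// "clp2" from Hacker's Delight: smear the highest set bit downwards, add one.
inline uint32_t RoundUpToPowerOf2Example(uint32_t x) {
  x = x - 1;
  x |= x >> 1;  x |= x >> 2;  x |= x >> 4;  x |= x >> 8;  x |= x >> 16;
  return x + 1;
}

int main() {
  assert(RoundDownExample<uintptr_t>(0x1234, 0x1000) == 0x1000);
  assert(RoundUpExample<uintptr_t>(0x1234, 0x1000) == 0x2000);
  assert(RoundUpExample<uintptr_t>(0x2000, 0x1000) == 0x2000);  // already aligned
  assert(RoundUpToPowerOf2Example(37) == 64);
  assert(RoundUpToPowerOf2Example(64) == 64);
  return 0;
}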
// Returns true if (addr + offset) is aligned.
inline bool IsAddressAligned(Address addr,
intptr_t alignment,
+++ /dev/null
-include_rules = [
- "-src",
- "+src/base",
- "+src/platform",
-]
+++ /dev/null
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/utils/random-number-generator.h"
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#include <new>
-
-#include "src/base/macros.h"
-#include "src/platform/mutex.h"
-#include "src/platform/time.h"
-
-namespace v8 {
-namespace internal {
-
-static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER;
-static RandomNumberGenerator::EntropySource entropy_source = NULL;
-
-
-// static
-void RandomNumberGenerator::SetEntropySource(EntropySource source) {
- LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
- entropy_source = source;
-}
-
-
-RandomNumberGenerator::RandomNumberGenerator() {
- // Check if embedder supplied an entropy source.
- { LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
- if (entropy_source != NULL) {
- int64_t seed;
- if (entropy_source(reinterpret_cast<unsigned char*>(&seed),
- sizeof(seed))) {
- SetSeed(seed);
- return;
- }
- }
- }
-
-#if V8_OS_CYGWIN || V8_OS_WIN
- // Use rand_s() to gather entropy on Windows. See:
- // https://code.google.com/p/v8/issues/detail?id=2905
- unsigned first_half, second_half;
- errno_t result = rand_s(&first_half);
- ASSERT_EQ(0, result);
- result = rand_s(&second_half);
- ASSERT_EQ(0, result);
- SetSeed((static_cast<int64_t>(first_half) << 32) + second_half);
-#else
- // Gather entropy from /dev/urandom if available.
- FILE* fp = fopen("/dev/urandom", "rb");
- if (fp != NULL) {
- int64_t seed;
- size_t n = fread(&seed, sizeof(seed), 1, fp);
- fclose(fp);
- if (n == 1) {
- SetSeed(seed);
- return;
- }
- }
-
- // We cannot assume that random() or rand() were seeded
- // properly, so instead of relying on random() or rand(),
- // we just seed our PRNG using timing data as fallback.
- // This is weak entropy, but it's sufficient, because
- // it is the responsibility of the embedder to install
- // an entropy source using v8::V8::SetEntropySource(),
- // which provides reasonable entropy, see:
- // https://code.google.com/p/v8/issues/detail?id=2905
- int64_t seed = Time::NowFromSystemTime().ToInternalValue() << 24;
- seed ^= TimeTicks::HighResolutionNow().ToInternalValue() << 16;
- seed ^= TimeTicks::Now().ToInternalValue() << 8;
- SetSeed(seed);
-#endif // V8_OS_CYGWIN || V8_OS_WIN
-}
-
-
-int RandomNumberGenerator::NextInt(int max) {
- ASSERT_LE(0, max);
-
- // Fast path if max is a power of 2.
- if (IS_POWER_OF_TWO(max)) {
- return static_cast<int>((max * static_cast<int64_t>(Next(31))) >> 31);
- }
-
- while (true) {
- int rnd = Next(31);
- int val = rnd % max;
- if (rnd - val + (max - 1) >= 0) {
- return val;
- }
- }
-}
-
-
-double RandomNumberGenerator::NextDouble() {
- return ((static_cast<int64_t>(Next(26)) << 27) + Next(27)) /
- static_cast<double>(static_cast<int64_t>(1) << 53);
-}
-
-
-void RandomNumberGenerator::NextBytes(void* buffer, size_t buflen) {
- for (size_t n = 0; n < buflen; ++n) {
- static_cast<uint8_t*>(buffer)[n] = static_cast<uint8_t>(Next(8));
- }
-}
-
-
-int RandomNumberGenerator::Next(int bits) {
- ASSERT_LT(0, bits);
- ASSERT_GE(32, bits);
- // Do unsigned multiplication, which has the intended modulo semantics, while
- // signed multiplication would expose undefined behavior.
- uint64_t product = static_cast<uint64_t>(seed_) * kMultiplier;
- // Assigning a uint64_t to an int64_t is implementation defined, but this
- // should be OK. Use a static_cast to explicitly state that we know what we're
- // doing. (Famous last words...)
- int64_t seed = static_cast<int64_t>((product + kAddend) & kMask);
- seed_ = seed;
- return static_cast<int>(seed >> (48 - bits));
-}
-
-
-void RandomNumberGenerator::SetSeed(int64_t seed) {
- seed_ = (seed ^ kMultiplier) & kMask;
-}
-
-} } // namespace v8::internal
+++ /dev/null
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_UTILS_RANDOM_NUMBER_GENERATOR_H_
-#define V8_UTILS_RANDOM_NUMBER_GENERATOR_H_
-
-#include "src/base/macros.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// RandomNumberGenerator
-//
-// This class is used to generate a stream of pseudorandom numbers. The class
-// uses a 48-bit seed, which is modified using a linear congruential formula.
-// (See Donald Knuth, The Art of Computer Programming, Volume 3, Section 3.2.1.)
-// If two instances of RandomNumberGenerator are created with the same seed, and
-// the same sequence of method calls is made for each, they will generate and
-// return identical sequences of numbers.
-// This class uses (probably) weak entropy by default, but it's sufficient,
-// because it is the responsibility of the embedder to install an entropy source
-// using v8::V8::SetEntropySource(), which provides reasonable entropy, see:
-// https://code.google.com/p/v8/issues/detail?id=2905
-// This class is neither reentrant nor threadsafe.
-
-class RandomNumberGenerator V8_FINAL {
- public:
- // EntropySource is used as a callback function when V8 needs a source of
- // entropy.
- typedef bool (*EntropySource)(unsigned char* buffer, size_t buflen);
- static void SetEntropySource(EntropySource entropy_source);
-
- RandomNumberGenerator();
- explicit RandomNumberGenerator(int64_t seed) { SetSeed(seed); }
-
- // Returns the next pseudorandom, uniformly distributed int value from this
- // random number generator's sequence. The general contract of |NextInt()| is
- // that one int value is pseudorandomly generated and returned.
- // All 2^32 possible integer values are produced with (approximately) equal
- // probability.
- V8_INLINE int NextInt() V8_WARN_UNUSED_RESULT {
- return Next(32);
- }
-
- // Returns a pseudorandom, uniformly distributed int value between 0
- // (inclusive) and the specified max value (exclusive), drawn from this random
- // number generator's sequence. The general contract of |NextInt(int)| is that
- // one int value in the specified range is pseudorandomly generated and
- // returned. All max possible int values are produced with (approximately)
- // equal probability.
- int NextInt(int max) V8_WARN_UNUSED_RESULT;
-
- // Returns the next pseudorandom, uniformly distributed boolean value from
- // this random number generator's sequence. The general contract of
- // |NextBoolean()| is that one boolean value is pseudorandomly generated and
- // returned. The values true and false are produced with (approximately) equal
- // probability.
- V8_INLINE bool NextBool() V8_WARN_UNUSED_RESULT {
- return Next(1) != 0;
- }
-
- // Returns the next pseudorandom, uniformly distributed double value between
- // 0.0 and 1.0 from this random number generator's sequence.
- // The general contract of |NextDouble()| is that one double value, chosen
- // (approximately) uniformly from the range 0.0 (inclusive) to 1.0
- // (exclusive), is pseudorandomly generated and returned.
- double NextDouble() V8_WARN_UNUSED_RESULT;
-
- // Fills the elements of a specified array of bytes with random numbers.
- void NextBytes(void* buffer, size_t buflen);
-
- // Override the current seed.
- void SetSeed(int64_t seed);
-
- private:
- static const int64_t kMultiplier = V8_2PART_UINT64_C(0x5, deece66d);
- static const int64_t kAddend = 0xb;
- static const int64_t kMask = V8_2PART_UINT64_C(0xffff, ffffffff);
-
- int Next(int bits) V8_WARN_UNUSED_RESULT;
-
- int64_t seed_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_UTILS_RANDOM_NUMBER_GENERATOR_H_
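The generator being moved is a classic 48-bit linear congruential generator; the following compact sketch restates its core recurrence and the bounded-draw rejection step using the constants declared above. It is an illustration under those assumptions, not the relocated file.

#include <cassert>
#include <cstdint>

class Lcg48 {
 public:
  explicit Lcg48(int64_t seed) { SetSeed(seed); }

  // Core step: seed' = (seed * 0x5deece66d + 0xb) mod 2^48; the top |bits|
  // bits of the new seed are the returned pseudorandom bits.
  int Next(int bits) {
    assert(bits > 0 && bits <= 32);
    uint64_t product = static_cast<uint64_t>(seed_) * kMultiplier;
    seed_ = static_cast<int64_t>((product + kAddend) & kMask);
    return static_cast<int>(seed_ >> (48 - bits));
  }

  // Uniform value in [0, max): power-of-two fast path, otherwise reject
  // draws from the incomplete final bucket to avoid modulo bias.
  int NextInt(int max) {
    assert(max > 0);
    if ((max & (max - 1)) == 0) {
      return static_cast<int>((max * static_cast<int64_t>(Next(31))) >> 31);
    }
    while (true) {
      int rnd = Next(31);
      int val = rnd % max;
      if (rnd - val + (max - 1) >= 0) return val;  // rejection test as above
    }
  }

 private:
  void SetSeed(int64_t seed) { seed_ = (seed ^ kMultiplier) & kMask; }

  static const int64_t kMultiplier = 0x5deece66dLL;
  static const int64_t kAddend = 0xb;
  static const int64_t kMask = 0xffffffffffffLL;  // 2^48 - 1
  int64_t seed_;
};

Usage would then look like: Lcg48 rng(42); int die = rng.NextInt(6); which yields values in [0, 6).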
#include "src/assembler.h"
#include "src/base/once.h"
+#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/debug.h"
#include "src/deoptimizer.h"
#endif
#include "src/lithium-allocator.h"
#include "src/objects.h"
-#include "src/platform.h"
#include "src/runtime-profiler.h"
#include "src/sampler.h"
#include "src/serialize.h"
FLAG_max_semi_space_size = 1;
}
- OS::Initialize(FLAG_random_seed, FLAG_hard_abort, FLAG_gc_fake_mmap);
+ base::OS::Initialize(FLAG_random_seed, FLAG_hard_abort, FLAG_gc_fake_mmap);
#ifdef V8_USE_DEFAULT_PLATFORM
platform_ = new DefaultPlatform;
// Basic includes
#include "include/v8.h"
#include "include/v8-platform.h"
-#include "src/v8checks.h" // NOLINT
+#include "src/checks.h" // NOLINT
#include "src/allocation.h" // NOLINT
#include "src/assert-scope.h" // NOLINT
#include "src/utils.h" // NOLINT
+++ /dev/null
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_V8CHECKS_H_
-#define V8_V8CHECKS_H_
-
-#include "src/checks.h"
-
-namespace v8 {
- class Value;
- template <class T> class Handle;
-
-namespace internal {
- intptr_t HeapObjectTagMask();
-
-} } // namespace v8::internal
-
-
-void CheckNonEqualsHelper(const char* file,
- int line,
- const char* unexpected_source,
- v8::Handle<v8::Value> unexpected,
- const char* value_source,
- v8::Handle<v8::Value> value);
-
-void CheckEqualsHelper(const char* file,
- int line,
- const char* expected_source,
- v8::Handle<v8::Value> expected,
- const char* value_source,
- v8::Handle<v8::Value> value);
-
-#define ASSERT_TAG_ALIGNED(address) \
- ASSERT((reinterpret_cast<intptr_t>(address) & HeapObjectTagMask()) == 0)
-
-#define ASSERT_SIZE_TAG_ALIGNED(size) ASSERT((size & HeapObjectTagMask()) == 0)
-
-#endif // V8_V8CHECKS_H_
void EagerlyArchiveThread();
- Mutex mutex_;
+ base::Mutex mutex_;
ThreadId mutex_owner_;
ThreadId lazily_archived_thread_;
ThreadState* lazily_archived_thread_state_;
+++ /dev/null
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please
-// refer to The Open Group Base Specification for specification of the correct
-// semantics for these functions.
-// (http://www.opengroup.org/onlinepubs/000095399/)
-#if defined(_MSC_VER) && (_MSC_VER < 1800)
-
-#include "src/base/win32-headers.h"
-#include <float.h> // Required for DBL_MAX and on Win32 for finite()
-#include <limits.h> // Required for INT_MAX etc.
-#include <cmath>
-#include "src/win32-math.h"
-
-#include "src/checks.h"
-
-
-namespace std {
-
-// Test for a NaN (not a number) value - usually defined in math.h
-int isnan(double x) {
- return _isnan(x);
-}
-
-
-// Test for infinity - usually defined in math.h
-int isinf(double x) {
- return (_fpclass(x) & (_FPCLASS_PINF | _FPCLASS_NINF)) != 0;
-}
-
-
-// Test for finite value - usually defined in math.h
-int isfinite(double x) {
- return _finite(x);
-}
-
-
-// Test if x is less than y and both nominal - usually defined in math.h
-int isless(double x, double y) {
- return isnan(x) || isnan(y) ? 0 : x < y;
-}
-
-
-// Test if x is greater than y and both nominal - usually defined in math.h
-int isgreater(double x, double y) {
- return isnan(x) || isnan(y) ? 0 : x > y;
-}
-
-
-// Classify floating point number - usually defined in math.h
-int fpclassify(double x) {
- // Use the MS-specific _fpclass() for classification.
- int flags = _fpclass(x);
-
- // Determine class. We cannot use a switch statement because
- // the _FPCLASS_ constants are defined as flags.
- if (flags & (_FPCLASS_PN | _FPCLASS_NN)) return FP_NORMAL;
- if (flags & (_FPCLASS_PZ | _FPCLASS_NZ)) return FP_ZERO;
- if (flags & (_FPCLASS_PD | _FPCLASS_ND)) return FP_SUBNORMAL;
- if (flags & (_FPCLASS_PINF | _FPCLASS_NINF)) return FP_INFINITE;
-
- // All cases should be covered by the code above.
- ASSERT(flags & (_FPCLASS_SNAN | _FPCLASS_QNAN));
- return FP_NAN;
-}
-
-
-// Test sign - usually defined in math.h
-int signbit(double x) {
- // We need to take care of the special case of both positive
- // and negative versions of zero.
- if (x == 0)
- return _fpclass(x) & _FPCLASS_NZ;
- else
- return x < 0;
-}
-
-} // namespace std
-
-#endif // _MSC_VER
+++ /dev/null
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please
-// refer to The Open Group Base Specification for specification of the correct
-// semantics for these functions.
-// (http://www.opengroup.org/onlinepubs/000095399/)
-
-#ifndef V8_WIN32_MATH_H_
-#define V8_WIN32_MATH_H_
-
-#ifndef _MSC_VER
-#error Wrong environment, expected MSVC.
-#endif // _MSC_VER
-
-// MSVC 2013+ provides implementations of all standard math functions.
-#if (_MSC_VER < 1800)
-enum {
- FP_NAN,
- FP_INFINITE,
- FP_ZERO,
- FP_SUBNORMAL,
- FP_NORMAL
-};
-
-
-namespace std {
-
-int isfinite(double x);
-int isinf(double x);
-int isnan(double x);
-int isless(double x, double y);
-int isgreater(double x, double y);
-int fpclassify(double x);
-int signbit(double x);
-
-} // namespace std
-
-#endif // _MSC_VER < 1800
-
-#endif // V8_WIN32_MATH_H_
#include "src/x64/assembler-x64.h"
-#include "src/cpu.h"
+#include "src/base/cpu.h"
#include "src/debug.h"
#include "src/v8memory.h"
ICacheFlushMode icache_flush_mode) {
Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CPU::FlushICache(pc, sizeof(int32_t));
+ CpuFeatures::FlushICache(pc, sizeof(int32_t));
}
}
if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
Memory::Address_at(pc_) += static_cast<int32_t>(delta);
- if (flush_icache) CPU::FlushICache(pc_, sizeof(Address));
+ if (flush_icache) CpuFeatures::FlushICache(pc_, sizeof(Address));
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
- if (flush_icache) CPU::FlushICache(pc_, sizeof(int32_t));
+ if (flush_icache) CpuFeatures::FlushICache(pc_, sizeof(int32_t));
} else if (rmode_ == CODE_AGE_SEQUENCE) {
if (*pc_ == kCallOpcode) {
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= static_cast<int32_t>(delta); // Relocate entry.
- if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+ if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
}
}
}
ASSERT(!target->IsConsString());
Memory::Object_at(pc_) = target;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CPU::FlushICache(pc_, sizeof(Address));
+ CpuFeatures::FlushICache(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CPU::FlushICache(pc_, sizeof(Address));
+ CpuFeatures::FlushICache(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL) {
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Memory::Address_at(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset) =
target;
- CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset,
- sizeof(Address));
+ CpuFeatures::FlushICache(
+ pc_ + Assembler::kRealPatchReturnSequenceAddressOffset, sizeof(Address));
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
- CPU::FlushICache(pc_, sizeof(Address));
+ CpuFeatures::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::CELL) {
visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
- CPU::FlushICache(pc_, sizeof(Address));
+ CpuFeatures::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
} else if (((RelocInfo::IsJSReturn(mode) &&
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
- CPU::FlushICache(pc_, sizeof(Address));
+ CpuFeatures::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::CELL) {
StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
- CPU::FlushICache(pc_, sizeof(Address));
+ CpuFeatures::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (heap->isolate()->debug()->has_break_points() &&
// Implementation of CpuFeatures
void CpuFeatures::ProbeImpl(bool cross_compile) {
- CPU cpu;
+ base::CPU cpu;
CHECK(cpu.has_sse2()); // SSE2 support is mandatory.
CHECK(cpu.has_cmov()); // CMOV support is mandatory.
}
// Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count);
+ CpuFeatures::FlushICache(pc_, instruction_count);
}
break;
}
ASSERT(GetMode(stub) == mode);
- CPU::FlushICache(stub->instruction_start(), 7);
+ CpuFeatures::FlushICache(stub->instruction_start(), 7);
}
private:
UnaryMathFunction CreateExpFunction() {
if (!FLAG_fast_math) return &std::exp;
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
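// A minimal sketch of the allocate/assemble/flush/protect sequence these
// hunks keep updating, written against the new base::OS and CpuFeatures
// names; MacroAssembler, CodeDesc, KB and FUNCTION_CAST are assumed from the
// surrounding V8 code:
//
//   size_t actual_size;
//   byte* buffer =
//       static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
//   if (buffer == NULL) return NULL;  // executable allocation can fail
//   MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
//   // ... emit code into masm ...
//   CodeDesc desc;
//   masm.GetCode(&desc);
//   CpuFeatures::FlushICache(buffer, actual_size);  // publish the patched code
//   base::OS::ProtectCode(buffer, actual_size);     // flip the page to read/execute
//   return FUNCTION_CAST<UnaryMathFunction>(buffer);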
UnaryMathFunction CreateSqrtFunction() {
size_t actual_size;
// Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
- &actual_size,
- true));
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &std::sqrt;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
// Define custom fmod implementation.
ModuloFunction CreateModuloFunction() {
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(
+ base::OS::Allocate(Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Assembler masm(NULL, buffer, static_cast<int>(actual_size));
// Generated code is put into a fixed, unmovable, buffer, and not into
CodeDesc desc;
masm.GetCode(&desc);
- OS::ProtectCode(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
// Call the function from C++ through this pointer.
return FUNCTION_CAST<ModuloFunction>(buffer);
}
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
- CPU::FlushICache(sequence, young_length);
+ CpuFeatures::FlushICache(sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length);
#if V8_TARGET_ARCH_X64
-#include "src/cpu.h"
+#include "src/assembler.h"
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
-void CPU::FlushICache(void* start, size_t size) {
+void CpuFeatures::FlushICache(void* start, size_t size) {
// No need to flush the instruction cache on Intel. On Intel, instruction
// cache flushing is only necessary when multiple cores are running the same
// code simultaneously. V8 (and JavaScript) is single threaded, and when code
#include "src/x64/lithium-x64.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
#include "src/deoptimizer.h"
#include "src/lithium-codegen.h"
#include "src/safepoint-table.h"
void MacroAssembler::CheckStackAlignment() {
- int frame_alignment = OS::ActivationFrameAlignment();
+ int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
ASSERT(IsPowerOf2(frame_alignment));
}
// Get the required frame alignment for the OS.
- const int kFrameAlignment = OS::ActivationFrameAlignment();
+ const int kFrameAlignment = base::OS::ActivationFrameAlignment();
if (kFrameAlignment > 0) {
ASSERT(IsPowerOf2(kFrameAlignment));
ASSERT(is_int8(kFrameAlignment));
void MacroAssembler::PrepareCallCFunction(int num_arguments) {
- int frame_alignment = OS::ActivationFrameAlignment();
+ int frame_alignment = base::OS::ActivationFrameAlignment();
ASSERT(frame_alignment != 0);
ASSERT(num_arguments >= 0);
}
call(function);
- ASSERT(OS::ActivationFrameAlignment() != 0);
+ ASSERT(base::OS::ActivationFrameAlignment() != 0);
ASSERT(num_arguments >= 0);
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- CPU::FlushICache(address_, size_);
+ CpuFeatures::FlushICache(address_, size_);
// Check that the code was patched as expected.
ASSERT(masm_.pc_ == address_ + size_);
#include "src/x87/assembler-x87.h"
-#include "src/cpu.h"
+#include "src/assembler.h"
#include "src/debug.h"
namespace v8 {
if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_)) {
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p -= delta; // Relocate entry.
- if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+ if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
} else if (rmode_ == CODE_AGE_SEQUENCE) {
if (*pc_ == kCallOpcode) {
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= delta; // Relocate entry.
- if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+ if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
}
} else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
// Special handling of js_return when a break point is set (call
// instruction has been inserted).
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= delta; // Relocate entry.
- if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+ if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
} else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) {
// Special handling of a debug break slot when a break point is set (call
// instruction has been inserted).
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= delta; // Relocate entry.
- if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+ if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
} else if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p += delta; // Relocate entry.
- if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+ if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
}
}
ASSERT(!target->IsConsString());
Memory::Object_at(pc_) = target;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CPU::FlushICache(pc_, sizeof(Address));
+ CpuFeatures::FlushICache(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CPU::FlushICache(pc_, sizeof(Address));
+ CpuFeatures::FlushICache(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
- CPU::FlushICache(pc_, sizeof(Address));
+ CpuFeatures::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::CELL) {
visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
- CPU::FlushICache(pc_, sizeof(Address));
+ CpuFeatures::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
} else if (((RelocInfo::IsJSReturn(mode) &&
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
- CPU::FlushICache(pc_, sizeof(Address));
+ CpuFeatures::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::CELL) {
StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
- CPU::FlushICache(pc_, sizeof(Address));
+ CpuFeatures::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (heap->isolate()->debug()->has_break_points() &&
int32_t* p = reinterpret_cast<int32_t*>(pc);
*p = target - (pc + sizeof(int32_t));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CPU::FlushICache(p, sizeof(int32_t));
+ CpuFeatures::FlushICache(p, sizeof(int32_t));
}
}
// Implementation of CpuFeatures
void CpuFeatures::ProbeImpl(bool cross_compile) {
- CPU cpu;
+ base::CPU cpu;
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
}
// Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count);
+ CpuFeatures::FlushICache(pc_, instruction_count);
}
break;
}
ASSERT(GetMode(stub) == mode);
- CPU::FlushICache(stub->instruction_start(), 7);
+ CpuFeatures::FlushICache(stub->instruction_start(), 7);
}
private:
MemMoveFunction CreateMemMoveFunction() {
size_t actual_size;
// Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return NULL;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
LabelConverter conv(buffer);
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
return FUNCTION_CAST<MemMoveFunction>(buffer);
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
- CPU::FlushICache(sequence, young_length);
+ CpuFeatures::FlushICache(sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length);
#if V8_TARGET_ARCH_X87
-#include "src/cpu.h"
+#include "src/assembler.h"
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
-void CPU::FlushICache(void* start, size_t size) {
+void CpuFeatures::FlushICache(void* start, size_t size) {
// No need to flush the instruction cache on Intel. On Intel, instruction
// cache flushing is only necessary when multiple cores are running the same
// code simultaneously. V8 (and JavaScript) is single threaded, and when code
#include "src/x87/lithium-x87.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
#include "src/deoptimizer.h"
#include "src/lithium-codegen.h"
#include "src/safepoint-table.h"
sub(esp, Immediate(argc * kPointerSize));
// Get the required frame alignment for the OS.
- const int kFrameAlignment = OS::ActivationFrameAlignment();
+ const int kFrameAlignment = base::OS::ActivationFrameAlignment();
if (kFrameAlignment > 0) {
ASSERT(IsPowerOf2(kFrameAlignment));
and_(esp, -kFrameAlignment);
void MacroAssembler::CheckStackAlignment() {
- int frame_alignment = OS::ActivationFrameAlignment();
+ int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
ASSERT(IsPowerOf2(frame_alignment));
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
- int frame_alignment = OS::ActivationFrameAlignment();
+ int frame_alignment = base::OS::ActivationFrameAlignment();
if (frame_alignment != 0) {
// Make stack end at alignment and make room for num_arguments words
// and the original value of esp.
}
call(function);
- if (OS::ActivationFrameAlignment() != 0) {
+ if (base::OS::ActivationFrameAlignment() != 0) {
mov(esp, Operand(esp, num_arguments * kPointerSize));
} else {
add(esp, Immediate(num_arguments * kPointerSize));
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- CPU::FlushICache(address_, size_);
+ CpuFeatures::FlushICache(address_, size_);
// Check that the code was patched as expected.
ASSERT(masm_.pc_ == address_ + size_);
#define V8_ZONE_H_
#include "src/allocation.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
#include "src/globals.h"
#include "src/hashmap.h"
#include "src/list.h"
// thread fuzzing test. In the thread fuzzing test it will
// pseudorandomly select a successor thread and switch execution
// to that thread, suspending the current test.
-class ApiTestFuzzer: public v8::internal::Thread {
+class ApiTestFuzzer: public v8::base::Thread {
public:
void CallTest();
static int active_tests_;
static bool NextThread();
int test_number_;
- v8::internal::Semaphore gate_;
+ v8::base::Semaphore gate_;
bool active_;
void ContextSwitch();
static int GetNextTestNumber();
- static v8::internal::Semaphore all_tests_done_;
+ static v8::base::Semaphore all_tests_done_;
};
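// A minimal sketch of the v8::base::Thread / v8::base::Semaphore handoff the
// fuzzer above is built on: a worker blocks on its own semaphore until the
// coordinator signals it. The Worker class below is illustrative only, not
// part of the test suite:
//
//   class Worker : public v8::base::Thread {
//    public:
//     Worker() : Thread("Worker"), gate_(0) {}
//     virtual void Run() {
//       gate_.Wait();  // block until the coordinator lets this thread proceed
//       // ... run one test step ...
//     }
//     v8::base::Semaphore gate_;
//   };
//
//   Worker worker;
//   worker.Start();
//   worker.gate_.Signal();  // wake the worker
//   worker.Join();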
//
// Tests of profiles generator and utilities.
+#include "src/base/logging.h"
#include "test/cctest/profiler-extension.h"
-#include "src/checks.h"
namespace v8 {
namespace internal {
#include "include/v8-util.h"
#include "src/api.h"
#include "src/arguments.h"
+#include "src/base/platform/platform.h"
#include "src/compilation-cache.h"
#include "src/cpu-profiler.h"
#include "src/execution.h"
#include "src/isolate.h"
#include "src/objects.h"
#include "src/parser.h"
-#include "src/platform.h"
#include "src/snapshot.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
CHECK(v8_num(1)->StrictEquals(v8_num(1)));
CHECK(!v8_num(1)->StrictEquals(v8_num(2)));
CHECK(v8_num(0.0)->StrictEquals(v8_num(-0.0)));
- Local<Value> not_a_number = v8_num(i::OS::nan_value());
+ Local<Value> not_a_number = v8_num(v8::base::OS::nan_value());
CHECK(!not_a_number->StrictEquals(not_a_number));
CHECK(v8::False(isolate)->StrictEquals(v8::False(isolate)));
CHECK(!v8::False(isolate)->StrictEquals(v8::Undefined(isolate)));
bool ApiTestFuzzer::fuzzing_ = false;
-i::Semaphore ApiTestFuzzer::all_tests_done_(0);
+v8::base::Semaphore ApiTestFuzzer::all_tests_done_(0);
int ApiTestFuzzer::active_tests_;
int ApiTestFuzzer::tests_being_run_;
int ApiTestFuzzer::current_;
} regexp_interruption_data;
-class RegExpInterruptionThread : public i::Thread {
+class RegExpInterruptionThread : public v8::base::Thread {
public:
explicit RegExpInterruptionThread(v8::Isolate* isolate)
: Thread("TimeoutThread"), isolate_(isolate) {}
for (regexp_interruption_data.loop_count = 0;
regexp_interruption_data.loop_count < 7;
regexp_interruption_data.loop_count++) {
- i::OS::Sleep(50); // Wait a bit before requesting GC.
+ v8::base::OS::Sleep(50); // Wait a bit before requesting GC.
reinterpret_cast<i::Isolate*>(isolate_)->stack_guard()->RequestGC();
}
- i::OS::Sleep(50); // Wait a bit before terminating.
+ v8::base::OS::Sleep(50); // Wait a bit before terminating.
v8::V8::TerminateExecution(isolate_);
}
CHECK_EQ(0, result->Int32Value());
if (array_type == v8::kExternalFloat64Array ||
array_type == v8::kExternalFloat32Array) {
- CHECK_EQ(static_cast<int>(i::OS::nan_value()),
+ CHECK_EQ(static_cast<int>(v8::base::OS::nan_value()),
static_cast<int>(
i::Object::GetElement(
isolate, jsobj, 7).ToHandleChecked()->Number()));
static double DoubleToDateTime(double input) {
double date_limit = 864e13;
if (std::isnan(input) || input < -date_limit || input > date_limit) {
- return i::OS::nan_value();
+ return v8::base::OS::nan_value();
}
return (input < 0) ? -(std::floor(-input)) : std::floor(input);
}
return static_cast<int>(value->NumberValue());
}
-class IsolateThread : public v8::internal::Thread {
+class IsolateThread : public v8::base::Thread {
public:
IsolateThread(v8::Isolate* isolate, int fib_limit)
: Thread("IsolateThread"),
isolate->Dispose();
}
-class InitDefaultIsolateThread : public v8::internal::Thread {
+class InitDefaultIsolateThread : public v8::base::Thread {
public:
enum TestCase {
SetResourceConstraints,
void CallCompletedCallback1() {
- i::OS::Print("Firing callback 1.\n");
+ v8::base::OS::Print("Firing callback 1.\n");
callback_fired ^= 1; // Toggle first bit.
}
void CallCompletedCallback2() {
- i::OS::Print("Firing callback 2.\n");
+ v8::base::OS::Print("Firing callback 2.\n");
callback_fired ^= 2; // Toggle second bit.
}
int32_t level = args[0]->Int32Value();
if (level < 3) {
level++;
- i::OS::Print("Entering recursion level %d.\n", level);
+ v8::base::OS::Print("Entering recursion level %d.\n", level);
char script[64];
i::Vector<char> script_vector(script, sizeof(script));
i::SNPrintF(script_vector, "recursion(%d)", level);
CompileRun(script_vector.start());
- i::OS::Print("Leaving recursion level %d.\n", level);
+ v8::base::OS::Print("Leaving recursion level %d.\n", level);
CHECK_EQ(0, callback_fired);
} else {
- i::OS::Print("Recursion ends.\n");
+ v8::base::OS::Print("Recursion ends.\n");
CHECK_EQ(0, callback_fired);
}
}
env->GetIsolate()->AddCallCompletedCallback(CallCompletedCallback1);
env->GetIsolate()->AddCallCompletedCallback(CallCompletedCallback1);
env->GetIsolate()->AddCallCompletedCallback(CallCompletedCallback2);
- i::OS::Print("--- Script (1) ---\n");
+ v8::base::OS::Print("--- Script (1) ---\n");
Local<Script> script = v8::Script::Compile(
v8::String::NewFromUtf8(env->GetIsolate(), "recursion(0)"));
script->Run();
CHECK_EQ(3, callback_fired);
- i::OS::Print("\n--- Script (2) ---\n");
+ v8::base::OS::Print("\n--- Script (2) ---\n");
callback_fired = 0;
env->GetIsolate()->RemoveCallCompletedCallback(CallCompletedCallback1);
script->Run();
CHECK_EQ(2, callback_fired);
- i::OS::Print("\n--- Function ---\n");
+ v8::base::OS::Print("\n--- Function ---\n");
callback_fired = 0;
Local<Function> recursive_function =
Local<Function>::Cast(env->Global()->Get(v8_str("recursion")));
private:
static const int kExpectedValue = 1;
- class InterruptThread : public i::Thread {
+ class InterruptThread : public v8::base::Thread {
public:
explicit InterruptThread(ThreadInterruptTest* test)
: Thread("InterruptThread"), test_(test) {}
struct sigaction action;
// Ensure that we'll enter the waiting condition
- i::OS::Sleep(100);
+ v8::base::OS::Sleep(100);
// Setup signal handler
memset(&action, 0, sizeof(action));
kill(getpid(), SIGCHLD);
// Ensure that if wait has returned because of error
- i::OS::Sleep(100);
+ v8::base::OS::Sleep(100);
// Set value and signal semaphore
test_->sem_value_ = 1;
ThreadInterruptTest* test_;
};
- i::Semaphore sem_;
+ v8::base::Semaphore sem_;
volatile int sem_value_;
};
LocalContext env_;
v8::Isolate* isolate_;
- i::Semaphore sem_;
+ v8::base::Semaphore sem_;
int warmup_;
bool should_continue_;
};
}
private:
- class InterruptThread : public i::Thread {
+ class InterruptThread : public v8::base::Thread {
public:
explicit InterruptThread(RequestInterruptTestBase* test)
: Thread("RequestInterruptTest"), test_(test) {}
}
private:
- class InterruptThread : public i::Thread {
+ class InterruptThread : public v8::base::Thread {
public:
explicit InterruptThread(ClearInterruptFromAnotherThread* test)
: Thread("RequestInterruptTest"), test_(test) {}
ClearInterruptFromAnotherThread* test =
reinterpret_cast<ClearInterruptFromAnotherThread*>(data);
test->sem_.Signal();
- bool success = test->sem2_.WaitFor(i::TimeDelta::FromSeconds(2));
+ bool success = test->sem2_.WaitFor(v8::base::TimeDelta::FromSeconds(2));
// Crash instead of timeout to make this failure more prominent.
CHECK(success);
test->should_continue_ = false;
};
InterruptThread i_thread;
- i::Semaphore sem2_;
+ v8::base::Semaphore sem2_;
};
RESET(); \
START_AFTER_RESET();
-#define RUN() \
- CPU::FlushICache(buf, masm.SizeOfGeneratedCode()); \
- { \
- void (*test_function)(void); \
- memcpy(&test_function, &buf, sizeof(buf)); \
- test_function(); \
+#define RUN() \
+ CpuFeatures::FlushICache(buf, masm.SizeOfGeneratedCode()); \
+ { \
+ void (*test_function)(void); \
+ memcpy(&test_function, &buf, sizeof(buf)); \
+ test_function(); \
}
#define END() \
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/disassembler.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
-#include "src/platform.h"
#include "src/serialize.h"
#include "test/cctest/cctest.h"
CHECK_EQ(kLess, f(1.1, 2.2));
CHECK_EQ(kEqual, f(2.2, 2.2));
CHECK_EQ(kGreater, f(3.3, 2.2));
- CHECK_EQ(kNaN, f(OS::nan_value(), 1.1));
+ CHECK_EQ(kNaN, f(v8::base::OS::nan_value(), 1.1));
}
TEST(StackAlignmentForSSE2) {
CcTest::InitializeVM();
- CHECK_EQ(0, OS::ActivationFrameAlignment() % 16);
+ CHECK_EQ(0, v8::base::OS::ActivationFrameAlignment() % 16);
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
-#include "src/platform.h"
#include "src/serialize.h"
#include "test/cctest/cctest.h"
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
CcTest::InitializeVM();
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
// Assemble two loops using rax as counter, and verify the ending counts.
TEST(StackAlignmentForSSE2) {
CcTest::InitializeVM();
- CHECK_EQ(0, OS::ActivationFrameAlignment() % 16);
+ CHECK_EQ(0, v8::base::OS::ActivationFrameAlignment() % 16);
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/disassembler.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
-#include "src/platform.h"
#include "src/serialize.h"
#include "test/cctest/cctest.h"
CHECK_EQ(kLess, f(1.1, 2.2));
CHECK_EQ(kEqual, f(2.2, 2.2));
CHECK_EQ(kGreater, f(3.3, 2.2));
- CHECK_EQ(kNaN, f(OS::nan_value(), 1.1));
+ CHECK_EQ(kNaN, f(v8::base::OS::nan_value(), 1.1));
}
#include "src/bignum-dtoa.h"
+#include "src/base/platform/platform.h"
#include "src/double.h"
-#include "src/platform.h"
#include "test/cctest/cctest.h"
#include "test/cctest/gay-fixed.h"
#include "test/cctest/gay-precision.h"
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/bignum.h"
-#include "src/platform.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
typedef v8::base::AtomicWord Record;
typedef SamplingCircularQueue<Record, 12> TestSampleQueue;
-class ProducerThread: public i::Thread {
+class ProducerThread: public v8::base::Thread {
public:
ProducerThread(TestSampleQueue* scq,
int records_per_chunk,
Record value,
- i::Semaphore* finished)
+ v8::base::Semaphore* finished)
: Thread("producer"),
scq_(scq),
records_per_chunk_(records_per_chunk),
TestSampleQueue* scq_;
const int records_per_chunk_;
Record value_;
- i::Semaphore* finished_;
+ v8::base::Semaphore* finished_;
};
} // namespace
const int kRecordsPerChunk = 4;
TestSampleQueue scq;
- i::Semaphore semaphore(0);
+ v8::base::Semaphore semaphore(0);
ProducerThread producer1(&scq, kRecordsPerChunk, 1, &semaphore);
ProducerThread producer2(&scq, kRecordsPerChunk, 10, &semaphore);
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/code-stubs.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
-#include "src/platform.h"
#include "src/simulator.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
bool inline_fastpath) {
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
CodeDesc desc;
masm.GetCode(&desc);
- CPU::FlushICache(buffer, actual_size);
+ CpuFeatures::FlushICache(buffer, actual_size);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/code-stubs.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
-#include "src/platform.h"
#include "src/simulator.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
bool inline_fastpath) {
// Allocate an executable page of memory.
size_t actual_size = 4 * Assembler::kMinimalBufferSize;
- byte* buffer = static_cast<byte*>(OS::Allocate(actual_size,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(
+ v8::base::OS::Allocate(actual_size, &actual_size, true));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
CodeDesc desc;
masm.GetCode(&desc);
- CPU::FlushICache(buffer, actual_size);
+ CpuFeatures::FlushICache(buffer, actual_size);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/code-stubs.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
-#include "src/platform.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/code-stubs.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
#include "src/mips/constants-mips.h"
-#include "src/platform.h"
#include "src/simulator.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
bool inline_fastpath) {
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
CodeDesc desc;
masm.GetCode(&desc);
- CPU::FlushICache(buffer, actual_size);
+ CpuFeatures::FlushICache(buffer, actual_size);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/code-stubs.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
-#include "src/platform.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/code-stubs.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
-#include "src/platform.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
Register destination_reg) {
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
HandleScope handles(isolate);
MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/code-stubs.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
-#include "src/platform.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
// #define NaN and Infinity so that it's possible to cut-and-paste these tests
// directly to a .js file and run them.
-#define NaN (OS::nan_value())
+#define NaN (v8::base::OS::nan_value())
#define Infinity (std::numeric_limits<double>::infinity())
#define RunOneTruncationTest(p1, p2) \
RunOneTruncationTestWithTest(callWrapper, func, p1, p2)
#include "src/v8.h"
-#include "src/platform/condition-variable.h"
-#include "src/platform/time.h"
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/time.h"
#include "test/cctest/cctest.h"
using namespace ::v8::internal;
TEST(WaitForAfterNofityOnSameThread) {
for (int n = 0; n < 10; ++n) {
- Mutex mutex;
- ConditionVariable cv;
+ v8::base::Mutex mutex;
+ v8::base::ConditionVariable cv;
- LockGuard<Mutex> lock_guard(&mutex);
+ v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex);
cv.NotifyOne();
- CHECK_EQ(false, cv.WaitFor(&mutex, TimeDelta::FromMicroseconds(n)));
+ CHECK_EQ(false,
+ cv.WaitFor(&mutex, v8::base::TimeDelta::FromMicroseconds(n)));
cv.NotifyAll();
- CHECK_EQ(false, cv.WaitFor(&mutex, TimeDelta::FromMicroseconds(n)));
+ CHECK_EQ(false,
+ cv.WaitFor(&mutex, v8::base::TimeDelta::FromMicroseconds(n)));
}
}
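// A minimal sketch of the timed-wait contract the test above exercises: a
// notification with no waiter is lost, so a later WaitFor() simply expires
// and returns false. Mutex, ConditionVariable, LockGuard and TimeDelta are
// the v8::base types this patch switches the tests to:
//
//   v8::base::Mutex mutex;
//   v8::base::ConditionVariable cv;
//   v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex);
//   cv.NotifyOne();  // nobody is waiting yet, so the notification is dropped
//   bool woken = cv.WaitFor(&mutex, v8::base::TimeDelta::FromMicroseconds(10));
//   // woken == false: the wait timed out instead of being signalled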
-class ThreadWithMutexAndConditionVariable V8_FINAL : public Thread {
+class ThreadWithMutexAndConditionVariable V8_FINAL : public v8::base::Thread {
public:
ThreadWithMutexAndConditionVariable()
: Thread("ThreadWithMutexAndConditionVariable"),
virtual ~ThreadWithMutexAndConditionVariable() {}
virtual void Run() V8_OVERRIDE {
- LockGuard<Mutex> lock_guard(&mutex_);
+ v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_);
running_ = true;
cv_.NotifyOne();
while (running_) {
bool running_;
bool finished_;
- ConditionVariable cv_;
- Mutex mutex_;
+ v8::base::ConditionVariable cv_;
+ v8::base::Mutex mutex_;
};
ThreadWithMutexAndConditionVariable threads[kThreadCount];
for (int n = 0; n < kThreadCount; ++n) {
- LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+ v8::base::LockGuard<v8::base::Mutex> lock_guard(&threads[n].mutex_);
CHECK(!threads[n].running_);
CHECK(!threads[n].finished_);
threads[n].Start();
}
for (int n = kThreadCount - 1; n >= 0; --n) {
- LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+ v8::base::LockGuard<v8::base::Mutex> lock_guard(&threads[n].mutex_);
CHECK(threads[n].running_);
CHECK(!threads[n].finished_);
}
for (int n = 0; n < kThreadCount; ++n) {
- LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+ v8::base::LockGuard<v8::base::Mutex> lock_guard(&threads[n].mutex_);
CHECK(threads[n].running_);
CHECK(!threads[n].finished_);
// Tell the nth thread to quit.
for (int n = kThreadCount - 1; n >= 0; --n) {
// Wait for nth thread to quit.
- LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+ v8::base::LockGuard<v8::base::Mutex> lock_guard(&threads[n].mutex_);
while (!threads[n].finished_) {
threads[n].cv_.Wait(&threads[n].mutex_);
}
for (int n = 0; n < kThreadCount; ++n) {
threads[n].Join();
- LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+ v8::base::LockGuard<v8::base::Mutex> lock_guard(&threads[n].mutex_);
CHECK(!threads[n].running_);
CHECK(threads[n].finished_);
}
}
-class ThreadWithSharedMutexAndConditionVariable V8_FINAL : public Thread {
+class ThreadWithSharedMutexAndConditionVariable V8_FINAL
+ : public v8::base::Thread {
public:
ThreadWithSharedMutexAndConditionVariable()
: Thread("ThreadWithSharedMutexAndConditionVariable"),
virtual ~ThreadWithSharedMutexAndConditionVariable() {}
virtual void Run() V8_OVERRIDE {
- LockGuard<Mutex> lock_guard(mutex_);
+ v8::base::LockGuard<v8::base::Mutex> lock_guard(mutex_);
running_ = true;
cv_->NotifyAll();
while (running_) {
bool running_;
bool finished_;
- ConditionVariable* cv_;
- Mutex* mutex_;
+ v8::base::ConditionVariable* cv_;
+ v8::base::Mutex* mutex_;
};
TEST(MultipleThreadsWithSharedSeparateConditionVariables) {
static const int kThreadCount = 128;
ThreadWithSharedMutexAndConditionVariable threads[kThreadCount];
- ConditionVariable cv;
- Mutex mutex;
+ v8::base::ConditionVariable cv;
+ v8::base::Mutex mutex;
for (int n = 0; n < kThreadCount; ++n) {
threads[n].mutex_ = &mutex;
// Start all threads.
{
- LockGuard<Mutex> lock_guard(&mutex);
+ v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex);
for (int n = 0; n < kThreadCount; ++n) {
CHECK(!threads[n].running_);
CHECK(!threads[n].finished_);
// Wait for all threads to start.
{
- LockGuard<Mutex> lock_guard(&mutex);
+ v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex);
for (int n = kThreadCount - 1; n >= 0; --n) {
while (!threads[n].running_) {
cv.Wait(&mutex);
// Make sure that all threads are running.
{
- LockGuard<Mutex> lock_guard(&mutex);
+ v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex);
for (int n = 0; n < kThreadCount; ++n) {
CHECK(threads[n].running_);
CHECK(!threads[n].finished_);
// Tell all threads to quit.
{
- LockGuard<Mutex> lock_guard(&mutex);
+ v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex);
for (int n = kThreadCount - 1; n >= 0; --n) {
CHECK(threads[n].running_);
CHECK(!threads[n].finished_);
// Wait for all threads to quit.
{
- LockGuard<Mutex> lock_guard(&mutex);
+ v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex);
for (int n = 0; n < kThreadCount; ++n) {
while (!threads[n].finished_) {
cv.Wait(&mutex);
// Make sure all threads are finished.
{
- LockGuard<Mutex> lock_guard(&mutex);
+ v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex);
for (int n = kThreadCount - 1; n >= 0; --n) {
CHECK(!threads[n].running_);
CHECK(threads[n].finished_);
}
-class LoopIncrementThread V8_FINAL : public Thread {
+class LoopIncrementThread V8_FINAL : public v8::base::Thread {
public:
LoopIncrementThread(int rem,
int* counter,
int limit,
int thread_count,
- ConditionVariable* cv,
- Mutex* mutex)
+ v8::base::ConditionVariable* cv,
+ v8::base::Mutex* mutex)
: Thread("LoopIncrementThread"), rem_(rem), counter_(counter),
limit_(limit), thread_count_(thread_count), cv_(cv), mutex_(mutex) {
CHECK_LT(rem, thread_count);
virtual void Run() V8_OVERRIDE {
int last_count = -1;
while (true) {
- LockGuard<Mutex> lock_guard(mutex_);
+ v8::base::LockGuard<v8::base::Mutex> lock_guard(mutex_);
int count = *counter_;
while (count % thread_count_ != rem_ && count < limit_) {
cv_->Wait(mutex_);
int* counter_;
const int limit_;
const int thread_count_;
- ConditionVariable* cv_;
- Mutex* mutex_;
+ v8::base::ConditionVariable* cv_;
+ v8::base::Mutex* mutex_;
};
TEST(LoopIncrement) {
static const int kMaxThreadCount = 16;
- Mutex mutex;
- ConditionVariable cv;
+ v8::base::Mutex mutex;
+ v8::base::ConditionVariable cv;
for (int thread_count = 1; thread_count < kMaxThreadCount; ++thread_count) {
int limit = thread_count * 100;
int counter = 0;
// Setup the threads.
- Thread** threads = new Thread*[thread_count];
+ v8::base::Thread** threads = new v8::base::Thread*[thread_count];
for (int n = 0; n < thread_count; ++n) {
threads[n] = new LoopIncrementThread(
n, &counter, limit, thread_count, &cv, &mutex);
#include "src/v8.h"
-#include "src/platform.h"
+#include "src/base/platform/platform.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
TEST(NonStrDecimalLiteral) {
UnicodeCache uc;
- CHECK(std::isnan(StringToDouble(&uc, " ", NO_FLAGS, OS::nan_value())));
- CHECK(std::isnan(StringToDouble(&uc, "", NO_FLAGS, OS::nan_value())));
- CHECK(std::isnan(StringToDouble(&uc, " ", NO_FLAGS, OS::nan_value())));
+ CHECK(std::isnan(
+ StringToDouble(&uc, " ", NO_FLAGS, v8::base::OS::nan_value())));
+ CHECK(
+ std::isnan(StringToDouble(&uc, "", NO_FLAGS, v8::base::OS::nan_value())));
+ CHECK(std::isnan(
+ StringToDouble(&uc, " ", NO_FLAGS, v8::base::OS::nan_value())));
CHECK_EQ(0.0, StringToDouble(&uc, "", NO_FLAGS));
CHECK_EQ(0.0, StringToDouble(&uc, " ", NO_FLAGS));
}
#include "src/v8.h"
-#include "src/cpu.h"
+#include "src/base/cpu.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
TEST(RequiredFeaturesX64) {
// Test for the features required by every x86 CPU in compat/legacy mode.
- CPU cpu;
+ v8::base::CPU cpu;
CHECK(cpu.has_sahf());
}
#include "src/v8.h"
#include "include/v8-profiler.h"
+#include "src/base/platform/platform.h"
#include "src/cpu-profiler-inl.h"
-#include "src/platform.h"
#include "src/smart-pointers.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
using i::ProfilerEventsProcessor;
using i::ScopedVector;
using i::SmartPointer;
-using i::TimeDelta;
using i::Vector;
CpuProfilesCollection profiles(isolate->heap());
ProfileGenerator generator(&profiles);
SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
- &generator, NULL, TimeDelta::FromMicroseconds(100)));
+ &generator, NULL, v8::base::TimeDelta::FromMicroseconds(100)));
processor->Start();
processor->StopSynchronously();
}
profiles->StartProfiling("", false);
ProfileGenerator generator(profiles);
SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
- &generator, NULL, TimeDelta::FromMicroseconds(100)));
+ &generator, NULL, v8::base::TimeDelta::FromMicroseconds(100)));
processor->Start();
CpuProfiler profiler(isolate, profiles, &generator, processor.get());
profiles->StartProfiling("", false);
ProfileGenerator generator(profiles);
SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
- &generator, NULL, TimeDelta::FromMicroseconds(100)));
+ &generator, NULL, v8::base::TimeDelta::FromMicroseconds(100)));
processor->Start();
CpuProfiler profiler(isolate, profiles, &generator, processor.get());
profiles->StartProfiling("", false);
ProfileGenerator generator(profiles);
SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
- &generator, NULL, TimeDelta::FromMicroseconds(100)));
+ &generator, NULL, v8::base::TimeDelta::FromMicroseconds(100)));
processor->Start();
CpuProfiler profiler(isolate, profiles, &generator, processor.get());
private:
void Wait() {
if (is_warming_up_) return;
- double start = i::OS::TimeCurrentMillis();
+ double start = v8::base::OS::TimeCurrentMillis();
double duration = 0;
while (duration < min_duration_ms_) {
- i::OS::Sleep(1);
- duration = i::OS::TimeCurrentMillis() - start;
+ v8::base::OS::Sleep(1);
+ duration = v8::base::OS::TimeCurrentMillis() - start;
}
}
#include "src/v8.h"
-#include "src/cpu.h"
+#include "src/base/cpu.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
TEST(RequiredFeaturesX64) {
// Test for the features required by every x64 CPU.
- CPU cpu;
+ v8::base::CPU cpu;
CHECK(cpu.has_fpu());
CHECK(cpu.has_cmov());
CHECK(cpu.has_mmx());
#include "src/v8.h"
-#include "src/cpu.h"
+#include "src/base/cpu.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
TEST(RequiredFeaturesX64) {
// Test for the features required by every x86 CPU in compat/legacy mode.
- CPU cpu;
+ v8::base::CPU cpu;
CHECK(cpu.has_sahf());
}
#include "src/v8.h"
-#include "src/cpu.h"
+#include "src/base/cpu.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
TEST(FeatureImplications) {
// Test for features implied by other features.
- CPU cpu;
+ v8::base::CPU cpu;
// ia32 and x64 features
CHECK(!cpu.has_sse() || cpu.has_mmx());
TEST(NumberOfProcessorsOnline) {
- CHECK_GT(OS::NumberOfProcessorsOnline(), 0);
+ CHECK_GT(v8::base::OS::NumberOfProcessorsOnline(), 0);
}
#include "src/v8.h"
#include "src/api.h"
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/platform.h"
#include "src/compilation-cache.h"
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/frames.h"
-#include "src/platform.h"
-#include "src/platform/condition-variable.h"
#include "src/stub-cache.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
-using ::v8::internal::Mutex;
-using ::v8::internal::LockGuard;
-using ::v8::internal::ConditionVariable;
-using ::v8::internal::Semaphore;
+using ::v8::base::Mutex;
+using ::v8::base::LockGuard;
+using ::v8::base::ConditionVariable;
+using ::v8::base::OS;
+using ::v8::base::Semaphore;
using ::v8::internal::EmbeddedVector;
using ::v8::internal::Object;
-using ::v8::internal::OS;
using ::v8::internal::Handle;
using ::v8::internal::Heap;
using ::v8::internal::JSGlobalProxy;
ThreadBarrier<2> barrier_3;
ThreadBarrier<2> barrier_4;
ThreadBarrier<2> barrier_5;
- v8::internal::Semaphore semaphore_1;
- v8::internal::Semaphore semaphore_2;
+ v8::base::Semaphore semaphore_1;
+ v8::base::Semaphore semaphore_2;
};
// This is the debugger thread that executes no v8 calls except
// placing JSON debugger commands in the queue.
-class MessageQueueDebuggerThread : public v8::internal::Thread {
+class MessageQueueDebuggerThread : public v8::base::Thread {
public:
MessageQueueDebuggerThread()
: Thread("MessageQueueDebuggerThread") { }
Barriers threaded_debugging_barriers;
-class V8Thread : public v8::internal::Thread {
+class V8Thread : public v8::base::Thread {
public:
V8Thread() : Thread("V8Thread") { }
void Run();
};
-class DebuggerThread : public v8::internal::Thread {
+class DebuggerThread : public v8::base::Thread {
public:
DebuggerThread() : Thread("DebuggerThread") { }
void Run();
* breakpoint is hit when enabled, and missed when disabled.
*/
-class BreakpointsV8Thread : public v8::internal::Thread {
+class BreakpointsV8Thread : public v8::base::Thread {
public:
BreakpointsV8Thread() : Thread("BreakpointsV8Thread") { }
void Run();
};
-class BreakpointsDebuggerThread : public v8::internal::Thread {
+class BreakpointsDebuggerThread : public v8::base::Thread {
public:
explicit BreakpointsDebuggerThread(bool global_evaluate)
: Thread("BreakpointsDebuggerThread"),
}
-class SendCommandThread : public v8::internal::Thread {
+class SendCommandThread : public v8::base::Thread {
public:
explicit SendCommandThread(v8::Isolate* isolate)
: Thread("SendCommandThread"),
static void ProcessDebugMessages(v8::Isolate* isolate, void* data) {
v8::Debug::ProcessDebugMessages();
- reinterpret_cast<v8::internal::Semaphore*>(data)->Signal();
+ reinterpret_cast<v8::base::Semaphore*>(data)->Signal();
}
virtual void Run() {
}
private:
- v8::internal::Semaphore semaphore_;
+ v8::base::Semaphore semaphore_;
v8::Isolate* isolate_;
};
}
-v8::internal::Semaphore terminate_requested_semaphore(0);
-v8::internal::Semaphore terminate_fired_semaphore(0);
+v8::base::Semaphore terminate_requested_semaphore(0);
+v8::base::Semaphore terminate_fired_semaphore(0);
bool terminate_already_fired = false;
if (event_details.GetEvent() != v8::Break || terminate_already_fired) return;
terminate_requested_semaphore.Signal();
// Wait for at most 2 seconds for the terminate request.
- CHECK(terminate_fired_semaphore.WaitFor(i::TimeDelta::FromSeconds(2)));
+ CHECK(terminate_fired_semaphore.WaitFor(v8::base::TimeDelta::FromSeconds(2)));
terminate_already_fired = true;
v8::internal::Isolate* isolate =
v8::Utils::OpenHandle(*event_details.GetEventContext())->GetIsolate();
}
-class TerminationThread : public v8::internal::Thread {
+class TerminationThread : public v8::base::Thread {
public:
explicit TerminationThread(v8::Isolate* isolate) : Thread("terminator"),
isolate_(isolate) { }
#include "src/v8.h"
#include "src/api.h"
+#include "src/base/platform/platform.h"
#include "src/compilation-cache.h"
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/isolate.h"
-#include "src/platform.h"
#include "src/stub-cache.h"
#include "test/cctest/cctest.h"
+using ::v8::base::OS;
using ::v8::internal::Deoptimizer;
using ::v8::internal::EmbeddedVector;
using ::v8::internal::Handle;
using ::v8::internal::Isolate;
using ::v8::internal::JSFunction;
-using ::v8::internal::OS;
using ::v8::internal::Object;
// Size of temp buffer for formatting small strings.
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/diy-fp.h"
-#include "src/platform.h"
#include "test/cctest/cctest.h"
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/diy-fp.h"
#include "src/double.h"
-#include "src/platform.h"
#include "test/cctest/cctest.h"
TEST(IsSpecial) {
CHECK(Double(V8_INFINITY).IsSpecial());
CHECK(Double(-V8_INFINITY).IsSpecial());
- CHECK(Double(OS::nan_value()).IsSpecial());
+ CHECK(Double(v8::base::OS::nan_value()).IsSpecial());
uint64_t bits = V8_2PART_UINT64_C(0xFFF12345, 00000000);
CHECK(Double(bits).IsSpecial());
// Denormals are not special:
TEST(IsInfinite) {
CHECK(Double(V8_INFINITY).IsInfinite());
CHECK(Double(-V8_INFINITY).IsInfinite());
- CHECK(!Double(OS::nan_value()).IsInfinite());
+ CHECK(!Double(v8::base::OS::nan_value()).IsInfinite());
CHECK(!Double(0.0).IsInfinite());
CHECK(!Double(-0.0).IsInfinite());
CHECK(!Double(1.0).IsInfinite());
#include "src/dtoa.h"
+#include "src/base/platform/platform.h"
#include "src/double.h"
-#include "src/platform.h"
#include "test/cctest/cctest.h"
#include "test/cctest/gay-fixed.h"
#include "test/cctest/gay-precision.h"
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/diy-fp.h"
#include "src/double.h"
#include "src/fast-dtoa.h"
-#include "src/platform.h"
#include "test/cctest/cctest.h"
#include "test/cctest/gay-precision.h"
#include "test/cctest/gay-shortest.h"
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/double.h"
#include "src/fixed-dtoa.h"
-#include "src/platform.h"
#include "test/cctest/cctest.h"
#include "test/cctest/gay-fixed.h"
#include "src/v8.h"
#include "src/api.h"
+#include "src/base/platform/platform.h"
#include "src/compilation-cache.h"
#include "src/execution.h"
#include "src/isolate.h"
#include "src/parser.h"
-#include "src/platform.h"
#include "src/snapshot.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
#include "src/v8.h"
#include "src/api.h"
+#include "src/base/platform/platform.h"
#include "src/compilation-cache.h"
#include "src/execution.h"
#include "src/isolate.h"
#include "src/parser.h"
-#include "src/platform.h"
#include "src/snapshot.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
~TaskCounter() { CHECK_EQ(0, counter_); }
int GetCount() const {
- LockGuard<Mutex> guard(&lock_);
+ v8::base::LockGuard<v8::base::Mutex> guard(&lock_);
return counter_;
}
void Inc() {
- LockGuard<Mutex> guard(&lock_);
+ v8::base::LockGuard<v8::base::Mutex> guard(&lock_);
++counter_;
}
void Dec() {
- LockGuard<Mutex> guard(&lock_);
+ v8::base::LockGuard<v8::base::Mutex> guard(&lock_);
--counter_;
}
private:
- mutable Mutex lock_;
+ mutable v8::base::Mutex lock_;
int counter_;
DISALLOW_COPY_AND_ASSIGN(TaskCounter);
};
-class TestWorkerThread : public Thread {
+class TestWorkerThread : public v8::base::Thread {
public:
explicit TestWorkerThread(v8::Task* task)
: Thread("libplatform TestWorkerThread"), semaphore_(0), task_(task) {}
}
private:
- Semaphore semaphore_;
+ v8::base::Semaphore semaphore_;
v8::Task* task_;
DISALLOW_COPY_AND_ASSIGN(TestWorkerThread);
#include "src/v8.h"
#include "src/api.h"
+#include "src/base/platform/platform.h"
#include "src/compilation-cache.h"
#include "src/execution.h"
#include "src/isolate.h"
#include "src/parser.h"
-#include "src/platform.h"
#include "src/smart-pointers.h"
#include "src/snapshot.h"
#include "src/unicode-inl.h"
// Migrating an isolate
-class KangarooThread : public v8::internal::Thread {
+class KangarooThread : public v8::base::Thread {
public:
KangarooThread(v8::Isolate* isolate, v8::Handle<v8::Context> context)
: Thread("KangarooThread"),
virtual void Run() = 0;
private:
- class ThreadWithSemaphore : public i::Thread {
+ class ThreadWithSemaphore : public v8::base::Thread {
public:
explicit ThreadWithSemaphore(JoinableThread* joinable_thread)
: Thread(joinable_thread->name_),
};
const char* name_;
- i::Semaphore semaphore_;
+ v8::base::Semaphore semaphore_;
ThreadWithSemaphore thread_;
friend class ThreadWithSemaphore;
size_t act_size;
// Allocate two blocks to copy data between.
- byte* src_buffer = static_cast<byte*>(OS::Allocate(data_size, &act_size, 0));
+ byte* src_buffer =
+ static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
CHECK(src_buffer);
CHECK(act_size >= static_cast<size_t>(data_size));
- byte* dest_buffer = static_cast<byte*>(OS::Allocate(data_size, &act_size, 0));
+ byte* dest_buffer =
+ static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
CHECK(dest_buffer);
CHECK(act_size >= static_cast<size_t>(data_size));
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
#include "src/v8.h"
#include "test/cctest/cctest.h"
+#include "src/base/platform/platform.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
-#include "src/platform.h"
#include "src/serialize.h"
using namespace v8::internal;
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
size_t act_size;
// Allocate two blocks to copy data between.
- byte* src_buffer = static_cast<byte*>(OS::Allocate(data_size, &act_size, 0));
+ byte* src_buffer =
+ static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
CHECK(src_buffer);
CHECK(act_size >= static_cast<size_t>(data_size));
- byte* dest_buffer = static_cast<byte*>(OS::Allocate(data_size, &act_size, 0));
+ byte* dest_buffer =
+ static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
CHECK(dest_buffer);
CHECK(act_size >= static_cast<size_t>(data_size));
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
-#include "src/platform.h"
#include "src/serialize.h"
#include "test/cctest/cctest.h"
using i::Isolate;
using i::Label;
using i::MacroAssembler;
-using i::OS;
using i::Operand;
using i::RelocInfo;
using i::Representation;
i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize * 2, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize * 3, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 4,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize * 4, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize * 2, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize * 2, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 5,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize * 5, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize * 2, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize * 2, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize * 2, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize * 2, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 7,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize * 7, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 5,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize * 5, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize * 3, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
i::V8::Initialize(NULL);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 4,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize * 4, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize * 2, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
#include "src/v8.h"
#include "test/cctest/cctest.h"
+#include "src/base/platform/platform.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
-#include "src/platform.h"
#include "src/serialize.h"
using namespace v8::internal;
// Allocate an executable page of memory.
size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
#include "src/v8.h"
-#include "src/platform/mutex.h"
+#include "src/base/platform/mutex.h"
#include "test/cctest/cctest.h"
using namespace ::v8::internal;
TEST(LockGuardMutex) {
- Mutex mutex;
- { LockGuard<Mutex> lock_guard(&mutex);
+ v8::base::Mutex mutex;
+ { v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex);
}
- { LockGuard<Mutex> lock_guard(&mutex);
+ { v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex);
}
}
TEST(LockGuardRecursiveMutex) {
- RecursiveMutex recursive_mutex;
- { LockGuard<RecursiveMutex> lock_guard(&recursive_mutex);
+ v8::base::RecursiveMutex recursive_mutex;
+ { v8::base::LockGuard<v8::base::RecursiveMutex> lock_guard(&recursive_mutex);
}
- { LockGuard<RecursiveMutex> lock_guard1(&recursive_mutex);
- LockGuard<RecursiveMutex> lock_guard2(&recursive_mutex);
+ { v8::base::LockGuard<v8::base::RecursiveMutex> lock_guard1(&recursive_mutex);
+ v8::base::LockGuard<v8::base::RecursiveMutex> lock_guard2(&recursive_mutex);
}
}
TEST(LockGuardLazyMutex) {
- LazyMutex lazy_mutex = LAZY_MUTEX_INITIALIZER;
- { LockGuard<Mutex> lock_guard(lazy_mutex.Pointer());
+ v8::base::LazyMutex lazy_mutex = LAZY_MUTEX_INITIALIZER;
+ { v8::base::LockGuard<v8::base::Mutex> lock_guard(lazy_mutex.Pointer());
}
- { LockGuard<Mutex> lock_guard(lazy_mutex.Pointer());
+ { v8::base::LockGuard<v8::base::Mutex> lock_guard(lazy_mutex.Pointer());
}
}
TEST(LockGuardLazyRecursiveMutex) {
- LazyRecursiveMutex lazy_recursive_mutex = LAZY_RECURSIVE_MUTEX_INITIALIZER;
- { LockGuard<RecursiveMutex> lock_guard(lazy_recursive_mutex.Pointer());
+ v8::base::LazyRecursiveMutex lazy_recursive_mutex =
+ LAZY_RECURSIVE_MUTEX_INITIALIZER;
+ {
+ v8::base::LockGuard<v8::base::RecursiveMutex> lock_guard(
+ lazy_recursive_mutex.Pointer());
}
- { LockGuard<RecursiveMutex> lock_guard1(lazy_recursive_mutex.Pointer());
- LockGuard<RecursiveMutex> lock_guard2(lazy_recursive_mutex.Pointer());
+ {
+ v8::base::LockGuard<v8::base::RecursiveMutex> lock_guard1(
+ lazy_recursive_mutex.Pointer());
+ v8::base::LockGuard<v8::base::RecursiveMutex> lock_guard2(
+ lazy_recursive_mutex.Pointer());
}
}
TEST(MultipleMutexes) {
- Mutex mutex1;
- Mutex mutex2;
- Mutex mutex3;
+ v8::base::Mutex mutex1;
+ v8::base::Mutex mutex2;
+ v8::base::Mutex mutex3;
// Order 1
mutex1.Lock();
mutex2.Lock();
TEST(MultipleRecursiveMutexes) {
- RecursiveMutex recursive_mutex1;
- RecursiveMutex recursive_mutex2;
+ v8::base::RecursiveMutex recursive_mutex1;
+ v8::base::RecursiveMutex recursive_mutex2;
// Order 1
recursive_mutex1.Lock();
recursive_mutex2.Lock();
isolate, exception_handle, "message").ToHandleChecked());
if (result == kSuccess) {
- i::OS::Print(
+ v8::base::OS::Print(
"Parser failed on:\n"
"\t%s\n"
"with error:\n"
}
if (!data.has_error()) {
- i::OS::Print(
+ v8::base::OS::Print(
"Parser failed on:\n"
"\t%s\n"
"with error:\n"
// Check that preparser and parser produce the same error.
i::Handle<i::String> preparser_message = FormatMessage(&data);
if (!i::String::Equals(message_string, preparser_message)) {
- i::OS::Print(
+ v8::base::OS::Print(
"Expected parser and preparser to produce the same error on:\n"
"\t%s\n"
"However, found the following error messages\n"
CHECK(false);
}
} else if (data.has_error()) {
- i::OS::Print(
+ v8::base::OS::Print(
"Preparser failed on:\n"
"\t%s\n"
"with error:\n"
source->ToCString().get(), FormatMessage(&data)->ToCString().get());
CHECK(false);
} else if (result == kError) {
- i::OS::Print(
+ v8::base::OS::Print(
"Expected error on:\n"
"\t%s\n"
"However, parser and preparser succeeded",
CHECK(!data->HasError());
if (data->function_count() != test_cases[i].functions) {
- i::OS::Print(
+ v8::base::OS::Print(
"Expected preparse data for program:\n"
"\t%s\n"
"to contain %d functions, however, received %d functions.\n",
#include "src/v8.h"
-#include "src/platform.h"
+#include "src/base/platform/platform.h"
#include "test/cctest/cctest.h"
using namespace ::v8::internal;
TEST(VirtualMemory) {
- VirtualMemory* vm = new VirtualMemory(1 * MB);
+ v8::base::VirtualMemory* vm = new v8::base::VirtualMemory(1 * MB);
CHECK(vm->IsReserved());
void* block_addr = vm->address();
size_t block_size = 4 * KB;
TEST(GetCurrentProcessId) {
- CHECK_EQ(static_cast<int>(getpid()), OS::GetCurrentProcessId());
+ CHECK_EQ(static_cast<int>(getpid()), v8::base::OS::GetCurrentProcessId());
}
#include "src/v8.h"
-#include "src/checks.h"
-#include "src/platform.h"
+#include "src/base/logging.h"
+#include "src/base/platform/platform.h"
#include "test/cctest/cctest.h"
-using v8::internal::Thread;
+using v8::base::Thread;
static const int kValueCount = 128;
#include "src/v8.h"
+#include "src/base/platform/platform.h"
#include "src/base/win32-headers.h"
-#include "src/platform.h"
#include "test/cctest/cctest.h"
using namespace ::v8::internal;
TEST(VirtualMemory) {
- VirtualMemory* vm = new VirtualMemory(1 * MB);
+ v8::base::VirtualMemory* vm = new v8::base::VirtualMemory(1 * MB);
CHECK(vm->IsReserved());
void* block_addr = vm->address();
size_t block_size = 4 * KB;
TEST(GetCurrentProcessId) {
CHECK_EQ(static_cast<int>(::GetCurrentProcessId()),
- OS::GetCurrentProcessId());
+ v8::base::OS::GetCurrentProcessId());
}
#include <stdlib.h>
-#include "src/platform.h"
+#include "src/base/platform/platform.h"
#include "test/cctest/cctest.h"
using namespace ::v8::internal;
v8::Local<v8::Function>::Cast(global_object->Get(v8_str("foo")));
v8::Local<v8::Value> result = foo->Call(global_object, 0, NULL);
- CHECK_EQ(0, result->Int32Value() % OS::ActivationFrameAlignment());
+ CHECK_EQ(0, result->Int32Value() % v8::base::OS::ActivationFrameAlignment());
}
#undef GET_STACK_POINTERS
#include "src/v8.h"
#include "test/cctest/cctest.h"
+#include "src/base/utils/random-number-generator.h"
#include "src/isolate-inl.h"
-#include "src/utils/random-number-generator.h"
using namespace v8::internal;
TEST(NextIntWithMaxValue) {
for (unsigned n = 0; n < ARRAY_SIZE(kRandomSeeds); ++n) {
- RandomNumberGenerator rng(kRandomSeeds[n]);
+ v8::base::RandomNumberGenerator rng(kRandomSeeds[n]);
for (int max = 1; max <= kMaxRuns; ++max) {
int n = rng.NextInt(max);
CHECK_LE(0, n);
TEST(NextBoolReturnsBooleanValue) {
for (unsigned n = 0; n < ARRAY_SIZE(kRandomSeeds); ++n) {
- RandomNumberGenerator rng(kRandomSeeds[n]);
+ v8::base::RandomNumberGenerator rng(kRandomSeeds[n]);
for (int k = 0; k < kMaxRuns; ++k) {
bool b = rng.NextBool();
CHECK(b == false || b == true);
TEST(NextDoubleRange) {
for (unsigned n = 0; n < ARRAY_SIZE(kRandomSeeds); ++n) {
- RandomNumberGenerator rng(kRandomSeeds[n]);
+ v8::base::RandomNumberGenerator rng(kRandomSeeds[n]);
for (int k = 0; k < kMaxRuns; ++k) {
double d = rng.NextDouble();
CHECK_LE(0.0, d);
for (unsigned n = 0; n < ARRAY_SIZE(kRandomSeeds); ++n) {
FLAG_random_seed = kRandomSeeds[n];
v8::Isolate* i = v8::Isolate::New();
- RandomNumberGenerator& rng1 =
+ v8::base::RandomNumberGenerator& rng1 =
*reinterpret_cast<Isolate*>(i)->random_number_generator();
- RandomNumberGenerator rng2(kRandomSeeds[n]);
+ v8::base::RandomNumberGenerator rng2(kRandomSeeds[n]);
for (int k = 1; k <= kMaxRuns; ++k) {
int64_t i1, i2;
rng1.NextBytes(&i1, sizeof(i1));
#include "src/v8.h"
-#include "src/platform.h"
+#include "src/base/platform/platform.h"
#include "test/cctest/cctest.h"
using namespace ::v8::internal;
-class WaitAndSignalThread V8_FINAL : public Thread {
+class WaitAndSignalThread V8_FINAL : public v8::base::Thread {
public:
- explicit WaitAndSignalThread(Semaphore* semaphore)
+ explicit WaitAndSignalThread(v8::base::Semaphore* semaphore)
: Thread("WaitAndSignalThread"), semaphore_(semaphore) {}
virtual ~WaitAndSignalThread() {}
virtual void Run() V8_OVERRIDE {
for (int n = 0; n < 1000; ++n) {
semaphore_->Wait();
- bool result = semaphore_->WaitFor(TimeDelta::FromMicroseconds(1));
+ bool result =
+ semaphore_->WaitFor(v8::base::TimeDelta::FromMicroseconds(1));
ASSERT(!result);
USE(result);
semaphore_->Signal();
}
private:
- Semaphore* semaphore_;
+ v8::base::Semaphore* semaphore_;
};
TEST(WaitAndSignal) {
- Semaphore semaphore(0);
+ v8::base::Semaphore semaphore(0);
WaitAndSignalThread t1(&semaphore);
WaitAndSignalThread t2(&semaphore);
semaphore.Wait();
- bool result = semaphore.WaitFor(TimeDelta::FromMicroseconds(1));
+ bool result = semaphore.WaitFor(v8::base::TimeDelta::FromMicroseconds(1));
ASSERT(!result);
USE(result);
}
TEST(WaitFor) {
bool ok;
- Semaphore semaphore(0);
+ v8::base::Semaphore semaphore(0);
// Semaphore not signalled - timeout.
- ok = semaphore.WaitFor(TimeDelta::FromMicroseconds(0));
+ ok = semaphore.WaitFor(v8::base::TimeDelta::FromMicroseconds(0));
CHECK(!ok);
- ok = semaphore.WaitFor(TimeDelta::FromMicroseconds(100));
+ ok = semaphore.WaitFor(v8::base::TimeDelta::FromMicroseconds(100));
CHECK(!ok);
- ok = semaphore.WaitFor(TimeDelta::FromMicroseconds(1000));
+ ok = semaphore.WaitFor(v8::base::TimeDelta::FromMicroseconds(1000));
CHECK(!ok);
// Semaphore signalled - no timeout.
semaphore.Signal();
- ok = semaphore.WaitFor(TimeDelta::FromMicroseconds(0));
+ ok = semaphore.WaitFor(v8::base::TimeDelta::FromMicroseconds(0));
CHECK(ok);
semaphore.Signal();
- ok = semaphore.WaitFor(TimeDelta::FromMicroseconds(100));
+ ok = semaphore.WaitFor(v8::base::TimeDelta::FromMicroseconds(100));
CHECK(ok);
semaphore.Signal();
- ok = semaphore.WaitFor(TimeDelta::FromMicroseconds(1000));
+ ok = semaphore.WaitFor(v8::base::TimeDelta::FromMicroseconds(1000));
CHECK(ok);
}
static char buffer[kBufferSize];
static const int kDataSize = kBufferSize * kAlphabetSize * 10;
-static Semaphore free_space(kBufferSize);
-static Semaphore used_space(0);
+static v8::base::Semaphore free_space(kBufferSize);
+static v8::base::Semaphore used_space(0);
-class ProducerThread V8_FINAL : public Thread {
+class ProducerThread V8_FINAL : public v8::base::Thread {
public:
ProducerThread() : Thread("ProducerThread") {}
virtual ~ProducerThread() {}
};
-class ConsumerThread V8_FINAL : public Thread {
+class ConsumerThread V8_FINAL : public v8::base::Thread {
public:
ConsumerThread() : Thread("ConsumerThread") {}
virtual ~ConsumerThread() {}
class FileByteSink : public SnapshotByteSink {
public:
explicit FileByteSink(const char* snapshot_file) {
- fp_ = OS::FOpen(snapshot_file, "wb");
+ fp_ = v8::base::OS::FOpen(snapshot_file, "wb");
file_name_ = snapshot_file;
if (fp_ == NULL) {
PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
int file_name_length = StrLength(file_name_) + 10;
Vector<char> name = Vector<char>::New(file_name_length + 1);
SNPrintF(name, "%s.size", file_name_);
- FILE* fp = OS::FOpen(name.start(), "w");
+ FILE* fp = v8::base::OS::FOpen(name.start(), "w");
name.Dispose();
fprintf(fp, "new %d\n", new_space_used);
fprintf(fp, "pointer %d\n", pointer_space_used);
int file_name_length = StrLength(file_name) + 10;
Vector<char> name = Vector<char>::New(file_name_length + 1);
SNPrintF(name, "%s.size", file_name);
- FILE* fp = OS::FOpen(name.start(), "r");
+ FILE* fp = v8::base::OS::FOpen(name.start(), "r");
name.Dispose();
int new_size, pointer_size, data_size, code_size, map_size, cell_size,
property_cell_size;
executable,
NULL);
size_t alignment = code_range != NULL && code_range->valid() ?
- MemoryChunk::kAlignment : OS::CommitPageSize();
- size_t reserved_size = ((executable == EXECUTABLE))
- ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
- alignment)
- : RoundUp(header_size + reserve_area_size, OS::CommitPageSize());
+ MemoryChunk::kAlignment : v8::base::OS::CommitPageSize();
+ size_t reserved_size =
+ ((executable == EXECUTABLE))
+ ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
+ alignment)
+ : RoundUp(header_size + reserve_area_size,
+ v8::base::OS::CommitPageSize());
CHECK(memory_chunk->size() == reserved_size);
CHECK(memory_chunk->area_start() < memory_chunk->address() +
memory_chunk->size());
#include "src/v8.h"
+#include "src/base/utils/random-number-generator.h"
#include "src/bignum.h"
#include "src/diy-fp.h"
#include "src/double.h"
#include "src/strtod.h"
-#include "src/utils/random-number-generator.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
static const int kLargeStrtodRandomCount = 2;
TEST(RandomStrtod) {
- RandomNumberGenerator rng;
+ v8::base::RandomNumberGenerator rng;
char buffer[kBufferSize];
for (int length = 1; length < 15; length++) {
for (int i = 0; i < kShortStrtodRandomCount; ++i) {
#include "src/v8.h"
#include "test/cctest/cctest.h"
-#include "src/platform.h"
+#include "src/base/platform/platform.h"
-v8::internal::Semaphore* semaphore = NULL;
+v8::base::Semaphore* semaphore = NULL;
void Signal(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
-class TerminatorThread : public v8::internal::Thread {
+class TerminatorThread : public v8::base::Thread {
public:
explicit TerminatorThread(i::Isolate* isolate)
: Thread("TerminatorThread"),
// Test that a single thread of JavaScript execution can be terminated
// from the side by another thread.
TEST(TerminateOnlyV8ThreadFromOtherThread) {
- semaphore = new v8::internal::Semaphore(0);
+ semaphore = new v8::base::Semaphore(0);
TerminatorThread thread(CcTest::i_isolate());
thread.Start();
TEST(TerminateFromOtherThreadWhileMicrotaskRunning) {
- semaphore = new v8::internal::Semaphore(0);
+ semaphore = new v8::base::Semaphore(0);
TerminatorThread thread(CcTest::i_isolate());
thread.Start();
#include "src/v8.h"
#include "test/cctest/cctest.h"
+#include "src/base/platform/platform.h"
#include "src/isolate.h"
-#include "src/platform.h"
enum Turn {
static Turn turn = FILL_CACHE;
-class ThreadA : public v8::internal::Thread {
+class ThreadA : public v8::base::Thread {
public:
ThreadA() : Thread("ThreadA") { }
void Run() {
};
-class ThreadB : public v8::internal::Thread {
+class ThreadB : public v8::base::Thread {
public:
ThreadB() : Thread("ThreadB") { }
void Run() {
CHECK_EQ(DONE, turn);
}
-class ThreadIdValidationThread : public v8::internal::Thread {
+class ThreadIdValidationThread : public v8::base::Thread {
public:
- ThreadIdValidationThread(i::Thread* thread_to_start,
+ ThreadIdValidationThread(v8::base::Thread* thread_to_start,
i::List<i::ThreadId>* refs,
unsigned int thread_no,
- i::Semaphore* semaphore)
+ v8::base::Semaphore* semaphore)
: Thread("ThreadRefValidationThread"),
refs_(refs), thread_no_(thread_no), thread_to_start_(thread_to_start),
semaphore_(semaphore) {
private:
i::List<i::ThreadId>* refs_;
int thread_no_;
- i::Thread* thread_to_start_;
- i::Semaphore* semaphore_;
+ v8::base::Thread* thread_to_start_;
+ v8::base::Semaphore* semaphore_;
};
const int kNThreads = 100;
i::List<ThreadIdValidationThread*> threads(kNThreads);
i::List<i::ThreadId> refs(kNThreads);
- i::Semaphore semaphore(0);
+ v8::base::Semaphore semaphore(0);
ThreadIdValidationThread* prev = NULL;
for (int i = kNThreads - 1; i >= 0; i--) {
ThreadIdValidationThread* newThread =
}
-class ThreadC : public v8::internal::Thread {
+class ThreadC : public v8::base::Thread {
public:
ThreadC() : Thread("ThreadC") { }
void Run() {
#include "src/base/win32-headers.h"
#endif
+using namespace v8::base;
using namespace v8::internal;
#include <vector>
+#include "src/base/utils/random-number-generator.h"
#include "src/hydrogen-types.h"
#include "src/types.h"
-#include "src/utils/random-number-generator.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
private:
Region* region_;
- RandomNumberGenerator rng_;
+ v8::base::RandomNumberGenerator rng_;
};
CHECK(T.Constant(fac->NewNumber(-10.1))->Is(T.OtherNumber));
CHECK(T.Constant(fac->NewNumber(10e60))->Is(T.OtherNumber));
CHECK(T.Constant(fac->NewNumber(-1.0*0.0))->Is(T.MinusZero));
- CHECK(T.Constant(fac->NewNumber(OS::nan_value()))->Is(T.NaN));
+ CHECK(T.Constant(fac->NewNumber(v8::base::OS::nan_value()))->Is(T.NaN));
CHECK(T.Constant(fac->NewNumber(V8_INFINITY))->Is(T.OtherNumber));
CHECK(T.Constant(fac->NewNumber(-V8_INFINITY))->Is(T.OtherNumber));
}
#include "src/v8.h"
-#include "src/platform.h"
+#include "src/base/platform/platform.h"
#include "src/utils-inl.h"
#include "test/cctest/cctest.h"
CHECK_EQ(INT_MAX, FastD2IChecked(1.0e100));
CHECK_EQ(INT_MIN, FastD2IChecked(-1.0e100));
- CHECK_EQ(INT_MIN, FastD2IChecked(OS::nan_value()));
+ CHECK_EQ(INT_MIN, FastD2IChecked(v8::base::OS::nan_value()));
}
'../../src/cpu-profiler-inl.h',
'../../src/cpu-profiler.cc',
'../../src/cpu-profiler.h',
- '../../src/cpu.cc',
- '../../src/cpu.h',
'../../src/data-flow.cc',
'../../src/data-flow.h',
'../../src/date.cc',
'../../src/ostreams.h',
'../../src/parser.cc',
'../../src/parser.h',
- '../../src/platform/elapsed-timer.h',
- '../../src/platform/time.cc',
- '../../src/platform/time.h',
- '../../src/platform.h',
- '../../src/platform/condition-variable.cc',
- '../../src/platform/condition-variable.h',
- '../../src/platform/mutex.cc',
- '../../src/platform/mutex.h',
- '../../src/platform/semaphore.cc',
- '../../src/platform/semaphore.h',
'../../src/preparse-data-format.h',
'../../src/preparse-data.cc',
'../../src/preparse-data.h',
'../../src/utils-inl.h',
'../../src/utils.cc',
'../../src/utils.h',
- '../../src/utils/random-number-generator.cc',
- '../../src/utils/random-number-generator.h',
'../../src/v8.cc',
'../../src/v8.h',
- '../../src/v8checks.h',
'../../src/v8memory.h',
'../../src/v8threads.cc',
'../../src/v8threads.h',
]
}],
],
+ },
+ }
+ ],
+ ['OS=="win"', {
+ 'variables': {
+ 'gyp_generators': '<!(echo $GYP_GENERATORS)',
+ },
+ 'msvs_disabled_warnings': [4351, 4355, 4800],
+ }],
+ ['component=="shared_library"', {
+ 'defines': [
+ 'BUILDING_V8_SHARED',
+ 'V8_SHARED',
+ ],
+ }],
+ ['v8_postmortem_support=="true"', {
+ 'sources': [
+ '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
+ ]
+ }],
+ ['v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(icu_gyp_path):icui18n',
+ '<(icu_gyp_path):icuuc',
+ ]
+ }, { # v8_enable_i18n_support==0
+ 'sources!': [
+ '../../src/i18n.cc',
+ '../../src/i18n.h',
+ ],
+ }],
+ ['OS=="win" and v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(icu_gyp_path):icudata',
+ ],
+ }],
+ ['icu_use_data_file_flag==1', {
+ 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE'],
+ }, { # else icu_use_data_file_flag !=1
+ 'conditions': [
+ ['OS=="win"', {
+ 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_SHARED'],
+ }, {
+ 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC'],
+ }],
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'v8_libbase',
+ 'type': 'static_library',
+ 'variables': {
+ 'optimize': 'max',
+ },
+ 'include_dirs+': [
+ '../..',
+ ],
+ 'sources': [
+ '../../src/base/atomicops.h',
+ '../../src/base/atomicops_internals_arm64_gcc.h',
+ '../../src/base/atomicops_internals_arm_gcc.h',
+ '../../src/base/atomicops_internals_atomicword_compat.h',
+ '../../src/base/atomicops_internals_mac.h',
+ '../../src/base/atomicops_internals_mips_gcc.h',
+ '../../src/base/atomicops_internals_tsan.h',
+ '../../src/base/atomicops_internals_x86_gcc.cc',
+ '../../src/base/atomicops_internals_x86_gcc.h',
+ '../../src/base/atomicops_internals_x86_msvc.h',
+ '../../src/base/build_config.h',
+ '../../src/base/cpu.cc',
+ '../../src/base/cpu.h',
+ '../../src/base/lazy-instance.h',
+ '../../src/base/logging.cc',
+ '../../src/base/logging.h',
+ '../../src/base/macros.h',
+ '../../src/base/once.cc',
+ '../../src/base/once.h',
+ '../../src/base/platform/elapsed-timer.h',
+ '../../src/base/platform/time.cc',
+ '../../src/base/platform/time.h',
+ '../../src/base/platform/condition-variable.cc',
+ '../../src/base/platform/condition-variable.h',
+ '../../src/base/platform/mutex.cc',
+ '../../src/base/platform/mutex.h',
+ '../../src/base/platform/platform.h',
+ '../../src/base/platform/semaphore.cc',
+ '../../src/base/platform/semaphore.h',
+ '../../src/base/safe_conversions.h',
+ '../../src/base/safe_conversions_impl.h',
+ '../../src/base/safe_math.h',
+ '../../src/base/safe_math_impl.h',
+ '../../src/base/utils/random-number-generator.cc',
+ '../../src/base/utils/random-number-generator.h',
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ['component=="shared_library"', {
+ 'defines': [
+ 'BUILDING_V8_SHARED',
+ 'V8_SHARED',
+ ],
+ }],
+ ['OS=="linux"', {
+ 'link_settings': {
'libraries': [
'-lrt'
]
},
- 'sources': [ ### gcmole(os:linux) ###
- '../../src/platform-linux.cc',
- '../../src/platform-posix.cc'
+ 'sources': [
+ '../../src/base/platform/platform-linux.cc',
+ '../../src/base/platform/platform-posix.cc'
],
}
],
['OS=="android"', {
'sources': [
- '../../src/platform-posix.cc'
+ '../../src/base/platform/platform-posix.cc'
],
'conditions': [
['host_os=="mac"', {
'target_conditions': [
['_toolset=="host"', {
'sources': [
- '../../src/platform-macos.cc'
+ '../../src/base/platform/platform-macos.cc'
]
}, {
'sources': [
- '../../src/platform-linux.cc'
+ '../../src/base/platform/platform-linux.cc'
]
}],
],
}],
],
'sources': [
- '../../src/platform-linux.cc'
+ '../../src/base/platform/platform-linux.cc'
]
}],
],
],
},
'sources': [
- '../../src/platform-posix.cc',
+ '../../src/base/platform/platform-posix.cc',
+ '../../src/base/qnx-math.h',
],
'target_conditions': [
['_toolset=="host" and host_os=="linux"', {
'sources': [
- '../../src/platform-linux.cc'
+ '../../src/base/platform/platform-linux.cc'
],
}],
['_toolset=="host" and host_os=="mac"', {
'sources': [
- '../../src/platform-macos.cc'
+ '../../src/base/platform/platform-macos.cc'
],
}],
['_toolset=="target"', {
'sources': [
- '../../src/platform-qnx.cc'
+ '../../src/base/platform/platform-qnx.cc'
],
}],
],
'-L/usr/local/lib -lexecinfo',
]},
'sources': [
- '../../src/platform-freebsd.cc',
- '../../src/platform-posix.cc'
+ '../../src/base/platform/platform-freebsd.cc',
+ '../../src/base/platform/platform-posix.cc'
],
}
],
'-L/usr/local/lib -lexecinfo',
]},
'sources': [
- '../../src/platform-openbsd.cc',
- '../../src/platform-posix.cc'
+ '../../src/base/platform/platform-openbsd.cc',
+ '../../src/base/platform/platform-posix.cc'
],
}
],
'-L/usr/pkg/lib -Wl,-R/usr/pkg/lib -lexecinfo',
]},
'sources': [
- '../../src/platform-openbsd.cc',
- '../../src/platform-posix.cc'
+ '../../src/base/platform/platform-openbsd.cc',
+ '../../src/base/platform/platform-posix.cc'
],
}
],
'-lnsl',
]},
'sources': [
- '../../src/platform-solaris.cc',
- '../../src/platform-posix.cc'
+ '../../src/base/platform/platform-solaris.cc',
+ '../../src/base/platform/platform-posix.cc'
],
}
],
['OS=="mac"', {
'sources': [
- '../../src/platform-macos.cc',
- '../../src/platform-posix.cc'
+ '../../src/base/platform/platform-macos.cc',
+ '../../src/base/platform/platform-posix.cc'
]},
],
['OS=="win"', {
'conditions': [
['build_env=="Cygwin"', {
'sources': [
- '../../src/platform-cygwin.cc',
- '../../src/platform-posix.cc'
+ '../../src/base/platform/platform-cygwin.cc',
+ '../../src/base/platform/platform-posix.cc'
],
}, {
'sources': [
- '../../src/platform-win32.cc',
- '../../src/win32-math.cc',
- '../../src/win32-math.h'
+ '../../src/base/platform/platform-win32.cc',
+ '../../src/base/win32-headers.h',
+ '../../src/base/win32-math.cc',
+ '../../src/base/win32-math.h'
],
}],
],
},
}, {
'sources': [
- '../../src/platform-win32.cc',
- '../../src/win32-math.cc',
- '../../src/win32-math.h'
+ '../../src/base/platform/platform-win32.cc',
+ '../../src/base/win32-headers.h',
+ '../../src/base/win32-math.cc',
+ '../../src/base/win32-math.h'
],
'msvs_disabled_warnings': [4351, 4355, 4800],
'link_settings': {
'V8_SHARED',
],
}],
- ['v8_postmortem_support=="true"', {
- 'sources': [
- '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
- ]
- }],
- ['v8_enable_i18n_support==1', {
- 'dependencies': [
- '<(icu_gyp_path):icui18n',
- '<(icu_gyp_path):icuuc',
- ]
- }, { # v8_enable_i18n_support==0
- 'sources!': [
- '../../src/i18n.cc',
- '../../src/i18n.h',
- ],
- }],
- ['OS=="win" and v8_enable_i18n_support==1', {
- 'dependencies': [
- '<(icu_gyp_path):icudata',
- ],
- }],
- ['icu_use_data_file_flag==1', {
- 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE'],
- }, { # else icu_use_data_file_flag !=1
- 'conditions': [
- ['OS=="win"', {
- 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_SHARED'],
- }, {
- 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC'],
- }],
- ],
- }],
- ],
- },
- {
- 'target_name': 'v8_libbase',
- 'type': 'static_library',
- 'variables': {
- 'optimize': 'max',
- },
- 'include_dirs+': [
- '../..',
- ],
- 'sources': [
- '../../src/base/atomicops.h',
- '../../src/base/atomicops_internals_arm64_gcc.h',
- '../../src/base/atomicops_internals_arm_gcc.h',
- '../../src/base/atomicops_internals_atomicword_compat.h',
- '../../src/base/atomicops_internals_mac.h',
- '../../src/base/atomicops_internals_mips_gcc.h',
- '../../src/base/atomicops_internals_tsan.h',
- '../../src/base/atomicops_internals_x86_gcc.cc',
- '../../src/base/atomicops_internals_x86_gcc.h',
- '../../src/base/atomicops_internals_x86_msvc.h',
- '../../src/base/build_config.h',
- '../../src/base/lazy-instance.h',
- '../../src/base/macros.h',
- '../../src/base/once.cc',
- '../../src/base/once.h',
- '../../src/base/safe_conversions.h',
- '../../src/base/safe_conversions_impl.h',
- '../../src/base/safe_math.h',
- '../../src/base/safe_math_impl.h',
- '../../src/base/win32-headers.h',
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ['component=="shared_library"', {
- 'defines': [
- 'BUILDING_V8_SHARED',
- 'V8_SHARED',
- ],
- }],
],
},
{
#include "src/v8.h"
#include "src/api.h"
+#include "src/base/platform/platform.h"
#include "src/messages.h"
-#include "src/platform.h"
#include "src/runtime.h"
#include "src/scanner-character-streams.h"
#include "src/scopeinfo.h"
BaselineScanner(const char* fname,
Isolate* isolate,
Encoding encoding,
- ElapsedTimer* timer,
+ v8::base::ElapsedTimer* timer,
int repeat)
: stream_(NULL) {
int length = 0;
};
-TimeDelta RunBaselineScanner(const char* fname,
- Isolate* isolate,
- Encoding encoding,
- bool dump_tokens,
- std::vector<TokenWithLocation>* tokens,
- int repeat) {
- ElapsedTimer timer;
+v8::base::TimeDelta RunBaselineScanner(const char* fname, Isolate* isolate,
+ Encoding encoding, bool dump_tokens,
+ std::vector<TokenWithLocation>* tokens,
+ int repeat) {
+ v8::base::ElapsedTimer timer;
BaselineScanner scanner(fname, isolate, encoding, &timer, repeat);
Token::Value token;
int beg, end;
}
-TimeDelta ProcessFile(
+v8::base::TimeDelta ProcessFile(
const char* fname,
Encoding encoding,
Isolate* isolate,
}
HandleScope handle_scope(isolate);
std::vector<TokenWithLocation> baseline_tokens;
- TimeDelta baseline_time;
+ v8::base::TimeDelta baseline_time;
baseline_time = RunBaselineScanner(
fname, isolate, encoding, print_tokens,
&baseline_tokens, repeat);
v8::Context::Scope scope(context);
double baseline_total = 0;
for (size_t i = 0; i < fnames.size(); i++) {
- TimeDelta time;
+ v8::base::TimeDelta time;
time = ProcessFile(fnames[i].c_str(), encoding,
reinterpret_cast<Isolate*>(isolate), print_tokens,
repeat);
int length_;
};
-std::pair<TimeDelta, TimeDelta> RunBaselineParser(
+std::pair<v8::base::TimeDelta, v8::base::TimeDelta> RunBaselineParser(
const char* fname, Encoding encoding, int repeat, v8::Isolate* isolate,
v8::Handle<v8::Context> context) {
int length = 0;
break;
}
}
- TimeDelta parse_time1, parse_time2;
+ v8::base::TimeDelta parse_time1, parse_time2;
Handle<Script> script = Isolate::Current()->factory()->NewScript(
v8::Utils::OpenHandle(*source_handle));
i::ScriptData* cached_data_impl = NULL;
CompilationInfoWithZone info(script);
info.MarkAsGlobal();
info.SetCachedData(&cached_data_impl, i::PRODUCE_CACHED_DATA);
- ElapsedTimer timer;
+ v8::base::ElapsedTimer timer;
timer.Start();
// Allow lazy parsing; otherwise we won't produce cached data.
bool success = Parser::Parse(&info, true);
parse_time1 = timer.Elapsed();
if (!success) {
fprintf(stderr, "Parsing failed\n");
- return std::make_pair(TimeDelta(), TimeDelta());
+ return std::make_pair(v8::base::TimeDelta(), v8::base::TimeDelta());
}
}
// Second round of parsing (consume cached data).
CompilationInfoWithZone info(script);
info.MarkAsGlobal();
info.SetCachedData(&cached_data_impl, i::CONSUME_CACHED_DATA);
- ElapsedTimer timer;
+ v8::base::ElapsedTimer timer;
timer.Start();
// Allow lazy parsing; otherwise cached data won't help.
bool success = Parser::Parse(&info, true);
parse_time2 = timer.Elapsed();
if (!success) {
fprintf(stderr, "Parsing failed\n");
- return std::make_pair(TimeDelta(), TimeDelta());
+ return std::make_pair(v8::base::TimeDelta(), v8::base::TimeDelta());
}
}
return std::make_pair(parse_time1, parse_time2);
double first_parse_total = 0;
double second_parse_total = 0;
for (size_t i = 0; i < fnames.size(); i++) {
- std::pair<TimeDelta, TimeDelta> time = RunBaselineParser(
- fnames[i].c_str(), encoding, repeat, isolate, context);
+ std::pair<v8::base::TimeDelta, v8::base::TimeDelta> time =
+ RunBaselineParser(fnames[i].c_str(), encoding, repeat, isolate,
+ context);
first_parse_total += time.first.InMillisecondsF();
second_parse_total += time.second.InMillisecondsF();
}