'V8_ENABLE_CHECKS',
'OBJECT_PRINT',
'VERIFY_HEAP',
+ 'DEBUG',
],
'msvs_settings': {
'VCCLCompilerTool': {
},
},
'conditions': [
- ['v8_optimized_debug==2', {
- 'defines': [
- 'NDEBUG',
- ],
- }, {
- 'defines': [
- 'DEBUG',
- ],
- }],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual',
'-fdata-sections',
'-ffunction-sections',
],
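+ # OPTIMIZED_DEBUG keeps the DEBUG checks but compiles out the slow
+ # asserts (see ENABLE_SLOW_ASSERTS in checks.h).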
+ 'defines': [
+ 'OPTIMIZED_DEBUG',
+ ],
'conditions': [
# TODO(crbug.com/272548): Avoid -O3 in NaCl
['nacl_target_arch=="none"', {
holder_ = GetPrototypeForPrimitiveCheck(check_type_, oracle->isolate());
receiver_types_.Add(handle(holder_->map()), oracle->zone());
}
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
int length = receiver_types_.length();
for (int i = 0; i < length; i++) {
namespace v8 { namespace internal {
- bool EnableSlowAsserts() { return FLAG_enable_slow_asserts; }
-
intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
} } // namespace v8::internal
#endif
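+// ENABLE_SLOW_ASSERTS is only set in full debug builds (DEBUG defined,
+// OPTIMIZED_DEBUG not defined); in optimized debug and release builds
+// SLOW_ASSERT compiles to a no-op.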
+#ifdef DEBUG
+#ifndef OPTIMIZED_DEBUG
+#define ENABLE_SLOW_ASSERTS 1
+#endif
+#endif
+
+namespace v8 {
+namespace internal {
+#ifdef ENABLE_SLOW_ASSERTS
+#define SLOW_ASSERT(condition) \
+ CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition))
extern bool FLAG_enable_slow_asserts;
+#else
+#define SLOW_ASSERT(condition) ((void) 0)
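+// A compile-time constant stands in for the runtime flag so that code
+// guarded by "if (FLAG_enable_slow_asserts)" still compiles and is
+// eliminated as dead code.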
+const bool FLAG_enable_slow_asserts = false;
+#endif
+} // namespace internal
+} // namespace v8
// The ASSERT macro is equivalent to CHECK except that it only
#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
#define ASSERT_LT(v1, v2) CHECK_LT(v1, v2)
#define ASSERT_LE(v1, v2) CHECK_LE(v1, v2)
-#define SLOW_ASSERT(condition) CHECK(!FLAG_enable_slow_asserts || (condition))
#else
#define ASSERT_RESULT(expr) (expr)
#define ASSERT(condition) ((void) 0)
#define ASSERT_GE(v1, v2) ((void) 0)
#define ASSERT_LT(v1, v2) ((void) 0)
#define ASSERT_LE(v1, v2) ((void) 0)
-#define SLOW_ASSERT(condition) ((void) 0)
#endif
// Static asserts have no impact on runtime performance, so they can be
// safely enabled in release mode. Moreover, the ((void) 0) expression
void Context::AddOptimizedFunction(JSFunction* function) {
ASSERT(IsNativeContext());
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
while (!element->IsUndefined()) {
return JunkStringValue();
}
- ASSERT(buffer_pos < kBufferSize);
+ SLOW_ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
Vector<const char> buffer_vector(buffer, buffer_pos);
return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
exponent--;
}
- ASSERT(buffer_pos < kBufferSize);
+ SLOW_ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
void SetCallerFp(unsigned offset, intptr_t value);
intptr_t GetRegister(unsigned n) const {
- ASSERT(n < ARRAY_SIZE(registers_));
+#if DEBUG
+ // This convoluted ASSERT is needed to work around a gcc problem that
+ // improperly detects an array bounds overflow in optimized debug builds
+ // when using a plain ASSERT.
+ if (n >= ARRAY_SIZE(registers_)) {
+   ASSERT(false);
+   return 0;
+ }
+#endif
return registers_[n];
}
FixedArray* to,
FixedArrayBase* from) {
int len0 = to->length();
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < len0; i++) {
ASSERT(!to->get(i)->IsTheHole());
#endif
// checks.cc
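+// In optimized debug builds checks.h defines FLAG_enable_slow_asserts as a
+// constant, so the runtime flag must not be declared again here.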
+#ifndef OPTIMIZED_DEBUG
DEFINE_bool(enable_slow_asserts, false,
"enable asserts that are slow to execute")
+#endif
// codegen-ia32.cc / codegen-arm.cc / macro-assembler-*.cc
DEFINE_bool(print_source, false, "pretty print source code")
IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
MarkBit mark_bit = Marking::MarkBitFrom(obj);
-#ifdef DEBUG
+#if ENABLE_SLOW_ASSERTS
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
SLOW_ASSERT(Marking::IsGrey(mark_bit) ||
(obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
// backing store (e.g. Add).
inline T& operator[](int i) const {
ASSERT(0 <= i);
- ASSERT(i < length_);
+ SLOW_ASSERT(i < length_);
return data_[i];
}
inline T& at(int i) const { return operator[](i); }
#define CAST_ACCESSOR(type) \
type* type::cast(Object* object) { \
- ASSERT(object->Is##type()); \
+ SLOW_ASSERT(object->Is##type()); \
return reinterpret_cast<type*>(object); \
}
Heap* HeapObject::GetHeap() {
Heap* heap =
MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
- ASSERT(heap != NULL);
+ SLOW_ASSERT(heap != NULL);
return heap;
}
void JSObject::ValidateElements() {
-#if DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
ElementsAccessor* accessor = GetElementsAccessor();
accessor->Validate(this);
Object* FixedArray::get(int index) {
- ASSERT(index >= 0 && index < this->length());
+ SLOW_ASSERT(index >= 0 && index < this->length());
return READ_FIELD(this, kHeaderSize + index * kPointerSize);
}
// Externalizing twice leaks the external resource, so it's
// prohibited by the API.
ASSERT(!this->IsExternalString());
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
ASSERT(static_cast<size_t>(this->length()) == resource->length());
bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
ASSERT(static_cast<size_t>(this->length()) == resource->length());
Handle<Map>::cast(result)->SharedMapVerify();
}
#endif
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
// The cached map should match newly created normalized map bit-by-bit,
// except for the code cache, which can contain some ics which can be
accessor->AddElementsToFixedArray(array, array, this);
FixedArray* result;
if (!maybe_result->To<FixedArray>(&result)) return maybe_result;
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < result->length(); i++) {
Object* current = result->get(i);
accessor->AddElementsToFixedArray(NULL, NULL, this, other);
FixedArray* result;
if (!maybe_result->To(&result)) return maybe_result;
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < result->length(); i++) {
Object* current = result->get(i);
// Fast check: if hash code is computed for both strings
// a fast negative check can be performed.
if (HasHashCode() && other->HasHashCode()) {
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
if (Hash() != other->Hash()) {
bool found_difference = false;
// Returns a vector using the same backing storage as this one,
// spanning from and including 'from', to but not including 'to'.
Vector<T> SubVector(int from, int to) {
- ASSERT(to <= length_);
- ASSERT(from < to);
+ SLOW_ASSERT(to <= length_);
+ SLOW_ASSERT(from < to);
ASSERT(0 <= from);
return Vector<T>(start() + from, to - from);
}