From: Benedikt Meurer
Date: Fri, 30 Jan 2015 07:19:40 +0000 (+0100)
Subject: Revert "Make GCC happy again." and "Initial switch to Chromium-style CHECK_* and...
X-Git-Tag: upstream/4.7.83~4698
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=883852293a3191006597853dbb6643f7e6f0da0d;p=platform%2Fupstream%2Fv8.git

Revert "Make GCC happy again." and "Initial switch to Chromium-style CHECK_* and DCHECK_* macros.".

This reverts commit 6a4c0a3bae60b1a2239f35e3b759bd215e3886fe and commit
0deaa4b629faeae6832046c294dc7c034a0a6653 for breaking GCC bots.

TBR=svenpanne@chromium.org

Review URL: https://codereview.chromium.org/893533003

Cr-Commit-Position: refs/heads/master@{#26342}
---

diff --git a/include/v8-profiler.h b/include/v8-profiler.h
index 368c89c..d021520 100644
--- a/include/v8-profiler.h
+++ b/include/v8-profiler.h
@@ -448,7 +448,7 @@ class V8_EXPORT HeapProfiler {
    * it in case heap profiler cannot find id for the object passed as
    * parameter. HeapSnapshot::GetNodeById will always return NULL for such id.
    */
-  enum { kUnknownObjectId };
+  static const SnapshotObjectId kUnknownObjectId = 0;

   /**
    * Callback interface for retrieving user friendly names of global objects.

diff --git a/src/api.cc b/src/api.cc
index d4eee16..fd84341 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -4996,7 +4996,7 @@ void v8::Object::SetInternalField(int index, v8::Handle value) {
   if (!InternalFieldOK(obj, index, location)) return;
   i::Handle val = Utils::OpenHandle(*value);
   obj->SetInternalField(index, *val);
-  DCHECK(value->Equals(GetInternalField(index)));
+  DCHECK_EQ(value, GetInternalField(index));
 }

diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index a8c374f..2e585ba 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -561,7 +561,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
   // If either is a Smi (we know that not both are), then they can only
   // be strictly equal if the other is a HeapNumber.
   STATIC_ASSERT(kSmiTag == 0);
-  DCHECK_EQ(static_cast(0), Smi::FromInt(0));
+  DCHECK_EQ(0, Smi::FromInt(0));
   __ and_(r2, lhs, Operand(rhs));
   __ JumpIfNotSmi(r2, &not_smis);
   // One operand is a smi. EmitSmiNonsmiComparison generates code that can:

diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index ad0c817..47525c9 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -3813,7 +3813,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
 void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
   ZoneList* args = expr->arguments();
   DCHECK(args->length() == 2);
-  DCHECK_NOT_NULL(args->at(1)->AsLiteral());
+  DCHECK_NE(NULL, args->at(1)->AsLiteral());
   Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));

   VisitForAccumulatorValue(args->at(0));  // Load the object.
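A note on the include/v8-profiler.h hunk above: the patch being reverted had turned this static const constant into an anonymous enum, presumably because the Chromium-style CHECK_*/DCHECK_* templates bind their operands by const reference, which odr-uses an in-class static const member and, before C++17, requires an out-of-line definition that GCC otherwise reports as an undefined reference. A minimal standalone sketch of that difference (not V8 code; CheckEqImpl and Profiler are illustrative names, assuming a pre-C++17 compiler):

// Sketch only: an enumerator is a prvalue and needs no storage, while a
// by-reference check odr-uses a static const member that lacks an
// out-of-line definition.
#include <cstdio>

template <typename Lhs, typename Rhs>
void CheckEqImpl(Lhs const& lhs, Rhs const& rhs) {  // binds by reference
  if (!(lhs == rhs)) std::printf("Check failed\n");
}

struct Profiler {
  static const int kUnknownObjectId = 0;  // declaration only, no definition
  enum { kUnknownObjectIdEnum = 0 };      // enumerator, never needs storage
};

int main() {
  CheckEqImpl(Profiler::kUnknownObjectIdEnum, 0);                // links fine
  CheckEqImpl(static_cast<int>(Profiler::kUnknownObjectId), 0);  // copy: fine
  // CheckEqImpl(Profiler::kUnknownObjectId, 0);  // may fail to link on GCC
  //                                              // (undefined reference)
  return 0;
}

Restoring the helper-based macros below, which take their arguments by value, removes that constraint, so the header can go back to the plain static const SnapshotObjectId constant.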
@@ -4161,7 +4161,7 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) { void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK_EQ(2, args->length()); - DCHECK_NOT_NULL(args->at(0)->AsLiteral()); + DCHECK_NE(NULL, args->at(0)->AsLiteral()); int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); Handle jsfunction_result_caches( diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h index 9d46165..88a68a9 100644 --- a/src/arm/macro-assembler-arm.h +++ b/src/arm/macro-assembler-arm.h @@ -944,7 +944,7 @@ class MacroAssembler: public Assembler { ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond); ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond); tst(type, Operand(kIsNotStringMask), cond); - DCHECK_EQ(0u, kStringTag); + DCHECK_EQ(0, kStringTag); return eq; } diff --git a/src/arm64/full-codegen-arm64.cc b/src/arm64/full-codegen-arm64.cc index a013543..fd07d74 100644 --- a/src/arm64/full-codegen-arm64.cc +++ b/src/arm64/full-codegen-arm64.cc @@ -3522,7 +3522,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { void FullCodeGenerator::EmitDateField(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK(args->length() == 2); - DCHECK_NOT_NULL(args->at(1)->AsLiteral()); + DCHECK_NE(NULL, args->at(1)->AsLiteral()); Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); VisitForAccumulatorValue(args->at(0)); // Load the object. @@ -3868,7 +3868,7 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) { void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK_EQ(2, args->length()); - DCHECK_NOT_NULL(args->at(0)->AsLiteral()); + DCHECK_NE(NULL, args->at(0)->AsLiteral()); int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); Handle jsfunction_result_caches( diff --git a/src/arm64/lithium-codegen-arm64.cc b/src/arm64/lithium-codegen-arm64.cc index c49ecb7..8a300e2 100644 --- a/src/arm64/lithium-codegen-arm64.cc +++ b/src/arm64/lithium-codegen-arm64.cc @@ -2820,7 +2820,7 @@ void LCodeGen::DoDivI(LDivI* instr) { __ Sdiv(result, dividend, divisor); if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { - DCHECK(!instr->temp()); + DCHECK_EQ(NULL, instr->temp()); return; } diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc index 37739d9..0644c34 100644 --- a/src/arm64/macro-assembler-arm64.cc +++ b/src/arm64/macro-assembler-arm64.cc @@ -3936,7 +3936,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck( Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch)); Check(lt, kIndexIsTooLarge); - DCHECK_EQ(static_cast(0), Smi::FromInt(0)); + DCHECK_EQ(0, Smi::FromInt(0)); Cmp(index, 0); Check(ge, kIndexIsNegative); } diff --git a/src/base/logging.cc b/src/base/logging.cc index 25d77bb..c3f609f 100644 --- a/src/base/logging.cc +++ b/src/base/logging.cc @@ -10,45 +10,14 @@ #elif V8_OS_QNX # include #endif // V8_LIBC_GLIBC || V8_OS_BSD - -#include -#include +#include +#include #include "src/base/platform/platform.h" namespace v8 { namespace base { -// Explicit instantiations for commonly used comparisons. 
-#define DEFINE_MAKE_CHECK_OP_STRING(type) \ - template std::string* MakeCheckOpString( \ - type const&, type const&, char const*); -DEFINE_MAKE_CHECK_OP_STRING(int) -DEFINE_MAKE_CHECK_OP_STRING(long) // NOLINT(runtime/int) -DEFINE_MAKE_CHECK_OP_STRING(long long) // NOLINT(runtime/int) -DEFINE_MAKE_CHECK_OP_STRING(unsigned int) -DEFINE_MAKE_CHECK_OP_STRING(unsigned long) // NOLINT(runtime/int) -DEFINE_MAKE_CHECK_OP_STRING(unsigned long long) // NOLINT(runtime/int) -DEFINE_MAKE_CHECK_OP_STRING(char const*) -DEFINE_MAKE_CHECK_OP_STRING(void const*) -#undef DEFINE_MAKE_CHECK_OP_STRING - - -// Explicit instantiations for floating point checks. -#define DEFINE_CHECK_OP_IMPL(NAME) \ - template std::string* Check##NAME##Impl( \ - float const& lhs, float const& rhs, char const* msg); \ - template std::string* Check##NAME##Impl( \ - double const& lhs, double const& rhs, char const* msg); -DEFINE_CHECK_OP_IMPL(EQ) -DEFINE_CHECK_OP_IMPL(NE) -DEFINE_CHECK_OP_IMPL(LE) -DEFINE_CHECK_OP_IMPL(LT) -DEFINE_CHECK_OP_IMPL(GE) -DEFINE_CHECK_OP_IMPL(GT) -#undef DEFINE_CHECK_OP_IMPL - - // Attempts to dump a backtrace (if supported). void DumpBacktrace() { #if V8_LIBC_GLIBC || V8_OS_BSD @@ -99,8 +68,7 @@ void DumpBacktrace() { #endif // V8_LIBC_GLIBC || V8_OS_BSD } -} // namespace base -} // namespace v8 +} } // namespace v8::base // Contains protection against recursive calls (faults while handling faults). diff --git a/src/base/logging.h b/src/base/logging.h index f54f10c..e73fac4 100644 --- a/src/base/logging.h +++ b/src/base/logging.h @@ -5,9 +5,8 @@ #ifndef V8_BASE_LOGGING_H_ #define V8_BASE_LOGGING_H_ -#include -#include -#include +#include +#include #include "src/base/build_config.h" @@ -32,124 +31,169 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...); #endif -namespace v8 { -namespace base { - -// CHECK dies with a fatal error if condition is not true. It is *not* -// controlled by DEBUG, so the check will be executed regardless of -// compilation mode. -// -// We make sure CHECK et al. always evaluates their arguments, as -// doing CHECK(FunctionWithSideEffect()) is a common idiom. -#define CHECK(condition) \ - do { \ - if (V8_UNLIKELY(!(condition))) { \ - V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", #condition); \ - } \ +// The CHECK macro checks that the given condition is true; if not, it +// prints a message to stderr and aborts. +#define CHECK(condition) \ + do { \ + if (V8_UNLIKELY(!(condition))) { \ + V8_Fatal(__FILE__, __LINE__, "CHECK(%s) failed", #condition); \ + } \ } while (0) -#ifdef DEBUG +// Helper function used by the CHECK_EQ function when given int +// arguments. Should not be called directly. +inline void CheckEqualsHelper(const char* file, int line, + const char* expected_source, int expected, + const char* value_source, int value) { + if (V8_UNLIKELY(expected != value)) { + V8_Fatal(file, line, + "CHECK_EQ(%s, %s) failed\n# Expected: %i\n# Found: %i", + expected_source, value_source, expected, value); + } +} -// Helper macro for binary operators. -// Don't use this macro directly in your code, use CHECK_EQ et al below. -#define CHECK_OP(name, op, lhs, rhs) \ - do { \ - if (std::string* _msg = ::v8::base::Check##name##Impl( \ - (lhs), (rhs), #lhs " " #op " " #rhs)) { \ - V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", _msg->c_str()); \ - delete _msg; \ - } \ - } while (0) -#else +// Helper function used by the CHECK_EQ function when given int64_t +// arguments. Should not be called directly. 
+inline void CheckEqualsHelper(const char* file, int line, + const char* expected_source, + int64_t expected, + const char* value_source, + int64_t value) { + if (V8_UNLIKELY(expected != value)) { + // Print int64_t values in hex, as two int32s, + // to avoid platform-dependencies. + V8_Fatal(file, line, + "CHECK_EQ(%s, %s) failed\n#" + " Expected: 0x%08x%08x\n# Found: 0x%08x%08x", + expected_source, value_source, + static_cast(expected >> 32), + static_cast(expected), + static_cast(value >> 32), + static_cast(value)); + } +} -// Make all CHECK functions discard their log strings to reduce code -// bloat for official release builds. -#define CHECK_OP(name, op, lhs, rhs) CHECK((lhs)op(rhs)) +// Helper function used by the CHECK_NE function when given int +// arguments. Should not be called directly. +inline void CheckNonEqualsHelper(const char* file, + int line, + const char* unexpected_source, + int unexpected, + const char* value_source, + int value) { + if (V8_UNLIKELY(unexpected == value)) { + V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %i", + unexpected_source, value_source, value); + } +} -#endif + +// Helper function used by the CHECK function when given string +// arguments. Should not be called directly. +inline void CheckEqualsHelper(const char* file, + int line, + const char* expected_source, + const char* expected, + const char* value_source, + const char* value) { + if (V8_UNLIKELY((expected == NULL && value != NULL) || + (expected != NULL && value == NULL) || + (expected != NULL && value != NULL && + strcmp(expected, value) != 0))) { + V8_Fatal(file, line, + "CHECK_EQ(%s, %s) failed\n# Expected: %s\n# Found: %s", + expected_source, value_source, expected, value); + } +} + + +inline void CheckNonEqualsHelper(const char* file, + int line, + const char* expected_source, + const char* expected, + const char* value_source, + const char* value) { + if (V8_UNLIKELY(expected == value || (expected != NULL && value != NULL && + strcmp(expected, value) == 0))) { + V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %s", + expected_source, value_source, value); + } +} -// Build the error message string. This is separate from the "Impl" -// function template because it is not performance critical and so can -// be out of line, while the "Impl" code should be inline. Caller -// takes ownership of the returned string. -template -std::string* MakeCheckOpString(Lhs const& lhs, Rhs const& rhs, - char const* msg) { - std::ostringstream ss; - ss << msg << " (" << lhs << " vs. " << rhs << ")"; - return new std::string(ss.str()); +// Helper function used by the CHECK function when given pointer +// arguments. Should not be called directly. +inline void CheckEqualsHelper(const char* file, + int line, + const char* expected_source, + const void* expected, + const char* value_source, + const void* value) { + if (V8_UNLIKELY(expected != value)) { + V8_Fatal(file, line, + "CHECK_EQ(%s, %s) failed\n# Expected: %p\n# Found: %p", + expected_source, value_source, + expected, value); + } } -// Commonly used instantiations of MakeCheckOpString<>. Explicitly instantiated -// in logging.cc. 
-#define DEFINE_MAKE_CHECK_OP_STRING(type) \ - extern template std::string* MakeCheckOpString( \ - type const&, type const&, char const*); -DEFINE_MAKE_CHECK_OP_STRING(int) -DEFINE_MAKE_CHECK_OP_STRING(long) // NOLINT(runtime/int) -DEFINE_MAKE_CHECK_OP_STRING(long long) // NOLINT(runtime/int) -DEFINE_MAKE_CHECK_OP_STRING(unsigned int) -DEFINE_MAKE_CHECK_OP_STRING(unsigned long) // NOLINT(runtime/int) -DEFINE_MAKE_CHECK_OP_STRING(unsigned long long) // NOLINT(runtime/int) -DEFINE_MAKE_CHECK_OP_STRING(char const*) -DEFINE_MAKE_CHECK_OP_STRING(void const*) -#undef DEFINE_MAKE_CHECK_OP_STRING - - -// Helper functions for CHECK_OP macro. -// The (int, int) specialization works around the issue that the compiler -// will not instantiate the template version of the function on values of -// unnamed enum type - see comment below. -// The (float, float) and (double, double) instantiations are explicitly -// externialized to ensure proper 32/64-bit comparisons on x86. -#define DEFINE_CHECK_OP_IMPL(NAME, op) \ - template \ - V8_INLINE std::string* Check##NAME##Impl(Lhs const& lhs, Rhs const& rhs, \ - char const* msg) { \ - return V8_LIKELY(lhs op rhs) ? nullptr : MakeCheckOpString(lhs, rhs, msg); \ - } \ - V8_INLINE std::string* Check##NAME##Impl(int lhs, int rhs, \ - char const* msg) { \ - return V8_LIKELY(lhs op rhs) ? nullptr : MakeCheckOpString(lhs, rhs, msg); \ - } \ - extern template std::string* Check##NAME##Impl( \ - float const& lhs, float const& rhs, char const* msg); \ - extern template std::string* Check##NAME##Impl( \ - double const& lhs, double const& rhs, char const* msg); -DEFINE_CHECK_OP_IMPL(EQ, ==) -DEFINE_CHECK_OP_IMPL(NE, !=) -DEFINE_CHECK_OP_IMPL(LE, <=) -DEFINE_CHECK_OP_IMPL(LT, < ) -DEFINE_CHECK_OP_IMPL(GE, >=) -DEFINE_CHECK_OP_IMPL(GT, > ) -#undef DEFINE_CHECK_OP_IMPL - -#define CHECK_EQ(lhs, rhs) CHECK_OP(EQ, ==, lhs, rhs) -#define CHECK_NE(lhs, rhs) CHECK_OP(NE, !=, lhs, rhs) -#define CHECK_LE(lhs, rhs) CHECK_OP(LE, <=, lhs, rhs) -#define CHECK_LT(lhs, rhs) CHECK_OP(LT, <, lhs, rhs) -#define CHECK_GE(lhs, rhs) CHECK_OP(GE, >=, lhs, rhs) -#define CHECK_GT(lhs, rhs) CHECK_OP(GT, >, lhs, rhs) -#define CHECK_NULL(val) CHECK((val) == nullptr) -#define CHECK_NOT_NULL(val) CHECK((val) != nullptr) -#define CHECK_IMPLIES(lhs, rhs) CHECK(!(lhs) || (rhs)) +inline void CheckNonEqualsHelper(const char* file, + int line, + const char* expected_source, + const void* expected, + const char* value_source, + const void* value) { + if (V8_UNLIKELY(expected == value)) { + V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %p", + expected_source, value_source, value); + } +} + + +inline void CheckNonEqualsHelper(const char* file, + int line, + const char* expected_source, + int64_t expected, + const char* value_source, + int64_t value) { + if (V8_UNLIKELY(expected == value)) { + V8_Fatal(file, line, + "CHECK_EQ(%s, %s) failed\n# Expected: %f\n# Found: %f", + expected_source, value_source, expected, value); + } +} + + +#define CHECK_EQ(expected, value) CheckEqualsHelper(__FILE__, __LINE__, \ + #expected, expected, #value, value) + + +#define CHECK_NE(unexpected, value) CheckNonEqualsHelper(__FILE__, __LINE__, \ + #unexpected, unexpected, #value, value) + + +#define CHECK_GT(a, b) CHECK((a) > (b)) +#define CHECK_GE(a, b) CHECK((a) >= (b)) +#define CHECK_LT(a, b) CHECK((a) < (b)) +#define CHECK_LE(a, b) CHECK((a) <= (b)) + + +namespace v8 { +namespace base { // Exposed for making debugging easier (to see where your function is being // called, just add a call to DumpBacktrace). 
void DumpBacktrace(); -} // namespace base -} // namespace v8 +} } // namespace v8::base // The DCHECK macro is equivalent to CHECK except that it only // generates code in debug builds. -// TODO(bmeurer): DCHECK_RESULT(expr) must die! #ifdef DEBUG #define DCHECK_RESULT(expr) CHECK(expr) #define DCHECK(condition) CHECK(condition) @@ -158,9 +202,6 @@ void DumpBacktrace(); #define DCHECK_GE(v1, v2) CHECK_GE(v1, v2) #define DCHECK_LT(v1, v2) CHECK_LT(v1, v2) #define DCHECK_LE(v1, v2) CHECK_LE(v1, v2) -#define DCHECK_NULL(val) CHECK_NULL(val) -#define DCHECK_NOT_NULL(val) CHECK_NOT_NULL(val) -#define DCHECK_IMPLIES(v1, v2) CHECK_IMPLIES(v1, v2) #else #define DCHECK_RESULT(expr) (expr) #define DCHECK(condition) ((void) 0) @@ -169,9 +210,8 @@ void DumpBacktrace(); #define DCHECK_GE(v1, v2) ((void) 0) #define DCHECK_LT(v1, v2) ((void) 0) #define DCHECK_LE(v1, v2) ((void) 0) -#define DCHECK_NULL(val) ((void) 0) -#define DCHECK_NOT_NULL(val) ((void) 0) -#define DCHECK_IMPLIES(v1, v2) ((void) 0) #endif +#define DCHECK_NOT_NULL(p) DCHECK_NE(NULL, p) + #endif // V8_BASE_LOGGING_H_ diff --git a/src/base/platform/condition-variable.cc b/src/base/platform/condition-variable.cc index b91025a..4547b66 100644 --- a/src/base/platform/condition-variable.cc +++ b/src/base/platform/condition-variable.cc @@ -182,7 +182,7 @@ void ConditionVariable::NativeHandle::Post(Event* event, bool result) { // Remove the event from the wait list. for (Event** wep = &waitlist_;; wep = &(*wep)->next_) { - DCHECK(*wep); + DCHECK_NE(NULL, *wep); if (*wep == event) { *wep = event->next_; break; diff --git a/src/base/platform/time.cc b/src/base/platform/time.cc index 6734218..40dd188 100644 --- a/src/base/platform/time.cc +++ b/src/base/platform/time.cc @@ -13,8 +13,7 @@ #include #endif -#include -#include +#include #if V8_OS_WIN #include "src/base/lazy-instance.h" @@ -356,11 +355,6 @@ double Time::ToJsTime() const { } -std::ostream& operator<<(std::ostream& os, const Time& time) { - return os << time.ToJsTime(); -} - - #if V8_OS_WIN class TickClock { diff --git a/src/base/platform/time.h b/src/base/platform/time.h index 887664e..9dfa47d 100644 --- a/src/base/platform/time.h +++ b/src/base/platform/time.h @@ -5,8 +5,7 @@ #ifndef V8_BASE_PLATFORM_TIME_H_ #define V8_BASE_PLATFORM_TIME_H_ -#include -#include +#include #include #include "src/base/macros.h" @@ -281,8 +280,6 @@ class Time FINAL { int64_t us_; }; -std::ostream& operator<<(std::ostream&, const Time&); - inline Time operator+(const TimeDelta& delta, const Time& time) { return time + delta; } diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc index f5baafc..aef9428 100644 --- a/src/bootstrapper.cc +++ b/src/bootstrapper.cc @@ -1491,7 +1491,7 @@ static Handle ResolveBuiltinIdHolder(Handle native_context, .ToHandleChecked()); } const char* inner = period_pos + 1; - DCHECK(!strchr(inner, '.')); + DCHECK_EQ(NULL, strchr(inner, '.')); Vector property(holder_expr, static_cast(period_pos - holder_expr)); Handle property_string = factory->InternalizeUtf8String(property); diff --git a/src/checks.cc b/src/checks.cc index 2871a66..e5a4caa 100644 --- a/src/checks.cc +++ b/src/checks.cc @@ -4,6 +4,85 @@ #include "src/checks.h" +#include "src/v8.h" + namespace v8 { -namespace internal {} // namespace internal -} // namespace v8 +namespace internal { + +intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; } + +} } // namespace v8::internal + + +static bool CheckEqualsStrict(volatile double* exp, volatile double* val) { + v8::internal::DoubleRepresentation exp_rep(*exp); + 
v8::internal::DoubleRepresentation val_rep(*val); + if (std::isnan(exp_rep.value) && std::isnan(val_rep.value)) return true; + return exp_rep.bits == val_rep.bits; +} + + +void CheckEqualsHelper(const char* file, int line, const char* expected_source, + double expected, const char* value_source, + double value) { + // Force values to 64 bit memory to truncate 80 bit precision on IA32. + volatile double* exp = new double[1]; + *exp = expected; + volatile double* val = new double[1]; + *val = value; + if (!CheckEqualsStrict(exp, val)) { + V8_Fatal(file, line, + "CHECK_EQ(%s, %s) failed\n# Expected: %f\n# Found: %f", + expected_source, value_source, *exp, *val); + } + delete[] exp; + delete[] val; +} + + +void CheckNonEqualsHelper(const char* file, int line, + const char* expected_source, double expected, + const char* value_source, double value) { + // Force values to 64 bit memory to truncate 80 bit precision on IA32. + volatile double* exp = new double[1]; + *exp = expected; + volatile double* val = new double[1]; + *val = value; + if (CheckEqualsStrict(exp, val)) { + V8_Fatal(file, line, + "CHECK_EQ(%s, %s) failed\n# Expected: %f\n# Found: %f", + expected_source, value_source, *exp, *val); + } + delete[] exp; + delete[] val; +} + + +void CheckEqualsHelper(const char* file, + int line, + const char* expected_source, + v8::Handle expected, + const char* value_source, + v8::Handle value) { + if (!expected->Equals(value)) { + v8::String::Utf8Value value_str(value); + v8::String::Utf8Value expected_str(expected); + V8_Fatal(file, line, + "CHECK_EQ(%s, %s) failed\n# Expected: %s\n# Found: %s", + expected_source, value_source, *expected_str, *value_str); + } +} + + +void CheckNonEqualsHelper(const char* file, + int line, + const char* unexpected_source, + v8::Handle unexpected, + const char* value_source, + v8::Handle value) { + if (unexpected->Equals(value)) { + v8::String::Utf8Value value_str(value); + V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %s", + unexpected_source, value_source, *value_str); + } +} diff --git a/src/checks.h b/src/checks.h index 54ac926..6ba64c1 100644 --- a/src/checks.h +++ b/src/checks.h @@ -5,7 +5,6 @@ #ifndef V8_CHECKS_H_ #define V8_CHECKS_H_ -#include "include/v8.h" #include "src/base/logging.h" namespace v8 { @@ -15,6 +14,8 @@ template class Handle; namespace internal { +intptr_t HeapObjectTagMask(); + #ifdef ENABLE_SLOW_DCHECKS #define SLOW_DCHECK(condition) \ CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition)) @@ -26,11 +27,30 @@ const bool FLAG_enable_slow_asserts = false; } } // namespace v8::internal -#define DCHECK_TAG_ALIGNED(address) \ - DCHECK((reinterpret_cast(address) & \ - ::v8::internal::kHeapObjectTagMask) == 0) -#define DCHECK_SIZE_TAG_ALIGNED(size) \ - DCHECK((size & ::v8::internal::kHeapObjectTagMask) == 0) +void CheckNonEqualsHelper(const char* file, int line, + const char* expected_source, double expected, + const char* value_source, double value); + +void CheckEqualsHelper(const char* file, int line, const char* expected_source, + double expected, const char* value_source, double value); + +void CheckNonEqualsHelper(const char* file, int line, + const char* unexpected_source, + v8::Handle unexpected, + const char* value_source, + v8::Handle value); + +void CheckEqualsHelper(const char* file, + int line, + const char* expected_source, + v8::Handle expected, + const char* value_source, + v8::Handle value); + +#define DCHECK_TAG_ALIGNED(address) \ + DCHECK((reinterpret_cast(address) & HeapObjectTagMask()) == 0) + +#define 
DCHECK_SIZE_TAG_ALIGNED(size) DCHECK((size & HeapObjectTagMask()) == 0) #endif // V8_CHECKS_H_ diff --git a/src/compiler.cc b/src/compiler.cc index d31ca9e..7766778 100644 --- a/src/compiler.cc +++ b/src/compiler.cc @@ -208,7 +208,7 @@ CompilationInfo::~CompilationInfo() { // Check that no dependent maps have been added or added dependent maps have // been rolled back or committed. for (int i = 0; i < DependentCode::kGroupCount; i++) { - DCHECK(!dependencies_[i]); + DCHECK_EQ(NULL, dependencies_[i]); } #endif // DEBUG } diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc index 894584b..51ac207 100644 --- a/src/compiler/arm/code-generator-arm.cc +++ b/src/compiler/arm/code-generator-arm.cc @@ -745,7 +745,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr, // Materialize a full 32-bit 1 or 0 value. The result register is always the // last output of the instruction. - DCHECK_NE(0u, instr->OutputCount()); + DCHECK_NE(0, instr->OutputCount()); Register reg = i.OutputRegister(instr->OutputCount() - 1); Condition cc = FlagsConditionToCondition(condition); __ mov(reg, Operand(0)); diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc index 7219ca3..64393dd 100644 --- a/src/compiler/arm/instruction-selector-arm.cc +++ b/src/compiler/arm/instruction-selector-arm.cc @@ -233,8 +233,8 @@ void VisitBinop(InstructionSelector* selector, Node* node, outputs[output_count++] = g.DefineAsRegister(cont->result()); } - DCHECK_NE(0u, input_count); - DCHECK_NE(0u, output_count); + DCHECK_NE(0, input_count); + DCHECK_NE(0, output_count); DCHECK_GE(arraysize(inputs), input_count); DCHECK_GE(arraysize(outputs), output_count); DCHECK_NE(kMode_None, AddressingModeField::decode(opcode)); @@ -448,8 +448,8 @@ void EmitBic(InstructionSelector* selector, Node* node, Node* left, void EmitUbfx(InstructionSelector* selector, Node* node, Node* left, uint32_t lsb, uint32_t width) { - DCHECK_LE(1u, width); - DCHECK_LE(width, 32u - lsb); + DCHECK_LE(1, width); + DCHECK_LE(width, 32 - lsb); ArmOperandGenerator g(selector); selector->Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(left), g.TempImmediate(lsb), g.TempImmediate(width)); @@ -481,7 +481,7 @@ void InstructionSelector::VisitWord32And(Node* node) { uint32_t msb = base::bits::CountLeadingZeros32(value); // Try to interpret this AND as UBFX. 
if (IsSupported(ARMv7) && width != 0 && msb + width == 32) { - DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value)); + DCHECK_EQ(0, base::bits::CountTrailingZeros32(value)); if (m.left().IsWord32Shr()) { Int32BinopMatcher mleft(m.left().node()); if (mleft.right().IsInRange(0, 31)) { @@ -550,11 +550,10 @@ void InstructionSelector::VisitWord32Xor(Node* node) { } -namespace { - template -void VisitShift(InstructionSelector* selector, Node* node, - TryMatchShift try_match_shift, FlagsContinuation* cont) { +static inline void VisitShift(InstructionSelector* selector, Node* node, + TryMatchShift try_match_shift, + FlagsContinuation* cont) { ArmOperandGenerator g(selector); InstructionCode opcode = kArmMov; InstructionOperand* inputs[4]; @@ -574,8 +573,8 @@ void VisitShift(InstructionSelector* selector, Node* node, outputs[output_count++] = g.DefineAsRegister(cont->result()); } - DCHECK_NE(0u, input_count); - DCHECK_NE(0u, output_count); + DCHECK_NE(0, input_count); + DCHECK_NE(0, output_count); DCHECK_GE(arraysize(inputs), input_count); DCHECK_GE(arraysize(outputs), output_count); DCHECK_NE(kMode_None, AddressingModeField::decode(opcode)); @@ -587,14 +586,12 @@ void VisitShift(InstructionSelector* selector, Node* node, template -void VisitShift(InstructionSelector* selector, Node* node, +static inline void VisitShift(InstructionSelector* selector, Node* node, TryMatchShift try_match_shift) { FlagsContinuation cont; VisitShift(selector, node, try_match_shift, &cont); } -} // namespace - void InstructionSelector::VisitWord32Shl(Node* node) { VisitShift(this, node, TryMatchLSL); @@ -606,7 +603,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) { Int32BinopMatcher m(node); if (IsSupported(ARMv7) && m.left().IsWord32And() && m.right().IsInRange(0, 31)) { - uint32_t lsb = m.right().Value(); + int32_t lsb = m.right().Value(); Int32BinopMatcher mleft(m.left().node()); if (mleft.right().HasValue()) { uint32_t value = (mleft.right().Value() >> lsb) << lsb; @@ -1126,7 +1123,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node, outputs[output_count++] = g.DefineAsRegister(cont->result()); } - DCHECK_NE(0u, input_count); + DCHECK_NE(0, input_count); DCHECK_GE(arraysize(inputs), input_count); DCHECK_GE(arraysize(outputs), output_count); diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc index bea4805..27cb4e7 100644 --- a/src/compiler/arm64/code-generator-arm64.cc +++ b/src/compiler/arm64/code-generator-arm64.cc @@ -846,7 +846,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr, // Materialize a full 64-bit 1 or 0 value. The result register is always the // last output of the instruction. 
- DCHECK_NE(0u, instr->OutputCount()); + DCHECK_NE(0, instr->OutputCount()); Register reg = i.OutputRegister(instr->OutputCount() - 1); Condition cc = FlagsConditionToCondition(condition); __ Cset(reg, cc); diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc index 4063f8a..82c0bea 100644 --- a/src/compiler/arm64/instruction-selector-arm64.cc +++ b/src/compiler/arm64/instruction-selector-arm64.cc @@ -215,8 +215,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node, outputs[output_count++] = g.DefineAsRegister(cont->result()); } - DCHECK_NE(0u, input_count); - DCHECK_NE(0u, output_count); + DCHECK_NE(0, input_count); + DCHECK_NE(0, output_count); DCHECK_GE(arraysize(inputs), input_count); DCHECK_GE(arraysize(outputs), output_count); @@ -507,7 +507,7 @@ void InstructionSelector::VisitWord32And(Node* node) { uint32_t mask_msb = base::bits::CountLeadingZeros32(mask); if ((mask_width != 0) && (mask_msb + mask_width == 32)) { // The mask must be contiguous, and occupy the least-significant bits. - DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask)); + DCHECK_EQ(0, base::bits::CountTrailingZeros32(mask)); // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least // significant bits. @@ -544,7 +544,7 @@ void InstructionSelector::VisitWord64And(Node* node) { uint64_t mask_msb = base::bits::CountLeadingZeros64(mask); if ((mask_width != 0) && (mask_msb + mask_width == 64)) { // The mask must be contiguous, and occupy the least-significant bits. - DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask)); + DCHECK_EQ(0, base::bits::CountTrailingZeros64(mask)); // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least // significant bits. @@ -628,7 +628,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) { Arm64OperandGenerator g(this); Int32BinopMatcher m(node); if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) { - uint32_t lsb = m.right().Value(); + int32_t lsb = m.right().Value(); Int32BinopMatcher mleft(m.left().node()); if (mleft.right().HasValue()) { uint32_t mask = (mleft.right().Value() >> lsb) << lsb; @@ -653,7 +653,7 @@ void InstructionSelector::VisitWord64Shr(Node* node) { Arm64OperandGenerator g(this); Int64BinopMatcher m(node); if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) { - uint64_t lsb = m.right().Value(); + int64_t lsb = m.right().Value(); Int64BinopMatcher mleft(m.left().node()); if (mleft.right().HasValue()) { // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc index 3a56ece..72c6ec9 100644 --- a/src/compiler/code-generator.cc +++ b/src/compiler/code-generator.cc @@ -281,7 +281,7 @@ void CodeGenerator::PopulateDeoptimizationData(Handle code_object) { for (int i = 0; i < deopt_count; i++) { DeoptimizationState* deoptimization_state = deoptimization_states_[i]; data->SetAstId(i, deoptimization_state->bailout_id()); - CHECK(deoptimization_states_[i]); + CHECK_NE(NULL, deoptimization_states_[i]); data->SetTranslationIndex( i, Smi::FromInt(deoptimization_states_[i]->translation_id())); data->SetArgumentsStackHeight(i, Smi::FromInt(0)); diff --git a/src/compiler/control-reducer.cc b/src/compiler/control-reducer.cc index 105bdfe..b7dec36 100644 --- a/src/compiler/control-reducer.cc +++ b/src/compiler/control-reducer.cc @@ -296,7 +296,7 @@ class ControlReducerImpl { for (size_t j = 0; j < nodes.size(); j++) { Node* node = nodes[j]; for (Node* const input : 
node->inputs()) { - CHECK(input); + CHECK_NE(NULL, input); } for (Node* const use : node->uses()) { CHECK(marked.IsReachableFromEnd(use)); @@ -319,7 +319,7 @@ class ControlReducerImpl { // Recurse on an input if necessary. for (Node* const input : node->inputs()) { - DCHECK(input); + CHECK_NE(NULL, input); if (Recurse(input)) return; } diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc index 525451a..fd9b804 100644 --- a/src/compiler/ia32/code-generator-ia32.cc +++ b/src/compiler/ia32/code-generator-ia32.cc @@ -768,7 +768,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr, // Materialize a full 32-bit 1 or 0 value. The result register is always the // last output of the instruction. Label check; - DCHECK_NE(0u, instr->OutputCount()); + DCHECK_NE(0, instr->OutputCount()); Register reg = i.OutputRegister(instr->OutputCount() - 1); Condition cc = no_condition; switch (condition) { diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc index b81210a..233fa37 100644 --- a/src/compiler/ia32/instruction-selector-ia32.cc +++ b/src/compiler/ia32/instruction-selector-ia32.cc @@ -370,8 +370,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node, outputs[output_count++] = g.DefineAsRegister(cont->result()); } - DCHECK_NE(0u, input_count); - DCHECK_NE(0u, output_count); + DCHECK_NE(0, input_count); + DCHECK_NE(0, output_count); DCHECK_GE(arraysize(inputs), input_count); DCHECK_GE(arraysize(outputs), output_count); diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc index fe9c287..da58698 100644 --- a/src/compiler/instruction-selector.cc +++ b/src/compiler/instruction-selector.cc @@ -40,7 +40,7 @@ void InstructionSelector::SelectInstructions() { BasicBlockVector* blocks = schedule()->rpo_order(); for (auto const block : *blocks) { if (!block->IsLoopHeader()) continue; - DCHECK_LE(2u, block->PredecessorCount()); + DCHECK_LE(2, block->PredecessorCount()); for (Node* const phi : *block) { if (phi->opcode() != IrOpcode::kPhi) continue; @@ -342,7 +342,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer, if (use->opcode() != IrOpcode::kProjection) continue; size_t const index = ProjectionIndexOf(use->op()); DCHECK_LT(index, buffer->output_nodes.size()); - DCHECK(!buffer->output_nodes[index]); + DCHECK_EQ(nullptr, buffer->output_nodes[index]); buffer->output_nodes[index] = use; } } @@ -435,7 +435,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer, if (static_cast(stack_index) >= buffer->pushed_nodes.size()) { buffer->pushed_nodes.resize(stack_index + 1, NULL); } - DCHECK(!buffer->pushed_nodes[stack_index]); + DCHECK_EQ(NULL, buffer->pushed_nodes[stack_index]); buffer->pushed_nodes[stack_index] = *iter; pushed_count++; } else { @@ -450,7 +450,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer, void InstructionSelector::VisitBlock(BasicBlock* block) { - DCHECK(!current_block_); + DCHECK_EQ(NULL, current_block_); current_block_ = block; int current_block_end = static_cast(instructions_.size()); diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc index 34a3151..ce2c076 100644 --- a/src/compiler/instruction.cc +++ b/src/compiler/instruction.cc @@ -447,7 +447,7 @@ InstructionBlocks* InstructionSequence::InstructionBlocksFor( size_t rpo_number = 0; for (BasicBlockVector::const_iterator it = schedule->rpo_order()->begin(); it != 
schedule->rpo_order()->end(); ++it, ++rpo_number) { - DCHECK(!(*blocks)[rpo_number]); + DCHECK_EQ(NULL, (*blocks)[rpo_number]); DCHECK((*it)->GetRpoNumber().ToSize() == rpo_number); (*blocks)[rpo_number] = InstructionBlockFor(zone, *it); } diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h index 21750e7..3dda702 100644 --- a/src/compiler/instruction.h +++ b/src/compiler/instruction.h @@ -520,7 +520,7 @@ class Instruction : public ZoneObject { void set_pointer_map(PointerMap* map) { DCHECK(NeedsPointerMap()); - DCHECK(!pointer_map_); + DCHECK_EQ(NULL, pointer_map_); pointer_map_ = map; } diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc index 42fc58e..d45877b 100644 --- a/src/compiler/js-inlining.cc +++ b/src/compiler/js-inlining.cc @@ -84,7 +84,7 @@ class Inlinee { // Counts only formal parameters. size_t formal_parameters() { - DCHECK_GE(total_parameters(), 3u); + DCHECK_GE(total_parameters(), 3); return total_parameters() - 3; } @@ -176,7 +176,7 @@ class CopyVisitor : public NullNodeVisitor { if (copy == NULL) { copy = GetSentinel(original); } - DCHECK(copy); + DCHECK_NE(NULL, copy); return copy; } @@ -193,7 +193,7 @@ class CopyVisitor : public NullNodeVisitor { Node* sentinel = sentinels_[id]; if (sentinel == NULL) continue; Node* copy = copies_[id]; - DCHECK(copy); + DCHECK_NE(NULL, copy); sentinel->ReplaceUses(copy); } } diff --git a/src/compiler/loop-peeling.cc b/src/compiler/loop-peeling.cc index 39f487f..1bdf71a 100644 --- a/src/compiler/loop-peeling.cc +++ b/src/compiler/loop-peeling.cc @@ -268,7 +268,7 @@ PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common, } } // There should be a merge or a return for each exit. - CHECK(found); + CHECK_NE(NULL, found); } // Return nodes, the end merge, and the phis associated with the end merge // must be duplicated as well. diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc index 8f91d49..95c6eaa 100644 --- a/src/compiler/machine-operator-reducer.cc +++ b/src/compiler/machine-operator-reducer.cc @@ -103,7 +103,7 @@ Node* MachineOperatorReducer::Int32Div(Node* dividend, int32_t divisor) { Node* MachineOperatorReducer::Uint32Div(Node* dividend, uint32_t divisor) { - DCHECK_LT(0u, divisor); + DCHECK_LT(0, divisor); // If the divisor is even, we can avoid using the expensive fixup by shifting // the dividend upfront. 
unsigned const shift = base::bits::CountTrailingZeros32(divisor); @@ -115,7 +115,7 @@ Node* MachineOperatorReducer::Uint32Div(Node* dividend, uint32_t divisor) { Node* quotient = graph()->NewNode(machine()->Uint32MulHigh(), dividend, Uint32Constant(mag.multiplier)); if (mag.add) { - DCHECK_LE(1u, mag.shift); + DCHECK_LE(1, mag.shift); quotient = Word32Shr( Int32Add(Word32Shr(Int32Sub(dividend, quotient), 1), quotient), mag.shift - 1); @@ -520,7 +520,7 @@ Reduction MachineOperatorReducer::ReduceInt32Div(Node* node) { Node* quotient = dividend; if (base::bits::IsPowerOfTwo32(Abs(divisor))) { uint32_t const shift = WhichPowerOf2Abs(divisor); - DCHECK_NE(0u, shift); + DCHECK_NE(0, shift); if (shift > 1) { quotient = Word32Sar(quotient, 31); } diff --git a/src/compiler/move-optimizer.cc b/src/compiler/move-optimizer.cc index 855256e..330f32f 100644 --- a/src/compiler/move-optimizer.cc +++ b/src/compiler/move-optimizer.cc @@ -83,11 +83,11 @@ static MoveOperands* PrepareInsertAfter(ParallelMove* left, MoveOperands* move, for (auto curr = move_ops->begin(); curr != move_ops->end(); ++curr) { if (curr->IsEliminated()) continue; if (curr->destination()->Equals(move->source())) { - DCHECK(!replacement); + DCHECK_EQ(nullptr, replacement); replacement = curr; if (to_eliminate != nullptr) break; } else if (curr->destination()->Equals(move->destination())) { - DCHECK(!to_eliminate); + DCHECK_EQ(nullptr, to_eliminate); to_eliminate = curr; if (replacement != nullptr) break; } diff --git a/src/compiler/node.cc b/src/compiler/node.cc index d38e9ce..a4680e46 100644 --- a/src/compiler/node.cc +++ b/src/compiler/node.cc @@ -134,7 +134,7 @@ void Node::ReplaceUses(Node* replace_to) { use->from->GetInputRecordPtr(use->input_index)->to = replace_to; } if (!replace_to->last_use_) { - DCHECK(!replace_to->first_use_); + DCHECK_EQ(nullptr, replace_to->first_use_); replace_to->first_use_ = first_use_; replace_to->last_use_ = last_use_; } else if (first_use_) { diff --git a/src/compiler/osr.cc b/src/compiler/osr.cc index a4b8452..6f30963 100644 --- a/src/compiler/osr.cc +++ b/src/compiler/osr.cc @@ -40,18 +40,18 @@ bool OsrHelper::Deconstruct(JSGraph* jsgraph, CommonOperatorBuilder* common, if (osr_loop_entry == nullptr) { // No OSR entry found, do nothing. - CHECK(osr_normal_entry); + CHECK_NE(nullptr, osr_normal_entry); return true; } for (Node* use : osr_loop_entry->uses()) { if (use->opcode() == IrOpcode::kLoop) { - CHECK(!osr_loop); // should be only one OSR loop. + CHECK_EQ(nullptr, osr_loop); // should be only one OSR loop. osr_loop = use; // found the OSR loop. } } - CHECK(osr_loop); // Should have found the OSR loop. + CHECK_NE(nullptr, osr_loop); // Should have found the OSR loop. // Analyze the graph to determine how deeply nested the OSR loop is. 
LoopTree* loop_tree = LoopFinder::BuildLoopTree(graph, tmp_zone); diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc index e0667ed..cf10078 100644 --- a/src/compiler/pipeline.cc +++ b/src/compiler/pipeline.cc @@ -145,19 +145,19 @@ class PipelineData { LoopAssignmentAnalysis* loop_assignment() const { return loop_assignment_; } void set_loop_assignment(LoopAssignmentAnalysis* loop_assignment) { - DCHECK(!loop_assignment_); + DCHECK_EQ(nullptr, loop_assignment_); loop_assignment_ = loop_assignment; } Node* context_node() const { return context_node_; } void set_context_node(Node* context_node) { - DCHECK(!context_node_); + DCHECK_EQ(nullptr, context_node_); context_node_ = context_node; } Schedule* schedule() const { return schedule_; } void set_schedule(Schedule* schedule) { - DCHECK(!schedule_); + DCHECK_EQ(nullptr, schedule_); schedule_ = schedule; } @@ -194,7 +194,7 @@ class PipelineData { } void InitializeInstructionSequence() { - DCHECK(!sequence_); + DCHECK_EQ(nullptr, sequence_); InstructionBlocks* instruction_blocks = InstructionSequence::InstructionBlocksFor(instruction_zone(), schedule()); @@ -205,8 +205,8 @@ class PipelineData { void InitializeRegisterAllocator(Zone* local_zone, const RegisterConfiguration* config, const char* debug_name) { - DCHECK(!register_allocator_); - DCHECK(!frame_); + DCHECK_EQ(nullptr, register_allocator_); + DCHECK_EQ(nullptr, frame_); frame_ = new (instruction_zone()) Frame(); register_allocator_ = new (instruction_zone()) RegisterAllocator(config, local_zone, frame(), sequence(), debug_name); diff --git a/src/compiler/register-allocator-verifier.cc b/src/compiler/register-allocator-verifier.cc index 0f05333..d4471ff 100644 --- a/src/compiler/register-allocator-verifier.cc +++ b/src/compiler/register-allocator-verifier.cc @@ -20,7 +20,7 @@ static void VerifyGapEmpty(const GapInstruction* gap) { i <= GapInstruction::LAST_INNER_POSITION; i++) { GapInstruction::InnerPosition inner_pos = static_cast(i); - CHECK(!gap->GetParallelMove(inner_pos)); + CHECK_EQ(NULL, gap->GetParallelMove(inner_pos)); } } @@ -432,14 +432,14 @@ class OperandMap : public ZoneObject { for (; p != nullptr; p = p->first_pred_phi) { if (p->virtual_register == v->use_vreg) break; } - CHECK(p); + CHECK_NE(nullptr, p); } // Mark the use. it->second->use_vreg = use_vreg; return; } // Use of a phi value without definition. - UNREACHABLE(); + CHECK(false); } private: diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc index 4e5e53b..e0487fa 100644 --- a/src/compiler/register-allocator.cc +++ b/src/compiler/register-allocator.cc @@ -183,7 +183,7 @@ void LiveRange::SetSpillOperand(InstructionOperand* operand) { void LiveRange::SetSpillRange(SpillRange* spill_range) { DCHECK(HasNoSpillType() || HasSpillRange()); - DCHECK(spill_range); + DCHECK_NE(spill_range, nullptr); spill_type_ = SpillType::kSpillRange; spill_range_ = spill_range; } diff --git a/src/compiler/scheduler.cc b/src/compiler/scheduler.cc index 6281371..f512cd2 100644 --- a/src/compiler/scheduler.cc +++ b/src/compiler/scheduler.cc @@ -266,7 +266,7 @@ class CFGBuilder : public ZoneObject { // single-exit region that makes up a minimal component to be scheduled. 
if (IsSingleEntrySingleExitRegion(node, exit)) { Trace("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic()); - DCHECK(!component_entry_); + DCHECK_EQ(NULL, component_entry_); component_entry_ = node; continue; } @@ -276,7 +276,7 @@ class CFGBuilder : public ZoneObject { Queue(node->InputAt(i)); } } - DCHECK(component_entry_); + DCHECK_NE(NULL, component_entry_); for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) { ConnectBlocks(*i); // Connect block to its predecessor/successors. @@ -370,16 +370,16 @@ class CFGBuilder : public ZoneObject { buffer[1] = NULL; for (Node* use : node->uses()) { if (use->opcode() == true_opcode) { - DCHECK(!buffer[0]); + DCHECK_EQ(NULL, buffer[0]); buffer[0] = use; } if (use->opcode() == false_opcode) { - DCHECK(!buffer[1]); + DCHECK_EQ(NULL, buffer[1]); buffer[1] = use; } } - DCHECK(buffer[0]); - DCHECK(buffer[1]); + DCHECK_NE(NULL, buffer[0]); + DCHECK_NE(NULL, buffer[1]); } void CollectSuccessorBlocks(Node* node, BasicBlock** buffer, @@ -448,7 +448,7 @@ class CFGBuilder : public ZoneObject { } void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) { - DCHECK(block); + DCHECK_NE(NULL, block); if (succ == NULL) { Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(), block->id().ToInt()); @@ -533,7 +533,7 @@ class SpecialRPONumberer : public ZoneObject { // that is for the graph spanned between the schedule's start and end blocks. void ComputeSpecialRPO() { DCHECK(schedule_->end()->SuccessorCount() == 0); - DCHECK(!order_); // Main order does not exist yet. + DCHECK_EQ(NULL, order_); // Main order does not exist yet. ComputeAndInsertSpecialRPO(schedule_->start(), schedule_->end()); } @@ -541,7 +541,7 @@ class SpecialRPONumberer : public ZoneObject { // that is for the graph spanned between the given {entry} and {end} blocks, // then updates the existing ordering with this new information. void UpdateSpecialRPO(BasicBlock* entry, BasicBlock* end) { - DCHECK(order_); // Main order to be updated is present. + DCHECK_NE(NULL, order_); // Main order to be updated is present. ComputeAndInsertSpecialRPO(entry, end); } diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc index e040cd2..3aad6ea 100644 --- a/src/compiler/verifier.cc +++ b/src/compiler/verifier.cc @@ -763,8 +763,8 @@ void Verifier::Visitor::Check(Node* node) { void Verifier::Run(Graph* graph, Typing typing) { - CHECK_NOT_NULL(graph->start()); - CHECK_NOT_NULL(graph->end()); + CHECK_NE(NULL, graph->start()); + CHECK_NE(NULL, graph->end()); Zone zone; Visitor visitor(&zone, typing); for (Node* node : AllNodes(&zone, graph).live) visitor.Check(node); @@ -868,10 +868,10 @@ void ScheduleVerifier::Run(Schedule* schedule) { BasicBlock* dom = block->dominator(); if (b == 0) { // All blocks except start should have a dominator. - CHECK_NULL(dom); + CHECK_EQ(NULL, dom); } else { // Check that the immediate dominator appears somewhere before the block. 
- CHECK_NOT_NULL(dom); + CHECK_NE(NULL, dom); CHECK_LT(dom->rpo_number(), block->rpo_number()); } } diff --git a/src/compiler/zone-pool.cc b/src/compiler/zone-pool.cc index 2006a79..05907a9 100644 --- a/src/compiler/zone-pool.cc +++ b/src/compiler/zone-pool.cc @@ -106,7 +106,7 @@ Zone* ZonePool::NewEmptyZone() { zone = new Zone(); } used_.push_back(zone); - DCHECK_EQ(0u, zone->allocation_size()); + DCHECK_EQ(0, zone->allocation_size()); return zone; } @@ -129,7 +129,7 @@ void ZonePool::ReturnZone(Zone* zone) { delete zone; } else { zone->DeleteAll(); - DCHECK_EQ(0u, zone->allocation_size()); + DCHECK_EQ(0, zone->allocation_size()); unused_.push_back(zone); } } diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc index 4fc36d1..7ddd928 100644 --- a/src/deoptimizer.cc +++ b/src/deoptimizer.cc @@ -110,7 +110,7 @@ size_t Deoptimizer::GetMaxDeoptTableSize() { Deoptimizer* Deoptimizer::Grab(Isolate* isolate) { Deoptimizer* result = isolate->deoptimizer_data()->current_; - CHECK_NOT_NULL(result); + CHECK_NE(result, NULL); result->DeleteFrameDescriptions(); isolate->deoptimizer_data()->current_ = NULL; return result; @@ -901,7 +901,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, bool is_bottommost = (0 == frame_index); bool is_topmost = (output_count_ - 1 == frame_index); CHECK(frame_index >= 0 && frame_index < output_count_); - CHECK_NULL(output_[frame_index]); + CHECK_EQ(output_[frame_index], NULL); output_[frame_index] = output_frame; // The top address for the bottommost output frame can be computed from @@ -1060,7 +1060,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, output_offset -= kPointerSize; DoTranslateCommand(iterator, frame_index, output_offset); } - CHECK_EQ(0u, output_offset); + CHECK_EQ(0, output_offset); // Compute this frame's PC, state, and continuation. Code* non_optimized_code = function->shared()->code(); @@ -1382,7 +1382,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator, top_address + output_offset, output_offset, value); } - CHECK_EQ(0u, output_offset); + CHECK_EQ(0, output_offset); intptr_t pc = reinterpret_cast( construct_stub->instruction_start() + @@ -1429,7 +1429,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator, // A frame for an accessor stub can not be the topmost or bottommost one. CHECK(frame_index > 0 && frame_index < output_count_ - 1); - CHECK_NULL(output_[frame_index]); + CHECK_EQ(output_[frame_index], NULL); output_[frame_index] = output_frame; // The top address of the frame is computed from the previous frame's top and @@ -1522,7 +1522,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator, DoTranslateCommand(iterator, frame_index, output_offset); } - CHECK_EQ(0u, output_offset); + CHECK_EQ(output_offset, 0); Smi* offset = is_setter_stub_frame ? isolate_->heap()->setter_stub_deopt_pc_offset() : @@ -1735,7 +1735,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator, } } - CHECK_EQ(0u, output_frame_offset); + CHECK_EQ(output_frame_offset, 0); if (!arg_count_known) { CHECK_GE(arguments_length_offset, 0); diff --git a/src/deoptimizer.h b/src/deoptimizer.h index 47412f4..b14c369 100644 --- a/src/deoptimizer.h +++ b/src/deoptimizer.h @@ -95,10 +95,11 @@ class Deoptimizer : public Malloced { SOFT, // This last bailout type is not really a bailout, but used by the // debugger to deoptimize stack frames to allow inspection. 
- DEBUGGER, - kBailoutTypesWithCodeEntry = SOFT + 1 + DEBUGGER }; + static const int kBailoutTypesWithCodeEntry = SOFT + 1; + struct Reason { Reason(int r, const char* m, const char* d) : raw_position(r), mnemonic(m), detail(d) {} diff --git a/src/heap/heap.cc b/src/heap/heap.cc index 9d014a0..c76f691 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -5461,7 +5461,7 @@ bool Heap::CreateHeapObjects() { // Create initial objects CreateInitialObjects(); - CHECK_EQ(0u, gc_count_); + CHECK_EQ(0, gc_count_); set_native_contexts_list(undefined_value()); set_array_buffers_list(undefined_value()); diff --git a/src/heap/spaces.h b/src/heap/spaces.h index efb784c..e21876b 100644 --- a/src/heap/spaces.h +++ b/src/heap/spaces.h @@ -1276,13 +1276,13 @@ class AllocationInfo { INLINE(void set_top(Address top)) { SLOW_DCHECK(top == NULL || - (reinterpret_cast(top) & kHeapObjectTagMask) == 0); + (reinterpret_cast(top) & HeapObjectTagMask()) == 0); top_ = top; } INLINE(Address top()) const { SLOW_DCHECK(top_ == NULL || - (reinterpret_cast(top_) & kHeapObjectTagMask) == 0); + (reinterpret_cast(top_) & HeapObjectTagMask()) == 0); return top_; } @@ -1290,13 +1290,13 @@ class AllocationInfo { INLINE(void set_limit(Address limit)) { SLOW_DCHECK(limit == NULL || - (reinterpret_cast(limit) & kHeapObjectTagMask) == 0); + (reinterpret_cast(limit) & HeapObjectTagMask()) == 0); limit_ = limit; } INLINE(Address limit()) const { SLOW_DCHECK(limit_ == NULL || - (reinterpret_cast(limit_) & kHeapObjectTagMask) == + (reinterpret_cast(limit_) & HeapObjectTagMask()) == 0); return limit_; } diff --git a/src/hydrogen-check-elimination.cc b/src/hydrogen-check-elimination.cc index 3542fa6..1530fe1 100644 --- a/src/hydrogen-check-elimination.cc +++ b/src/hydrogen-check-elimination.cc @@ -373,7 +373,7 @@ class HCheckTable : public ZoneObject { instr->DeleteAndReplaceWith(entry->check_); INC_STAT(redundant_); } else if (entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) { - DCHECK_NULL(entry->check_); + DCHECK_EQ(NULL, entry->check_); TRACE(("Marking redundant CheckMaps #%d at B%d as stability check\n", instr->id(), instr->block()->block_id())); instr->set_maps(entry->maps_->Copy(graph->zone())); @@ -684,14 +684,14 @@ class HCheckTable : public ZoneObject { bool compact = false; for (int i = 0; i < size_; i++) { HCheckTableEntry* entry = &entries_[i]; - DCHECK_NOT_NULL(entry->object_); + DCHECK(entry->object_ != NULL); if (phase_->aliasing_->MayAlias(entry->object_, object)) { entry->object_ = NULL; compact = true; } } if (compact) Compact(); - DCHECK_NULL(Find(object)); + DCHECK(Find(object) == NULL); } void Compact() { diff --git a/src/hydrogen.cc b/src/hydrogen.cc index 9771d3f..cbed39e 100644 --- a/src/hydrogen.cc +++ b/src/hydrogen.cc @@ -8721,7 +8721,7 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle function, case kCallApiGetter: // Receiver and prototype chain cannot have changed. DCHECK_EQ(0, argc); - DCHECK_NULL(receiver); + DCHECK_EQ(NULL, receiver); // Receiver is on expression stack. receiver = Pop(); Add(receiver); @@ -8731,7 +8731,7 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle function, is_store = true; // Receiver and prototype chain cannot have changed. DCHECK_EQ(1, argc); - DCHECK_NULL(receiver); + DCHECK_EQ(NULL, receiver); // Receiver and value are on expression stack. 
HValue* value = Pop(); receiver = Pop(); @@ -11812,7 +11812,7 @@ void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) { void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) { DCHECK(call->arguments()->length() == 2); - DCHECK_NOT_NULL(call->arguments()->at(1)->AsLiteral()); + DCHECK_NE(NULL, call->arguments()->at(1)->AsLiteral()); Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->value())); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* date = Pop(); diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc index 1c74450..ca1afcd 100644 --- a/src/ia32/code-stubs-ia32.cc +++ b/src/ia32/code-stubs-ia32.cc @@ -1706,7 +1706,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) { // If either is a Smi (we know that not both are), then they can only // be equal if the other is a HeapNumber. If so, use the slow case. STATIC_ASSERT(kSmiTag == 0); - DCHECK_EQ(static_cast(0), Smi::FromInt(0)); + DCHECK_EQ(0, Smi::FromInt(0)); __ mov(ecx, Immediate(kSmiTagMask)); __ and_(ecx, eax); __ test(ecx, edx); diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc index af5fe45..81cd11f 100644 --- a/src/ia32/full-codegen-ia32.cc +++ b/src/ia32/full-codegen-ia32.cc @@ -3708,7 +3708,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { void FullCodeGenerator::EmitDateField(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK(args->length() == 2); - DCHECK_NOT_NULL(args->at(1)->AsLiteral()); + DCHECK_NE(NULL, args->at(1)->AsLiteral()); Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); VisitForAccumulatorValue(args->at(0)); // Load the object. @@ -4064,7 +4064,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK_EQ(2, args->length()); - DCHECK_NOT_NULL(args->at(0)->AsLiteral()); + DCHECK_NE(NULL, args->at(0)->AsLiteral()); int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); Handle jsfunction_result_caches( diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc index 854e5ed..3579281 100644 --- a/src/ia32/lithium-codegen-ia32.cc +++ b/src/ia32/lithium-codegen-ia32.cc @@ -4387,7 +4387,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { __ mov(FieldOperand(object_reg, HeapObject::kMapOffset), Immediate(to_map)); // Write barrier. 
- DCHECK_NOT_NULL(instr->temp()); + DCHECK_NE(instr->temp(), NULL); __ RecordWriteForMap(object_reg, to_map, new_map_reg, ToRegister(instr->temp()), kDontSaveFPRegs); diff --git a/src/ic/handler-compiler.cc b/src/ic/handler-compiler.cc index 3af36fa..16e42ce 100644 --- a/src/ic/handler-compiler.cc +++ b/src/ic/handler-compiler.cc @@ -347,7 +347,7 @@ void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor( case LookupIterator::ACCESSOR: Handle info = Handle::cast(it->GetAccessors()); - DCHECK_NOT_NULL(info->getter()); + DCHECK_NE(NULL, info->getter()); GenerateLoadCallback(reg, info); } } diff --git a/src/ic/ic-state.h b/src/ic/ic-state.h index 4d72366..72fc865 100644 --- a/src/ic/ic-state.h +++ b/src/ic/ic-state.h @@ -123,7 +123,8 @@ class BinaryOpICState FINAL BASE_EMBEDDED { return KindMaybeSmi(left_kind_) || KindMaybeSmi(right_kind_); } - enum { FIRST_TOKEN = Token::BIT_OR, LAST_TOKEN = Token::MOD }; + static const int FIRST_TOKEN = Token::BIT_OR; + static const int LAST_TOKEN = Token::MOD; Token::Value op() const { return op_; } OverwriteMode mode() const { return mode_; } diff --git a/src/ic/ic.cc b/src/ic/ic.cc index a821cd3..68c7cc2 100644 --- a/src/ic/ic.cc +++ b/src/ic/ic.cc @@ -2557,7 +2557,7 @@ MaybeHandle BinaryOpIC::Transition( target = stub.GetCode(); // Sanity check the generic stub. - DCHECK_NULL(target->FindFirstAllocationSite()); + DCHECK_EQ(NULL, target->FindFirstAllocationSite()); } set_target(*target); diff --git a/src/ic/x64/stub-cache-x64.cc b/src/ic/x64/stub-cache-x64.cc index 4be0d5b..f15635c 100644 --- a/src/ic/x64/stub-cache-x64.cc +++ b/src/ic/x64/stub-cache-x64.cc @@ -30,7 +30,7 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm, : kPointerSizeLog2 == StubCache::kCacheIndexShift); ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1; - DCHECK_EQ(3u * kPointerSize, sizeof(StubCache::Entry)); + DCHECK_EQ(3 * kPointerSize, sizeof(StubCache::Entry)); // The offset register holds the entry offset times four (due to masking // and shifting optimizations). ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); diff --git a/src/isolate.cc b/src/isolate.cc index 3479ae2..ea5e3a9 100644 --- a/src/isolate.cc +++ b/src/isolate.cc @@ -1560,7 +1560,7 @@ Isolate::ThreadDataTable::~ThreadDataTable() { // TODO(svenpanne) The assertion below would fire if an embedder does not // cleanly dispose all Isolates before disposing v8, so we are conservative // and leave it out for now. 
- // DCHECK_NULL(list_); + // DCHECK_EQ(NULL, list_); } diff --git a/src/jsregexp.cc b/src/jsregexp.cc index 63c7a50..1843597 100644 --- a/src/jsregexp.cc +++ b/src/jsregexp.cc @@ -3446,14 +3446,14 @@ int ChoiceNode::GreedyLoopTextLengthForAlternative( void LoopChoiceNode::AddLoopAlternative(GuardedAlternative alt) { - DCHECK_NULL(loop_node_); + DCHECK_EQ(loop_node_, NULL); AddAlternative(alt); loop_node_ = alt.node(); } void LoopChoiceNode::AddContinueAlternative(GuardedAlternative alt) { - DCHECK_NULL(continue_node_); + DCHECK_EQ(continue_node_, NULL); AddAlternative(alt); continue_node_ = alt.node(); } @@ -3473,7 +3473,7 @@ void LoopChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) { macro_assembler->GoTo(trace->loop_label()); return; } - DCHECK_NULL(trace->stop_node()); + DCHECK(trace->stop_node() == NULL); if (!trace->is_trivial()) { trace->Flush(compiler, this); return; @@ -5294,8 +5294,8 @@ void CharacterRange::Split(ZoneList* base, ZoneList** included, ZoneList** excluded, Zone* zone) { - DCHECK_NULL(*included); - DCHECK_NULL(*excluded); + DCHECK_EQ(NULL, *included); + DCHECK_EQ(NULL, *excluded); DispatchTable table(zone); for (int i = 0; i < base->length(); i++) table.AddRange(base->at(i), CharacterRangeSplitter::kInBase, zone); diff --git a/src/jsregexp.h b/src/jsregexp.h index 0b4f39d..d74b3bc 100644 --- a/src/jsregexp.h +++ b/src/jsregexp.h @@ -239,7 +239,7 @@ class CharacterRange { public: CharacterRange() : from_(0), to_(0) { } // For compatibility with the CHECK_OK macro - CharacterRange(void* null) { DCHECK_NULL(null); } // NOLINT + CharacterRange(void* null) { DCHECK_EQ(NULL, null); } //NOLINT CharacterRange(uc16 from, uc16 to) : from_(from), to_(to) { } static void AddClassEscape(uc16 type, ZoneList* ranges, Zone* zone); diff --git a/src/log.cc b/src/log.cc index 3f1c970..31460b6 100644 --- a/src/log.cc +++ b/src/log.cc @@ -271,7 +271,7 @@ PerfBasicLogger::PerfBasicLogger() CHECK_NE(size, -1); perf_output_handle_ = base::OS::FOpen(perf_dump_name.start(), base::OS::LogFileOpenMode); - CHECK_NOT_NULL(perf_output_handle_); + CHECK_NE(perf_output_handle_, NULL); setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize); } diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc index dc85538..b9c34ca 100644 --- a/src/mips/full-codegen-mips.cc +++ b/src/mips/full-codegen-mips.cc @@ -3795,7 +3795,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { void FullCodeGenerator::EmitDateField(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK(args->length() == 2); - DCHECK_NOT_NULL(args->at(1)->AsLiteral()); + DCHECK_NE(NULL, args->at(1)->AsLiteral()); Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); VisitForAccumulatorValue(args->at(0)); // Load the object. 
@@ -4161,7 +4161,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK_EQ(2, args->length()); - DCHECK_NOT_NULL(args->at(0)->AsLiteral()); + DCHECK_NE(NULL, args->at(0)->AsLiteral()); int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); Handle jsfunction_result_caches( diff --git a/src/mips64/full-codegen-mips64.cc b/src/mips64/full-codegen-mips64.cc index 00791c0..f86de38 100644 --- a/src/mips64/full-codegen-mips64.cc +++ b/src/mips64/full-codegen-mips64.cc @@ -3794,7 +3794,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { void FullCodeGenerator::EmitDateField(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK(args->length() == 2); - DCHECK_NOT_NULL(args->at(1)->AsLiteral()); + DCHECK_NE(NULL, args->at(1)->AsLiteral()); Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); VisitForAccumulatorValue(args->at(0)); // Load the object. @@ -4161,7 +4161,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK_EQ(2, args->length()); - DCHECK_NOT_NULL(args->at(0)->AsLiteral()); + DCHECK_NE(NULL, args->at(0)->AsLiteral()); int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); Handle jsfunction_result_caches( diff --git a/src/objects-debug.cc b/src/objects-debug.cc index a8cbe9c..e6c231e 100644 --- a/src/objects-debug.cc +++ b/src/objects-debug.cc @@ -207,7 +207,7 @@ void HeapObject::VerifyHeapPointer(Object* p) { void Symbol::SymbolVerify() { CHECK(IsSymbol()); CHECK(HasHashCode()); - CHECK_GT(Hash(), 0u); + CHECK_GT(Hash(), 0); CHECK(name()->IsUndefined() || name()->IsString()); CHECK(flags()->IsSmi()); } diff --git a/src/objects-inl.h b/src/objects-inl.h index b097bb1..a0e2a38 100644 --- a/src/objects-inl.h +++ b/src/objects-inl.h @@ -2951,7 +2951,7 @@ int LinearSearch(T* array, Name* name, int len, int valid_entries, return T::kNotFound; } else { DCHECK(len >= valid_entries); - DCHECK_NULL(out_insertion_index); // Not supported here. + DCHECK_EQ(NULL, out_insertion_index); // Not supported here. 
for (int number = 0; number < valid_entries; number++) { Name* entry = array->GetKey(number); uint32_t current_hash = entry->Hash(); diff --git a/src/objects.cc b/src/objects.cc index ac1a1b2..94a1228 100644 --- a/src/objects.cc +++ b/src/objects.cc @@ -9589,7 +9589,7 @@ FixedArray* SharedFunctionInfo::GetLiteralsFromOptimizedCodeMap(int index) { FixedArray* code_map = FixedArray::cast(optimized_code_map()); if (!bound()) { FixedArray* cached_literals = FixedArray::cast(code_map->get(index + 1)); - DCHECK_NOT_NULL(cached_literals); + DCHECK_NE(NULL, cached_literals); return cached_literals; } return NULL; @@ -9600,7 +9600,7 @@ Code* SharedFunctionInfo::GetCodeFromOptimizedCodeMap(int index) { DCHECK(index > kEntriesStart); FixedArray* code_map = FixedArray::cast(optimized_code_map()); Code* code = Code::cast(code_map->get(index)); - DCHECK_NOT_NULL(code); + DCHECK_NE(NULL, code); return code; } diff --git a/src/objects.h b/src/objects.h index 058f85d..93c3928 100644 --- a/src/objects.h +++ b/src/objects.h @@ -4791,7 +4791,7 @@ template class FixedTypedArray: public FixedTypedArrayBase { public: typedef typename Traits::ElementType ElementType; - enum { kInstanceType = Traits::kInstanceType }; + static const InstanceType kInstanceType = Traits::kInstanceType; DECLARE_CAST(FixedTypedArray) @@ -4823,17 +4823,17 @@ class FixedTypedArray: public FixedTypedArrayBase { DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArray); }; -#define FIXED_TYPED_ARRAY_TRAITS(Type, type, TYPE, elementType, size) \ - class Type##ArrayTraits { \ - public: /* NOLINT */ \ - typedef elementType ElementType; \ - enum { kInstanceType = FIXED_##TYPE##_ARRAY_TYPE }; \ - static const char* Designator() { return #type " array"; } \ - static inline Handle ToHandle(Isolate* isolate, \ - elementType scalar); \ - static inline elementType defaultValue(); \ - }; \ - \ +#define FIXED_TYPED_ARRAY_TRAITS(Type, type, TYPE, elementType, size) \ + class Type##ArrayTraits { \ + public: /* NOLINT */ \ + typedef elementType ElementType; \ + static const InstanceType kInstanceType = FIXED_##TYPE##_ARRAY_TYPE; \ + static const char* Designator() { return #type " array"; } \ + static inline Handle ToHandle(Isolate* isolate, \ + elementType scalar); \ + static inline elementType defaultValue(); \ + }; \ + \ typedef FixedTypedArray Fixed##Type##Array; TYPED_ARRAYS(FIXED_TYPED_ARRAY_TRAITS) diff --git a/src/optimizing-compiler-thread.cc b/src/optimizing-compiler-thread.cc index 5999df9..ed1e56a 100644 --- a/src/optimizing-compiler-thread.cc +++ b/src/optimizing-compiler-thread.cc @@ -102,7 +102,7 @@ OptimizingCompilerThread::~OptimizingCompilerThread() { if (FLAG_concurrent_osr) { #ifdef DEBUG for (int i = 0; i < osr_buffer_capacity_; i++) { - CHECK_NULL(osr_buffer_[i]); + CHECK_EQ(NULL, osr_buffer_[i]); } #endif DeleteArray(osr_buffer_); @@ -178,7 +178,7 @@ OptimizedCompileJob* OptimizingCompilerThread::NextInput(StopFlag* flag) { return NULL; } OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)]; - DCHECK_NOT_NULL(job); + DCHECK_NE(NULL, job); input_queue_shift_ = InputQueueIndex(1); input_queue_length_--; if (flag) { @@ -189,7 +189,7 @@ OptimizedCompileJob* OptimizingCompilerThread::NextInput(StopFlag* flag) { void OptimizingCompilerThread::CompileNext(OptimizedCompileJob* job) { - DCHECK_NOT_NULL(job); + DCHECK_NE(NULL, job); // The function may have already been optimized by OSR. Simply continue. 
OptimizedCompileJob::Status status = job->OptimizeGraph(); diff --git a/src/perf-jit.cc b/src/perf-jit.cc index 819fe4e..3f30e38 100644 --- a/src/perf-jit.cc +++ b/src/perf-jit.cc @@ -57,7 +57,7 @@ PerfJitLogger::PerfJitLogger() : perf_output_handle_(NULL), code_index_(0) { CHECK_NE(size, -1); perf_output_handle_ = base::OS::FOpen(perf_dump_name.start(), base::OS::LogFileOpenMode); - CHECK_NOT_NULL(perf_output_handle_); + CHECK_NE(perf_output_handle_, NULL); setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize); LogWriteHeader(); diff --git a/src/ppc/full-codegen-ppc.cc b/src/ppc/full-codegen-ppc.cc index e926d6e..a762d00 100644 --- a/src/ppc/full-codegen-ppc.cc +++ b/src/ppc/full-codegen-ppc.cc @@ -3753,7 +3753,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { void FullCodeGenerator::EmitDateField(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK(args->length() == 2); - DCHECK_NOT_NULL(args->at(1)->AsLiteral()); + DCHECK_NE(NULL, args->at(1)->AsLiteral()); Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); VisitForAccumulatorValue(args->at(0)); // Load the object. @@ -4089,7 +4089,7 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) { void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK_EQ(2, args->length()); - DCHECK_NOT_NULL(args->at(0)->AsLiteral()); + DCHECK_NE(NULL, args->at(0)->AsLiteral()); int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); Handle jsfunction_result_caches( diff --git a/src/runtime/runtime-array.cc b/src/runtime/runtime-array.cc index d1c1f30..0ba1fa6 100644 --- a/src/runtime/runtime-array.cc +++ b/src/runtime/runtime-array.cc @@ -851,7 +851,7 @@ RUNTIME_FUNCTION(Runtime_ArrayConcat) { case FAST_HOLEY_ELEMENTS: case FAST_ELEMENTS: case DICTIONARY_ELEMENTS: - DCHECK_EQ(0u, length); + DCHECK_EQ(0, length); break; default: UNREACHABLE(); diff --git a/src/serialize.cc b/src/serialize.cc index f4849f3..0c9df8e 100644 --- a/src/serialize.cc +++ b/src/serialize.cc @@ -95,12 +95,12 @@ void ExternalReferenceTable::Add(Address address, TypeCode type, uint16_t id, const char* name) { - DCHECK_NOT_NULL(address); + DCHECK_NE(NULL, address); ExternalReferenceEntry entry; entry.address = address; entry.code = EncodeExternal(type, id); entry.name = name; - DCHECK_NE(0u, entry.code); + DCHECK_NE(0, entry.code); // Assert that the code is added in ascending order to rule out duplicates. DCHECK((size() == 0) || (code(size() - 1) < entry.code)); refs_.Add(entry); @@ -647,10 +647,10 @@ bool Deserializer::ReserveSpace() { void Deserializer::Initialize(Isolate* isolate) { - DCHECK_NULL(isolate_); - DCHECK_NOT_NULL(isolate); + DCHECK_EQ(NULL, isolate_); + DCHECK_NE(NULL, isolate); isolate_ = isolate; - DCHECK_NULL(external_reference_decoder_); + DCHECK_EQ(NULL, external_reference_decoder_); external_reference_decoder_ = new ExternalReferenceDecoder(isolate); } @@ -659,7 +659,7 @@ void Deserializer::Deserialize(Isolate* isolate) { Initialize(isolate); if (!ReserveSpace()) FatalProcessOutOfMemory("deserializing context"); // No active threads. - DCHECK_NULL(isolate_->thread_manager()->FirstThreadStateInUse()); + DCHECK_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse()); // No active handles. 
DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty()); isolate_->heap()->IterateSmiRoots(this); @@ -925,7 +925,7 @@ Address Deserializer::Allocate(int space_index, int size) { } else { DCHECK(space_index < kNumberOfPreallocatedSpaces); Address address = high_water_[space_index]; - DCHECK_NOT_NULL(address); + DCHECK_NE(NULL, address); high_water_[space_index] += size; #ifdef DEBUG // Assert that the current reserved chunk is still big enough. @@ -1366,7 +1366,7 @@ Serializer::~Serializer() { void StartupSerializer::SerializeStrongReferences() { Isolate* isolate = this->isolate(); // No active threads. - CHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse()); + CHECK_EQ(NULL, isolate->thread_manager()->FirstThreadStateInUse()); // No active or weak handles. CHECK(isolate->handle_scope_implementer()->blocks()->is_empty()); CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles()); diff --git a/src/serialize.h b/src/serialize.h index 9aaf381..cc267aa 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -275,7 +275,7 @@ class BackReferenceMap : public AddressMapBase { void Add(HeapObject* obj, BackReference b) { DCHECK(b.is_valid()); - DCHECK_NULL(LookupEntry(map_, obj, false)); + DCHECK_EQ(NULL, LookupEntry(map_, obj, false)); HashMap::Entry* entry = LookupEntry(map_, obj, true); SetValue(entry, b.bitfield()); } @@ -307,7 +307,7 @@ class HotObjectsList { } HeapObject* Get(int index) { - DCHECK_NOT_NULL(circular_queue_[index]); + DCHECK_NE(NULL, circular_queue_[index]); return circular_queue_[index]; } diff --git a/src/unique.h b/src/unique.h index b56ee84..321eb36 100644 --- a/src/unique.h +++ b/src/unique.h @@ -49,7 +49,7 @@ class Unique { // TODO(titzer): other immortable immovable objects are also fine. DCHECK(!AllowHeapAllocation::IsAllowed() || handle->IsMap()); raw_address_ = reinterpret_cast
(*handle); - DCHECK_NOT_NULL(raw_address_); // Non-null should imply non-zero address. + DCHECK_NE(raw_address_, NULL); // Non-null should imply non-zero address. } handle_ = handle; } diff --git a/src/v8.h b/src/v8.h index 17398ed..4922a4d 100644 --- a/src/v8.h +++ b/src/v8.h @@ -73,7 +73,7 @@ class V8 : public AllStatic { } static void SetArrayBufferAllocator(v8::ArrayBuffer::Allocator *allocator) { - CHECK_NULL(array_buffer_allocator_); + CHECK_EQ(NULL, array_buffer_allocator_); array_buffer_allocator_ = allocator; } diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc index ded6c33..75adc89 100644 --- a/src/x64/disasm-x64.cc +++ b/src/x64/disasm-x64.cc @@ -803,7 +803,7 @@ int DisassemblerX64::ShiftInstruction(byte* data) { UnimplementedInstruction(); return count + 1; } - DCHECK_NOT_NULL(mnem); + DCHECK_NE(NULL, mnem); AppendToBuffer("%s%c ", mnem, operand_size_code()); } count += PrintRightOperand(data + count); diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc index db49e75..acdb08f 100644 --- a/src/x64/full-codegen-x64.cc +++ b/src/x64/full-codegen-x64.cc @@ -3706,7 +3706,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { void FullCodeGenerator::EmitDateField(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK(args->length() == 2); - DCHECK_NOT_NULL(args->at(1)->AsLiteral()); + DCHECK_NE(NULL, args->at(1)->AsLiteral()); Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); VisitForAccumulatorValue(args->at(0)); // Load the object. @@ -4058,7 +4058,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK_EQ(2, args->length()); - DCHECK_NOT_NULL(args->at(0)->AsLiteral()); + DCHECK_NE(NULL, args->at(0)->AsLiteral()); int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); Handle jsfunction_result_caches( diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc index d81a00f..3a477fc 100644 --- a/src/x64/macro-assembler-x64.cc +++ b/src/x64/macro-assembler-x64.cc @@ -2198,7 +2198,7 @@ void MacroAssembler::SelectNonSmi(Register dst, Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi); #endif STATIC_ASSERT(kSmiTag == 0); - DCHECK_EQ(static_cast(0), Smi::FromInt(0)); + DCHECK_EQ(0, Smi::FromInt(0)); movl(kScratchRegister, Immediate(kSmiTagMask)); andp(kScratchRegister, src1); testl(kScratchRegister, src2); diff --git a/src/x87/full-codegen-x87.cc b/src/x87/full-codegen-x87.cc index 9680974..b6b501b 100644 --- a/src/x87/full-codegen-x87.cc +++ b/src/x87/full-codegen-x87.cc @@ -3648,7 +3648,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { void FullCodeGenerator::EmitDateField(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK(args->length() == 2); - DCHECK_NOT_NULL(args->at(1)->AsLiteral()); + DCHECK_NE(NULL, args->at(1)->AsLiteral()); Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value())); VisitForAccumulatorValue(args->at(0)); // Load the object.
@@ -4003,7 +4003,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList* args = expr->arguments(); DCHECK_EQ(2, args->length()); - DCHECK_NOT_NULL(args->at(0)->AsLiteral()); + DCHECK_NE(NULL, args->at(0)->AsLiteral()); int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value(); Handle jsfunction_result_caches( diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp index fc98256..de5fedb 100644 --- a/test/cctest/cctest.gyp +++ b/test/cctest/cctest.gyp @@ -103,6 +103,7 @@ 'test-bignum.cc', 'test-bignum-dtoa.cc', 'test-bit-vector.cc', + 'test-checks.cc', 'test-circular-queue.cc', 'test-compiler.cc', 'test-constantpool.cc', diff --git a/test/cctest/cctest.h b/test/cctest/cctest.h index 2f33d13..81a0dd9 100644 --- a/test/cctest/cctest.h +++ b/test/cctest/cctest.h @@ -440,7 +440,7 @@ static inline void ExpectString(const char* code, const char* expected) { v8::Local result = CompileRun(code); CHECK(result->IsString()); v8::String::Utf8Value utf8(result); - CHECK_EQ(0, strcmp(expected, *utf8)); + CHECK_EQ(expected, *utf8); } @@ -557,7 +557,7 @@ class HeapObjectsTracker { public: HeapObjectsTracker() { heap_profiler_ = i::Isolate::Current()->heap_profiler(); - CHECK_NOT_NULL(heap_profiler_); + CHECK_NE(NULL, heap_profiler_); heap_profiler_->StartHeapObjectsTracking(true); } diff --git a/test/cctest/compiler/codegen-tester.cc b/test/cctest/compiler/codegen-tester.cc index d05b282..b20da87 100644 --- a/test/cctest/compiler/codegen-tester.cc +++ b/test/cctest/compiler/codegen-tester.cc @@ -373,9 +373,9 @@ void Int32BinopInputShapeTester::RunRight( TEST(ParametersEqual) { RawMachineAssemblerTester m(kMachInt32, kMachInt32); Node* p1 = m.Parameter(1); - CHECK(p1); + CHECK_NE(NULL, p1); Node* p0 = m.Parameter(0); - CHECK(p0); + CHECK_NE(NULL, p0); CHECK_EQ(p0, m.Parameter(0)); CHECK_EQ(p1, m.Parameter(1)); } @@ -561,7 +561,7 @@ TEST(RunBinopTester) { Float64BinopTester bt(&m); bt.AddReturn(bt.param0); - FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(*i, bt.call(*i, 9.0)); } + FOR_FLOAT64_INPUTS(i) { CHECK_EQ(*i, bt.call(*i, 9.0)); } } { @@ -569,7 +569,7 @@ TEST(RunBinopTester) { Float64BinopTester bt(&m); bt.AddReturn(bt.param1); - FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(*i, bt.call(-11.25, *i)); } + FOR_FLOAT64_INPUTS(i) { CHECK_EQ(*i, bt.call(-11.25, *i)); } } } diff --git a/test/cctest/compiler/codegen-tester.h b/test/cctest/compiler/codegen-tester.h index d45d1fd..18903fd 100644 --- a/test/cctest/compiler/codegen-tester.h +++ b/test/cctest/compiler/codegen-tester.h @@ -332,16 +332,6 @@ class Int32BinopInputShapeTester { void RunLeft(RawMachineAssemblerTester* m); void RunRight(RawMachineAssemblerTester* m); }; - -// TODO(bmeurer): Drop this crap once we switch to GTest/Gmock. 
-static inline void CheckDoubleEq(volatile double x, volatile double y) { - if (std::isnan(x)) { - CHECK(std::isnan(y)); - } else { - CHECK_EQ(x, y); - } -} - } // namespace compiler } // namespace internal } // namespace v8 diff --git a/test/cctest/compiler/function-tester.h b/test/cctest/compiler/function-tester.h index 7cb118a..7e16eea 100644 --- a/test/cctest/compiler/function-tester.h +++ b/test/cctest/compiler/function-tester.h @@ -36,7 +36,7 @@ class FunctionTester : public InitializedHandleScope { const uint32_t supported_flags = CompilationInfo::kContextSpecializing | CompilationInfo::kInliningEnabled | CompilationInfo::kTypingEnabled; - CHECK_EQ(0u, flags_ & ~supported_flags); + CHECK_EQ(0, flags_ & ~supported_flags); } explicit FunctionTester(Graph* graph) diff --git a/test/cctest/compiler/graph-builder-tester.cc b/test/cctest/compiler/graph-builder-tester.cc index 38bc633..de39410 100644 --- a/test/cctest/compiler/graph-builder-tester.cc +++ b/test/cctest/compiler/graph-builder-tester.cc @@ -21,7 +21,7 @@ MachineCallHelper::MachineCallHelper(Isolate* isolate, void MachineCallHelper::InitParameters(GraphBuilder* builder, CommonOperatorBuilder* common) { - DCHECK(!parameters_); + DCHECK_EQ(NULL, parameters_); graph_ = builder->graph(); int param_count = static_cast(parameter_count()); if (param_count == 0) return; @@ -46,7 +46,7 @@ byte* MachineCallHelper::Generate() { Node* MachineCallHelper::Parameter(size_t index) { - DCHECK(parameters_); + DCHECK_NE(NULL, parameters_); DCHECK(index < parameter_count()); return parameters_[index]; } diff --git a/test/cctest/compiler/test-basic-block-profiler.cc b/test/cctest/compiler/test-basic-block-profiler.cc index fa4da9a..703fc17 100644 --- a/test/cctest/compiler/test-basic-block-profiler.cc +++ b/test/cctest/compiler/test-basic-block-profiler.cc @@ -24,7 +24,7 @@ class BasicBlockProfilerTest : public RawMachineAssemblerTester { void ResetCounts() { isolate()->basic_block_profiler()->ResetCounts(); } void Expect(size_t size, uint32_t* expected) { - CHECK(isolate()->basic_block_profiler()); + CHECK_NE(NULL, isolate()->basic_block_profiler()); const BasicBlockProfiler::DataList* l = isolate()->basic_block_profiler()->data_list(); CHECK_NE(0, static_cast(l->size())); diff --git a/test/cctest/compiler/test-changes-lowering.cc b/test/cctest/compiler/test-changes-lowering.cc index c416420..2c76461 100644 --- a/test/cctest/compiler/test-changes-lowering.cc +++ b/test/cctest/compiler/test-changes-lowering.cc @@ -242,13 +242,13 @@ TEST(RunChangeTaggedToFloat64) { { Handle number = t.factory()->NewNumber(input); t.Call(*number); - CheckDoubleEq(input, result); + CHECK_EQ(input, result); } { Handle number = t.factory()->NewHeapNumber(input); t.Call(*number); - CheckDoubleEq(input, result); + CHECK_EQ(input, result); } } } diff --git a/test/cctest/compiler/test-control-reducer.cc b/test/cctest/compiler/test-control-reducer.cc index 84b6b4b..5f56a8f 100644 --- a/test/cctest/compiler/test-control-reducer.cc +++ b/test/cctest/compiler/test-control-reducer.cc @@ -221,7 +221,7 @@ TEST(Trim1_dead) { CHECK(IsUsedBy(T.start, T.p0)); T.Trim(); CHECK(!IsUsedBy(T.start, T.p0)); - CHECK(!T.p0->InputAt(0)); + CHECK_EQ(NULL, T.p0->InputAt(0)); } @@ -252,9 +252,9 @@ TEST(Trim2_dead) { CHECK(!IsUsedBy(T.one, phi)); CHECK(!IsUsedBy(T.half, phi)); CHECK(!IsUsedBy(T.start, phi)); - CHECK(!phi->InputAt(0)); - CHECK(!phi->InputAt(1)); - CHECK(!phi->InputAt(2)); + CHECK_EQ(NULL, phi->InputAt(0)); + CHECK_EQ(NULL, phi->InputAt(1)); + CHECK_EQ(NULL, phi->InputAt(2)); } @@ 
-274,7 +274,7 @@ TEST(Trim_chain1) { T.Trim(); for (int i = 0; i < kDepth; i++) { CHECK(!IsUsedBy(live[i], dead[i])); - CHECK(!dead[i]->InputAt(0)); + CHECK_EQ(NULL, dead[i]->InputAt(0)); CHECK_EQ(i == 0 ? T.start : live[i - 1], live[i]->InputAt(0)); } } @@ -354,9 +354,9 @@ TEST(Trim_cycle2) { CHECK(!IsUsedBy(loop, phi)); CHECK(!IsUsedBy(T.one, phi)); CHECK(!IsUsedBy(T.half, phi)); - CHECK(!phi->InputAt(0)); - CHECK(!phi->InputAt(1)); - CHECK(!phi->InputAt(2)); + CHECK_EQ(NULL, phi->InputAt(0)); + CHECK_EQ(NULL, phi->InputAt(1)); + CHECK_EQ(NULL, phi->InputAt(2)); } @@ -365,8 +365,8 @@ void CheckTrimConstant(ControlReducerTester* T, Node* k) { CHECK(IsUsedBy(k, phi)); T->Trim(); CHECK(!IsUsedBy(k, phi)); - CHECK(!phi->InputAt(0)); - CHECK(!phi->InputAt(1)); + CHECK_EQ(NULL, phi->InputAt(0)); + CHECK_EQ(NULL, phi->InputAt(1)); } @@ -954,7 +954,7 @@ TEST(CMergeReduce_dead_chain1) { R.graph.SetEnd(end); R.ReduceGraph(); CHECK(merge->IsDead()); - CHECK(!end->InputAt(0)); // end dies. + CHECK_EQ(NULL, end->InputAt(0)); // end dies. } } diff --git a/test/cctest/compiler/test-instruction.cc b/test/cctest/compiler/test-instruction.cc index a884d28..47e6610 100644 --- a/test/cctest/compiler/test-instruction.cc +++ b/test/cctest/compiler/test-instruction.cc @@ -135,7 +135,7 @@ TEST(InstructionBasic) { for (auto block : *blocks) { CHECK_EQ(block->rpo_number(), R.BlockAt(block)->rpo_number().ToInt()); CHECK_EQ(block->id().ToInt(), R.BlockAt(block)->id().ToInt()); - CHECK(!block->loop_end()); + CHECK_EQ(NULL, block->loop_end()); } } @@ -278,7 +278,7 @@ TEST(InstructionAddGapMove) { R.code->AddGapMove(index, op1, op2); GapInstruction* gap = R.code->GapAt(index); ParallelMove* move = gap->GetParallelMove(GapInstruction::START); - CHECK(move); + CHECK_NE(NULL, move); const ZoneList* move_operands = move->move_operands(); CHECK_EQ(1, move_operands->length()); MoveOperands* cur = &move_operands->at(0); diff --git a/test/cctest/compiler/test-js-constant-cache.cc b/test/cctest/compiler/test-js-constant-cache.cc index 630f911..60cbb8d 100644 --- a/test/cctest/compiler/test-js-constant-cache.cc +++ b/test/cctest/compiler/test-js-constant-cache.cc @@ -103,10 +103,10 @@ TEST(MinusZeroConstant) { double zero_value = OpParameter(zero); double minus_zero_value = OpParameter(minus_zero); - CHECK(bit_cast(0.0) == bit_cast(zero_value)); - CHECK(bit_cast(-0.0) != bit_cast(zero_value)); - CHECK(bit_cast(0.0) != bit_cast(minus_zero_value)); - CHECK(bit_cast(-0.0) == bit_cast(minus_zero_value)); + CHECK_EQ(0.0, zero_value); + CHECK_NE(-0.0, zero_value); + CHECK_EQ(-0.0, minus_zero_value); + CHECK_NE(0.0, minus_zero_value); } diff --git a/test/cctest/compiler/test-js-typed-lowering.cc b/test/cctest/compiler/test-js-typed-lowering.cc index cf252c4..e1b8a15 100644 --- a/test/cctest/compiler/test-js-typed-lowering.cc +++ b/test/cctest/compiler/test-js-typed-lowering.cc @@ -797,7 +797,7 @@ TEST(RemoveToNumberEffects) { } } - CHECK(!effect_use); // should have done all cases above. + CHECK_EQ(NULL, effect_use); // should have done all cases above. 
} diff --git a/test/cctest/compiler/test-linkage.cc b/test/cctest/compiler/test-linkage.cc index cb74deb..b21965d 100644 --- a/test/cctest/compiler/test-linkage.cc +++ b/test/cctest/compiler/test-linkage.cc @@ -62,7 +62,7 @@ TEST(TestLinkageJSFunctionIncoming) { Linkage linkage(info.zone(), &info); CallDescriptor* descriptor = linkage.GetIncomingDescriptor(); - CHECK(descriptor); + CHECK_NE(NULL, descriptor); CHECK_EQ(1 + i, static_cast(descriptor->JSParameterCount())); CHECK_EQ(1, static_cast(descriptor->ReturnCount())); @@ -78,7 +78,7 @@ TEST(TestLinkageCodeStubIncoming) { Linkage linkage(info.zone(), &info); // TODO(titzer): test linkage creation with a bonafide code stub. // this just checks current behavior. - CHECK(!linkage.GetIncomingDescriptor()); + CHECK_EQ(NULL, linkage.GetIncomingDescriptor()); } @@ -91,7 +91,7 @@ TEST(TestLinkageJSCall) { for (int i = 0; i < 32; i++) { CallDescriptor* descriptor = linkage.GetJSCallDescriptor(i, CallDescriptor::kNoFlags); - CHECK(descriptor); + CHECK_NE(NULL, descriptor); CHECK_EQ(i, static_cast(descriptor->JSParameterCount())); CHECK_EQ(1, static_cast(descriptor->ReturnCount())); CHECK_EQ(Operator::kNoProperties, descriptor->properties()); diff --git a/test/cctest/compiler/test-loop-analysis.cc b/test/cctest/compiler/test-loop-analysis.cc index 87746ec..71708aa 100644 --- a/test/cctest/compiler/test-loop-analysis.cc +++ b/test/cctest/compiler/test-loop-analysis.cc @@ -136,7 +136,7 @@ class LoopFinderTester : HandleAndZoneScope { void CheckLoop(Node** header, int header_count, Node** body, int body_count) { LoopTree* tree = GetLoopTree(); LoopTree::Loop* loop = tree->ContainingLoop(header[0]); - CHECK(loop); + CHECK_NE(NULL, loop); CHECK(header_count == static_cast(loop->HeaderSize())); for (int i = 0; i < header_count; i++) { @@ -164,7 +164,7 @@ class LoopFinderTester : HandleAndZoneScope { Node* header = chain[i]; // Each header should be in a loop. LoopTree::Loop* loop = tree->ContainingLoop(header); - CHECK(loop); + CHECK_NE(NULL, loop); // Check parentage. LoopTree::Loop* parent = i == 0 ? NULL : tree->ContainingLoop(chain[i - 1]); diff --git a/test/cctest/compiler/test-loop-assignment-analysis.cc b/test/cctest/compiler/test-loop-assignment-analysis.cc index 6f37964..aabd95b 100644 --- a/test/cctest/compiler/test-loop-assignment-analysis.cc +++ b/test/cctest/compiler/test-loop-assignment-analysis.cc @@ -37,18 +37,18 @@ struct TestHelper : public HandleAndZoneScope { Scope* scope = info.function()->scope(); AstValueFactory* factory = info.ast_value_factory(); - CHECK(scope); + CHECK_NE(NULL, scope); if (result == NULL) { AstLoopAssignmentAnalyzer analyzer(main_zone(), &info); result = analyzer.Analyze(); - CHECK(result); + CHECK_NE(NULL, result); } const i::AstRawString* name = factory->GetOneByteString(var_name); i::Variable* var = scope->Lookup(name); - CHECK(var); + CHECK_NE(NULL, var); if (var->location() == Variable::UNALLOCATED) { CHECK_EQ(0, expected); diff --git a/test/cctest/compiler/test-machine-operator-reducer.cc b/test/cctest/compiler/test-machine-operator-reducer.cc index 7ee5751..c460456 100644 --- a/test/cctest/compiler/test-machine-operator-reducer.cc +++ b/test/cctest/compiler/test-machine-operator-reducer.cc @@ -100,7 +100,7 @@ class ReducerTester : public HandleAndZoneScope { // the {expect} value. 
template void CheckFoldBinop(volatile T expect, Node* a, Node* b) { - CHECK(binop); + CHECK_NE(NULL, binop); Node* n = CreateBinopNode(a, b); MachineOperatorReducer reducer(&jsgraph); Reduction reduction = reducer.Reduce(n); @@ -112,7 +112,7 @@ class ReducerTester : public HandleAndZoneScope { // Check that the reduction of this binop applied to {a} and {b} yields // the {expect} node. void CheckBinop(Node* expect, Node* a, Node* b) { - CHECK(binop); + CHECK_NE(NULL, binop); Node* n = CreateBinopNode(a, b); MachineOperatorReducer reducer(&jsgraph); Reduction reduction = reducer.Reduce(n); @@ -124,7 +124,7 @@ class ReducerTester : public HandleAndZoneScope { // this binop applied to {left_expect} and {right_expect}. void CheckFoldBinop(Node* left_expect, Node* right_expect, Node* left, Node* right) { - CHECK(binop); + CHECK_NE(NULL, binop); Node* n = CreateBinopNode(left, right); MachineOperatorReducer reducer(&jsgraph); Reduction reduction = reducer.Reduce(n); @@ -139,7 +139,7 @@ class ReducerTester : public HandleAndZoneScope { template void CheckFoldBinop(volatile T left_expect, const Operator* op_expect, Node* right_expect, Node* left, Node* right) { - CHECK(binop); + CHECK_NE(NULL, binop); Node* n = CreateBinopNode(left, right); MachineOperatorReducer reducer(&jsgraph); Reduction r = reducer.Reduce(n); @@ -154,7 +154,7 @@ class ReducerTester : public HandleAndZoneScope { template void CheckFoldBinop(Node* left_expect, const Operator* op_expect, volatile T right_expect, Node* left, Node* right) { - CHECK(binop); + CHECK_NE(NULL, binop); Node* n = CreateBinopNode(left, right); MachineOperatorReducer reducer(&jsgraph); Reduction r = reducer.Reduce(n); @@ -723,6 +723,133 @@ TEST(ReduceLoadStore) { } +static void CheckNans(ReducerTester* R) { + Node* x = R->Parameter(); + std::vector nans = ValueHelper::nan_vector(); + for (std::vector::const_iterator pl = nans.begin(); pl != nans.end(); + ++pl) { + for (std::vector::const_iterator pr = nans.begin(); + pr != nans.end(); ++pr) { + Node* nan1 = R->Constant(*pl); + Node* nan2 = R->Constant(*pr); + R->CheckBinop(nan1, x, nan1); // x op NaN => NaN + R->CheckBinop(nan1, nan1, x); // NaN op x => NaN + R->CheckBinop(nan1, nan2, nan1); // NaN op NaN => NaN + } + } +} + + +TEST(ReduceFloat64Add) { + ReducerTester R; + R.binop = R.machine.Float64Add(); + + FOR_FLOAT64_INPUTS(pl) { + FOR_FLOAT64_INPUTS(pr) { + double x = *pl, y = *pr; + R.CheckFoldBinop(x + y, x, y); + } + } + + FOR_FLOAT64_INPUTS(i) { + Double tmp(*i); + if (!tmp.IsSpecial() || tmp.IsInfinite()) { + // Don't check NaNs as they are reduced more. 
+ R.CheckPutConstantOnRight(*i); + } + } + + CheckNans(&R); +} + + +TEST(ReduceFloat64Sub) { + ReducerTester R; + R.binop = R.machine.Float64Sub(); + + FOR_FLOAT64_INPUTS(pl) { + FOR_FLOAT64_INPUTS(pr) { + double x = *pl, y = *pr; + R.CheckFoldBinop(x - y, x, y); + } + } + + Node* zero = R.Constant(0.0); + Node* x = R.Parameter(); + + R.CheckBinop(x, x, zero); // x - 0.0 => x + + CheckNans(&R); +} + + +TEST(ReduceFloat64Mul) { + ReducerTester R; + R.binop = R.machine.Float64Mul(); + + FOR_FLOAT64_INPUTS(pl) { + FOR_FLOAT64_INPUTS(pr) { + double x = *pl, y = *pr; + R.CheckFoldBinop(x * y, x, y); + } + } + + double inf = V8_INFINITY; + R.CheckPutConstantOnRight(-inf); + R.CheckPutConstantOnRight(-0.1); + R.CheckPutConstantOnRight(0.1); + R.CheckPutConstantOnRight(inf); + + Node* x = R.Parameter(); + Node* one = R.Constant(1.0); + + R.CheckBinop(x, x, one); // x * 1.0 => x + R.CheckBinop(x, one, x); // 1.0 * x => x + + CheckNans(&R); +} + + +TEST(ReduceFloat64Div) { + ReducerTester R; + R.binop = R.machine.Float64Div(); + + FOR_FLOAT64_INPUTS(pl) { + FOR_FLOAT64_INPUTS(pr) { + double x = *pl, y = *pr; + R.CheckFoldBinop(x / y, x, y); + } + } + + Node* x = R.Parameter(); + Node* one = R.Constant(1.0); + + R.CheckBinop(x, x, one); // x / 1.0 => x + + CheckNans(&R); +} + + +TEST(ReduceFloat64Mod) { + ReducerTester R; + R.binop = R.machine.Float64Mod(); + + FOR_FLOAT64_INPUTS(pl) { + FOR_FLOAT64_INPUTS(pr) { + double x = *pl, y = *pr; + R.CheckFoldBinop(modulo(x, y), x, y); + } + } + + Node* x = R.Parameter(); + Node* zero = R.Constant(0.0); + + R.CheckFoldBinop(std::numeric_limits::quiet_NaN(), x, zero); + + CheckNans(&R); +} + + // TODO(titzer): test MachineOperatorReducer for Word64And // TODO(titzer): test MachineOperatorReducer for Word64Or // TODO(titzer): test MachineOperatorReducer for Word64Xor @@ -743,8 +870,3 @@ TEST(ReduceLoadStore) { // TODO(titzer): test MachineOperatorReducer for ChangeInt32ToFloat64 // TODO(titzer): test MachineOperatorReducer for ChangeFloat64ToInt32 // TODO(titzer): test MachineOperatorReducer for Float64Compare -// TODO(titzer): test MachineOperatorReducer for Float64Add -// TODO(titzer): test MachineOperatorReducer for Float64Sub -// TODO(titzer): test MachineOperatorReducer for Float64Mul -// TODO(titzer): test MachineOperatorReducer for Float64Div -// TODO(titzer): test MachineOperatorReducer for Float64Mod diff --git a/test/cctest/compiler/test-node-cache.cc b/test/cctest/compiler/test-node-cache.cc index b11e859..835c028 100644 --- a/test/cctest/compiler/test-node-cache.cc +++ b/test/cctest/compiler/test-node-cache.cc @@ -17,7 +17,7 @@ TEST(Int32Constant_back_to_back) { for (int i = -2000000000; i < 2000000000; i += 3315177) { Node** pos = cache.Find(graph.zone(), i); - CHECK(pos); + CHECK_NE(NULL, pos); for (int j = 0; j < 3; j++) { Node** npos = cache.Find(graph.zone(), i); CHECK_EQ(pos, npos); @@ -80,7 +80,7 @@ TEST(Int64Constant_back_to_back) { for (int64_t i = -2000000000; i < 2000000000; i += 3315177) { Node** pos = cache.Find(graph.zone(), i); - CHECK(pos); + CHECK_NE(NULL, pos); for (int j = 0; j < 3; j++) { Node** npos = cache.Find(graph.zone(), i); CHECK_EQ(pos, npos); diff --git a/test/cctest/compiler/test-node.cc b/test/cctest/compiler/test-node.cc index 2c51e26..23238da 100644 --- a/test/cctest/compiler/test-node.cc +++ b/test/cctest/compiler/test-node.cc @@ -632,15 +632,15 @@ TEST(RemoveAllInputs) { n1->RemoveAllInputs(); CHECK_EQ(1, n1->InputCount()); CHECK_EQ(1, n0->UseCount()); - CHECK(!n1->InputAt(0)); + CHECK_EQ(NULL, n1->InputAt(0)); 
CHECK_EQ(1, n1->UseCount()); n2->RemoveAllInputs(); CHECK_EQ(2, n2->InputCount()); CHECK_EQ(0, n0->UseCount()); CHECK_EQ(0, n1->UseCount()); - CHECK(!n2->InputAt(0)); - CHECK(!n2->InputAt(1)); + CHECK_EQ(NULL, n2->InputAt(0)); + CHECK_EQ(NULL, n2->InputAt(1)); } { @@ -653,6 +653,6 @@ TEST(RemoveAllInputs) { n1->RemoveAllInputs(); CHECK_EQ(1, n1->InputCount()); CHECK_EQ(0, n1->UseCount()); - CHECK(!n1->InputAt(0)); + CHECK_EQ(NULL, n1->InputAt(0)); } } diff --git a/test/cctest/compiler/test-operator.cc b/test/cctest/compiler/test-operator.cc index e635da7..39f660f 100644 --- a/test/cctest/compiler/test-operator.cc +++ b/test/cctest/compiler/test-operator.cc @@ -80,14 +80,14 @@ TEST(TestOperator_Print) { Operator op1a(19, NONE, "Another1", 0, 0, 0, 0, 0, 0); Operator op1b(19, FOLD, "Another2", 2, 0, 0, 2, 0, 0); - CHECK_EQ(0, strcmp("Another1", OperatorToString(&op1a).get())); - CHECK_EQ(0, strcmp("Another2", OperatorToString(&op1b).get())); + CHECK_EQ("Another1", OperatorToString(&op1a).get()); + CHECK_EQ("Another2", OperatorToString(&op1b).get()); Operator op2a(20, NONE, "Flog1", 0, 0, 0, 0, 0, 0); Operator op2b(20, FOLD, "Flog2", 1, 0, 0, 1, 0, 0); - CHECK_EQ(0, strcmp("Flog1", OperatorToString(&op2a).get())); - CHECK_EQ(0, strcmp("Flog2", OperatorToString(&op2b).get())); + CHECK_EQ("Flog1", OperatorToString(&op2a).get()); + CHECK_EQ("Flog2", OperatorToString(&op2b).get()); } @@ -148,16 +148,16 @@ TEST(TestOperator1int_Equals) { TEST(TestOperator1int_Print) { Operator1 op1(12, NONE, "Op1Test", 0, 0, 0, 1, 0, 0, 0); - CHECK_EQ(0, strcmp("Op1Test[0]", OperatorToString(&op1).get())); + CHECK_EQ("Op1Test[0]", OperatorToString(&op1).get()); Operator1 op2(12, NONE, "Op1Test", 0, 0, 0, 1, 0, 0, 66666666); - CHECK_EQ(0, strcmp("Op1Test[66666666]", OperatorToString(&op2).get())); + CHECK_EQ("Op1Test[66666666]", OperatorToString(&op2).get()); Operator1 op3(12, NONE, "FooBar", 0, 0, 0, 1, 0, 0, 2347); - CHECK_EQ(0, strcmp("FooBar[2347]", OperatorToString(&op3).get())); + CHECK_EQ("FooBar[2347]", OperatorToString(&op3).get()); Operator1 op4(12, NONE, "BarFoo", 0, 0, 0, 1, 0, 0, -879); - CHECK_EQ(0, strcmp("BarFoo[-879]", OperatorToString(&op4).get())); + CHECK_EQ("BarFoo[-879]", OperatorToString(&op4).get()); } @@ -179,8 +179,8 @@ TEST(TestOperator1doublePrint) { Operator1 op1a(23, NONE, "Canary", 0, 0, 0, 0, 0, 0, 0.5); Operator1 op1b(23, FOLD, "Finch", 2, 0, 0, 2, 0, 0, -1.5); - CHECK_EQ(0, strcmp("Canary[0.5]", OperatorToString(&op1a).get())); - CHECK_EQ(0, strcmp("Finch[-1.5]", OperatorToString(&op1b).get())); + CHECK_EQ("Canary[0.5]", OperatorToString(&op1a).get()); + CHECK_EQ("Finch[-1.5]", OperatorToString(&op1b).get()); } diff --git a/test/cctest/compiler/test-representation-change.cc b/test/cctest/compiler/test-representation-change.cc index 55f054a..f44648e 100644 --- a/test/cctest/compiler/test-representation-change.cc +++ b/test/cctest/compiler/test-representation-change.cc @@ -6,7 +6,6 @@ #include "src/v8.h" #include "test/cctest/cctest.h" -#include "test/cctest/compiler/codegen-tester.h" #include "test/cctest/compiler/graph-builder-tester.h" #include "test/cctest/compiler/value-helper.h" @@ -59,7 +58,7 @@ class RepresentationChangerTester : public HandleAndZoneScope, void CheckFloat64Constant(Node* n, double expected) { Float64Matcher m(n); CHECK(m.HasValue()); - CheckDoubleEq(expected, m.Value()); + CHECK_EQ(expected, m.Value()); } void CheckFloat32Constant(Node* n, float expected) { @@ -78,7 +77,7 @@ class RepresentationChangerTester : public HandleAndZoneScope, NumberMatcher m(n); 
CHECK_EQ(IrOpcode::kNumberConstant, n->opcode()); CHECK(m.HasValue()); - CheckDoubleEq(expected, m.Value()); + CHECK_EQ(expected, m.Value()); } Node* Parameter(int index = 0) { diff --git a/test/cctest/compiler/test-run-machops.cc b/test/cctest/compiler/test-run-machops.cc index 2d20575..a5a6823 100644 --- a/test/cctest/compiler/test-run-machops.cc +++ b/test/cctest/compiler/test-run-machops.cc @@ -15,6 +15,10 @@ #if V8_TURBOFAN_TARGET using namespace v8::base; + +#define CHECK_UINT32_EQ(x, y) \ + CHECK_EQ(static_cast(x), static_cast(y)) + using namespace v8::internal; using namespace v8::internal::compiler; @@ -501,7 +505,7 @@ TEST(RunLoadStoreFloat64Offset) { p1 = *j; p2 = *j - 5; CHECK_EQ(magic, m.Call()); - CheckDoubleEq(p1, p2); + CHECK_EQ(p1, p2); } } } @@ -759,7 +763,7 @@ TEST(RunInt32AddInBranch) { static const int32_t constant = 987654321; { RawMachineAssemblerTester m; - Int32BinopTester bt(&m); + Uint32BinopTester bt(&m); MLabel blocka, blockb; m.Branch( m.Word32Equal(m.Int32Add(bt.param0, bt.param1), m.Int32Constant(0)), @@ -777,7 +781,7 @@ TEST(RunInt32AddInBranch) { } { RawMachineAssemblerTester m; - Int32BinopTester bt(&m); + Uint32BinopTester bt(&m); MLabel blocka, blockb; m.Branch( m.Word32NotEqual(m.Int32Add(bt.param0, bt.param1), m.Int32Constant(0)), @@ -806,7 +810,7 @@ TEST(RunInt32AddInBranch) { m.Return(m.Int32Constant(0 - constant)); FOR_UINT32_INPUTS(j) { uint32_t expected = (*i + *j) == 0 ? constant : 0 - constant; - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -823,7 +827,7 @@ TEST(RunInt32AddInBranch) { m.Return(m.Int32Constant(0 - constant)); FOR_UINT32_INPUTS(j) { uint32_t expected = (*i + *j) != 0 ? constant : 0 - constant; - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -881,7 +885,7 @@ TEST(RunInt32AddInComparison) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = (*i + *j) == 0; - CHECK_EQ(expected, bt.call(*i, *j)); + CHECK_UINT32_EQ(expected, bt.call(*i, *j)); } } } @@ -893,7 +897,7 @@ TEST(RunInt32AddInComparison) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = (*i + *j) == 0; - CHECK_EQ(expected, bt.call(*i, *j)); + CHECK_UINT32_EQ(expected, bt.call(*i, *j)); } } } @@ -904,7 +908,7 @@ TEST(RunInt32AddInComparison) { m.Int32Constant(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = (*i + *j) == 0; - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -915,7 +919,7 @@ TEST(RunInt32AddInComparison) { m.Int32Constant(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = (*j + *i) == 0; - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -967,7 +971,7 @@ TEST(RunInt32SubP) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = static_cast(*i - *j); - CHECK_EQ(expected, bt.call(*i, *j)); + CHECK_UINT32_EQ(expected, bt.call(*i, *j)); } } } @@ -980,7 +984,7 @@ TEST(RunInt32SubImm) { m.Return(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = *i - *j; - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -990,7 +994,7 @@ TEST(RunInt32SubImm) { m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(*i))); FOR_UINT32_INPUTS(j) { uint32_t expected = *j - *i; - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -1068,8 +1072,8 @@ TEST(RunInt32SubAndWord32ShrP) { FOR_UINT32_INPUTS(j) { FOR_UINT32_SHIFTS(shift) { // Use uint32_t because signed overflow is UB in C. 
- uint32_t expected = *i - (*j >> shift); - CHECK_EQ(expected, m.Call(*i, *j, shift)); + int32_t expected = *i - (*j >> shift); + CHECK_UINT32_EQ(expected, m.Call(*i, *j, shift)); } } } @@ -1083,7 +1087,7 @@ TEST(RunInt32SubAndWord32ShrP) { FOR_UINT32_SHIFTS(shift) { FOR_UINT32_INPUTS(k) { // Use uint32_t because signed overflow is UB in C. - uint32_t expected = (*i >> shift) - *k; + int32_t expected = (*i >> shift) - *k; CHECK_EQ(expected, m.Call(*i, shift, *k)); } } @@ -1096,7 +1100,7 @@ TEST(RunInt32SubInBranch) { static const int constant = 987654321; { RawMachineAssemblerTester m; - Int32BinopTester bt(&m); + Uint32BinopTester bt(&m); MLabel blocka, blockb; m.Branch( m.Word32Equal(m.Int32Sub(bt.param0, bt.param1), m.Int32Constant(0)), @@ -1114,7 +1118,7 @@ TEST(RunInt32SubInBranch) { } { RawMachineAssemblerTester m; - Int32BinopTester bt(&m); + Uint32BinopTester bt(&m); MLabel blocka, blockb; m.Branch( m.Word32NotEqual(m.Int32Sub(bt.param0, bt.param1), m.Int32Constant(0)), @@ -1142,7 +1146,7 @@ TEST(RunInt32SubInBranch) { m.Bind(&blockb); m.Return(m.Int32Constant(0 - constant)); FOR_UINT32_INPUTS(j) { - uint32_t expected = (*i - *j) == 0 ? constant : 0 - constant; + int32_t expected = (*i - *j) == 0 ? constant : 0 - constant; CHECK_EQ(expected, m.Call(*j)); } } @@ -1218,7 +1222,7 @@ TEST(RunInt32SubInComparison) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = (*i - *j) == 0; - CHECK_EQ(expected, bt.call(*i, *j)); + CHECK_UINT32_EQ(expected, bt.call(*i, *j)); } } } @@ -1230,7 +1234,7 @@ TEST(RunInt32SubInComparison) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = (*i - *j) == 0; - CHECK_EQ(expected, bt.call(*i, *j)); + CHECK_UINT32_EQ(expected, bt.call(*i, *j)); } } } @@ -1241,7 +1245,7 @@ TEST(RunInt32SubInComparison) { m.Int32Constant(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = (*i - *j) == 0; - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -1252,7 +1256,7 @@ TEST(RunInt32SubInComparison) { m.Int32Constant(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = (*j - *i) == 0; - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -1314,7 +1318,7 @@ TEST(RunInt32MulP) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = *i * *j; - CHECK_EQ(expected, bt.call(*i, *j)); + CHECK_UINT32_EQ(expected, bt.call(*i, *j)); } } } @@ -1342,7 +1346,7 @@ TEST(RunInt32MulImm) { m.Return(m.Int32Mul(m.Int32Constant(*i), m.Parameter(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = *i * *j; - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -1352,7 +1356,7 @@ TEST(RunInt32MulImm) { m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant(*i))); FOR_UINT32_INPUTS(j) { uint32_t expected = *j * *i; - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -1523,7 +1527,7 @@ TEST(RunUint32DivP) { uint32_t p0 = *i; uint32_t p1 = *j; if (p1 != 0) { - int32_t expected = bit_cast(p0 / p1); + uint32_t expected = static_cast(p0 / p1); CHECK_EQ(expected, bt.call(p0, p1)); } } @@ -1538,7 +1542,7 @@ TEST(RunUint32DivP) { uint32_t p0 = *i; uint32_t p1 = *j; if (p1 != 0) { - int32_t expected = bit_cast(p0 + (p0 / p1)); + uint32_t expected = static_cast(p0 + (p0 / p1)); CHECK_EQ(expected, bt.call(p0, p1)); } } @@ -1584,7 +1588,7 @@ TEST(RunInt32ModP) { TEST(RunUint32ModP) { { RawMachineAssemblerTester m; - Uint32BinopTester bt(&m); + Int32BinopTester bt(&m); bt.AddReturn(m.Uint32Mod(bt.param0, bt.param1)); FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) 
{ @@ -1599,7 +1603,7 @@ TEST(RunUint32ModP) { } { RawMachineAssemblerTester m; - Uint32BinopTester bt(&m); + Int32BinopTester bt(&m); bt.AddReturn(m.Int32Add(bt.param0, m.Uint32Mod(bt.param0, bt.param1))); FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { @@ -1622,7 +1626,7 @@ TEST(RunWord32AndP) { bt.AddReturn(m.Word32And(bt.param0, bt.param1)); FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { - int32_t expected = *i & *j; + uint32_t expected = *i & *j; CHECK_EQ(expected, bt.call(*i, *j)); } } @@ -1633,7 +1637,7 @@ TEST(RunWord32AndP) { bt.AddReturn(m.Word32And(bt.param0, m.Word32Not(bt.param1))); FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { - int32_t expected = *i & ~(*j); + uint32_t expected = *i & ~(*j); CHECK_EQ(expected, bt.call(*i, *j)); } } @@ -1644,7 +1648,7 @@ TEST(RunWord32AndP) { bt.AddReturn(m.Word32And(m.Word32Not(bt.param0), bt.param1)); FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { - int32_t expected = ~(*i) & *j; + uint32_t expected = ~(*i) & *j; CHECK_EQ(expected, bt.call(*i, *j)); } } @@ -1661,7 +1665,7 @@ TEST(RunWord32AndAndWord32ShlP) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = *i << (*j & 0x1f); - CHECK_EQ(expected, bt.call(*i, *j)); + CHECK_UINT32_EQ(expected, bt.call(*i, *j)); } } } @@ -1673,7 +1677,7 @@ TEST(RunWord32AndAndWord32ShlP) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = *i << (0x1f & *j); - CHECK_EQ(expected, bt.call(*i, *j)); + CHECK_UINT32_EQ(expected, bt.call(*i, *j)); } } } @@ -1689,7 +1693,7 @@ TEST(RunWord32AndAndWord32ShrP) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = *i >> (*j & 0x1f); - CHECK_EQ(expected, bt.call(*i, *j)); + CHECK_UINT32_EQ(expected, bt.call(*i, *j)); } } } @@ -1701,7 +1705,7 @@ TEST(RunWord32AndAndWord32ShrP) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = *i >> (0x1f & *j); - CHECK_EQ(expected, bt.call(*i, *j)); + CHECK_UINT32_EQ(expected, bt.call(*i, *j)); } } } @@ -1728,7 +1732,7 @@ TEST(RunWord32AndAndWord32SarP) { m.Word32Sar(bt.param0, m.Word32And(m.Int32Constant(0x1f), bt.param1))); FOR_INT32_INPUTS(i) { FOR_INT32_INPUTS(j) { - int32_t expected = *i >> (0x1f & *j); + uint32_t expected = *i >> (0x1f & *j); CHECK_EQ(expected, bt.call(*i, *j)); } } @@ -1743,7 +1747,7 @@ TEST(RunWord32AndImm) { m.Return(m.Word32And(m.Int32Constant(*i), m.Parameter(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = *i & *j; - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -1753,7 +1757,7 @@ TEST(RunWord32AndImm) { m.Return(m.Word32And(m.Int32Constant(*i), m.Word32Not(m.Parameter(0)))); FOR_UINT32_INPUTS(j) { uint32_t expected = *i & ~(*j); - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -1764,7 +1768,7 @@ TEST(RunWord32AndInBranch) { static const int constant = 987654321; { RawMachineAssemblerTester m; - Int32BinopTester bt(&m); + Uint32BinopTester bt(&m); MLabel blocka, blockb; m.Branch( m.Word32Equal(m.Word32And(bt.param0, bt.param1), m.Int32Constant(0)), @@ -1782,7 +1786,7 @@ TEST(RunWord32AndInBranch) { } { RawMachineAssemblerTester m; - Int32BinopTester bt(&m); + Uint32BinopTester bt(&m); MLabel blocka, blockb; m.Branch( m.Word32NotEqual(m.Word32And(bt.param0, bt.param1), m.Int32Constant(0)), @@ -1887,7 +1891,7 @@ TEST(RunWord32AndInComparison) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = (*i & *j) == 0; - CHECK_EQ(expected, bt.call(*i, *j)); + CHECK_UINT32_EQ(expected, bt.call(*i, *j)); } } } @@ -1899,7 +1903,7 @@ TEST(RunWord32AndInComparison) { 
FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = (*i & *j) == 0; - CHECK_EQ(expected, bt.call(*i, *j)); + CHECK_UINT32_EQ(expected, bt.call(*i, *j)); } } } @@ -1910,7 +1914,7 @@ TEST(RunWord32AndInComparison) { m.Int32Constant(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = (*i & *j) == 0; - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -1921,7 +1925,7 @@ TEST(RunWord32AndInComparison) { m.Int32Constant(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = (*j & *i) == 0; - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -1936,7 +1940,7 @@ TEST(RunWord32OrP) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = *i | *j; - CHECK_EQ(expected, bt.call(*i, *j)); + CHECK_UINT32_EQ(expected, bt.call(*i, *j)); } } } @@ -1947,7 +1951,7 @@ TEST(RunWord32OrP) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = *i | ~(*j); - CHECK_EQ(expected, bt.call(*i, *j)); + CHECK_UINT32_EQ(expected, bt.call(*i, *j)); } } } @@ -1958,7 +1962,7 @@ TEST(RunWord32OrP) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = ~(*i) | *j; - CHECK_EQ(expected, bt.call(*i, *j)); + CHECK_UINT32_EQ(expected, bt.call(*i, *j)); } } } @@ -1972,7 +1976,7 @@ TEST(RunWord32OrImm) { m.Return(m.Word32Or(m.Int32Constant(*i), m.Parameter(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = *i | *j; - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -1982,7 +1986,7 @@ TEST(RunWord32OrImm) { m.Return(m.Word32Or(m.Int32Constant(*i), m.Word32Not(m.Parameter(0)))); FOR_UINT32_INPUTS(j) { uint32_t expected = *i | ~(*j); - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -2109,7 +2113,7 @@ TEST(RunWord32OrInBranch) { TEST(RunWord32OrInComparison) { { RawMachineAssemblerTester m; - Int32BinopTester bt(&m); + Uint32BinopTester bt(&m); bt.AddReturn( m.Word32Equal(m.Word32Or(bt.param0, bt.param1), m.Int32Constant(0))); FOR_UINT32_INPUTS(i) { @@ -2121,7 +2125,7 @@ TEST(RunWord32OrInComparison) { } { RawMachineAssemblerTester m; - Int32BinopTester bt(&m); + Uint32BinopTester bt(&m); bt.AddReturn( m.Word32Equal(m.Int32Constant(0), m.Word32Or(bt.param0, bt.param1))); FOR_UINT32_INPUTS(i) { @@ -2138,7 +2142,7 @@ TEST(RunWord32OrInComparison) { m.Int32Constant(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = (*i | *j) == 0; - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -2149,7 +2153,7 @@ TEST(RunWord32OrInComparison) { m.Int32Constant(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = (*j | *i) == 0; - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -2159,11 +2163,11 @@ TEST(RunWord32OrInComparison) { TEST(RunWord32XorP) { { FOR_UINT32_INPUTS(i) { - RawMachineAssemblerTester m(kMachUint32); + RawMachineAssemblerTester m(kMachUint32); m.Return(m.Word32Xor(m.Int32Constant(*i), m.Parameter(0))); FOR_UINT32_INPUTS(j) { uint32_t expected = *i ^ *j; - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -2173,8 +2177,8 @@ TEST(RunWord32XorP) { bt.AddReturn(m.Word32Xor(bt.param0, bt.param1)); FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { - uint32_t expected = *i ^ *j; - CHECK_EQ(expected, bt.call(*i, *j)); + int32_t expected = *i ^ *j; + CHECK_UINT32_EQ(expected, bt.call(*i, *j)); } } } @@ -2206,7 +2210,7 @@ TEST(RunWord32XorP) { m.Return(m.Word32Xor(m.Int32Constant(*i), m.Word32Not(m.Parameter(0)))); FOR_UINT32_INPUTS(j) { uint32_t expected = *i ^ ~(*j); - CHECK_EQ(expected, 
m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -2229,7 +2233,7 @@ TEST(RunWord32XorInBranch) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = (*i ^ *j) == 0 ? constant : 0 - constant; - CHECK_EQ(expected, bt.call(*i, *j)); + CHECK_UINT32_EQ(expected, bt.call(*i, *j)); } } } @@ -2247,7 +2251,7 @@ TEST(RunWord32XorInBranch) { FOR_UINT32_INPUTS(i) { FOR_UINT32_INPUTS(j) { uint32_t expected = (*i ^ *j) != 0 ? constant : 0 - constant; - CHECK_EQ(expected, bt.call(*i, *j)); + CHECK_UINT32_EQ(expected, bt.call(*i, *j)); } } } @@ -2264,7 +2268,7 @@ TEST(RunWord32XorInBranch) { m.Return(m.Int32Constant(0 - constant)); FOR_UINT32_INPUTS(j) { uint32_t expected = (*i ^ *j) == 0 ? constant : 0 - constant; - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -2282,7 +2286,7 @@ TEST(RunWord32XorInBranch) { m.Return(m.Int32Constant(0 - constant)); FOR_UINT32_INPUTS(j) { uint32_t expected = (*i ^ *j) != 0 ? constant : 0 - constant; - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -2338,7 +2342,7 @@ TEST(RunWord32ShlP) { m.Return(m.Word32Shl(m.Parameter(0), m.Int32Constant(shift))); FOR_UINT32_INPUTS(j) { uint32_t expected = *j << shift; - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -2349,7 +2353,7 @@ TEST(RunWord32ShlP) { FOR_UINT32_INPUTS(i) { FOR_UINT32_SHIFTS(shift) { uint32_t expected = *i << shift; - CHECK_EQ(expected, bt.call(*i, shift)); + CHECK_UINT32_EQ(expected, bt.call(*i, shift)); } } } @@ -2365,7 +2369,7 @@ TEST(RunWord32ShlInComparison) { FOR_UINT32_INPUTS(i) { FOR_UINT32_SHIFTS(shift) { uint32_t expected = 0 == (*i << shift); - CHECK_EQ(expected, bt.call(*i, shift)); + CHECK_UINT32_EQ(expected, bt.call(*i, shift)); } } } @@ -2377,31 +2381,31 @@ TEST(RunWord32ShlInComparison) { FOR_UINT32_INPUTS(i) { FOR_UINT32_SHIFTS(shift) { uint32_t expected = 0 == (*i << shift); - CHECK_EQ(expected, bt.call(*i, shift)); + CHECK_UINT32_EQ(expected, bt.call(*i, shift)); } } } { FOR_UINT32_SHIFTS(shift) { - RawMachineAssemblerTester m(kMachUint32); + RawMachineAssemblerTester m(kMachUint32); m.Return( m.Word32Equal(m.Int32Constant(0), m.Word32Shl(m.Parameter(0), m.Int32Constant(shift)))); FOR_UINT32_INPUTS(i) { uint32_t expected = 0 == (*i << shift); - CHECK_EQ(expected, m.Call(*i)); + CHECK_UINT32_EQ(expected, m.Call(*i)); } } } { FOR_UINT32_SHIFTS(shift) { - RawMachineAssemblerTester m(kMachUint32); + RawMachineAssemblerTester m(kMachUint32); m.Return( m.Word32Equal(m.Word32Shl(m.Parameter(0), m.Int32Constant(shift)), m.Int32Constant(0))); FOR_UINT32_INPUTS(i) { uint32_t expected = 0 == (*i << shift); - CHECK_EQ(expected, m.Call(*i)); + CHECK_UINT32_EQ(expected, m.Call(*i)); } } } @@ -2415,7 +2419,7 @@ TEST(RunWord32ShrP) { m.Return(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift))); FOR_UINT32_INPUTS(j) { uint32_t expected = *j >> shift; - CHECK_EQ(expected, m.Call(*j)); + CHECK_UINT32_EQ(expected, m.Call(*j)); } } } @@ -2426,10 +2430,10 @@ TEST(RunWord32ShrP) { FOR_UINT32_INPUTS(i) { FOR_UINT32_SHIFTS(shift) { uint32_t expected = *i >> shift; - CHECK_EQ(expected, bt.call(*i, shift)); + CHECK_UINT32_EQ(expected, bt.call(*i, shift)); } } - CHECK_EQ(0x00010000u, bt.call(0x80000000, 15)); + CHECK_EQ(0x00010000, bt.call(0x80000000, 15)); } } @@ -2443,7 +2447,7 @@ TEST(RunWord32ShrInComparison) { FOR_UINT32_INPUTS(i) { FOR_UINT32_SHIFTS(shift) { uint32_t expected = 0 == (*i >> shift); - CHECK_EQ(expected, bt.call(*i, shift)); + CHECK_UINT32_EQ(expected, bt.call(*i, 
shift));
      }
    }
  }
@@ -2455,31 +2459,31 @@ TEST(RunWord32ShrInComparison) {
    FOR_UINT32_INPUTS(i) {
      FOR_UINT32_SHIFTS(shift) {
        uint32_t expected = 0 == (*i >> shift);
- CHECK_EQ(expected, bt.call(*i, shift));
+ CHECK_UINT32_EQ(expected, bt.call(*i, shift));
      }
    }
  }
  {
    FOR_UINT32_SHIFTS(shift) {
- RawMachineAssemblerTester m(kMachUint32);
+ RawMachineAssemblerTester m(kMachUint32);
      m.Return(
          m.Word32Equal(m.Int32Constant(0),
                        m.Word32Shr(m.Parameter(0), m.Int32Constant(shift))));
      FOR_UINT32_INPUTS(i) {
        uint32_t expected = 0 == (*i >> shift);
- CHECK_EQ(expected, m.Call(*i));
+ CHECK_UINT32_EQ(expected, m.Call(*i));
      }
    }
  }
  {
    FOR_UINT32_SHIFTS(shift) {
- RawMachineAssemblerTester m(kMachUint32);
+ RawMachineAssemblerTester m(kMachUint32);
      m.Return(
          m.Word32Equal(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)),
                        m.Int32Constant(0)));
      FOR_UINT32_INPUTS(i) {
        uint32_t expected = 0 == (*i >> shift);
- CHECK_EQ(expected, m.Call(*i));
+ CHECK_UINT32_EQ(expected, m.Call(*i));
      }
    }
  }
@@ -2507,7 +2511,7 @@ TEST(RunWord32SarP) {
        CHECK_EQ(expected, bt.call(*i, shift));
      }
    }
- CHECK_EQ(bit_cast(0xFFFF0000), bt.call(0x80000000, 15));
+ CHECK_EQ(0xFFFF0000, bt.call(0x80000000, 15));
  }
}
@@ -2556,7 +2560,7 @@ TEST(RunWord32SarInComparison) {
          m.Word32Equal(m.Word32Sar(m.Parameter(0), m.Int32Constant(shift)),
                        m.Int32Constant(0)));
      FOR_INT32_INPUTS(i) {
- int32_t expected = 0 == (*i >> shift);
+ uint32_t expected = 0 == (*i >> shift);
        CHECK_EQ(expected, m.Call(*i));
      }
    }
@@ -2582,7 +2586,7 @@ TEST(RunWord32RorP) {
    FOR_UINT32_INPUTS(i) {
      FOR_UINT32_SHIFTS(shift) {
        uint32_t expected = bits::RotateRight32(*i, shift);
- CHECK_EQ(expected, bt.call(*i, shift));
+ CHECK_UINT32_EQ(expected, bt.call(*i, shift));
      }
    }
  }
@@ -2598,7 +2602,7 @@ TEST(RunWord32RorInComparison) {
    FOR_UINT32_INPUTS(i) {
      FOR_UINT32_SHIFTS(shift) {
        uint32_t expected = 0 == bits::RotateRight32(*i, shift);
- CHECK_EQ(expected, bt.call(*i, shift));
+ CHECK_UINT32_EQ(expected, bt.call(*i, shift));
      }
    }
  }
@@ -2610,31 +2614,31 @@ TEST(RunWord32RorInComparison) {
    FOR_UINT32_INPUTS(i) {
      FOR_UINT32_SHIFTS(shift) {
        uint32_t expected = 0 == bits::RotateRight32(*i, shift);
- CHECK_EQ(expected, bt.call(*i, shift));
+ CHECK_UINT32_EQ(expected, bt.call(*i, shift));
      }
    }
  }
  {
    FOR_UINT32_SHIFTS(shift) {
- RawMachineAssemblerTester m(kMachUint32);
+ RawMachineAssemblerTester m(kMachUint32);
      m.Return(
          m.Word32Equal(m.Int32Constant(0),
                        m.Word32Ror(m.Parameter(0), m.Int32Constant(shift))));
      FOR_UINT32_INPUTS(i) {
        uint32_t expected = 0 == bits::RotateRight32(*i, shift);
- CHECK_EQ(expected, m.Call(*i));
+ CHECK_UINT32_EQ(expected, m.Call(*i));
      }
    }
  }
  {
    FOR_UINT32_SHIFTS(shift) {
- RawMachineAssemblerTester m(kMachUint32);
+ RawMachineAssemblerTester m(kMachUint32);
      m.Return(
          m.Word32Equal(m.Word32Ror(m.Parameter(0), m.Int32Constant(shift)),
                        m.Int32Constant(0)));
      FOR_UINT32_INPUTS(i) {
        uint32_t expected = 0 == bits::RotateRight32(*i, shift);
- CHECK_EQ(expected, m.Call(*i));
+ CHECK_UINT32_EQ(expected, m.Call(*i));
      }
    }
  }
@@ -2960,7 +2964,7 @@ TEST(RunFloat64AddP) {
  FOR_FLOAT64_INPUTS(pl) {
    FOR_FLOAT64_INPUTS(pr) {
      double expected = *pl + *pr;
- CheckDoubleEq(expected, bt.call(*pl, *pr));
+ CHECK_EQ(expected, bt.call(*pl, *pr));
    }
  }
}
@@ -2975,7 +2979,7 @@ TEST(RunFloat64SubP) {
  FOR_FLOAT64_INPUTS(pl) {
    FOR_FLOAT64_INPUTS(pr) {
      double expected = *pl - *pr;
- CheckDoubleEq(expected, bt.call(*pl, *pr));
+ CHECK_EQ(expected, bt.call(*pl, *pr));
    }
  }
}
@@ -2995,7 +2999,7 @@ TEST(RunFloat64SubImm1) {
      input = *j;
      double expected = *i - input;
      CHECK_EQ(0, m.Call());
- CheckDoubleEq(expected, output);
+ CHECK_EQ(expected, output);
    }
  }
}
@@ -3015,7 +3019,7 @@ TEST(RunFloat64SubImm2) {
      input = *j;
      double expected = input - *i;
      CHECK_EQ(0, m.Call());
- CheckDoubleEq(expected, output);
+ CHECK_EQ(expected, output);
    }
  }
}
@@ -3030,7 +3034,7 @@ TEST(RunFloat64MulP) {
  FOR_FLOAT64_INPUTS(pl) {
    FOR_FLOAT64_INPUTS(pr) {
      double expected = *pl * *pr;
- CheckDoubleEq(expected, bt.call(*pl, *pr));
+ CHECK_EQ(expected, bt.call(*pl, *pr));
    }
  }
}
@@ -3059,7 +3063,7 @@ TEST(RunFloat64MulAndFloat64AddP) {
        volatile double temp = input_a * input_b;
        volatile double expected = temp + input_c;
        CHECK_EQ(0, m.Call());
- CheckDoubleEq(expected, output);
+ CHECK_EQ(expected, output);
      }
    }
  }
@@ -3081,7 +3085,7 @@ TEST(RunFloat64MulAndFloat64AddP) {
        volatile double temp = input_b * input_c;
        volatile double expected = input_a + temp;
        CHECK_EQ(0, m.Call());
- CheckDoubleEq(expected, output);
+ CHECK_EQ(expected, output);
      }
    }
  }
@@ -3111,7 +3115,7 @@ TEST(RunFloat64MulAndFloat64SubP) {
        volatile double temp = input_b * input_c;
        volatile double expected = input_a - temp;
        CHECK_EQ(0, m.Call());
- CheckDoubleEq(expected, output);
+ CHECK_EQ(expected, output);
      }
    }
  }
@@ -3133,7 +3137,7 @@ TEST(RunFloat64MulImm) {
      input = *j;
      double expected = *i * input;
      CHECK_EQ(0, m.Call());
- CheckDoubleEq(expected, output);
+ CHECK_EQ(expected, output);
    }
  }
}
@@ -3148,7 +3152,7 @@ TEST(RunFloat64MulImm) {
      input = *j;
      double expected = input * *i;
      CHECK_EQ(0, m.Call());
- CheckDoubleEq(expected, output);
+ CHECK_EQ(expected, output);
    }
  }
}
@@ -3164,7 +3168,7 @@ TEST(RunFloat64DivP) {
  FOR_FLOAT64_INPUTS(pl) {
    FOR_FLOAT64_INPUTS(pr) {
      double expected = *pl / *pr;
- CheckDoubleEq(expected, bt.call(*pl, *pr));
+ CHECK_EQ(expected, bt.call(*pl, *pr));
    }
  }
}
@@ -3180,7 +3184,7 @@ TEST(RunFloat64ModP) {
    FOR_FLOAT64_INPUTS(j) {
      double expected = modulo(*i, *j);
      double found = bt.call(*i, *j);
- CheckDoubleEq(expected, found);
+ CHECK_EQ(expected, found);
    }
  }
}
@@ -3219,7 +3223,7 @@ TEST(RunChangeInt32ToFloat64_B) {
TEST(RunChangeUint32ToFloat64_B) {
- RawMachineAssemblerTester m(kMachUint32);
+ RawMachineAssemblerTester m(kMachUint32);
  double output = 0;
  Node* convert = m.ChangeUint32ToFloat64(m.Parameter(0));
@@ -3400,7 +3404,7 @@ TEST(RunChangeFloat64ToInt32_spilled) {
TEST(RunChangeFloat64ToUint32_spilled) {
  RawMachineAssemblerTester m;
  const int kNumInputs = 32;
- uint32_t magic = 0x786234;
+ int32_t magic = 0x786234;
  double input[kNumInputs];
  uint32_t result[kNumInputs];
  Node* input_node[kNumInputs];
@@ -3429,9 +3433,9 @@ TEST(RunChangeFloat64ToUint32_spilled) {
  for (int i = 0; i < kNumInputs; i++) {
    if (i % 2) {
- CHECK_EQ(result[i], static_cast(100 + i + 2147483648u));
+ CHECK_UINT32_EQ(result[i], static_cast(100 + i + 2147483648u));
    } else {
- CHECK_EQ(result[i], static_cast(100 + i));
+ CHECK_UINT32_EQ(result[i], static_cast(100 + i));
    }
  }
}
@@ -3440,7 +3444,7 @@ TEST(RunChangeFloat64ToUint32_spilled) {
TEST(RunTruncateFloat64ToFloat32_spilled) {
  RawMachineAssemblerTester m;
  const int kNumInputs = 32;
- uint32_t magic = 0x786234;
+ int32_t magic = 0x786234;
  double input[kNumInputs];
  float result[kNumInputs];
  Node* input_node[kNumInputs];
@@ -4364,7 +4368,7 @@ TEST(RunTruncateInt64ToInt32P) {
  FOR_UINT32_INPUTS(i) {
    FOR_UINT32_INPUTS(j) {
      expected = (static_cast(*j) << 32) | *i;
- CHECK_EQ(static_cast(expected), m.Call());
+ CHECK_UINT32_EQ(expected, m.Call());
    }
  }
}
@@ -4500,7 +4504,7 @@ TEST(RunTruncateFloat64ToFloat32) {
    input = *i;
    volatile double expected = DoubleToFloat32(input);
    CHECK_EQ(0, m.Call());
- CheckDoubleEq(expected, actual);
+ CHECK_EQ(expected, actual);
  }
}
diff --git a/test/cctest/compiler/value-helper.h b/test/cctest/compiler/value-helper.h
index 208fa43..caf1daf 100644
--- a/test/cctest/compiler/value-helper.h
+++ b/test/cctest/compiler/value-helper.h
@@ -44,7 +44,7 @@ class ValueHelper {
  void CheckUint32Constant(int32_t expected, Node* node) {
    CHECK_EQ(IrOpcode::kInt32Constant, node->opcode());
- CHECK_EQ(expected, OpParameter(node));
+ CHECK_EQ(expected, OpParameter(node));
  }
  void CheckHeapConstant(Object* expected, Node* node) {
diff --git a/test/cctest/test-accessors.cc b/test/cctest/test-accessors.cc
index bbb74c0..5f452ea 100644
--- a/test/cctest/test-accessors.cc
+++ b/test/cctest/test-accessors.cc
@@ -150,20 +150,20 @@ static void XGetter(const Info& info, int offset) {
  ApiTestFuzzer::Fuzz();
  v8::Isolate* isolate = CcTest::isolate();
  CHECK_EQ(isolate, info.GetIsolate());
- CHECK(x_receiver->Equals(info.This()));
+ CHECK_EQ(x_receiver, info.This());
  info.GetReturnValue().Set(v8_num(x_register[offset]));
}
static void XGetter(Local name,
                    const v8::PropertyCallbackInfo& info) {
- CHECK(x_holder->Equals(info.Holder()));
+ CHECK_EQ(x_holder, info.Holder());
  XGetter(info, 0);
}
static void XGetter(const v8::FunctionCallbackInfo& info) {
- CHECK(x_receiver->Equals(info.Holder()));
+ CHECK_EQ(x_receiver, info.Holder());
  XGetter(info, 1);
}
@@ -172,8 +172,8 @@ template
static void XSetter(Local value, const Info& info, int offset) {
  v8::Isolate* isolate = CcTest::isolate();
  CHECK_EQ(isolate, info.GetIsolate());
- CHECK(x_holder->Equals(info.This()));
- CHECK(x_holder->Equals(info.Holder()));
+ CHECK_EQ(x_holder, info.This());
+ CHECK_EQ(x_holder, info.Holder());
  x_register[offset] = value->Int32Value();
  info.GetReturnValue().Set(v8_num(-1));
}
@@ -222,10 +222,10 @@ THREADED_TEST(AccessorIC) {
      " result.push(obj[key_1]);"
      "}"
      "result"));
- CHECK_EQ(80u, array->Length());
+ CHECK_EQ(80, array->Length());
  for (int i = 0; i < 80; i++) {
    v8::Handle entry = array->Get(v8::Integer::New(isolate, i));
- CHECK(v8::Integer::New(isolate, i / 2)->Equals(entry));
+ CHECK_EQ(v8::Integer::New(isolate, i/2), entry);
  }
}
@@ -407,7 +407,7 @@ THREADED_TEST(Regress1054726) {
      "for (var i = 0; i < 5; i++) {"
      " try { obj.x; } catch (e) { result += e; }"
      "}; result"))->Run();
- CHECK(v8_str("ggggg")->Equals(result));
+ CHECK_EQ(v8_str("ggggg"), result);
  result = Script::Compile(String::NewFromUtf8(
      isolate,
@@ -415,7 +415,7 @@ THREADED_TEST(Regress1054726) {
      "for (var i = 0; i < 5; i++) {"
      " try { obj.x = i; } catch (e) { result += e; }"
      "}; result"))->Run();
- CHECK(v8_str("01234")->Equals(result));
+ CHECK_EQ(v8_str("01234"), result);
}
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index a043b36..6306db9 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -107,8 +107,8 @@ static void IncrementingSignatureCallback(
    const v8::FunctionCallbackInfo& args) {
  ApiTestFuzzer::Fuzz();
  signature_callback_count++;
- CHECK(signature_expected_receiver->Equals(args.Holder()));
- CHECK(signature_expected_receiver->Equals(args.This()));
+ CHECK_EQ(signature_expected_receiver, args.Holder());
+ CHECK_EQ(signature_expected_receiver, args.This());
  v8::Handle result =
      v8::Array::New(args.GetIsolate(), args.Length());
  for (int i = 0; i < args.Length(); i++)
@@ -190,8 +190,8 @@ static void TestSignature(const char* loop_js, Local receiver,
  if (!expected_to_throw) {
    CHECK_EQ(10, signature_callback_count);
  } else {
- CHECK(v8_str("TypeError: Illegal invocation")
-     ->Equals(try_catch.Exception()->ToString(isolate)));
+ CHECK_EQ(v8_str("TypeError: Illegal invocation"),
+          try_catch.Exception()->ToString(isolate));
  }
}
@@ -296,7 +296,7 @@ THREADED_TEST(Access) {
  Local foo_after = obj->Get(v8_str("foo"));
  CHECK(!foo_after->IsUndefined());
  CHECK(foo_after->IsString());
- CHECK(bar_str->Equals(foo_after));
+ CHECK_EQ(bar_str, foo_after);
}
@@ -311,11 +311,11 @@ THREADED_TEST(AccessElement) {
  Local after = obj->Get(1);
  CHECK(!after->IsUndefined());
  CHECK(after->IsString());
- CHECK(bar_str->Equals(after));
+ CHECK_EQ(bar_str, after);
  Local value = CompileRun("[\"a\", \"b\"]").As();
- CHECK(v8_str("a")->Equals(value->Get(0)));
- CHECK(v8_str("b")->Equals(value->Get(1)));
+ CHECK_EQ(v8_str("a"), value->Get(0));
+ CHECK_EQ(v8_str("b"), value->Get(1));
}
@@ -459,7 +459,7 @@ THREADED_TEST(ScriptMakingExternalString) {
    CHECK_EQ(source->IsExternal(), false);
    CHECK_EQ(source->IsExternalOneByte(), false);
    String::Encoding encoding = String::UNKNOWN_ENCODING;
- CHECK(!source->GetExternalStringResourceBase(&encoding));
+ CHECK_EQ(NULL, source->GetExternalStringResourceBase(&encoding));
    CHECK_EQ(String::ONE_BYTE_ENCODING, encoding);
    bool success = source->MakeExternal(
        new TestResource(two_byte_source, &dispose_count));
@@ -697,7 +697,7 @@ THREADED_TEST(NewExternalForVeryLongString) {
    CHECK(str.IsEmpty());
    CHECK(try_catch.HasCaught());
    String::Utf8Value exception_value(try_catch.Exception());
- CHECK_EQ(0, strcmp("RangeError: Invalid string length", *exception_value));
+ CHECK_EQ("RangeError: Invalid string length", *exception_value);
  }
  {
@@ -709,7 +709,7 @@ THREADED_TEST(NewExternalForVeryLongString) {
    CHECK(str.IsEmpty());
    CHECK(try_catch.HasCaught());
    String::Utf8Value exception_value(try_catch.Exception());
- CHECK_EQ(0, strcmp("RangeError: Invalid string length", *exception_value));
+ CHECK_EQ("RangeError: Invalid string length", *exception_value);
  }
}
@@ -1001,7 +1001,7 @@ static void TestFunctionTemplateAccessor(Constructor constructor,
  Local fun = fun_templ->GetFunction();
  env->Global()->Set(v8_str("obj"), fun);
  Local result = v8_compile("(new obj()).toString()")->Run();
- CHECK(v8_str("[object funky]")->Equals(result));
+ CHECK_EQ(v8_str("[object funky]"), result);
  CompileRun("var obj_instance = new obj();");
  Local