if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
}
- set_register(r0, (int32_t) *result);
+ set_register(r0, reinterpret_cast<int32_t>(*result));
} else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
SimulatorRuntimeDirectGetterCall target =
reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
}
- set_register(r0, (int32_t) *result);
+ set_register(r0, reinterpret_cast<int32_t>(*result));
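// Note on the two casts above: dereferencing a v8::Handle yields a pointer
// (v8::Value*), and a pointer-to-integer conversion requires reinterpret_cast
// rather than static_cast; on this 32-bit simulator an int32_t register can
// hold the full pointer value.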
} else {
// builtin call.
ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
if (src_precision == kDoublePrecision) {
if (unsigned_integer) {
- set_d_register_from_double(dst,
- static_cast<double>((uint32_t)val));
+ set_d_register_from_double(
+ dst, static_cast<double>(static_cast<uint32_t>(val)));
} else {
set_d_register_from_double(dst, static_cast<double>(val));
}
} else {
if (unsigned_integer) {
- set_s_register_from_float(dst,
- static_cast<float>((uint32_t)val));
+ set_s_register_from_float(
+ dst, static_cast<float>(static_cast<uint32_t>(val)));
} else {
set_s_register_from_float(dst, static_cast<float>(val));
}
}
// Rounding up may cause overflow.
- if ((number & ((int64_t)1 << 53)) != 0) {
+ if ((number & (static_cast<int64_t>(1) << 53)) != 0) {
exponent++;
number >>= 1;
}
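// Illustrative note on the bit-53 check above, assuming IEEE-754 doubles with
// a 53-bit significand: rounding up can carry out of the significand
// (e.g. 0x1FFFFFFFFFFFFF + 1 == static_cast<int64_t>(1) << 53); shifting the
// mantissa right once and incrementing the exponent keeps the value
// number * 2^exponent unchanged while restoring a 53-bit significand.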
MaybeObject* Heap::NumberFromUint32(
uint32_t value, PretenureFlag pretenure) {
- if ((int32_t)value >= 0 && Smi::IsValid((int32_t)value)) {
- return Smi::FromInt((int32_t)value);
+ if (static_cast<int32_t>(value) >= 0 &&
+ Smi::IsValid(static_cast<int32_t>(value))) {
+ return Smi::FromInt(static_cast<int32_t>(value));
}
// Bypass NumberFromDouble to avoid various redundant checks.
return AllocateHeapNumber(FastUI2D(value), pretenure);
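// Unsigned values that fit a non-negative Smi are returned as Smis; anything
// larger (such as 1u << 31, exercised by the new heap test below) takes the
// HeapNumber path, where FastUI2D converts the uint32_t to a double without
// passing through a signed intermediate.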
for (;
cell_index < last_cell_index;
cell_index++, cell_base += 32 * kPointerSize) {
- ASSERT((unsigned)cell_index ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(cell_base))));
+ ASSERT(static_cast<unsigned>(cell_index) ==
+ Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(
+ p->AddressToMarkbitIndex(cell_base))));
const MarkBit::CellType current_cell = cells[cell_index];
if (current_cell == 0) continue;
for (;
cell_index < last_cell_index;
cell_index++, cell_base += 32 * kPointerSize) {
- ASSERT((unsigned)cell_index ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(cell_base))));
+ ASSERT(static_cast<unsigned>(cell_index) ==
+ Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(
+ p->AddressToMarkbitIndex(cell_base))));
if (cells[cell_index] == 0) continue;
int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
for (;
cell_index < last_cell_index;
cell_index++, object_address += 32 * kPointerSize) {
- ASSERT((unsigned)cell_index ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(object_address))));
+ ASSERT(static_cast<unsigned>(cell_index) ==
+ Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(
+ p->AddressToMarkbitIndex(object_address))));
int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
int live_index = 0;
for ( ; live_objects != 0; live_objects--) {
for ( ;
cell_index < last_cell_index;
cell_index++, block_address += 32 * kPointerSize) {
- ASSERT((unsigned)cell_index ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(block_address))));
+ ASSERT(static_cast<unsigned>(cell_index) ==
+ Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(
+ p->AddressToMarkbitIndex(block_address))));
uint32_t cell = cells[cell_index];
if (cell != 0) {
// We have a live object. Check approximately whether it is more than 32
int32_t Assembler::GetBranchOffset(Instr instr) {
ASSERT(IsBranch(instr));
- return ((int16_t)(instr & kImm16Mask)) << 2;
+ return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}
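// The int16_t cast above sign-extends the 16-bit branch immediate before the
// << 2 scales it from instruction units to a byte offset (MIPS branch targets
// are word-aligned).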
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
ASSERT(IsOri(instr_ori));
- uint32_t imm = (uint32_t)buffer_ + target_pos;
+ uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
ASSERT((imm & 3) == 0);
instr_lui &= ~kImm16Mask;
instr_at_put(pos + 1 * Assembler::kInstrSize,
instr_ori | (imm & kImm16Mask));
} else {
- uint32_t imm28 = (uint32_t)buffer_ + target_pos;
+ uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos;
imm28 &= kImm28Mask;
ASSERT((imm28 & 3) == 0);
}
}
- uint32_t imm = (uint32_t)buffer_ + target_pos;
+ uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
ASSERT((imm & 3) == 0);
return imm;
#if DEBUG
// Get pc of delay slot.
uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
+ bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
+     (kImm26Bits + kImmFieldShift)) == 0;
ASSERT(in_range && ((target & 3) == 0));
#endif
GenInstrJump(J, target >> 2);
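// The in_range checks in this and the following jump emitters XOR the
// delay-slot pc with the target and require the bits above the 28-bit jump
// field to be zero, i.e. both addresses lie in the same 256 MB region.
// Since >> binds tighter than ^, the XOR result must stay parenthesized
// before the shift; a minimal sketch of the intended test:
//   bool same_region =
//       ((ipc ^ static_cast<uint32_t>(target)) >>
//        (kImm26Bits + kImmFieldShift)) == 0;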
#ifdef DEBUG
// Get pc of delay slot.
uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
+ bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
+     (kImm26Bits + kImmFieldShift)) == 0;
ASSERT(in_range && ((target & 3) == 0));
#endif
positions_recorder()->WriteRecordedPositions();
void Assembler::j_or_jr(int32_t target, Register rs) {
// Get pc of delay slot.
uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
-
+ bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
+     (kImm26Bits + kImmFieldShift)) == 0;
if (in_range) {
j(target);
} else {
void Assembler::jal_or_jalr(int32_t target, Register rs) {
// Get pc of delay slot.
uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
-
+ bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
+     (kImm26Bits + kImmFieldShift)) == 0;
if (in_range) {
jal(target);
} else {
return 2; // Number of instructions patched.
} else {
uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
- if ((int32_t)imm28 == kEndOfJumpChain) {
+ if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
return 0; // Number of instructions patched.
}
imm28 += pc_delta;
Instr instr3 = instr_at(pc + 2 * kInstrSize);
uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
- bool in_range =
- ((uint32_t)(ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
- uint32_t target_field = (uint32_t)(itarget & kJumpAddrMask) >> kImmFieldShift;
+ bool in_range = ((ipc ^ static_cast<uint32_t>(itarget)) >>
+     (kImm26Bits + kImmFieldShift)) == 0;
+ uint32_t target_field =
+     static_cast<uint32_t>(itarget & kJumpAddrMask) >> kImmFieldShift;
bool patched_jump = false;
#ifndef ALLOW_JAL_IN_BOUNDARY_REGION
// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
const int kFCSRRegister = 31;
const int kInvalidFPUControlRegister = -1;
-const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
+const uint32_t kFPUInvalidResult = (static_cast<uint32_t>(1) << 31) - 1;
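// Note: << binds tighter than binary -, so the shift must be parenthesized:
// (static_cast<uint32_t>(1) << 31) - 1 evaluates to 0x7FFFFFFF, the same
// value as (uint32_t)(1 << 31) - 1, whereas the unparenthesized form would
// shift by (31 - 1) instead.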
// FCSR constants.
const uint32_t kFCSRInexactFlagBit = 2;
// was inlined.
Instr instr = Assembler::instr_at(andi_instruction_address);
return Assembler::IsAndImmediate(instr) &&
- Assembler::GetRt(instr) == (uint32_t)zero_reg.code();
+ Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
}
// was inlined.
Instr instr = Assembler::instr_at(andi_instruction_address);
if (!(Assembler::IsAndImmediate(instr) &&
- Assembler::GetRt(instr) == (uint32_t)zero_reg.code())) {
+ Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
return;
}
FUNCTION_ADDR(target), arg1);
}
v8::Handle<v8::Value> result = target(arg1);
- *(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
+ *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
set_register(v0, arg0);
} else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
// See DirectCEntryStub::GenerateCall for explanation of register usage.
FUNCTION_ADDR(target), arg1, arg2);
}
v8::Handle<v8::Value> result = target(arg1, arg2);
- *(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
+ *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
set_register(v0, arg0);
} else {
SimulatorRuntimeCall target =
case CVT_D_L: // Mips32r2 instruction.
// Watch the signs here, we want 2 32-bit vals
// to make a sign-64.
- i64 = (uint32_t) get_fpu_register(fs_reg);
- i64 |= ((int64_t) get_fpu_register(fs_reg + 1) << 32);
+ i64 = static_cast<uint32_t>(get_fpu_register(fs_reg));
+ i64 |= static_cast<int64_t>(get_fpu_register(fs_reg + 1)) << 32;
set_fpu_register_double(fd_reg, static_cast<double>(i64));
break;
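// Sketch of the reassembly above, as written: the low word (fs_reg) is
// zero-extended via the uint32_t cast so its sign bit does not smear into the
// upper half, and the high word (fs_reg + 1) is widened to int64_t before the
// << 32 so the combined value keeps the sign of the original 64-bit integer.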
case CVT_S_L:
if (!js_value->value()->IsString()) return false;
String* str = String::cast(js_value->value());
- if (index >= (uint32_t)str->length()) return false;
+ if (index >= static_cast<uint32_t>(str->length())) return false;
return true;
}
if ((type & kStringRepresentationMask) != kConsStringTag) {
// Pop stack so next iteration is in correct place.
Pop();
- unsigned length = (unsigned) string->length();
+ unsigned length = static_cast<unsigned>(string->length());
// Could be a flattened ConsString.
if (length == 0) continue;
*length_out = length;
type = string->map()->instance_type();
if ((type & kStringRepresentationMask) != kConsStringTag) {
AdjustMaximumDepth();
- unsigned length = (unsigned) string->length();
+ unsigned length = static_cast<unsigned>(string->length());
ASSERT(length != 0);
*length_out = length;
*type_out = type;
hash = hash ^ (hash >> 11);
hash = hash + (hash << 6);
hash = hash ^ (hash >> 22);
- return (uint32_t) hash;
+ return static_cast<uint32_t>(hash);
}
CHECK_EQ(static_cast<double>(static_cast<uint32_t>(Smi::kMaxValue) + 1),
value->Number());
+ maybe_value = HEAP->NumberFromUint32(static_cast<uint32_t>(1) << 31);
+ value = maybe_value->ToObjectChecked();
+ CHECK(value->IsHeapNumber());
+ CHECK(value->IsNumber());
+ CHECK_EQ(static_cast<double>(static_cast<uint32_t>(1) << 31),
+ value->Number());
+
// nan oddball checks
CHECK(HEAP->nan_value()->IsNumber());
CHECK(isnan(HEAP->nan_value()->Number()));