// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_IA32
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "safepoint-table.h"
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/safepoint-table.h"
namespace v8 {
namespace internal {
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
int pc_offset = deopt_data->Pc(i)->value();
if (pc_offset == -1) continue;
- ASSERT_GE(pc_offset, prev_pc_offset);
+ DCHECK_GE(pc_offset, prev_pc_offset);
int pc_delta = pc_offset - prev_pc_offset;
// We use RUNTIME_ENTRY reloc info which has a size of 2 bytes
// if encodable with small pc delta encoding and up to 6 bytes
Factory* factory = isolate->factory();
Handle<ByteArray> new_reloc =
factory->NewByteArray(reloc_length + padding, TENURED);
- OS::MemCopy(new_reloc->GetDataStartAddress() + padding,
- code->relocation_info()->GetDataStartAddress(),
- reloc_length);
+ MemCopy(new_reloc->GetDataStartAddress() + padding,
+ code->relocation_info()->GetDataStartAddress(), reloc_length);
// Create a relocation writer to write the comments in the padding
// space. Use position 0 for everything to ensure short encoding.
RelocInfoWriter reloc_info_writer(
byte* pos_before = reloc_info_writer.pos();
#endif
reloc_info_writer.Write(&rinfo);
- ASSERT(RelocInfo::kMinRelocCommentSize ==
+ DCHECK(RelocInfo::kMinRelocCommentSize ==
pos_before - reloc_info_writer.pos());
}
// Replace relocation information on the code object.
// Emit call to lazy deoptimization at all lazy deopt points.
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
- SharedFunctionInfo* shared =
- SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
- shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
reinterpret_cast<intptr_t>(deopt_entry),
NULL);
reloc_info_writer.Write(&rinfo);
- ASSERT_GE(reloc_info_writer.pos(),
+ DCHECK_GE(reloc_info_writer.pos(),
reloc_info->address() + ByteArray::kHeaderSize);
- ASSERT(prev_call_address == NULL ||
+ DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
- ASSERT(call_address + patch_size() <= code->instruction_end());
+ DCHECK(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
prev_call_address = call_address;
#endif
// Move the relocation info to the beginning of the byte array.
int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
- OS::MemMove(
- code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
+ MemMove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
// The relocation info is in place, update the size.
reloc_info->set_length(new_reloc_size);
// Handle the junk part after the new relocation info. We will create
// a non-live object in the extra space at the end of the former reloc info.
Address junk_address = reloc_info->address() + reloc_info->Size();
- ASSERT(junk_address <= reloc_end_address);
+ DCHECK(junk_address <= reloc_end_address);
isolate->heap()->CreateFillerObjectAt(junk_address,
reloc_end_address - junk_address);
}
input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
simd128_value_t zero = {{0.0, 0.0}};
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; i++) {
input_->SetSIMD128Register(i, zero);
}
void Deoptimizer::SetPlatformCompiledStubRegisters(
- FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+ FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
intptr_t handler =
- reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
+ reinterpret_cast<intptr_t>(descriptor->deoptimization_handler());
int params = descriptor->GetHandlerParameterCount();
output_frame->SetRegister(eax.code(), params);
output_frame->SetRegister(ebx.code(), handler);
void Deoptimizer::CopySIMD128Registers(FrameDescription* output_frame) {
- if (!CpuFeatures::IsSupported(SSE2)) return;
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
simd128_value_t xmm_value = input_->GetSIMD128Register(i);
output_frame->SetSIMD128Register(i, xmm_value);
}
input_frame_size - parameter_count * kPointerSize -
StandardFrameConstants::kFixedFrameSize -
kPointerSize;
- ASSERT(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
+ DCHECK(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
JavaScriptFrameConstants::kLocal0Offset);
int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
return (alignment_state == kAlignmentPaddingPushed);
}
-Code* Deoptimizer::NotifyStubFailureBuiltin() {
- Builtins::Name name = CpuFeatures::IsSupported(SSE2) ?
- Builtins::kNotifyStubFailureSaveDoubles : Builtins::kNotifyStubFailure;
- return isolate_->builtins()->builtin(name);
-}
-
-
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
const int kNumberOfRegisters = Register::kNumRegisters;
const int kXMMRegsSize = kSIMD128Size *
- XMMRegister::kNumAllocatableRegisters;
+ XMMRegister::kMaxNumAllocatableRegisters;
__ sub(esp, Immediate(kXMMRegsSize));
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int offset = i * kSIMD128Size;
- __ movups(Operand(esp, offset), xmm_reg);
- }
+ for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
+ XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+ int offset = i * kSIMD128Size;
+ __ movups(Operand(esp, offset), xmm_reg);
}
__ pushad();
}
int xmm_regs_offset = FrameDescription::simd128_registers_offset();
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- // Fill in the xmm input registers.
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- int dst_offset = i * kSIMD128Size + xmm_regs_offset;
- int src_offset = i * kSIMD128Size;
- __ movups(xmm0, Operand(esp, src_offset));
- __ movups(Operand(ebx, dst_offset), xmm0);
- }
+  // Fill in the xmm (simd128) input registers.
+ for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
+ int dst_offset = i * kSIMD128Size + xmm_regs_offset;
+ int src_offset = i * kSIMD128Size;
+ __ movups(xmm0, Operand(esp, src_offset));
+ __ movups(Operand(ebx, dst_offset), xmm0);
}
// Clear FPU all exceptions.
__ j(below, &outer_push_loop);
// In case of a failed STUB, we have to restore the XMM registers.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int src_offset = i * kSIMD128Size + xmm_regs_offset;
- __ movups(xmm_reg, Operand(ebx, src_offset));
- }
+ for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
+ XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+ int src_offset = i * kSIMD128Size + xmm_regs_offset;
+ __ movups(xmm_reg, Operand(ebx, src_offset));
}
// Push state, pc, and continuation from the last output frame.
USE(start);
__ push_imm32(i);
__ jmp(&done);
- ASSERT(masm()->pc_offset() - start == table_entry_size_);
+ DCHECK(masm()->pc_offset() - start == table_entry_size_);
}
__ bind(&done);
}
double FrameDescription::GetDoubleRegister(unsigned n) const {
- ASSERT(n < ARRAY_SIZE(simd128_registers_));
+ DCHECK(n < arraysize(simd128_registers_));
return simd128_registers_[n].d[0];
}
void FrameDescription::SetDoubleRegister(unsigned n, double value) {
- ASSERT(n < ARRAY_SIZE(simd128_registers_));
+ DCHECK(n < arraysize(simd128_registers_));
simd128_registers_[n].d[0] = value;
}