1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #if V8_TARGET_ARCH_IA32
33 #include "ia32/lithium-ia32.h"
34 #include "ia32/lithium-codegen-ia32.h"
35 #elif V8_TARGET_ARCH_X64
36 #include "x64/lithium-x64.h"
37 #include "x64/lithium-codegen-x64.h"
38 #elif V8_TARGET_ARCH_ARM
39 #include "arm/lithium-arm.h"
40 #include "arm/lithium-codegen-arm.h"
41 #elif V8_TARGET_ARCH_MIPS
42 #include "mips/lithium-mips.h"
43 #include "mips/lithium-codegen-mips.h"
45 #error "Unknown architecture."
52 void LOperand::PrintTo(StringStream* stream) {
53 LUnallocated* unalloc = NULL;
59 unalloc = LUnallocated::cast(this);
60 stream->Add("v%d", unalloc->virtual_register());
61 if (unalloc->basic_policy() == LUnallocated::FIXED_SLOT) {
62 stream->Add("(=%dS)", unalloc->fixed_slot_index());
65 switch (unalloc->extended_policy()) {
66 case LUnallocated::NONE:
68 case LUnallocated::FIXED_REGISTER: {
69 int reg_index = unalloc->fixed_register_index();
70 const char* register_name =
71 Register::AllocationIndexToString(reg_index);
72 stream->Add("(=%s)", register_name);
75 case LUnallocated::FIXED_DOUBLE_REGISTER: {
76 int reg_index = unalloc->fixed_register_index();
77 const char* double_register_name =
78 DoubleRegister::AllocationIndexToString(reg_index);
79 stream->Add("(=%s)", double_register_name);
82 case LUnallocated::MUST_HAVE_REGISTER:
85 case LUnallocated::WRITABLE_REGISTER:
88 case LUnallocated::SAME_AS_FIRST_INPUT:
91 case LUnallocated::ANY:
96 case CONSTANT_OPERAND:
97 stream->Add("[constant:%d]", index());
100 stream->Add("[stack:%d]", index());
102 case DOUBLE_STACK_SLOT:
103 stream->Add("[double_stack:%d]", index());
105 case FLOAT32x4_STACK_SLOT:
106 stream->Add("[float32x4_stack:%d]", index());
108 case INT32x4_STACK_SLOT:
109 stream->Add("[int32x4_stack:%d]", index());
112 stream->Add("[%s|R]", Register::AllocationIndexToString(index()));
114 case DOUBLE_REGISTER:
115 stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index()));
117 case FLOAT32x4_REGISTER:
118 stream->Add("[%s|R]",
119 SIMD128Register::AllocationIndexToString(index()));
121 case INT32x4_REGISTER:
122 stream->Add("[%s|R]",
123 SIMD128Register::AllocationIndexToString(index()));
126 stream->Add("[arg:%d]", index());
// Per-kind static storage for the small-index operand cache; populated by
// SetUpCache() below and released by TearDownCache().
template<LOperand::Kind kOperandKind, int kNumCachedOperands>
LSubKindOperand<kOperandKind, kNumCachedOperands>*
LSubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL;
137 template<LOperand::Kind kOperandKind, int kNumCachedOperands>
138 void LSubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
140 cache = new LSubKindOperand[kNumCachedOperands];
141 for (int i = 0; i < kNumCachedOperands; i++) {
142 cache[i].ConvertTo(kOperandKind, i);
147 template<LOperand::Kind kOperandKind, int kNumCachedOperands>
148 void LSubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
153 void LOperand::SetUpCaches() {
154 #define LITHIUM_OPERAND_SETUP(name, type, number) L##name::SetUpCache();
155 LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_SETUP)
156 #undef LITHIUM_OPERAND_SETUP
160 void LOperand::TearDownCaches() {
161 #define LITHIUM_OPERAND_TEARDOWN(name, type, number) L##name::TearDownCache();
162 LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_TEARDOWN)
163 #undef LITHIUM_OPERAND_TEARDOWN
167 bool LParallelMove::IsRedundant() const {
168 for (int i = 0; i < move_operands_.length(); ++i) {
169 if (!move_operands_[i].IsRedundant()) return false;
175 void LParallelMove::PrintDataTo(StringStream* stream) const {
177 for (int i = 0; i < move_operands_.length(); ++i) {
178 if (!move_operands_[i].IsEliminated()) {
179 LOperand* source = move_operands_[i].source();
180 LOperand* destination = move_operands_[i].destination();
181 if (!first) stream->Add(" ");
183 if (source->Equals(destination)) {
184 destination->PrintTo(stream);
186 destination->PrintTo(stream);
188 source->PrintTo(stream);
196 void LEnvironment::PrintTo(StringStream* stream) {
197 stream->Add("[id=%d|", ast_id().ToInt());
198 if (deoptimization_index() != Safepoint::kNoDeoptimizationIndex) {
199 stream->Add("deopt_id=%d|", deoptimization_index());
201 stream->Add("parameters=%d|", parameter_count());
202 stream->Add("arguments_stack_height=%d|", arguments_stack_height());
203 for (int i = 0; i < values_.length(); ++i) {
204 if (i != 0) stream->Add(";");
205 if (values_[i] == NULL) {
206 stream->Add("[hole]");
208 values_[i]->PrintTo(stream);
215 void LPointerMap::RecordPointer(LOperand* op, Zone* zone) {
216 // Do not record arguments as pointers.
217 if (op->IsStackSlot() && op->index() < 0) return;
218 ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot() &&
219 !op->IsFloat32x4Register() && !op->IsFloat32x4StackSlot() &&
220 !op->IsInt32x4Register() && !op->IsInt32x4StackSlot());
221 pointer_operands_.Add(op, zone);
225 void LPointerMap::RemovePointer(LOperand* op) {
226 // Do not record arguments as pointers.
227 if (op->IsStackSlot() && op->index() < 0) return;
228 ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot() &&
229 !op->IsFloat32x4Register() && !op->IsFloat32x4StackSlot() &&
230 !op->IsInt32x4Register() && !op->IsInt32x4StackSlot());
231 for (int i = 0; i < pointer_operands_.length(); ++i) {
232 if (pointer_operands_[i]->Equals(op)) {
233 pointer_operands_.Remove(i);
240 void LPointerMap::RecordUntagged(LOperand* op, Zone* zone) {
241 // Do not record arguments as pointers.
242 if (op->IsStackSlot() && op->index() < 0) return;
243 ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot() &&
244 !op->IsFloat32x4Register() && !op->IsFloat32x4StackSlot() &&
245 !op->IsInt32x4Register() && !op->IsInt32x4StackSlot());
246 untagged_operands_.Add(op, zone);
250 void LPointerMap::PrintTo(StringStream* stream) {
252 for (int i = 0; i < pointer_operands_.length(); ++i) {
253 if (i != 0) stream->Add(";");
254 pointer_operands_[i]->PrintTo(stream);
260 int StackSlotOffset(int index) {
262 // Local or spill slot. Skip the frame pointer, function, and
263 // context in the fixed part of the frame.
264 return -(index + 1) * kPointerSize -
265 StandardFrameConstants::kFixedFrameSizeFromFp;
267 // Incoming parameter. Skip the return address.
268 return -(index + 1) * kPointerSize + kFPOnStackSize + kPCOnStackSize;
273 LChunk::LChunk(CompilationInfo* info, HGraph* graph)
274 : spill_slot_count_(0),
277 instructions_(32, graph->zone()),
278 pointer_maps_(8, graph->zone()),
279 inlined_closures_(1, graph->zone()) {
283 LLabel* LChunk::GetLabel(int block_id) const {
284 HBasicBlock* block = graph_->blocks()->at(block_id);
285 int first_instruction = block->first_instruction_index();
286 return LLabel::cast(instructions_[first_instruction]);
290 int LChunk::LookupDestination(int block_id) const {
291 LLabel* cur = GetLabel(block_id);
292 while (cur->replacement() != NULL) {
293 cur = cur->replacement();
295 return cur->block_id();
298 Label* LChunk::GetAssemblyLabel(int block_id) const {
299 LLabel* label = GetLabel(block_id);
300 ASSERT(!label->HasReplacement());
301 return label->label();
305 void LChunk::MarkEmptyBlocks() {
306 LPhase phase("L_Mark empty blocks", this);
307 for (int i = 0; i < graph()->blocks()->length(); ++i) {
308 HBasicBlock* block = graph()->blocks()->at(i);
309 int first = block->first_instruction_index();
310 int last = block->last_instruction_index();
311 LInstruction* first_instr = instructions()->at(first);
312 LInstruction* last_instr = instructions()->at(last);
314 LLabel* label = LLabel::cast(first_instr);
315 if (last_instr->IsGoto()) {
316 LGoto* goto_instr = LGoto::cast(last_instr);
317 if (label->IsRedundant() &&
318 !label->is_loop_header()) {
319 bool can_eliminate = true;
320 for (int i = first + 1; i < last && can_eliminate; ++i) {
321 LInstruction* cur = instructions()->at(i);
323 LGap* gap = LGap::cast(cur);
324 if (!gap->IsRedundant()) {
325 can_eliminate = false;
328 can_eliminate = false;
332 label->set_replacement(GetLabel(goto_instr->block_id()));
340 void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
341 LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
342 gap->set_hydrogen_value(instr->hydrogen_value());
344 if (instr->IsControl()) {
345 instructions_.Add(gap, zone());
346 index = instructions_.length();
347 instructions_.Add(instr, zone());
349 index = instructions_.length();
350 instructions_.Add(instr, zone());
351 instructions_.Add(gap, zone());
353 if (instr->HasPointerMap()) {
354 pointer_maps_.Add(instr->pointer_map(), zone());
355 instr->pointer_map()->set_lithium_position(index);
360 LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
361 return LConstantOperand::Create(constant->id(), zone());
365 int LChunk::GetParameterStackSlot(int index) const {
366 // The receiver is at index 0, the first parameter at index 1, so we
367 // shift all parameter indexes down by the number of parameters, and
368 // make sure they end up negative so they are distinguishable from
370 int result = index - info()->num_parameters() - 1;
377 // A parameter relative to ebp in the arguments stub.
378 int LChunk::ParameterAt(int index) {
379 ASSERT(-1 <= index); // -1 is the receiver.
380 return (1 + info()->scope()->num_parameters() - index) *
385 LGap* LChunk::GetGapAt(int index) const {
386 return LGap::cast(instructions_[index]);
390 bool LChunk::IsGapAt(int index) const {
391 return instructions_[index]->IsGap();
395 int LChunk::NearestGapPos(int index) const {
396 while (!IsGapAt(index)) index--;
401 void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
402 GetGapAt(index)->GetOrCreateParallelMove(
403 LGap::START, zone())->AddMove(from, to, zone());
407 HConstant* LChunk::LookupConstant(LConstantOperand* operand) const {
408 return HConstant::cast(graph_->LookupValue(operand->index()));
412 Representation LChunk::LookupLiteralRepresentation(
413 LConstantOperand* operand) const {
414 return graph_->LookupValue(operand->index())->representation();
418 LChunk* LChunk::NewChunk(HGraph* graph) {
419 DisallowHandleAllocation no_handles;
420 DisallowHeapAllocation no_gc;
421 graph->DisallowAddingNewValues();
422 int values = graph->GetMaximumValueID();
423 CompilationInfo* info = graph->info();
424 if (values > LUnallocated::kMaxVirtualRegisters) {
425 info->set_bailout_reason(kNotEnoughVirtualRegistersForValues);
428 LAllocator allocator(values, graph);
429 LChunkBuilder builder(info, graph, &allocator);
430 LChunk* chunk = builder.Build();
431 if (chunk == NULL) return NULL;
433 if (!allocator.Allocate(chunk)) {
434 info->set_bailout_reason(kNotEnoughVirtualRegistersRegalloc);
438 chunk->set_allocated_double_registers(
439 allocator.assigned_double_registers());
445 Handle<Code> LChunk::Codegen() {
446 MacroAssembler assembler(info()->isolate(), NULL, 0);
447 LOG_CODE_EVENT(info()->isolate(),
448 CodeStartLinePosInfoRecordEvent(
449 assembler.positions_recorder()));
450 LCodeGen generator(this, &assembler, info());
454 if (generator.GenerateCode()) {
455 CodeGenerator::MakeCodePrologue(info(), "optimized");
456 Code::Flags flags = info()->flags();
458 CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
459 generator.FinishCode(code);
460 code->set_is_crankshafted(true);
461 void* jit_handler_data =
462 assembler.positions_recorder()->DetachJITHandlerData();
463 LOG_CODE_EVENT(info()->isolate(),
464 CodeEndLinePosInfoRecordEvent(*code, jit_handler_data));
466 CodeGenerator::PrintCode(code, info());
469 return Handle<Code>::null();
473 void LChunk::set_allocated_double_registers(BitVector* allocated_registers) {
474 allocated_double_registers_ = allocated_registers;
475 BitVector* doubles = allocated_double_registers();
476 BitVector::Iterator iterator(doubles);
477 while (!iterator.Done()) {
478 if (info()->saves_caller_doubles()) {
479 if (kDoubleSize == kPointerSize * 2) {
480 spill_slot_count_ += 2;
490 LEnvironment* LChunkBuilderBase::CreateEnvironment(
491 HEnvironment* hydrogen_env,
492 int* argument_index_accumulator,
493 ZoneList<HValue*>* objects_to_materialize) {
494 if (hydrogen_env == NULL) return NULL;
496 LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
497 argument_index_accumulator,
498 objects_to_materialize);
499 BailoutId ast_id = hydrogen_env->ast_id();
500 ASSERT(!ast_id.IsNone() ||
501 hydrogen_env->frame_type() != JS_FUNCTION);
502 int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
503 LEnvironment* result =
504 new(zone()) LEnvironment(hydrogen_env->closure(),
505 hydrogen_env->frame_type(),
507 hydrogen_env->parameter_count(),
511 hydrogen_env->entry(),
513 int argument_index = *argument_index_accumulator;
515 // Store the environment description into the environment
516 // (with holes for nested objects)
517 for (int i = 0; i < hydrogen_env->length(); ++i) {
518 if (hydrogen_env->is_special_index(i)) continue;
521 HValue* value = hydrogen_env->values()->at(i);
522 if (value->IsArgumentsObject() || value->IsCapturedObject()) {
523 op = LEnvironment::materialization_marker();
524 } else if (value->IsPushArgument()) {
525 op = new(zone()) LArgument(argument_index++);
530 value->representation(),
531 value->CheckFlag(HInstruction::kUint32));
534 // Recursively store the nested objects into the environment
535 for (int i = 0; i < hydrogen_env->length(); ++i) {
536 if (hydrogen_env->is_special_index(i)) continue;
538 HValue* value = hydrogen_env->values()->at(i);
539 if (value->IsArgumentsObject() || value->IsCapturedObject()) {
540 AddObjectToMaterialize(value, objects_to_materialize, result);
544 if (hydrogen_env->frame_type() == JS_FUNCTION) {
545 *argument_index_accumulator = argument_index;
552 // Add an object to the supplied environment and object materialization list.
556 // We are building three lists here:
558 // 1. In the result->object_mapping_ list (added to by the
559 // LEnvironment::Add*Object methods), we store the lengths (number
560 // of fields) of the captured objects in depth-first traversal order, or
561 // in case of duplicated objects, we store the index to the duplicate object
562 // (with a tag to differentiate between captured and duplicated objects).
564 // 2. The object fields are stored in the result->values_ list
565 // (added to by the LEnvironment.AddValue method) sequentially as lists
566 // of fields with holes for nested objects (the holes will be expanded
567 // later by LCodegen::AddToTranslation according to the
568 // LEnvironment.object_mapping_ list).
570 // 3. The auxiliary objects_to_materialize array stores the hydrogen values
571 // in the same order as result->object_mapping_ list. This is used
572 // to detect duplicate values and calculate the corresponding object index.
573 void LChunkBuilderBase::AddObjectToMaterialize(HValue* value,
574 ZoneList<HValue*>* objects_to_materialize, LEnvironment* result) {
575 int object_index = objects_to_materialize->length();
576 // Store the hydrogen value into the de-duplication array
577 objects_to_materialize->Add(value, zone());
578 // Find out whether we are storing a duplicated value
579 int previously_materialized_object = -1;
580 for (int prev = 0; prev < object_index; ++prev) {
581 if (objects_to_materialize->at(prev) == value) {
582 previously_materialized_object = prev;
586 // Store the captured object length (or duplicated object index)
587 // into the environment. For duplicated objects, we stop here.
588 int length = value->OperandCount();
589 bool is_arguments = value->IsArgumentsObject();
590 if (previously_materialized_object >= 0) {
591 result->AddDuplicateObject(previously_materialized_object);
594 result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
596 // Store the captured object's fields into the environment
597 for (int i = is_arguments ? 1 : 0; i < length; ++i) {
599 HValue* arg_value = value->OperandAt(i);
600 if (arg_value->IsArgumentsObject() || arg_value->IsCapturedObject()) {
601 // Insert a hole for nested objects
602 op = LEnvironment::materialization_marker();
604 ASSERT(!arg_value->IsPushArgument());
605 // For ordinary values, tell the register allocator we need the value
607 op = UseAny(arg_value);
610 arg_value->representation(),
611 arg_value->CheckFlag(HInstruction::kUint32));
613 // Recursively store all the nested captured objects into the environment
614 for (int i = is_arguments ? 1 : 0; i < length; ++i) {
615 HValue* arg_value = value->OperandAt(i);
616 if (arg_value->IsArgumentsObject() || arg_value->IsCapturedObject()) {
617 AddObjectToMaterialize(arg_value, objects_to_materialize, result);
623 LInstruction* LChunkBuilder::CheckElideControlInstruction(
624 HControlInstruction* instr) {
625 HBasicBlock* successor;
626 if (!instr->KnownSuccessorBlock(&successor)) return NULL;
627 return new(zone()) LGoto(successor);
632 if (ShouldProduceTraceOutput()) {
633 isolate()->GetHTracer()->TraceLithium(name(), chunk_);
638 } } // namespace v8::internal