* should be between 0 and one less than the result from GetStressRuns()
*/
static void PrepareStressRun(int run);
+
+ /**
+ * Force deoptimization of all functions.
+ */
+ static void DeoptimizeAll();
};
// Create a new execution environment containing the built-in
// functions
v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
+ if (context.IsEmpty()) {
+ printf("Error creating context\n");
+ return 1;
+ }
+
bool run_shell = (argc == 1);
for (int i = 1; i < argc; i++) {
// Enter the execution environment before evaluating any code.
v8::Testing::PrepareStressRun(i);
result = RunMain(argc, argv);
}
+ printf("======== Full Deoptimization =======\n");
+ v8::Testing::DeoptimizeAll();
} else {
result = RunMain(argc, argv);
}
}
+void Testing::DeoptimizeAll() {
+ internal::Deoptimizer::DeoptimizeAll();
+}
+
+
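As a rough sketch of how an embedder other than the sample shell might drive the new hook (the v8-testing.h include and the RunScripts entry point are assumptions for illustration, not part of this change):

#include <v8.h>
#include <v8-testing.h>  // assumed location of the v8::Testing declarations

int RunScripts(int argc, char* argv[]);  // hypothetical embedder entry point

// Illustrative only: run the embedder's scripts once per stress run, then
// force deoptimization of everything that was optimized along the way.
int RunWithStress(int argc, char* argv[]) {
  int result = 0;
  int stress_runs = v8::Testing::GetStressRuns();
  for (int i = 0; i < stress_runs && result == 0; i++) {
    v8::Testing::PrepareStressRun(i);   // i is in [0, GetStressRuns())
    result = RunScripts(argc, argv);
  }
  v8::Testing::DeoptimizeAll();         // exercise the deoptimizer once more
  return result;
}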
namespace internal {
}
-bool Operand::is_single_instruction() const {
+bool Operand::is_single_instruction(Instr instr) const {
if (rm_.is_valid()) return true;
- if (must_use_constant_pool()) return false;
uint32_t dummy1, dummy2;
- return fits_shifter(imm32_, &dummy1, &dummy2, NULL);
+ if (must_use_constant_pool() ||
+ !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
+ // The immediate operand cannot be encoded as a shifter operand, or use of
+ // the constant pool is required. For a mov instruction that does not set
+ // the condition code, alternative instruction sequences can be used.
+ if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
+ if (must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) {
+ // mov instruction will be an ldr from constant pool (one instruction).
+ return true;
+ } else {
+ // mov instruction will be a mov or movw followed by movt (two
+ // instructions).
+ return false;
+ }
+ } else {
+ // If this is not a mov or mvn instruction there will always be an
+ // additional instruction - either a mov or an ldr. The mov might itself be
+ // two instructions (movw followed by movt), so including the actual
+ // instruction two or three instructions will be generated.
+ return false;
+ }
+ } else {
+ // No use of constant pool and the immediate operand can be encoded as a
+ // shifter operand.
+ return true;
+ }
}
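To make the cases above concrete, a small sketch of the expected results for a mov instruction (assembler-internal code; the immediates are arbitrary examples and MOV/LeaveCC/al are the existing opcode and condition constants):

// Illustrative only, inside the assembler where Operand and Instr are visible.
Instr mov_instr = al | MOV | LeaveCC;            // mov, S bit not set
Operand small(0xFF);                             // encodable as a shifter operand
ASSERT(small.is_single_instruction(mov_instr));  // plain mov, one instruction

Operand large(0x12345678);                       // not encodable as a shifter operand
// Pre-ARMv7 (or when the constant pool must be used): a single ldr from the
// constant pool, so is_single_instruction() is true.
// On ARMv7: movw followed by movt, so is_single_instruction() is false.
bool single = large.is_single_instruction(mov_instr);
USE(single);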
INLINE(bool is_reg() const);
// Return true if this operand fits in one instruction so that no
- // 2-instruction solution with a load into the ip register is necessary.
- bool is_single_instruction() const;
+ // 2-instruction solution with a load into the ip register is necessary. If
+ // the instruction this operand is used for is a MOV or MVN instruction, the
+ // actual instruction to use is required for this calculation. For other
+ // instructions instr is ignored.
+ bool is_single_instruction(Instr instr = 0) const;
bool must_use_constant_pool() const;
inline int32_t immediate() const {
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+ HandleScope scope;
AssertNoAllocation no_allocation;
if (!function->IsOptimized()) return;
int deoptimization_index = safepoint_entry.deoptimization_index();
int gap_code_size = safepoint_entry.gap_code_size();
// Check that we did not shoot past next safepoint.
- // TODO(srdjan): How do we guarantee that safepoint code does not
- // overlap other safepoint patching code?
CHECK(pc_offset >= last_pc_offset);
#ifdef DEBUG
// Destroy the code which is not supposed to be run again.
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
+#ifdef DEBUG
+ if (FLAG_print_code) {
+ code->PrintLn();
+ }
+#endif
}
}
namespace internal {
-class SafepointGenerator : public PostCallGenerator {
+class SafepointGenerator : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
deoptimization_index_(deoptimization_index) { }
virtual ~SafepointGenerator() { }
- virtual void Generate() {
+ virtual void BeforeCall(int call_size) {
+ ASSERT(call_size >= 0);
+ // Ensure that we have enough space after the previous safepoint position
+ // for the code that deoptimization patches in there, so that patches for
+ // consecutive safepoints cannot overlap.
+ int call_end = codegen_->masm()->pc_offset() + call_size;
+ int prev_jump_end =
+ codegen_->LastSafepointEnd() + Deoptimizer::patch_size();
+ if (call_end < prev_jump_end) {
+ int padding_size = prev_jump_end - call_end;
+ ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
+ while (padding_size > 0) {
+ codegen_->masm()->nop();
+ padding_size -= Assembler::kInstrSize;
+ }
+ }
+ }
+
+ virtual void AfterCall() {
codegen_->RecordSafepoint(pointers_, deoptimization_index_);
}
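The padding computed in BeforeCall can be modelled in isolation; a standalone sketch of the same arithmetic (the function and variable names here are hypothetical):

// Returns the number of padding bytes needed so that the end of the call
// being emitted does not fall inside the patch area reserved after the
// previous safepoint (mirrors the BeforeCall logic above).
int PaddingBeforeCall(int pc_offset, int call_size,
                      int last_safepoint_end, int patch_size) {
  int call_end = pc_offset + call_size;
  int prev_jump_end = last_safepoint_end + patch_size;
  return call_end < prev_jump_end ? prev_jump_end - call_end : 0;
}

// Example with kInstrSize == 4: the previous safepoint ended at offset 100,
// the deoptimization patch needs 12 bytes, the current pc is 104 and the call
// is 4 bytes long. PaddingBeforeCall(104, 4, 100, 12) == 4, i.e. one nop.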
int arguments,
int deoptimization_index);
void RecordPosition(int position);
+ int LastSafepointEnd() {
+ return static_cast<int>(safepoints_.GetPcAfterGap());
+ }
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
}
+int MacroAssembler::CallSize(Register target, Condition cond) {
+#if USE_BLX
+ return kInstrSize;
+#else
+ return 2 * kInstrSize;
+#endif
+}
+
+
void MacroAssembler::Call(Register target, Condition cond) {
+#ifdef DEBUG
+ int pre_position = pc_offset();
+#endif
+
#if USE_BLX
blx(target, cond);
#else
// set lr for return at current pc + 8
- mov(lr, Operand(pc), LeaveCC, cond);
- mov(pc, Operand(target), LeaveCC, cond);
+ { BlockConstPoolScope block_const_pool(this);
+ mov(lr, Operand(pc), LeaveCC, cond);
+ mov(pc, Operand(target), LeaveCC, cond);
+ }
+#endif
+
+#ifdef DEBUG
+ int post_position = pc_offset();
+ CHECK_EQ(pre_position + CallSize(target, cond), post_position);
#endif
}
-void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
- Condition cond) {
+int MacroAssembler::CallSize(
+ intptr_t target, RelocInfo::Mode rmode, Condition cond) {
+ int size = 2 * kInstrSize;
+ Instr mov_instr = cond | MOV | LeaveCC;
+ if (!Operand(target, rmode).is_single_instruction(mov_instr)) {
+ size += kInstrSize;
+ }
+ return size;
+}
+
+
+void MacroAssembler::Call(
+ intptr_t target, RelocInfo::Mode rmode, Condition cond) {
+#ifdef DEBUG
+ int pre_position = pc_offset();
+#endif
+
#if USE_BLX
// On ARMv5 and after the recommended call sequence is:
// ldr ip, [pc, #...]
ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
#else
- // Set lr for return at current pc + 8.
- mov(lr, Operand(pc), LeaveCC, cond);
- // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
- mov(pc, Operand(target, rmode), LeaveCC, cond);
-
+ { BlockConstPoolScope block_const_pool(this);
+ // Set lr for return at current pc + 8.
+ mov(lr, Operand(pc), LeaveCC, cond);
+ // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
+ mov(pc, Operand(target, rmode), LeaveCC, cond);
+ }
ASSERT(kCallTargetAddressOffset == kInstrSize);
#endif
+
+#ifdef DEBUG
+ int post_position = pc_offset();
+ CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
+#endif
}
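The BlockConstPoolScope in the non-BLX path is what keeps the DEBUG check honest: if the assembler dumped the constant pool between the two mov instructions, both the size predicted by CallSize and the return address placed in lr (pc + 8) would be wrong. A sketch of the pattern, written as a hypothetical MacroAssembler helper (EmitTwoInstructionCall is not part of this change):

void MacroAssembler::EmitTwoInstructionCall(intptr_t target,
                                            RelocInfo::Mode rmode,
                                            Condition cond) {
  BlockConstPoolScope block_const_pool(this);      // no constant pool dump inside
  mov(lr, Operand(pc), LeaveCC, cond);             // lr <- pc + 8, the return address
  mov(pc, Operand(target, rmode), LeaveCC, cond);  // typically ldr pc, [pc, #off]
}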
-void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
- Condition cond) {
+int MacroAssembler::CallSize(
+ byte* target, RelocInfo::Mode rmode, Condition cond) {
+ return CallSize(reinterpret_cast<intptr_t>(target), rmode, cond);
+}
+
+
+void MacroAssembler::Call(
+ byte* target, RelocInfo::Mode rmode, Condition cond) {
+#ifdef DEBUG
+ int pre_position = pc_offset();
+#endif
+
ASSERT(!RelocInfo::IsCodeTarget(rmode));
Call(reinterpret_cast<intptr_t>(target), rmode, cond);
+
+#ifdef DEBUG
+ int post_position = pc_offset();
+ CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
+#endif
}
-void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond) {
+int MacroAssembler::CallSize(
+ Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
+ return CallSize(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+}
+
+
+void MacroAssembler::Call(
+ Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
+#ifdef DEBUG
+ int pre_position = pc_offset();
+#endif
+
ASSERT(RelocInfo::IsCodeTarget(rmode));
// 'code' is always generated ARM code, never THUMB code
Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+
+#ifdef DEBUG
+ int post_position = pc_offset();
+ CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
+#endif
}
Register code_reg,
Label* done,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ CallWrapper* call_wrapper) {
bool definitely_matches = false;
Label regular_invoke;
Handle<Code> adaptor =
Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
if (flag == CALL_FUNCTION) {
+ if (call_wrapper != NULL) {
+ call_wrapper->BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
+ }
Call(adaptor, RelocInfo::CODE_TARGET);
- if (post_call_generator != NULL) post_call_generator->Generate();
+ if (call_wrapper != NULL) call_wrapper->AfterCall();
b(done);
} else {
Jump(adaptor, RelocInfo::CODE_TARGET);
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ CallWrapper* call_wrapper) {
Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
- post_call_generator);
+ call_wrapper);
if (flag == CALL_FUNCTION) {
+ if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(code));
Call(code);
- if (post_call_generator != NULL) post_call_generator->Generate();
+ if (call_wrapper != NULL) call_wrapper->AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
Jump(code);
void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ CallWrapper* call_wrapper) {
// Contract with called JS functions requires that function is passed in r1.
ASSERT(fun.is(r1));
FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, post_call_generator);
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
- PostCallGenerator* post_call_generator) {
+ CallWrapper* call_wrapper) {
GetBuiltinEntry(r2, id);
if (flags == CALL_JS) {
+ if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(r2));
Call(r2);
- if (post_call_generator != NULL) post_call_generator->Generate();
+ if (call_wrapper != NULL) call_wrapper->AfterCall();
} else {
ASSERT(flags == JUMP_JS);
Jump(r2);
namespace internal {
// Forward declaration.
-class PostCallGenerator;
+class CallWrapper;
// ----------------------------------------------------------------------------
// Static helper functions
void Jump(Register target, Condition cond = al);
void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+ int CallSize(Register target, Condition cond = al);
void Call(Register target, Condition cond = al);
+ int CallSize(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
+ int CallSize(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Ret(Condition cond = al);
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ CallWrapper* call_wrapper = NULL);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ CallWrapper* call_wrapper = NULL);
void InvokeFunction(JSFunction* function,
const ParameterCount& actual,
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
- PostCallGenerator* post_call_generator = NULL);
+ CallWrapper* call_wrapper = NULL);
// Store the code object for the given builtin in the target register and
// setup the function in r1.
private:
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
+ int CallSize(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
// Helper functions for generating invokes.
Register code_reg,
Label* done,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ CallWrapper* call_wrapper = NULL);
// Activation support.
void EnterFrame(StackFrame::Type type);
-// Helper class for generating code or data associated with the code
-// right after a call instruction. As an example this can be used to
-// generate safepoint data after calls for crankshaft.
+// Helper class for generating code or data associated with the code
+// right before or after a call instruction. As an example this can be used
+// to generate safepoint data after calls for crankshaft.
-class PostCallGenerator {
+class CallWrapper {
public:
- PostCallGenerator() { }
- virtual ~PostCallGenerator() { }
- virtual void Generate() = 0;
+ CallWrapper() { }
+ virtual ~CallWrapper() { }
+ // Called just before emitting a call. Argument is the size of the generated
+ // call code.
+ virtual void BeforeCall(int call_size) = 0;
+ // Called just after emitting a call, i.e., at the return site for the call.
+ virtual void AfterCall() = 0;
};
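Besides SafepointGenerator, any hook that needs to know the call boundaries can implement this interface; a minimal illustrative subclass (the class and its use are hypothetical, not part of this change):

// Records the size predicted by CallSize() and the pc offset of the return
// site for each wrapped call, e.g. for assembler statistics.
class CallSizeRecorder : public CallWrapper {
 public:
  explicit CallSizeRecorder(MacroAssembler* masm)
      : masm_(masm), predicted_size_(0), return_site_(0) { }
  virtual ~CallSizeRecorder() { }
  virtual void BeforeCall(int call_size) { predicted_size_ = call_size; }
  virtual void AfterCall() { return_site_ = masm_->pc_offset(); }
  int predicted_size() const { return predicted_size_; }
  int return_site() const { return return_site_; }
 private:
  MacroAssembler* masm_;
  int predicted_size_;
  int return_site_;
};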
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+ HandleScope scope;
AssertNoAllocation no_allocation;
if (!function->IsOptimized()) return;
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
+#ifdef DEBUG
+ if (FLAG_print_code) {
+ code->PrintLn();
+ }
+#endif
}
}
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+ HandleScope scope;
AssertNoAllocation no_allocation;
if (!function->IsOptimized()) return;
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+#ifdef DEBUG
+ if (FLAG_print_code) {
+ code->PrintLn();
+ }
+#endif
}
}