QTextStream qout(stderr, QIODevice::WriteOnly);
}
+// Per-function JIT assembler: emits machine code for |function| and
+// records block labels and pending jumps until link() resolves them.
+Assembler::Assembler(IR::Function* function)
+ : _function(function)
+{
+}
+
+// Record the current emission point as the entry label of |block| so that
+// jumps collected in _patches can be linked to it later.
+void Assembler::registerBlock(IR::BasicBlock* block)
+{
+ _addrs[block] = label();
+}
+
+// Emit a jump from |current| to |target|, unless |target| is the immediate
+// fall-through block (next index in emission order), in which case no jump
+// instruction is needed. The emitted jump is queued for patching.
+void Assembler::jumpToBlock(IR::BasicBlock* current, IR::BasicBlock *target)
+{
+ if (current->index + 1 != target->index)
+ _patches[target].append(jump());
+}
+
+// Queue |targetJump| to be linked to the entry label of |targetBlock|
+// when the patch list is processed.
+void Assembler::addPatch(IR::BasicBlock* targetBlock, Jump targetJump)
+{
+ _patches[targetBlock].append(targetJump);
+}
+
+// Compute the memory location of temp |t|. Negative indices denote function
+// arguments, indices below the local count denote context locals, and the
+// remainder live in the current stack frame. |reg| is used as a scratch base
+// register and may be replaced by StackFrameRegister for stack temps.
+Assembler::Pointer Assembler::loadTempAddress(RegisterID reg, IR::Temp *t)
+{
+ int32_t offset = 0;
+ if (t->index < 0) {
+ // Argument: base is the context's arguments array.
+ const int arg = -t->index - 1;
+ loadPtr(Address(ContextRegister, offsetof(ExecutionContext, arguments)), reg);
+ offset = arg * sizeof(Value);
+ } else if (t->index < _function->locals.size()) {
+ // Local variable: base is the context's locals array.
+ loadPtr(Address(ContextRegister, offsetof(ExecutionContext, locals)), reg);
+ offset = t->index * sizeof(Value);
+ } else {
+ // Temporary spilled into the current stack frame.
+ const int arg = _function->maxNumberOfArguments + t->index - _function->locals.size();
+ // StackFrameRegister points to its old value on the stack, so even for the first temp we need to
+ // subtract at least sizeof(Value).
+ offset = - sizeof(Value) * (arg + 1);
+ reg = StackFrameRegister;
+ }
+ return Pointer(reg, offset);
+}
+
+// Copy a VM value from |source| to |result|. When the value fits in a
+// general-purpose register it goes through ReturnValueRegister; otherwise
+// the 64-bit payload is moved through a floating-point register.
+template <typename Result, typename Source>
+void Assembler::copyValue(Result result, Source source)
+{
+#ifdef VALUE_FITS_IN_REGISTER
+ // Use ReturnValueRegister as "scratch" register because loadArgument
+ // and storeArgument are functions that may need a scratch register themselves.
+ loadArgument(source, ReturnValueRegister);
+ storeArgument(ReturnValueRegister, result);
+#else
+ loadDouble(source, FPGpr0);
+ storeDouble(FPGpr0, result);
+#endif
+}
+
+// Emit the function prologue: save the return address (ARM) and the frame
+// pointer, allocate frame space for |locals| values plus the saved context
+// pointer, push callee-saved registers where the JIT uses them, and spill
+// ContextRegister to the bottom of the frame. Must stay in lock-step with
+// leaveStandardStackFrame().
+void Assembler::enterStandardStackFrame(int locals)
+{
+#if CPU(ARM)
+ push(JSC::ARMRegisters::lr);
+#endif
+ push(StackFrameRegister);
+ move(StackPointerRegister, StackFrameRegister);
+
+ // space for the locals and the ContextRegister
+ int32_t frameSize = locals * sizeof(QQmlJS::VM::Value) + sizeof(void*);
+
+#if CPU(X86) || CPU(X86_64)
+ frameSize = (frameSize + 15) & ~15; // align on 16 byte boundaries for MMX
+#endif
+ subPtr(TrustedImm32(frameSize), StackPointerRegister);
+
+#if CPU(X86) || CPU(ARM)
+ for (int saveReg = CalleeSavedFirstRegister; saveReg <= CalleeSavedLastRegister; ++saveReg)
+ push(static_cast<RegisterID>(saveReg));
+#endif
+ // save the ContextRegister
+ storePtr(ContextRegister, StackPointerRegister);
+}
+
+// Emit the function epilogue; exact mirror of enterStandardStackFrame():
+// same frame-size computation, callee-saved registers restored in reverse
+// push order, then frame pointer (and ARM link register) popped.
+void Assembler::leaveStandardStackFrame(int locals)
+{
+ // restore the ContextRegister
+ loadPtr(StackPointerRegister, ContextRegister);
+
+#if CPU(X86) || CPU(ARM)
+ for (int saveReg = CalleeSavedLastRegister; saveReg >= CalleeSavedFirstRegister; --saveReg)
+ pop(static_cast<RegisterID>(saveReg));
+#endif
+ // space for the locals and the ContextRegister
+ int32_t frameSize = locals * sizeof(QQmlJS::VM::Value) + sizeof(void*);
+#if CPU(X86) || CPU(X86_64)
+ frameSize = (frameSize + 15) & ~15; // align on 16 byte boundaries for MMX
+#endif
+ addPtr(TrustedImm32(frameSize), StackPointerRegister);
+
+ pop(StackFrameRegister);
+#if CPU(ARM)
+ pop(JSC::ARMRegisters::lr);
+#endif
+}
+
+
+
#define OP(op) \
{ isel_stringIfy(op), op, 0, 0 }
#define NULL_OP \
{ 0, 0, 0, 0 }
-const InstructionSelection::BinaryOperationInfo InstructionSelection::binaryOperations[QQmlJS::IR::LastAluOp + 1] = {
+const Assembler::BinaryOperationInfo Assembler::binaryOperations[QQmlJS::IR::LastAluOp + 1] = {
+ // Indexed by IR::AluOp; entry order must match the enum. NULL_OP entries
+ // have no fallback and are never dispatched through this table.
NULL_OP, // OpInvalid
NULL_OP, // OpIfTrue
NULL_OP, // OpNot
NULL_OP, // OpUPlus
NULL_OP, // OpCompl
- INLINE_OP(__qmljs_bit_and, &InstructionSelection::inline_and32, &InstructionSelection::inline_and32), // OpBitAnd
- INLINE_OP(__qmljs_bit_or, &InstructionSelection::inline_or32, &InstructionSelection::inline_or32), // OpBitOr
- INLINE_OP(__qmljs_bit_xor, &InstructionSelection::inline_xor32, &InstructionSelection::inline_xor32), // OpBitXor
+ INLINE_OP(__qmljs_bit_and, &Assembler::inline_and32, &Assembler::inline_and32), // OpBitAnd
+ INLINE_OP(__qmljs_bit_or, &Assembler::inline_or32, &Assembler::inline_or32), // OpBitOr
+ INLINE_OP(__qmljs_bit_xor, &Assembler::inline_xor32, &Assembler::inline_xor32), // OpBitXor
- INLINE_OP(__qmljs_add, &InstructionSelection::inline_add32, &InstructionSelection::inline_add32), // OpAdd
- INLINE_OP(__qmljs_sub, &InstructionSelection::inline_sub32, &InstructionSelection::inline_sub32), // OpSub
- INLINE_OP(__qmljs_mul, &InstructionSelection::inline_mul32, &InstructionSelection::inline_mul32), // OpMul
+ INLINE_OP(__qmljs_add, &Assembler::inline_add32, &Assembler::inline_add32), // OpAdd
+ INLINE_OP(__qmljs_sub, &Assembler::inline_sub32, &Assembler::inline_sub32), // OpSub
+ INLINE_OP(__qmljs_mul, &Assembler::inline_mul32, &Assembler::inline_mul32), // OpMul
OP(__qmljs_div), // OpDiv
OP(__qmljs_mod), // OpMod
- INLINE_OP(__qmljs_shl, &InstructionSelection::inline_shl32, &InstructionSelection::inline_shl32), // OpLShift
- INLINE_OP(__qmljs_shr, &InstructionSelection::inline_shr32, &InstructionSelection::inline_shr32), // OpRShift
- INLINE_OP(__qmljs_ushr, &InstructionSelection::inline_ushr32, &InstructionSelection::inline_ushr32), // OpURShift
+ INLINE_OP(__qmljs_shl, &Assembler::inline_shl32, &Assembler::inline_shl32), // OpLShift
+ INLINE_OP(__qmljs_shr, &Assembler::inline_shr32, &Assembler::inline_shr32), // OpRShift
+ INLINE_OP(__qmljs_ushr, &Assembler::inline_ushr32, &Assembler::inline_ushr32), // OpURShift
OP(__qmljs_gt), // OpGt
OP(__qmljs_lt), // OpLt
NULL_OP // OpOr
};
+// Emit code for: target = left <operation> right. When the table entry
+// provides inline implementations and any constant operand converts to an
+// integer, an int32 fast path is emitted with type and overflow checks;
+// all bail-out jumps fall through to the generic runtime call.
+void Assembler::generateBinOp(IR::AluOp operation, IR::Temp* target, IR::Expr* left, IR::Expr* right)
+{
+ const BinaryOperationInfo& info = binaryOperations[operation];
+ if (!info.fallbackImplementation) {
+ // NULL_OP entries are never dispatched here.
+ assert(!"unreachable");
+ return;
+ }
+
+ Value leftConst = Value::undefinedValue();
+ Value rightConst = Value::undefinedValue();
+
+ bool canDoInline = info.inlineMemRegOp && info.inlineImmRegOp;
+
+ if (canDoInline) {
+ // Constant operands must convert to int32 for the fast path.
+ if (left->asConst()) {
+ leftConst = convertToValue(left->asConst());
+ canDoInline = canDoInline && leftConst.tryIntegerConversion();
+ }
+ if (right->asConst()) {
+ rightConst = convertToValue(right->asConst());
+ canDoInline = canDoInline && rightConst.tryIntegerConversion();
+ }
+ }
+
+ Jump binOpFinished;
+
+ // Fast path: verify temp operands are tagged as integers at run time,
+ // perform the 32-bit operation, and store an integer-tagged result.
+ if (canDoInline) {
+
+ Jump leftTypeCheck;
+ if (left->asTemp()) {
+ Address typeAddress = loadTempAddress(ScratchRegister, left->asTemp());
+ typeAddress.offset += offsetof(VM::Value, tag);
+ leftTypeCheck = branch32(NotEqual, typeAddress, TrustedImm32(VM::Value::_Integer_Type));
+ }
+
+ Jump rightTypeCheck;
+ if (right->asTemp()) {
+ Address typeAddress = loadTempAddress(ScratchRegister, right->asTemp());
+ typeAddress.offset += offsetof(VM::Value, tag);
+ rightTypeCheck = branch32(NotEqual, typeAddress, TrustedImm32(VM::Value::_Integer_Type));
+ }
+
+ if (left->asTemp()) {
+ Address leftValue = loadTempAddress(ScratchRegister, left->asTemp());
+ leftValue.offset += offsetof(VM::Value, int_32);
+ load32(leftValue, IntegerOpRegister);
+ } else { // left->asConst()
+ move(TrustedImm32(leftConst.integerValue()), IntegerOpRegister);
+ }
+
+ Jump overflowCheck;
+
+ if (right->asTemp()) {
+ Address rightValue = loadTempAddress(ScratchRegister, right->asTemp());
+ rightValue.offset += offsetof(VM::Value, int_32);
+
+ overflowCheck = (this->*info.inlineMemRegOp)(rightValue, IntegerOpRegister);
+ } else { // right->asConst()
+ overflowCheck = (this->*info.inlineImmRegOp)(TrustedImm32(rightConst.integerValue()), IntegerOpRegister);
+ }
+
+ // Store the int32 result and its Integer tag into the target temp.
+ Address resultAddr = loadTempAddress(ScratchRegister, target);
+ Address resultValueAddr = resultAddr;
+ resultValueAddr.offset += offsetof(VM::Value, int_32);
+ store32(IntegerOpRegister, resultValueAddr);
+
+ Address resultTypeAddr = resultAddr;
+ resultTypeAddr.offset += offsetof(VM::Value, tag);
+ store32(TrustedImm32(VM::Value::_Integer_Type), resultTypeAddr);
+
+ binOpFinished = jump();
+
+ // Bail-out landing pad: all failed checks continue at the fallback.
+ if (leftTypeCheck.isSet())
+ leftTypeCheck.link(this);
+ if (rightTypeCheck.isSet())
+ rightTypeCheck.link(this);
+ if (overflowCheck.isSet())
+ overflowCheck.link(this);
+ }
+
+ // Fallback
+ generateFunctionCallImp(target, info.name, info.fallbackImplementation, left, right, ContextRegister);
+
+ if (binOpFinished.isSet())
+ binOpFinished.link(this);
+}
#if OS(LINUX)
static void printDisassembledOutputWithCalls(const char* output, const QHash<void*, const char*>& functions)
{
+ // NOTE(review): the body appears empty in this chunk — confirm against the full file.
}
#endif
-InstructionSelection::InstructionSelection(VM::ExecutionEngine *engine)
- : _engine(engine)
- , _function(0)
- , _block(0)
+// Resolve the recorded per-block patches and publish the generated code
+// pointer on the IR function.
+void Assembler::link()
{
-}
-
-InstructionSelection::~InstructionSelection()
-{
-}
-
-void InstructionSelection::operator()(IR::Function *function)
-{
- qSwap(_function, function);
-
- int locals = (_function->tempCount - _function->locals.size() + _function->maxNumberOfArguments);
- locals = (locals + 1) & ~1;
- enterStandardStackFrame(locals);
-
- int contextPointer = 0;
-#ifndef VALUE_FITS_IN_REGISTER
- // When the return VM value doesn't fit into a register, then
- // the caller provides a pointer for storage as first argument.
- // That shifts the index the context pointer argument by one.
- contextPointer++;
-#endif
-#if CPU(X86)
- loadPtr(addressForArgument(contextPointer), ContextRegister);
-#elif CPU(X86_64) || CPU(ARM)
- move(registerForArgument(contextPointer), ContextRegister);
-#else
- assert(!"TODO");
-#endif
-
- foreach (IR::BasicBlock *block, _function->basicBlocks) {
- _block = block;
- _addrs[block] = label();
- foreach (IR::Stmt *s, block->statements) {
- s->accept(this);
- }
- }
-
- leaveStandardStackFrame(locals);
-#ifndef VALUE_FITS_IN_REGISTER
- // Emulate ret(n) instruction
- // Pop off return address into scratch register ...
- pop(ScratchRegister);
- // ... and overwrite the invisible argument with
- // the return address.
- poke(ScratchRegister);
-#endif
- ret();
-
QHashIterator<IR::BasicBlock *, QVector<Jump> > it(_patches);
while (it.hasNext()) {
it.next();
}
_function->code = (Value (*)(VM::ExecutionContext *, const uchar *)) _function->codeRef.code().executableAddress();
+}
- qSwap(_function, function);
+// Instruction selector: walks the IR and drives a per-function Assembler
+// (created in operator()(), see below).
+InstructionSelection::InstructionSelection(VM::ExecutionEngine *engine)
+ : _engine(engine)
+ , _block(0)
+ , _function(0)
+ , _asm(0)
+{
}
-String *InstructionSelection::identifier(const QString &s)
+InstructionSelection::~InstructionSelection()
{
- return _engine->identifier(s);
+ // Dispose of any assembler left over from code generation.
+ delete _asm;
}
-InstructionSelection::Pointer InstructionSelection::loadTempAddress(RegisterID reg, IR::Temp *t)
+// Generate machine code for |function|: create a fresh Assembler, emit the
+// prologue, load the context pointer, visit every basic block's statements,
+// emit the epilogue, and link the result. The previous assembler (if any)
+// is restored afterwards so nested invocations are safe.
+void InstructionSelection::operator()(IR::Function *function)
{
- int32_t offset = 0;
- if (t->index < 0) {
- const int arg = -t->index - 1;
- loadPtr(Address(ContextRegister, offsetof(ExecutionContext, arguments)), reg);
- offset = arg * sizeof(Value);
- } else if (t->index < _function->locals.size()) {
- loadPtr(Address(ContextRegister, offsetof(ExecutionContext, locals)), reg);
- offset = t->index * sizeof(Value);
- } else {
- const int arg = _function->maxNumberOfArguments + t->index - _function->locals.size();
- // StackFrameRegister points to its old value on the stack, so even for the first temp we need to
- // subtract at least sizeof(Value).
- offset = - sizeof(Value) * (arg + 1);
- reg = StackFrameRegister;
+ qSwap(_function, function);
+ Assembler* oldAssembler = _asm;
+ _asm = new Assembler(_function);
+
+ // Frame slots for temps plus outgoing call arguments, rounded up to an
+ // even count.
+ int locals = (_function->tempCount - _function->locals.size() + _function->maxNumberOfArguments);
+ locals = (locals + 1) & ~1;
+ _asm->enterStandardStackFrame(locals);
+
+ int contextPointer = 0;
+#ifndef VALUE_FITS_IN_REGISTER
+ // When the return VM value doesn't fit into a register, then
+ // the caller provides a pointer for storage as first argument.
+ // That shifts the index the context pointer argument by one.
+ contextPointer++;
+#endif
+#if CPU(X86)
+ _asm->loadPtr(addressForArgument(contextPointer), Assembler::ContextRegister);
+#elif CPU(X86_64) || CPU(ARM)
+ _asm->move(_asm->registerForArgument(contextPointer), Assembler::ContextRegister);
+#else
+ assert(!"TODO");
+#endif
+
+ foreach (IR::BasicBlock *block, _function->basicBlocks) {
+ _block = block;
+ _asm->registerBlock(_block);
+ foreach (IR::Stmt *s, block->statements) {
+ s->accept(this);
+ }
}
- return Pointer(reg, offset);
+
+ _asm->leaveStandardStackFrame(locals);
+#ifndef VALUE_FITS_IN_REGISTER
+ // Emulate ret(n) instruction
+ // Pop off return address into scratch register ...
+ _asm->pop(Assembler::ScratchRegister);
+ // ... and overwrite the invisible argument with
+ // the return address.
+ _asm->poke(Assembler::ScratchRegister);
+#endif
+ _asm->ret();
+
+ _asm->link();
+
+ qSwap(_function, function);
+ delete _asm;
+ _asm = oldAssembler;
+}
+
+// Intern |s| in the engine's identifier table.
+String *InstructionSelection::identifier(const QString &s)
+{
+ return _engine->identifier(s);
}
void InstructionSelection::callActivationProperty(IR::Call *call, IR::Temp *result)
break;
case IR::Name::builtin_typeof: {
if (IR::Member *m = call->args->expr->asMember()) {
- generateFunctionCall(result, __qmljs_builtin_typeof_member, m->base->asTemp(), identifier(*m->name), ContextRegister);
+ generateFunctionCall(result, __qmljs_builtin_typeof_member, m->base->asTemp(), identifier(*m->name), Assembler::ContextRegister);
return;
} else if (IR::Subscript *ss = call->args->expr->asSubscript()) {
- generateFunctionCall(result, __qmljs_builtin_typeof_element, ss->base->asTemp(), ss->index->asTemp(), ContextRegister);
+ generateFunctionCall(result, __qmljs_builtin_typeof_element, ss->base->asTemp(), ss->index->asTemp(), Assembler::ContextRegister);
return;
} else if (IR::Name *n = call->args->expr->asName()) {
- generateFunctionCall(result, __qmljs_builtin_typeof_name, identifier(*n->id), ContextRegister);
+ generateFunctionCall(result, __qmljs_builtin_typeof_name, identifier(*n->id), Assembler::ContextRegister);
return;
} else if (IR::Temp *arg = call->args->expr->asTemp()){
assert(arg != 0);
- generateFunctionCall(result, __qmljs_builtin_typeof, arg, ContextRegister);
+ generateFunctionCall(result, __qmljs_builtin_typeof, arg, Assembler::ContextRegister);
} else {
assert(false);
}
break;
case IR::Name::builtin_delete: {
if (IR::Member *m = call->args->expr->asMember()) {
- generateFunctionCall(result, __qmljs_delete_member, ContextRegister, m->base->asTemp(), identifier(*m->name));
+ generateFunctionCall(result, __qmljs_delete_member, Assembler::ContextRegister, m->base->asTemp(), identifier(*m->name));
return;
} else if (IR::Subscript *ss = call->args->expr->asSubscript()) {
- generateFunctionCall(result, __qmljs_delete_subscript, ContextRegister, ss->base->asTemp(), ss->index->asTemp());
+ generateFunctionCall(result, __qmljs_delete_subscript, Assembler::ContextRegister, ss->base->asTemp(), ss->index->asTemp());
return;
} else if (IR::Name *n = call->args->expr->asName()) {
- generateFunctionCall(result, __qmljs_delete_name, ContextRegister, identifier(*n->id));
+ generateFunctionCall(result, __qmljs_delete_name, Assembler::ContextRegister, identifier(*n->id));
return;
} else if (call->args->expr->asTemp()){
// ### should throw in strict mode
- Address dest = loadTempAddress(ScratchRegister, result);
+ Address dest = _asm->loadTempAddress(Assembler::ScratchRegister, result);
Value v = Value::fromBoolean(false);
- storeValue(v, dest);
+ _asm->storeValue(v, dest);
return;
}
break;
case IR::Name::builtin_throw: {
IR::Temp *arg = call->args->expr->asTemp();
assert(arg != 0);
- generateFunctionCall(result, __qmljs_builtin_throw, arg, ContextRegister);
+ generateFunctionCall(result, __qmljs_builtin_throw, arg, Assembler::ContextRegister);
}
break;
case IR::Name::builtin_create_exception_handler:
- generateFunctionCall(ReturnValueRegister, __qmljs_create_exception_handler, ContextRegister);
- generateFunctionCall(result, setjmp, ReturnValueRegister);
+ generateFunctionCall(Assembler::ReturnValueRegister, __qmljs_create_exception_handler, Assembler::ContextRegister);
+ generateFunctionCall(result, setjmp, Assembler::ReturnValueRegister);
break;
case IR::Name::builtin_delete_exception_handler:
- generateFunctionCall(Void, __qmljs_delete_exception_handler, ContextRegister);
+ generateFunctionCall(Assembler::Void, __qmljs_delete_exception_handler, Assembler::ContextRegister);
break;
case IR::Name::builtin_get_exception:
- generateFunctionCall(result, __qmljs_get_exception, ContextRegister);
+ generateFunctionCall(result, __qmljs_get_exception, Assembler::ContextRegister);
break;
case IR::Name::builtin_foreach_iterator_object: {
IR::Temp *arg = call->args->expr->asTemp();
assert(arg != 0);
- generateFunctionCall(result, __qmljs_foreach_iterator_object, arg, ContextRegister);
+ generateFunctionCall(result, __qmljs_foreach_iterator_object, arg, Assembler::ContextRegister);
}
break;
case IR::Name::builtin_foreach_next_property_name: {
case IR::Name::builtin_push_with: {
IR::Temp *arg = call->args->expr->asTemp();
assert(arg != 0);
- generateFunctionCall(Void, __qmljs_builtin_push_with, arg, ContextRegister);
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_push_with, arg, Assembler::ContextRegister);
}
break;
case IR::Name::builtin_pop_with:
- generateFunctionCall(Void, __qmljs_builtin_pop_with, ContextRegister);
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_pop_with, Assembler::ContextRegister);
break;
case IR::Name::builtin_declare_vars: {
if (!call->args)
for (IR::ExprList *it = call->args->next; it; it = it->next) {
IR::Name *arg = it->expr->asName();
assert(arg != 0);
- generateFunctionCall(Void, __qmljs_builtin_declare_var, ContextRegister,
- TrustedImm32(deletable->value != 0), identifier(*arg->id));
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_declare_var, Assembler::ContextRegister,
+ Assembler::TrustedImm32(deletable->value != 0), identifier(*arg->id));
}
}
}
int argc = prepareVariableArguments(call->args);
IR::Temp* thisObject = 0;
- generateFunctionCall(result, __qmljs_call_value, ContextRegister, thisObject, baseTemp, baseAddressForCallArguments(), TrustedImm32(argc));
+ generateFunctionCall(result, __qmljs_call_value, Assembler::ContextRegister, thisObject, baseTemp, baseAddressForCallArguments(), Assembler::TrustedImm32(argc));
}
void InstructionSelection::callProperty(IR::Call *call, IR::Temp *result)
assert(member->base->asTemp() != 0);
int argc = prepareVariableArguments(call->args);
- generateFunctionCall(result, __qmljs_call_property, ContextRegister, member->base->asTemp(), identifier(*member->name), baseAddressForCallArguments(), TrustedImm32(argc));
+ generateFunctionCall(result, __qmljs_call_property, Assembler::ContextRegister, member->base->asTemp(), identifier(*member->name), baseAddressForCallArguments(), Assembler::TrustedImm32(argc));
}
void InstructionSelection::constructActivationProperty(IR::New *call, IR::Temp *result)
assert(member->base->asTemp() != 0);
int argc = prepareVariableArguments(call->args);
- generateFunctionCall(result, __qmljs_construct_property, ContextRegister, member->base->asTemp(), identifier(*member->name), baseAddressForCallArguments(), TrustedImm32(argc));
+ generateFunctionCall(result, __qmljs_construct_property, Assembler::ContextRegister, member->base->asTemp(), identifier(*member->name), baseAddressForCallArguments(), Assembler::TrustedImm32(argc));
}
void InstructionSelection::constructValue(IR::New *call, IR::Temp *result)
assert(baseTemp != 0);
int argc = prepareVariableArguments(call->args);
- generateFunctionCall(result, __qmljs_construct_value, ContextRegister, baseTemp, baseAddressForCallArguments(), TrustedImm32(argc));
+ generateFunctionCall(result, __qmljs_construct_value, Assembler::ContextRegister, baseTemp, baseAddressForCallArguments(), Assembler::TrustedImm32(argc));
}
void InstructionSelection::visitExp(IR::Exp *s)
String *propertyName = identifier(*n->id);
if (s->source->asTemp() || s->source->asConst()) {
- generateFunctionCall(Void, __qmljs_set_activation_property, ContextRegister, propertyName, s->source);
+ generateFunctionCall(Assembler::Void, __qmljs_set_activation_property, Assembler::ContextRegister, propertyName, s->source);
return;
} else {
Q_UNREACHABLE();
} else if (IR::Temp *t = s->target->asTemp()) {
if (IR::Name *n = s->source->asName()) {
if (*n->id == QStringLiteral("this")) { // ### `this' should be a builtin.
- generateFunctionCall(t, __qmljs_get_thisObject, ContextRegister);
+ generateFunctionCall(t, __qmljs_get_thisObject, Assembler::ContextRegister);
} else {
String *propertyName = identifier(*n->id);
- generateFunctionCall(t, __qmljs_get_activation_property, ContextRegister, propertyName);
+ generateFunctionCall(t, __qmljs_get_activation_property, Assembler::ContextRegister, propertyName);
}
return;
} else if (IR::Const *c = s->source->asConst()) {
- Address dest = loadTempAddress(ScratchRegister, t);
+ Address dest = _asm->loadTempAddress(Assembler::ScratchRegister, t);
Value v;
switch (c->type) {
case IR::NullType:
Q_UNIMPLEMENTED();
assert(!"TODO");
}
- storeValue(v, dest);
+ _asm->storeValue(v, dest);
return;
} else if (IR::Temp *t2 = s->source->asTemp()) {
- copyValue(t, t2);
+ _asm->copyValue(t, t2);
return;
} else if (IR::String *str = s->source->asString()) {
- Address dest = loadTempAddress(ScratchRegister, t);
+ Address dest = _asm->loadTempAddress(Assembler::ScratchRegister, t);
Value v = Value::fromString(_engine->newString(*str->value));
- storeValue(v, dest);
+ _asm->storeValue(v, dest);
return;
} else if (IR::RegExp *re = s->source->asRegExp()) {
- Address dest = loadTempAddress(ScratchRegister, t);
+ Address dest = _asm->loadTempAddress(Assembler::ScratchRegister, t);
Value v = Value::fromObject(_engine->newRegExpObject(*re->value, re->flags));
- storeValue(v, dest);
+ _asm->storeValue(v, dest);
return;
} else if (IR::Closure *clos = s->source->asClosure()) {
- generateFunctionCall(t, __qmljs_init_closure, TrustedImmPtr(clos->value), ContextRegister);
+ generateFunctionCall(t, __qmljs_init_closure, Assembler::TrustedImmPtr(clos->value), Assembler::ContextRegister);
return;
} else if (IR::New *ctor = s->source->asNew()) {
if (ctor->base->asName()) {
} else if (IR::Member *m = s->source->asMember()) {
//__qmljs_get_property(ctx, result, object, name);
if (IR::Temp *base = m->base->asTemp()) {
- generateFunctionCall(t, __qmljs_get_property, ContextRegister, base, identifier(*m->name));
+ generateFunctionCall(t, __qmljs_get_property, Assembler::ContextRegister, base, identifier(*m->name));
return;
}
assert(!"wip");
return;
} else if (IR::Subscript *ss = s->source->asSubscript()) {
- generateFunctionCall(t, __qmljs_get_element, ContextRegister, ss->base->asTemp(), ss->index->asTemp());
+ generateFunctionCall(t, __qmljs_get_element, Assembler::ContextRegister, ss->base->asTemp(), ss->index->asTemp());
return;
} else if (IR::Unop *u = s->source->asUnop()) {
if (IR::Temp *e = u->expr->asTemp()) {
} // switch
if (op)
- generateFunctionCallImp(t, opName, op, e, ContextRegister);
+ _asm->generateFunctionCallImp(t, opName, op, e, Assembler::ContextRegister);
return;
}
} else if (IR::Binop *b = s->source->asBinop()) {
if ((b->left->asTemp() || b->left->asConst()) &&
(b->right->asTemp() || b->right->asConst())) {
- generateBinOp((IR::AluOp)b->op, t, b->left, b->right);
+ _asm->generateBinOp((IR::AluOp)b->op, t, b->left, b->right);
return;
}
} else if (IR::Call *c = s->source->asCall()) {
} else if (IR::Member *m = s->target->asMember()) {
if (IR::Temp *base = m->base->asTemp()) {
if (s->source->asTemp() || s->source->asConst()) {
- generateFunctionCall(Void, __qmljs_set_property, ContextRegister, base, identifier(*m->name), s->source);
+ generateFunctionCall(Assembler::Void, __qmljs_set_property, Assembler::ContextRegister, base, identifier(*m->name), s->source);
return;
} else {
Q_UNREACHABLE();
}
} else if (IR::Subscript *ss = s->target->asSubscript()) {
if (s->source->asTemp() || s->source->asConst()) {
- generateFunctionCall(Void, __qmljs_set_element, ContextRegister, ss->base->asTemp(), ss->index->asTemp(), s->source);
+ generateFunctionCall(Assembler::Void, __qmljs_set_element, Assembler::ContextRegister, ss->base->asTemp(), ss->index->asTemp(), s->source);
return;
} else {
Q_UNIMPLEMENTED();
// inplace assignment, e.g. x += 1, ++x, ...
if (IR::Temp *t = s->target->asTemp()) {
if (s->source->asTemp() || s->source->asConst()) {
- generateBinOp((IR::AluOp)s->op, t, t, s->source);
+ _asm->generateBinOp((IR::AluOp)s->op, t, t, s->source);
return;
}
} else if (IR::Name *n = s->target->asName()) {
break;
}
if (op) {
- generateFunctionCallImp(Void, opName, op, s->source, identifier(*n->id), ContextRegister);
+ _asm->generateFunctionCallImp(Assembler::Void, opName, op, s->source, identifier(*n->id), Assembler::ContextRegister);
}
return;
}
if (op) {
IR::Temp* base = ss->base->asTemp();
IR::Temp* index = ss->index->asTemp();
- generateFunctionCallImp(Void, opName, op, base, index, s->source, ContextRegister);
+ _asm->generateFunctionCallImp(Assembler::Void, opName, op, base, index, s->source, Assembler::ContextRegister);
}
return;
}
if (op) {
IR::Temp* base = m->base->asTemp();
String* member = identifier(*m->name);
- generateFunctionCallImp(Void, opName, op, s->source, base, member, ContextRegister);
+ _asm->generateFunctionCallImp(Assembler::Void, opName, op, s->source, base, member, Assembler::ContextRegister);
}
return;
}
void InstructionSelection::visitJump(IR::Jump *s)
{
- jumpToBlock(s->target);
-}
-
-void InstructionSelection::jumpToBlock(IR::BasicBlock *target)
-{
- if (_block->index + 1 != target->index)
- _patches[target].append(jump());
+ // The assembler omits the jump when the target is the fall-through block.
+ _asm->jumpToBlock(_block, s->target);
}
void InstructionSelection::visitCJump(IR::CJump *s)
{
if (IR::Temp *t = s->cond->asTemp()) {
- Address temp = loadTempAddress(ScratchRegister, t);
+ Address temp = _asm->loadTempAddress(Assembler::ScratchRegister, t);
Address tag = temp;
tag.offset += offsetof(VM::Value, tag);
- Jump booleanConversion = branch32(NotEqual, tag, TrustedImm32(VM::Value::Boolean_Type));
+ Assembler::Jump booleanConversion = _asm->branch32(Assembler::NotEqual, tag, Assembler::TrustedImm32(VM::Value::Boolean_Type));
Address data = temp;
data.offset += offsetof(VM::Value, int_32);
- load32(data, ReturnValueRegister);
- Jump testBoolean = jump();
+ _asm->load32(data, Assembler::ReturnValueRegister);
+ Assembler::Jump testBoolean = _asm->jump();
- booleanConversion.link(this);
+ booleanConversion.link(_asm);
{
- generateFunctionCall(ReturnValueRegister, __qmljs_to_boolean, t, ContextRegister);
+ generateFunctionCall(Assembler::ReturnValueRegister, __qmljs_to_boolean, t, Assembler::ContextRegister);
}
- testBoolean.link(this);
- Jump target = branch32(NotEqual, ReturnValueRegister, TrustedImm32(0));
- _patches[s->iftrue].append(target);
+ testBoolean.link(_asm);
+ Assembler::Jump target = _asm->branch32(Assembler::NotEqual, Assembler::ReturnValueRegister, Assembler::TrustedImm32(0));
+ _asm->addPatch(s->iftrue, target);
- jumpToBlock(s->iffalse);
+ _asm->jumpToBlock(_block, s->iffalse);
return;
} else if (IR::Binop *b = s->cond->asBinop()) {
if ((b->left->asTemp() || b->left->asConst()) &&
case IR::OpIn: setOp(op, opName, __qmljs_cmp_in); break;
} // switch
- generateFunctionCallImp(ReturnValueRegister, opName, op, b->left, b->right, ContextRegister);
+ _asm->generateFunctionCallImp(Assembler::ReturnValueRegister, opName, op, b->left, b->right, Assembler::ContextRegister);
- Jump target = branch32(NotEqual, ReturnValueRegister, TrustedImm32(0));
- _patches[s->iftrue].append(target);
+ Assembler::Jump target = _asm->branch32(Assembler::NotEqual, Assembler::ReturnValueRegister, Assembler::TrustedImm32(0));
+ _asm->addPatch(s->iftrue, target);
- jumpToBlock(s->iffalse);
+ _asm->jumpToBlock(_block, s->iffalse);
return;
} else {
assert(!"wip");
{
if (IR::Temp *t = s->expr->asTemp()) {
#ifdef VALUE_FITS_IN_REGISTER
- copyValue(ReturnValueRegister, t);
+ _asm->copyValue(Assembler::ReturnValueRegister, t);
#else
- loadPtr(addressForArgument(0), ReturnValueRegister);
- copyValue(Address(ReturnValueRegister, 0), t);
+ _asm->loadPtr(addressForArgument(0), Assembler::ReturnValueRegister);
+ _asm->copyValue(Address(Assembler::ReturnValueRegister, 0), t);
#endif
return;
}
for (IR::ExprList *it = args; it; it = it->next, ++i) {
IR::Temp *arg = it->expr->asTemp();
assert(arg != 0);
- copyValue(argumentAddressForCall(i), arg);
+ _asm->copyValue(argumentAddressForCall(i), arg);
}
return argc;
assert(baseName != 0);
int argc = prepareVariableArguments(args);
- generateFunctionCallImp(result, name, method, ContextRegister, identifier(*baseName->id), baseAddressForCallArguments(), TrustedImm32(argc));
+ _asm->generateFunctionCallImp(result, name, method, Assembler::ContextRegister, identifier(*baseName->id), baseAddressForCallArguments(), Assembler::TrustedImm32(argc));
}
void InstructionSelection::callRuntimeMethodImp(IR::Temp *result, const char* name, BuiltinMethod method, IR::ExprList *args)
{
int argc = prepareVariableArguments(args);
- generateFunctionCallImp(result, name, method, ContextRegister, baseAddressForCallArguments(), TrustedImm32(argc));
-}
-
-template <typename Result, typename Source>
-void InstructionSelection::copyValue(Result result, Source source)
-{
-#ifdef VALUE_FITS_IN_REGISTER
- // Use ReturnValueRegister as "scratch" register because loadArgument
- // and storeArgument are functions that may need a scratch register themselves.
- loadArgument(source, ReturnValueRegister);
- storeArgument(ReturnValueRegister, result);
-#else
- loadDouble(source, FPGpr0);
- storeDouble(FPGpr0, result);
-#endif
+ // Emit the runtime call through the assembler with the marshalled arguments.
+ _asm->generateFunctionCallImp(result, name, method, Assembler::ContextRegister, baseAddressForCallArguments(), Assembler::TrustedImm32(argc));
}
-void InstructionSelection::generateBinOp(IR::AluOp operation, IR::Temp* target, IR::Expr* left, IR::Expr* right)
-{
- const BinaryOperationInfo& info = binaryOperations[operation];
- if (!info.fallbackImplementation) {
- assert(!"unreachable");
- return;
- }
-
- Value leftConst = Value::undefinedValue();
- Value rightConst = Value::undefinedValue();
-
- bool canDoInline = info.inlineMemRegOp && info.inlineImmRegOp;
-
- if (canDoInline) {
- if (left->asConst()) {
- leftConst = convertToValue(left->asConst());
- canDoInline = canDoInline && leftConst.tryIntegerConversion();
- }
- if (right->asConst()) {
- rightConst = convertToValue(right->asConst());
- canDoInline = canDoInline && rightConst.tryIntegerConversion();
- }
- }
-
- Jump binOpFinished;
-
- if (canDoInline) {
-
- Jump leftTypeCheck;
- if (left->asTemp()) {
- Address typeAddress = loadTempAddress(ScratchRegister, left->asTemp());
- typeAddress.offset += offsetof(VM::Value, tag);
- leftTypeCheck = branch32(NotEqual, typeAddress, TrustedImm32(VM::Value::_Integer_Type));
- }
-
- Jump rightTypeCheck;
- if (right->asTemp()) {
- Address typeAddress = loadTempAddress(ScratchRegister, right->asTemp());
- typeAddress.offset += offsetof(VM::Value, tag);
- rightTypeCheck = branch32(NotEqual, typeAddress, TrustedImm32(VM::Value::_Integer_Type));
- }
-
- if (left->asTemp()) {
- Address leftValue = loadTempAddress(ScratchRegister, left->asTemp());
- leftValue.offset += offsetof(VM::Value, int_32);
- load32(leftValue, IntegerOpRegister);
- } else { // left->asConst()
- move(TrustedImm32(leftConst.integerValue()), IntegerOpRegister);
- }
-
- Jump overflowCheck;
- if (right->asTemp()) {
- Address rightValue = loadTempAddress(ScratchRegister, right->asTemp());
- rightValue.offset += offsetof(VM::Value, int_32);
-
- overflowCheck = (this->*info.inlineMemRegOp)(rightValue, IntegerOpRegister);
- } else { // right->asConst()
- overflowCheck = (this->*info.inlineImmRegOp)(TrustedImm32(rightConst.integerValue()), IntegerOpRegister);
- }
-
- Address resultAddr = loadTempAddress(ScratchRegister, target);
- Address resultValueAddr = resultAddr;
- resultValueAddr.offset += offsetof(VM::Value, int_32);
- store32(IntegerOpRegister, resultValueAddr);
-
- Address resultTypeAddr = resultAddr;
- resultTypeAddr.offset += offsetof(VM::Value, tag);
- store32(TrustedImm32(VM::Value::_Integer_Type), resultTypeAddr);
-
- binOpFinished = jump();
-
- if (leftTypeCheck.isSet())
- leftTypeCheck.link(this);
- if (rightTypeCheck.isSet())
- rightTypeCheck.link(this);
- if (overflowCheck.isSet())
- overflowCheck.link(this);
- }
-
- // Fallback
- generateFunctionCallImp(target, info.name, info.fallbackImplementation, left, right, ContextRegister);
-
- if (binOpFinished.isSet())
- binOpFinished.link(this);
-}
namespace QQmlJS {
namespace MASM {
-class InstructionSelection: protected IR::StmtVisitor, public JSC::MacroAssembler, public EvalInstructionSelection
+class Assembler : public JSC::MacroAssembler
{
public:
- InstructionSelection(VM::ExecutionEngine *engine);
- ~InstructionSelection();
-
- virtual void run(IR::Function *function)
- { this->operator()(function); }
- void operator()(IR::Function *function);
-
-protected:
+ Assembler(IR::Function* function);
#if CPU(X86)
#undef VALUE_FITS_IN_REGISTER
#error Argh.
#endif
- struct VoidType {};
- static const VoidType Void;
-
// Explicit type to allow distinguishing between
// pushing an address itself or the value it points
// to onto the stack when calling functions.
{}
};
- void enterStandardStackFrame(int locals)
- {
-#if CPU(ARM)
- push(JSC::ARMRegisters::lr);
-#endif
- push(StackFrameRegister);
- move(StackPointerRegister, StackFrameRegister);
-
- // space for the locals and the ContextRegister
- int32_t frameSize = locals * sizeof(QQmlJS::VM::Value) + sizeof(void*);
-
-#if CPU(X86) || CPU(X86_64)
- frameSize = (frameSize + 15) & ~15; // align on 16 byte boundaries for MMX
-#endif
- subPtr(TrustedImm32(frameSize), StackPointerRegister);
-
-#if CPU(X86) || CPU(ARM)
- for (int saveReg = CalleeSavedFirstRegister; saveReg <= CalleeSavedLastRegister; ++saveReg)
- push(static_cast<RegisterID>(saveReg));
-#endif
- // save the ContextRegister
- storePtr(ContextRegister, StackPointerRegister);
- }
- void leaveStandardStackFrame(int locals)
- {
- // restore the ContextRegister
- loadPtr(StackPointerRegister, ContextRegister);
-
-#if CPU(X86) || CPU(ARM)
- for (int saveReg = CalleeSavedLastRegister; saveReg >= CalleeSavedFirstRegister; --saveReg)
- pop(static_cast<RegisterID>(saveReg));
-#endif
- // space for the locals and the ContextRegister
- int32_t frameSize = locals * sizeof(QQmlJS::VM::Value) + sizeof(void*);
-#if CPU(X86) || CPU(X86_64)
- frameSize = (frameSize + 15) & ~15; // align on 16 byte boundaries for MMX
-#endif
- addPtr(TrustedImm32(frameSize), StackPointerRegister);
-
- pop(StackFrameRegister);
-#if CPU(ARM)
- pop(JSC::ARMRegisters::lr);
-#endif
- }
-
- Address addressForArgument(int index) const
- {
- if (index < RegisterArgumentCount)
- return Address(registerForArgument(index), 0);
-
- // StackFrameRegister points to its old value on the stack, and above
- // it we have the return address, hence the need to step over two
- // values before reaching the first argument.
- return Address(StackFrameRegister, (index - RegisterArgumentCount + 2) * sizeof(void*));
- }
-
- // Some run-time functions take (Value* args, int argc). This function is for populating
- // the args.
- Pointer argumentAddressForCall(int argument)
- {
- const int index = _function->maxNumberOfArguments - argument;
- return Pointer(StackFrameRegister, sizeof(VM::Value) * (-index)
- - sizeof(void*) // size of ebp
- );
- }
- Pointer baseAddressForCallArguments()
- {
- return argumentAddressForCall(0);
- }
-
- VM::String *identifier(const QString &s);
- Pointer loadTempAddress(RegisterID reg, IR::Temp *t);
- void callActivationProperty(IR::Call *call, IR::Temp *result);
- void callProperty(IR::Call *call, IR::Temp *result);
- void constructActivationProperty(IR::New *call, IR::Temp *result);
- void constructProperty(IR::New *ctor, IR::Temp *result);
- void callValue(IR::Call *call, IR::Temp *result);
- void constructValue(IR::New *call, IR::Temp *result);
-
- virtual void visitExp(IR::Exp *);
- virtual void visitEnter(IR::Enter *);
- virtual void visitLeave(IR::Leave *);
- virtual void visitMove(IR::Move *s);
- virtual void visitJump(IR::Jump *);
- virtual void visitCJump(IR::CJump *);
- virtual void visitRet(IR::Ret *);
+ struct VoidType {};
+ static const VoidType Void;
-private:
- void jumpToBlock(IR::BasicBlock *target);
typedef JSC::FunctionPtr FunctionPtr;
+ struct CallToLink {
+ Call call;
+ FunctionPtr externalFunction;
+ const char* functionName;
+ };
+
void callAbsolute(const char* functionName, FunctionPtr function) {
CallToLink ctl;
ctl.call = call();
_callsToLink.append(ctl);
}
+ void registerBlock(IR::BasicBlock*);
+ void jumpToBlock(IR::BasicBlock* current, IR::BasicBlock *target);
+ void addPatch(IR::BasicBlock* targetBlock, Jump targetJump);
+
+ Pointer loadTempAddress(RegisterID reg, IR::Temp *t);
+
void loadArgument(RegisterID source, RegisterID dest)
{
move(source, dest);
push(TrustedImmPtr(name));
}
+ using JSC::MacroAssembler::loadDouble;
+ void loadDouble(IR::Temp* temp, FPRegisterID dest)
+ {
+ Pointer ptr = loadTempAddress(ScratchRegister, temp);
+ loadDouble(ptr, dest);
+ }
+
+ using JSC::MacroAssembler::storeDouble;
+ void storeDouble(FPRegisterID source, IR::Temp* temp)
+ {
+ Pointer ptr = loadTempAddress(ScratchRegister, temp);
+ storeDouble(source, ptr);
+ }
+
+ template <typename Result, typename Source>
+ void copyValue(Result result, Source source);
+
+ void storeValue(VM::Value value, Address destination)
+ {
+#ifdef VALUE_FITS_IN_REGISTER
+ store64(TrustedImm64(value.val), destination);
+#else
+ store32(TrustedImm32(value.int_32), destination);
+ destination.offset += 4;
+ store32(TrustedImm32(value.tag), destination);
+#endif
+ }
+
+ void enterStandardStackFrame(int locals);
+ void leaveStandardStackFrame(int locals);
+
void callFunctionPrologue()
{
#if CPU(X86)
#endif
}
- #define isel_stringIfyx(s) #s
- #define isel_stringIfy(s) isel_stringIfyx(s)
-
- #define generateFunctionCall(t, function, ...) \
- generateFunctionCallImp(t, isel_stringIfy(function), function, __VA_ARGS__)
-
static inline int sizeOfArgument(VoidType)
{ return 0; }
static inline int sizeOfArgument(RegisterID)
struct ArgumentLoader
{
- ArgumentLoader(InstructionSelection* instructionSelection, int totalNumberOfArguments)
- : isel(instructionSelection)
+ ArgumentLoader(Assembler* _assembler, int totalNumberOfArguments)
+ : assembler(_assembler)
, stackSpaceForArguments(0)
, currentRegisterIndex(qMin(totalNumberOfArguments - 1, RegisterArgumentCount - 1))
{
void load(T argument)
{
if (currentRegisterIndex >= 0) {
- isel->loadArgument(argument, registerForArgument(currentRegisterIndex));
+ assembler->loadArgument(argument, registerForArgument(currentRegisterIndex));
--currentRegisterIndex;
} else {
- isel->push(argument);
+ assembler->push(argument);
stackSpaceForArguments += sizeOfArgument(argument);
}
}
--currentRegisterIndex;
}
- InstructionSelection *isel;
+ Assembler *assembler;
int stackSpaceForArguments;
int currentRegisterIndex;
};
generateFunctionCallImp(r, functionName, function, arg1, VoidType(), VoidType(), VoidType(), VoidType());
}
- int prepareVariableArguments(IR::ExprList* args);
-
- typedef VM::Value (*ActivationMethod)(VM::ExecutionContext *, VM::String *name, VM::Value *args, int argc);
- typedef VM::Value (*BuiltinMethod)(VM::ExecutionContext *, VM::Value *args, int argc);
- void callRuntimeMethodImp(IR::Temp *result, const char* name, ActivationMethod method, IR::Expr *base, IR::ExprList *args);
- void callRuntimeMethodImp(IR::Temp *result, const char* name, BuiltinMethod method, IR::ExprList *args);
-#define callRuntimeMethod(result, function, ...) \
- callRuntimeMethodImp(result, isel_stringIfy(function), function, __VA_ARGS__)
-
- using JSC::MacroAssembler::loadDouble;
- void loadDouble(IR::Temp* temp, FPRegisterID dest)
- {
- Pointer ptr = loadTempAddress(ScratchRegister, temp);
- loadDouble(ptr, dest);
- }
-
- using JSC::MacroAssembler::storeDouble;
- void storeDouble(FPRegisterID source, IR::Temp* temp)
- {
- Pointer ptr = loadTempAddress(ScratchRegister, temp);
- storeDouble(source, ptr);
- }
-
- template <typename Result, typename Source>
- void copyValue(Result result, Source source);
-
- struct CallToLink {
- Call call;
- FunctionPtr externalFunction;
- const char* functionName;
- };
-
- void storeValue(VM::Value value, Address destination)
- {
-#ifdef VALUE_FITS_IN_REGISTER
- store64(TrustedImm64(value.val), destination);
-#else
- store32(TrustedImm32(value.int_32), destination);
- destination.offset += 4;
- store32(TrustedImm32(value.tag), destination);
-#endif
- }
-
- typedef Jump (InstructionSelection::*MemRegBinOp)(Address, RegisterID);
- typedef Jump (InstructionSelection::*ImmRegBinOp)(TrustedImm32, RegisterID);
+ typedef Jump (Assembler::*MemRegBinOp)(Address, RegisterID);
+ typedef Jump (Assembler::*ImmRegBinOp)(TrustedImm32, RegisterID);
struct BinaryOperationInfo {
const char *name;
return Jump();
}
- VM::ExecutionEngine *_engine;
- IR::Function *_function;
- IR::BasicBlock *_block;
- QHash<IR::BasicBlock *, QVector<Jump> > _patches;
+ void link();
+
+private:
+ IR::Function* _function;
QHash<IR::BasicBlock *, Label> _addrs;
+ QHash<IR::BasicBlock *, QVector<Jump> > _patches;
QList<CallToLink> _callsToLink;
};
+class InstructionSelection: protected IR::StmtVisitor, public EvalInstructionSelection
+{
+public:
+ InstructionSelection(VM::ExecutionEngine *engine);
+ ~InstructionSelection();
+
+ virtual void run(IR::Function *function)
+ { this->operator()(function); }
+ void operator()(IR::Function *function);
+
+protected:
+ typedef Assembler::Address Address;
+ typedef Assembler::Pointer Pointer;
+
+ Address addressForArgument(int index) const
+ {
+ if (index < Assembler::RegisterArgumentCount)
+ return Address(_asm->registerForArgument(index), 0);
+
+ // StackFrameRegister points to its old value on the stack, and above
+ // it we have the return address, hence the need to step over two
+ // values before reaching the first argument.
+ return Address(Assembler::StackFrameRegister, (index - Assembler::RegisterArgumentCount + 2) * sizeof(void*));
+ }
+
+ // Some run-time functions take (Value* args, int argc). This function is for populating
+ // the args.
+ Pointer argumentAddressForCall(int argument)
+ {
+ const int index = _function->maxNumberOfArguments - argument;
+ return Pointer(Assembler::StackFrameRegister, sizeof(VM::Value) * (-index)
+ - sizeof(void*) // size of ebp
+ );
+ }
+ Pointer baseAddressForCallArguments()
+ {
+ return argumentAddressForCall(0);
+ }
+
+ VM::String *identifier(const QString &s);
+ void callActivationProperty(IR::Call *call, IR::Temp *result);
+ void callProperty(IR::Call *call, IR::Temp *result);
+ void constructActivationProperty(IR::New *call, IR::Temp *result);
+ void constructProperty(IR::New *ctor, IR::Temp *result);
+ void callValue(IR::Call *call, IR::Temp *result);
+ void constructValue(IR::New *call, IR::Temp *result);
+
+ virtual void visitExp(IR::Exp *);
+ virtual void visitEnter(IR::Enter *);
+ virtual void visitLeave(IR::Leave *);
+ virtual void visitMove(IR::Move *s);
+ virtual void visitJump(IR::Jump *);
+ virtual void visitCJump(IR::CJump *);
+ virtual void visitRet(IR::Ret *);
+
+private:
+ #define isel_stringIfyx(s) #s
+ #define isel_stringIfy(s) isel_stringIfyx(s)
+
+ #define generateFunctionCall(t, function, ...) \
+ _asm->generateFunctionCallImp(t, isel_stringIfy(function), function, __VA_ARGS__)
+
+ int prepareVariableArguments(IR::ExprList* args);
+
+ typedef VM::Value (*ActivationMethod)(VM::ExecutionContext *, VM::String *name, VM::Value *args, int argc);
+ typedef VM::Value (*BuiltinMethod)(VM::ExecutionContext *, VM::Value *args, int argc);
+ void callRuntimeMethodImp(IR::Temp *result, const char* name, ActivationMethod method, IR::Expr *base, IR::ExprList *args);
+ void callRuntimeMethodImp(IR::Temp *result, const char* name, BuiltinMethod method, IR::ExprList *args);
+#define callRuntimeMethod(result, function, ...) \
+ callRuntimeMethodImp(result, isel_stringIfy(function), function, __VA_ARGS__)
+
+
+ VM::ExecutionEngine *_engine;
+ IR::BasicBlock *_block;
+ IR::Function* _function;
+ Assembler* _asm;
+};
+
class ISelFactory: public EvalISelFactory
{
public: