}
+void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
+ Comment cmnt(masm_, "[ GenerateSwapElements");
+
+ ASSERT_EQ(3, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
+
+ frame_->CallRuntime(Runtime::kSwapElements, 3);
+ frame_->EmitPush(r0);
+}
+
+
void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
Comment cmnt(masm_, "[ GenerateCallFunction");
// Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);
+ // Fast swapping of elements.
+ void GenerateSwapElements(ZoneList<Expression*>* args);
+
// Fast call for custom callbacks.
void GenerateCallFunction(ZoneList<Expression*>* args);
var pivot = a[pivot_index];
// Issue 95: Keep the pivot element out of the comparisons to avoid
// infinite recursion if comparefn(pivot, pivot) != 0.
- a[pivot_index] = a[from];
- a[from] = pivot;
+ %_SwapElements(a, from, pivot_index);
var low_end = from; // Upper bound of the elements lower than pivot.
var high_start = to; // Lower bound of the elements greater than pivot.
// From low_end to i are elements equal to pivot.
var element = a[i];
var order = %_CallFunction(global_receiver, element, pivot, comparefn);
if (order < 0) {
- a[i] = a[low_end];
- a[low_end] = element;
+ %_SwapElements(a, i, low_end);
i++;
low_end++;
} else if (order > 0) {
high_start--;
- a[i] = a[high_start];
- a[high_start] = element;
+ %_SwapElements(a, i, high_start);
} else { // order == 0
i++;
}
F(RegExpConstructResult, 3, 1) \
F(GetFromCache, 2, 1) \
F(NumberToString, 1, 1) \
+ F(SwapElements, 3, 1) \
F(MathPow, 2, 1) \
F(MathSin, 1, 1) \
F(MathCos, 1, 1) \
}
+class DeferredSwapElements: public DeferredCode {
+ public:
+ DeferredSwapElements(Register object, Register index1, Register index2)
+ : object_(object), index1_(index1), index2_(index2) {
+ set_comment("[ DeferredSwapElements");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register object_, index1_, index2_;
+};
+
+
+void DeferredSwapElements::Generate() {
+ __ push(object_);
+ __ push(index1_);
+ __ push(index2_);
+ __ CallRuntime(Runtime::kSwapElements, 3);
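+ // The call's result is ignored; the caller pushes undefined after BindExit.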
+}
+
+
+void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
+ // Note: this code assumes that the indices passed are within the
+ // elements' bounds and refer to valid (non-hole) values.
+ Comment cmnt(masm_, "[ GenerateSwapElements");
+
+ ASSERT_EQ(3, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
+
+ Result index2 = frame_->Pop();
+ index2.ToRegister();
+
+ Result index1 = frame_->Pop();
+ index1.ToRegister();
+
+ Result object = frame_->Pop();
+ object.ToRegister();
+
+ Result tmp1 = allocator()->Allocate();
+ tmp1.ToRegister();
+ Result tmp2 = allocator()->Allocate();
+ tmp2.ToRegister();
+
+ frame_->Spill(object.reg());
+ frame_->Spill(index1.reg());
+ frame_->Spill(index2.reg());
+
+ DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
+ index1.reg(),
+ index2.reg());
+
+ // Fetch the map and check that the array is in the fast case.
+ // Check that the object doesn't require security checks and
+ // has no indexed interceptor.
+ __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
+ deferred->Branch(less);
+ __ movzx_b(tmp1.reg(), FieldOperand(tmp1.reg(), Map::kBitFieldOffset));
+ __ test(tmp1.reg(), Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
+ deferred->Branch(not_zero);
+
+ // Check that the object's elements are in the fast case.
+ __ mov(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
+ __ cmp(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
+ Immediate(Factory::fixed_array_map()));
+ deferred->Branch(not_equal);
+
+ // Smi-tagging is equivalent to multiplying by 2.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+
+ // Check that both indices are smis.
+ __ mov(tmp2.reg(), index1.reg());
+ __ or_(tmp2.reg(), Operand(index2.reg()));
+ __ test(tmp2.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+
+ // Compute the addresses of the two elements into index1 and index2.
+ __ lea(index1.reg(), FieldOperand(tmp1.reg(),
+ index1.reg(),
+ times_half_pointer_size, // index1 is Smi
+ FixedArray::kHeaderSize));
+ __ lea(index2.reg(), FieldOperand(tmp1.reg(),
+ index2.reg(),
+ times_half_pointer_size, // index2 is Smi
+ FixedArray::kHeaderSize));
+
+ // Swap elements.
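+ // The object register is no longer needed here and is reused as a scratch register.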
+ __ mov(object.reg(), Operand(index1.reg(), 0));
+ __ mov(tmp2.reg(), Operand(index2.reg(), 0));
+ __ mov(Operand(index2.reg(), 0), object.reg());
+ __ mov(Operand(index1.reg(), 0), tmp2.reg());
+
+ Label done;
+ __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
+ // Possible optimization: check that both values are smis
+ // (OR them together and test against the smi mask).
+
+ __ mov(tmp2.reg(), tmp1.reg());
+ RecordWriteStub recordWrite1(tmp2.reg(), index1.reg(), object.reg());
+ __ CallStub(&recordWrite1);
+
+ RecordWriteStub recordWrite2(tmp1.reg(), index2.reg(), object.reg());
+ __ CallStub(&recordWrite2);
+
+ __ bind(&done);
+
+ deferred->BindExit();
+ frame_->Push(Factory::undefined_value());
+}
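
The two tricks above are easy to verify in isolation: OR-ing the indices and
testing the smi tag mask rejects the pair if either value is not a smi, and
scaling a smi-tagged index by half the pointer size yields the same byte
offset as scaling the untagged index by the full pointer size. A standalone
sketch of both facts, using local stand-in constants for the V8 definitions
(assumed values for a 32-bit ia32 build with a single smi tag bit):

    #include <cassert>
    #include <cstdint>

    // Local stand-ins for the V8 constants referenced above; the values are
    // assumptions for a 32-bit ia32 build, not taken from the patch itself.
    const intptr_t kSmiTagSize = 1;
    const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;  // low bit must be 0
    const intptr_t kPointerSize = 4;
    const intptr_t kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length

    static intptr_t SmiTag(intptr_t value) { return value << kSmiTagSize; }

    int main() {
      // One test covers both indices: OR the values, then test the tag mask.
      // If either operand has its tag bit set, so does the OR.
      intptr_t smi_a = SmiTag(3);
      intptr_t smi_b = SmiTag(7);
      intptr_t non_smi = SmiTag(5) | 1;  // heap-object-style tagged value
      assert(((smi_a | smi_b) & kSmiTagMask) == 0);
      assert(((smi_a | non_smi) & kSmiTagMask) != 0);

      // Smi-tagging multiplies by 2, so scaling the tagged index by
      // kPointerSize / 2 (times_half_pointer_size) reaches the same byte
      // offset as scaling the raw index by kPointerSize.
      for (intptr_t index = 0; index < 16; index++) {
        intptr_t via_smi = SmiTag(index) * (kPointerSize / 2) + kFixedArrayHeaderSize;
        intptr_t direct = index * kPointerSize + kFixedArrayHeaderSize;
        assert(via_smi == direct);
      }
      return 0;
    }
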
+
+
void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
Comment cmnt(masm_, "[ GenerateCallFunction");
// Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);
+ // Fast swapping of elements.
+ void GenerateSwapElements(ZoneList<Expression*>* args);
+
// Fast call for custom callbacks.
void GenerateCallFunction(ZoneList<Expression*>* args);
// Clear the use of the inlined version.
static void ClearInlinedVersion(Address address);
- private:
// Bit mask to be tested against bit field for the cases when
// generic stub should go into slow case.
// Access check is necessary explicitly since generic stub does not perform
static const int kSlowCaseBitFieldMask =
(1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
+ private:
// Update the inline cache.
void UpdateCaches(LookupResult* lookup,
State state,
}
+static Object* Runtime_SwapElements(Arguments args) {
+ HandleScope handle_scope;
+
+ ASSERT_EQ(3, args.length());
+
+ Handle<Object> object = args.at<Object>(0);
+ Handle<Object> key1 = args.at<Object>(1);
+ Handle<Object> key2 = args.at<Object>(2);
+
+ uint32_t index1, index2;
+ // This should be the most common case.
+ if (object->IsJSObject()
+ && Array::IndexFromObject(*key1, &index1)
+ && Array::IndexFromObject(*key2, &index2)) {
+ Handle<JSObject> jsobject = Handle<JSObject>::cast(object);
+ Handle<Object> tmp1 = GetElement(jsobject, index1);
+ Handle<Object> tmp2 = GetElement(jsobject, index2);
+
+ SetElement(jsobject, index1, tmp2);
+ SetElement(jsobject, index2, tmp1);
+ } else {
+ Handle<Object> tmp1 = GetProperty(object, key1);
+ Handle<Object> tmp2 = GetProperty(object, key2);
+
+ SetProperty(object, key1, tmp2, NONE);
+ SetProperty(object, key2, tmp1, NONE);
+ }
+
+ return Heap::undefined_value();
+}
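
The fallback path reads both slots before writing either one, so the swap
stays correct even if the two keys alias each other. A minimal sketch of this
read-both-then-write-both pattern, using std::map as a hypothetical stand-in
for generic property access (not V8 API):

    #include <cassert>
    #include <map>
    #include <string>

    // Read both values first, then write both, mirroring Runtime_SwapElements.
    static void SwapProperties(std::map<std::string, int>* object,
                               const std::string& key1,
                               const std::string& key2) {
      int tmp1 = (*object)[key1];
      int tmp2 = (*object)[key2];
      (*object)[key1] = tmp2;
      (*object)[key2] = tmp1;
    }

    int main() {
      std::map<std::string, int> o;
      o["a"] = 1;
      o["b"] = 2;
      SwapProperties(&o, "a", "b");
      assert(o["a"] == 2 && o["b"] == 1);
      SwapProperties(&o, "a", "a");  // aliasing keys leave the value unchanged
      assert(o["a"] == 2);
      return 0;
    }
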
+
+
// Returns an array that tells you where in the [0, length) interval an array
// might have elements. Can either return keys or intervals. Keys can have
// gaps in (undefined). Intervals can also span over some undefined keys.
F(GetArrayKeys, 2, 1) \
F(MoveArrayContents, 2, 1) \
F(EstimateNumberOfElements, 1, 1) \
+ F(SwapElements, 3, 1) \
\
/* Getters and Setters */ \
F(DefineAccessor, -1 /* 4 or 5 */, 1) \
}
+void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
+ Comment cmnt(masm_, "[ GenerateSwapElements");
+
+ ASSERT_EQ(3, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
+
+ Result result = frame_->CallRuntime(Runtime::kSwapElements, 3);
+ frame_->Push(&result);
+}
+
+
void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
Comment cmnt(masm_, "[ GenerateCallFunction");
// Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);
+ // Fast swapping of elements.
+ void GenerateSwapElements(ZoneList<Expression*>* args);
+
// Fast call for custom callbacks.
void GenerateCallFunction(ZoneList<Expression*>* args);
// That can only be invoked on Array.prototype.
"FinishArrayPrototypeSetup": true,
+ "_SwapElements": true,
+
// Performance critical function which cannot afford type checks.
"_CallFunction": true,