}
+// Custom call IC stub for Math.floor on this platform: not implemented
+// yet, so return undefined_value() to make the caller fall back to the
+// generic call stub.
+Object* CallStubCompiler::CompileMathFloorCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // TODO(872): implement this.
+ return Heap::undefined_value();
+}
+
+
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
}
-static void InstallCustomCallGenerator(
- Handle<JSFunction> holder_function,
- CallStubCompiler::CustomGeneratorOwner owner_flag,
- const char* function_name,
- int id) {
- Handle<JSObject> owner;
- if (owner_flag == CallStubCompiler::FUNCTION) {
- owner = Handle<JSObject>::cast(holder_function);
- } else {
- ASSERT(owner_flag == CallStubCompiler::INSTANCE_PROTOTYPE);
- owner = Handle<JSObject>(
- JSObject::cast(holder_function->instance_prototype()));
+// Maps a holder expression such as "Math" or "Array.prototype" to the
+// concrete JSObject a custom call generator should be installed on.
+// A bare name is looked up directly on the global object; a
+// "<Name>.prototype" form resolves <Name> to a function on the global
+// object and returns that function's prototype object.
+static Handle<JSObject> ResolveCustomCallGeneratorHolder(
+ Handle<Context> global_context,
+ const char* holder_expr) {
+ Handle<GlobalObject> global(global_context->global());
+ const char* period_pos = strchr(holder_expr, '.');
+ if (period_pos == NULL) {
+ // No '.': the expression names the holder object itself.
+ return Handle<JSObject>::cast(
+ GetProperty(global, Factory::LookupAsciiSymbol(holder_expr)));
 }
+ // Only the ".prototype" suffix is supported (checked in debug builds).
+ ASSERT_EQ(".prototype", period_pos);
+ Vector<const char> property(holder_expr, period_pos - holder_expr);
+ Handle<JSFunction> function = Handle<JSFunction>::cast(
+ GetProperty(global, Factory::LookupSymbol(property)));
+ return Handle<JSObject>(JSObject::cast(function->prototype()));
+}
+
+
+// Installs a custom call generator id on the builtin named
+// |function_name| found on |holder|, by storing the id in the shared
+// function info's function-data slot.
+static void InstallCustomCallGenerator(Handle<JSObject> holder,
+ const char* function_name,
+ int id) {
Handle<String> name = Factory::LookupAsciiSymbol(function_name);
- Handle<JSFunction> function(JSFunction::cast(owner->GetProperty(*name)));
+ Handle<JSFunction> function(JSFunction::cast(holder->GetProperty(*name)));
function->shared()->set_function_data(Smi::FromInt(id));
}
void Genesis::InstallCustomCallGenerators() {
HandleScope scope;
-#define INSTALL_CALL_GENERATOR(holder_fun, owner_flag, fun_name, name) \
- { \
- Handle<JSFunction> holder(global_context()->holder_fun##_function()); \
- const int id = CallStubCompiler::k##name##CallGenerator; \
- InstallCustomCallGenerator(holder, CallStubCompiler::owner_flag, \
- #fun_name, id); \
+#define INSTALL_CALL_GENERATOR(holder_expr, fun_name, name) \
+ { \
+ Handle<JSObject> holder = ResolveCustomCallGeneratorHolder( \
+ global_context(), #holder_expr); \
+ const int id = CallStubCompiler::k##name##CallGenerator; \
+ InstallCustomCallGenerator(holder, #fun_name, id); \
}
CUSTOM_CALL_IC_GENERATORS(INSTALL_CALL_GENERATOR)
#undef INSTALL_CALL_GENERATOR
}
+// andpd: bitwise AND of the packed doubles in dst and src XMM registers
+// (encoding 66 0F 54 /r).
+// NOTE(review): unlike the neighbouring SSE2 emitters this one does not
+// ASSERT(CpuFeatures::IsEnabled(SSE2)) -- confirm the omission is
+// intentional.
+void Assembler::andpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x54);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
}
-void Assembler::movdqa(const Operand& dst, XMMRegister src ) {
+// cmpltsd: dst = (dst < src) ? all-ones : all-zeros for the low scalar
+// double. Encoding F2 0F C2 /r ib, where immediate 1 selects the LT
+// predicate (Intel manual 2A, Table 3-18).
+void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0xC2);
+ emit_sse_operand(dst, src);
+ EMIT(1); // LT == 1
+}
+
+
+// movaps: register-to-register move of packed singles (encoding
+// 0F 28 /r, no prefix), used here as a short XMM-to-XMM copy.
+void Assembler::movaps(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x0F);
+ EMIT(0x28);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movdqa(const Operand& dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_sse_operand(dst, src);
}
+
+// psllq: shift the quadwords in |reg| left by the immediate |imm8|.
+// Encoding 66 0F 73 /6 ib -- the /6 opcode extension is produced by
+// passing esi (register code 6) as the reg field of the ModR/M byte.
+void Assembler::psllq(XMMRegister reg, int8_t imm8) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x73);
+ emit_sse_operand(esi, reg); // esi == 6
+ EMIT(imm8);
+}
+
+
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
emit_operand(ireg, adr);
void xorpd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
+ void andpd(XMMRegister dst, XMMRegister src);
+
void ucomisd(XMMRegister dst, XMMRegister src);
void movmskpd(Register dst, XMMRegister src);
+ void cmpltsd(XMMRegister dst, XMMRegister src);
+
+ void movaps(XMMRegister dst, XMMRegister src);
+
void movdqa(XMMRegister dst, const Operand& src);
void movdqa(const Operand& dst, XMMRegister src);
void movdqu(XMMRegister dst, const Operand& src);
void pxor(XMMRegister dst, XMMRegister src);
void ptest(XMMRegister dst, XMMRegister src);
+ void psllq(XMMRegister reg, int8_t imm8);
+
// Parallel XMM operations.
void movntdqa(XMMRegister src, const Operand& dst);
void movntdq(const Operand& dst, XMMRegister src);
} else if (f0byte == 0xA2 || f0byte == 0x31) {
AppendToBuffer("%s", f0mnem);
data += 2;
+ } else if (f0byte == 0x28) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movaps %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if ((f0byte & 0xF0) == 0x80) {
data += JumpConditional(data, branch_hint);
} else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (*data == 0x73) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("psllq %s,%d",
+ NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
+ } else if (*data == 0x54) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("andpd %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else {
UnimplementedInstruction();
}
NameOfXMMRegister(rm));
data++;
}
+ } else if (b2 == 0xC2) {
+ // Intel manual 2A, Table 3-18.
+ const char* const pseudo_op[] = {
+ "cmpeqsd",
+ "cmpltsd",
+ "cmplesd",
+ "cmpunordsd",
+ "cmpneqsd",
+ "cmpnltsd",
+ "cmpnlesd",
+ "cmpordsd"
+ };
+ AppendToBuffer("%s %s,%s",
+ pseudo_op[data[1]],
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data += 2;
} else {
if (mod != 0x3) {
AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
" %s",
tmp_buffer_.start());
return instr_len;
-}
+} // NOLINT (function is too long)
//------------------------------------------------------------------------------
}
+// Loads 2^power into |dst| by building the IEEE 754 double bit pattern
+// directly: the biased exponent (power + kExponentBias) is moved into
+// the low bits of |dst| and shifted up into the exponent field; with a
+// zero mantissa that is exactly 2^power.
+void MacroAssembler::LoadPowerOf2(XMMRegister dst,
+ Register scratch,
+ int power) {
+ ASSERT(is_uintn(power + HeapNumber::kExponentBias,
+ HeapNumber::kExponentBits));
+ mov(scratch, Immediate(power + HeapNumber::kExponentBias));
+ movd(dst, Operand(scratch));
+ psllq(dst, HeapNumber::kMantissaBits);
+}
+
+
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
Register instance_type,
Register scratch,
TypeInfo info,
Label* on_not_int32);
+ void LoadPowerOf2(XMMRegister dst, Register scratch, int power);
+
// Abort execution if argument is not a number. Used in debug code.
void AbortIfNotNumber(Register object);
}
+// Custom call IC stub for Math.floor (ia32). Uses SSE2 to compute floor
+// directly when the argument is a smi or a positive heap number; all
+// other cases (negative, NaN, non-number, allocation failure) fall back
+// to a regular invocation of the Math.floor function.
+Object* CallStubCompiler::CompileMathFloorCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ if (!CpuFeatures::IsSupported(SSE2)) return Heap::undefined_value();
+ CpuFeatures::Scope use_sse2(SSE2);
+
+ const int argc = arguments().immediate();
+
+ // If the object is not a JSObject or we got an unexpected number of
+ // arguments, bail out to the regular call.
+ if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+
+ Label miss;
+ GenerateNameCheck(name, &miss);
+
+ if (cell == NULL) {
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+
+ CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
+ &miss);
+ } else {
+ ASSERT(cell->value() == function);
+ GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ // Load the (only) argument into eax.
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+
+ // Check if the argument is a smi.
+ Label smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &smi);
+
+ // Check if the argument is a heap number and load its value into xmm0.
+ Label slow;
+ __ CheckMap(eax, Factory::heap_number_map(), &slow, true);
+ __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
+
+ // Check if the argument is strictly positive. Note this also
+ // discards NaN.
+ __ xorpd(xmm1, xmm1);
+ __ ucomisd(xmm0, xmm1);
+ __ j(below_equal, &slow);
+
+ // Do a truncating conversion.
+ __ cvttsd2si(eax, Operand(xmm0));
+
+ // Check if the result fits into a smi. Note this also checks for
+ // 0x80000000 which signals a failed conversion.
+ Label wont_fit_into_smi;
+ __ test(eax, Immediate(0xc0000000));
+ __ j(not_zero, &wont_fit_into_smi);
+
+ // Smi tag and return.
+ __ SmiTag(eax);
+ __ bind(&smi);
+ // argc == 1: pop the argument and the receiver.
+ __ ret(2 * kPointerSize);
+
+ // Check if the argument is < 2^kMantissaBits.
+ Label already_round;
+ __ bind(&wont_fit_into_smi);
+ __ LoadPowerOf2(xmm1, ebx, HeapNumber::kMantissaBits);
+ __ ucomisd(xmm0, xmm1);
+ __ j(above_equal, &already_round);
+
+ // Save a copy of the argument.
+ __ movaps(xmm2, xmm0);
+
+ // Compute (argument + 2^kMantissaBits) - 2^kMantissaBits.
+ __ addsd(xmm0, xmm1);
+ __ subsd(xmm0, xmm1);
+
+ // Compare the argument and the tentative result to get the right mask:
+ // if xmm2 < xmm0:
+ // xmm2 = 1...1
+ // else:
+ // xmm2 = 0...0
+ __ cmpltsd(xmm2, xmm0);
+
+ // Subtract 1 if the argument was less than the tentative result.
+ __ LoadPowerOf2(xmm1, ebx, 0);
+ __ andpd(xmm1, xmm2);
+ __ subsd(xmm0, xmm1);
+
+ // Return a new heap number.
+ __ AllocateHeapNumber(eax, ebx, edx, &slow);
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ __ ret(2 * kPointerSize);
+
+ // Return the argument (when it's an already round heap number).
+ __ bind(&already_round);
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ ret(2 * kPointerSize);
+
+ // Tail call the full function. We do not have to patch the receiver
+ // because the function makes no use of it.
+ __ bind(&slow);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+ __ bind(&miss);
+ // ecx: function name.
+ Object* obj = GenerateMissBranch();
+ if (obj->IsFailure()) return obj;
+
+ // Return the generated code.
+ return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+}
+
+
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
String* fname) {
ASSERT(generator_id >= 0 && generator_id < kNumCallGenerators);
switch (generator_id) {
-#define CALL_GENERATOR_CASE(ignored1, ignored2, ignored3, name) \
+#define CALL_GENERATOR_CASE(ignored1, ignored2, name) \
case k##name##CallGenerator: \
return CallStubCompiler::Compile##name##Call(object, \
holder, \
// Installation of custom call generators for the selected builtins is
// handled by the bootstrapper.
//
-// Each entry has a name of a global function (lowercased), a flag
-// controlling whether the generator is set on the function itself or
-// on its instance prototype, a name of a builtin function on the
-// function or its instance prototype (the one the generator is set
-// for), and a name of a generator itself (used to build ids and
-// generator function names).
-#define CUSTOM_CALL_IC_GENERATORS(V) \
- V(array, INSTANCE_PROTOTYPE, push, ArrayPush) \
- V(array, INSTANCE_PROTOTYPE, pop, ArrayPop) \
- V(string, INSTANCE_PROTOTYPE, charCodeAt, StringCharCodeAt) \
- V(string, INSTANCE_PROTOTYPE, charAt, StringCharAt) \
- V(string, FUNCTION, fromCharCode, StringFromCharCode)
+// Each entry has a name of a global object property holding an object
+// optionally followed by ".prototype" (this controls whether the
+// generator is set on the object itself or, in case it's a function,
+// on the its instance prototype), a name of a builtin function on the
+// object (the one the generator is set for), and a name of the
+// generator (used to build ids and generator function names).
+#define CUSTOM_CALL_IC_GENERATORS(V) \
+ V(Array.prototype, push, ArrayPush) \
+ V(Array.prototype, pop, ArrayPop) \
+ V(String.prototype, charCodeAt, StringCharCodeAt) \
+ V(String.prototype, charAt, StringCharAt) \
+ V(String, fromCharCode, StringFromCharCode) \
+ V(Math, floor, MathFloor)
class CallStubCompiler: public StubCompiler {
public:
- enum CustomGeneratorOwner {
- FUNCTION,
- INSTANCE_PROTOTYPE
- };
-
enum {
-#define DECLARE_CALL_GENERATOR_ID(ignored1, ignore2, ignored3, name) \
+#define DECLARE_CALL_GENERATOR_ID(ignored1, ignore2, name) \
k##name##CallGenerator,
CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR_ID)
#undef DECLARE_CALL_GENERATOR_ID
JSFunction* function,
String* name);
-#define DECLARE_CALL_GENERATOR(ignored1, ignored2, ignored3, name) \
- Object* Compile##name##Call(Object* object, \
- JSObject* holder, \
- JSGlobalPropertyCell* cell, \
- JSFunction* function, \
+#define DECLARE_CALL_GENERATOR(ignored1, ignored2, name) \
+ Object* Compile##name##Call(Object* object, \
+ JSObject* holder, \
+ JSGlobalPropertyCell* cell, \
+ JSFunction* function, \
String* fname);
CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
#undef DECLARE_CALL_GENERATOR
}
+// Custom call IC stub for Math.floor on this platform: not implemented
+// yet, so return undefined_value() to make the caller fall back to the
+// generic call stub.
+Object* CallStubCompiler::CompileMathFloorCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // TODO(872): implement this.
+ return Heap::undefined_value();
+}
+
+
Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
JSObject* holder,
String* name) {
}
}
+ // andpd, cmpltsd, movaps, psllq.
+ {
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope fscope(SSE2);
+ __ andpd(xmm0, xmm1);
+ __ andpd(xmm1, xmm2);
+
+ __ cmpltsd(xmm0, xmm1);
+ __ cmpltsd(xmm1, xmm2);
+
+ __ movaps(xmm0, xmm1);
+ __ movaps(xmm1, xmm2);
+
+ __ psllq(xmm0, 17);
+ __ psllq(xmm1, 42);
+ }
+ }
+
__ ret(0);
CodeDesc desc;
--- /dev/null
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --max-new-space-size=262144
+
+// Produces the number 0 at run time through a closure (0.5 - 0.5).
+// NOTE(review): presumably this keeps the value opaque to compile-time
+// constant folding so Math.floor sees a computed value -- confirm.
+function zero() {
+ var x = 0.5;
+ return (function() { return x - 0.5; })();
+}
+
+// Exercises Math.floor across smis, fractions, negative zero,
+// infinities, NaN, and the smi (2^30) and double-precision (2^52)
+// boundaries that an optimized floor implementation must handle.
+function test() {
+ assertEquals(0, Math.floor(0));
+ assertEquals(0, Math.floor(zero()));
+ assertEquals(1/-0, 1/Math.floor(-0)); // 0 == -0, so we use reciprocals.
+ assertEquals(Infinity, Math.floor(Infinity));
+ assertEquals(-Infinity, Math.floor(-Infinity));
+ assertNaN(Math.floor(NaN));
+
+ // Fractional values round toward negative infinity.
+ assertEquals(0, Math.floor(0.1));
+ assertEquals(0, Math.floor(0.5));
+ assertEquals(0, Math.floor(0.7));
+ assertEquals(-1, Math.floor(-0.1));
+ assertEquals(-1, Math.floor(-0.5));
+ assertEquals(-1, Math.floor(-0.7));
+ assertEquals(1, Math.floor(1));
+ assertEquals(1, Math.floor(1.1));
+ assertEquals(1, Math.floor(1.5));
+ assertEquals(1, Math.floor(1.7));
+ assertEquals(-1, Math.floor(-1));
+ assertEquals(-2, Math.floor(-1.1));
+ assertEquals(-2, Math.floor(-1.5));
+ assertEquals(-2, Math.floor(-1.7));
+
+ // Extreme magnitudes: MIN_VALUE floors to 0 / -1; values with no
+ // fractional part are returned unchanged.
+ assertEquals(0, Math.floor(Number.MIN_VALUE));
+ assertEquals(-1, Math.floor(-Number.MIN_VALUE));
+ assertEquals(Number.MAX_VALUE, Math.floor(Number.MAX_VALUE));
+ assertEquals(-Number.MAX_VALUE, Math.floor(-Number.MAX_VALUE));
+ assertEquals(Infinity, Math.floor(Infinity));
+ assertEquals(-Infinity, Math.floor(-Infinity));
+
+ // 2^30 is a smi boundary.
+ var two_30 = 1 << 30;
+
+ assertEquals(two_30, Math.floor(two_30));
+ assertEquals(two_30, Math.floor(two_30 + 0.1));
+ assertEquals(two_30, Math.floor(two_30 + 0.5));
+ assertEquals(two_30, Math.floor(two_30 + 0.7));
+
+ assertEquals(two_30 - 1, Math.floor(two_30 - 1));
+ assertEquals(two_30 - 1, Math.floor(two_30 - 1 + 0.1));
+ assertEquals(two_30 - 1, Math.floor(two_30 - 1 + 0.5));
+ assertEquals(two_30 - 1, Math.floor(two_30 - 1 + 0.7));
+
+ assertEquals(-two_30, Math.floor(-two_30));
+ assertEquals(-two_30, Math.floor(-two_30 + 0.1));
+ assertEquals(-two_30, Math.floor(-two_30 + 0.5));
+ assertEquals(-two_30, Math.floor(-two_30 + 0.7));
+
+ assertEquals(-two_30 + 1, Math.floor(-two_30 + 1));
+ assertEquals(-two_30 + 1, Math.floor(-two_30 + 1 + 0.1));
+ assertEquals(-two_30 + 1, Math.floor(-two_30 + 1 + 0.5));
+ assertEquals(-two_30 + 1, Math.floor(-two_30 + 1 + 0.7));
+
+ // 2^52 is a precision boundary.
+ var two_52 = (1 << 30) * (1 << 22);
+
+ assertEquals(two_52, Math.floor(two_52));
+ assertEquals(two_52, Math.floor(two_52 + 0.1));
+ assertEquals(two_52, two_52 + 0.5);
+ assertEquals(two_52, Math.floor(two_52 + 0.5));
+ assertEquals(two_52 + 1, two_52 + 0.7);
+ assertEquals(two_52 + 1, Math.floor(two_52 + 0.7));
+
+ assertEquals(two_52 - 1, Math.floor(two_52 - 1));
+ assertEquals(two_52 - 1, Math.floor(two_52 - 1 + 0.1));
+ assertEquals(two_52 - 1, Math.floor(two_52 - 1 + 0.5));
+ assertEquals(two_52 - 1, Math.floor(two_52 - 1 + 0.7));
+
+ assertEquals(-two_52, Math.floor(-two_52));
+ assertEquals(-two_52, Math.floor(-two_52 + 0.1));
+ assertEquals(-two_52, Math.floor(-two_52 + 0.5));
+ assertEquals(-two_52, Math.floor(-two_52 + 0.7));
+
+ assertEquals(-two_52 + 1, Math.floor(-two_52 + 1));
+ assertEquals(-two_52 + 1, Math.floor(-two_52 + 1 + 0.1));
+ assertEquals(-two_52 + 1, Math.floor(-two_52 + 1 + 0.5));
+ assertEquals(-two_52 + 1, Math.floor(-two_52 + 1 + 0.7));
+}
+
+
+// Test in a loop to cover the custom IC and GC-related issues.
+for (var i = 0; i < 500; i++) {
+ test();
+}